mirror of
https://github.com/ansible-collections/community.general.git
synced 2026-05-06 21:32:49 +00:00
Initial commit
This commit is contained in:
141
plugins/modules/remote_management/cobbler/cobbler_sync.py
Normal file
141
plugins/modules/remote_management/cobbler/cobbler_sync.py
Normal file
@@ -0,0 +1,141 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2018, Dag Wieers (dagwieers) <dag@wieers.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: cobbler_sync
|
||||
short_description: Sync Cobbler
|
||||
description:
|
||||
- Sync Cobbler to commit changes.
|
||||
options:
|
||||
host:
|
||||
description:
|
||||
- The name or IP address of the Cobbler system.
|
||||
default: 127.0.0.1
|
||||
port:
|
||||
description:
|
||||
- Port number to be used for REST connection.
|
||||
- The default value depends on parameter C(use_ssl).
|
||||
username:
|
||||
description:
|
||||
- The username to log in to Cobbler.
|
||||
default: cobbler
|
||||
password:
|
||||
description:
|
||||
- The password to log in to Cobbler.
|
||||
required: yes
|
||||
use_ssl:
|
||||
description:
|
||||
- If C(no), an HTTP connection will be used instead of the default HTTPS connection.
|
||||
type: bool
|
||||
default: 'yes'
|
||||
validate_certs:
|
||||
description:
|
||||
- If C(no), SSL certificates will not be validated.
|
||||
- This should only be set to C(no) when used on personally controlled sites using self-signed certificates.
|
||||
type: bool
|
||||
default: 'yes'
|
||||
author:
|
||||
- Dag Wieers (@dagwieers)
|
||||
todo:
|
||||
notes:
|
||||
- Concurrently syncing Cobbler is bound to fail with weird errors.
|
||||
- On python 2.7.8 and older (i.e. on RHEL7) you may need to tweak the python behaviour to disable certificate validation.
|
||||
More information at L(Certificate verification in Python standard library HTTP clients,https://access.redhat.com/articles/2039753).
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Commit Cobbler changes
|
||||
cobbler_sync:
|
||||
host: cobbler01
|
||||
username: cobbler
|
||||
password: MySuperSecureP4sswOrd
|
||||
run_once: yes
|
||||
delegate_to: localhost
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
# Default return values
|
||||
'''
|
||||
|
||||
import datetime
|
||||
import ssl
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.six.moves import xmlrpc_client
|
||||
from ansible.module_utils._text import to_text
|
||||
|
||||
|
||||
def main():
    """Entry point: log in to the Cobbler XMLRPC API and trigger a sync.

    A sync is always attempted (outside check mode), so the module always
    reports ``changed=True``.
    """
    module = AnsibleModule(
        argument_spec=dict(
            host=dict(type='str', default='127.0.0.1'),
            port=dict(type='int'),
            username=dict(type='str', default='cobbler'),
            password=dict(type='str', no_log=True),
            use_ssl=dict(type='bool', default=True),
            validate_certs=dict(type='bool', default=True),
        ),
        supports_check_mode=True,
    )

    username = module.params['username']
    password = module.params['password']
    port = module.params['port']
    use_ssl = module.params['use_ssl']
    validate_certs = module.params['validate_certs']

    # Derive the URL pieces that were not given explicitly; they are stored
    # back into module.params so the format(**module.params) below can use them.
    module.params['proto'] = 'https' if use_ssl else 'http'
    if not port:
        module.params['port'] = '443' if use_ssl else '80'

    # Syncing always changes state on the Cobbler server.
    result = dict(
        changed=True,
    )

    start = datetime.datetime.utcnow()

    ssl_context = None
    if not validate_certs:
        try:
            # Python 2.7.9 and newer: build an unverified context and pass it
            # explicitly to ServerProxy below (PEP 476 opt-out).
            # FIX: the original called the non-existent
            # ssl.create_unverified_context() and then patched the wrong
            # attributes (ssl._create_default_context /
            # ssl._create_unverified_https_context), so disabling certificate
            # validation always crashed with AttributeError.
            ssl_context = ssl._create_unverified_context()
        except AttributeError:
            # Python 2.7.8 and older never verified HTTPS certificates by
            # default, so there is nothing to disable.
            pass

    url = '{proto}://{host}:{port}/cobbler_api'.format(**module.params)
    if ssl_context:
        conn = xmlrpc_client.ServerProxy(url, context=ssl_context)
    else:
        conn = xmlrpc_client.Server(url)

    try:
        token = conn.login(username, password)
    except xmlrpc_client.Fault as e:
        module.fail_json(msg="Failed to log in to Cobbler '{url}' as '{username}'. {error}".format(url=url, error=to_text(e), **module.params))
    except Exception as e:
        module.fail_json(msg="Connection to '{url}' failed. {error}".format(url=url, error=to_text(e)))

    if not module.check_mode:
        try:
            conn.sync(token)
        except Exception as e:
            module.fail_json(msg="Failed to sync Cobbler. {error}".format(error=to_text(e)))

    elapsed = datetime.datetime.utcnow() - start
    module.exit_json(elapsed=elapsed.seconds, **result)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
336
plugins/modules/remote_management/cobbler/cobbler_system.py
Normal file
336
plugins/modules/remote_management/cobbler/cobbler_system.py
Normal file
@@ -0,0 +1,336 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2018, Dag Wieers (dagwieers) <dag@wieers.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: cobbler_system
|
||||
short_description: Manage system objects in Cobbler
|
||||
description:
|
||||
- Add, modify or remove systems in Cobbler
|
||||
options:
|
||||
host:
|
||||
description:
|
||||
- The name or IP address of the Cobbler system.
|
||||
default: 127.0.0.1
|
||||
port:
|
||||
description:
|
||||
- Port number to be used for REST connection.
|
||||
- The default value depends on parameter C(use_ssl).
|
||||
username:
|
||||
description:
|
||||
- The username to log in to Cobbler.
|
||||
default: cobbler
|
||||
password:
|
||||
description:
|
||||
- The password to log in to Cobbler.
|
||||
required: yes
|
||||
use_ssl:
|
||||
description:
|
||||
- If C(no), an HTTP connection will be used instead of the default HTTPS connection.
|
||||
type: bool
|
||||
default: 'yes'
|
||||
validate_certs:
|
||||
description:
|
||||
- If C(no), SSL certificates will not be validated.
|
||||
- This should only be set to C(no) when used on personally controlled sites using self-signed certificates.
|
||||
type: bool
|
||||
default: 'yes'
|
||||
name:
|
||||
description:
|
||||
- The system name to manage.
|
||||
properties:
|
||||
description:
|
||||
- A dictionary with system properties.
|
||||
interfaces:
|
||||
description:
|
||||
- A list of dictionaries containing interface options.
|
||||
sync:
|
||||
description:
|
||||
- Sync on changes.
|
||||
- Concurrently syncing Cobbler is bound to fail.
|
||||
type: bool
|
||||
default: no
|
||||
state:
|
||||
description:
|
||||
- Whether the system should be present, absent or a query is made.
|
||||
choices: [ absent, present, query ]
|
||||
default: present
|
||||
author:
|
||||
- Dag Wieers (@dagwieers)
|
||||
notes:
|
||||
- Concurrently syncing Cobbler is bound to fail with weird errors.
|
||||
- On python 2.7.8 and older (i.e. on RHEL7) you may need to tweak the python behaviour to disable certificate validation.
|
||||
More information at L(Certificate verification in Python standard library HTTP clients,https://access.redhat.com/articles/2039753).
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Ensure the system exists in Cobbler
|
||||
cobbler_system:
|
||||
host: cobbler01
|
||||
username: cobbler
|
||||
password: MySuperSecureP4sswOrd
|
||||
name: myhost
|
||||
properties:
|
||||
profile: CentOS6-x86_64
|
||||
name_servers: [ 2.3.4.5, 3.4.5.6 ]
|
||||
name_servers_search: foo.com, bar.com
|
||||
interfaces:
|
||||
eth0:
|
||||
macaddress: 00:01:02:03:04:05
|
||||
ipaddress: 1.2.3.4
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Enable network boot in Cobbler
|
||||
cobbler_system:
|
||||
host: bdsol-aci-cobbler-01
|
||||
username: cobbler
|
||||
password: ins3965!
|
||||
name: bdsol-aci51-apic1.cisco.com
|
||||
properties:
|
||||
netboot_enabled: yes
|
||||
state: present
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Query all systems in Cobbler
|
||||
cobbler_system:
|
||||
host: cobbler01
|
||||
username: cobbler
|
||||
password: MySuperSecureP4sswOrd
|
||||
state: query
|
||||
register: cobbler_systems
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Query a specific system in Cobbler
|
||||
cobbler_system:
|
||||
host: cobbler01
|
||||
username: cobbler
|
||||
password: MySuperSecureP4sswOrd
|
||||
name: '{{ inventory_hostname }}'
|
||||
state: query
|
||||
register: cobbler_properties
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Ensure the system does not exist in Cobbler
|
||||
cobbler_system:
|
||||
host: cobbler01
|
||||
username: cobbler
|
||||
password: MySuperSecureP4sswOrd
|
||||
name: myhost
|
||||
state: absent
|
||||
delegate_to: localhost
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
systems:
|
||||
description: List of systems
|
||||
returned: C(state=query) and C(name) is not provided
|
||||
type: list
|
||||
system:
|
||||
description: (Resulting) information about the system we are working with
|
||||
returned: when C(name) is provided
|
||||
type: dict
|
||||
'''
|
||||
|
||||
import copy
|
||||
import datetime
|
||||
import ssl
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.six import iteritems
|
||||
from ansible.module_utils.six.moves import xmlrpc_client
|
||||
from ansible.module_utils._text import to_text
|
||||
|
||||
# Map the module's interface option names (as accepted in the `interfaces`
# parameter) to the property names Cobbler's modify_system() API expects.
IFPROPS_MAPPING = dict(
    bondingopts='bonding_opts',
    bridgeopts='bridge_opts',
    connected_mode='connected_mode',
    cnames='cnames',
    dhcptag='dhcp_tag',
    dnsname='dns_name',
    ifgateway='if_gateway',
    interfacetype='interface_type',
    interfacemaster='interface_master',
    ipaddress='ip_address',
    ipv6address='ipv6_address',
    ipv6defaultgateway='ipv6_default_gateway',
    ipv6mtu='ipv6_mtu',
    ipv6prefix='ipv6_prefix',
    # FIX: was 'ipv6_secondariesu' (stray trailing 'u'), which is not a
    # valid Cobbler interface property and caused lookups to fail.
    ipv6secondaries='ipv6_secondaries',
    ipv6staticroutes='ipv6_static_routes',
    macaddress='mac_address',
    management='management',
    mtu='mtu',
    netmask='netmask',
    static='static',
    staticroutes='static_routes',
    virtbridge='virt_bridge',
)
|
||||
|
||||
|
||||
def getsystem(conn, name, token):
    """Look up a single Cobbler system by name.

    Returns the system's property dict, or an empty dict when no name is
    given or no matching system exists on the server.
    """
    if not name:
        return dict()
    # find_system returns a (possibly empty) list of matching systems.
    matches = conn.find_system(dict(name=name), token)
    return matches[0] if matches else dict()
|
||||
|
||||
|
||||
def main():
    """Entry point: add, modify, remove or query system objects in Cobbler."""
    module = AnsibleModule(
        argument_spec=dict(
            host=dict(type='str', default='127.0.0.1'),
            port=dict(type='int'),
            username=dict(type='str', default='cobbler'),
            password=dict(type='str', no_log=True),
            use_ssl=dict(type='bool', default=True),
            validate_certs=dict(type='bool', default=True),
            name=dict(type='str'),
            interfaces=dict(type='dict'),
            properties=dict(type='dict'),
            sync=dict(type='bool', default=False),
            state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
        ),
        supports_check_mode=True,
    )

    username = module.params['username']
    password = module.params['password']
    port = module.params['port']
    use_ssl = module.params['use_ssl']
    validate_certs = module.params['validate_certs']

    name = module.params['name']
    state = module.params['state']

    # Derive the URL pieces that were not given explicitly.
    module.params['proto'] = 'https' if use_ssl else 'http'
    if not port:
        module.params['port'] = '443' if use_ssl else '80'

    result = dict(
        changed=False,
    )

    start = datetime.datetime.utcnow()

    ssl_context = None
    if not validate_certs:
        try:
            # Python 2.7.9 and newer: build an unverified context and pass it
            # explicitly to ServerProxy below (PEP 476 opt-out).
            # FIX: the original called the non-existent
            # ssl.create_unverified_context() and patched the wrong attributes,
            # so disabling certificate validation crashed with AttributeError.
            ssl_context = ssl._create_unverified_context()
        except AttributeError:
            # Python 2.7.8 and older never verified HTTPS certificates by
            # default, so there is nothing to disable.
            pass

    url = '{proto}://{host}:{port}/cobbler_api'.format(**module.params)
    if ssl_context:
        conn = xmlrpc_client.ServerProxy(url, context=ssl_context)
    else:
        conn = xmlrpc_client.Server(url)

    try:
        token = conn.login(username, password)
    except xmlrpc_client.Fault as e:
        module.fail_json(msg="Failed to log in to Cobbler '{url}' as '{username}'. {error}".format(url=url, error=to_text(e), **module.params))
    except Exception as e:
        module.fail_json(msg="Connection to '{url}' failed. {error}".format(url=url, error=to_text(e), **module.params))

    # Current state of the system (empty dict when it does not exist).
    system = getsystem(conn, name, token)

    if state == 'query':
        if name:
            result['system'] = system
        else:
            # Return a list of dictionaries with all systems.
            result['systems'] = conn.get_systems()

    elif state == 'present':

        if system:
            # Update existing entry
            system_id = conn.get_system_handle(name, token)

            # FIX: guard against properties being None (only the create
            # branch had this guard before), and skip unknown keys after
            # warning instead of crashing with a KeyError on system[key].
            if module.params['properties']:
                for key, value in iteritems(module.params['properties']):
                    if key not in system:
                        module.warn("Property '{0}' is not a valid system property.".format(key))
                        continue
                    if system[key] != value:
                        try:
                            conn.modify_system(system_id, key, value, token)
                            result['changed'] = True
                        except Exception as e:
                            module.fail_json(msg="Unable to change '{0}' to '{1}'. {2}".format(key, value, e))

        else:
            # Create a new entry
            system_id = conn.new_system(token)
            conn.modify_system(system_id, 'name', name, token)
            result['changed'] = True

            if module.params['properties']:
                for key, value in iteritems(module.params['properties']):
                    try:
                        conn.modify_system(system_id, key, value, token)
                    except Exception as e:
                        module.fail_json(msg="Unable to change '{0}' to '{1}'. {2}".format(key, value, e))

        # Add interface properties
        interface_properties = dict()
        if module.params['interfaces']:
            for device, values in iteritems(module.params['interfaces']):
                for key, value in iteritems(values):
                    if key == 'name':
                        continue
                    if key not in IFPROPS_MAPPING:
                        # FIX: skip unknown interface options after warning
                        # instead of crashing with a KeyError in the mapping
                        # lookup below.
                        module.warn("Property '{0}' is not a valid system property.".format(key))
                        continue
                    if not system or system['interfaces'][device][IFPROPS_MAPPING[key]] != value:
                        result['changed'] = True
                        interface_properties['{0}-{1}'.format(key, device)] = value

            if result['changed'] is True:
                conn.modify_system(system_id, "modify_interface", interface_properties, token)

        # Only save when the entry was changed
        if not module.check_mode and result['changed']:
            conn.save_system(system_id, token)

    elif state == 'absent':

        if system:
            if not module.check_mode:
                conn.remove_system(name, token)
            result['changed'] = True

    # Syncing concurrently is known to fail, so only sync when asked to and
    # something actually changed.
    if not module.check_mode and module.params['sync'] and result['changed']:
        try:
            conn.sync(token)
        except Exception as e:
            module.fail_json(msg="Failed to sync Cobbler. {0}".format(to_text(e)))

    if state in ('absent', 'present'):
        result['system'] = getsystem(conn, name, token)

        if module._diff:
            result['diff'] = dict(before=system, after=result['system'])

    elapsed = datetime.datetime.utcnow() - start
    module.exit_json(elapsed=elapsed.seconds, **result)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
211
plugins/modules/remote_management/dellemc/idrac_firmware.py
Normal file
211
plugins/modules/remote_management/dellemc/idrac_firmware.py
Normal file
@@ -0,0 +1,211 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
#
|
||||
# Dell EMC OpenManage Ansible Modules
|
||||
# Version 2.0
|
||||
# Copyright (C) 2018-2019 Dell Inc. or its subsidiaries. All Rights Reserved.
|
||||
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
#
|
||||
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: idrac_firmware
|
||||
short_description: Firmware update from a repository on a network share (CIFS, NFS).
|
||||
description:
|
||||
- Update the Firmware by connecting to a network share (either CIFS or NFS) that contains a catalog of
|
||||
available updates.
|
||||
- Network share should contain a valid repository of Update Packages (DUPs) and a catalog file describing the DUPs.
|
||||
- All applicable updates contained in the repository are applied to the system.
|
||||
- This feature is available only with iDRAC Enterprise License.
|
||||
options:
|
||||
idrac_ip:
|
||||
description: iDRAC IP Address.
|
||||
type: str
|
||||
required: True
|
||||
idrac_user:
|
||||
description: iDRAC username.
|
||||
type: str
|
||||
required: True
|
||||
idrac_password:
|
||||
description: iDRAC user password.
|
||||
type: str
|
||||
required: True
|
||||
aliases: ['idrac_pwd']
|
||||
idrac_port:
|
||||
description: iDRAC port.
|
||||
type: int
|
||||
default: 443
|
||||
share_name:
|
||||
description: CIFS or NFS Network share.
|
||||
type: str
|
||||
required: True
|
||||
share_user:
|
||||
description: Network share user in the format 'user@domain' or 'domain\\user' if user is
|
||||
part of a domain else 'user'. This option is mandatory for CIFS Network Share.
|
||||
type: str
|
||||
share_password:
|
||||
description: Network share user password. This option is mandatory for CIFS Network Share.
|
||||
type: str
|
||||
aliases: ['share_pwd']
|
||||
share_mnt:
|
||||
description: Local mount path of the network share with read-write permission for ansible user.
|
||||
This option is mandatory for Network Share.
|
||||
type: str
|
||||
required: True
|
||||
reboot:
|
||||
description: Whether to reboot after applying the updates or not.
|
||||
type: bool
|
||||
default: false
|
||||
job_wait:
|
||||
description: Whether to wait for job completion or not.
|
||||
type: bool
|
||||
default: true
|
||||
catalog_file_name:
|
||||
required: False
|
||||
description: Catalog file name relative to the I(share_name).
|
||||
type: str
|
||||
default: 'Catalog.xml'
|
||||
|
||||
requirements:
|
||||
- "omsdk"
|
||||
- "python >= 2.7.5"
|
||||
author: "Rajeev Arakkal (@rajeevarakkal)"
|
||||
'''
|
||||
|
||||
EXAMPLES = """
|
||||
---
|
||||
- name: Update firmware from repository on a Network Share
|
||||
idrac_firmware:
|
||||
idrac_ip: "192.168.0.1"
|
||||
idrac_user: "user_name"
|
||||
idrac_password: "user_password"
|
||||
share_name: "192.168.0.0:/share"
|
||||
share_user: "share_user_name"
|
||||
share_password: "share_user_pwd"
|
||||
share_mnt: "/mnt/share"
|
||||
reboot: True
|
||||
job_wait: True
|
||||
catalog_file_name: "Catalog.xml"
|
||||
"""
|
||||
|
||||
RETURN = """
|
||||
---
|
||||
msg:
|
||||
type: str
|
||||
description: Over all firmware update status.
|
||||
returned: always
|
||||
sample: "Successfully updated the firmware."
|
||||
update_status:
|
||||
type: dict
|
||||
description: Firmware Update job and progress details from the iDRAC.
|
||||
returned: success
|
||||
sample: {
|
||||
'InstanceID': 'JID_XXXXXXXXXXXX',
|
||||
'JobState': 'Completed',
|
||||
'Message': 'Job completed successfully.',
|
||||
'MessageId': 'REDXXX',
|
||||
'Name': 'Repository Update',
|
||||
'JobStartTime': 'NA',
|
||||
'Status': 'Success',
|
||||
}
|
||||
"""
|
||||
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.remote_management.dellemc.dellemc_idrac import iDRACConnection
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
try:
|
||||
from omsdk.sdkcreds import UserCredentials
|
||||
from omsdk.sdkfile import FileOnShare
|
||||
HAS_OMSDK = True
|
||||
except ImportError:
|
||||
HAS_OMSDK = False
|
||||
|
||||
|
||||
def _validate_catalog_file(catalog_file_name):
|
||||
normilized_file_name = catalog_file_name.lower()
|
||||
if not normilized_file_name:
|
||||
raise ValueError('catalog_file_name should be a non-empty string.')
|
||||
elif not normilized_file_name.endswith("xml"):
|
||||
raise ValueError('catalog_file_name should be an XML file.')
|
||||
|
||||
|
||||
def update_firmware(idrac, module):
    """Update firmware from a repository on a network share and return the job details."""
    params = module.params
    result = {'changed': False, 'update_status': {}}

    try:
        catalog_path = params['share_name'] + "/" + params['catalog_file_name']
        repo_share = FileOnShare(remote=catalog_path,
                                 mount_point=params['share_mnt'],
                                 isFolder=False,
                                 creds=UserCredentials(params['share_user'],
                                                       params['share_password']))

        # 12th and 13th generation servers do not use the Redfish update path.
        idrac.use_redfish = not ('12' in idrac.ServerGeneration or '13' in idrac.ServerGeneration)

        apply_update = True
        result['update_status'] = idrac.update_mgr.update_from_repo(repo_share,
                                                                    apply_update,
                                                                    params['reboot'],
                                                                    params['job_wait'])
    except RuntimeError as e:
        module.fail_json(msg=str(e))

    status_info = result['update_status']
    if "Status" in status_info:
        if status_info['Status'] != "Success":
            module.fail_json(msg='Failed to update firmware.', update_status=status_info)
        # Only report a change when we actually waited for the job to finish.
        if params['job_wait']:
            result['changed'] = True
    return result
|
||||
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec={
|
||||
"idrac_ip": {"required": True, "type": 'str'},
|
||||
"idrac_user": {"required": True, "type": 'str'},
|
||||
"idrac_password": {"required": True, "type": 'str', "aliases": ['idrac_pwd'], "no_log": True},
|
||||
"idrac_port": {"required": False, "default": 443, "type": 'int'},
|
||||
|
||||
"share_name": {"required": True, "type": 'str'},
|
||||
"share_user": {"required": False, "type": 'str'},
|
||||
"share_password": {"required": False, "type": 'str', "aliases": ['share_pwd'], "no_log": True},
|
||||
"share_mnt": {"required": True, "type": 'str'},
|
||||
|
||||
"catalog_file_name": {"required": False, "type": 'str', "default": "Catalog.xml"},
|
||||
"reboot": {"required": False, "type": 'bool', "default": False},
|
||||
"job_wait": {"required": False, "type": 'bool', "default": True},
|
||||
},
|
||||
|
||||
supports_check_mode=False)
|
||||
|
||||
try:
|
||||
# Validate the catalog file
|
||||
_validate_catalog_file(module.params['catalog_file_name'])
|
||||
# Connect to iDRAC and update firmware
|
||||
with iDRACConnection(module.params) as idrac:
|
||||
update_status = update_firmware(idrac, module)
|
||||
except (ImportError, ValueError, RuntimeError) as e:
|
||||
module.fail_json(msg=str(e))
|
||||
|
||||
module.exit_json(msg='Successfully updated the firmware.', update_status=update_status)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -0,0 +1,305 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
#
|
||||
# Dell EMC OpenManage Ansible Modules
|
||||
# Version 2.0
|
||||
# Copyright (C) 2019 Dell Inc. or its subsidiaries. All Rights Reserved.
|
||||
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
#
|
||||
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: idrac_server_config_profile
|
||||
short_description: Export or Import iDRAC Server Configuration Profile (SCP).
|
||||
description:
|
||||
- Export the Server Configuration Profile (SCP) from the iDRAC or Import from a network share or a local file.
|
||||
options:
|
||||
idrac_ip:
|
||||
description: iDRAC IP Address.
|
||||
type: str
|
||||
required: True
|
||||
idrac_user:
|
||||
description: iDRAC username.
|
||||
type: str
|
||||
required: True
|
||||
idrac_password:
|
||||
description: iDRAC user password.
|
||||
type: str
|
||||
required: True
|
||||
aliases: ['idrac_pwd']
|
||||
idrac_port:
|
||||
description: iDRAC port.
|
||||
type: int
|
||||
default: 443
|
||||
command:
|
||||
description:
|
||||
- If C(import), will perform SCP import operations.
|
||||
- If C(export), will perform SCP export operations.
|
||||
choices: ['import', 'export']
|
||||
default: 'export'
|
||||
job_wait:
|
||||
description: Whether to wait for job completion or not.
|
||||
type: bool
|
||||
required: True
|
||||
share_name:
|
||||
description: CIFS or NFS Network Share or a local path.
|
||||
type: str
|
||||
required: True
|
||||
share_user:
|
||||
description: Network share user in the format 'user@domain' or 'domain\\user' if user is
|
||||
part of a domain else 'user'. This option is mandatory for CIFS Network Share.
|
||||
type: str
|
||||
share_password:
|
||||
description: Network share user password. This option is mandatory for CIFS Network Share.
|
||||
type: str
|
||||
aliases: ['share_pwd']
|
||||
scp_file:
|
||||
description: Server Configuration Profile file name. This option is mandatory for C(import) command.
|
||||
type: str
|
||||
scp_components:
|
||||
description:
|
||||
- If C(ALL), this module will import all components configurations from SCP file.
|
||||
- If C(IDRAC), this module will import iDRAC configuration from SCP file.
|
||||
- If C(BIOS), this module will import BIOS configuration from SCP file.
|
||||
- If C(NIC), this module will import NIC configuration from SCP file.
|
||||
- If C(RAID), this module will import RAID configuration from SCP file.
|
||||
choices: ['ALL', 'IDRAC', 'BIOS', 'NIC', 'RAID']
|
||||
default: 'ALL'
|
||||
shutdown_type:
|
||||
description:
|
||||
- This option is applicable for C(import) command.
|
||||
- If C(Graceful), it gracefully shuts down the server.
|
||||
- If C(Forced), it forcefully shuts down the server.
|
||||
- If C(NoReboot), it does not reboot the server.
|
||||
choices: ['Graceful', 'Forced', 'NoReboot']
|
||||
default: 'Graceful'
|
||||
end_host_power_state:
|
||||
description:
|
||||
- This option is applicable for C(import) command.
|
||||
- If C(On), End host power state is on.
|
||||
- If C(Off), End host power state is off.
|
||||
choices: ['On' ,'Off']
|
||||
default: 'On'
|
||||
export_format:
|
||||
description: Specify the output file format. This option is applicable for C(export) command.
|
||||
choices: ['JSON', 'XML']
|
||||
default: 'XML'
|
||||
export_use:
|
||||
description: Specify the type of server configuration profile (SCP) to be exported.
|
||||
This option is applicable for C(export) command.
|
||||
choices: ['Default', 'Clone', 'Replace']
|
||||
default: 'Default'
|
||||
|
||||
requirements:
|
||||
- "omsdk"
|
||||
- "python >= 2.7.5"
|
||||
author: "Jagadeesh N V(@jagadeeshnv)"
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
---
|
||||
- name: Import Server Configuration Profile from a network share
|
||||
idrac_server_config_profile:
|
||||
idrac_ip: "192.168.0.1"
|
||||
idrac_user: "user_name"
|
||||
idrac_password: "user_password"
|
||||
command: "import"
|
||||
share_name: "192.168.0.2:/share"
|
||||
share_user: "share_user_name"
|
||||
share_password: "share_user_password"
|
||||
scp_file: "scp_filename.xml"
|
||||
scp_components: "ALL"
|
||||
job_wait: True
|
||||
|
||||
- name: Import Server Configuration Profile from a local path
|
||||
idrac_server_config_profile:
|
||||
idrac_ip: "192.168.0.1"
|
||||
idrac_user: "user_name"
|
||||
idrac_password: "user_password"
|
||||
command: "import"
|
||||
share_name: "/scp_folder"
|
||||
share_user: "share_user_name"
|
||||
share_password: "share_user_password"
|
||||
scp_file: "scp_filename.xml"
|
||||
scp_components: "ALL"
|
||||
job_wait: True
|
||||
|
||||
- name: Export Server Configuration Profile to a network share
|
||||
idrac_server_config_profile:
|
||||
idrac_ip: "192.168.0.1"
|
||||
idrac_user: "user_name"
|
||||
idrac_password: "user_password"
|
||||
share_name: "192.168.0.2:/share"
|
||||
share_user: "share_user_name"
|
||||
share_password: "share_user_password"
|
||||
job_wait: False
|
||||
|
||||
- name: Export Server Configuration Profile to a local path
|
||||
idrac_server_config_profile:
|
||||
idrac_ip: "192.168.0.1"
|
||||
idrac_user: "user_name"
|
||||
idrac_password: "user_password"
|
||||
share_name: "/scp_folder"
|
||||
share_user: "share_user_name"
|
||||
share_password: "share_user_password"
|
||||
job_wait: False
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
---
|
||||
msg:
|
||||
type: str
|
||||
description: Status of the import or export SCP job.
|
||||
returned: always
|
||||
sample: "Successfully imported the Server Configuration Profile"
|
||||
scp_status:
|
||||
type: dict
|
||||
description: SCP operation job and progress details from the iDRAC.
|
||||
returned: success
|
||||
sample:
|
||||
{
|
||||
"Id": "JID_XXXXXXXXX",
|
||||
"JobState": "Completed",
|
||||
"JobType": "ImportConfiguration",
|
||||
"Message": "Successfully imported and applied Server Configuration Profile.",
|
||||
"MessageArgs": [],
|
||||
"MessageId": "XXX123",
|
||||
"Name": "Import Configuration",
|
||||
"PercentComplete": 100,
|
||||
"StartTime": "TIME_NOW",
|
||||
"Status": "Success",
|
||||
"TargetSettingsURI": null,
|
||||
"retval": true
|
||||
}
|
||||
'''
|
||||
|
||||
import os
|
||||
from ansible_collections.community.general.plugins.module_utils.remote_management.dellemc.dellemc_idrac import iDRACConnection
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
try:
|
||||
from omsdk.sdkfile import file_share_manager
|
||||
from omsdk.sdkcreds import UserCredentials
|
||||
from omdrivers.enums.iDRAC.iDRACEnums import (SCPTargetEnum, EndHostPowerStateEnum,
|
||||
ShutdownTypeEnum, ExportFormatEnum, ExportUseEnum)
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
|
||||
def run_import_server_config_profile(idrac, module):
    """Import a Server Configuration Profile (SCP) from a network share."""
    params = module.params
    target = SCPTargetEnum[params['scp_components']]
    end_host_power_state = EndHostPowerStateEnum[params['end_host_power_state']]
    shutdown_type = ShutdownTypeEnum[params['shutdown_type']]
    job_wait = params['job_wait']
    idrac.use_redfish = True

    try:
        scp_path = "{0}{1}{2}".format(params['share_name'], os.sep, params['scp_file'])
        share = file_share_manager.create_share_obj(
            share_path=scp_path,
            creds=UserCredentials(params['share_user'], params['share_password']),
            isFolder=False)
        import_status = idrac.config_mgr.scp_import(
            share,
            target=target,
            shutdown_type=shutdown_type,
            end_host_power_state=end_host_power_state,
            job_wait=job_wait)
        # Anything other than an explicit success is reported as a failure.
        if not import_status or import_status.get('Status') != "Success":
            module.fail_json(msg='Failed to import scp.', scp_status=import_status)
    except RuntimeError as e:
        module.fail_json(msg=str(e))
    return import_status
|
||||
|
||||
|
||||
def run_export_server_config_profile(idrac, module):
    """Export a Server Configuration Profile (SCP) to a network share.

    Creates a timestamped file on the share, runs the export through the
    iDRAC Redfish interface and fails the module run when the resulting job
    does not report success.

    :param idrac: established iDRACConnection handle
    :param module: AnsibleModule instance holding the task parameters
    :returns: the status dictionary returned by the SCP export job
    """
    params = module.params
    # File name pattern expanded by OMSDK (%ip / strftime-style fields).
    file_name_pattern = "%ip_%Y%m%d_%H%M%S_scp.{0}".format(params['export_format'].lower())
    # Force the Redfish transport for the SCP operation.
    idrac.use_redfish = True

    try:
        share = file_share_manager.create_share_obj(
            share_path=params['share_name'],
            creds=UserCredentials(params['share_user'], params['share_password']),
            isFolder=True)
        target_file = share.new_file(file_name_pattern)
        export_status = idrac.config_mgr.scp_export(
            target_file,
            target=SCPTargetEnum[params['scp_components']],
            export_format=ExportFormatEnum[params['export_format']],
            export_use=ExportUseEnum[params['export_use']],
            job_wait=params['job_wait'])
        if not export_status or export_status.get('Status') != "Success":
            module.fail_json(msg='Failed to export scp.', scp_status=export_status)
    except RuntimeError as e:
        module.fail_json(msg=str(e))
    return export_status
||||
|
||||
|
||||
def main():
    """Module entry point: export or import an iDRAC Server Configuration Profile."""
    module = AnsibleModule(
        argument_spec={
            # iDRAC connection settings
            "idrac_ip": {"required": True, "type": 'str'},
            "idrac_user": {"required": True, "type": 'str'},
            "idrac_password": {"required": True, "type": 'str',
                               "aliases": ['idrac_pwd'], "no_log": True},
            "idrac_port": {"required": False, "default": 443, "type": 'int'},

            # Operation selection
            "command": {"required": False, "type": 'str',
                        "choices": ['export', 'import'], "default": 'export'},
            "job_wait": {"required": True, "type": 'bool'},

            # Network share holding (or receiving) the SCP file
            "share_name": {"required": True, "type": 'str'},
            "share_user": {"required": False, "type": 'str'},
            "share_password": {"required": False, "type": 'str',
                               "aliases": ['share_pwd'], "no_log": True},
            "scp_components": {"required": False,
                               "choices": ['ALL', 'IDRAC', 'BIOS', 'NIC', 'RAID'],
                               "default": 'ALL'},

            # Import-only options (scp_file is enforced via required_if below)
            "scp_file": {"required": False, "type": 'str'},
            "shutdown_type": {"required": False,
                              "choices": ['Graceful', 'Forced', 'NoReboot'],
                              "default": 'Graceful'},
            "end_host_power_state": {"required": False,
                                     "choices": ['On', 'Off'],
                                     "default": 'On'},

            # Export-only options
            "export_format": {"required": False, "type": 'str',
                              "choices": ['JSON', 'XML'], "default": 'XML'},
            "export_use": {"required": False, "type": 'str',
                          "choices": ['Default', 'Clone', 'Replace'], "default": 'Default'}
        },
        required_if=[
            ["command", "import", ["scp_file"]]
        ],
        supports_check_mode=False)

    try:
        changed = False
        with iDRACConnection(module.params) as idrac:
            command = module.params['command']
            if command == 'import':
                scp_status = run_import_server_config_profile(idrac, module)
                # The job message tells us whether the import actually changed anything.
                if "No changes were applied" not in scp_status.get('Message', ""):
                    changed = True
            else:
                # Exporting is read-only, so changed stays False.
                scp_status = run_export_server_config_profile(idrac, module)
            module.exit_json(changed=changed, msg="Successfully {0}ed the Server Configuration Profile.".format(command),
                             scp_status=scp_status)
    except (ImportError, ValueError, RuntimeError) as e:
        module.fail_json(msg=str(e))
||||
|
||||
|
||||
# Standard entry point when Ansible executes the module directly.
if __name__ == '__main__':
    main()
|
||||
417
plugins/modules/remote_management/dellemc/ome_device_info.py
Normal file
417
plugins/modules/remote_management/dellemc/ome_device_info.py
Normal file
@@ -0,0 +1,417 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
#
|
||||
# Dell EMC OpenManage Ansible Modules
|
||||
# Version 1.2
|
||||
# Copyright (C) 2019 Dell Inc.
|
||||
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# All rights reserved. Dell, EMC, and other trademarks are trademarks of Dell Inc. or its subsidiaries.
|
||||
# Other trademarks may be trademarks of their respective owners.
|
||||
#
|
||||
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: ome_device_info
|
||||
short_description: Retrieves the information about Device.
|
||||
description:
|
||||
- This module retrieves the list of all devices information with the exhaustive inventory of each
|
||||
device.
|
||||
options:
|
||||
hostname:
|
||||
description:
|
||||
- Target IP Address or hostname.
|
||||
type: str
|
||||
required: True
|
||||
username:
|
||||
description:
|
||||
- Target username.
|
||||
type: str
|
||||
required: True
|
||||
password:
|
||||
description:
|
||||
- Target user password.
|
||||
type: str
|
||||
required: True
|
||||
port:
|
||||
description:
|
||||
- Target HTTPS port.
|
||||
type: int
|
||||
default: 443
|
||||
fact_subset:
|
||||
description:
|
||||
- C(basic_inventory) returns the list of the devices.
|
||||
- C(detailed_inventory) returns the inventory details of specified devices.
|
||||
- C(subsystem_health) returns the health status of specified devices.
|
||||
type: str
|
||||
choices: [basic_inventory, detailed_inventory, subsystem_health ]
|
||||
default: basic_inventory
|
||||
system_query_options:
|
||||
description:
|
||||
- I(system_query_options) applicable for the choices of the fact_subset. Either I(device_id) or I(device_service_tag)
|
||||
is mandatory for C(detailed_inventory) and C(subsystem_health) or both can be applicable.
|
||||
type: dict
|
||||
suboptions:
|
||||
device_id:
|
||||
description:
|
||||
- A list of unique identifier is applicable
|
||||
for C(detailed_inventory) and C(subsystem_health).
|
||||
type: list
|
||||
device_service_tag:
|
||||
description:
|
||||
- A list of service tags are applicable for C(detailed_inventory)
|
||||
and C(subsystem_health).
|
||||
type: list
|
||||
inventory_type:
|
||||
description:
|
||||
- For C(detailed_inventory), it returns details of the specified inventory type.
|
||||
type: str
|
||||
filter:
|
||||
description:
|
||||
- For C(basic_inventory), it filters the collection of devices.
|
||||
I(filter) query format should be aligned with OData standards.
|
||||
type: str
|
||||
|
||||
requirements:
|
||||
- "python >= 2.7.5"
|
||||
author: "Sajna Shetty(@Sajna-Shetty)"
|
||||
'''
|
||||
|
||||
EXAMPLES = """
|
||||
---
|
||||
- name: Retrieve basic inventory of all devices.
|
||||
ome_device_info:
|
||||
hostname: "192.168.0.1"
|
||||
username: "username"
|
||||
password: "password"
|
||||
|
||||
- name: Retrieve basic inventory for devices identified by IDs 33333 or 11111 using filtering.
|
||||
ome_device_info:
|
||||
hostname: "192.168.0.1"
|
||||
username: "username"
|
||||
password: "password"
|
||||
fact_subset: "basic_inventory"
|
||||
system_query_options:
|
||||
filter: "Id eq 33333 or Id eq 11111"
|
||||
|
||||
- name: Retrieve inventory details of specified devices identified by IDs 11111 and 22222.
|
||||
ome_device_info:
|
||||
hostname: "192.168.0.1"
|
||||
username: "username"
|
||||
password: "password"
|
||||
fact_subset: "detailed_inventory"
|
||||
system_query_options:
|
||||
device_id:
|
||||
- 11111
|
||||
- 22222
|
||||
|
||||
- name: Retrieve inventory details of specified devices identified by service tags MXL1234 and MXL4567.
|
||||
ome_device_info:
|
||||
hostname: "192.168.0.1"
|
||||
username: "username"
|
||||
password: "password"
|
||||
fact_subset: "detailed_inventory"
|
||||
system_query_options:
|
||||
device_service_tag:
|
||||
- MXL1234
|
||||
- MXL4567
|
||||
|
||||
- name: Retrieve details of specified inventory type of specified devices identified by ID and service tags.
|
||||
ome_device_info:
|
||||
hostname: "192.168.0.1"
|
||||
username: "username"
|
||||
password: "password"
|
||||
fact_subset: "detailed_inventory"
|
||||
system_query_options:
|
||||
device_id:
|
||||
- 11111
|
||||
device_service_tag:
|
||||
- MXL1234
|
||||
- MXL4567
|
||||
inventory_type: "serverDeviceCards"
|
||||
|
||||
- name: Retrieve subsystem health of specified devices identified by service tags.
|
||||
ome_device_info:
|
||||
hostname: "192.168.0.1"
|
||||
username: "username"
|
||||
password: "password"
|
||||
fact_subset: "subsystem_health"
|
||||
system_query_options:
|
||||
device_service_tag:
|
||||
- MXL1234
|
||||
- MXL4567
|
||||
|
||||
"""
|
||||
|
||||
RETURN = '''
|
||||
---
|
||||
msg:
|
||||
type: str
|
||||
description: Over all device information status.
|
||||
returned: on error
|
||||
sample: "Failed to fetch the device information"
|
||||
device_info:
|
||||
type: dict
|
||||
description: Returns the information collected from the Device.
|
||||
returned: success
|
||||
sample: {
|
||||
"value": [
|
||||
{
|
||||
"Actions": null,
|
||||
"AssetTag": null,
|
||||
"ChassisServiceTag": null,
|
||||
"ConnectionState": true,
|
||||
"DeviceManagement": [
|
||||
{
|
||||
"DnsName": "dnsname.host.com",
|
||||
"InstrumentationName": "MX-12345",
|
||||
"MacAddress": "11:10:11:10:11:10",
|
||||
"ManagementId": 12345,
|
||||
"ManagementProfile": [
|
||||
{
|
||||
"HasCreds": 0,
|
||||
"ManagementId": 12345,
|
||||
"ManagementProfileId": 12345,
|
||||
"ManagementURL": "https://192.168.0.1:443",
|
||||
"Status": 1000,
|
||||
"StatusDateTime": "2019-01-21 06:30:08.501"
|
||||
}
|
||||
],
|
||||
"ManagementType": 2,
|
||||
"NetworkAddress": "192.168.0.1"
|
||||
}
|
||||
],
|
||||
"DeviceName": "MX-0003I",
|
||||
"DeviceServiceTag": "MXL1234",
|
||||
"DeviceSubscription": null,
|
||||
"LastInventoryTime": "2019-01-21 06:30:08.501",
|
||||
"LastStatusTime": "2019-01-21 06:30:02.492",
|
||||
"ManagedState": 3000,
|
||||
"Model": "PowerEdge MX7000",
|
||||
"PowerState": 17,
|
||||
"SlotConfiguration": {},
|
||||
"Status": 4000,
|
||||
"SystemId": 2031,
|
||||
"Type": 2000
|
||||
}
|
||||
]
|
||||
}
|
||||
'''
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils.remote_management.dellemc.ome import RestOME
|
||||
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
|
||||
from ansible.module_utils.urls import ConnectionError, SSLValidationError
|
||||
|
||||
# fact_subset values understood by this module.
DEVICES_INVENTORY_DETAILS = "detailed_inventory"
DEVICES_SUBSYSTEM_HEALTH = "subsystem_health"
DEVICES_INVENTORY_TYPE = "inventory_type"
DEVICE_LIST = "basic_inventory"
# Message recorded for service tags that cannot be resolved to a device.
DESC_HTTP_ERROR = "HTTP Error 404: Not Found"
# Module-level accumulator of per-identifier error messages; mutated by the
# helper functions below and merged into the result in main().
device_fact_error_report = {}

# Map of fact_subset (or inventory-type qualifier) to its OME REST resource
# path template; {Id} / {InventoryType} are filled in per request.
DEVICE_RESOURCE_COLLECTION = {
    DEVICE_LIST: {"resource": "DeviceService/Devices"},
    DEVICES_INVENTORY_DETAILS: {"resource": "DeviceService/Devices({Id})/InventoryDetails"},
    DEVICES_INVENTORY_TYPE: {"resource": "DeviceService/Devices({Id})/InventoryDetails('{InventoryType}')"},
    DEVICES_SUBSYSTEM_HEALTH: {"resource": "DeviceService/Devices({Id})/SubSystemHealth"},
}
|
||||
|
||||
|
||||
def _get_device_id_from_service_tags(service_tags, rest_obj):
    """Resolve device ids for the given service tags.

    Fetches the full device list from OME and builds a mapping of device id
    to service tag for every tag that could be found.  Tags that do not match
    any device are recorded in the module-level ``device_fact_error_report``
    with a "not found" message.

    The original code wrapped everything in a ``try/except ... raise err``
    that only re-raised the caught exception; that dead wrapper is removed —
    all errors still propagate to the caller unchanged.

    :param service_tags: list of device service tags to resolve
    :param rest_obj: RestOME session object used to issue the request
    :returns: dict mapping device id to service tag, e.g. ``{1345: "MXL1245"}``
    :raises ValueError: when the device list request reports failure
    """
    path = DEVICE_RESOURCE_COLLECTION[DEVICE_LIST]["resource"]
    resp = rest_obj.invoke_request('GET', path)
    if not resp.success:
        raise ValueError(resp.json_data)
    service_tag_dict = {}
    for item in resp.json_data["value"]:
        if item["DeviceServiceTag"] in service_tags:
            service_tag_dict[item["Id"]] = item["DeviceServiceTag"]
    # Record every requested tag that did not resolve to a device.
    not_available_service_tag = set(service_tags) - set(service_tag_dict.values())
    device_fact_error_report.update(dict((tag, DESC_HTTP_ERROR) for tag in not_available_service_tag))
    return service_tag_dict
|
||||
|
||||
|
||||
def is_int(val):
    """Return True when *val* can be interpreted as an integer.

    Accepts ints and numeric strings.  Any value that ``int()`` rejects
    yields False — including values such as None or lists, which previously
    escaped as an uncaught TypeError instead of answering the question.
    """
    try:
        int(val)
        return True
    except (ValueError, TypeError):
        return False
|
||||
|
||||
|
||||
def _check_duplicate_device_id(device_id_list, service_tag_dict):
    """Drop service tags that duplicate an explicitly requested device id.

    When a device id resolved from a service tag is also present in
    *device_id_list*, the tag's entry is removed from *service_tag_dict*
    (mutated in place) and a "duplicate report" note for that tag is added
    to the module-level ``device_fact_error_report``.

    :param device_id_list: device ids requested directly by the user
    :param service_tag_dict: mapping of device id to service tag
    """
    if not device_id_list:
        return
    numeric_ids = set(int(item) for item in device_id_list if item and is_int(item))
    for dup_id in list(numeric_ids & set(service_tag_dict.keys())):
        tag = service_tag_dict.pop(dup_id)
        device_fact_error_report[tag] = "Duplicate report of device_id: {0}".format(dup_id)
|
||||
|
||||
|
||||
def _get_device_identifier_map(module_params, rest_obj):
    """Build the map of device identifiers requested by the user.

    :param module_params: the Ansible module parameters
    :param rest_obj: RestOME session used to resolve service tags
    :returns: dict keyed by identifier kind, e.g.
        ``{"device_id": {1234: None}, "device_service_tag": {1345: "MXL1234"}}``
    """
    identifier_map = {}
    query_options = module_params.get("system_query_options")
    if query_options is None:
        return identifier_map
    id_list = query_options.get("device_id")
    tag_list = query_options.get("device_service_tag")
    if id_list:
        # De-duplicate the requested ids; the value slot is unused (None).
        identifier_map["device_id"] = dict((unique_id, None) for unique_id in set(id_list))
    if tag_list:
        tag_map = _get_device_id_from_service_tags(tag_list, rest_obj)
        # A device reachable via both a tag and an explicit id is reported once.
        _check_duplicate_device_id(id_list, tag_map)
        identifier_map["device_service_tag"] = tag_map
    return identifier_map
|
||||
|
||||
|
||||
def _get_query_parameters(module_params):
|
||||
"""
|
||||
Builds query parameter
|
||||
:returns: dictionary, which is applicable builds the query format
|
||||
eg : {"$filter":"Type eq 2000"}
|
||||
"""
|
||||
system_query_options_param = module_params.get("system_query_options")
|
||||
query_parameter = None
|
||||
if system_query_options_param:
|
||||
filter_by_val = system_query_options_param.get("filter")
|
||||
if filter_by_val:
|
||||
query_parameter = {"$filter": filter_by_val}
|
||||
return query_parameter
|
||||
|
||||
|
||||
def _get_resource_parameters(module_params, rest_obj):
    """Resolve the REST resource path(s) for the requested fact subset.

    For ``basic_inventory`` a single collection path is returned.  For the
    per-device subsets, one path per requested device is built, keyed by the
    identifier the user supplied (device id or service tag), e.g.::

        {"device_id": {1234: "DeviceService/Devices(1234)/InventoryDetails"},
         "device_service_tag": {"MXL1234": "DeviceService/Devices(1345)/InventoryDetails"}}

    :param module_params: the Ansible module parameters
    :param rest_obj: RestOME session used to resolve service tags
    :returns: dict of identifier kind to {identifier: resource path}
    """
    fact_subset = module_params["fact_subset"]
    if fact_subset == DEVICE_LIST:
        return {DEVICE_LIST: DEVICE_RESOURCE_COLLECTION[DEVICE_LIST]["resource"]}

    inventory_type = None
    if fact_subset == DEVICES_INVENTORY_DETAILS:
        inventory_type = module_params.get("system_query_options").get(DEVICES_INVENTORY_TYPE)
    # An explicit inventory type selects the more specific path template.
    template_key = DEVICES_INVENTORY_TYPE if inventory_type else fact_subset
    template = DEVICE_RESOURCE_COLLECTION[template_key]["resource"]

    path_dict = {}
    for kind, id_tag_map in _get_device_identifier_map(module_params, rest_obj).items():
        per_device = {}
        for dev_id, tag in id_tag_map.items():
            # Key the result by whatever the user supplied for this kind.
            key = tag if kind == "device_service_tag" else dev_id
            per_device[key] = template.format(Id=dev_id, InventoryType=inventory_type)
        path_dict[kind] = per_device
    return path_dict
|
||||
|
||||
|
||||
def _check_mutually_inclusive_arguments(val, module_params, required_args):
|
||||
""""
|
||||
Throws error if arguments detailed_inventory, subsystem_health
|
||||
not exists with qualifier device_id or device_service_tag"""
|
||||
system_query_options_param = module_params.get("system_query_options")
|
||||
if system_query_options_param is None or (system_query_options_param is not None and not any(
|
||||
system_query_options_param.get(qualifier) for qualifier in required_args)):
|
||||
raise ValueError("One of the following {0} is required for {1}".format(required_args, val))
|
||||
|
||||
|
||||
def _validate_inputs(module_params):
    """Reject per-device fact subsets that lack a device qualifier.

    :param module_params: the Ansible module parameters
    :raises ValueError: when detailed_inventory or subsystem_health is
        requested without a device_id or device_service_tag qualifier
    """
    subset = module_params["fact_subset"]
    if subset == "basic_inventory":
        return
    _check_mutually_inclusive_arguments(subset, module_params,
                                        ["device_id", "device_service_tag"])
|
||||
|
||||
|
||||
def main():
    """Module entry point: collect OME device inventory / health facts."""
    # Nested spec for the optional query-refinement dictionary.
    system_query_options = {"type": 'dict', "required": False, "options": {
        "device_id": {"type": 'list'},
        "device_service_tag": {"type": 'list'},
        "inventory_type": {"type": 'str'},
        "filter": {"type": 'str', "required": False},
    }}

    module = AnsibleModule(
        argument_spec={
            "hostname": {"required": True, "type": 'str'},
            "username": {"required": True, "type": 'str'},
            "password": {"required": True, "type": 'str', "no_log": True},
            "port": {"required": False, "default": 443, "type": 'int'},
            "fact_subset": {"required": False, "default": "basic_inventory",
                            "choices": ['basic_inventory', 'detailed_inventory', 'subsystem_health']},
            "system_query_options": system_query_options,
        },
        required_if=[['fact_subset', 'detailed_inventory', ['system_query_options']],
                     ['fact_subset', 'subsystem_health', ['system_query_options']], ],
        supports_check_mode=False)

    try:
        _validate_inputs(module.params)
        with RestOME(module.params, req_session=True) as rest_obj:
            # First resolved to resource paths; later replaced by response data.
            device_facts = _get_resource_parameters(module.params, rest_obj)
            resp_status = []
            if device_facts.get("basic_inventory"):
                # Single collection request, optionally narrowed by an OData filter.
                query_param = _get_query_parameters(module.params)
                resp = rest_obj.invoke_request('GET', device_facts["basic_inventory"], query_param=query_param)
                device_facts = resp.json_data
                resp_status.append(resp.status_code)
            else:
                # One request per requested device; a per-device HTTP failure is
                # recorded in place of its data rather than aborting the run.
                for identifier_type, path_dict_map in device_facts.items():
                    for identifier, path in path_dict_map.items():
                        try:
                            resp = rest_obj.invoke_request('GET', path)
                            data = resp.json_data
                            resp_status.append(resp.status_code)
                        except HTTPError as err:
                            data = str(err)
                        path_dict_map[identifier] = data
                # Merge in errors accumulated while resolving service tags.
                if any(device_fact_error_report):
                    if "device_service_tag" in device_facts:
                        device_facts["device_service_tag"].update(device_fact_error_report)
                    else:
                        device_facts["device_service_tag"] = device_fact_error_report
            # Succeed only if at least one request came back 200 OK.
            if 200 in resp_status:
                module.exit_json(device_info=device_facts)
            else:
                module.fail_json(msg="Failed to fetch the device information")
    except (URLError, HTTPError, SSLValidationError, ConnectionError, TypeError, ValueError) as err:
        module.fail_json(msg=str(err))
|
||||
|
||||
|
||||
# Standard entry point when Ansible executes the module directly.
if __name__ == '__main__':
    main()
|
||||
161
plugins/modules/remote_management/foreman/foreman.py
Normal file
161
plugins/modules/remote_management/foreman/foreman.py
Normal file
@@ -0,0 +1,161 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2016, Eric D Helms <ericdhelms@gmail.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['deprecated'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: foreman
|
||||
short_description: Manage Foreman Resources
|
||||
deprecated:
|
||||
removed_in: "2.12"
|
||||
why: "Replaced by re-designed individual modules living at https://github.com/theforeman/foreman-ansible-modules"
|
||||
alternative: https://github.com/theforeman/foreman-ansible-modules
|
||||
description:
|
||||
- Allows the management of Foreman resources inside your Foreman server.
|
||||
author:
|
||||
- Eric D Helms (@ehelms)
|
||||
requirements:
|
||||
- nailgun >= 0.28.0
|
||||
- python >= 2.6
|
||||
- datetime
|
||||
options:
|
||||
server_url:
|
||||
description:
|
||||
- URL of Foreman server.
|
||||
required: true
|
||||
username:
|
||||
description:
|
||||
- Username on Foreman server.
|
||||
required: true
|
||||
verify_ssl:
|
||||
description:
|
||||
- Whether to verify an SSL connection to Foreman server.
|
||||
type: bool
|
||||
default: False
|
||||
password:
|
||||
description:
|
||||
- Password for user accessing Foreman server.
|
||||
required: true
|
||||
entity:
|
||||
description:
|
||||
- The Foreman resource that the action will be performed on (e.g. organization, host).
|
||||
required: true
|
||||
params:
|
||||
description:
|
||||
- Parameters associated to the entity resource to set or edit in dictionary format (e.g. name, description).
|
||||
required: true
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create CI Organization
|
||||
foreman:
|
||||
username: admin
|
||||
password: admin
|
||||
server_url: https://fakeserver.com
|
||||
entity: organization
|
||||
params:
|
||||
name: My Cool New Organization
|
||||
delegate_to: localhost
|
||||
'''
|
||||
|
||||
RETURN = '''# '''
|
||||
|
||||
import traceback
|
||||
|
||||
try:
|
||||
from nailgun import entities
|
||||
from nailgun.config import ServerConfig
|
||||
HAS_NAILGUN_PACKAGE = True
|
||||
except Exception:
|
||||
HAS_NAILGUN_PACKAGE = False
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
|
||||
class NailGun(object):
    """Thin convenience wrapper around the nailgun entity API."""

    def __init__(self, server, entities, module):
        """
        :param server: nailgun ServerConfig pointing at the Foreman server
        :param entities: the nailgun.entities module
        :param module: the AnsibleModule instance (kept for error reporting)
        """
        self._server = server
        self._entities = entities
        self._module = module

    def find_organization(self, name, **params):
        """Return the organization named *name*, or None when not found."""
        org = self._entities.Organization(self._server, name=name, **params)
        response = org.search(set(), {'search': 'name={0}'.format(name)})

        if len(response) == 1:
            return response[0]

        return None

    def organization(self, params):
        """Create or update the organization described by *params*.

        :param params: entity attributes; must contain 'name'
        :returns: True once the create/update call completed

        Fix: the original mutated the caller's dict (``del params['name']``
        on what is module.params['params']); we now pop from a private copy.
        """
        params = dict(params)
        name = params.pop('name')
        org = self.find_organization(name, **params)

        if org:
            org = self._entities.Organization(self._server, name=name, id=org.id, **params)
            org.update()
        else:
            org = self._entities.Organization(self._server, name=name, **params)
            org.create()

        return True
|
||||
|
||||
|
||||
def main():
    """Module entry point: create or update a Foreman resource via nailgun."""
    module = AnsibleModule(
        argument_spec=dict(
            server_url=dict(type='str', required=True),
            username=dict(type='str', required=True, no_log=True),
            password=dict(type='str', required=True, no_log=True),
            entity=dict(type='str', required=True),
            verify_ssl=dict(type='bool', default=False),
            params=dict(type='dict', required=True, no_log=True),
        ),
        supports_check_mode=True,
    )

    if not HAS_NAILGUN_PACKAGE:
        # Fix: the original message had an unbalanced parenthesis.
        module.fail_json(msg="Missing required nailgun module (check docs or install with: pip install nailgun)")

    server_url = module.params['server_url']
    username = module.params['username']
    password = module.params['password']
    entity = module.params['entity']
    params = module.params['params']
    verify_ssl = module.params['verify_ssl']

    server = ServerConfig(
        url=server_url,
        auth=(username, password),
        verify=verify_ssl
    )
    ng = NailGun(server, entities, module)

    # Verify connectivity and credentials with a harmless read-only search.
    try:
        org = entities.Organization(server)
        org.search()
    except Exception as e:
        module.fail_json(msg="Failed to connect to Foreman server: %s " % to_native(e),
                         exception=traceback.format_exc())

    if entity == 'organization':
        # Fix: check mode is advertised (supports_check_mode=True) but the
        # original still performed the change; short-circuit before writing.
        if module.check_mode:
            module.exit_json(changed=True, result="%s updated" % entity)
        ng.organization(params)
        module.exit_json(changed=True, result="%s updated" % entity)
    else:
        module.fail_json(changed=False, result="Unsupported entity supplied")
|
||||
|
||||
|
||||
# Standard entry point when Ansible executes the module directly.
if __name__ == '__main__':
    main()
|
||||
619
plugins/modules/remote_management/foreman/katello.py
Normal file
619
plugins/modules/remote_management/foreman/katello.py
Normal file
@@ -0,0 +1,619 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2016, Eric D Helms <ericdhelms@gmail.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['deprecated'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: katello
|
||||
short_description: Manage Katello Resources
|
||||
deprecated:
|
||||
removed_in: "2.12"
|
||||
why: "Replaced by re-designed individual modules living at https://github.com/theforeman/foreman-ansible-modules"
|
||||
alternative: https://github.com/theforeman/foreman-ansible-modules
|
||||
description:
|
||||
- Allows the management of Katello resources inside your Foreman server.
|
||||
author:
|
||||
- Eric D Helms (@ehelms)
|
||||
requirements:
|
||||
- nailgun >= 0.28.0
|
||||
- python >= 2.6
|
||||
- datetime
|
||||
options:
|
||||
server_url:
|
||||
description:
|
||||
- URL of Foreman server.
|
||||
required: true
|
||||
username:
|
||||
description:
|
||||
- Username on Foreman server.
|
||||
required: true
|
||||
password:
|
||||
description:
|
||||
- Password for user accessing Foreman server.
|
||||
required: true
|
||||
entity:
|
||||
description:
|
||||
- The Foreman resource that the action will be performed on (e.g. organization, host).
|
||||
choices:
|
||||
|
||||
- repository
|
||||
- manifest
|
||||
- repository_set
|
||||
- sync_plan
|
||||
- content_view
|
||||
- lifecycle_environment
|
||||
- activation_key
|
||||
- product
|
||||
|
||||
required: true
|
||||
action:
|
||||
description:
|
||||
- action associated to the entity resource to set or edit in dictionary format.
|
||||
- Possible Action in relation to Entitys.
|
||||
- "sync (available when entity=product or entity=repository)"
|
||||
- "publish (available when entity=content_view)"
|
||||
- "promote (available when entity=content_view)"
|
||||
choices:
|
||||
- sync
|
||||
- publish
|
||||
- promote
|
||||
required: false
|
||||
params:
|
||||
description:
|
||||
- Parameters associated to the entity resource and action, to set or edit in dictionary format.
|
||||
- Each choice may be only available with specific entitys and actions.
|
||||
- "Possible Choices are in the format of param_name ([entry,action,action,...],[entity,..],...)."
|
||||
- The action "None" means no action specified.
|
||||
- Possible Params in relation to entity and action.
|
||||
- "name ([product,sync,None], [repository,sync], [repository_set,None], [sync_plan,None],"
|
||||
- "[content_view,promote,publish,None], [lifecycle_environment,None], [activation_key,None])"
|
||||
- "organization ([product,sync,None] ,[repository,sync,None], [repository_set,None], [sync_plan,None], "
|
||||
- "[content_view,promote,publish,None], [lifecycle_environment,None], [activation_key,None])"
|
||||
- "content ([manifest,None])"
|
||||
- "product ([repository,sync,None], [repository_set,None], [sync_plan,None])"
|
||||
- "basearch ([repository_set,None])"
|
||||
- "releaserver ([repository_set,None])"
|
||||
- "sync_date ([sync_plan,None])"
|
||||
- "interval ([sync_plan,None])"
|
||||
- "repositories ([content_view,None])"
|
||||
- "from_environment ([content_view,promote])"
|
||||
- "to_environment([content_view,promote])"
|
||||
- "prior ([lifecycle_environment,None])"
|
||||
- "content_view ([activation_key,None])"
|
||||
- "lifecycle_environment ([activation_key,None])"
|
||||
required: true
|
||||
task_timeout:
|
||||
description:
|
||||
- The timeout in seconds to wait for the started Foreman action to finish.
|
||||
- If the timeout is reached and the Foreman action did not complete, the ansible task fails. However the foreman action does not get canceled.
|
||||
default: 1000
|
||||
required: false
|
||||
verify_ssl:
|
||||
description:
|
||||
- verify the ssl/https connection (e.g for a valid certificate)
|
||||
default: false
|
||||
type: bool
|
||||
required: false
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
---
|
||||
# Simple Example:
|
||||
|
||||
- name: Create Product
|
||||
katello:
|
||||
username: admin
|
||||
password: admin
|
||||
server_url: https://fakeserver.com
|
||||
entity: product
|
||||
params:
|
||||
name: Centos 7
|
||||
delegate_to: localhost
|
||||
|
||||
# Abstraction Example:
|
||||
# katello.yml
|
||||
---
|
||||
- name: "{{ name }}"
|
||||
katello:
|
||||
username: admin
|
||||
password: admin
|
||||
server_url: https://fakeserver.com
|
||||
entity: "{{ entity }}"
|
||||
params: "{{ params }}"
|
||||
delegate_to: localhost
|
||||
|
||||
# tasks.yml
|
||||
---
|
||||
- include: katello.yml
|
||||
vars:
|
||||
name: Create Dev Environment
|
||||
entity: lifecycle_environment
|
||||
params:
|
||||
name: Dev
|
||||
prior: Library
|
||||
organization: Default Organization
|
||||
|
||||
- include: katello.yml
|
||||
vars:
|
||||
name: Create Centos Product
|
||||
entity: product
|
||||
params:
|
||||
name: Centos 7
|
||||
organization: Default Organization
|
||||
|
||||
- include: katello.yml
|
||||
vars:
|
||||
name: Create 7.2 Repository
|
||||
entity: repository
|
||||
params:
|
||||
name: Centos 7.2
|
||||
product: Centos 7
|
||||
organization: Default Organization
|
||||
content_type: yum
|
||||
url: http://mirror.centos.org/centos/7/os/x86_64/
|
||||
|
||||
- include: katello.yml
|
||||
vars:
|
||||
name: Create Centos 7 View
|
||||
entity: content_view
|
||||
params:
|
||||
name: Centos 7 View
|
||||
organization: Default Organization
|
||||
repositories:
|
||||
- name: Centos 7.2
|
||||
product: Centos 7
|
||||
|
||||
- include: katello.yml
|
||||
vars:
|
||||
name: Enable RHEL Product
|
||||
entity: repository_set
|
||||
params:
|
||||
name: Red Hat Enterprise Linux 7 Server (RPMs)
|
||||
product: Red Hat Enterprise Linux Server
|
||||
organization: Default Organization
|
||||
basearch: x86_64
|
||||
releasever: 7
|
||||
|
||||
- include: katello.yml
|
||||
vars:
|
||||
name: Promote Contentview Environment with longer timeout
|
||||
task_timeout: 10800
|
||||
entity: content_view
|
||||
action: promote
|
||||
params:
|
||||
name: MyContentView
|
||||
organization: MyOrganisation
|
||||
from_environment: Testing
|
||||
to_environment: Production
|
||||
|
||||
# Best Practices
|
||||
|
||||
# In Foreman, things can be done in parallel.
|
||||
# When a conflicting action is already running,
|
||||
# the task will fail instantly instead of waiting for the already running action to complete.
|
||||
# So you should use a "until success" loop to catch this.
|
||||
|
||||
- name: Promote Contentview Environment with increased Timeout
|
||||
katello:
|
||||
username: ansibleuser
|
||||
password: supersecret
|
||||
task_timeout: 10800
|
||||
entity: content_view
|
||||
action: promote
|
||||
params:
|
||||
name: MyContentView
|
||||
organization: MyOrganisation
|
||||
from_environment: Testing
|
||||
to_environment: Production
|
||||
register: task_result
|
||||
until: task_result is success
|
||||
retries: 9
|
||||
delay: 120
|
||||
|
||||
'''
|
||||
|
||||
RETURN = '''# '''
|
||||
|
||||
import datetime
|
||||
import os
|
||||
import traceback
|
||||
|
||||
try:
|
||||
from nailgun import entities, entity_fields, entity_mixins
|
||||
from nailgun.config import ServerConfig
|
||||
HAS_NAILGUN_PACKAGE = True
|
||||
except Exception:
|
||||
HAS_NAILGUN_PACKAGE = False
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
|
||||
class NailGun(object):
    """Thin wrapper around the nailgun entity API used by this module.

    Every public method implements one C(entity) (and optional C(action))
    supported by the module.  Lookup helpers (C(find_*)) return exactly one
    matching entity or abort the module with C(fail_json); mutating methods
    return a truthy value that is reported back as C(changed).
    """

    def __init__(self, server, entities, module, task_timeout):
        self._server = server      # nailgun ServerConfig
        self._entities = entities  # nailgun.entities module (injected)
        self._module = module      # AnsibleModule, used for fail_json()
        # nailgun polls long-running Foreman tasks; this caps the wait (seconds).
        entity_mixins.TASK_TIMEOUT = task_timeout

    def find_organization(self, name, **params):
        """Return the single organization named *name* or fail the module."""
        org = self._entities.Organization(self._server, name=name, **params)
        response = org.search(set(), {'search': 'name={0}'.format(name)})

        if len(response) == 1:
            return response[0]
        self._module.fail_json(msg="No organization found for %s" % name)

    def find_lifecycle_environment(self, name, organization):
        """Return the lifecycle environment *name* in *organization* or fail."""
        org = self.find_organization(organization)

        lifecycle_env = self._entities.LifecycleEnvironment(self._server, name=name, organization=org)
        response = lifecycle_env.search()

        if len(response) == 1:
            return response[0]
        # Message fixed (was the garbled "No Lifecycle Found found for %s").
        self._module.fail_json(msg="No lifecycle environment found for %s" % name)

    def find_product(self, name, organization):
        """Return the product *name* in *organization* or fail the module."""
        org = self.find_organization(organization)

        product = self._entities.Product(self._server, name=name, organization=org)
        response = product.search()

        if len(response) == 1:
            return response[0]
        self._module.fail_json(msg="No Product found for %s" % name)

    def find_repository(self, name, product, organization):
        """Return the repository *name* of *product* in *organization* or fail."""
        product = self.find_product(product, organization)

        repository = self._entities.Repository(self._server, name=name, product=product)
        # Repository has no organization field in nailgun; graft one on so
        # the search is scoped to the product's organization.
        repository._fields['organization'] = entity_fields.OneToOneField(entities.Organization)
        repository.organization = product.organization
        response = repository.search()

        if len(response) == 1:
            return response[0]
        self._module.fail_json(msg="No Repository found for %s" % name)

    def find_content_view(self, name, organization):
        """Return the content view *name* in *organization* or fail the module."""
        org = self.find_organization(organization)

        content_view = self._entities.ContentView(self._server, name=name, organization=org)
        response = content_view.search()

        if len(response) == 1:
            return response[0]
        self._module.fail_json(msg="No Content View found for %s" % name)

    def organization(self, params):
        """Create or update an organization described by *params*."""
        name = params.pop('name')
        org = self.find_organization(name, **params)

        # NOTE(review): find_organization() fails the module when nothing
        # matches, so the create() branch below is currently unreachable --
        # behaviour kept as-is, but creation probably never happens.
        if org:
            org = self._entities.Organization(self._server, name=name, id=org.id, **params)
            org.update()
        else:
            org = self._entities.Organization(self._server, name=name, **params)
            org.create()

        return True

    def manifest(self, params):
        """Upload a subscription manifest; return False if it is unchanged."""
        org = self.find_organization(params['organization'])
        params['organization'] = org.id

        # The manifest path is appended to the current working directory
        # verbatim (no path separator is inserted).  A with-statement replaces
        # the old try/finally, which raised a NameError in the finally block
        # whenever open() itself failed.
        with open(os.getcwd() + params['content'], 'r') as manifest_file:
            content = manifest_file.read()

        manifest = self._entities.Subscription(self._server)

        try:
            manifest.upload(
                data={'organization_id': org.id},
                files={'content': content}
            )
            return True
        except Exception as e:
            # to_native(e) instead of e.message: the .message attribute does
            # not exist on Python 3 exceptions.
            if "Import is the same as existing data" in to_native(e):
                return False
            self._module.fail_json(msg="Manifest import failed with %s" % to_native(e),
                                   exception=traceback.format_exc())

    def product(self, params):
        """Create or update a product described by *params*."""
        org = self.find_organization(params['organization'])
        params['organization'] = org.id

        product = self._entities.Product(self._server, **params)
        response = product.search()

        if len(response) == 1:
            product.id = response[0].id
            product.update()
        else:
            product.create()

        return True

    def sync_product(self, params):
        """Trigger a synchronization of the product named in *params*."""
        org = self.find_organization(params['organization'])
        product = self.find_product(params['name'], org.name)

        return product.sync()

    def repository(self, params):
        """Create or update a repository described by *params*."""
        product = self.find_product(params['product'], params['organization'])
        params['product'] = product.id
        del params['organization']

        repository = self._entities.Repository(self._server, **params)
        # Scope the search to the product's organization (see find_repository).
        repository._fields['organization'] = entity_fields.OneToOneField(entities.Organization)
        repository.organization = product.organization
        response = repository.search()

        if len(response) == 1:
            repository.id = response[0].id
            repository.update()
        else:
            repository.create()

        return True

    def sync_repository(self, params):
        """Trigger a synchronization of the repository named in *params*."""
        org = self.find_organization(params['organization'])
        repository = self.find_repository(params['name'], params['product'], org.name)

        return repository.sync()

    def repository_set(self, params):
        """Enable a repository from a repository set unless already enabled."""
        product = self.find_product(params['product'], params['organization'])
        del params['product']
        del params['organization']

        # NOTE(review): find_product() fails the module when nothing matches,
        # so this guard can never trigger; kept for safety.
        if not product:
            return False

        reposet = self._entities.RepositorySet(self._server, product=product, name=params['name']).search()[0]

        # The enabled repository is named
        # "<set name without parentheses> <basearch> [<releasever>]".
        formatted_name = [params['name'].replace('(', '').replace(')', '')]
        formatted_name.append(params['basearch'])
        if 'releasever' in params:
            formatted_name.append(params['releasever'])
        formatted_name = ' '.join(formatted_name)

        repository = self._entities.Repository(self._server, product=product, name=formatted_name)
        repository._fields['organization'] = entity_fields.OneToOneField(entities.Organization)
        repository.organization = product.organization

        # Only enable when the resulting repository does not exist yet.
        if len(repository.search()) == 0:
            enable_data = {'basearch': params['basearch']}
            if 'releasever' in params:
                enable_data['releasever'] = params['releasever']
            reposet.enable(data=enable_data)

        return True

    def sync_plan(self, params):
        """Create or update a sync plan and attach the listed products."""
        org = self.find_organization(params['organization'])
        params['organization'] = org.id
        # Only the time of day is significant for the sync date.
        params['sync_date'] = datetime.datetime.strptime(params['sync_date'], "%H:%M")

        products = params.pop('products')

        sync_plan = self._entities.SyncPlan(
            self._server,
            name=params['name'],
            organization=org
        )
        response = sync_plan.search()

        sync_plan.sync_date = params['sync_date']
        sync_plan.interval = params['interval']

        if len(response) == 1:
            sync_plan.id = response[0].id
            sync_plan.update()
        else:
            response = sync_plan.create()
            # NOTE(review): nailgun's create() normally returns the created
            # entity, not a list; confirm that indexing [0] is correct here.
            sync_plan.id = response[0].id

        if products:
            ids = [self.find_product(product_name, org.name).id for product_name in products]
            sync_plan.add_products(data={'product_ids': ids})

        return True

    def content_view(self, params):
        """Create or update a content view and attach its repositories."""
        org = self.find_organization(params['organization'])

        content_view = self._entities.ContentView(self._server, name=params['name'], organization=org)
        response = content_view.search()

        if len(response) == 1:
            content_view.id = response[0].id
            content_view.update()
        else:
            content_view = content_view.create()

        if params['repositories']:
            repos = [self.find_repository(repo['name'], repo['product'], org.name)
                     for repo in params['repositories']]

            content_view.repository = repos
            content_view.update(['repository'])

        # Return True like the other mutators so exit_json reports
        # changed=True instead of changed=None (return was missing before).
        return True

    def find_content_view_version(self, name, organization, environment):
        """Return the version of content view *name* in *environment* or fail."""
        env = self.find_lifecycle_environment(environment, organization)
        content_view = self.find_content_view(name, organization)

        content_view_version = self._entities.ContentViewVersion(self._server, content_view=content_view)
        response = content_view_version.search(['content_view'], {'environment_id': env.id})

        if len(response) == 1:
            return response[0]
        self._module.fail_json(msg="No Content View version found for %s" % response)

    def publish(self, params):
        """Publish a new version of the named content view."""
        content_view = self.find_content_view(params['name'], params['organization'])

        return content_view.publish()

    def promote(self, params):
        """Promote a content view version between lifecycle environments."""
        to_environment = self.find_lifecycle_environment(params['to_environment'], params['organization'])
        version = self.find_content_view_version(params['name'], params['organization'], params['from_environment'])

        data = {'environment_id': to_environment.id}
        return version.promote(data=data)

    def lifecycle_environment(self, params):
        """Create or update a lifecycle environment described by *params*."""
        org = self.find_organization(params['organization'])
        prior_env = self.find_lifecycle_environment(params['prior'], params['organization'])

        lifecycle_env = self._entities.LifecycleEnvironment(self._server, name=params['name'], organization=org, prior=prior_env)
        response = lifecycle_env.search()

        if len(response) == 1:
            lifecycle_env.id = response[0].id
            lifecycle_env.update()
        else:
            lifecycle_env.create()

        return True

    def activation_key(self, params):
        """Create or update an activation key, optionally binding a content view."""
        org = self.find_organization(params['organization'])

        activation_key = self._entities.ActivationKey(self._server, name=params['name'], organization=org)
        response = activation_key.search()

        if len(response) == 1:
            activation_key.id = response[0].id
            activation_key.update()
        else:
            activation_key.create()

        if params['content_view']:
            content_view = self.find_content_view(params['content_view'], params['organization'])
            lifecycle_environment = self.find_lifecycle_environment(params['lifecycle_environment'], params['organization'])

            activation_key.content_view = content_view
            activation_key.environment = lifecycle_environment
            activation_key.update()

        return True
|
||||
|
||||
|
||||
def main():
    """Entry point: connect to Foreman/Katello and dispatch entity/action.

    Fails the module when the nailgun library is missing, the server is
    unreachable, or an unsupported entity is requested.
    """
    module = AnsibleModule(
        argument_spec=dict(
            server_url=dict(type='str', required=True),
            username=dict(type='str', required=True, no_log=True),
            password=dict(type='str', required=True, no_log=True),
            entity=dict(type='str', required=True,
                        choices=['repository', 'manifest', 'repository_set', 'sync_plan',
                                 'content_view', 'lifecycle_environment', 'activation_key', 'product']),
            action=dict(type='str', choices=['sync', 'publish', 'promote']),
            verify_ssl=dict(type='bool', default=False),
            task_timeout=dict(type='int', default=1000),
            params=dict(type='dict', required=True, no_log=True),
        ),
        supports_check_mode=True,
    )

    if not HAS_NAILGUN_PACKAGE:
        # Message fixed: the closing parenthesis was missing.
        module.fail_json(msg="Missing required nailgun module (check docs or install with: pip install nailgun)")

    server_url = module.params['server_url']
    username = module.params['username']
    password = module.params['password']
    entity = module.params['entity']
    action = module.params['action']
    params = module.params['params']
    verify_ssl = module.params['verify_ssl']
    task_timeout = module.params['task_timeout']

    server = ServerConfig(
        url=server_url,
        auth=(username, password),
        verify=verify_ssl
    )
    ng = NailGun(server, entities, module, task_timeout)

    # Validate credentials/connectivity with a cheap authenticated request.
    try:
        org = entities.Organization(server)
        org.search()
    except Exception as e:
        module.fail_json(msg="Failed to connect to Foreman server: %s " % e)

    result = False

    # Dispatch on entity (and action where applicable).
    if entity == 'product':
        result = ng.sync_product(params) if action == 'sync' else ng.product(params)
    elif entity == 'repository':
        result = ng.sync_repository(params) if action == 'sync' else ng.repository(params)
    elif entity == 'manifest':
        result = ng.manifest(params)
    elif entity == 'repository_set':
        result = ng.repository_set(params)
    elif entity == 'sync_plan':
        result = ng.sync_plan(params)
    elif entity == 'content_view':
        if action == 'publish':
            result = ng.publish(params)
        elif action == 'promote':
            result = ng.promote(params)
        else:
            result = ng.content_view(params)
    elif entity == 'lifecycle_environment':
        result = ng.lifecycle_environment(params)
    elif entity == 'activation_key':
        result = ng.activation_key(params)
    else:
        # fail_json needs msg; the original passed only changed/result and
        # produced an unhelpful generic error message.
        module.fail_json(msg="Unsupported entity supplied", changed=False, result="Unsupported entity supplied")

    module.exit_json(changed=result, result="%s updated" % entity)


if __name__ == '__main__':
    main()
|
||||
208
plugins/modules/remote_management/hpilo/hpilo_boot.py
Normal file
208
plugins/modules/remote_management/hpilo/hpilo_boot.py
Normal file
@@ -0,0 +1,208 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright 2012 Dag Wieers <dag@wieers.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: hpilo_boot
|
||||
author: Dag Wieers (@dagwieers)
|
||||
short_description: Boot system using specific media through HP iLO interface
|
||||
description:
|
||||
- "This module boots a system through its HP iLO interface. The boot media
|
||||
can be one of: cdrom, floppy, hdd, network or usb."
|
||||
- This module requires the hpilo python module.
|
||||
options:
|
||||
host:
|
||||
description:
|
||||
- The HP iLO hostname/address that is linked to the physical system.
|
||||
required: true
|
||||
login:
|
||||
description:
|
||||
- The login name to authenticate to the HP iLO interface.
|
||||
default: Administrator
|
||||
password:
|
||||
description:
|
||||
- The password to authenticate to the HP iLO interface.
|
||||
default: admin
|
||||
media:
|
||||
description:
|
||||
- The boot media to boot the system from
|
||||
choices: [ "cdrom", "floppy", "hdd", "network", "normal", "usb" ]
|
||||
image:
|
||||
description:
|
||||
- The URL of a cdrom, floppy or usb boot media image.
|
||||
protocol://username:password@hostname:port/filename
|
||||
- protocol is either 'http' or 'https'
|
||||
- username:password is optional
|
||||
- port is optional
|
||||
state:
|
||||
description:
|
||||
- The state of the boot media.
|
||||
- "no_boot: Do not boot from the device"
|
||||
- "boot_once: Boot from the device once and then not thereafter"
|
||||
- "boot_always: Boot from the device each time the server is rebooted"
|
||||
- "connect: Connect the virtual media device and set to boot_always"
|
||||
- "disconnect: Disconnects the virtual media device and set to no_boot"
|
||||
- "poweroff: Power off the server"
|
||||
default: boot_once
|
||||
choices: [ "boot_always", "boot_once", "connect", "disconnect", "no_boot", "poweroff" ]
|
||||
force:
|
||||
description:
|
||||
- Whether to force a reboot (even when the system is already booted).
|
||||
- As a safeguard, without force, hpilo_boot will refuse to reboot a server that is already running.
|
||||
default: no
|
||||
type: bool
|
||||
ssl_version:
|
||||
description:
|
||||
- Change the ssl_version used.
|
||||
default: TLSv1
|
||||
choices: [ "SSLv3", "SSLv23", "TLSv1", "TLSv1_1", "TLSv1_2" ]
|
||||
requirements:
|
||||
- python-hpilo
|
||||
notes:
|
||||
- To use a USB key image you need to specify floppy as boot media.
|
||||
- This module ought to be run from a system that can access the HP iLO
|
||||
interface directly, either by using C(local_action) or using C(delegate_to).
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Task to boot a system using an ISO from an HP iLO interface only if the system is an HP server
|
||||
hpilo_boot:
|
||||
host: YOUR_ILO_ADDRESS
|
||||
login: YOUR_ILO_LOGIN
|
||||
password: YOUR_ILO_PASSWORD
|
||||
media: cdrom
|
||||
image: http://some-web-server/iso/boot.iso
|
||||
when: cmdb_hwmodel.startswith('HP ')
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Power off a server
|
||||
hpilo_boot:
|
||||
host: YOUR_ILO_HOST
|
||||
login: YOUR_ILO_LOGIN
|
||||
password: YOUR_ILO_PASSWORD
|
||||
state: poweroff
|
||||
delegate_to: localhost
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
# Default return values
|
||||
'''
|
||||
|
||||
import time
|
||||
import traceback
|
||||
import warnings
|
||||
|
||||
HPILO_IMP_ERR = None
|
||||
try:
|
||||
import hpilo
|
||||
HAS_HPILO = True
|
||||
except ImportError:
|
||||
HPILO_IMP_ERR = traceback.format_exc()
|
||||
HAS_HPILO = False
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
|
||||
|
||||
|
||||
# Suppress warnings from hpilo
|
||||
warnings.simplefilter('ignore')
|
||||
|
||||
|
||||
def main():
    """Entry point: drive boot media and power state through an HP iLO.

    Depending on C(state), this attaches/detaches virtual media, boots the
    server (optionally forcing a reboot), or powers it off.
    """
    module = AnsibleModule(
        argument_spec=dict(
            host=dict(type='str', required=True),
            login=dict(type='str', default='Administrator'),
            password=dict(type='str', default='admin', no_log=True),
            media=dict(type='str', choices=['cdrom', 'floppy', 'rbsu', 'hdd', 'network', 'normal', 'usb']),
            image=dict(type='str'),
            state=dict(type='str', default='boot_once', choices=['boot_always', 'boot_once', 'connect', 'disconnect', 'no_boot', 'poweroff']),
            force=dict(type='bool', default=False),
            ssl_version=dict(type='str', default='TLSv1', choices=['SSLv3', 'SSLv23', 'TLSv1', 'TLSv1_1', 'TLSv1_2']),
        )
    )

    if not HAS_HPILO:
        module.fail_json(msg=missing_required_lib('python-hpilo'), exception=HPILO_IMP_ERR)

    host = module.params['host']
    login = module.params['login']
    password = module.params['password']
    media = module.params['media']
    image = module.params['image']
    state = module.params['state']
    force = module.params['force']
    # Map e.g. 'TLSv1_1' to the ssl.PROTOCOL_TLSv1_1 constant.
    ssl_version = getattr(hpilo.ssl, 'PROTOCOL_' + module.params.get('ssl_version').upper().replace('V', 'v'))

    ilo = hpilo.Ilo(host, login=login, password=password, ssl_version=ssl_version)
    changed = False
    status = {}
    power_status = 'UNKNOWN'

    if media and state in ('boot_always', 'boot_once', 'connect', 'disconnect', 'no_boot'):

        # Workaround for: Error communicating with iLO: Problem manipulating EV
        try:
            ilo.set_one_time_boot(media)
        except hpilo.IloError:
            time.sleep(60)
            ilo.set_one_time_boot(media)

        # TODO: Verify if image URL exists/works
        if image:
            ilo.insert_virtual_media(media, image)
            changed = True

        if media == 'cdrom':
            ilo.set_vm_status('cdrom', state, True)
            status = ilo.get_vm_status()
            changed = True
        elif media in ('floppy', 'usb'):
            ilo.set_vf_status(state, True)
            status = ilo.get_vf_status()
            changed = True

    # Only perform a boot when state is boot_once or boot_always, or in case we want to force a reboot
    if state in ('boot_once', 'boot_always') or force:

        power_status = ilo.get_host_power_status()

        # Safeguard: refuse to reboot a live server unless force=yes.
        if not force and power_status == 'ON':
            module.fail_json(msg='HP iLO (%s) reports that the server is already powered on !' % host)

        if power_status == 'ON':
            ilo.warm_boot_server()
            changed = True
        else:
            ilo.press_pwr_btn()
            changed = True

    elif state == 'poweroff':
        # BUGFIX: was `state in ('poweroff')` -- parentheses without a comma
        # make a plain string, so the test was a substring match rather than
        # tuple membership/equality.

        power_status = ilo.get_host_power_status()

        if not power_status == 'OFF':
            ilo.hold_pwr_btn()
            changed = True

    module.exit_json(changed=changed, power=power_status, **status)


if __name__ == '__main__':
    main()
|
||||
1
plugins/modules/remote_management/hpilo/hpilo_facts.py
Symbolic link
1
plugins/modules/remote_management/hpilo/hpilo_facts.py
Symbolic link
@@ -0,0 +1 @@
|
||||
hpilo_info.py
|
||||
262
plugins/modules/remote_management/hpilo/hpilo_info.py
Normal file
262
plugins/modules/remote_management/hpilo/hpilo_info.py
Normal file
@@ -0,0 +1,262 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright 2012 Dag Wieers <dag@wieers.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: hpilo_info
|
||||
author: Dag Wieers (@dagwieers)
|
||||
short_description: Gather information through an HP iLO interface
|
||||
description:
|
||||
- This module gathers information on a specific system using its HP iLO interface.
|
||||
These information includes hardware and network related information useful
|
||||
for provisioning (e.g. macaddress, uuid).
|
||||
- This module requires the C(hpilo) python module.
|
||||
- This module was called C(hpilo_facts) before Ansible 2.9, returning C(ansible_facts).
|
||||
Note that the M(hpilo_info) module no longer returns C(ansible_facts)!
|
||||
options:
|
||||
host:
|
||||
description:
|
||||
- The HP iLO hostname/address that is linked to the physical system.
|
||||
required: true
|
||||
login:
|
||||
description:
|
||||
- The login name to authenticate to the HP iLO interface.
|
||||
default: Administrator
|
||||
password:
|
||||
description:
|
||||
- The password to authenticate to the HP iLO interface.
|
||||
default: admin
|
||||
ssl_version:
|
||||
description:
|
||||
- Change the ssl_version used.
|
||||
default: TLSv1
|
||||
choices: [ "SSLv3", "SSLv23", "TLSv1", "TLSv1_1", "TLSv1_2" ]
|
||||
requirements:
|
||||
- hpilo
|
||||
notes:
|
||||
- This module ought to be run from a system that can access the HP iLO
|
||||
interface directly, either by using C(local_action) or using C(delegate_to).
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
# Task to gather facts from a HP iLO interface only if the system is an HP server
|
||||
- hpilo_info:
|
||||
host: YOUR_ILO_ADDRESS
|
||||
login: YOUR_ILO_LOGIN
|
||||
password: YOUR_ILO_PASSWORD
|
||||
when: cmdb_hwmodel.startswith('HP ')
|
||||
delegate_to: localhost
|
||||
register: results
|
||||
|
||||
- fail:
|
||||
msg: 'CMDB serial ({{ cmdb_serialno }}) does not match hardware serial ({{ results.hw_system_serial }}) !'
|
||||
when: cmdb_serialno != results.hw_system_serial
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
# Typical output of HP iLO_info for a physical system
|
||||
hw_bios_date:
|
||||
description: BIOS date
|
||||
returned: always
|
||||
type: str
|
||||
sample: 05/05/2011
|
||||
|
||||
hw_bios_version:
|
||||
description: BIOS version
|
||||
returned: always
|
||||
type: str
|
||||
sample: P68
|
||||
|
||||
hw_ethX:
|
||||
description: Interface information (for each interface)
|
||||
returned: always
|
||||
type: dict
|
||||
sample:
|
||||
- macaddress: 00:11:22:33:44:55
|
||||
macaddress_dash: 00-11-22-33-44-55
|
||||
|
||||
hw_eth_ilo:
|
||||
description: Interface information (for the iLO network interface)
|
||||
returned: always
|
||||
type: dict
|
||||
sample:
|
||||
- macaddress: 00:11:22:33:44:BA
|
||||
- macaddress_dash: 00-11-22-33-44-BA
|
||||
|
||||
hw_product_name:
|
||||
description: Product name
|
||||
returned: always
|
||||
type: str
|
||||
sample: ProLiant DL360 G7
|
||||
|
||||
hw_product_uuid:
|
||||
description: Product UUID
|
||||
returned: always
|
||||
type: str
|
||||
sample: ef50bac8-2845-40ff-81d9-675315501dac
|
||||
|
||||
hw_system_serial:
|
||||
description: System serial number
|
||||
returned: always
|
||||
type: str
|
||||
sample: ABC12345D6
|
||||
|
||||
hw_uuid:
|
||||
description: Hardware UUID
|
||||
returned: always
|
||||
type: str
|
||||
sample: 123456ABC78901D2
|
||||
'''
|
||||
|
||||
import re
|
||||
import traceback
|
||||
import warnings
|
||||
|
||||
HPILO_IMP_ERR = None
|
||||
try:
|
||||
import hpilo
|
||||
HAS_HPILO = True
|
||||
except ImportError:
|
||||
HPILO_IMP_ERR = traceback.format_exc()
|
||||
HAS_HPILO = False
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
|
||||
# Suppress warnings from hpilo
|
||||
warnings.simplefilter('ignore')
|
||||
|
||||
|
||||
def parse_flat_interface(entry, non_numeric='hw_eth_ilo'):
    """Turn a flat NIC record into an ``(info_key, info_dict)`` pair.

    The key is ``hw_eth<Port-1>`` when the entry's ``Port`` field parses as
    an integer, otherwise *non_numeric*.  The dict carries the MAC address
    in colon- and dash-separated forms.
    """
    try:
        key = 'hw_eth%d' % (int(entry['Port']) - 1)
    except Exception:
        # Missing or non-numeric port (e.g. the iLO's own interface).
        key = non_numeric

    mac_dashed = entry['MAC']
    details = {
        'macaddress': mac_dashed.replace('-', ':'),
        'macaddress_dash': mac_dashed,
    }
    return (key, details)
|
||||
|
||||
|
||||
def main():
    """Gather hardware/network information from an HP iLO and exit with it.

    Reads SMBIOS-style host-data records and embedded-health data from the
    iLO and exposes them as flat ``hw_*`` keys (or as ``ansible_facts`` when
    invoked under the legacy ``hpilo_facts`` name).
    """

    module = AnsibleModule(
        argument_spec=dict(
            host=dict(type='str', required=True),
            login=dict(type='str', default='Administrator'),
            password=dict(type='str', default='admin', no_log=True),
            ssl_version=dict(type='str', default='TLSv1', choices=['SSLv3', 'SSLv23', 'TLSv1', 'TLSv1_1', 'TLSv1_2']),
        ),
        supports_check_mode=True,
    )
    # Legacy alias keeps the old ansible_facts-returning behaviour.
    is_old_facts = module._name == 'hpilo_facts'
    if is_old_facts:
        module.deprecate("The 'hpilo_facts' module has been renamed to 'hpilo_info', "
                         "and the renamed one no longer returns ansible_facts", version='2.13')

    if not HAS_HPILO:
        module.fail_json(msg=missing_required_lib('python-hpilo'), exception=HPILO_IMP_ERR)

    host = module.params['host']
    login = module.params['login']
    password = module.params['password']
    # Map e.g. 'TLSv1_1' to the ssl.PROTOCOL_TLSv1_1 constant.
    ssl_version = getattr(hpilo.ssl, 'PROTOCOL_' + module.params.get('ssl_version').upper().replace('V', 'v'))

    ilo = hpilo.Ilo(host, login=login, password=password, ssl_version=ssl_version)

    info = {
        'module_hw': True,
    }

    # TODO: Count number of CPUs, DIMMs and total memory
    try:
        data = ilo.get_host_data()
    except hpilo.IloCommunicationError as e:
        module.fail_json(msg=to_native(e))

    # Each entry is a record whose 'type' code selects how it is parsed.
    for entry in data:
        if 'type' not in entry:
            continue
        elif entry['type'] == 0:  # BIOS Information
            info['hw_bios_version'] = entry['Family']
            info['hw_bios_date'] = entry['Date']
        elif entry['type'] == 1:  # System Information
            info['hw_uuid'] = entry['UUID']
            info['hw_system_serial'] = entry['Serial Number'].rstrip()
            info['hw_product_name'] = entry['Product Name']
            info['hw_product_uuid'] = entry['cUUID']
        elif entry['type'] == 209:  # Embedded NIC MAC Assignment
            if 'fields' in entry:
                # Fields come as Port/MAC pairs; assumes a 'Port' field is
                # always seen before its matching 'MAC' field, otherwise
                # `infoname` would be unbound -- TODO confirm with real data.
                for (name, value) in [(e['name'], e['value']) for e in entry['fields']]:
                    if name.startswith('Port'):
                        try:
                            infoname = 'hw_eth' + str(int(value) - 1)
                        except Exception:
                            infoname = 'hw_eth_ilo'
                    elif name.startswith('MAC'):
                        info[infoname] = {
                            'macaddress': value.replace('-', ':'),
                            'macaddress_dash': value
                        }
            else:
                (infoname, entry_info) = parse_flat_interface(entry, 'hw_eth_ilo')
                info[infoname] = entry_info
        elif entry['type'] == 209:  # HPQ NIC iSCSI MAC Info
            # NOTE(review): this branch is unreachable -- type 209 is already
            # consumed by the branch above, so the iSCSI parsing below never
            # runs.  It was probably meant to match a different type code.
            for (name, value) in [(e['name'], e['value']) for e in entry['fields']]:
                if name.startswith('Port'):
                    try:
                        infoname = 'hw_iscsi' + str(int(value) - 1)
                    except Exception:
                        infoname = 'hw_iscsi_ilo'
                elif name.startswith('MAC'):
                    info[infoname] = {
                        'macaddress': value.replace('-', ':'),
                        'macaddress_dash': value
                    }
        elif entry['type'] == 233:  # Embedded NIC MAC Assignment (Alternate data format)
            (infoname, entry_info) = parse_flat_interface(entry, 'hw_eth_ilo')
            info[infoname] = entry_info

    # Collect health (RAM/CPU data)
    health = ilo.get_embedded_health()
    info['hw_health'] = health

    memory_details_summary = health.get('memory', {}).get('memory_details_summary')
    # RAM as reported by iLO 2.10 on ProLiant BL460c Gen8
    if memory_details_summary:
        info['hw_memory_details_summary'] = memory_details_summary
        info['hw_memory_total'] = 0
        # Sum the per-CPU totals; only 'GB'-denominated values are counted.
        for cpu, details in memory_details_summary.items():
            cpu_total_memory_size = details.get('total_memory_size')
            if cpu_total_memory_size:
                ram = re.search(r'(\d+)\s+(\w+)', cpu_total_memory_size)
                if ram:
                    if ram.group(2) == 'GB':
                        info['hw_memory_total'] = info['hw_memory_total'] + int(ram.group(1))

        # reformat into a text friendly format
        info['hw_memory_total'] = "{0} GB".format(info['hw_memory_total'])

    if is_old_facts:
        module.exit_json(ansible_facts=info)
    else:
        module.exit_json(**info)


if __name__ == '__main__':
    main()
|
||||
116
plugins/modules/remote_management/hpilo/hponcfg.py
Normal file
116
plugins/modules/remote_management/hpilo/hponcfg.py
Normal file
@@ -0,0 +1,116 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# (c) 2012, Dag Wieers <dag@wieers.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: hponcfg
|
||||
author: Dag Wieers (@dagwieers)
|
||||
short_description: Configure HP iLO interface using hponcfg
|
||||
description:
|
||||
- This modules configures the HP iLO interface using hponcfg.
|
||||
options:
|
||||
path:
|
||||
description:
|
||||
- The XML file as accepted by hponcfg.
|
||||
required: true
|
||||
aliases: ['src']
|
||||
minfw:
|
||||
description:
|
||||
- The minimum firmware level needed.
|
||||
required: false
|
||||
executable:
|
||||
description:
|
||||
- Path to the hponcfg executable (`hponcfg` which uses $PATH).
|
||||
default: hponcfg
|
||||
verbose:
|
||||
description:
|
||||
- Run hponcfg in verbose mode (-v).
|
||||
default: no
|
||||
type: bool
|
||||
requirements:
|
||||
- hponcfg tool
|
||||
notes:
|
||||
- You need a working hponcfg on the target system.
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Example hponcfg configuration XML
|
||||
copy:
|
||||
content: |
|
||||
<ribcl VERSION="2.0">
|
||||
<login USER_LOGIN="user" PASSWORD="password">
|
||||
<rib_info MODE="WRITE">
|
||||
<mod_global_settings>
|
||||
<session_timeout value="0"/>
|
||||
<ssh_status value="Y"/>
|
||||
<ssh_port value="22"/>
|
||||
<serial_cli_status value="3"/>
|
||||
<serial_cli_speed value="5"/>
|
||||
</mod_global_settings>
|
||||
</rib_info>
|
||||
</login>
|
||||
</ribcl>
|
||||
dest: /tmp/enable-ssh.xml
|
||||
|
||||
- name: Configure HP iLO using enable-ssh.xml
|
||||
hponcfg:
|
||||
src: /tmp/enable-ssh.xml
|
||||
|
||||
- name: Configure HP iLO on VMware ESXi hypervisor
|
||||
hponcfg:
|
||||
src: /tmp/enable-ssh.xml
|
||||
executable: /opt/hp/tools/hponcfg
|
||||
'''
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
|
||||
|
||||
def main():
    """Ansible entry point: run hponcfg with the supplied XML configuration file.

    Always reports a change on success (the tool gives no way to detect
    whether anything was actually modified); fails with rc/stdout/stderr
    when hponcfg exits non-zero.
    """
    module = AnsibleModule(
        argument_spec=dict(
            src=dict(type='path', required=True, aliases=['path']),
            minfw=dict(type='str'),
            executable=dict(default='hponcfg', type='str'),
            verbose=dict(default=False, type='bool'),
        )
    )

    # Consider every action a change (not idempotent yet!)
    changed = True

    src = module.params['src']
    minfw = module.params['minfw']
    executable = module.params['executable']
    verbose = module.params['verbose']

    # Build the command as an argument list rather than a single formatted
    # string: run_command() passes a list straight to the OS, so paths
    # containing whitespace or shell metacharacters survive intact.
    command = [executable, '-f', src]

    if verbose:
        command.append('-v')

    if minfw:
        command.extend(['-m', minfw])

    rc, stdout, stderr = module.run_command(command)

    if rc != 0:
        module.fail_json(rc=rc, msg="Failed to run hponcfg", stdout=stdout, stderr=stderr)

    module.exit_json(changed=changed, stdout=stdout, stderr=stderr)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
431
plugins/modules/remote_management/imc/imc_rest.py
Normal file
431
plugins/modules/remote_management/imc/imc_rest.py
Normal file
@@ -0,0 +1,431 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
# (c) 2017, Dag Wieers <dag@wieers.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: imc_rest
|
||||
short_description: Manage Cisco IMC hardware through its REST API
|
||||
description:
|
||||
- Provides direct access to the Cisco IMC REST API.
|
||||
- Perform any configuration changes and actions that the Cisco IMC supports.
|
||||
- More information about the IMC REST API is available from
|
||||
U(http://www.cisco.com/c/en/us/td/docs/unified_computing/ucs/c/sw/api/3_0/b_Cisco_IMC_api_301.html)
|
||||
author:
|
||||
- Dag Wieers (@dagwieers)
|
||||
requirements:
|
||||
- lxml
|
||||
- xmljson >= 0.1.8
|
||||
options:
|
||||
hostname:
|
||||
description:
|
||||
- IP Address or hostname of Cisco IMC, resolvable by Ansible control host.
|
||||
required: true
|
||||
aliases: [ host, ip ]
|
||||
username:
|
||||
description:
|
||||
- Username used to login to the switch.
|
||||
default: admin
|
||||
aliases: [ user ]
|
||||
password:
|
||||
description:
|
||||
- The password to use for authentication.
|
||||
default: password
|
||||
path:
|
||||
description:
|
||||
- Name of the absolute path of the filename that includes the body
|
||||
of the http request being sent to the Cisco IMC REST API.
|
||||
- Parameter C(path) is mutual exclusive with parameter C(content).
|
||||
aliases: [ 'src', 'config_file' ]
|
||||
content:
|
||||
description:
|
||||
- When used instead of C(path), sets the content of the API requests directly.
|
||||
- This may be convenient to template simple requests, for anything complex use the M(template) module.
|
||||
- You can collate multiple IMC XML fragments and they will be processed sequentially in a single stream,
|
||||
the Cisco IMC output is subsequently merged.
|
||||
- Parameter C(content) is mutual exclusive with parameter C(path).
|
||||
protocol:
|
||||
description:
|
||||
- Connection protocol to use.
|
||||
default: https
|
||||
choices: [ http, https ]
|
||||
timeout:
|
||||
description:
|
||||
- The socket level timeout in seconds.
|
||||
- This is the time that every single connection (every fragment) can spend.
|
||||
If this C(timeout) is reached, the module will fail with a
|
||||
C(Connection failure) indicating that C(The read operation timed out).
|
||||
default: 60
|
||||
validate_certs:
|
||||
description:
|
||||
- If C(no), SSL certificates will not be validated.
|
||||
- This should only set to C(no) used on personally controlled sites using self-signed certificates.
|
||||
type: bool
|
||||
default: 'yes'
|
||||
notes:
|
||||
- The XML fragments don't need an authentication cookie, this is injected by the module automatically.
|
||||
- The Cisco IMC XML output is being translated to JSON using the Cobra convention.
|
||||
- Any configConfMo change requested has a return status of 'modified', even if there was no actual change
|
||||
from the previous configuration. As a result, this module will always report a change on subsequent runs.
|
||||
In case this behaviour is fixed in a future update to Cisco IMC, this module will automatically adapt.
|
||||
- If you get a C(Connection failure) related to C(The read operation timed out) increase the C(timeout)
|
||||
parameter. Some XML fragments can take longer than the default timeout.
|
||||
- More information about the IMC REST API is available from
|
||||
U(http://www.cisco.com/c/en/us/td/docs/unified_computing/ucs/c/sw/api/3_0/b_Cisco_IMC_api_301.html)
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Power down server
|
||||
imc_rest:
|
||||
hostname: '{{ imc_hostname }}'
|
||||
username: '{{ imc_username }}'
|
||||
password: '{{ imc_password }}'
|
||||
validate_certs: no
|
||||
content: |
|
||||
<configConfMo><inConfig>
|
||||
<computeRackUnit dn="sys/rack-unit-1" adminPower="down"/>
|
||||
</inConfig></configConfMo>
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Configure IMC using multiple XML fragments
|
||||
imc_rest:
|
||||
hostname: '{{ imc_hostname }}'
|
||||
username: '{{ imc_username }}'
|
||||
password: '{{ imc_password }}'
|
||||
validate_certs: no
|
||||
timeout: 120
|
||||
content: |
|
||||
<!-- Configure Serial-on-LAN -->
|
||||
<configConfMo><inConfig>
|
||||
        <solIf dn="sys/rack-unit-1/sol-if" adminState="enable" speed="115200" comport="com0"/>
|
||||
</inConfig></configConfMo>
|
||||
|
||||
<!-- Configure Console Redirection -->
|
||||
<configConfMo><inConfig>
|
||||
<biosVfConsoleRedirection dn="sys/rack-unit-1/bios/bios-settings/Console-redirection"
|
||||
vpBaudRate="115200"
|
||||
vpConsoleRedirection="com-0"
|
||||
vpFlowControl="none"
|
||||
vpTerminalType="vt100"
|
||||
vpPuttyKeyPad="LINUX"
|
||||
vpRedirectionAfterPOST="Always Enable"/>
|
||||
</inConfig></configConfMo>
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Enable PXE boot and power-cycle server
|
||||
imc_rest:
|
||||
hostname: '{{ imc_hostname }}'
|
||||
username: '{{ imc_username }}'
|
||||
password: '{{ imc_password }}'
|
||||
validate_certs: no
|
||||
content: |
|
||||
<!-- Configure PXE boot -->
|
||||
<configConfMo><inConfig>
|
||||
<lsbootLan dn="sys/rack-unit-1/boot-policy/lan-read-only" access="read-only" order="1" prot="pxe" type="lan"/>
|
||||
</inConfig></configConfMo>
|
||||
|
||||
<!-- Power cycle server -->
|
||||
<configConfMo><inConfig>
|
||||
<computeRackUnit dn="sys/rack-unit-1" adminPower="cycle-immediate"/>
|
||||
</inConfig></configConfMo>
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Reconfigure IMC to boot from storage
|
||||
imc_rest:
|
||||
hostname: '{{ imc_host }}'
|
||||
username: '{{ imc_username }}'
|
||||
password: '{{ imc_password }}'
|
||||
validate_certs: no
|
||||
content: |
|
||||
<configConfMo><inConfig>
|
||||
<lsbootStorage dn="sys/rack-unit-1/boot-policy/storage-read-write" access="read-write" order="1" type="storage"/>
|
||||
</inConfig></configConfMo>
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Add customer description to server
|
||||
imc_rest:
|
||||
hostname: '{{ imc_host }}'
|
||||
username: '{{ imc_username }}'
|
||||
password: '{{ imc_password }}'
|
||||
validate_certs: no
|
||||
content: |
|
||||
<configConfMo><inConfig>
|
||||
<computeRackUnit dn="sys/rack-unit-1" usrLbl="Customer Lab - POD{{ pod_id }} - {{ inventory_hostname_short }}"/>
|
||||
</inConfig></configConfMo>
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Disable HTTP and increase session timeout to max value 10800 secs
|
||||
imc_rest:
|
||||
hostname: '{{ imc_host }}'
|
||||
username: '{{ imc_username }}'
|
||||
password: '{{ imc_password }}'
|
||||
validate_certs: no
|
||||
timeout: 120
|
||||
content: |
|
||||
<configConfMo><inConfig>
|
||||
<commHttp dn="sys/svc-ext/http-svc" adminState="disabled"/>
|
||||
</inConfig></configConfMo>
|
||||
|
||||
<configConfMo><inConfig>
|
||||
<commHttps dn="sys/svc-ext/https-svc" adminState="enabled" sessionTimeout="10800"/>
|
||||
</inConfig></configConfMo>
|
||||
delegate_to: localhost
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
aaLogin:
|
||||
description: Cisco IMC XML output for the login, translated to JSON using Cobra convention
|
||||
returned: success
|
||||
type: dict
|
||||
sample: |
|
||||
"attributes": {
|
||||
"cookie": "",
|
||||
"outCookie": "1498902428/9de6dc36-417c-157c-106c-139efe2dc02a",
|
||||
"outPriv": "admin",
|
||||
"outRefreshPeriod": "600",
|
||||
"outSessionId": "114",
|
||||
"outVersion": "2.0(13e)",
|
||||
"response": "yes"
|
||||
}
|
||||
configConfMo:
|
||||
description: Cisco IMC XML output for any configConfMo XML fragments, translated to JSON using Cobra convention
|
||||
returned: success
|
||||
type: dict
|
||||
sample: |
|
||||
elapsed:
|
||||
description: Elapsed time in seconds
|
||||
returned: always
|
||||
type: int
|
||||
sample: 31
|
||||
response:
|
||||
description: HTTP response message, including content length
|
||||
returned: always
|
||||
type: str
|
||||
sample: OK (729 bytes)
|
||||
status:
|
||||
description: The HTTP response status code
|
||||
returned: always
|
||||
type: dict
|
||||
sample: 200
|
||||
error:
|
||||
description: Cisco IMC XML error output for last request, translated to JSON using Cobra convention
|
||||
returned: failed
|
||||
type: dict
|
||||
sample: |
|
||||
"attributes": {
|
||||
"cookie": "",
|
||||
"errorCode": "ERR-xml-parse-error",
|
||||
"errorDescr": "XML PARSING ERROR: Element 'computeRackUnit', attribute 'admin_Power': The attribute 'admin_Power' is not allowed. ",
|
||||
"invocationResult": "594",
|
||||
"response": "yes"
|
||||
}
|
||||
error_code:
|
||||
description: Cisco IMC error code
|
||||
returned: failed
|
||||
type: str
|
||||
sample: ERR-xml-parse-error
|
||||
error_text:
|
||||
description: Cisco IMC error message
|
||||
returned: failed
|
||||
type: str
|
||||
sample: |
|
||||
XML PARSING ERROR: Element 'computeRackUnit', attribute 'admin_Power': The attribute 'admin_Power' is not allowed.
|
||||
input:
|
||||
description: RAW XML input sent to the Cisco IMC, causing the error
|
||||
returned: failed
|
||||
type: str
|
||||
sample: |
|
||||
<configConfMo><inConfig><computeRackUnit dn="sys/rack-unit-1" admin_Power="down"/></inConfig></configConfMo>
|
||||
output:
|
||||
description: RAW XML output received from the Cisco IMC, with error details
|
||||
returned: failed
|
||||
type: str
|
||||
sample: >
|
||||
<error cookie=""
|
||||
response="yes"
|
||||
errorCode="ERR-xml-parse-error"
|
||||
invocationResult="594"
|
||||
errorDescr="XML PARSING ERROR: Element 'computeRackUnit', attribute 'admin_Power': The attribute 'admin_Power' is not allowed.\n"/>
|
||||
'''
|
||||
|
||||
import atexit
|
||||
import datetime
|
||||
import itertools
|
||||
import os
|
||||
import traceback
|
||||
|
||||
LXML_ETREE_IMP_ERR = None
|
||||
try:
|
||||
import lxml.etree
|
||||
HAS_LXML_ETREE = True
|
||||
except ImportError:
|
||||
LXML_ETREE_IMP_ERR = traceback.format_exc()
|
||||
HAS_LXML_ETREE = False
|
||||
|
||||
XMLJSON_COBRA_IMP_ERR = None
|
||||
try:
|
||||
from xmljson import cobra
|
||||
HAS_XMLJSON_COBRA = True
|
||||
except ImportError:
|
||||
XMLJSON_COBRA_IMP_ERR = traceback.format_exc()
|
||||
HAS_XMLJSON_COBRA = False
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
|
||||
from ansible.module_utils.urls import fetch_url
|
||||
|
||||
|
||||
def imc_response(module, rawoutput, rawinput=''):
    '''Parse raw IMC XML output into a Cobra-convention dict.

    If the IMC reported an error (errorCode/errorDescr attributes on the
    root element), fail the module immediately, attaching the raw request
    and response so the failure is debuggable from the result.
    '''
    root = lxml.etree.fromstring(rawoutput)
    parsed = cobra.data(root)

    error_code = root.get('errorCode')
    error_text = root.get('errorDescr')
    if error_code and error_text:
        if rawinput:
            parsed['input'] = rawinput
        parsed['output'] = rawoutput
        parsed['error_code'] = error_code
        parsed['error_text'] = error_text
        module.fail_json(msg='Request failed: %(error_text)s' % parsed, **parsed)

    return parsed
|
||||
|
||||
|
||||
def logout(module, url, cookie, timeout):
    '''Close the IMC session identified by *cookie*.

    Registered via atexit; fire-and-forget — the response is ignored
    since the process is shutting down anyway.
    '''
    payload = '<aaaLogout cookie="%s" inCookie="%s"/>' % (cookie, cookie)
    fetch_url(module, url, data=payload, method="POST", timeout=timeout)
|
||||
|
||||
|
||||
def merge(one, two):
    '''Recursively merge two nested data structures.

    Dicts are merged key-wise, lists element-wise (the shorter list is
    padded with None), and for scalars the value from *two* wins unless
    it is None, in which case the value from *one* is kept.
    '''
    if isinstance(one, dict) and isinstance(two, dict):
        copy = dict(one)
        copy.update(dict((key, merge(one.get(key, None), two[key])) for key in two))
        return copy

    elif isinstance(one, list) and isinstance(two, list):
        # itertools.izip_longest only exists on Python 2; Python 3 renamed it
        # to zip_longest, so the original call raised AttributeError there.
        # Resolve whichever name is available at runtime.
        try:
            zip_longest = itertools.zip_longest
        except AttributeError:  # Python 2
            zip_longest = itertools.izip_longest
        return [merge(alpha, beta) for (alpha, beta) in zip_longest(one, two)]

    return one if two is None else two
|
||||
|
||||
|
||||
def main():
    """Ansible entry point for imc_rest.

    Logs in to the Cisco IMC, sends each XML fragment from C(content) or
    C(path) as a separate request within the same session, merges the
    JSON-translated responses into one result, and logs out at exit.
    """
    module = AnsibleModule(
        argument_spec=dict(
            hostname=dict(type='str', required=True, aliases=['host', 'ip']),
            username=dict(type='str', default='admin', aliases=['user']),
            password=dict(type='str', default='password', no_log=True),
            content=dict(type='str'),
            path=dict(type='path', aliases=['config_file', 'src']),
            protocol=dict(type='str', default='https', choices=['http', 'https']),
            timeout=dict(type='int', default=60),
            validate_certs=dict(type='bool', default=True),
        ),
        supports_check_mode=True,
        mutually_exclusive=[['content', 'path']],
        # Without one of the two there is no request body at all and the
        # module previously crashed with a NameError on 'rawdata' below.
        required_one_of=[['content', 'path']],
    )

    if not HAS_LXML_ETREE:
        module.fail_json(msg=missing_required_lib('lxml'), exception=LXML_ETREE_IMP_ERR)

    if not HAS_XMLJSON_COBRA:
        module.fail_json(msg=missing_required_lib('xmljson >= 0.1.8'), exception=XMLJSON_COBRA_IMP_ERR)

    hostname = module.params['hostname']
    username = module.params['username']
    password = module.params['password']

    content = module.params['content']
    path = module.params['path']

    protocol = module.params['protocol']
    timeout = module.params['timeout']

    result = dict(
        failed=False,
        changed=False,
    )

    # Report missing file
    file_exists = False
    if path:
        if os.path.isfile(path):
            file_exists = True
        else:
            module.fail_json(msg='Cannot find/access path:\n%s' % path)

    # Wall-clock reference for the 'elapsed' field on every exit path.
    start = datetime.datetime.utcnow()

    # Perform login first
    url = '%s://%s/nuova' % (protocol, hostname)
    data = '<aaaLogin inName="%s" inPassword="%s"/>' % (username, password)
    resp, auth = fetch_url(module, url, data=data, method='POST', timeout=timeout)
    if resp is None or auth['status'] != 200:
        result['elapsed'] = (datetime.datetime.utcnow() - start).seconds
        module.fail_json(msg='Task failed with error %(status)s: %(msg)s' % auth, **result)
    result.update(imc_response(module, resp.read()))

    # Store cookie for future requests
    try:
        cookie = result['aaaLogin']['attributes']['outCookie']
    except Exception:
        module.fail_json(msg='Could not find cookie in output', **result)

    # If we would not log out properly, we run out of sessions quickly
    atexit.register(logout, module, url, cookie, timeout)

    # Prepare request data
    if content:
        rawdata = content
    elif file_exists:
        with open(path, 'r') as config_object:
            rawdata = config_object.read()

    # Wrap the XML documents in a <root> element so multiple collated
    # fragments parse as a single well-formed document.
    xmldata = lxml.etree.fromstring('<root>%s</root>' % rawdata.replace('\n', ''))

    # Handle each XML document separately in the same session
    for xmldoc in list(xmldata):
        if xmldoc.tag is lxml.etree.Comment:
            continue
        # Add cookie to XML
        xmldoc.set('cookie', cookie)
        data = lxml.etree.tostring(xmldoc)

        # Perform actual request
        resp, info = fetch_url(module, url, data=data, method='POST', timeout=timeout)
        if resp is None or info['status'] != 200:
            result['elapsed'] = (datetime.datetime.utcnow() - start).seconds
            module.fail_json(msg='Task failed with error %(status)s: %(msg)s' % info, **result)

        # Merge results with previous results
        rawoutput = resp.read()
        result = merge(result, imc_response(module, rawoutput, rawinput=data))
        result['response'] = info['msg']
        result['status'] = info['status']

        # Check for any changes
        # NOTE: Unfortunately IMC API always report status as 'modified'
        xmloutput = lxml.etree.fromstring(rawoutput)
        results = xmloutput.xpath('/configConfMo/outConfig/*/@status')
        result['changed'] = ('modified' in results)

    # Report success
    result['elapsed'] = (datetime.datetime.utcnow() - start).seconds
    module.exit_json(**result)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
118
plugins/modules/remote_management/intersight/intersight_info.py
Normal file
118
plugins/modules/remote_management/intersight/intersight_info.py
Normal file
@@ -0,0 +1,118 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: intersight_info
|
||||
short_description: Gather information about Intersight
|
||||
description:
|
||||
- Gathers information about servers in L(Cisco Intersight,https://intersight.com).
|
||||
- This module was called C(intersight_facts) before Ansible 2.9. The usage did not change.
|
||||
extends_documentation_fragment:
|
||||
- cisco.intersight.intersight
|
||||
|
||||
options:
|
||||
server_names:
|
||||
description:
|
||||
- Server names to retrieve information from.
|
||||
- An empty list will return all servers.
|
||||
type: list
|
||||
required: yes
|
||||
author:
|
||||
- David Soper (@dsoper2)
|
||||
- CiscoUcs (@CiscoUcs)
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Get info for all servers
|
||||
intersight_info:
|
||||
api_private_key: ~/Downloads/SecretKey.txt
|
||||
api_key_id: 64612d300d0982/64612d300d0b00/64612d300d3650
|
||||
server_names:
|
||||
- debug:
|
||||
msg: "server name {{ item.Name }}, moid {{ item.Moid }}"
|
||||
loop: "{{ intersight_servers }}"
|
||||
when: intersight_servers is defined
|
||||
|
||||
- name: Get info for servers by name
|
||||
intersight_info:
|
||||
api_private_key: ~/Downloads/SecretKey.txt
|
||||
api_key_id: 64612d300d0982/64612d300d0b00/64612d300d3650
|
||||
server_names:
|
||||
- SJC18-L14-UCS1-1
|
||||
- debug:
|
||||
msg: "server moid {{ intersight_servers[0].Moid }}"
|
||||
when: intersight_servers[0] is defined
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
intersight_servers:
|
||||
description: A list of Intersight Servers. See L(Cisco Intersight,https://intersight.com/apidocs) for details.
|
||||
returned: always
|
||||
type: complex
|
||||
contains:
|
||||
Name:
|
||||
description: The name of the server.
|
||||
returned: always
|
||||
type: str
|
||||
sample: SJC18-L14-UCS1-1
|
||||
Moid:
|
||||
description: The unique identifier of this Managed Object instance.
|
||||
returned: always
|
||||
type: str
|
||||
sample: 5978bea36ad4b000018d63dc
|
||||
'''
|
||||
|
||||
from ansible_collections.cisco.intersight.plugins.module_utils.remote_management.intersight import IntersightModule, intersight_argument_spec
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
|
||||
|
||||
def get_servers(module, intersight):
    '''Return the list of physical server summaries from Intersight.

    Builds an OData $filter from the requested server names (an empty
    name list yields an empty filter, i.e. all servers) and issues a
    single GET against /compute/PhysicalSummaries.
    '''
    requested = module.params['server_names'] or []
    query_str = ' or '.join("Name eq '%s'" % name for name in requested)
    response_dict = intersight.call_api(
        http_method='get',
        resource_path='/compute/PhysicalSummaries',
        query_params={
            '$filter': query_str,
            '$top': 5000
        },
    )

    return response_dict.get('Results')
|
||||
|
||||
|
||||
def main():
    """Ansible entry point for intersight_info: fetch server summaries."""
    # Copy the shared spec before extending it: calling .update() on the
    # imported intersight_argument_spec mutates that module-level dict in
    # place, leaking 'server_names' into every other consumer of the spec
    # in the same process.
    argument_spec = dict(intersight_argument_spec)
    argument_spec.update(
        server_names=dict(type='list', required=True),
    )

    module = AnsibleModule(
        argument_spec,
        supports_check_mode=True,
    )
    if module._name == 'intersight_facts':
        module.deprecate("The 'intersight_facts' module has been renamed to 'intersight_info'", version='2.13')

    intersight = IntersightModule(module)

    # one API call returning all requested servers
    module.exit_json(intersight_servers=get_servers(module, intersight))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
190
plugins/modules/remote_management/ipmi/ipmi_boot.py
Normal file
190
plugins/modules/remote_management/ipmi/ipmi_boot.py
Normal file
@@ -0,0 +1,190 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright: Ansible Project
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: ipmi_boot
|
||||
short_description: Management of order of boot devices
|
||||
description:
|
||||
- Use this module to manage order of boot devices
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Hostname or ip address of the BMC.
|
||||
required: true
|
||||
port:
|
||||
description:
|
||||
- Remote RMCP port.
|
||||
default: 623
|
||||
user:
|
||||
description:
|
||||
- Username to use to connect to the BMC.
|
||||
required: true
|
||||
password:
|
||||
description:
|
||||
- Password to connect to the BMC.
|
||||
required: true
|
||||
bootdev:
|
||||
description:
|
||||
- Set boot device to use on next reboot
|
||||
required: true
|
||||
choices:
|
||||
- network -- Request network boot
|
||||
- floppy -- Boot from floppy
|
||||
- hd -- Boot from hard drive
|
||||
- safe -- Boot from hard drive, requesting 'safe mode'
|
||||
- optical -- boot from CD/DVD/BD drive
|
||||
- setup -- Boot into setup utility
|
||||
- default -- remove any IPMI directed boot device request
|
||||
state:
|
||||
description:
|
||||
- Whether to ensure that boot devices is desired.
|
||||
default: present
|
||||
choices:
|
||||
        - present -- Ensure the requested boot device is set
        - absent -- Ensure the requested boot device is not set
|
||||
persistent:
|
||||
description:
|
||||
- If set, ask that system firmware uses this device beyond next boot.
|
||||
Be aware many systems do not honor this.
|
||||
type: bool
|
||||
default: 'no'
|
||||
uefiboot:
|
||||
description:
|
||||
- If set, request UEFI boot explicitly.
|
||||
Strictly speaking, the spec suggests that if not set, the system should BIOS boot and offers no "don't care" option.
|
||||
In practice, this flag not being set does not preclude UEFI boot on any system I've encountered.
|
||||
type: bool
|
||||
default: 'no'
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- pyghmi
|
||||
author: "Bulat Gaifullin (@bgaifullin) <gaifullinbf@gmail.com>"
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
bootdev:
|
||||
description: The boot device name which will be used beyond next boot.
|
||||
returned: success
|
||||
type: str
|
||||
sample: default
|
||||
persistent:
|
||||
description: If True, system firmware will use this device beyond next boot.
|
||||
returned: success
|
||||
type: bool
|
||||
sample: false
|
||||
uefimode:
|
||||
description: If True, system firmware will use UEFI boot explicitly beyond next boot.
|
||||
returned: success
|
||||
type: bool
|
||||
sample: false
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Ensure bootdevice is HD.
|
||||
- ipmi_boot:
|
||||
name: test.testdomain.com
|
||||
user: admin
|
||||
password: password
|
||||
bootdev: hd
|
||||
|
||||
# Ensure bootdevice is not Network
|
||||
- ipmi_boot:
|
||||
name: test.testdomain.com
|
||||
user: admin
|
||||
password: password
|
||||
bootdev: network
|
||||
state: absent
|
||||
'''
|
||||
|
||||
import traceback
|
||||
|
||||
PYGHMI_IMP_ERR = None
|
||||
try:
|
||||
from pyghmi.ipmi import command
|
||||
except ImportError:
|
||||
PYGHMI_IMP_ERR = traceback.format_exc()
|
||||
command = None
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
|
||||
|
||||
|
||||
def main():
    """Ansible entry point for ipmi_boot.

    Connects to the BMC with pyghmi, compares the current boot-device
    request against the desired one, and only issues set_bootdev() when
    they differ.  Exits unchanged when already in the desired state.
    """
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True),
            port=dict(default=623, type='int'),
            user=dict(required=True, no_log=True),
            password=dict(required=True, no_log=True),
            state=dict(default='present', choices=['present', 'absent']),
            bootdev=dict(required=True, choices=['network', 'hd', 'floppy', 'safe', 'optical', 'setup', 'default']),
            persistent=dict(default=False, type='bool'),
            uefiboot=dict(default=False, type='bool')
        ),
        supports_check_mode=True,
    )

    # pyghmi is an optional dependency; 'command' is None when its import failed.
    if command is None:
        module.fail_json(msg=missing_required_lib('pyghmi'), exception=PYGHMI_IMP_ERR)

    name = module.params['name']
    port = module.params['port']
    user = module.params['user']
    password = module.params['password']
    state = module.params['state']
    bootdev = module.params['bootdev']
    persistent = module.params['persistent']
    uefiboot = module.params['uefiboot']
    # 'request' stays empty when no change is required; it is filled with the
    # keyword arguments for set_bootdev() below.
    request = dict()

    # state=absent with bootdev=default is contradictory: 'default' IS the
    # absence of an IPMI-directed boot device request.
    if state == 'absent' and bootdev == 'default':
        module.fail_json(msg="The bootdev 'default' cannot be used with state 'absent'.")

    # --- run command ---
    try:
        ipmi_cmd = command.Command(
            bmc=name, userid=user, password=password, port=port
        )
        module.debug('ipmi instantiated - name: "%s"' % name)
        current = ipmi_cmd.get_bootdev()
        # uefimode may not supported by BMC, so use desired value as default
        current.setdefault('uefimode', uefiboot)
        # present: change only when the full (bootdev, persistent, uefimode)
        # triple differs; absent: reset to 'default' only when the unwanted
        # device is currently selected.  Otherwise report no change.
        if state == 'present' and current != dict(bootdev=bootdev, persistent=persistent, uefimode=uefiboot):
            request = dict(bootdev=bootdev, uefiboot=uefiboot, persist=persistent)
        elif state == 'absent' and current['bootdev'] == bootdev:
            request = dict(bootdev='default')
        else:
            module.exit_json(changed=False, **current)

        # In check mode, fabricate the minimal response without touching the BMC.
        if module.check_mode:
            response = dict(bootdev=request['bootdev'])
        else:
            response = ipmi_cmd.set_bootdev(**request)

        if 'error' in response:
            module.fail_json(msg=response['error'])

        # Echo back what was requested so the RETURN keys (persistent,
        # uefimode) are populated even when the BMC response omits them.
        if 'persist' in request:
            response['persistent'] = request['persist']
        if 'uefiboot' in request:
            response['uefimode'] = request['uefiboot']

        module.exit_json(changed=True, **response)
    except Exception as e:
        module.fail_json(msg=str(e))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
135
plugins/modules/remote_management/ipmi/ipmi_power.py
Normal file
135
plugins/modules/remote_management/ipmi/ipmi_power.py
Normal file
@@ -0,0 +1,135 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright: Ansible Project
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: ipmi_power
|
||||
short_description: Power management for machine
|
||||
description:
|
||||
- Use this module for power management
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Hostname or ip address of the BMC.
|
||||
required: true
|
||||
port:
|
||||
description:
|
||||
- Remote RMCP port.
|
||||
default: 623
|
||||
user:
|
||||
description:
|
||||
- Username to use to connect to the BMC.
|
||||
required: true
|
||||
password:
|
||||
description:
|
||||
- Password to connect to the BMC.
|
||||
required: true
|
||||
state:
|
||||
description:
|
||||
- Whether to ensure that the machine in desired state.
|
||||
required: true
|
||||
choices:
|
||||
- on -- Request system turn on
|
||||
- off -- Request system turn off without waiting for OS to shutdown
|
||||
- shutdown -- Have system request OS proper shutdown
|
||||
- reset -- Request system reset without waiting for OS
|
||||
- boot -- If system is off, then 'on', else 'reset'
|
||||
timeout:
|
||||
description:
|
||||
- Maximum number of seconds before interrupt request.
|
||||
default: 300
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- pyghmi
|
||||
author: "Bulat Gaifullin (@bgaifullin) <gaifullinbf@gmail.com>"
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
powerstate:
|
||||
description: The current power state of the machine.
|
||||
returned: success
|
||||
type: str
|
||||
sample: on
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Ensure machine is powered on.
|
||||
- ipmi_power:
|
||||
name: test.testdomain.com
|
||||
user: admin
|
||||
password: password
|
||||
state: on
|
||||
'''
|
||||
|
||||
import traceback
|
||||
|
||||
PYGHMI_IMP_ERR = None
|
||||
try:
|
||||
from pyghmi.ipmi import command
|
||||
except ImportError:
|
||||
PYGHMI_IMP_ERR = traceback.format_exc()
|
||||
command = None
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
|
||||
|
||||
|
||||
def main():
    '''Ansible entry point for ipmi_power: ensure the machine behind the
    BMC is in the requested power state, reporting changed=False when it
    already is.'''
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True),
            port=dict(default=623, type='int'),
            state=dict(required=True, choices=['on', 'off', 'shutdown', 'reset', 'boot']),
            user=dict(required=True, no_log=True),
            password=dict(required=True, no_log=True),
            timeout=dict(default=300, type='int'),
        ),
        supports_check_mode=True,
    )

    # pyghmi is optional; 'command' is None when its import failed.
    if command is None:
        module.fail_json(msg=missing_required_lib('pyghmi'), exception=PYGHMI_IMP_ERR)

    bmc_name = module.params['name']
    bmc_port = module.params['port']
    bmc_user = module.params['user']
    bmc_password = module.params['password']
    desired_state = module.params['state']
    wait_timeout = module.params['timeout']

    # --- run command ---
    try:
        ipmi_cmd = command.Command(
            bmc=bmc_name, userid=bmc_user, password=bmc_password, port=bmc_port
        )
        module.debug('ipmi instantiated - name: "%s"' % bmc_name)

        current = ipmi_cmd.get_power()
        changed = current['powerstate'] != desired_state
        if not changed:
            response = current
        elif module.check_mode:
            # Pretend the transition happened without touching the BMC.
            response = {'powerstate': desired_state}
        else:
            response = ipmi_cmd.set_power(desired_state, wait=wait_timeout)

        if 'error' in response:
            module.fail_json(msg=response['error'])

        module.exit_json(changed=changed, **response)
    except Exception as e:
        module.fail_json(msg=str(e))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
177
plugins/modules/remote_management/lxca/lxca_cmms.py
Normal file
177
plugins/modules/remote_management/lxca/lxca_cmms.py
Normal file
@@ -0,0 +1,177 @@
|
||||
#!/usr/bin/python
|
||||
# GNU General Public License v3.0+ (see COPYING or
|
||||
# https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
#
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'supported_by': 'community',
|
||||
'status': ['preview']
|
||||
}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
author:
|
||||
- Naval Patel (@navalkp)
|
||||
- Prashant Bhosale (@prabhosa)
|
||||
module: lxca_cmms
|
||||
short_description: Custom module for lxca cmms inventory utility
|
||||
description:
|
||||
    - This module returns/displays inventory details of cmms
|
||||
|
||||
options:
|
||||
uuid:
|
||||
description:
|
||||
uuid of device, this is string with length greater than 16.
|
||||
|
||||
command_options:
|
||||
description:
|
||||
options to filter nodes information
|
||||
default: cmms
|
||||
choices:
|
||||
- cmms
|
||||
- cmms_by_uuid
|
||||
- cmms_by_chassis_uuid
|
||||
|
||||
chassis:
|
||||
description:
|
||||
uuid of chassis, this is string with length greater than 16.
|
||||
|
||||
extends_documentation_fragment:
|
||||
- community.general.lxca_common
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# get all cmms info
|
||||
- name: get nodes data from LXCA
|
||||
lxca_cmms:
|
||||
login_user: USERID
|
||||
login_password: Password
|
||||
auth_url: "https://10.243.15.168"
|
||||
|
||||
# get specific cmms info by uuid
|
||||
- name: get nodes data from LXCA
|
||||
lxca_cmms:
|
||||
login_user: USERID
|
||||
login_password: Password
|
||||
auth_url: "https://10.243.15.168"
|
||||
uuid: "3C737AA5E31640CE949B10C129A8B01F"
|
||||
command_options: cmms_by_uuid
|
||||
|
||||
# get specific cmms info by chassis uuid
|
||||
- name: get nodes data from LXCA
|
||||
lxca_cmms:
|
||||
login_user: USERID
|
||||
login_password: Password
|
||||
auth_url: "https://10.243.15.168"
|
||||
chassis: "3C737AA5E31640CE949B10C129A8B01F"
|
||||
command_options: cmms_by_chassis_uuid
|
||||
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
result:
|
||||
description: cmms detail from lxca
|
||||
returned: success
|
||||
type: dict
|
||||
sample:
|
||||
cmmList:
|
||||
- machineType: ''
|
||||
model: ''
|
||||
type: 'CMM'
|
||||
uuid: '118D2C88C8FD11E4947B6EAE8B4BDCDF'
|
||||
# bunch of properties
|
||||
- machineType: ''
|
||||
model: ''
|
||||
type: 'CMM'
|
||||
uuid: '223D2C88C8FD11E4947B6EAE8B4BDCDF'
|
||||
# bunch of properties
|
||||
# Multiple cmms details
|
||||
'''
|
||||
|
||||
import traceback
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common import LXCA_COMMON_ARGS, has_pylxca, connection_object
|
||||
try:
|
||||
from pylxca import cmms
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
|
||||
UUID_REQUIRED = 'UUID of device is required for cmms_by_uuid command.'
|
||||
CHASSIS_UUID_REQUIRED = 'UUID of chassis is required for cmms_by_chassis_uuid command.'
|
||||
SUCCESS_MSG = "Success %s result"
|
||||
|
||||
|
||||
def _cmms(module, lxca_con):
    """Return the full cmms inventory from the LXCA connection."""
    result = cmms(lxca_con)
    return result
|
||||
|
||||
|
||||
def _cmms_by_uuid(module, lxca_con):
    """Return cmms details for the device identified by the ``uuid`` option."""
    device_uuid = module.params['uuid']
    if not device_uuid:
        module.fail_json(msg=UUID_REQUIRED)
    return cmms(lxca_con, device_uuid)
|
||||
|
||||
|
||||
def _cmms_by_chassis_uuid(module, lxca_con):
    """Return cmms details for the chassis identified by the ``chassis`` option."""
    chassis_uuid = module.params['chassis']
    if not chassis_uuid:
        module.fail_json(msg=CHASSIS_UUID_REQUIRED)
    return cmms(lxca_con, chassis=chassis_uuid)
|
||||
|
||||
|
||||
def setup_module_object():
    """
    Merge the common LXCA argument spec with this module's own arguments
    and build the AnsibleModule object.

    :return: configured AnsibleModule instance
    """
    combined_spec = dict(LXCA_COMMON_ARGS)
    combined_spec.update(INPUT_ARG_SPEC)
    return AnsibleModule(argument_spec=combined_spec, supports_check_mode=False)
|
||||
|
||||
|
||||
# Dispatch table mapping each supported command_options value to its handler.
FUNC_DICT = {
    'cmms': _cmms,
    'cmms_by_uuid': _cmms_by_uuid,
    'cmms_by_chassis_uuid': _cmms_by_chassis_uuid,
}
|
||||
|
||||
|
||||
# Arguments specific to this module; merged with LXCA_COMMON_ARGS at setup.
INPUT_ARG_SPEC = dict(
    command_options=dict(
        default='cmms',
        choices=['cmms', 'cmms_by_uuid', 'cmms_by_chassis_uuid'],
    ),
    uuid=dict(default=None),
    chassis=dict(default=None),
)
|
||||
|
||||
|
||||
def execute_module(module):
    """
    Invoke the handler selected by ``command_options`` and exit the module.

    :param module: Ansible module object
    """
    try:
        with connection_object(module) as lxca_con:
            result = FUNC_DICT[module.params['command_options']](module, lxca_con)
            module.exit_json(changed=False,
                             msg=SUCCESS_MSG % module.params['command_options'],
                             result=result)
    except Exception as exception:
        # exception.args may contain non-string values; coerce each one so
        # that the join cannot itself raise TypeError while reporting the
        # original failure.
        error_msg = '; '.join(str(arg) for arg in exception.args)
        module.fail_json(msg=error_msg, exception=traceback.format_exc())
|
||||
|
||||
|
||||
def main():
    """Entry point: build the module, verify pylxca is present, then run."""
    module = setup_module_object()
    has_pylxca(module)
    execute_module(module)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
207
plugins/modules/remote_management/lxca/lxca_nodes.py
Normal file
207
plugins/modules/remote_management/lxca/lxca_nodes.py
Normal file
@@ -0,0 +1,207 @@
|
||||
#!/usr/bin/python
|
||||
# GNU General Public License v3.0+ (see COPYING or
|
||||
# https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
#
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'supported_by': 'community',
|
||||
'status': ['preview']
|
||||
}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
author:
|
||||
- Naval Patel (@navalkp)
|
||||
- Prashant Bhosale (@prabhosa)
|
||||
module: lxca_nodes
|
||||
short_description: Custom module for lxca nodes inventory utility
|
||||
description:
|
||||
    - This module returns/displays inventory details of nodes
|
||||
|
||||
options:
|
||||
uuid:
|
||||
description:
|
||||
uuid of device, this is string with length greater than 16.
|
||||
|
||||
command_options:
|
||||
description:
|
||||
options to filter nodes information
|
||||
default: nodes
|
||||
choices:
|
||||
- nodes
|
||||
- nodes_by_uuid
|
||||
- nodes_by_chassis_uuid
|
||||
- nodes_status_managed
|
||||
- nodes_status_unmanaged
|
||||
|
||||
chassis:
|
||||
description:
|
||||
uuid of chassis, this is string with length greater than 16.
|
||||
|
||||
extends_documentation_fragment:
|
||||
- community.general.lxca_common
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# get all nodes info
|
||||
- name: get nodes data from LXCA
|
||||
lxca_nodes:
|
||||
login_user: USERID
|
||||
login_password: Password
|
||||
auth_url: "https://10.243.15.168"
|
||||
command_options: nodes
|
||||
|
||||
# get specific nodes info by uuid
|
||||
- name: get nodes data from LXCA
|
||||
lxca_nodes:
|
||||
login_user: USERID
|
||||
login_password: Password
|
||||
auth_url: "https://10.243.15.168"
|
||||
uuid: "3C737AA5E31640CE949B10C129A8B01F"
|
||||
command_options: nodes_by_uuid
|
||||
|
||||
# get specific nodes info by chassis uuid
|
||||
- name: get nodes data from LXCA
|
||||
lxca_nodes:
|
||||
login_user: USERID
|
||||
login_password: Password
|
||||
auth_url: "https://10.243.15.168"
|
||||
chassis: "3C737AA5E31640CE949B10C129A8B01F"
|
||||
command_options: nodes_by_chassis_uuid
|
||||
|
||||
# get managed nodes
|
||||
- name: get nodes data from LXCA
|
||||
lxca_nodes:
|
||||
login_user: USERID
|
||||
login_password: Password
|
||||
auth_url: "https://10.243.15.168"
|
||||
command_options: nodes_status_managed
|
||||
|
||||
# get unmanaged nodes
|
||||
- name: get nodes data from LXCA
|
||||
lxca_nodes:
|
||||
login_user: USERID
|
||||
login_password: Password
|
||||
auth_url: "https://10.243.15.168"
|
||||
command_options: nodes_status_unmanaged
|
||||
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
result:
|
||||
description: nodes detail from lxca
|
||||
returned: always
|
||||
type: dict
|
||||
sample:
|
||||
nodeList:
|
||||
- machineType: '6241'
|
||||
model: 'AC1'
|
||||
type: 'Rack-TowerServer'
|
||||
uuid: '118D2C88C8FD11E4947B6EAE8B4BDCDF'
|
||||
# bunch of properties
|
||||
- machineType: '8871'
|
||||
model: 'AC1'
|
||||
type: 'Rack-TowerServer'
|
||||
uuid: '223D2C88C8FD11E4947B6EAE8B4BDCDF'
|
||||
# bunch of properties
|
||||
# Multiple nodes details
|
||||
'''
|
||||
|
||||
import traceback
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common import LXCA_COMMON_ARGS, has_pylxca, connection_object
|
||||
try:
|
||||
from pylxca import nodes
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
|
||||
UUID_REQUIRED = 'UUID of device is required for nodes_by_uuid command.'
|
||||
CHASSIS_UUID_REQUIRED = 'UUID of chassis is required for nodes_by_chassis_uuid command.'
|
||||
SUCCESS_MSG = "Success %s result"
|
||||
|
||||
|
||||
def _nodes(module, lxca_con):
    """Return the full nodes inventory from the LXCA connection."""
    result = nodes(lxca_con)
    return result
|
||||
|
||||
|
||||
def _nodes_by_uuid(module, lxca_con):
    """Return node details for the device identified by the ``uuid`` option."""
    device_uuid = module.params['uuid']
    if not device_uuid:
        module.fail_json(msg=UUID_REQUIRED)
    return nodes(lxca_con, device_uuid)
|
||||
|
||||
|
||||
def _nodes_by_chassis_uuid(module, lxca_con):
    """Return node details for the chassis identified by the ``chassis`` option."""
    chassis_uuid = module.params['chassis']
    if not chassis_uuid:
        module.fail_json(msg=CHASSIS_UUID_REQUIRED)
    return nodes(lxca_con, chassis=chassis_uuid)
|
||||
|
||||
|
||||
def _nodes_status_managed(module, lxca_con):
    """Return only the nodes that are currently in 'managed' status."""
    result = nodes(lxca_con, status='managed')
    return result
|
||||
|
||||
|
||||
def _nodes_status_unmanaged(module, lxca_con):
    """Return only the nodes that are currently in 'unmanaged' status."""
    result = nodes(lxca_con, status='unmanaged')
    return result
|
||||
|
||||
|
||||
def setup_module_object():
    """
    Merge the common LXCA argument spec with this module's own arguments
    and build the AnsibleModule object.

    :return: configured AnsibleModule instance
    """
    combined_spec = dict(LXCA_COMMON_ARGS)
    combined_spec.update(INPUT_ARG_SPEC)
    return AnsibleModule(argument_spec=combined_spec, supports_check_mode=False)
|
||||
|
||||
|
||||
# Dispatch table mapping each supported command_options value to its handler.
FUNC_DICT = {
    'nodes': _nodes,
    'nodes_by_uuid': _nodes_by_uuid,
    'nodes_by_chassis_uuid': _nodes_by_chassis_uuid,
    'nodes_status_managed': _nodes_status_managed,
    'nodes_status_unmanaged': _nodes_status_unmanaged,
}
|
||||
|
||||
|
||||
# Arguments specific to this module; merged with LXCA_COMMON_ARGS at setup.
INPUT_ARG_SPEC = dict(
    command_options=dict(
        default='nodes',
        choices=['nodes', 'nodes_by_uuid', 'nodes_by_chassis_uuid',
                 'nodes_status_managed', 'nodes_status_unmanaged'],
    ),
    uuid=dict(default=None),
    chassis=dict(default=None),
)
|
||||
|
||||
|
||||
def execute_module(module):
    """
    Invoke the handler selected by ``command_options`` and exit the module.

    :param module: Ansible module object
    """
    try:
        with connection_object(module) as lxca_con:
            result = FUNC_DICT[module.params['command_options']](module, lxca_con)
            module.exit_json(changed=False,
                             msg=SUCCESS_MSG % module.params['command_options'],
                             result=result)
    except Exception as exception:
        # exception.args may contain non-string values; str.join requires
        # strings, so coerce each arg to avoid raising a secondary TypeError
        # while reporting the original failure (matches lxca_cmms behavior).
        error_msg = '; '.join(str(arg) for arg in exception.args)
        module.fail_json(msg=error_msg, exception=traceback.format_exc())
|
||||
|
||||
|
||||
def main():
    """Entry point: build the module, verify pylxca is present, then run."""
    module = setup_module_object()
    has_pylxca(module)
    execute_module(module)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -0,0 +1,303 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) 2017 Red Hat Inc.
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
|
||||
module: manageiq_alert_profiles
|
||||
|
||||
short_description: Configuration of alert profiles for ManageIQ
|
||||
extends_documentation_fragment:
|
||||
- community.general.manageiq
|
||||
|
||||
author: Elad Alfassa (@elad661) <ealfassa@redhat.com>
|
||||
description:
|
||||
- The manageiq_alert_profiles module supports adding, updating and deleting alert profiles in ManageIQ.
|
||||
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
- absent - alert profile should not exist,
|
||||
- present - alert profile should exist,
|
||||
choices: ['absent', 'present']
|
||||
default: 'present'
|
||||
name:
|
||||
description:
|
||||
- The unique alert profile name in ManageIQ.
|
||||
- Required when state is "absent" or "present".
|
||||
resource_type:
|
||||
description:
|
||||
- The resource type for the alert profile in ManageIQ. Required when state is "present".
|
||||
choices: ['Vm', 'ContainerNode', 'MiqServer', 'Host', 'Storage', 'EmsCluster',
|
||||
'ExtManagementSystem', 'MiddlewareServer']
|
||||
alerts:
|
||||
description:
|
||||
- List of alert descriptions to assign to this profile.
|
||||
- Required if state is "present"
|
||||
notes:
|
||||
description:
|
||||
- Optional notes for this profile
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Add an alert profile to ManageIQ
|
||||
manageiq_alert_profiles:
|
||||
state: present
|
||||
name: Test profile
|
||||
resource_type: ContainerNode
|
||||
alerts:
|
||||
- Test Alert 01
|
||||
- Test Alert 02
|
||||
manageiq_connection:
|
||||
url: 'http://127.0.0.1:3000'
|
||||
username: 'admin'
|
||||
password: 'smartvm'
|
||||
validate_certs: False
|
||||
|
||||
- name: Delete an alert profile from ManageIQ
|
||||
manageiq_alert_profiles:
|
||||
state: absent
|
||||
name: Test profile
|
||||
manageiq_connection:
|
||||
url: 'http://127.0.0.1:3000'
|
||||
username: 'admin'
|
||||
password: 'smartvm'
|
||||
validate_certs: False
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
'''
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec
|
||||
|
||||
|
||||
class ManageIQAlertProfiles(object):
    """ Object to execute alert profile management operations in manageiq.
    """

    def __init__(self, manageiq):
        self.manageiq = manageiq

        self.module = self.manageiq.module
        self.api_url = self.manageiq.api_url
        self.client = self.manageiq.client
        self.url = '{api_url}/alert_definition_profiles'.format(api_url=self.api_url)

    def get_profiles(self):
        """ Get all alert profiles from ManageIQ
        """
        try:
            response = self.client.get(self.url + '?expand=alert_definitions,resources')
        except Exception as e:
            self.module.fail_json(msg="Failed to query alert profiles: {error}".format(error=e))
        return response.get('resources') or []

    def get_alerts(self, alert_descriptions):
        """ Get a list of alert hrefs from a list of alert descriptions
        """
        alerts = []
        for alert_description in alert_descriptions:
            alert = self.manageiq.find_collection_resource_or_fail("alert_definitions",
                                                                   description=alert_description)
            alerts.append(alert['href'])

        return alerts

    def add_profile(self, profile):
        """ Add a new alert profile to ManageIQ
        """
        # find all alerts to add to the profile
        # we do this first to fail early if one is missing.
        alerts = self.get_alerts(profile['alerts'])

        # build the profile dict to send to the server
        profile_dict = dict(name=profile['name'],
                            description=profile['name'],
                            mode=profile['resource_type'])
        if profile['notes']:
            profile_dict['set_data'] = dict(notes=profile['notes'])

        # send it to the server
        try:
            result = self.client.post(self.url, resource=profile_dict, action="create")
        except Exception as e:
            self.module.fail_json(msg="Creating profile failed {error}".format(error=e))

        # now that it has been created, we can assign the alerts
        self.assign_or_unassign(result['results'][0], alerts, "assign")

        msg = "Profile {name} created successfully"
        msg = msg.format(name=profile['name'])
        return dict(changed=True, msg=msg)

    def delete_profile(self, profile):
        """ Delete an alert profile from ManageIQ
        """
        try:
            self.client.post(profile['href'], action="delete")
        except Exception as e:
            self.module.fail_json(msg="Deleting profile failed: {error}".format(error=e))

        msg = "Successfully deleted profile {name}".format(name=profile['name'])
        return dict(changed=True, msg=msg)

    def get_alert_href(self, alert):
        """ Get an absolute href for an alert
        """
        return "{url}/alert_definitions/{id}".format(url=self.api_url, id=alert['id'])

    def assign_or_unassign(self, profile, resources, action):
        """ Assign or unassign alerts to profile, and validate the result.
        """
        alerts = [dict(href=href) for href in resources]

        subcollection_url = profile['href'] + '/alert_definitions'
        try:
            result = self.client.post(subcollection_url, resources=alerts, action=action)
            if len(result['results']) != len(alerts):
                # BUG FIX: the concatenated message fragments previously ran
                # together without spaces, and the raw results list was passed
                # where a count was reported.
                msg = "Failed to {action} alerts to profile '{name}', " +\
                      "expected {expected} alerts to be {action}ed, " +\
                      "but only {changed} were {action}ed"
                msg = msg.format(action=action,
                                 name=profile['name'],
                                 expected=len(alerts),
                                 changed=len(result['results']))
                self.module.fail_json(msg=msg)
        except Exception as e:
            msg = "Failed to {action} alerts to profile '{name}': {error}"
            msg = msg.format(action=action, name=profile['name'], error=e)
            self.module.fail_json(msg=msg)

        return result['results']

    def update_profile(self, old_profile, desired_profile):
        """ Update alert profile in ManageIQ
        """
        changed = False
        # we need to use client.get to query the alert definitions
        old_profile = self.client.get(old_profile['href'] + '?expand=alert_definitions')

        # figure out which alerts we need to assign / unassign
        # alerts listed by the user:
        desired_alerts = set(self.get_alerts(desired_profile['alerts']))

        # alert which currently exist in the profile
        if 'alert_definitions' in old_profile:
            # we use get_alert_href to have a direct href to the alert
            existing_alerts = set([self.get_alert_href(alert) for alert in old_profile['alert_definitions']])
        else:
            # no alerts in this profile
            existing_alerts = set()

        to_add = list(desired_alerts - existing_alerts)
        to_remove = list(existing_alerts - desired_alerts)

        # assign / unassign the alerts, if needed
        if to_remove:
            self.assign_or_unassign(old_profile, to_remove, "unassign")
            changed = True
        if to_add:
            self.assign_or_unassign(old_profile, to_add, "assign")
            changed = True

        # update other properties
        profile_dict = dict()

        if old_profile['mode'] != desired_profile['resource_type']:
            # mode needs to be updated
            profile_dict['mode'] = desired_profile['resource_type']

        # check if notes need to be updated
        old_notes = old_profile.get('set_data', {}).get('notes')

        if desired_profile['notes'] != old_notes:
            profile_dict['set_data'] = dict(notes=desired_profile['notes'])

        if profile_dict:
            # if we have any updated values
            changed = True
            # BUG FIX: pre-bind `result` so the except clause below cannot
            # raise a NameError (which would mask the original error) when
            # client.post fails before assigning it.
            result = None
            try:
                result = self.client.post(old_profile['href'],
                                          resource=profile_dict,
                                          action="edit")
            except Exception as e:
                msg = "Updating profile '{name}' failed: {error}"
                msg = msg.format(name=old_profile['name'], error=e)
                self.module.fail_json(msg=msg, result=result)

        if changed:
            msg = "Profile {name} updated successfully".format(name=desired_profile['name'])
        else:
            msg = "No update needed for profile {name}".format(name=desired_profile['name'])
        return dict(changed=changed, msg=msg)
|
||||
|
||||
|
||||
def main():
    """Entry point for the manageiq_alert_profiles module."""
    argument_spec = dict(
        name=dict(type='str'),
        resource_type=dict(type='str', choices=['Vm',
                                                'ContainerNode',
                                                'MiqServer',
                                                'Host',
                                                'Storage',
                                                'EmsCluster',
                                                'ExtManagementSystem',
                                                'MiddlewareServer']),
        alerts=dict(type='list'),
        notes=dict(type='str'),
        state=dict(default='present', choices=['present', 'absent']),
    )
    # add the manageiq connection arguments to the arguments
    argument_spec.update(manageiq_argument_spec())

    module = AnsibleModule(argument_spec=argument_spec,
                           required_if=[('state', 'present', ['name', 'resource_type']),
                                        ('state', 'absent', ['name'])])

    state = module.params['state']
    name = module.params['name']

    manageiq = ManageIQ(module)
    alert_profiles = ManageIQAlertProfiles(manageiq)

    existing_profile = manageiq.find_collection_resource_by("alert_definition_profiles",
                                                            name=name)

    if state == "present":
        # we need to add or update the alert profile
        if existing_profile:
            # a profile with this name exists, we might need to update it
            res_args = alert_profiles.update_profile(existing_profile, module.params)
        else:
            # a profile with this name doesn't exist yet, let's create it
            res_args = alert_profiles.add_profile(module.params)
    else:
        # state == "absent": this alert profile should not exist
        if existing_profile:
            # if we have an alert profile with this name, delete it
            res_args = alert_profiles.delete_profile(existing_profile)
        else:
            # This alert profile does not exist in ManageIQ, and that's okay
            msg = "Alert profile '{name}' does not exist in ManageIQ"
            msg = msg.format(name=name)
            res_args = dict(changed=False, msg=msg)

    module.exit_json(**res_args)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
347
plugins/modules/remote_management/manageiq/manageiq_alerts.py
Normal file
347
plugins/modules/remote_management/manageiq/manageiq_alerts.py
Normal file
@@ -0,0 +1,347 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) 2017 Red Hat Inc.
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
|
||||
module: manageiq_alerts
|
||||
|
||||
short_description: Configuration of alerts in ManageIQ
|
||||
extends_documentation_fragment:
|
||||
- community.general.manageiq
|
||||
|
||||
author: Elad Alfassa (@elad661) <ealfassa@redhat.com>
|
||||
description:
|
||||
- The manageiq_alerts module supports adding, updating and deleting alerts in ManageIQ.
|
||||
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
- absent - alert should not exist,
|
||||
- present - alert should exist,
|
||||
required: False
|
||||
choices: ['absent', 'present']
|
||||
default: 'present'
|
||||
description:
|
||||
description:
|
||||
- The unique alert description in ManageIQ.
|
||||
- Required when state is "absent" or "present".
|
||||
resource_type:
|
||||
description:
|
||||
- The entity type for the alert in ManageIQ. Required when state is "present".
|
||||
choices: ['Vm', 'ContainerNode', 'MiqServer', 'Host', 'Storage', 'EmsCluster',
|
||||
'ExtManagementSystem', 'MiddlewareServer']
|
||||
expression_type:
|
||||
description:
|
||||
- Expression type.
|
||||
default: hash
|
||||
choices: ["hash", "miq"]
|
||||
expression:
|
||||
description:
|
||||
- The alert expression for ManageIQ.
|
||||
- Can either be in the "Miq Expression" format or the "Hash Expression format".
|
||||
- Required if state is "present".
|
||||
enabled:
|
||||
description:
|
||||
- Enable or disable the alert. Required if state is "present".
|
||||
type: bool
|
||||
options:
|
||||
description:
|
||||
- Additional alert options, such as notification type and frequency
|
||||
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Add an alert with a "hash expression" to ManageIQ
|
||||
manageiq_alerts:
|
||||
state: present
|
||||
description: Test Alert 01
|
||||
options:
|
||||
notifications:
|
||||
email:
|
||||
to: ["example@example.com"]
|
||||
from: "example@example.com"
|
||||
resource_type: ContainerNode
|
||||
expression:
|
||||
eval_method: hostd_log_threshold
|
||||
mode: internal
|
||||
options: {}
|
||||
enabled: true
|
||||
manageiq_connection:
|
||||
url: 'http://127.0.0.1:3000'
|
||||
username: 'admin'
|
||||
password: 'smartvm'
|
||||
validate_certs: False
|
||||
|
||||
- name: Add an alert with a "miq expression" to ManageIQ
|
||||
manageiq_alerts:
|
||||
state: present
|
||||
description: Test Alert 02
|
||||
options:
|
||||
notifications:
|
||||
email:
|
||||
to: ["example@example.com"]
|
||||
from: "example@example.com"
|
||||
resource_type: Vm
|
||||
expression_type: miq
|
||||
expression:
|
||||
and:
|
||||
- CONTAINS:
|
||||
tag: Vm.managed-environment
|
||||
value: prod
|
||||
- not:
|
||||
CONTAINS:
|
||||
tag: Vm.host.managed-environment
|
||||
value: prod
|
||||
enabled: true
|
||||
manageiq_connection:
|
||||
url: 'http://127.0.0.1:3000'
|
||||
username: 'admin'
|
||||
password: 'smartvm'
|
||||
validate_certs: False
|
||||
|
||||
- name: Delete an alert from ManageIQ
|
||||
manageiq_alerts:
|
||||
state: absent
|
||||
description: Test Alert 01
|
||||
manageiq_connection:
|
||||
url: 'http://127.0.0.1:3000'
|
||||
username: 'admin'
|
||||
password: 'smartvm'
|
||||
validate_certs: False
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
'''
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec
|
||||
|
||||
|
||||
class ManageIQAlert(object):
    """ Represent a ManageIQ alert. Can be initialized with both the format
    we receive from the server and the format we get from the user.
    """
    def __init__(self, alert):
        self.description = alert['description']
        self.db = alert['db']
        self.enabled = alert['enabled']
        self.options = alert['options']
        self.hash_expression = None
        # BUG FIX: this attribute was misspelled 'miq_expressipn', which left
        # instances without a 'miq_expression' attribute whenever the key was
        # absent from `alert`, breaking the __dict__-based __eq__ below.
        self.miq_expression = None

        if 'hash_expression' in alert:
            self.hash_expression = alert['hash_expression']
        if 'miq_expression' in alert:
            self.miq_expression = alert['miq_expression']
            if 'exp' in self.miq_expression:
                # miq_expression is a field that needs a special case, because
                # it's returned surrounded by a dict named exp even though we don't
                # send it with that dict.
                self.miq_expression = self.miq_expression['exp']

    def __eq__(self, other):
        """ Compare two ManageIQAlert objects
        """
        return self.__dict__ == other.__dict__
|
||||
|
||||
|
||||
class ManageIQAlerts(object):
    """ Object to execute alert management operations in manageiq.
    """

    def __init__(self, manageiq):
        self.manageiq = manageiq

        self.module = self.manageiq.module
        self.api_url = self.manageiq.api_url
        self.client = self.manageiq.client
        self.alerts_url = '{api_url}/alert_definitions'.format(api_url=self.api_url)

    def get_alerts(self):
        """ Get all alerts from ManageIQ
        """
        try:
            response = self.client.get(self.alerts_url + '?expand=resources')
        except Exception as e:
            self.module.fail_json(msg="Failed to query alerts: {error}".format(error=e))
        return response.get('resources', [])

    def validate_hash_expression(self, expression):
        """ Validate a 'hash expression' alert definition
        """
        # hash expressions must have the following fields
        for key in ['options', 'eval_method', 'mode']:
            if key not in expression:
                msg = "Hash expression is missing required field {key}".format(key=key)
                # BUG FIX: fail_json only accepts keyword arguments; the
                # previous positional call `fail_json(msg)` raised a TypeError
                # instead of reporting the validation failure.
                self.module.fail_json(msg=msg)

    def create_alert_dict(self, params):
        """ Create a dict representing an alert
        """
        if params['expression_type'] == 'hash':
            # hash expression supports depends on https://github.com/ManageIQ/manageiq-api/pull/76
            self.validate_hash_expression(params['expression'])
            expression_type = 'hash_expression'
        else:
            # actually miq_expression, but we call it "expression" for backwards-compatibility
            expression_type = 'expression'

        # build the alert
        alert = dict(description=params['description'],
                     db=params['resource_type'],
                     options=params['options'],
                     enabled=params['enabled'])

        # add the actual expression.
        alert.update({expression_type: params['expression']})

        return alert

    def add_alert(self, alert):
        """ Add a new alert to ManageIQ
        """
        try:
            result = self.client.post(self.alerts_url, action='create', resource=alert)

            msg = "Alert {description} created successfully: {details}"
            msg = msg.format(description=alert['description'], details=result)
            return dict(changed=True, msg=msg)
        except Exception as e:
            msg = "Creating alert {description} failed: {error}"
            if "Resource expression needs be specified" in str(e):
                # Running on an older version of ManageIQ and trying to create a hash expression
                msg = msg.format(description=alert['description'],
                                 error="Your version of ManageIQ does not support hash_expression")
            else:
                msg = msg.format(description=alert['description'], error=e)
            self.module.fail_json(msg=msg)

    def delete_alert(self, alert):
        """ Delete an alert
        """
        try:
            result = self.client.post('{url}/{id}'.format(url=self.alerts_url,
                                                          id=alert['id']),
                                      action="delete")
            msg = "Alert {description} deleted: {details}"
            msg = msg.format(description=alert['description'], details=result)
            return dict(changed=True, msg=msg)
        except Exception as e:
            msg = "Deleting alert {description} failed: {error}"
            msg = msg.format(description=alert['description'], error=e)
            self.module.fail_json(msg=msg)

    def update_alert(self, existing_alert, new_alert):
        """ Update an existing alert with the values from `new_alert`
        """
        new_alert_obj = ManageIQAlert(new_alert)
        if new_alert_obj == ManageIQAlert(existing_alert):
            # no change needed - alerts are identical
            return dict(changed=False, msg="No update needed")
        else:
            try:
                url = '{url}/{id}'.format(url=self.alerts_url, id=existing_alert['id'])
                result = self.client.post(url, action="edit", resource=new_alert)

                # make sure that the update was indeed successful by comparing
                # the result to the expected result.
                if new_alert_obj == ManageIQAlert(result):
                    # success!
                    msg = "Alert {description} updated successfully: {details}"
                    msg = msg.format(description=existing_alert['description'], details=result)

                    return dict(changed=True, msg=msg)
                else:
                    # unexpected result
                    msg = "Updating alert {description} failed, unexpected result {details}"
                    msg = msg.format(description=existing_alert['description'], details=result)

                    self.module.fail_json(msg=msg)

            except Exception as e:
                msg = "Updating alert {description} failed: {error}"
                if "Resource expression needs be specified" in str(e):
                    # Running on an older version of ManageIQ and trying to update a hash expression
                    msg = msg.format(description=existing_alert['description'],
                                     error="Your version of ManageIQ does not support hash_expression")
                else:
                    msg = msg.format(description=existing_alert['description'], error=e)
                self.module.fail_json(msg=msg)
|
||||
|
||||
|
||||
def main():
    """Module entry point: ensure an alert is present or absent in ManageIQ."""
    resource_choices = ['Vm', 'ContainerNode', 'MiqServer', 'Host', 'Storage',
                        'EmsCluster', 'ExtManagementSystem', 'MiddlewareServer']
    argument_spec = dict(
        description=dict(type='str'),
        resource_type=dict(type='str', choices=resource_choices),
        expression_type=dict(type='str', default='hash', choices=['miq', 'hash']),
        expression=dict(type='dict'),
        options=dict(type='dict'),
        enabled=dict(type='bool'),
        state=dict(required=False, default='present', choices=['present', 'absent']),
    )
    # fold in the shared manageiq connection arguments
    argument_spec.update(manageiq_argument_spec())

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_if=[
            ('state', 'present', ['description', 'resource_type', 'expression',
                                  'enabled', 'options']),
            ('state', 'absent', ['description']),
        ])

    state = module.params['state']
    description = module.params['description']

    manageiq = ManageIQ(module)
    alerts_api = ManageIQAlerts(manageiq)

    # alerts are keyed by their description
    existing_alert = manageiq.find_collection_resource_by("alert_definitions",
                                                          description=description)

    if state == "present":
        # build the desired alert and create or update as needed
        alert = alerts_api.create_alert_dict(module.params)
        if existing_alert:
            res_args = alerts_api.update_alert(existing_alert, alert)
        else:
            res_args = alerts_api.add_alert(alert)
    elif state == "absent":
        if existing_alert:
            res_args = alerts_api.delete_alert(existing_alert)
        else:
            # already gone - report no change
            msg = "Alert '{description}' does not exist in ManageIQ"
            msg = msg.format(description=description)
            res_args = dict(changed=False, msg=msg)

    module.exit_json(**res_args)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
643
plugins/modules/remote_management/manageiq/manageiq_group.py
Normal file
643
plugins/modules/remote_management/manageiq/manageiq_group.py
Normal file
@@ -0,0 +1,643 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# (c) 2018, Evert Mulder <evertmulder@gmail.com> (base on manageiq_user.py by Daniel Korn <korndaniel1@gmail.com>)
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
|
||||
module: manageiq_group
|
||||
|
||||
short_description: Management of groups in ManageIQ.
|
||||
extends_documentation_fragment:
|
||||
- community.general.manageiq
|
||||
|
||||
author: Evert Mulder (@evertmulder)
|
||||
description:
|
||||
- The manageiq_group module supports adding, updating and deleting groups in ManageIQ.
|
||||
requirements:
|
||||
- manageiq-client
|
||||
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
- absent - group should not exist, present - group should be.
|
||||
choices: ['absent', 'present']
|
||||
default: 'present'
|
||||
description:
|
||||
description:
|
||||
- The group description.
|
||||
required: true
|
||||
default: null
|
||||
role_id:
|
||||
description:
|
||||
- The the group role id
|
||||
required: false
|
||||
default: null
|
||||
role:
|
||||
description:
|
||||
- The the group role name
|
||||
- The C(role_id) has precedence over the C(role) when supplied.
|
||||
required: false
|
||||
default: null
|
||||
tenant_id:
|
||||
description:
|
||||
- The tenant for the group identified by the tenant id.
|
||||
required: false
|
||||
default: null
|
||||
tenant:
|
||||
description:
|
||||
- The tenant for the group identified by the tenant name.
|
||||
- The C(tenant_id) has precedence over the C(tenant) when supplied.
|
||||
- Tenant names are case sensitive.
|
||||
required: false
|
||||
default: null
|
||||
managed_filters:
|
||||
description: The tag values per category
|
||||
type: dict
|
||||
required: false
|
||||
default: null
|
||||
managed_filters_merge_mode:
|
||||
description:
|
||||
- In merge mode existing categories are kept or updated, new categories are added.
|
||||
- In replace mode all categories will be replaced with the supplied C(managed_filters).
|
||||
choices: [ merge, replace ]
|
||||
default: replace
|
||||
belongsto_filters:
|
||||
description: A list of strings with a reference to the allowed host, cluster or folder
|
||||
type: list
|
||||
required: false
|
||||
default: null
|
||||
belongsto_filters_merge_mode:
|
||||
description:
|
||||
- In merge mode existing settings are merged with the supplied C(belongsto_filters).
|
||||
- In replace mode current values are replaced with the supplied C(belongsto_filters).
|
||||
choices: [ merge, replace ]
|
||||
default: replace
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create a group in ManageIQ with the role EvmRole-user and tenant 'my_tenant'
|
||||
manageiq_group:
|
||||
description: 'MyGroup-user'
|
||||
role: 'EvmRole-user'
|
||||
tenant: 'my_tenant'
|
||||
manageiq_connection:
|
||||
url: 'https://manageiq_server'
|
||||
username: 'admin'
|
||||
password: 'smartvm'
|
||||
validate_certs: False
|
||||
|
||||
- name: Create a group in ManageIQ with the role EvmRole-user and tenant with tenant_id 4
|
||||
manageiq_group:
|
||||
description: 'MyGroup-user'
|
||||
role: 'EvmRole-user'
|
||||
tenant_id: 4
|
||||
manageiq_connection:
|
||||
url: 'https://manageiq_server'
|
||||
username: 'admin'
|
||||
password: 'smartvm'
|
||||
validate_certs: False
|
||||
|
||||
- name:
|
||||
- Create or update a group in ManageIQ with the role EvmRole-user and tenant my_tenant.
|
||||
- Apply 3 prov_max_cpu and 2 department tags to the group.
|
||||
- Limit access to a cluster for the group.
|
||||
manageiq_group:
|
||||
description: 'MyGroup-user'
|
||||
role: 'EvmRole-user'
|
||||
tenant: my_tenant
|
||||
managed_filters:
|
||||
prov_max_cpu:
|
||||
- '1'
|
||||
- '2'
|
||||
- '4'
|
||||
department:
|
||||
- defense
|
||||
- engineering
|
||||
managed_filters_merge_mode: replace
|
||||
belongsto_filters:
|
||||
- "/belongsto/ExtManagementSystem|ProviderName/EmsFolder|Datacenters/EmsFolder|dc_name/EmsFolder|host/EmsCluster|Cluster name"
|
||||
belongsto_filters_merge_mode: merge
|
||||
manageiq_connection:
|
||||
url: 'https://manageiq_server'
|
||||
username: 'admin'
|
||||
password: 'smartvm'
|
||||
validate_certs: False
|
||||
|
||||
- name: Delete a group in ManageIQ
|
||||
manageiq_group:
|
||||
state: 'absent'
|
||||
description: 'MyGroup-user'
|
||||
manageiq_connection:
|
||||
url: 'http://127.0.0.1:3000'
|
||||
username: 'admin'
|
||||
password: 'smartvm'
|
||||
|
||||
- name: Delete a group in ManageIQ using a token
|
||||
manageiq_group:
|
||||
state: 'absent'
|
||||
description: 'MyGroup-user'
|
||||
manageiq_connection:
|
||||
url: 'http://127.0.0.1:3000'
|
||||
token: 'sometoken'
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
group:
|
||||
description: The group.
|
||||
returned: success
|
||||
type: complex
|
||||
contains:
|
||||
description:
|
||||
description: The group description
|
||||
returned: success
|
||||
type: str
|
||||
id:
|
||||
description: The group id
|
||||
returned: success
|
||||
type: int
|
||||
group_type:
|
||||
description: The group type, system or user
|
||||
returned: success
|
||||
type: str
|
||||
role:
|
||||
description: The group role name
|
||||
returned: success
|
||||
type: str
|
||||
tenant:
|
||||
description: The group tenant name
|
||||
returned: success
|
||||
type: str
|
||||
managed_filters:
|
||||
description: The tag values per category
|
||||
returned: success
|
||||
type: dict
|
||||
belongsto_filters:
|
||||
description: A list of strings with a reference to the allowed host, cluster or folder
|
||||
returned: success
|
||||
type: list
|
||||
created_on:
|
||||
description: Group creation date
|
||||
returned: success
|
||||
type: str
|
||||
sample: "2018-08-12T08:37:55+00:00"
|
||||
updated_on:
|
||||
description: Group update date
|
||||
returned: success
|
||||
type: int
|
||||
sample: "2018-08-12T08:37:55+00:00"
|
||||
'''
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec
|
||||
|
||||
|
||||
class ManageIQgroup(object):
|
||||
"""
|
||||
Object to execute group management operations in manageiq.
|
||||
"""
|
||||
|
||||
    def __init__(self, manageiq):
        # Shared ManageIQ connection wrapper (module_utils.manageiq.ManageIQ).
        self.manageiq = manageiq

        # Convenience aliases onto the wrapper's AnsibleModule, REST base URL
        # and manageiq-client instance, used by every method below.
        self.module = self.manageiq.module
        self.api_url = self.manageiq.api_url
        self.client = self.manageiq.client
|
||||
|
||||
def group(self, description):
|
||||
""" Search for group object by description.
|
||||
Returns:
|
||||
the group, or None if group was not found.
|
||||
"""
|
||||
groups = self.client.collections.groups.find_by(description=description)
|
||||
if len(groups) == 0:
|
||||
return None
|
||||
else:
|
||||
return groups[0]
|
||||
|
||||
def tenant(self, tenant_id, tenant_name):
|
||||
""" Search for tenant entity by name or id
|
||||
Returns:
|
||||
the tenant entity, None if no id or name was supplied
|
||||
"""
|
||||
|
||||
if tenant_id:
|
||||
tenant = self.client.get_entity('tenants', tenant_id)
|
||||
if not tenant:
|
||||
self.module.fail_json(msg="Tenant with id '%s' not found in manageiq" % str(tenant_id))
|
||||
return tenant
|
||||
else:
|
||||
if tenant_name:
|
||||
tenant_res = self.client.collections.tenants.find_by(name=tenant_name)
|
||||
if not tenant_res:
|
||||
self.module.fail_json(msg="Tenant '%s' not found in manageiq" % tenant_name)
|
||||
if len(tenant_res) > 1:
|
||||
self.module.fail_json(msg="Multiple tenants found in manageiq with name '%s" % tenant_name)
|
||||
tenant = tenant_res[0]
|
||||
return tenant
|
||||
else:
|
||||
# No tenant name or tenant id supplied
|
||||
return None
|
||||
|
||||
def role(self, role_id, role_name):
|
||||
""" Search for a role object by name or id.
|
||||
Returns:
|
||||
the role entity, None no id or name was supplied
|
||||
|
||||
the role, or send a module Fail signal if role not found.
|
||||
"""
|
||||
if role_id:
|
||||
role = self.client.get_entity('roles', role_id)
|
||||
if not role:
|
||||
self.module.fail_json(msg="Role with id '%s' not found in manageiq" % str(role_id))
|
||||
return role
|
||||
else:
|
||||
if role_name:
|
||||
role_res = self.client.collections.roles.find_by(name=role_name)
|
||||
if not role_res:
|
||||
self.module.fail_json(msg="Role '%s' not found in manageiq" % role_name)
|
||||
if len(role_res) > 1:
|
||||
self.module.fail_json(msg="Multiple roles found in manageiq with name '%s" % role_name)
|
||||
return role_res[0]
|
||||
else:
|
||||
# No role name or role id supplied
|
||||
return None
|
||||
|
||||
@staticmethod
|
||||
def merge_dict_values(norm_current_values, norm_updated_values):
|
||||
""" Create an merged update object for manageiq group filters.
|
||||
|
||||
The input dict contain the tag values per category.
|
||||
If the new values contain the category, all tags for that category are replaced
|
||||
If the new values do not contain the category, the existing tags are kept
|
||||
|
||||
Returns:
|
||||
the nested array with the merged values, used in the update post body
|
||||
"""
|
||||
|
||||
# If no updated values are supplied, in merge mode, the original values must be returned
|
||||
# otherwise the existing tag filters will be removed.
|
||||
if norm_current_values and (not norm_updated_values):
|
||||
return norm_current_values
|
||||
|
||||
# If no existing tag filters exist, use the user supplied values
|
||||
if (not norm_current_values) and norm_updated_values:
|
||||
return norm_updated_values
|
||||
|
||||
# start with norm_current_values's keys and values
|
||||
res = norm_current_values.copy()
|
||||
# replace res with norm_updated_values's keys and values
|
||||
res.update(norm_updated_values)
|
||||
return res
|
||||
|
||||
def delete_group(self, group):
|
||||
""" Deletes a group from manageiq.
|
||||
|
||||
Returns:
|
||||
a dict of:
|
||||
changed: boolean indicating if the entity was updated.
|
||||
msg: a short message describing the operation executed.
|
||||
"""
|
||||
try:
|
||||
url = '%s/groups/%s' % (self.api_url, group['id'])
|
||||
result = self.client.post(url, action='delete')
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg="failed to delete group %s: %s" % (group['description'], str(e)))
|
||||
|
||||
if result['success'] is False:
|
||||
self.module.fail_json(msg=result['message'])
|
||||
|
||||
return dict(
|
||||
changed=True,
|
||||
msg="deleted group %s with id %s" % (group['description'], group['id']))
|
||||
|
||||
def edit_group(self, group, description, role, tenant, norm_managed_filters, managed_filters_merge_mode,
|
||||
belongsto_filters, belongsto_filters_merge_mode):
|
||||
""" Edit a manageiq group.
|
||||
|
||||
Returns:
|
||||
a dict of:
|
||||
changed: boolean indicating if the entity was updated.
|
||||
msg: a short message describing the operation executed.
|
||||
"""
|
||||
|
||||
if role or norm_managed_filters or belongsto_filters:
|
||||
group.reload(attributes=['miq_user_role_name', 'entitlement'])
|
||||
|
||||
try:
|
||||
current_role = group['miq_user_role_name']
|
||||
except AttributeError:
|
||||
current_role = None
|
||||
|
||||
changed = False
|
||||
resource = {}
|
||||
|
||||
if description and group['description'] != description:
|
||||
resource['description'] = description
|
||||
changed = True
|
||||
|
||||
if tenant and group['tenant_id'] != tenant['id']:
|
||||
resource['tenant'] = dict(id=tenant['id'])
|
||||
changed = True
|
||||
|
||||
if role and current_role != role['name']:
|
||||
resource['role'] = dict(id=role['id'])
|
||||
changed = True
|
||||
|
||||
if norm_managed_filters or belongsto_filters:
|
||||
|
||||
# Only compare if filters are supplied
|
||||
entitlement = group['entitlement']
|
||||
|
||||
if 'filters' not in entitlement:
|
||||
# No existing filters exist, use supplied filters
|
||||
managed_tag_filters_post_body = self.normalized_managed_tag_filters_to_miq(norm_managed_filters)
|
||||
resource['filters'] = {'managed': managed_tag_filters_post_body, "belongsto": belongsto_filters}
|
||||
changed = True
|
||||
else:
|
||||
current_filters = entitlement['filters']
|
||||
new_filters = self.edit_group_edit_filters(current_filters,
|
||||
norm_managed_filters, managed_filters_merge_mode,
|
||||
belongsto_filters, belongsto_filters_merge_mode)
|
||||
if new_filters:
|
||||
resource['filters'] = new_filters
|
||||
changed = True
|
||||
|
||||
if not changed:
|
||||
return dict(
|
||||
changed=False,
|
||||
msg="group %s is not changed." % group['description'])
|
||||
|
||||
# try to update group
|
||||
try:
|
||||
self.client.post(group['href'], action='edit', resource=resource)
|
||||
changed = True
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg="failed to update group %s: %s" % (group['name'], str(e)))
|
||||
|
||||
return dict(
|
||||
changed=changed,
|
||||
msg="successfully updated the group %s with id %s" % (group['description'], group['id']))
|
||||
|
||||
def edit_group_edit_filters(self, current_filters, norm_managed_filters, managed_filters_merge_mode,
|
||||
belongsto_filters, belongsto_filters_merge_mode):
|
||||
""" Edit a manageiq group filters.
|
||||
|
||||
Returns:
|
||||
None if no the group was not updated
|
||||
If the group was updated the post body part for updating the group
|
||||
"""
|
||||
filters_updated = False
|
||||
new_filters_resource = {}
|
||||
|
||||
current_belongsto_set = current_filters.get('belongsto', set())
|
||||
|
||||
if belongsto_filters:
|
||||
new_belongsto_set = set(belongsto_filters)
|
||||
else:
|
||||
new_belongsto_set = set()
|
||||
|
||||
if current_belongsto_set == new_belongsto_set:
|
||||
new_filters_resource['belongsto'] = current_filters['belongsto']
|
||||
else:
|
||||
if belongsto_filters_merge_mode == 'merge':
|
||||
current_belongsto_set.update(new_belongsto_set)
|
||||
new_filters_resource['belongsto'] = list(current_belongsto_set)
|
||||
else:
|
||||
new_filters_resource['belongsto'] = list(new_belongsto_set)
|
||||
filters_updated = True
|
||||
|
||||
# Process belongsto managed filter tags
|
||||
# The input is in the form dict with keys are the categories and the tags are supplied string array
|
||||
# ManageIQ, the current_managed, uses an array of arrays. One array of categories.
|
||||
# We normalize the user input from a dict with arrays to a dict of sorted arrays
|
||||
# We normalize the current manageiq array of arrays also to a dict of sorted arrays so we can compare
|
||||
norm_current_filters = self.manageiq_filters_to_sorted_dict(current_filters)
|
||||
|
||||
if norm_current_filters == norm_managed_filters:
|
||||
if 'managed' in current_filters:
|
||||
new_filters_resource['managed'] = current_filters['managed']
|
||||
else:
|
||||
if managed_filters_merge_mode == 'merge':
|
||||
merged_dict = self.merge_dict_values(norm_current_filters, norm_managed_filters)
|
||||
new_filters_resource['managed'] = self.normalized_managed_tag_filters_to_miq(merged_dict)
|
||||
else:
|
||||
new_filters_resource['managed'] = self.normalized_managed_tag_filters_to_miq(norm_managed_filters)
|
||||
filters_updated = True
|
||||
|
||||
if not filters_updated:
|
||||
return None
|
||||
|
||||
return new_filters_resource
|
||||
|
||||
def create_group(self, description, role, tenant, norm_managed_filters, belongsto_filters):
|
||||
""" Creates the group in manageiq.
|
||||
|
||||
Returns:
|
||||
the created group id, name, created_on timestamp,
|
||||
updated_on timestamp.
|
||||
"""
|
||||
# check for required arguments
|
||||
for key, value in dict(description=description).items():
|
||||
if value in (None, ''):
|
||||
self.module.fail_json(msg="missing required argument: %s" % key)
|
||||
|
||||
url = '%s/groups' % self.api_url
|
||||
|
||||
resource = {'description': description}
|
||||
|
||||
if role is not None:
|
||||
resource['role'] = dict(id=role['id'])
|
||||
|
||||
if tenant is not None:
|
||||
resource['tenant'] = dict(id=tenant['id'])
|
||||
|
||||
if norm_managed_filters or belongsto_filters:
|
||||
managed_tag_filters_post_body = self.normalized_managed_tag_filters_to_miq(norm_managed_filters)
|
||||
resource['filters'] = {'managed': managed_tag_filters_post_body, "belongsto": belongsto_filters}
|
||||
|
||||
try:
|
||||
result = self.client.post(url, action='create', resource=resource)
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg="failed to create group %s: %s" % (description, str(e)))
|
||||
|
||||
return dict(
|
||||
changed=True,
|
||||
msg="successfully created group %s" % description,
|
||||
group_id=result['results'][0]['id']
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def normalized_managed_tag_filters_to_miq(norm_managed_filters):
|
||||
if not norm_managed_filters:
|
||||
return None
|
||||
|
||||
return list(norm_managed_filters.values())
|
||||
|
||||
@staticmethod
|
||||
def manageiq_filters_to_sorted_dict(current_filters):
|
||||
current_managed_filters = current_filters.get('managed')
|
||||
if not current_managed_filters:
|
||||
return None
|
||||
|
||||
res = {}
|
||||
for tag_list in current_managed_filters:
|
||||
tag_list.sort()
|
||||
key = tag_list[0].split('/')[2]
|
||||
res[key] = tag_list
|
||||
|
||||
return res
|
||||
|
||||
@staticmethod
|
||||
def normalize_user_managed_filters_to_sorted_dict(managed_filters, module):
|
||||
if not managed_filters:
|
||||
return None
|
||||
|
||||
res = {}
|
||||
for cat_key in managed_filters:
|
||||
cat_array = []
|
||||
if not isinstance(managed_filters[cat_key], list):
|
||||
module.fail_json(msg='Entry "{0}" of managed_filters must be a list!'.format(cat_key))
|
||||
for tags in managed_filters[cat_key]:
|
||||
miq_managed_tag = "/managed/" + cat_key + "/" + tags
|
||||
cat_array.append(miq_managed_tag)
|
||||
# Do not add empty categories. ManageIQ will remove all categories that are not supplied
|
||||
if cat_array:
|
||||
cat_array.sort()
|
||||
res[cat_key] = cat_array
|
||||
return res
|
||||
|
||||
@staticmethod
|
||||
def create_result_group(group):
|
||||
""" Creates the ansible result object from a manageiq group entity
|
||||
|
||||
Returns:
|
||||
a dict with the group id, description, role, tenant, filters, group_type, created_on, updated_on
|
||||
"""
|
||||
try:
|
||||
role_name = group['miq_user_role_name']
|
||||
except AttributeError:
|
||||
role_name = None
|
||||
|
||||
managed_filters = None
|
||||
belongsto_filters = None
|
||||
if 'filters' in group['entitlement']:
|
||||
filters = group['entitlement']['filters']
|
||||
belongsto_filters = filters.get('belongsto')
|
||||
group_managed_filters = filters.get('managed')
|
||||
if group_managed_filters:
|
||||
managed_filters = {}
|
||||
for tag_list in group_managed_filters:
|
||||
key = tag_list[0].split('/')[2]
|
||||
tags = []
|
||||
for t in tag_list:
|
||||
tags.append(t.split('/')[3])
|
||||
managed_filters[key] = tags
|
||||
|
||||
return dict(
|
||||
id=group['id'],
|
||||
description=group['description'],
|
||||
role=role_name,
|
||||
tenant=group['tenant']['name'],
|
||||
managed_filters=managed_filters,
|
||||
belongsto_filters=belongsto_filters,
|
||||
group_type=group['group_type'],
|
||||
created_on=group['created_on'],
|
||||
updated_on=group['updated_on'],
|
||||
)
|
||||
|
||||
|
||||
def main():
    """Module entry point: ensure a ManageIQ group is present or absent."""
    argument_spec = dict(
        description=dict(required=True, type='str'),
        state=dict(choices=['absent', 'present'], default='present'),
        role_id=dict(required=False, type='int'),
        role=dict(required=False, type='str'),
        tenant_id=dict(required=False, type='int'),
        tenant=dict(required=False, type='str'),
        managed_filters=dict(required=False, type='dict'),
        managed_filters_merge_mode=dict(required=False, choices=['merge', 'replace'], default='replace'),
        belongsto_filters=dict(required=False, type='list', elements='str'),
        belongsto_filters_merge_mode=dict(required=False, choices=['merge', 'replace'], default='replace'),
    )
    # add the manageiq connection arguments to the arguments
    argument_spec.update(manageiq_argument_spec())

    module = AnsibleModule(
        argument_spec=argument_spec
    )

    params = module.params
    description = params['description']
    state = params['state']

    manageiq = ManageIQ(module)
    group_api = ManageIQgroup(manageiq)

    # groups are keyed by their description
    group = group_api.group(description)

    if state == "absent":
        # delete when the group exists, otherwise report no change
        if group:
            res_args = group_api.delete_group(group)
        else:
            res_args = dict(
                changed=False,
                msg="group '%s' does not exist in manageiq" % description)

    if state == "present":
        # resolve the referenced entities and normalize the filter input
        # before deciding between create and update
        tenant = group_api.tenant(params['tenant_id'], params['tenant'])
        role = group_api.role(params['role_id'], params['role'])
        norm_managed_filters = group_api.normalize_user_managed_filters_to_sorted_dict(
            params['managed_filters'], module)

        if group:
            # the group exists - update it in place when anything differs
            res_args = group_api.edit_group(group, description, role, tenant,
                                            norm_managed_filters, params['managed_filters_merge_mode'],
                                            params['belongsto_filters'], params['belongsto_filters_merge_mode'])
        else:
            res_args = group_api.create_group(description, role, tenant,
                                              norm_managed_filters, params['belongsto_filters'])
            group = manageiq.client.get_entity('groups', res_args['group_id'])

        # report the final state of the group back to the caller
        group.reload(expand='resources', attributes=['miq_user_role_name', 'tenant', 'entitlement'])
        res_args['group'] = group_api.create_result_group(group)

    module.exit_json(**res_args)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
347
plugins/modules/remote_management/manageiq/manageiq_policies.py
Normal file
347
plugins/modules/remote_management/manageiq/manageiq_policies.py
Normal file
@@ -0,0 +1,347 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
# (c) 2017, Daniel Korn <korndaniel1@gmail.com>
|
||||
# (c) 2017, Yaacov Zamir <yzamir@redhat.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
|
||||
module: manageiq_policies
|
||||
|
||||
short_description: Management of resource policy_profiles in ManageIQ.
|
||||
extends_documentation_fragment:
|
||||
- community.general.manageiq
|
||||
|
||||
author: Daniel Korn (@dkorn)
|
||||
description:
|
||||
- The manageiq_policies module supports adding and deleting policy_profiles in ManageIQ.
|
||||
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
- absent - policy_profiles should not exist,
|
||||
- present - policy_profiles should exist,
|
||||
- list - list current policy_profiles and policies.
|
||||
choices: ['absent', 'present', 'list']
|
||||
default: 'present'
|
||||
policy_profiles:
|
||||
description:
|
||||
- list of dictionaries, each includes the policy_profile 'name' key.
|
||||
- required if state is present or absent.
|
||||
resource_type:
|
||||
description:
|
||||
- the type of the resource to which the profile should be [un]assigned
|
||||
required: true
|
||||
choices: ['provider', 'host', 'vm', 'blueprint', 'category', 'cluster',
|
||||
'data store', 'group', 'resource pool', 'service', 'service template',
|
||||
'template', 'tenant', 'user']
|
||||
resource_name:
|
||||
description:
|
||||
- the name of the resource to which the profile should be [un]assigned
|
||||
required: true
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Assign new policy_profile for a provider in ManageIQ
|
||||
manageiq_policies:
|
||||
resource_name: 'EngLab'
|
||||
resource_type: 'provider'
|
||||
policy_profiles:
|
||||
- name: openscap profile
|
||||
manageiq_connection:
|
||||
url: 'http://127.0.0.1:3000'
|
||||
username: 'admin'
|
||||
password: 'smartvm'
|
||||
validate_certs: False
|
||||
|
||||
- name: Unassign a policy_profile for a provider in ManageIQ
|
||||
manageiq_policies:
|
||||
state: absent
|
||||
resource_name: 'EngLab'
|
||||
resource_type: 'provider'
|
||||
policy_profiles:
|
||||
- name: openscap profile
|
||||
manageiq_connection:
|
||||
url: 'http://127.0.0.1:3000'
|
||||
username: 'admin'
|
||||
password: 'smartvm'
|
||||
validate_certs: False
|
||||
|
||||
- name: List current policy_profile and policies for a provider in ManageIQ
|
||||
manageiq_policies:
|
||||
state: list
|
||||
resource_name: 'EngLab'
|
||||
resource_type: 'provider'
|
||||
manageiq_connection:
|
||||
url: 'http://127.0.0.1:3000'
|
||||
username: 'admin'
|
||||
password: 'smartvm'
|
||||
validate_certs: False
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
manageiq_policies:
|
||||
description:
|
||||
- List current policy_profile and policies for a provider in ManageIQ
|
||||
returned: always
|
||||
type: dict
|
||||
sample: '{
|
||||
"changed": false,
|
||||
"profiles": [
|
||||
{
|
||||
"policies": [
|
||||
{
|
||||
"active": true,
|
||||
"description": "OpenSCAP",
|
||||
"name": "openscap policy"
|
||||
},
|
||||
{
|
||||
"active": true,
|
||||
"description": "Analyse incoming container images",
|
||||
"name": "analyse incoming container images"
|
||||
},
|
||||
{
|
||||
"active": true,
|
||||
"description": "Schedule compliance after smart state analysis",
|
||||
"name": "schedule compliance after smart state analysis"
|
||||
}
|
||||
],
|
||||
"profile_description": "OpenSCAP profile",
|
||||
"profile_name": "openscap profile"
|
||||
}
|
||||
]
|
||||
}'
|
||||
'''
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec, manageiq_entities
|
||||
|
||||
|
||||
class ManageIQPolicies(object):
|
||||
"""
|
||||
Object to execute policies management operations of manageiq resources.
|
||||
"""
|
||||
|
||||
    def __init__(self, manageiq, resource_type, resource_id):
        # Shared ManageIQ connection wrapper (module_utils.manageiq.ManageIQ).
        self.manageiq = manageiq

        # Convenience aliases used by the query methods below.
        self.module = self.manageiq.module
        self.api_url = self.manageiq.api_url
        self.client = self.manageiq.client

        # The resource whose policy profiles are managed; the REST URL has
        # the shape '<api_url>/<resource_type>/<resource_id>'.
        self.resource_type = resource_type
        self.resource_id = resource_id
        self.resource_url = '{api_url}/{resource_type}/{resource_id}'.format(
            api_url=self.api_url,
            resource_type=resource_type,
            resource_id=resource_id)
|
||||
|
||||
def query_profile_href(self, profile):
|
||||
""" Add or Update the policy_profile href field
|
||||
|
||||
Example:
|
||||
{name: STR, ...} => {name: STR, href: STR}
|
||||
"""
|
||||
resource = self.manageiq.find_collection_resource_or_fail(
|
||||
"policy_profiles", **profile)
|
||||
return dict(name=profile['name'], href=resource['href'])
|
||||
|
||||
    def query_resource_profiles(self):
        """ Return the list of policy profiles assigned to the resource.

        Each profile is cleaned to the form:
            {profile_name: STR, profile_description: STR, policies: ARR<POLICIES>}
        Fails the module on any REST error.
        """
        url = '{resource_url}/policy_profiles?expand=resources'
        try:
            response = self.client.get(url.format(resource_url=self.resource_url))
        except Exception as e:
            msg = "Failed to query {resource_type} policies: {error}".format(
                resource_type=self.resource_type,
                error=e)
            self.module.fail_json(msg=msg)

        resources = response.get('resources', [])

        # clean the returned rest api profile object to look like:
        # {profile_name: STR, profile_description: STR, policies: ARR<POLICIES>}
        profiles = [self.clean_profile_object(profile) for profile in resources]

        return profiles
|
||||
|
||||
def query_profile_policies(self, profile_id):
|
||||
""" Returns a set of the policy objects assigned to the resource
|
||||
"""
|
||||
url = '{api_url}/policy_profiles/{profile_id}?expand=policies'
|
||||
try:
|
||||
response = self.client.get(url.format(api_url=self.api_url, profile_id=profile_id))
|
||||
except Exception as e:
|
||||
msg = "Failed to query {resource_type} policies: {error}".format(
|
||||
resource_type=self.resource_type,
|
||||
error=e)
|
||||
self.module.fail_json(msg=msg)
|
||||
|
||||
resources = response.get('policies', [])
|
||||
|
||||
# clean the returned rest api policy object to look like:
|
||||
# {name: STR, description: STR, active: BOOL}
|
||||
policies = [self.clean_policy_object(policy) for policy in resources]
|
||||
|
||||
return policies
|
||||
|
||||
def clean_policy_object(self, policy):
|
||||
""" Clean a policy object to have human readable form of:
|
||||
{
|
||||
name: STR,
|
||||
description: STR,
|
||||
active: BOOL
|
||||
}
|
||||
"""
|
||||
name = policy.get('name')
|
||||
description = policy.get('description')
|
||||
active = policy.get('active')
|
||||
|
||||
return dict(
|
||||
name=name,
|
||||
description=description,
|
||||
active=active)
|
||||
|
||||
def clean_profile_object(self, profile):
|
||||
""" Clean a profile object to have human readable form of:
|
||||
{
|
||||
profile_name: STR,
|
||||
profile_description: STR,
|
||||
policies: ARR<POLICIES>
|
||||
}
|
||||
"""
|
||||
profile_id = profile['id']
|
||||
name = profile.get('name')
|
||||
description = profile.get('description')
|
||||
policies = self.query_profile_policies(profile_id)
|
||||
|
||||
return dict(
|
||||
profile_name=name,
|
||||
profile_description=description,
|
||||
policies=policies)
|
||||
|
||||
def profiles_to_update(self, profiles, action):
|
||||
""" Create a list of policies we need to update in ManageIQ.
|
||||
|
||||
Returns:
|
||||
Whether or not a change took place and a message describing the
|
||||
operation executed.
|
||||
"""
|
||||
profiles_to_post = []
|
||||
assigned_profiles = self.query_resource_profiles()
|
||||
|
||||
# make a list of assigned full profile names strings
|
||||
# e.g. ['openscap profile', ...]
|
||||
assigned_profiles_set = set([profile['profile_name'] for profile in assigned_profiles])
|
||||
|
||||
for profile in profiles:
|
||||
assigned = profile.get('name') in assigned_profiles_set
|
||||
|
||||
if (action == 'unassign' and assigned) or (action == 'assign' and not assigned):
|
||||
# add/update the policy profile href field
|
||||
# {name: STR, ...} => {name: STR, href: STR}
|
||||
profile = self.query_profile_href(profile)
|
||||
profiles_to_post.append(profile)
|
||||
|
||||
return profiles_to_post
|
||||
|
||||
def assign_or_unassign_profiles(self, profiles, action):
|
||||
""" Perform assign/unassign action
|
||||
"""
|
||||
# get a list of profiles needed to be changed
|
||||
profiles_to_post = self.profiles_to_update(profiles, action)
|
||||
if not profiles_to_post:
|
||||
return dict(
|
||||
changed=False,
|
||||
msg="Profiles {profiles} already {action}ed, nothing to do".format(
|
||||
action=action,
|
||||
profiles=profiles))
|
||||
|
||||
# try to assign or unassign profiles to resource
|
||||
url = '{resource_url}/policy_profiles'.format(resource_url=self.resource_url)
|
||||
try:
|
||||
response = self.client.post(url, action=action, resources=profiles_to_post)
|
||||
except Exception as e:
|
||||
msg = "Failed to {action} profile: {error}".format(
|
||||
action=action,
|
||||
error=e)
|
||||
self.module.fail_json(msg=msg)
|
||||
|
||||
# check all entities in result to be successful
|
||||
for result in response['results']:
|
||||
if not result['success']:
|
||||
msg = "Failed to {action}: {message}".format(
|
||||
action=action,
|
||||
message=result['message'])
|
||||
self.module.fail_json(msg=msg)
|
||||
|
||||
# successfully changed all needed profiles
|
||||
return dict(
|
||||
changed=True,
|
||||
msg="Successfully {action}ed profiles: {profiles}".format(
|
||||
action=action,
|
||||
profiles=profiles))
|
||||
|
||||
|
||||
def main():
    """Module entry point: validate arguments, then list, assign or
    unassign policy profiles on the requested ManageIQ resource."""
    actions = {'present': 'assign', 'absent': 'unassign', 'list': 'list'}
    argument_spec = dict(
        policy_profiles=dict(type='list'),
        resource_name=dict(required=True, type='str'),
        resource_type=dict(required=True, type='str',
                           choices=manageiq_entities().keys()),
        state=dict(required=False, type='str',
                   choices=['present', 'absent', 'list'], default='present'),
    )
    # merge in the shared manageiq connection arguments
    argument_spec.update(manageiq_argument_spec())

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_if=[
            ('state', 'present', ['policy_profiles']),
            ('state', 'absent', ['policy_profiles'])
        ],
    )

    params = module.params
    state = params['state']

    # translate the requested state into a REST action and an entity type
    action = actions[state]
    resource_type = manageiq_entities()[params['resource_type']]

    manageiq = ManageIQ(module)

    # resolve the resource id; this fails the module if it does not exist
    resource_id = manageiq.find_collection_resource_or_fail(
        resource_type, name=params['resource_name'])['id']

    manageiq_policies = ManageIQPolicies(manageiq, resource_type, resource_id)

    if action == 'list':
        # report the currently assigned profiles without changing anything
        result = dict(changed=False,
                      profiles=manageiq_policies.query_resource_profiles())
    else:
        # assign or unassign the requested profiles
        result = manageiq_policies.assign_or_unassign_profiles(
            params['policy_profiles'], action)

    module.exit_json(**result)


if __name__ == "__main__":
    main()
|
||||
894
plugins/modules/remote_management/manageiq/manageiq_provider.py
Normal file
894
plugins/modules/remote_management/manageiq/manageiq_provider.py
Normal file
@@ -0,0 +1,894 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
# (c) 2017, Daniel Korn <korndaniel1@gmail.com>
|
||||
# (c) 2017, Yaacov Zamir <yzamir@redhat.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
module: manageiq_provider
|
||||
short_description: Management of provider in ManageIQ.
|
||||
extends_documentation_fragment:
|
||||
- community.general.manageiq
|
||||
|
||||
author: Daniel Korn (@dkorn)
|
||||
description:
|
||||
- The manageiq_provider module supports adding, updating, and deleting provider in ManageIQ.
|
||||
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
- absent - provider should not exist, present - provider should be present, refresh - provider will be refreshed
|
||||
choices: ['absent', 'present', 'refresh']
|
||||
default: 'present'
|
||||
name:
|
||||
description: The provider's name.
|
||||
required: true
|
||||
type:
|
||||
description: The provider's type.
|
||||
required: true
|
||||
choices: ['Openshift', 'Amazon', 'oVirt', 'VMware', 'Azure', 'Director', 'OpenStack', 'GCE']
|
||||
zone:
|
||||
description: The ManageIQ zone name that will manage the provider.
|
||||
default: 'default'
|
||||
provider_region:
|
||||
description: The provider region name to connect to (e.g. AWS region for Amazon).
|
||||
host_default_vnc_port_start:
|
||||
description: The first port in the host VNC range. defaults to None.
|
||||
host_default_vnc_port_end:
|
||||
description: The last port in the host VNC range. defaults to None.
|
||||
subscription:
|
||||
description: Microsoft Azure subscription ID. defaults to None.
|
||||
project:
|
||||
description: Google Compute Engine Project ID. defaults to None.
|
||||
azure_tenant_id:
|
||||
description: Tenant ID. defaults to None.
|
||||
aliases: [ keystone_v3_domain_id ]
|
||||
tenant_mapping_enabled:
|
||||
type: bool
|
||||
default: 'no'
|
||||
description: Whether to enable mapping of existing tenants. defaults to False.
|
||||
api_version:
|
||||
description: The OpenStack Keystone API version. defaults to None.
|
||||
choices: ['v2', 'v3']
|
||||
|
||||
provider:
|
||||
description: Default endpoint connection information, required if state is true.
|
||||
suboptions:
|
||||
hostname:
|
||||
description: The provider's api hostname.
|
||||
required: true
|
||||
port:
|
||||
description: The provider's api port.
|
||||
userid:
|
||||
description: Provider's api endpoint authentication userid. defaults to None.
|
||||
password:
|
||||
description: Provider's api endpoint authentication password. defaults to None.
|
||||
auth_key:
|
||||
description: Provider's api endpoint authentication bearer token. defaults to None.
|
||||
validate_certs:
|
||||
description: Whether SSL certificates should be verified for HTTPS requests (deprecated). defaults to True.
|
||||
type: bool
|
||||
default: 'yes'
|
||||
security_protocol:
|
||||
description: How SSL certificates should be used for HTTPS requests. defaults to None.
|
||||
choices: ['ssl-with-validation','ssl-with-validation-custom-ca','ssl-without-validation','non-ssl']
|
||||
certificate_authority:
|
||||
description: The CA bundle string with custom certificates. defaults to None.
|
||||
|
||||
metrics:
|
||||
description: Metrics endpoint connection information.
|
||||
suboptions:
|
||||
hostname:
|
||||
description: The provider's api hostname.
|
||||
required: true
|
||||
port:
|
||||
description: The provider's api port.
|
||||
userid:
|
||||
description: Provider's api endpoint authentication userid. defaults to None.
|
||||
password:
|
||||
description: Provider's api endpoint authentication password. defaults to None.
|
||||
auth_key:
|
||||
description: Provider's api endpoint authentication bearer token. defaults to None.
|
||||
validate_certs:
|
||||
description: Whether SSL certificates should be verified for HTTPS requests (deprecated). defaults to True.
|
||||
type: bool
|
||||
default: 'yes'
|
||||
security_protocol:
|
||||
choices: ['ssl-with-validation','ssl-with-validation-custom-ca','ssl-without-validation','non-ssl']
|
||||
description: How SSL certificates should be used for HTTPS requests. defaults to None.
|
||||
certificate_authority:
|
||||
description: The CA bundle string with custom certificates. defaults to None.
|
||||
path:
|
||||
description: Database name for oVirt metrics. Defaults to ovirt_engine_history.
|
||||
default: ovirt_engine_history
|
||||
|
||||
alerts:
|
||||
description: Alerts endpoint connection information.
|
||||
suboptions:
|
||||
hostname:
|
||||
description: The provider's api hostname.
|
||||
required: true
|
||||
port:
|
||||
description: The provider's api port.
|
||||
userid:
|
||||
description: Provider's api endpoint authentication userid. defaults to None.
|
||||
password:
|
||||
description: Provider's api endpoint authentication password. defaults to None.
|
||||
auth_key:
|
||||
description: Provider's api endpoint authentication bearer token. defaults to None.
|
||||
validate_certs:
|
||||
description: Whether SSL certificates should be verified for HTTPS requests (deprecated). defaults to True.
|
||||
default: true
|
||||
security_protocol:
|
||||
choices: ['ssl-with-validation','ssl-with-validation-custom-ca','ssl-without-validation']
|
||||
description: How SSL certificates should be used for HTTPS requests. defaults to None.
|
||||
certificate_authority:
|
||||
description: The CA bundle string with custom certificates. defaults to None.
|
||||
|
||||
ssh_keypair:
|
||||
description: SSH key pair used for SSH connections to all hosts in this provider.
|
||||
suboptions:
|
||||
hostname:
|
||||
description: Director hostname.
|
||||
required: true
|
||||
userid:
|
||||
description: SSH username.
|
||||
auth_key:
|
||||
description: SSH private key.
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create a new provider in ManageIQ ('Hawkular' metrics)
|
||||
manageiq_provider:
|
||||
name: 'EngLab'
|
||||
type: 'Openshift'
|
||||
state: 'present'
|
||||
provider:
|
||||
auth_key: 'topSecret'
|
||||
hostname: 'example.com'
|
||||
port: 8443
|
||||
validate_certs: true
|
||||
security_protocol: 'ssl-with-validation-custom-ca'
|
||||
certificate_authority: |
|
||||
-----BEGIN CERTIFICATE-----
|
||||
FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
|
||||
c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw
|
||||
MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw
|
||||
ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S
|
||||
ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm
|
||||
AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw
|
||||
Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa
|
||||
z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ
|
||||
ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ
|
||||
AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
|
||||
SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI
|
||||
QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA
|
||||
aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051
|
||||
gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA
|
||||
qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o
|
||||
XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5
|
||||
-----END CERTIFICATE-----
|
||||
metrics:
|
||||
auth_key: 'topSecret'
|
||||
role: 'hawkular'
|
||||
hostname: 'example.com'
|
||||
port: 443
|
||||
validate_certs: true
|
||||
security_protocol: 'ssl-with-validation-custom-ca'
|
||||
certificate_authority: |
|
||||
-----BEGIN CERTIFICATE-----
|
||||
FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
|
||||
c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw
|
||||
MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw
|
||||
ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S
|
||||
ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm
|
||||
AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw
|
||||
Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa
|
||||
z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ
|
||||
ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ
|
||||
AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
|
||||
SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI
|
||||
QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA
|
||||
aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051
|
||||
gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA
|
||||
qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o
|
||||
XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5
|
||||
-----END CERTIFICATE-----
|
||||
manageiq_connection:
|
||||
url: 'https://127.0.0.1:80'
|
||||
username: 'admin'
|
||||
password: 'password'
|
||||
validate_certs: true
|
||||
|
||||
|
||||
- name: Update an existing provider named 'EngLab' (defaults to 'Prometheus' metrics)
|
||||
manageiq_provider:
|
||||
name: 'EngLab'
|
||||
type: 'Openshift'
|
||||
state: 'present'
|
||||
provider:
|
||||
auth_key: 'topSecret'
|
||||
hostname: 'next.example.com'
|
||||
port: 8443
|
||||
validate_certs: true
|
||||
security_protocol: 'ssl-with-validation-custom-ca'
|
||||
certificate_authority: |
|
||||
-----BEGIN CERTIFICATE-----
|
||||
FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
|
||||
c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw
|
||||
MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw
|
||||
ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S
|
||||
ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm
|
||||
AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw
|
||||
Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa
|
||||
z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ
|
||||
ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ
|
||||
AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
|
||||
SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI
|
||||
QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA
|
||||
aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051
|
||||
gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA
|
||||
qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o
|
||||
XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5
|
||||
-----END CERTIFICATE-----
|
||||
metrics:
|
||||
auth_key: 'topSecret'
|
||||
hostname: 'next.example.com'
|
||||
port: 443
|
||||
validate_certs: true
|
||||
security_protocol: 'ssl-with-validation-custom-ca'
|
||||
certificate_authority: |
|
||||
-----BEGIN CERTIFICATE-----
|
||||
FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
|
||||
c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw
|
||||
MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw
|
||||
ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S
|
||||
ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm
|
||||
AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw
|
||||
Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa
|
||||
z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ
|
||||
ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ
|
||||
AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
|
||||
SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI
|
||||
QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA
|
||||
aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051
|
||||
gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA
|
||||
qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o
|
||||
XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5
|
||||
-----END CERTIFICATE-----
|
||||
manageiq_connection:
|
||||
url: 'https://127.0.0.1'
|
||||
username: 'admin'
|
||||
password: 'password'
|
||||
validate_certs: true
|
||||
|
||||
|
||||
- name: Delete a provider in ManageIQ
|
||||
manageiq_provider:
|
||||
name: 'EngLab'
|
||||
type: 'Openshift'
|
||||
state: 'absent'
|
||||
manageiq_connection:
|
||||
url: 'https://127.0.0.1'
|
||||
username: 'admin'
|
||||
password: 'password'
|
||||
validate_certs: true
|
||||
|
||||
|
||||
- name: Create a new Amazon provider in ManageIQ using token authentication
|
||||
manageiq_provider:
|
||||
name: 'EngAmazon'
|
||||
type: 'Amazon'
|
||||
state: 'present'
|
||||
provider:
|
||||
hostname: 'amazon.example.com'
|
||||
userid: 'hello'
|
||||
password: 'world'
|
||||
manageiq_connection:
|
||||
url: 'https://127.0.0.1'
|
||||
token: 'VeryLongToken'
|
||||
validate_certs: true
|
||||
|
||||
|
||||
- name: Create a new oVirt provider in ManageIQ
|
||||
manageiq_provider:
|
||||
name: 'RHEV'
|
||||
type: 'oVirt'
|
||||
state: 'present'
|
||||
provider:
|
||||
hostname: 'rhev01.example.com'
|
||||
userid: 'admin@internal'
|
||||
password: 'password'
|
||||
validate_certs: true
|
||||
certificate_authority: |
|
||||
-----BEGIN CERTIFICATE-----
|
||||
FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
|
||||
c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw
|
||||
MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw
|
||||
ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S
|
||||
ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm
|
||||
AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw
|
||||
Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa
|
||||
z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ
|
||||
ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ
|
||||
AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
|
||||
SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI
|
||||
QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA
|
||||
aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051
|
||||
gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA
|
||||
qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o
|
||||
XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5
|
||||
-----END CERTIFICATE-----
|
||||
metrics:
|
||||
hostname: 'metrics.example.com'
|
||||
path: 'ovirt_engine_history'
|
||||
userid: 'user_id_metrics'
|
||||
password: 'password_metrics'
|
||||
validate_certs: true
|
||||
certificate_authority: |
|
||||
-----BEGIN CERTIFICATE-----
|
||||
FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
|
||||
c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw
|
||||
MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw
|
||||
ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S
|
||||
ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm
|
||||
AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw
|
||||
Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa
|
||||
z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ
|
||||
ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ
|
||||
AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
|
||||
SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI
|
||||
QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA
|
||||
aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051
|
||||
gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA
|
||||
qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o
|
||||
XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5
|
||||
-----END CERTIFICATE-----
|
||||
manageiq_connection:
|
||||
url: 'https://127.0.0.1'
|
||||
username: 'admin'
|
||||
password: 'password'
|
||||
validate_certs: true
|
||||
|
||||
- name: Create a new VMware provider in ManageIQ
|
||||
manageiq_provider:
|
||||
name: 'EngVMware'
|
||||
type: 'VMware'
|
||||
state: 'present'
|
||||
provider:
|
||||
hostname: 'vcenter.example.com'
|
||||
host_default_vnc_port_start: 5800
|
||||
host_default_vnc_port_end: 5801
|
||||
userid: 'root'
|
||||
password: 'password'
|
||||
manageiq_connection:
|
||||
url: 'https://127.0.0.1'
|
||||
token: 'VeryLongToken'
|
||||
validate_certs: true
|
||||
|
||||
- name: Create a new Azure provider in ManageIQ
|
||||
manageiq_provider:
|
||||
name: 'EngAzure'
|
||||
type: 'Azure'
|
||||
provider_region: 'northeurope'
|
||||
subscription: 'e272bd74-f661-484f-b223-88dd128a4049'
|
||||
azure_tenant_id: 'e272bd74-f661-484f-b223-88dd128a4048'
|
||||
state: 'present'
|
||||
provider:
|
||||
hostname: 'azure.example.com'
|
||||
userid: 'e272bd74-f661-484f-b223-88dd128a4049'
|
||||
password: 'password'
|
||||
manageiq_connection:
|
||||
url: 'https://cf-6af0.rhpds.opentlc.com'
|
||||
username: 'admin'
|
||||
password: 'password'
|
||||
validate_certs: false
|
||||
|
||||
- name: Create a new OpenStack Director provider in ManageIQ with rsa keypair
|
||||
manageiq_provider:
|
||||
name: 'EngDirector'
|
||||
type: 'Director'
|
||||
api_version: 'v3'
|
||||
state: 'present'
|
||||
provider:
|
||||
hostname: 'director.example.com'
|
||||
userid: 'admin'
|
||||
password: 'password'
|
||||
security_protocol: 'ssl-with-validation'
|
||||
validate_certs: 'true'
|
||||
certificate_authority: |
|
||||
-----BEGIN CERTIFICATE-----
|
||||
FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
|
||||
c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw
|
||||
MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw
|
||||
ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S
|
||||
ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm
|
||||
AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw
|
||||
Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa
|
||||
z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ
|
||||
ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ
|
||||
AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
|
||||
SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI
|
||||
QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA
|
||||
aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051
|
||||
gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA
|
||||
qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o
|
||||
XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5
|
||||
-----END CERTIFICATE-----
|
||||
ssh_keypair:
|
||||
hostname: director.example.com
|
||||
userid: heat-admin
|
||||
auth_key: 'SecretSSHPrivateKey'
|
||||
|
||||
- name: Create a new OpenStack provider in ManageIQ with amqp metrics
|
||||
manageiq_provider:
|
||||
name: 'EngOpenStack'
|
||||
type: 'OpenStack'
|
||||
api_version: 'v3'
|
||||
state: 'present'
|
||||
provider_region: 'europe'
|
||||
tenant_mapping_enabled: 'False'
|
||||
keystone_v3_domain_id: 'mydomain'
|
||||
provider:
|
||||
hostname: 'openstack.example.com'
|
||||
userid: 'admin'
|
||||
password: 'password'
|
||||
security_protocol: 'ssl-with-validation'
|
||||
validate_certs: 'true'
|
||||
certificate_authority: |
|
||||
-----BEGIN CERTIFICATE-----
|
||||
FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
|
||||
c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw
|
||||
MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw
|
||||
ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S
|
||||
ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm
|
||||
AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw
|
||||
Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa
|
||||
z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ
|
||||
ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ
|
||||
AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
|
||||
SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI
|
||||
QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA
|
||||
aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051
|
||||
gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA
|
||||
qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o
|
||||
XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5
|
||||
-----END CERTIFICATE-----
|
||||
metrics:
|
||||
role: amqp
|
||||
hostname: 'amqp.example.com'
|
||||
security_protocol: 'non-ssl'
|
||||
port: 5666
|
||||
userid: admin
|
||||
password: password
|
||||
|
||||
|
||||
- name: Create a new GCE provider in ManageIQ
|
||||
manageiq_provider:
|
||||
name: 'EngGoogle'
|
||||
type: 'GCE'
|
||||
provider_region: 'europe-west1'
|
||||
project: 'project1'
|
||||
state: 'present'
|
||||
provider:
|
||||
hostname: 'gce.example.com'
|
||||
auth_key: 'google_json_key'
|
||||
validate_certs: 'false'
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
'''
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec
|
||||
|
||||
|
||||
def supported_providers():
    """Return per-type provider metadata keyed by the module's ``type``
    choice: the ManageIQ class_name plus default endpoint role names."""
    return {
        'Openshift': {
            'class_name': 'ManageIQ::Providers::Openshift::ContainerManager',
            'authtype': 'bearer',
            'default_role': 'default',
            'metrics_role': 'prometheus',
            'alerts_role': 'prometheus_alerts',
        },
        'Amazon': {
            'class_name': 'ManageIQ::Providers::Amazon::CloudManager',
        },
        'oVirt': {
            'class_name': 'ManageIQ::Providers::Redhat::InfraManager',
            'default_role': 'default',
            'metrics_role': 'metrics',
        },
        'VMware': {
            'class_name': 'ManageIQ::Providers::Vmware::InfraManager',
        },
        'Azure': {
            'class_name': 'ManageIQ::Providers::Azure::CloudManager',
        },
        'Director': {
            'class_name': 'ManageIQ::Providers::Openstack::InfraManager',
            'ssh_keypair_role': 'ssh_keypair',
        },
        'OpenStack': {
            'class_name': 'ManageIQ::Providers::Openstack::CloudManager',
        },
        'GCE': {
            'class_name': 'ManageIQ::Providers::Google::CloudManager',
        },
    }
|
||||
|
||||
|
||||
def endpoint_list_spec():
    """Argument spec for the four supported endpoint sections of the module."""
    sections = ('provider', 'metrics', 'alerts', 'ssh_keypair')
    return dict(
        (section, dict(type='dict', options=endpoint_argument_spec()))
        for section in sections
    )
|
||||
|
||||
|
||||
def endpoint_argument_spec():
    """Argument spec shared by every endpoint section
    (provider, metrics, alerts, ssh_keypair)."""
    ssl_choices = [
        'ssl-with-validation',
        'ssl-with-validation-custom-ca',
        'ssl-without-validation',
        'non-ssl',
    ]
    return dict(
        role=dict(),
        hostname=dict(required=True),
        port=dict(type='int'),
        validate_certs=dict(default=True, type='bool', aliases=['verify_ssl']),
        certificate_authority=dict(),
        security_protocol=dict(choices=ssl_choices),
        userid=dict(),
        password=dict(no_log=True),
        auth_key=dict(no_log=True),
        subscription=dict(no_log=True),
        project=dict(),
        uid_ems=dict(),
        path=dict(),
    )
|
||||
|
||||
|
||||
def delete_nulls(h):
    """ Recursively remove null (None) entries from a structure.

    Lists are rebuilt element by element; dicts drop any key whose value
    is None. Scalars are returned unchanged.

    Returns:
        the same structure without nulls
    """
    if isinstance(h, list):
        # NOTE: must build a real list here. ``map(delete_nulls, h)`` returns
        # a lazy iterator on Python 3, which breaks equality checks and
        # serialization of the result downstream.
        return [delete_nulls(i) for i in h]
    if isinstance(h, dict):
        return dict((k, delete_nulls(v)) for k, v in h.items() if v is not None)

    return h
|
||||
|
||||
|
||||
class ManageIQProvider(object):
    """
    Object to execute provider management operations in manageiq.

    Wraps the ManageIQ REST client held by the shared ManageIQ helper and
    exposes create/edit/delete/refresh operations for providers.
    """

    def __init__(self, manageiq):
        # Keep a reference to the shared ManageIQ helper and cache the
        # pieces used on every call (module for fail_json, REST url, client).
        self.manageiq = manageiq

        self.module = self.manageiq.module
        self.api_url = self.manageiq.api_url
        self.client = self.manageiq.client

    def class_name_to_type(self, class_name):
        """ Convert class_name to type

        Reverse lookup into supported_providers(): find the provider type
        key whose 'class_name' matches the given API class name.

        Returns:
            the type, or None when zero or multiple matches are found
        """
        out = [k for k, v in supported_providers().items() if v['class_name'] == class_name]
        if len(out) == 1:
            return out[0]

        return None

    def zone_id(self, name):
        """ Search for zone id by zone name.

        Returns:
            the zone id, or send a module Fail signal if zone not found.
        """
        zone = self.manageiq.find_collection_resource_by('zones', name=name)
        if not zone: # zone doesn't exist
            self.module.fail_json(
                msg="zone %s does not exist in manageiq" % (name))

        return zone['id']

    def provider(self, name):
        """ Search for provider object by name.

        Returns:
            the provider, or None if provider not found.
        """
        return self.manageiq.find_collection_resource_by('providers', name=name)

    def build_connection_configurations(self, provider_type, endpoints):
        """ Build "connection_configurations" objects from
        requested endpoints provided by user

        Returns:
            the user requested provider endpoints list
        """
        connection_configurations = []
        endpoint_keys = endpoint_list_spec().keys()
        provider_defaults = supported_providers().get(provider_type, {})

        # get endpoint defaults
        # NOTE(review): assumes the 'provider' endpoint is always present
        # here -- callers only invoke this when raw_endpoints['provider']
        # is set (see main()).
        endpoint = endpoints.get('provider')
        default_auth_key = endpoint.get('auth_key')

        # build a connection_configuration object for each endpoint
        for endpoint_key in endpoint_keys:
            endpoint = endpoints.get(endpoint_key)
            if endpoint:
                # get role and authtype
                role = endpoint.get('role') or provider_defaults.get(endpoint_key + '_role', 'default')
                if role == 'default':
                    authtype = provider_defaults.get('authtype') or role
                else:
                    authtype = role

                # set a connection_configuration
                connection_configurations.append({
                    'endpoint': {
                        'role': role,
                        'hostname': endpoint.get('hostname'),
                        'port': endpoint.get('port'),
                        # Index trick converts the boolean into the 0/1
                        # integer the API expects for verify_ssl.
                        'verify_ssl': [0, 1][endpoint.get('validate_certs', True)],
                        'security_protocol': endpoint.get('security_protocol'),
                        'certificate_authority': endpoint.get('certificate_authority'),
                        'path': endpoint.get('path'),
                    },
                    'authentication': {
                        'authtype': authtype,
                        'userid': endpoint.get('userid'),
                        'password': endpoint.get('password'),
                        # Fall back to the 'provider' endpoint's auth_key
                        # when the endpoint has none of its own.
                        'auth_key': endpoint.get('auth_key') or default_auth_key,
                    }
                })

        return connection_configurations

    def delete_provider(self, provider):
        """ Deletes a provider from manageiq.

        Returns:
            a short message describing the operation executed.
        """
        try:
            url = '%s/providers/%s' % (self.api_url, provider['id'])
            result = self.client.post(url, action='delete')
        except Exception as e:
            self.module.fail_json(msg="failed to delete provider %s: %s" % (provider['name'], str(e)))

        return dict(changed=True, msg=result['message'])

    def edit_provider(self, provider, name, provider_type, endpoints, zone_id, provider_region,
                      host_default_vnc_port_start, host_default_vnc_port_end,
                      subscription, project, uid_ems, tenant_mapping_enabled, api_version):
        """ Edit a provider from manageiq.

        Returns:
            a short message describing the operation executed.
        """
        url = '%s/providers/%s' % (self.api_url, provider['id'])

        resource = dict(
            name=name,
            zone={'id': zone_id},
            provider_region=provider_region,
            connection_configurations=endpoints,
            host_default_vnc_port_start=host_default_vnc_port_start,
            host_default_vnc_port_end=host_default_vnc_port_end,
            subscription=subscription,
            project=project,
            uid_ems=uid_ems,
            tenant_mapping_enabled=tenant_mapping_enabled,
            api_version=api_version,
        )

        # NOTE: we do not check for diff's between requested and current
        # provider, we always submit endpoints with password or auth_keys,
        # since we can not compare with current password or auth_key,
        # every edit request is sent to ManageIQ API without comparing
        # it to current state.

        # clean nulls, we do not send nulls to the api
        resource = delete_nulls(resource)

        # try to update provider
        try:
            result = self.client.post(url, action='edit', resource=resource)
        except Exception as e:
            self.module.fail_json(msg="failed to update provider %s: %s" % (provider['name'], str(e)))

        return dict(
            changed=True,
            msg="successfully updated the provider %s: %s" % (provider['name'], result))

    def create_provider(self, name, provider_type, endpoints, zone_id, provider_region,
                        host_default_vnc_port_start, host_default_vnc_port_end,
                        subscription, project, uid_ems, tenant_mapping_enabled, api_version):
        """ Creates the provider in manageiq.

        Returns:
            a short message describing the operation executed.
        """
        resource = dict(
            name=name,
            zone={'id': zone_id},
            provider_region=provider_region,
            host_default_vnc_port_start=host_default_vnc_port_start,
            host_default_vnc_port_end=host_default_vnc_port_end,
            subscription=subscription,
            project=project,
            uid_ems=uid_ems,
            tenant_mapping_enabled=tenant_mapping_enabled,
            api_version=api_version,
            connection_configurations=endpoints,
        )

        # clean nulls, we do not send nulls to the api
        resource = delete_nulls(resource)

        # try to create a new provider
        try:
            url = '%s/providers' % (self.api_url)
            # The API derives the concrete provider class from 'type';
            # remaining attributes are passed through as keyword args.
            result = self.client.post(url, type=supported_providers()[provider_type]['class_name'], **resource)
        except Exception as e:
            self.module.fail_json(msg="failed to create provider %s: %s" % (name, str(e)))

        return dict(
            changed=True,
            msg="successfully created the provider %s: %s" % (name, result['results']))

    def refresh(self, provider, name):
        """ Trigger provider refresh.

        Returns:
            a short message describing the operation executed.
        """
        try:
            url = '%s/providers/%s' % (self.api_url, provider['id'])
            result = self.client.post(url, action='refresh')
        except Exception as e:
            self.module.fail_json(msg="failed to refresh provider %s: %s" % (name, str(e)))

        return dict(
            changed=True,
            msg="refreshing provider %s" % name)
|
||||
|
||||
|
||||
def main():
    """Entry point: create, update, delete or refresh a ManageIQ provider."""
    zone_id = None
    endpoints = []
    argument_spec = dict(
        state=dict(choices=['absent', 'present', 'refresh'], default='present'),
        name=dict(required=True),
        zone=dict(default='default'),
        provider_region=dict(),
        host_default_vnc_port_start=dict(),
        host_default_vnc_port_end=dict(),
        subscription=dict(),
        project=dict(),
        azure_tenant_id=dict(aliases=['keystone_v3_domain_id']),
        tenant_mapping_enabled=dict(default=False, type='bool'),
        api_version=dict(choices=['v2', 'v3']),
        type=dict(choices=supported_providers().keys()),
    )
    # add the manageiq connection arguments to the arguments
    argument_spec.update(manageiq_argument_spec())
    # add the endpoint arguments to the arguments
    argument_spec.update(endpoint_list_spec())

    module = AnsibleModule(
        argument_spec=argument_spec,
        # 'provider' comes from endpoint_list_spec(); it is the one
        # mandatory endpoint when the provider should exist.
        required_if=[
            ('state', 'present', ['provider']),
            ('state', 'refresh', ['name'])],
        required_together=[
            ['host_default_vnc_port_start', 'host_default_vnc_port_end']
        ],
    )

    name = module.params['name']
    zone_name = module.params['zone']
    provider_type = module.params['type']
    # The endpoint sub-dicts are picked out of the full params by
    # build_connection_configurations.
    raw_endpoints = module.params
    provider_region = module.params['provider_region']
    host_default_vnc_port_start = module.params['host_default_vnc_port_start']
    host_default_vnc_port_end = module.params['host_default_vnc_port_end']
    subscription = module.params['subscription']
    uid_ems = module.params['azure_tenant_id']
    project = module.params['project']
    tenant_mapping_enabled = module.params['tenant_mapping_enabled']
    api_version = module.params['api_version']
    state = module.params['state']

    manageiq = ManageIQ(module)
    manageiq_provider = ManageIQProvider(manageiq)

    provider = manageiq_provider.provider(name)

    # provider should not exist
    if state == "absent":
        # if we have a provider, delete it
        if provider:
            res_args = manageiq_provider.delete_provider(provider)
        # if we do not have a provider, nothing to do
        else:
            res_args = dict(
                changed=False,
                msg="provider %s: does not exist in manageiq" % (name))

    # provider should exist
    if state == "present":
        # get data user did not explicitly give
        if zone_name:
            zone_id = manageiq_provider.zone_id(zone_name)

        # if we do not have a provider_type, use the current provider_type
        if provider and not provider_type:
            provider_type = manageiq_provider.class_name_to_type(provider['type'])

        # check supported_providers types
        if not provider_type:
            manageiq_provider.module.fail_json(
                msg="missing required argument: provider_type")

        # check supported_providers types
        if provider_type not in supported_providers().keys():
            manageiq_provider.module.fail_json(
                msg="provider_type %s is not supported" % (provider_type))

        # build "connection_configurations" objects from user requested endpoints
        # "provider" is a required endpoint, if we have it, we have endpoints
        if raw_endpoints.get("provider"):
            endpoints = manageiq_provider.build_connection_configurations(provider_type, raw_endpoints)

        # if we have a provider, edit it
        if provider:
            res_args = manageiq_provider.edit_provider(provider, name, provider_type, endpoints, zone_id, provider_region,
                                                       host_default_vnc_port_start, host_default_vnc_port_end,
                                                       subscription, project, uid_ems, tenant_mapping_enabled, api_version)
        # if we do not have a provider, create it
        else:
            res_args = manageiq_provider.create_provider(name, provider_type, endpoints, zone_id, provider_region,
                                                         host_default_vnc_port_start, host_default_vnc_port_end,
                                                         subscription, project, uid_ems, tenant_mapping_enabled, api_version)

    # refresh provider (trigger sync)
    if state == "refresh":
        if provider:
            res_args = manageiq_provider.refresh(provider, name)
        else:
            res_args = dict(
                changed=False,
                msg="provider %s: does not exist in manageiq" % (name))

    module.exit_json(**res_args)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
292
plugins/modules/remote_management/manageiq/manageiq_tags.py
Normal file
292
plugins/modules/remote_management/manageiq/manageiq_tags.py
Normal file
@@ -0,0 +1,292 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
# (c) 2017, Daniel Korn <korndaniel1@gmail.com>
|
||||
# (c) 2017, Yaacov Zamir <yzamir@redhat.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
|
||||
module: manageiq_tags
|
||||
|
||||
short_description: Management of resource tags in ManageIQ.
|
||||
extends_documentation_fragment:
|
||||
- community.general.manageiq
|
||||
|
||||
author: Daniel Korn (@dkorn)
|
||||
description:
|
||||
- The manageiq_tags module supports adding, updating and deleting tags in ManageIQ.
|
||||
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
- absent - tags should not exist,
|
||||
- present - tags should exist,
|
||||
- list - list current tags.
|
||||
choices: ['absent', 'present', 'list']
|
||||
default: 'present'
|
||||
tags:
|
||||
description:
|
||||
- tags - list of dictionaries, each includes 'name' and 'category' keys.
|
||||
- required if state is present or absent.
|
||||
resource_type:
|
||||
description:
|
||||
- the relevant resource type in manageiq
|
||||
required: true
|
||||
choices: ['provider', 'host', 'vm', 'blueprint', 'category', 'cluster',
|
||||
'data store', 'group', 'resource pool', 'service', 'service template',
|
||||
'template', 'tenant', 'user']
|
||||
resource_name:
|
||||
description:
|
||||
- the relevant resource name in manageiq
|
||||
required: true
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create new tags for a provider in ManageIQ
|
||||
manageiq_tags:
|
||||
resource_name: 'EngLab'
|
||||
resource_type: 'provider'
|
||||
tags:
|
||||
- category: environment
|
||||
name: prod
|
||||
- category: owner
|
||||
name: prod_ops
|
||||
manageiq_connection:
|
||||
url: 'http://127.0.0.1:3000'
|
||||
username: 'admin'
|
||||
password: 'smartvm'
|
||||
validate_certs: False
|
||||
|
||||
- name: Remove tags for a provider in ManageIQ
|
||||
manageiq_tags:
|
||||
state: absent
|
||||
resource_name: 'EngLab'
|
||||
resource_type: 'provider'
|
||||
tags:
|
||||
- category: environment
|
||||
name: prod
|
||||
- category: owner
|
||||
name: prod_ops
|
||||
manageiq_connection:
|
||||
url: 'http://127.0.0.1:3000'
|
||||
username: 'admin'
|
||||
password: 'smartvm'
|
||||
validate_certs: False
|
||||
|
||||
- name: List current tags for a provider in ManageIQ
|
||||
manageiq_tags:
|
||||
state: list
|
||||
resource_name: 'EngLab'
|
||||
resource_type: 'provider'
|
||||
manageiq_connection:
|
||||
url: 'http://127.0.0.1:3000'
|
||||
username: 'admin'
|
||||
password: 'smartvm'
|
||||
validate_certs: False
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
'''
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec, manageiq_entities
|
||||
|
||||
|
||||
def query_resource_id(manageiq, resource_type, resource_name):
    """ Look up a named resource in ManageIQ and return its id.

    Returns:
        the resource id if it exists in manageiq, Fail otherwise.
    """
    found = manageiq.find_collection_resource_by(resource_type, name=resource_name)
    if not found:
        # Abort the module run: nothing downstream can work without the id.
        msg = "{resource_name} {resource_type} does not exist in manageiq".format(
            resource_name=resource_name, resource_type=resource_type)
        manageiq.module.fail_json(msg=msg)
    return found["id"]
|
||||
|
||||
|
||||
class ManageIQTags(object):
    """
    Object to execute tags management operations of manageiq resources.

    Bound to one resource (type + id); all REST calls go through the
    precomputed resource_url.
    """

    def __init__(self, manageiq, resource_type, resource_id):
        self.manageiq = manageiq

        self.module = self.manageiq.module
        self.api_url = self.manageiq.api_url
        self.client = self.manageiq.client

        self.resource_type = resource_type
        self.resource_id = resource_id
        # Base URL of the managed resource, used by all tag endpoints below.
        self.resource_url = '{api_url}/{resource_type}/{resource_id}'.format(
            api_url=self.api_url,
            resource_type=resource_type,
            resource_id=resource_id)

    def full_tag_name(self, tag):
        """ Returns the full tag name in manageiq
        (e.g. '/managed/environment/prod' for category 'environment',
        name 'prod').
        """
        return '/managed/{tag_category}/{tag_name}'.format(
            tag_category=tag['category'],
            tag_name=tag['name'])

    def clean_tag_object(self, tag):
        """ Clean a tag object to have human readable form of:
        {
            full_name: STR,
            name: STR,
            display_name: STR,
            category: STR
        }
        """
        full_name = tag.get('name')
        categorization = tag.get('categorization', {})

        return dict(
            full_name=full_name,
            name=categorization.get('name'),
            display_name=categorization.get('display_name'),
            category=categorization.get('category', {}).get('name'))

    def query_resource_tags(self):
        """ Returns a set of the tag objects assigned to the resource
        """
        # expand=resources inlines full tag objects; categorization adds
        # the human-readable category breakdown used by clean_tag_object.
        url = '{resource_url}/tags?expand=resources&attributes=categorization'
        try:
            response = self.client.get(url.format(resource_url=self.resource_url))
        except Exception as e:
            msg = "Failed to query {resource_type} tags: {error}".format(
                resource_type=self.resource_type,
                error=e)
            self.module.fail_json(msg=msg)

        resources = response.get('resources', [])

        # clean the returned rest api tag object to look like:
        # {full_name: STR, name: STR, display_name: STR, category: STR}
        tags = [self.clean_tag_object(tag) for tag in resources]

        return tags

    def tags_to_update(self, tags, action):
        """ Create a list of tags we need to update in ManageIQ.

        Only tags whose assignment state differs from the requested
        action are returned, which keeps the module idempotent.

        Returns:
            Whether or not a change took place and a message describing the
            operation executed.
        """
        tags_to_post = []
        assigned_tags = self.query_resource_tags()

        # make a list of assigned full tag names strings
        # e.g. ['/managed/environment/prod', ...]
        assigned_tags_set = set([tag['full_name'] for tag in assigned_tags])

        for tag in tags:
            assigned = self.full_tag_name(tag) in assigned_tags_set

            if assigned and action == 'unassign':
                tags_to_post.append(tag)
            elif (not assigned) and action == 'assign':
                tags_to_post.append(tag)

        return tags_to_post

    def assign_or_unassign_tags(self, tags, action):
        """ Perform assign/unassign action
        """
        # get a list of tags needed to be changed
        tags_to_post = self.tags_to_update(tags, action)
        if not tags_to_post:
            return dict(
                changed=False,
                msg="Tags already {action}ed, nothing to do".format(action=action))

        # try to assign or unassign tags to resource
        # NOTE(review): the full requested 'tags' list is posted, not the
        # filtered 'tags_to_post' -- presumably harmless since re-applying
        # an assignment is a no-op server side; confirm against the API.
        url = '{resource_url}/tags'.format(resource_url=self.resource_url)
        try:
            response = self.client.post(url, action=action, resources=tags)
        except Exception as e:
            msg = "Failed to {action} tag: {error}".format(
                action=action,
                error=e)
            self.module.fail_json(msg=msg)

        # check all entities in result to be successful
        for result in response['results']:
            if not result['success']:
                msg = "Failed to {action}: {message}".format(
                    action=action,
                    message=result['message'])
                self.module.fail_json(msg=msg)

        # successfully changed all needed tags
        return dict(
            changed=True,
            msg="Successfully {action}ed tags".format(action=action))
|
||||
|
||||
|
||||
def main():
    """Entry point: assign, unassign or list tags on a ManageIQ resource."""
    # map module 'state' to the REST action verb
    actions = {'present': 'assign', 'absent': 'unassign', 'list': 'list'}
    argument_spec = dict(
        tags=dict(type='list'),
        resource_name=dict(required=True, type='str'),
        resource_type=dict(required=True, type='str',
                           choices=manageiq_entities().keys()),
        state=dict(required=False, type='str',
                   choices=['present', 'absent', 'list'], default='present'),
    )
    # add the manageiq connection arguments to the arguments
    argument_spec.update(manageiq_argument_spec())

    module = AnsibleModule(
        argument_spec=argument_spec,
        # tags are only meaningful when assigning or unassigning
        required_if=[
            ('state', 'present', ['tags']),
            ('state', 'absent', ['tags'])
        ],
    )

    tags = module.params['tags']
    resource_type_key = module.params['resource_type']
    resource_name = module.params['resource_name']
    state = module.params['state']

    # get the action and resource type
    action = actions[state]
    resource_type = manageiq_entities()[resource_type_key]

    manageiq = ManageIQ(module)

    # query resource id, fail if resource does not exist
    resource_id = query_resource_id(manageiq, resource_type, resource_name)

    manageiq_tags = ManageIQTags(manageiq, resource_type, resource_id)

    if action == 'list':
        # return a list of current tags for this object
        current_tags = manageiq_tags.query_resource_tags()
        res_args = dict(changed=False, tags=current_tags)
    else:
        # assign or unassign the tags
        res_args = manageiq_tags.assign_or_unassign_tags(tags, action)

    module.exit_json(**res_args)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
555
plugins/modules/remote_management/manageiq/manageiq_tenant.py
Normal file
555
plugins/modules/remote_management/manageiq/manageiq_tenant.py
Normal file
@@ -0,0 +1,555 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# (c) 2018, Evert Mulder (base on manageiq_user.py by Daniel Korn <korndaniel1@gmail.com>)
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
|
||||
module: manageiq_tenant
|
||||
|
||||
short_description: Management of tenants in ManageIQ.
|
||||
extends_documentation_fragment:
|
||||
- community.general.manageiq
|
||||
|
||||
author: Evert Mulder (@evertmulder)
|
||||
description:
|
||||
- The manageiq_tenant module supports adding, updating and deleting tenants in ManageIQ.
|
||||
requirements:
|
||||
- manageiq-client
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
- absent - tenant should not exist, present - tenant should be.
|
||||
choices: ['absent', 'present']
|
||||
default: 'present'
|
||||
name:
|
||||
description:
|
||||
- The tenant name.
|
||||
required: true
|
||||
default: null
|
||||
description:
|
||||
description:
|
||||
- The tenant description.
|
||||
required: true
|
||||
default: null
|
||||
parent_id:
|
||||
description:
|
||||
- The id of the parent tenant. If not supplied the root tenant is used.
|
||||
- The C(parent_id) takes precedence over C(parent) when supplied
|
||||
required: false
|
||||
default: null
|
||||
parent:
|
||||
description:
|
||||
- The name of the parent tenant. If not supplied and no C(parent_id) is supplied the root tenant is used.
|
||||
required: false
|
||||
default: null
|
||||
quotas:
|
||||
description:
|
||||
- The tenant quotas.
|
||||
- All parameters case sensitive.
|
||||
- 'Valid attributes are:'
|
||||
- ' - C(cpu_allocated) (int): use null to remove the quota.'
|
||||
- ' - C(mem_allocated) (GB): use null to remove the quota.'
|
||||
- ' - C(storage_allocated) (GB): use null to remove the quota.'
|
||||
- ' - C(vms_allocated) (int): use null to remove the quota.'
|
||||
- ' - C(templates_allocated) (int): use null to remove the quota.'
|
||||
required: false
|
||||
default: null
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Update the root tenant in ManageIQ
|
||||
manageiq_tenant:
|
||||
name: 'My Company'
|
||||
description: 'My company name'
|
||||
manageiq_connection:
|
||||
url: 'http://127.0.0.1:3000'
|
||||
username: 'admin'
|
||||
password: 'smartvm'
|
||||
validate_certs: False
|
||||
|
||||
- name: Create a tenant in ManageIQ
|
||||
manageiq_tenant:
|
||||
name: 'Dep1'
|
||||
description: 'Manufacturing department'
|
||||
parent_id: 1
|
||||
manageiq_connection:
|
||||
url: 'http://127.0.0.1:3000'
|
||||
username: 'admin'
|
||||
password: 'smartvm'
|
||||
validate_certs: False
|
||||
|
||||
- name: Delete a tenant in ManageIQ
|
||||
manageiq_tenant:
|
||||
state: 'absent'
|
||||
name: 'Dep1'
|
||||
parent_id: 1
|
||||
manageiq_connection:
|
||||
url: 'http://127.0.0.1:3000'
|
||||
username: 'admin'
|
||||
password: 'smartvm'
|
||||
validate_certs: False
|
||||
|
||||
- name: Set tenant quota for cpu_allocated, mem_allocated, remove quota for vms_allocated
|
||||
manageiq_tenant:
|
||||
name: 'Dep1'
|
||||
parent_id: 1
|
||||
quotas:
|
||||
- cpu_allocated: 100
|
||||
- mem_allocated: 50
|
||||
- vms_allocated: null
|
||||
manageiq_connection:
|
||||
url: 'http://127.0.0.1:3000'
|
||||
username: 'admin'
|
||||
password: 'smartvm'
|
||||
validate_certs: False
|
||||
|
||||
|
||||
- name: Delete a tenant in ManageIQ using a token
|
||||
manageiq_tenant:
|
||||
state: 'absent'
|
||||
name: 'Dep1'
|
||||
parent_id: 1
|
||||
manageiq_connection:
|
||||
url: 'http://127.0.0.1:3000'
|
||||
token: 'sometoken'
|
||||
validate_certs: False
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
tenant:
|
||||
description: The tenant.
|
||||
returned: success
|
||||
type: complex
|
||||
contains:
|
||||
id:
|
||||
description: The tenant id
|
||||
returned: success
|
||||
type: int
|
||||
name:
|
||||
description: The tenant name
|
||||
returned: success
|
||||
type: str
|
||||
description:
|
||||
description: The tenant description
|
||||
returned: success
|
||||
type: str
|
||||
parent_id:
|
||||
description: The id of the parent tenant
|
||||
returned: success
|
||||
type: int
|
||||
quotas:
|
||||
description: List of tenant quotas
|
||||
returned: success
|
||||
type: list
|
||||
sample:
|
||||
cpu_allocated: 100
|
||||
mem_allocated: 50
|
||||
'''
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec
|
||||
|
||||
|
||||
class ManageIQTenant(object):
|
||||
"""
|
||||
Object to execute tenant management operations in manageiq.
|
||||
"""
|
||||
|
||||
def __init__(self, manageiq):
|
||||
self.manageiq = manageiq
|
||||
|
||||
self.module = self.manageiq.module
|
||||
self.api_url = self.manageiq.api_url
|
||||
self.client = self.manageiq.client
|
||||
|
||||
    def tenant(self, name, parent_id, parent):
        """ Search for tenant object by name and parent_id or parent
        or the root tenant if no parent or parent_id is supplied.

        The two branches are near-duplicates: resolve the parent tenant
        (by id or by name), then scan all tenants named `name` for one
        whose ancestry ends in the parent's id.

        Returns:
            the parent tenant, None for the root tenant
            the tenant or None if tenant was not found.
        """

        if parent_id:
            parent_tenant_res = self.client.collections.tenants.find_by(id=parent_id)
            if not parent_tenant_res:
                self.module.fail_json(msg="Parent tenant with id '%s' not found in manageiq" % str(parent_id))
            parent_tenant = parent_tenant_res[0]
            tenants = self.client.collections.tenants.find_by(name=name)

            for tenant in tenants:
                try:
                    ancestry = tenant['ancestry']
                except AttributeError:
                    # root tenant has no ancestry attribute
                    ancestry = None

                if ancestry:
                    # ancestry is a '/'-separated chain of ancestor ids;
                    # the last element is the direct parent.
                    tenant_parent_id = int(ancestry.split("/")[-1])
                    if int(tenant_parent_id) == parent_id:
                        return parent_tenant, tenant

            return parent_tenant, None
        else:
            if parent:
                parent_tenant_res = self.client.collections.tenants.find_by(name=parent)
                if not parent_tenant_res:
                    self.module.fail_json(msg="Parent tenant '%s' not found in manageiq" % parent)

                if len(parent_tenant_res) > 1:
                    # ambiguous parent name; refuse to guess
                    self.module.fail_json(msg="Multiple parent tenants not found in manageiq with name '%s" % parent)

                parent_tenant = parent_tenant_res[0]
                parent_id = int(parent_tenant['id'])
                tenants = self.client.collections.tenants.find_by(name=name)

                for tenant in tenants:
                    try:
                        ancestry = tenant['ancestry']
                    except AttributeError:
                        ancestry = None

                    if ancestry:
                        tenant_parent_id = int(ancestry.split("/")[-1])
                        if tenant_parent_id == parent_id:
                            return parent_tenant, tenant

                return parent_tenant, None
            else:
                # No parent or parent id supplied we select the root tenant
                return None, self.client.collections.tenants.find_by(ancestry=None)[0]
|
||||
|
||||
def compare_tenant(self, tenant, name, description):
|
||||
""" Compare tenant fields with new field values.
|
||||
|
||||
Returns:
|
||||
false if tenant fields have some difference from new fields, true o/w.
|
||||
"""
|
||||
found_difference = (
|
||||
(name and tenant['name'] != name) or
|
||||
(description and tenant['description'] != description)
|
||||
)
|
||||
|
||||
return not found_difference
|
||||
|
||||
    def delete_tenant(self, tenant):
        """ Deletes a tenant from manageiq.

        Returns:
            dict with `msg` and `changed`
        """
        try:
            url = '%s/tenants/%s' % (self.api_url, tenant['id'])
            result = self.client.post(url, action='delete')
        except Exception as e:
            self.module.fail_json(msg="failed to delete tenant %s: %s" % (tenant['name'], str(e)))

        # the API reports per-request success in the payload even when the
        # HTTP call itself succeeded
        if result['success'] is False:
            self.module.fail_json(msg=result['message'])

        return dict(changed=True, msg=result['message'])
|
||||
|
||||
    def edit_tenant(self, tenant, name, description):
        """ Edit a manageiq tenant.

        Returns:
            dict with `msg` and `changed`
        """
        resource = dict(name=name, description=description, use_config_for_attributes=False)

        # check if we need to update ( compare_tenant is true if no difference found )
        if self.compare_tenant(tenant, name, description):
            return dict(
                changed=False,
                msg="tenant %s is not changed." % tenant['name'],
                tenant=tenant['_data'])

        # try to update tenant
        try:
            result = self.client.post(tenant['href'], action='edit', resource=resource)
        except Exception as e:
            self.module.fail_json(msg="failed to update tenant %s: %s" % (tenant['name'], str(e)))

        # NOTE(review): `result` is intentionally not inspected here; the
        # success message is built from the local tenant id only.
        return dict(
            changed=True,
            msg="successfully updated the tenant with id %s" % (tenant['id']))
|
||||
|
||||
    def create_tenant(self, name, description, parent_tenant):
        """ Creates the tenant in manageiq.

        Returns:
            dict with `msg`, `changed` and `tenant_id`
        """
        parent_id = parent_tenant['id']
        # check for required arguments
        for key, value in dict(name=name, description=description, parent_id=parent_id).items():
            if value in (None, ''):
                self.module.fail_json(msg="missing required argument: %s" % key)

        url = '%s/tenants' % self.api_url

        resource = {'name': name, 'description': description, 'parent': {'id': parent_id}}

        try:
            result = self.client.post(url, action='create', resource=resource)
            # the API wraps created entities in a 'results' list
            tenant_id = result['results'][0]['id']
        except Exception as e:
            self.module.fail_json(msg="failed to create tenant %s: %s" % (name, str(e)))

        return dict(
            changed=True,
            msg="successfully created tenant '%s' with id '%s'" % (name, tenant_id),
            tenant_id=tenant_id)
|
||||
|
||||
    def tenant_quota(self, tenant, quota_key):
        """ Search for tenant quota object by tenant and quota_key.

        Returns:
            the (possibly empty) list of quota resources matching quota_key
            for the tenant.
        """

        # server-side filter on the quota name avoids fetching all quotas
        tenant_quotas = self.client.get("%s/quotas?expand=resources&filter[]=name=%s" % (tenant['href'], quota_key))

        return tenant_quotas['resources']
|
||||
|
||||
    def tenant_quotas(self, tenant):
        """ Search for tenant quotas object by tenant.

        Returns:
            the (possibly empty) list of all quota resources of the tenant.
        """

        tenant_quotas = self.client.get("%s/quotas?expand=resources" % (tenant['href']))

        return tenant_quotas['resources']
|
||||
|
||||
def update_tenant_quotas(self, tenant, quotas):
    """Reconcile the tenant's quotas with the requested values.

    A truthy requested value creates or edits the quota; a falsy value
    deletes the quota when it exists (otherwise it is a no-op).

    Returns:
        dict with `msg` and `changed`
    """
    changed = False
    messages = []

    for quota_key, quota_value in quotas.items():
        matches = self.tenant_quota(tenant, quota_key)
        current_quota = matches[0] if matches else None

        if quota_value:
            # Byte-based quotas are supplied in GB; convert to bytes.
            if quota_key in ['storage_allocated', 'mem_allocated']:
                quota_value_int = int(quota_value) * 1024 * 1024 * 1024
            else:
                quota_value_int = int(quota_value)

            if current_quota:
                res = self.edit_tenant_quota(tenant, current_quota, quota_key, quota_value_int)
            else:
                res = self.create_tenant_quota(tenant, quota_key, quota_value_int)
        elif current_quota:
            res = self.delete_tenant_quota(tenant, current_quota)
        else:
            res = dict(changed=False, msg="tenant quota '%s' does not exist" % quota_key)

        changed = changed or res['changed']
        messages.append(res['msg'])

    return dict(
        changed=changed,
        msg=', '.join(messages))
|
||||
|
||||
def edit_tenant_quota(self, tenant, current_quota, quota_key, quota_value):
    """Set an existing tenant quota to a new value.

    Returns:
        result dict with `changed` and `msg`
    """
    # No-op when the quota already carries the requested value.
    if current_quota['value'] == quota_value:
        return dict(
            changed=False,
            msg="tenant quota %s already has value %s" % (quota_key, quota_value))

    try:
        self.client.post(
            '%s/quotas/%s' % (tenant['href'], current_quota['id']),
            action='edit',
            resource={'value': quota_value})
    except Exception as e:
        self.module.fail_json(msg="failed to update tenant quota %s: %s" % (quota_key, str(e)))

    return dict(
        changed=True,
        msg="successfully updated tenant quota %s" % quota_key)
|
||||
|
||||
def create_tenant_quota(self, tenant, quota_key, quota_value):
    """Attach a new quota to the tenant.

    Returns:
        result dict with `changed` and `msg`
    """
    try:
        self.client.post(
            '%s/quotas' % (tenant['href']),
            action='create',
            resource={'name': quota_key, 'value': quota_value})
    except Exception as e:
        self.module.fail_json(msg="failed to create tenant quota %s: %s" % (quota_key, str(e)))

    return dict(
        changed=True,
        msg="successfully created tenant quota %s" % quota_key)
|
||||
|
||||
def delete_tenant_quota(self, tenant, quota):
    """Remove an existing quota from the tenant.

    Returns:
        result dict with `changed` and the API's confirmation message
    """
    try:
        outcome = self.client.post(quota['href'], action='delete')
    except Exception as e:
        self.module.fail_json(msg="failed to delete tenant quota '%s': %s" % (quota['name'], str(e)))

    return dict(changed=True, msg=outcome['message'])
|
||||
|
||||
def create_tenant_response(self, tenant, parent_tenant):
    """Build the Ansible result object for a ManageIQ tenant entity.

    Returns:
        dict with the tenant id, name, description, parent id and quotas
    """
    quotas = self.create_tenant_quotas_response(tenant['tenant_quotas'])

    # The root tenant carries no 'ancestry' value; every other tenant
    # encodes its parent chain as a '/'-separated id path.
    try:
        parent_id = tenant['ancestry'].split("/")[-1]
    except AttributeError:
        parent_id = None

    return dict(
        id=tenant['id'],
        name=tenant['name'],
        description=tenant['description'],
        parent_id=parent_id,
        quotas=quotas
    )
|
||||
|
||||
@staticmethod
def create_tenant_quotas_response(tenant_quotas):
    """Build the Ansible result mapping for ManageIQ tenant quota entities.

    Byte-based quotas are reported in GB; all other quotas keep their
    raw value.

    Returns:
        dict mapping each applied quota's name to its value
    """
    if not tenant_quotas:
        return {}

    response = {}
    for entry in tenant_quotas:
        amount = entry['value']
        if entry['unit'] == 'bytes':
            # convert bytes to GB for readability
            amount = float(amount) / (1024 * 1024 * 1024)
        response[entry['name']] = amount
    return response
|
||||
|
||||
|
||||
def main():
    """Entry point for the manageiq_tenant module."""
    argument_spec = dict(
        name=dict(required=True, type='str'),
        description=dict(required=True, type='str'),
        parent_id=dict(required=False, type='int'),
        parent=dict(required=False, type='str'),
        state=dict(choices=['absent', 'present'], default='present'),
        quotas=dict(type='dict', default={})
    )
    # add the manageiq connection arguments to the arguments
    argument_spec.update(manageiq_argument_spec())

    module = AnsibleModule(
        argument_spec=argument_spec
    )

    params = module.params
    name = params['name']
    description = params['description']
    parent_id = params['parent_id']
    parent = params['parent']
    state = params['state']
    quotas = params['quotas']

    manageiq = ManageIQ(module)
    manageiq_tenant = ManageIQTenant(manageiq)

    parent_tenant, tenant = manageiq_tenant.tenant(name, parent_id, parent)

    if state == "absent":
        if tenant:
            # a matching tenant exists: remove it
            res_args = manageiq_tenant.delete_tenant(tenant)
        else:
            # nothing to delete; report which lookup came up empty
            if parent_id:
                msg = "tenant '%s' with parent_id %i does not exist in manageiq" % (name, parent_id)
            else:
                msg = "tenant '%s' with parent '%s' does not exist in manageiq" % (name, parent)

            res_args = dict(
                changed=False,
                msg=msg)

    if state == "present":
        if tenant:
            # tenant exists: bring it in line with the requested fields
            res_args = manageiq_tenant.edit_tenant(tenant, name, description)
        else:
            # tenant is missing: create it, then fetch the new entity
            res_args = manageiq_tenant.create_tenant(name, description, parent_tenant)
            tenant = manageiq.client.get_entity('tenants', res_args['tenant_id'])

        # apply requested quotas to the (possibly freshly created) tenant
        if quotas:
            tenant_quotas_res = manageiq_tenant.update_tenant_quotas(tenant, quotas)
            if tenant_quotas_res['changed']:
                res_args['changed'] = True
                res_args['tenant_quotas_msg'] = tenant_quotas_res['msg']

        tenant.reload(expand='resources', attributes=['tenant_quotas'])
        res_args['tenant'] = manageiq_tenant.create_tenant_response(tenant, parent_tenant)

    module.exit_json(**res_args)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
329
plugins/modules/remote_management/manageiq/manageiq_user.py
Normal file
329
plugins/modules/remote_management/manageiq/manageiq_user.py
Normal file
@@ -0,0 +1,329 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# (c) 2017, Daniel Korn <korndaniel1@gmail.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
|
||||
module: manageiq_user
|
||||
|
||||
short_description: Management of users in ManageIQ.
|
||||
extends_documentation_fragment:
|
||||
- community.general.manageiq
|
||||
|
||||
author: Daniel Korn (@dkorn)
|
||||
description:
|
||||
- The manageiq_user module supports adding, updating and deleting users in ManageIQ.
|
||||
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
- absent - user should not exist, present - user should be.
|
||||
choices: ['absent', 'present']
|
||||
default: 'present'
|
||||
userid:
|
||||
description:
|
||||
- The unique userid in manageiq, often mentioned as username.
|
||||
required: true
|
||||
name:
|
||||
description:
|
||||
- The users' full name.
|
||||
password:
|
||||
description:
|
||||
- The users' password.
|
||||
group:
|
||||
description:
|
||||
- The name of the group to which the user belongs.
|
||||
email:
|
||||
description:
|
||||
- The users' E-mail address.
|
||||
update_password:
|
||||
default: always
|
||||
choices: ['always', 'on_create']
|
||||
description:
|
||||
- C(always) will update passwords unconditionally. C(on_create) will only set the password for a newly created user.
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create a new user in ManageIQ
|
||||
manageiq_user:
|
||||
userid: 'jdoe'
|
||||
name: 'Jane Doe'
|
||||
password: 'VerySecret'
|
||||
group: 'EvmGroup-user'
|
||||
email: 'jdoe@example.com'
|
||||
manageiq_connection:
|
||||
url: 'http://127.0.0.1:3000'
|
||||
username: 'admin'
|
||||
password: 'smartvm'
|
||||
validate_certs: False
|
||||
|
||||
- name: Create a new user in ManageIQ using a token
|
||||
manageiq_user:
|
||||
userid: 'jdoe'
|
||||
name: 'Jane Doe'
|
||||
password: 'VerySecret'
|
||||
group: 'EvmGroup-user'
|
||||
email: 'jdoe@example.com'
|
||||
manageiq_connection:
|
||||
url: 'http://127.0.0.1:3000'
|
||||
token: 'sometoken'
|
||||
validate_certs: False
|
||||
|
||||
- name: Delete a user in ManageIQ
|
||||
manageiq_user:
|
||||
state: 'absent'
|
||||
userid: 'jdoe'
|
||||
manageiq_connection:
|
||||
url: 'http://127.0.0.1:3000'
|
||||
username: 'admin'
|
||||
password: 'smartvm'
|
||||
validate_certs: False
|
||||
|
||||
- name: Delete a user in ManageIQ using a token
|
||||
manageiq_user:
|
||||
state: 'absent'
|
||||
userid: 'jdoe'
|
||||
manageiq_connection:
|
||||
url: 'http://127.0.0.1:3000'
|
||||
token: 'sometoken'
|
||||
validate_certs: False
|
||||
|
||||
- name: Update email of user in ManageIQ
|
||||
manageiq_user:
|
||||
userid: 'jdoe'
|
||||
email: 'jaustine@example.com'
|
||||
manageiq_connection:
|
||||
url: 'http://127.0.0.1:3000'
|
||||
username: 'admin'
|
||||
password: 'smartvm'
|
||||
validate_certs: False
|
||||
|
||||
- name: Update email of user in ManageIQ using a token
|
||||
manageiq_user:
|
||||
userid: 'jdoe'
|
||||
email: 'jaustine@example.com'
|
||||
manageiq_connection:
|
||||
url: 'http://127.0.0.1:3000'
|
||||
token: 'sometoken'
|
||||
validate_certs: False
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
'''
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec
|
||||
|
||||
|
||||
class ManageIQUser(object):
    """
    Object to execute user management operations in manageiq.
    """

    def __init__(self, manageiq):
        self.manageiq = manageiq

        self.module = self.manageiq.module
        self.api_url = self.manageiq.api_url
        self.client = self.manageiq.client

    def group_id(self, description):
        """Resolve a group's id from its description.

        Fails the module when no group with that description exists.

        Returns:
            the group id.
        """
        group = self.manageiq.find_collection_resource_by('groups', description=description)
        if not group:
            # no matching group in manageiq
            self.module.fail_json(
                msg="group %s does not exist in manageiq" % (description))

        return group['id']

    def user(self, userid):
        """Look up the user entity for `userid`.

        Returns:
            the user, or None when no such user exists.
        """
        return self.manageiq.find_collection_resource_by('users', userid=userid)

    def compare_user(self, user, name, group_id, password, email):
        """Compare the user's current fields with the requested values.

        Returns:
            True when no supplied field differs (i.e. nothing to update),
            False otherwise. A supplied password always counts as a change.
        """
        same_name = not (name and user['name'] != name)
        same_email = not (email and user['email'] != email)
        same_group = not (group_id and user['current_group_id'] != group_id)

        return bool(password is None and same_name and same_email and same_group)

    def delete_user(self, user):
        """Delete a user from manageiq.

        Returns:
            a short result dict describing the operation executed.
        """
        try:
            endpoint = '%s/users/%s' % (self.api_url, user['id'])
            result = self.client.post(endpoint, action='delete')
        except Exception as e:
            self.module.fail_json(msg="failed to delete user %s: %s" % (user['userid'], str(e)))

        return dict(changed=True, msg=result['message'])

    def edit_user(self, user, name, group, password, email):
        """Update an existing manageiq user with the supplied fields.

        Returns:
            a short result dict describing the operation executed.
        """
        group_id = None
        url = '%s/users/%s' % (self.api_url, user['id'])

        resource = dict(userid=user['userid'])
        if group is not None:
            group_id = self.group_id(group)
            resource['group'] = dict(id=group_id)
        if name is not None:
            resource['name'] = name
        if email is not None:
            resource['email'] = email

        # 'on_create' means: never touch the password of an existing
        # user, even if one was supplied.
        if self.module.params['update_password'] == 'on_create':
            password = None
        if password is not None:
            resource['password'] = password

        # nothing differs from the current state: report no change
        if self.compare_user(user, name, group_id, password, email):
            return dict(
                changed=False,
                msg="user %s is not changed." % (user['userid']))

        try:
            result = self.client.post(url, action='edit', resource=resource)
        except Exception as e:
            self.module.fail_json(msg="failed to update user %s: %s" % (user['userid'], str(e)))

        return dict(
            changed=True,
            msg="successfully updated the user %s: %s" % (user['userid'], result))

    def create_user(self, userid, name, group, password, email):
        """Create a new user in manageiq.

        Returns:
            a short result dict describing the created user.
        """
        # name, group and password are mandatory for creation
        for key, value in dict(name=name, group=group, password=password).items():
            if value in (None, ''):
                self.module.fail_json(msg="missing required argument: %s" % (key))

        group_id = self.group_id(group)

        resource = {'userid': userid, 'name': name, 'password': password, 'group': {'id': group_id}}
        if email is not None:
            resource['email'] = email

        try:
            result = self.client.post('%s/users' % (self.api_url), action='create', resource=resource)
        except Exception as e:
            self.module.fail_json(msg="failed to create user %s: %s" % (userid, str(e)))

        return dict(
            changed=True,
            msg="successfully created the user %s: %s" % (userid, result['results']))
|
||||
|
||||
|
||||
def main():
    """Entry point for the manageiq_user module."""
    argument_spec = dict(
        userid=dict(required=True, type='str'),
        name=dict(),
        password=dict(no_log=True),
        group=dict(),
        email=dict(),
        state=dict(choices=['absent', 'present'], default='present'),
        update_password=dict(choices=['always', 'on_create'],
                             default='always'),
    )
    # add the manageiq connection arguments to the arguments
    argument_spec.update(manageiq_argument_spec())

    module = AnsibleModule(
        argument_spec=argument_spec,
    )

    params = module.params
    userid = params['userid']
    name = params['name']
    password = params['password']
    group = params['group']
    email = params['email']
    state = params['state']

    manageiq = ManageIQ(module)
    manageiq_user = ManageIQUser(manageiq)

    user = manageiq_user.user(userid)

    if state == "absent":
        if user:
            # user exists: remove it
            res_args = manageiq_user.delete_user(user)
        else:
            # already absent: nothing to do
            res_args = dict(
                changed=False,
                msg="user %s: does not exist in manageiq" % (userid))

    if state == "present":
        if user:
            # user exists: bring it in line with the requested fields
            res_args = manageiq_user.edit_user(user, name, group, password, email)
        else:
            # user is missing: create it
            res_args = manageiq_user.create_user(userid, name, group, password, email)

    module.exit_json(**res_args)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -0,0 +1 @@
|
||||
oneview_datacenter_info.py
|
||||
@@ -0,0 +1,156 @@
|
||||
#!/usr/bin/python
|
||||
# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: oneview_datacenter_info
|
||||
short_description: Retrieve information about the OneView Data Centers
|
||||
description:
|
||||
- Retrieve information about the OneView Data Centers.
|
||||
- This module was called C(oneview_datacenter_facts) before Ansible 2.9, returning C(ansible_facts).
|
||||
Note that the M(oneview_datacenter_info) module no longer returns C(ansible_facts)!
|
||||
requirements:
|
||||
- "hpOneView >= 2.0.1"
|
||||
author:
|
||||
- Alex Monteiro (@aalexmonteiro)
|
||||
- Madhav Bharadwaj (@madhav-bharadwaj)
|
||||
- Priyanka Sood (@soodpr)
|
||||
- Ricardo Galeno (@ricardogpsf)
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Data Center name.
|
||||
options:
|
||||
description:
|
||||
- "Retrieve additional information. Options available: 'visualContent'."
|
||||
|
||||
extends_documentation_fragment:
|
||||
- community.general.oneview
|
||||
- community.general.oneview.factsparams
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Gather information about all Data Centers
|
||||
oneview_datacenter_info:
|
||||
hostname: 172.16.101.48
|
||||
username: administrator
|
||||
password: my_password
|
||||
api_version: 500
|
||||
delegate_to: localhost
|
||||
register: result
|
||||
- debug:
|
||||
msg: "{{ result.datacenters }}"
|
||||
|
||||
- name: Gather paginated, filtered and sorted information about Data Centers
|
||||
oneview_datacenter_info:
|
||||
hostname: 172.16.101.48
|
||||
username: administrator
|
||||
password: my_password
|
||||
api_version: 500
|
||||
params:
|
||||
start: 0
|
||||
count: 3
|
||||
sort: 'name:descending'
|
||||
filter: 'state=Unmanaged'
|
||||
register: result
|
||||
- debug:
|
||||
msg: "{{ result.datacenters }}"
|
||||
|
||||
- name: Gather information about a Data Center by name
|
||||
oneview_datacenter_info:
|
||||
hostname: 172.16.101.48
|
||||
username: administrator
|
||||
password: my_password
|
||||
api_version: 500
|
||||
name: "My Data Center"
|
||||
delegate_to: localhost
|
||||
register: result
|
||||
- debug:
|
||||
msg: "{{ result.datacenters }}"
|
||||
|
||||
- name: Gather information about the Data Center Visual Content
|
||||
oneview_datacenter_info:
|
||||
hostname: 172.16.101.48
|
||||
username: administrator
|
||||
password: my_password
|
||||
api_version: 500
|
||||
name: "My Data Center"
|
||||
options:
|
||||
- visualContent
|
||||
delegate_to: localhost
|
||||
register: result
|
||||
- debug:
|
||||
msg: "{{ result.datacenters }}"
|
||||
- debug:
|
||||
msg: "{{ result.datacenter_visual_content }}"
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
datacenters:
|
||||
description: Has all the OneView information about the Data Centers.
|
||||
returned: Always, but can be null.
|
||||
type: dict
|
||||
|
||||
datacenter_visual_content:
|
||||
description: Has information about the Data Center Visual Content.
|
||||
returned: When requested, but can be null.
|
||||
type: dict
|
||||
'''
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
|
||||
|
||||
|
||||
class DatacenterInfoModule(OneViewModuleBase):
    """Retrieve information about OneView Data Centers."""

    argument_spec = dict(
        name=dict(type='str'),
        options=dict(type='list'),
        params=dict(type='dict')
    )

    def __init__(self):
        super(DatacenterInfoModule, self).__init__(additional_arg_spec=self.argument_spec)
        # Running under the legacy *_facts alias changes the result shape.
        self.is_old_facts = self.module._name == 'oneview_datacenter_facts'
        if self.is_old_facts:
            self.module.deprecate("The 'oneview_datacenter_facts' module has been renamed to 'oneview_datacenter_info', "
                                  "and the renamed one no longer returns ansible_facts", version='2.13')

    def execute_module(self):
        """Collect datacenter data (and optional visual content) from OneView."""
        client = self.oneview_client.datacenters
        info = {}

        requested_name = self.module.params.get('name')
        if requested_name:
            datacenters = client.get_by('name', requested_name)

            # visual content is only fetched when explicitly requested
            if self.options and 'visualContent' in self.options:
                if datacenters:
                    info['datacenter_visual_content'] = client.get_visual_content(datacenters[0]['uri'])
                else:
                    info['datacenter_visual_content'] = None

            info['datacenters'] = datacenters
        else:
            info['datacenters'] = client.get_all(**self.facts_params)

        if self.is_old_facts:
            # legacy shape: wrap everything in ansible_facts
            return dict(changed=False,
                        ansible_facts=info)
        return dict(changed=False, **info)
|
||||
|
||||
|
||||
def main():
    """Module entry point: run the datacenter info module."""
    DatacenterInfoModule().run()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -0,0 +1 @@
|
||||
oneview_enclosure_info.py
|
||||
@@ -0,0 +1,228 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
# Copyright: (c) 2016-2017, Hewlett Packard Enterprise Development LP
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: oneview_enclosure_info
|
||||
short_description: Retrieve information about one or more Enclosures
|
||||
description:
|
||||
- Retrieve information about one or more of the Enclosures from OneView.
|
||||
- This module was called C(oneview_enclosure_facts) before Ansible 2.9, returning C(ansible_facts).
|
||||
Note that the M(oneview_enclosure_info) module no longer returns C(ansible_facts)!
|
||||
requirements:
|
||||
- hpOneView >= 2.0.1
|
||||
author:
|
||||
- Felipe Bulsoni (@fgbulsoni)
|
||||
- Thiago Miotto (@tmiotto)
|
||||
- Adriane Cardozo (@adriane-cardozo)
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Enclosure name.
|
||||
options:
|
||||
description:
|
||||
- "List with options to gather additional information about an Enclosure and related resources.
|
||||
Options allowed: C(script), C(environmentalConfiguration), and C(utilization). For the option C(utilization),
|
||||
you can provide specific parameters."
|
||||
|
||||
extends_documentation_fragment:
|
||||
- community.general.oneview
|
||||
- community.general.oneview.factsparams
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Gather information about all Enclosures
|
||||
oneview_enclosure_info:
|
||||
hostname: 172.16.101.48
|
||||
username: administrator
|
||||
password: my_password
|
||||
api_version: 500
|
||||
no_log: true
|
||||
delegate_to: localhost
|
||||
register: result
|
||||
- debug:
|
||||
msg: "{{ result.enclosures }}"
|
||||
|
||||
- name: Gather paginated, filtered and sorted information about Enclosures
|
||||
oneview_enclosure_info:
|
||||
params:
|
||||
start: 0
|
||||
count: 3
|
||||
sort: name:descending
|
||||
filter: status=OK
|
||||
hostname: 172.16.101.48
|
||||
username: administrator
|
||||
password: my_password
|
||||
api_version: 500
|
||||
no_log: true
|
||||
delegate_to: localhost
|
||||
register: result
|
||||
- debug:
|
||||
msg: "{{ result.enclosures }}"
|
||||
|
||||
- name: Gather information about an Enclosure by name
|
||||
oneview_enclosure_info:
|
||||
name: Enclosure-Name
|
||||
hostname: 172.16.101.48
|
||||
username: administrator
|
||||
password: my_password
|
||||
api_version: 500
|
||||
no_log: true
|
||||
delegate_to: localhost
|
||||
register: result
|
||||
- debug:
|
||||
msg: "{{ result.enclosures }}"
|
||||
|
||||
- name: Gather information about an Enclosure by name with options
|
||||
oneview_enclosure_info:
|
||||
name: Test-Enclosure
|
||||
options:
|
||||
- script # optional
|
||||
- environmentalConfiguration # optional
|
||||
- utilization # optional
|
||||
hostname: 172.16.101.48
|
||||
username: administrator
|
||||
password: my_password
|
||||
api_version: 500
|
||||
no_log: true
|
||||
delegate_to: localhost
|
||||
register: result
|
||||
- debug:
|
||||
msg: "{{ result.enclosures }}"
|
||||
- debug:
|
||||
msg: "{{ result.enclosure_script }}"
|
||||
- debug:
|
||||
msg: "{{ result.enclosure_environmental_configuration }}"
|
||||
- debug:
|
||||
msg: "{{ result.enclosure_utilization }}"
|
||||
|
||||
- name: "Gather information about an Enclosure with temperature data at a resolution of one sample per day, between two
|
||||
specified dates"
|
||||
oneview_enclosure_info:
|
||||
name: Test-Enclosure
|
||||
options:
|
||||
- utilization: # optional
|
||||
fields: AmbientTemperature
|
||||
filter:
|
||||
- startDate=2016-07-01T14:29:42.000Z
|
||||
- endDate=2017-07-01T03:29:42.000Z
|
||||
view: day
|
||||
refresh: false
|
||||
hostname: 172.16.101.48
|
||||
username: administrator
|
||||
password: my_password
|
||||
api_version: 500
|
||||
no_log: true
|
||||
delegate_to: localhost
|
||||
register: result
|
||||
- debug:
|
||||
msg: "{{ result.enclosures }}"
|
||||
- debug:
|
||||
msg: "{{ result.enclosure_utilization }}"
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
enclosures:
|
||||
description: Has all the OneView information about the Enclosures.
|
||||
returned: Always, but can be null.
|
||||
type: dict
|
||||
|
||||
enclosure_script:
|
||||
description: Has all the OneView information about the script of an Enclosure.
|
||||
returned: When requested, but can be null.
|
||||
type: str
|
||||
|
||||
enclosure_environmental_configuration:
|
||||
description: Has all the OneView information about the environmental configuration of an Enclosure.
|
||||
returned: When requested, but can be null.
|
||||
type: dict
|
||||
|
||||
enclosure_utilization:
|
||||
description: Has all the OneView information about the utilization of an Enclosure.
|
||||
returned: When requested, but can be null.
|
||||
type: dict
|
||||
'''
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
|
||||
|
||||
|
||||
class EnclosureInfoModule(OneViewModuleBase):
    """Retrieve information about one or more OneView Enclosures."""

    argument_spec = dict(name=dict(type='str'), options=dict(type='list'), params=dict(type='dict'))

    def __init__(self):
        super(EnclosureInfoModule, self).__init__(additional_arg_spec=self.argument_spec)
        # Running under the legacy *_facts alias changes the result shape.
        self.is_old_facts = self.module._name == 'oneview_enclosure_facts'
        if self.is_old_facts:
            self.module.deprecate("The 'oneview_enclosure_facts' module has been renamed to 'oneview_enclosure_info', "
                                  "and the renamed one no longer returns ansible_facts", version='2.13')

    def execute_module(self):
        """Collect enclosure data (and any requested optional info) from OneView.

        Returns:
            dict with `changed` plus the gathered info (wrapped in
            `ansible_facts` when running as the legacy facts module).
        """
        info = {}

        if self.module.params['name']:
            enclosures = self._get_by_name(self.module.params['name'])

            # optional sub-resources only apply to a named enclosure
            if self.options and enclosures:
                info = self._gather_optional_info(self.options, enclosures[0])
        else:
            enclosures = self.oneview_client.enclosures.get_all(**self.facts_params)

        info['enclosures'] = enclosures

        if self.is_old_facts:
            return dict(changed=False,
                        ansible_facts=info)
        else:
            return dict(changed=False, **info)

    def _gather_optional_info(self, options, enclosure):
        """Fetch the optional enclosure sub-resources requested in `options`."""
        enclosure_client = self.oneview_client.enclosures
        info = {}

        if options.get('script'):
            info['enclosure_script'] = enclosure_client.get_script(enclosure['uri'])
        if options.get('environmentalConfiguration'):
            env_config = enclosure_client.get_environmental_configuration(enclosure['uri'])
            info['enclosure_environmental_configuration'] = env_config
        if options.get('utilization'):
            info['enclosure_utilization'] = self._get_utilization(enclosure, options['utilization'])

        return info

    def _get_utilization(self, enclosure, params):
        """Query utilization data, honoring any fields/view/refresh/filter params.

        `params` may be a dict of query refinements or any other value
        (in which case all refinements default to '').
        """
        # renamed local from `filter` to avoid shadowing the builtin;
        # the keyword argument passed to the SDK must stay `filter`
        fields = view = refresh = filter_expr = ''

        if isinstance(params, dict):
            fields = params.get('fields')
            view = params.get('view')
            refresh = params.get('refresh')
            filter_expr = params.get('filter')

        return self.oneview_client.enclosures.get_utilization(enclosure['uri'],
                                                              fields=fields,
                                                              filter=filter_expr,
                                                              refresh=refresh,
                                                              view=view)

    def _get_by_name(self, name):
        """Look up enclosures by exact name."""
        return self.oneview_client.enclosures.get_by('name', name)
|
||||
|
||||
|
||||
def main():
    """Module entry point: run the enclosure info module."""
    EnclosureInfoModule().run()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -0,0 +1,251 @@
|
||||
#!/usr/bin/python
|
||||
# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: oneview_ethernet_network
|
||||
short_description: Manage OneView Ethernet Network resources
|
||||
description:
|
||||
- Provides an interface to manage Ethernet Network resources. Can create, update, or delete.
|
||||
requirements:
|
||||
- hpOneView >= 3.1.0
|
||||
author:
|
||||
- Felipe Bulsoni (@fgbulsoni)
|
||||
- Thiago Miotto (@tmiotto)
|
||||
- Adriane Cardozo (@adriane-cardozo)
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
- Indicates the desired state for the Ethernet Network resource.
|
||||
- C(present) will ensure data properties are compliant with OneView.
|
||||
- C(absent) will remove the resource from OneView, if it exists.
|
||||
- C(default_bandwidth_reset) will reset the network connection template to the default.
|
||||
default: present
|
||||
choices: [present, absent, default_bandwidth_reset]
|
||||
data:
|
||||
description:
|
||||
- List with Ethernet Network properties.
|
||||
required: true
|
||||
extends_documentation_fragment:
|
||||
- community.general.oneview
|
||||
- community.general.oneview.validateetag
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Ensure that the Ethernet Network is present using the default configuration
|
||||
oneview_ethernet_network:
|
||||
config: '/etc/oneview/oneview_config.json'
|
||||
state: present
|
||||
data:
|
||||
name: 'Test Ethernet Network'
|
||||
vlanId: '201'
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Update the Ethernet Network changing bandwidth and purpose
|
||||
oneview_ethernet_network:
|
||||
config: '/etc/oneview/oneview_config.json'
|
||||
state: present
|
||||
data:
|
||||
name: 'Test Ethernet Network'
|
||||
purpose: Management
|
||||
bandwidth:
|
||||
maximumBandwidth: 3000
|
||||
typicalBandwidth: 2000
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Ensure that the Ethernet Network is present with name 'Renamed Ethernet Network'
|
||||
oneview_ethernet_network:
|
||||
config: '/etc/oneview/oneview_config.json'
|
||||
state: present
|
||||
data:
|
||||
name: 'Test Ethernet Network'
|
||||
newName: 'Renamed Ethernet Network'
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Ensure that the Ethernet Network is absent
|
||||
oneview_ethernet_network:
|
||||
config: '/etc/oneview/oneview_config.json'
|
||||
state: absent
|
||||
data:
|
||||
name: 'New Ethernet Network'
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Create Ethernet networks in bulk
|
||||
oneview_ethernet_network:
|
||||
config: '/etc/oneview/oneview_config.json'
|
||||
state: present
|
||||
data:
|
||||
vlanIdRange: '1-10,15,17'
|
||||
purpose: General
|
||||
namePrefix: TestNetwork
|
||||
smartLink: false
|
||||
privateNetwork: false
|
||||
bandwidth:
|
||||
maximumBandwidth: 10000
|
||||
typicalBandwidth: 2000
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Reset to the default network connection template
|
||||
oneview_ethernet_network:
|
||||
config: '/etc/oneview/oneview_config.json'
|
||||
state: default_bandwidth_reset
|
||||
data:
|
||||
name: 'Test Ethernet Network'
|
||||
delegate_to: localhost
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
ethernet_network:
|
||||
description: Has the facts about the Ethernet Networks.
|
||||
returned: On state 'present'. Can be null.
|
||||
type: dict
|
||||
|
||||
ethernet_network_bulk:
|
||||
description: Has the facts about the Ethernet Networks affected by the bulk insert.
|
||||
returned: When 'vlanIdRange' attribute is in data argument. Can be null.
|
||||
type: dict
|
||||
|
||||
ethernet_network_connection_template:
|
||||
description: Has the facts about the Ethernet Network Connection Template.
|
||||
returned: On state 'default_bandwidth_reset'. Can be null.
|
||||
type: dict
|
||||
'''
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase, OneViewModuleResourceNotFound
|
||||
|
||||
|
||||
class EthernetNetworkModule(OneViewModuleBase):
    """Manage OneView Ethernet Network resources.

    Supports create/update/delete of a single network, bulk creation over a
    VLAN id range, and resetting a network's connection template bandwidth
    to the OneView default.
    """

    MSG_CREATED = 'Ethernet Network created successfully.'
    MSG_UPDATED = 'Ethernet Network updated successfully.'
    MSG_DELETED = 'Ethernet Network deleted successfully.'
    MSG_ALREADY_PRESENT = 'Ethernet Network is already present.'
    MSG_ALREADY_ABSENT = 'Ethernet Network is already absent.'

    MSG_BULK_CREATED = 'Ethernet Networks created successfully.'
    MSG_MISSING_BULK_CREATED = 'Some missing Ethernet Networks were created successfully.'
    MSG_BULK_ALREADY_EXIST = 'The specified Ethernet Networks already exist.'
    MSG_CONNECTION_TEMPLATE_RESET = 'Ethernet Network connection template was reset to the default.'
    MSG_ETHERNET_NETWORK_NOT_FOUND = 'Ethernet Network was not found.'

    RESOURCE_FACT_NAME = 'ethernet_network'

    def __init__(self):
        """Declare the module-specific arguments and bind the Ethernet Network client."""
        argument_spec = dict(
            state=dict(type='str', default='present', choices=['absent', 'default_bandwidth_reset', 'present']),
            data=dict(type='dict', required=True),
        )

        super(EthernetNetworkModule, self).__init__(additional_arg_spec=argument_spec, validate_etag_support=True)

        self.resource_client = self.oneview_client.ethernet_networks

    def execute_module(self):
        """Dispatch to the handler matching the requested state.

        Returns the result dict produced by the handler (``changed``, ``msg``
        and, where applicable, ``ansible_facts``).
        """
        resource = None
        if self.data.get('name'):
            resource = self.get_by_name(self.data['name'])

        if self.state == 'present':
            # A vlanIdRange in the data means bulk creation was requested.
            if self.data.get('vlanIdRange'):
                return self._bulk_present()
            return self._present(resource)
        elif self.state == 'absent':
            return self.resource_absent(resource)
        elif self.state == 'default_bandwidth_reset':
            changed, msg, ansible_facts = self._default_bandwidth_reset(resource)
            return dict(changed=changed, msg=msg, ansible_facts=ansible_facts)

    def _present(self, resource):
        """Ensure a single Ethernet Network exists and matches the requested data."""
        # 'bandwidth' and 'scopeUris' are not plain resource attributes; they
        # are applied through dedicated API calls, so pop them before the
        # generic create/update.
        bandwidth = self.data.pop('bandwidth', None)
        scope_uris = self.data.pop('scopeUris', None)
        result = self.resource_present(resource, self.RESOURCE_FACT_NAME)

        if bandwidth:
            if self._update_connection_template(result['ansible_facts']['ethernet_network'], bandwidth)[0]:
                result['changed'] = True
                result['msg'] = self.MSG_UPDATED

        if scope_uris is not None:
            result = self.resource_scopes_set(result, 'ethernet_network', scope_uris)

        return result

    def _bulk_present(self):
        """Ensure every network in the requested VLAN id range exists.

        Creates only the missing networks, and always returns the full set of
        networks in the range under ``ansible_facts['ethernet_network_bulk']``.
        """
        vlan_id_range = self.data['vlanIdRange']
        result = dict(ansible_facts={})
        ethernet_networks = self.resource_client.get_range(self.data['namePrefix'], vlan_id_range)

        if not ethernet_networks:
            self.resource_client.create_bulk(self.data)
            result['changed'] = True
            result['msg'] = self.MSG_BULK_CREATED
        else:
            # Work out which VLAN ids of the requested range are still missing.
            vlan_ids = self.resource_client.dissociate_values_or_ranges(vlan_id_range)
            for net in ethernet_networks:
                vlan_ids.remove(net['vlanId'])

            if len(vlan_ids) == 0:
                result['msg'] = self.MSG_BULK_ALREADY_EXIST
                result['changed'] = False
            else:
                if len(vlan_ids) == 1:
                    self.data['vlanIdRange'] = '{0}-{1}'.format(vlan_ids[0], vlan_ids[0])
                else:
                    self.data['vlanIdRange'] = ','.join(map(str, vlan_ids))

                self.resource_client.create_bulk(self.data)
                result['changed'] = True
                result['msg'] = self.MSG_MISSING_BULK_CREATED
        result['ansible_facts']['ethernet_network_bulk'] = self.resource_client.get_range(self.data['namePrefix'], vlan_id_range)

        return result

    def _update_connection_template(self, ethernet_network, bandwidth):
        """Apply *bandwidth* to the network's connection template if it differs.

        Returns a ``(changed, connection_template)`` tuple; the template is
        None when no update was needed or no template URI is present.
        """
        if 'connectionTemplateUri' not in ethernet_network:
            return False, None

        connection_template = self.oneview_client.connection_templates.get(ethernet_network['connectionTemplateUri'])

        merged_data = connection_template.copy()
        merged_data.update({'bandwidth': bandwidth})

        if not self.compare(connection_template, merged_data):
            connection_template = self.oneview_client.connection_templates.update(merged_data)
            return True, connection_template
        else:
            return False, None

    def _default_bandwidth_reset(self, resource):
        """Reset the network's connection template to the default bandwidth.

        Raises:
            OneViewModuleResourceNotFound: when the network does not exist.
        """
        if not resource:
            raise OneViewModuleResourceNotFound(self.MSG_ETHERNET_NETWORK_NOT_FOUND)

        default_connection_template = self.oneview_client.connection_templates.get_default()

        changed, connection_template = self._update_connection_template(resource, default_connection_template['bandwidth'])

        return changed, self.MSG_CONNECTION_TEMPLATE_RESET, dict(
            ethernet_network_connection_template=connection_template)
|
||||
|
||||
|
||||
def main():
    """Entry point: build the module wrapper and execute it."""
    module = EthernetNetworkModule()
    module.run()


if __name__ == '__main__':
    main()
|
||||
@@ -0,0 +1 @@
|
||||
oneview_ethernet_network_info.py
|
||||
@@ -0,0 +1,168 @@
|
||||
#!/usr/bin/python
|
||||
# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: oneview_ethernet_network_info
|
||||
short_description: Retrieve the information about one or more of the OneView Ethernet Networks
|
||||
description:
|
||||
- Retrieve the information about one or more of the Ethernet Networks from OneView.
|
||||
- This module was called C(oneview_ethernet_network_facts) before Ansible 2.9, returning C(ansible_facts).
|
||||
Note that the M(oneview_ethernet_network_info) module no longer returns C(ansible_facts)!
|
||||
requirements:
|
||||
- hpOneView >= 2.0.1
|
||||
author:
|
||||
- Felipe Bulsoni (@fgbulsoni)
|
||||
- Thiago Miotto (@tmiotto)
|
||||
- Adriane Cardozo (@adriane-cardozo)
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Ethernet Network name.
|
||||
options:
|
||||
description:
|
||||
- "List with options to gather additional information about an Ethernet Network and related resources.
|
||||
Options allowed: C(associatedProfiles) and C(associatedUplinkGroups)."
|
||||
extends_documentation_fragment:
|
||||
- community.general.oneview
|
||||
- community.general.oneview.factsparams
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Gather information about all Ethernet Networks
|
||||
oneview_ethernet_network_info:
|
||||
config: /etc/oneview/oneview_config.json
|
||||
delegate_to: localhost
|
||||
register: result
|
||||
|
||||
- debug:
|
||||
msg: "{{ result.ethernet_networks }}"
|
||||
|
||||
- name: Gather paginated and filtered information about Ethernet Networks
|
||||
oneview_ethernet_network_info:
|
||||
config: /etc/oneview/oneview_config.json
|
||||
params:
|
||||
start: 1
|
||||
count: 3
|
||||
sort: 'name:descending'
|
||||
filter: 'purpose=General'
|
||||
delegate_to: localhost
|
||||
register: result
|
||||
|
||||
- debug:
|
||||
msg: "{{ result.ethernet_networks }}"
|
||||
|
||||
- name: Gather information about an Ethernet Network by name
|
||||
oneview_ethernet_network_info:
|
||||
config: /etc/oneview/oneview_config.json
|
||||
name: Ethernet network name
|
||||
delegate_to: localhost
|
||||
register: result
|
||||
|
||||
- debug:
|
||||
msg: "{{ result.ethernet_networks }}"
|
||||
|
||||
- name: Gather information about an Ethernet Network by name with options
|
||||
oneview_ethernet_network_info:
|
||||
config: /etc/oneview/oneview_config.json
|
||||
name: eth1
|
||||
options:
|
||||
- associatedProfiles
|
||||
- associatedUplinkGroups
|
||||
delegate_to: localhost
|
||||
register: result
|
||||
|
||||
- debug:
|
||||
msg: "{{ result.enet_associated_profiles }}"
|
||||
- debug:
|
||||
msg: "{{ result.enet_associated_uplink_groups }}"
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
ethernet_networks:
|
||||
description: Has all the OneView information about the Ethernet Networks.
|
||||
returned: Always, but can be null.
|
||||
type: dict
|
||||
|
||||
enet_associated_profiles:
|
||||
description: Has all the OneView information about the profiles which are using the Ethernet network.
|
||||
returned: When requested, but can be null.
|
||||
type: dict
|
||||
|
||||
enet_associated_uplink_groups:
|
||||
description: Has all the OneView information about the uplink sets which are using the Ethernet network.
|
||||
returned: When requested, but can be null.
|
||||
type: dict
|
||||
'''
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
|
||||
|
||||
|
||||
class EthernetNetworkInfoModule(OneViewModuleBase):
    """Gather facts about OneView Ethernet Networks.

    Retrieves all networks, or a single network by name, optionally with the
    associated server profiles and uplink groups.
    """

    def __init__(self):
        """Declare the module-specific arguments and bind the Ethernet Network client."""
        # Built locally instead of as a mutable class attribute, consistent
        # with the sibling *_info modules.
        argument_spec = dict(
            name=dict(type='str'),
            options=dict(type='list'),
            params=dict(type='dict')
        )

        super(EthernetNetworkInfoModule, self).__init__(additional_arg_spec=argument_spec)
        self.is_old_facts = self.module._name == 'oneview_ethernet_network_facts'
        if self.is_old_facts:
            self.module.deprecate("The 'oneview_ethernet_network_facts' module has been renamed to 'oneview_ethernet_network_info', "
                                  "and the renamed one no longer returns ansible_facts", version='2.13')

        self.resource_client = self.oneview_client.ethernet_networks

    def execute_module(self):
        """Fetch the networks and any requested optional facts, and shape the reply.

        Under the legacy *_facts name the data is wrapped in ``ansible_facts``;
        otherwise the facts are returned as top-level keys.
        """
        info = {}
        if self.module.params['name']:
            ethernet_networks = self.resource_client.get_by('name', self.module.params['name'])

            if self.module.params.get('options') and ethernet_networks:
                info = self.__gather_optional_info(ethernet_networks[0])
        else:
            ethernet_networks = self.resource_client.get_all(**self.facts_params)

        info['ethernet_networks'] = ethernet_networks

        if self.is_old_facts:
            return dict(changed=False, ansible_facts=info)
        else:
            return dict(changed=False, **info)

    def __gather_optional_info(self, ethernet_network):
        """Collect the extra facts requested through the 'options' argument."""
        info = {}

        if self.options.get('associatedProfiles'):
            info['enet_associated_profiles'] = self.__get_associated_profiles(ethernet_network)
        if self.options.get('associatedUplinkGroups'):
            info['enet_associated_uplink_groups'] = self.__get_associated_uplink_groups(ethernet_network)

        return info

    def __get_associated_profiles(self, ethernet_network):
        """Return the full server profiles that use this Ethernet network."""
        associated_profiles = self.resource_client.get_associated_profiles(ethernet_network['uri'])
        return [self.oneview_client.server_profiles.get(x) for x in associated_profiles]

    def __get_associated_uplink_groups(self, ethernet_network):
        """Return the full uplink sets that use this Ethernet network."""
        uplink_groups = self.resource_client.get_associated_uplink_groups(ethernet_network['uri'])
        return [self.oneview_client.uplink_sets.get(x) for x in uplink_groups]
|
||||
|
||||
|
||||
def main():
    """Entry point: build the module wrapper and execute it."""
    module = EthernetNetworkInfoModule()
    module.run()


if __name__ == '__main__':
    main()
|
||||
124
plugins/modules/remote_management/oneview/oneview_fc_network.py
Normal file
124
plugins/modules/remote_management/oneview/oneview_fc_network.py
Normal file
@@ -0,0 +1,124 @@
|
||||
#!/usr/bin/python
|
||||
# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: oneview_fc_network
|
||||
short_description: Manage OneView Fibre Channel Network resources
|
||||
description:
|
||||
- Provides an interface to manage Fibre Channel Network resources. Can create, update, and delete.
|
||||
requirements:
|
||||
- "hpOneView >= 4.0.0"
|
||||
author: "Felipe Bulsoni (@fgbulsoni)"
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
- Indicates the desired state for the Fibre Channel Network resource.
|
||||
C(present) will ensure data properties are compliant with OneView.
|
||||
C(absent) will remove the resource from OneView, if it exists.
|
||||
choices: ['present', 'absent']
|
||||
data:
|
||||
description:
|
||||
- List with the Fibre Channel Network properties.
|
||||
required: true
|
||||
|
||||
extends_documentation_fragment:
|
||||
- community.general.oneview
|
||||
- community.general.oneview.validateetag
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Ensure that the Fibre Channel Network is present using the default configuration
|
||||
oneview_fc_network:
|
||||
config: "{{ config_file_path }}"
|
||||
state: present
|
||||
data:
|
||||
name: 'New FC Network'
|
||||
|
||||
- name: Ensure that the Fibre Channel Network is present with fabricType 'DirectAttach'
|
||||
oneview_fc_network:
|
||||
config: "{{ config_file_path }}"
|
||||
state: present
|
||||
data:
|
||||
name: 'New FC Network'
|
||||
fabricType: 'DirectAttach'
|
||||
|
||||
- name: Ensure that the Fibre Channel Network is present and is inserted in the desired scopes
|
||||
oneview_fc_network:
|
||||
config: "{{ config_file_path }}"
|
||||
state: present
|
||||
data:
|
||||
name: 'New FC Network'
|
||||
scopeUris:
|
||||
- '/rest/scopes/00SC123456'
|
||||
- '/rest/scopes/01SC123456'
|
||||
|
||||
- name: Ensure that the Fibre Channel Network is absent
|
||||
oneview_fc_network:
|
||||
config: "{{ config_file_path }}"
|
||||
state: absent
|
||||
data:
|
||||
name: 'New FC Network'
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
fc_network:
|
||||
description: Has the facts about the managed OneView FC Network.
|
||||
returned: On state 'present'. Can be null.
|
||||
type: dict
|
||||
'''
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
|
||||
|
||||
|
||||
class FcNetworkModule(OneViewModuleBase):
    """Manage OneView Fibre Channel Network resources (create, update, delete)."""

    MSG_CREATED = 'FC Network created successfully.'
    MSG_UPDATED = 'FC Network updated successfully.'
    MSG_DELETED = 'FC Network deleted successfully.'
    MSG_ALREADY_PRESENT = 'FC Network is already present.'
    MSG_ALREADY_ABSENT = 'FC Network is already absent.'
    RESOURCE_FACT_NAME = 'fc_network'

    def __init__(self):
        """Declare the module-specific arguments and bind the FC Network client."""
        additional_arg_spec = dict(data=dict(required=True, type='dict'),
                                   state=dict(
                                       required=True,
                                       choices=['present', 'absent']))

        super(FcNetworkModule, self).__init__(additional_arg_spec=additional_arg_spec,
                                              validate_etag_support=True)

        self.resource_client = self.oneview_client.fc_networks

    def execute_module(self):
        """Look up the network by name and apply the requested state."""
        # .get() avoids an unhandled KeyError when 'data' carries no name,
        # matching the sibling FCoE module's behavior.
        resource = self.get_by_name(self.data.get('name'))

        if self.state == 'present':
            return self._present(resource)
        else:
            return self.resource_absent(resource)

    def _present(self, resource):
        """Ensure the FC Network exists with the requested data and scopes."""
        # scopeUris is applied through a dedicated API call, so pop it before
        # the generic create/update.
        scope_uris = self.data.pop('scopeUris', None)
        result = self.resource_present(resource, self.RESOURCE_FACT_NAME)
        if scope_uris is not None:
            result = self.resource_scopes_set(result, 'fc_network', scope_uris)
        return result
|
||||
|
||||
|
||||
def main():
    """Entry point: build the module wrapper and execute it."""
    module = FcNetworkModule()
    module.run()


if __name__ == '__main__':
    main()
|
||||
@@ -0,0 +1 @@
|
||||
oneview_fc_network_info.py
|
||||
@@ -0,0 +1,113 @@
|
||||
#!/usr/bin/python
|
||||
# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: oneview_fc_network_info
|
||||
short_description: Retrieve the information about one or more of the OneView Fibre Channel Networks
|
||||
description:
|
||||
- Retrieve the information about one or more of the Fibre Channel Networks from OneView.
|
||||
- This module was called C(oneview_fc_network_facts) before Ansible 2.9, returning C(ansible_facts).
|
||||
Note that the M(oneview_fc_network_info) module no longer returns C(ansible_facts)!
|
||||
requirements:
|
||||
- hpOneView >= 2.0.1
|
||||
author:
|
||||
- Felipe Bulsoni (@fgbulsoni)
|
||||
- Thiago Miotto (@tmiotto)
|
||||
- Adriane Cardozo (@adriane-cardozo)
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Fibre Channel Network name.
|
||||
|
||||
extends_documentation_fragment:
|
||||
- community.general.oneview
|
||||
- community.general.oneview.factsparams
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Gather information about all Fibre Channel Networks
|
||||
oneview_fc_network_info:
|
||||
config: /etc/oneview/oneview_config.json
|
||||
delegate_to: localhost
|
||||
register: result
|
||||
|
||||
- debug:
|
||||
msg: "{{ result.fc_networks }}"
|
||||
|
||||
- name: Gather paginated, filtered and sorted information about Fibre Channel Networks
|
||||
oneview_fc_network_info:
|
||||
config: /etc/oneview/oneview_config.json
|
||||
params:
|
||||
start: 1
|
||||
count: 3
|
||||
sort: 'name:descending'
|
||||
filter: 'fabricType=FabricAttach'
|
||||
delegate_to: localhost
|
||||
register: result
|
||||
- debug:
|
||||
msg: "{{ result.fc_networks }}"
|
||||
|
||||
- name: Gather information about a Fibre Channel Network by name
|
||||
oneview_fc_network_info:
|
||||
config: /etc/oneview/oneview_config.json
|
||||
name: network name
|
||||
delegate_to: localhost
|
||||
register: result
|
||||
|
||||
- debug:
|
||||
msg: "{{ result.fc_networks }}"
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
fc_networks:
|
||||
description: Has all the OneView information about the Fibre Channel Networks.
|
||||
returned: Always, but can be null.
|
||||
type: dict
|
||||
'''
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
|
||||
|
||||
|
||||
class FcNetworkInfoModule(OneViewModuleBase):
    """Gather facts about OneView Fibre Channel Networks, optionally filtered by name."""

    def __init__(self):
        """Register the module arguments and emit the rename deprecation when
        invoked under the legacy *_facts name."""
        argument_spec = dict(
            name=dict(required=False, type='str'),
            params=dict(required=False, type='dict')
        )

        super(FcNetworkInfoModule, self).__init__(additional_arg_spec=argument_spec)
        self.is_old_facts = self.module._name == 'oneview_fc_network_facts'
        if self.is_old_facts:
            self.module.deprecate("The 'oneview_fc_network_facts' module has been renamed to 'oneview_fc_network_info', "
                                  "and the renamed one no longer returns ansible_facts", version='2.13')

    def execute_module(self):
        """Fetch the networks (by name when given, otherwise all) and shape the reply."""
        client = self.oneview_client.fc_networks
        name = self.module.params['name']

        networks = client.get_by('name', name) if name else client.get_all(**self.facts_params)

        if self.is_old_facts:
            return dict(changed=False, ansible_facts=dict(fc_networks=networks))
        return dict(changed=False, fc_networks=networks)
|
||||
|
||||
|
||||
def main():
    """Entry point: build the module wrapper and execute it."""
    module = FcNetworkInfoModule()
    module.run()


if __name__ == '__main__':
    main()
|
||||
@@ -0,0 +1,121 @@
|
||||
#!/usr/bin/python
|
||||
# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: oneview_fcoe_network
|
||||
short_description: Manage OneView FCoE Network resources
|
||||
description:
|
||||
- Provides an interface to manage FCoE Network resources. Can create, update, or delete.
|
||||
requirements:
|
||||
- "python >= 2.7.9"
|
||||
- "hpOneView >= 4.0.0"
|
||||
author: "Felipe Bulsoni (@fgbulsoni)"
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
- Indicates the desired state for the FCoE Network resource.
|
||||
C(present) will ensure data properties are compliant with OneView.
|
||||
C(absent) will remove the resource from OneView, if it exists.
|
||||
default: present
|
||||
choices: ['present', 'absent']
|
||||
data:
|
||||
description:
|
||||
- List with FCoE Network properties.
|
||||
required: true
|
||||
|
||||
extends_documentation_fragment:
|
||||
- community.general.oneview
|
||||
- community.general.oneview.validateetag
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Ensure that FCoE Network is present using the default configuration
|
||||
oneview_fcoe_network:
|
||||
config: '/etc/oneview/oneview_config.json'
|
||||
state: present
|
||||
data:
|
||||
name: Test FCoE Network
|
||||
vlanId: 201
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Update the FCOE network scopes
|
||||
oneview_fcoe_network:
|
||||
config: '/etc/oneview/oneview_config.json'
|
||||
state: present
|
||||
data:
|
||||
name: New FCoE Network
|
||||
scopeUris:
|
||||
- '/rest/scopes/00SC123456'
|
||||
- '/rest/scopes/01SC123456'
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Ensure that FCoE Network is absent
|
||||
oneview_fcoe_network:
|
||||
config: '/etc/oneview/oneview_config.json'
|
||||
state: absent
|
||||
data:
|
||||
name: New FCoE Network
|
||||
delegate_to: localhost
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
fcoe_network:
|
||||
description: Has the facts about the OneView FCoE Networks.
|
||||
returned: On state 'present'. Can be null.
|
||||
type: dict
|
||||
'''
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
|
||||
|
||||
|
||||
class FcoeNetworkModule(OneViewModuleBase):
    """Manage OneView FCoE Network resources (create, update, delete)."""

    MSG_CREATED = 'FCoE Network created successfully.'
    MSG_UPDATED = 'FCoE Network updated successfully.'
    MSG_DELETED = 'FCoE Network deleted successfully.'
    MSG_ALREADY_PRESENT = 'FCoE Network is already present.'
    MSG_ALREADY_ABSENT = 'FCoE Network is already absent.'
    RESOURCE_FACT_NAME = 'fcoe_network'

    def __init__(self):
        """Declare the module-specific arguments and bind the FCoE Network client."""
        additional_arg_spec = dict(data=dict(required=True, type='dict'),
                                   state=dict(default='present',
                                              choices=['present', 'absent']))

        super(FcoeNetworkModule, self).__init__(additional_arg_spec=additional_arg_spec,
                                                validate_etag_support=True)

        self.resource_client = self.oneview_client.fcoe_networks

    def execute_module(self):
        """Look up the network by name and apply the requested state."""
        resource = self.get_by_name(self.data.get('name'))

        if self.state == 'present':
            return self.__present(resource)
        else:
            # 'absent' is the only other value the argument spec accepts; an
            # explicit else avoids ever silently returning None.
            return self.resource_absent(resource)

    def __present(self, resource):
        """Ensure the FCoE Network exists with the requested data and scopes."""
        # scopeUris is applied through a dedicated API call, so pop it before
        # the generic create/update.
        scope_uris = self.data.pop('scopeUris', None)
        result = self.resource_present(resource, self.RESOURCE_FACT_NAME)
        if scope_uris is not None:
            result = self.resource_scopes_set(result, 'fcoe_network', scope_uris)
        return result
|
||||
|
||||
|
||||
def main():
    """Entry point: build the module wrapper and execute it."""
    module = FcoeNetworkModule()
    module.run()


if __name__ == '__main__':
    main()
|
||||
@@ -0,0 +1 @@
|
||||
oneview_fcoe_network_info.py
|
||||
@@ -0,0 +1,113 @@
|
||||
#!/usr/bin/python
|
||||
# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: oneview_fcoe_network_info
|
||||
short_description: Retrieve the information about one or more of the OneView FCoE Networks
|
||||
description:
|
||||
- Retrieve the information about one or more of the FCoE Networks from OneView.
|
||||
- This module was called C(oneview_fcoe_network_facts) before Ansible 2.9, returning C(ansible_facts).
|
||||
Note that the M(oneview_fcoe_network_info) module no longer returns C(ansible_facts)!
|
||||
requirements:
|
||||
- hpOneView >= 2.0.1
|
||||
author:
|
||||
- Felipe Bulsoni (@fgbulsoni)
|
||||
- Thiago Miotto (@tmiotto)
|
||||
- Adriane Cardozo (@adriane-cardozo)
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- FCoE Network name.
|
||||
extends_documentation_fragment:
|
||||
- community.general.oneview
|
||||
- community.general.oneview.factsparams
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Gather information about all FCoE Networks
|
||||
oneview_fcoe_network_info:
|
||||
config: /etc/oneview/oneview_config.json
|
||||
delegate_to: localhost
|
||||
register: result
|
||||
|
||||
- debug:
|
||||
msg: "{{ result.fcoe_networks }}"
|
||||
|
||||
- name: Gather paginated, filtered and sorted information about FCoE Networks
|
||||
oneview_fcoe_network_info:
|
||||
config: /etc/oneview/oneview_config.json
|
||||
params:
|
||||
start: 0
|
||||
count: 3
|
||||
sort: 'name:descending'
|
||||
filter: 'vlanId=2'
|
||||
delegate_to: localhost
|
||||
register: result
|
||||
|
||||
- debug:
|
||||
msg: "{{ result.fcoe_networks }}"
|
||||
|
||||
- name: Gather information about a FCoE Network by name
|
||||
oneview_fcoe_network_info:
|
||||
config: /etc/oneview/oneview_config.json
|
||||
name: Test FCoE Network Information
|
||||
delegate_to: localhost
|
||||
register: result
|
||||
|
||||
- debug:
|
||||
msg: "{{ result.fcoe_networks }}"
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
fcoe_networks:
|
||||
description: Has all the OneView information about the FCoE Networks.
|
||||
returned: Always, but can be null.
|
||||
type: dict
|
||||
'''
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
|
||||
|
||||
|
||||
class FcoeNetworkInfoModule(OneViewModuleBase):
    """Gather facts about OneView FCoE Networks, optionally filtered by name."""

    def __init__(self):
        """Register the module arguments and emit the rename deprecation when
        invoked under the legacy *_facts name."""
        argument_spec = dict(
            name=dict(type='str'),
            params=dict(type='dict'),
        )

        super(FcoeNetworkInfoModule, self).__init__(additional_arg_spec=argument_spec)
        self.is_old_facts = self.module._name == 'oneview_fcoe_network_facts'
        if self.is_old_facts:
            self.module.deprecate("The 'oneview_fcoe_network_facts' module has been renamed to 'oneview_fcoe_network_info', "
                                  "and the renamed one no longer returns ansible_facts", version='2.13')

    def execute_module(self):
        """Fetch the networks (by name when given, otherwise all) and shape the reply."""
        client = self.oneview_client.fcoe_networks
        name = self.module.params['name']

        networks = client.get_by('name', name) if name else client.get_all(**self.facts_params)

        if self.is_old_facts:
            return dict(changed=False,
                        ansible_facts=dict(fcoe_networks=networks))
        return dict(changed=False, fcoe_networks=networks)
|
||||
|
||||
|
||||
def main():
    """Entry point: build the module wrapper and execute it."""
    module = FcoeNetworkInfoModule()
    module.run()


if __name__ == '__main__':
    main()
|
||||
@@ -0,0 +1,168 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
# Copyright: (c) 2016-2017, Hewlett Packard Enterprise Development LP
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: oneview_logical_interconnect_group
|
||||
short_description: Manage OneView Logical Interconnect Group resources
|
||||
description:
|
||||
- Provides an interface to manage Logical Interconnect Group resources. Can create, update, or delete.
|
||||
requirements:
|
||||
- hpOneView >= 4.0.0
|
||||
author:
|
||||
- Felipe Bulsoni (@fgbulsoni)
|
||||
- Thiago Miotto (@tmiotto)
|
||||
- Adriane Cardozo (@adriane-cardozo)
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
- Indicates the desired state for the Logical Interconnect Group resource.
|
||||
C(absent) will remove the resource from OneView, if it exists.
|
||||
C(present) will ensure data properties are compliant with OneView.
|
||||
choices: [absent, present]
|
||||
default: present
|
||||
data:
|
||||
description:
|
||||
- List with the Logical Interconnect Group properties.
|
||||
required: true
|
||||
extends_documentation_fragment:
|
||||
- community.general.oneview
|
||||
- community.general.oneview.validateetag
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Ensure that the Logical Interconnect Group is present
|
||||
oneview_logical_interconnect_group:
|
||||
config: /etc/oneview/oneview_config.json
|
||||
state: present
|
||||
data:
|
||||
name: Test Logical Interconnect Group
|
||||
uplinkSets: []
|
||||
enclosureType: C7000
|
||||
interconnectMapTemplate:
|
||||
interconnectMapEntryTemplates:
|
||||
- logicalDownlinkUri: ~
|
||||
logicalLocation:
|
||||
locationEntries:
|
||||
- relativeValue: 1
|
||||
type: Bay
|
||||
- relativeValue: 1
|
||||
type: Enclosure
|
||||
permittedInterconnectTypeName: HP VC Flex-10/10D Module
|
||||
# Alternatively you can inform permittedInterconnectTypeUri
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Ensure that the Logical Interconnect Group has the specified scopes
|
||||
oneview_logical_interconnect_group:
|
||||
config: /etc/oneview/oneview_config.json
|
||||
state: present
|
||||
data:
|
||||
name: Test Logical Interconnect Group
|
||||
scopeUris:
|
||||
- /rest/scopes/00SC123456
|
||||
- /rest/scopes/01SC123456
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Ensure that the Logical Interconnect Group is present with name 'Test'
|
||||
oneview_logical_interconnect_group:
|
||||
config: /etc/oneview/oneview_config.json
|
||||
state: present
|
||||
data:
|
||||
name: New Logical Interconnect Group
|
||||
newName: Test
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Ensure that the Logical Interconnect Group is absent
|
||||
oneview_logical_interconnect_group:
|
||||
config: /etc/oneview/oneview_config.json
|
||||
state: absent
|
||||
data:
|
||||
name: New Logical Interconnect Group
|
||||
delegate_to: localhost
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
logical_interconnect_group:
|
||||
description: Has the facts about the OneView Logical Interconnect Group.
|
||||
returned: On state 'present'. Can be null.
|
||||
type: dict
|
||||
'''
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase, OneViewModuleResourceNotFound
|
||||
|
||||
|
||||
class LogicalInterconnectGroupModule(OneViewModuleBase):
    """Ensure presence or absence of a OneView Logical Interconnect Group."""

    MSG_CREATED = 'Logical Interconnect Group created successfully.'
    MSG_UPDATED = 'Logical Interconnect Group updated successfully.'
    MSG_DELETED = 'Logical Interconnect Group deleted successfully.'
    MSG_ALREADY_PRESENT = 'Logical Interconnect Group is already present.'
    MSG_ALREADY_ABSENT = 'Logical Interconnect Group is already absent.'
    MSG_INTERCONNECT_TYPE_NOT_FOUND = 'Interconnect Type was not found.'

    RESOURCE_FACT_NAME = 'logical_interconnect_group'

    def __init__(self):
        extra_args = dict(
            state=dict(default='present', choices=['present', 'absent']),
            data=dict(required=True, type='dict')
        )

        super(LogicalInterconnectGroupModule, self).__init__(additional_arg_spec=extra_args,
                                                             validate_etag_support=True)
        self.resource_client = self.oneview_client.logical_interconnect_groups

    def execute_module(self):
        """Look up the LIG by name and dispatch on the requested state."""
        existing = self.get_by_name(self.data['name'])

        if self.state == 'absent':
            return self.resource_absent(existing)
        if self.state == 'present':
            return self._ensure_present(existing)

    def _ensure_present(self, resource):
        """Create/update the LIG, then apply any requested scopes."""
        requested_scopes = self.data.pop('scopeUris', None)

        self._resolve_interconnect_type_names(self.data)
        outcome = self.resource_present(resource, self.RESOURCE_FACT_NAME)

        if requested_scopes is not None:
            outcome = self.resource_scopes_set(outcome, 'logical_interconnect_group', requested_scopes)

        return outcome

    def _resolve_interconnect_type_names(self, data):
        """Replace permittedInterconnectTypeName entries with their URIs, in place."""
        map_template = data.get('interconnectMapTemplate')
        if not map_template:
            return
        entry_templates = map_template.get('interconnectMapEntryTemplates')
        if not entry_templates:
            return

        for entry in entry_templates:
            type_name = entry.pop('permittedInterconnectTypeName', None)
            if type_name:
                entry['permittedInterconnectTypeUri'] = self._interconnect_type_by_name(type_name).get('uri')

    def _interconnect_type_by_name(self, name):
        """Look up an Interconnect Type by name; raise when it does not exist."""
        matches = self.oneview_client.interconnect_types.get_by('name', name)
        if not matches:
            raise OneViewModuleResourceNotFound(self.MSG_INTERCONNECT_TYPE_NOT_FOUND)
        return matches[0]
|
||||
|
||||
|
||||
def main():
    """Module entry point."""
    module = LogicalInterconnectGroupModule()
    module.run()


if __name__ == '__main__':
    main()
|
||||
@@ -0,0 +1 @@
|
||||
oneview_logical_interconnect_group_info.py
|
||||
@@ -0,0 +1,125 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
# Copyright: (c) 2016-2017, Hewlett Packard Enterprise Development LP
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: oneview_logical_interconnect_group_info
|
||||
short_description: Retrieve information about one or more of the OneView Logical Interconnect Groups
|
||||
description:
|
||||
- Retrieve information about one or more of the Logical Interconnect Groups from OneView
|
||||
- This module was called C(oneview_logical_interconnect_group_facts) before Ansible 2.9, returning C(ansible_facts).
|
||||
Note that the M(oneview_logical_interconnect_group_info) module no longer returns C(ansible_facts)!
|
||||
requirements:
|
||||
- hpOneView >= 2.0.1
|
||||
author:
|
||||
- Felipe Bulsoni (@fgbulsoni)
|
||||
- Thiago Miotto (@tmiotto)
|
||||
- Adriane Cardozo (@adriane-cardozo)
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Logical Interconnect Group name.
|
||||
extends_documentation_fragment:
|
||||
- community.general.oneview
|
||||
- community.general.oneview.factsparams
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Gather information about all Logical Interconnect Groups
|
||||
oneview_logical_interconnect_group_info:
|
||||
hostname: 172.16.101.48
|
||||
username: administrator
|
||||
password: my_password
|
||||
api_version: 500
|
||||
no_log: true
|
||||
delegate_to: localhost
|
||||
register: result
|
||||
|
||||
- debug:
|
||||
msg: "{{ result.logical_interconnect_groups }}"
|
||||
|
||||
- name: Gather paginated, filtered and sorted information about Logical Interconnect Groups
|
||||
oneview_logical_interconnect_group_info:
|
||||
params:
|
||||
start: 0
|
||||
count: 3
|
||||
sort: name:descending
|
||||
filter: name=LIGName
|
||||
hostname: 172.16.101.48
|
||||
username: administrator
|
||||
password: my_password
|
||||
api_version: 500
|
||||
no_log: true
|
||||
delegate_to: localhost
|
||||
register: result
|
||||
|
||||
- debug:
|
||||
msg: "{{ result.logical_interconnect_groups }}"
|
||||
|
||||
- name: Gather information about a Logical Interconnect Group by name
|
||||
oneview_logical_interconnect_group_info:
|
||||
name: logical interconnect group name
|
||||
hostname: 172.16.101.48
|
||||
username: administrator
|
||||
password: my_password
|
||||
api_version: 500
|
||||
no_log: true
|
||||
delegate_to: localhost
|
||||
register: result
|
||||
|
||||
- debug:
|
||||
msg: "{{ result.logical_interconnect_groups }}"
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
logical_interconnect_groups:
|
||||
description: Has all the OneView information about the Logical Interconnect Groups.
|
||||
returned: Always, but can be null.
|
||||
type: dict
|
||||
'''
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
|
||||
|
||||
|
||||
class LogicalInterconnectGroupInfoModule(OneViewModuleBase):
    """Collect Logical Interconnect Group information from HPE OneView."""

    def __init__(self):
        extra_args = dict(
            name=dict(type='str'),
            params=dict(type='dict'),
        )
        super(LogicalInterconnectGroupInfoModule, self).__init__(additional_arg_spec=extra_args)

        # The old *_facts alias keeps returning ansible_facts for compatibility.
        self.is_old_facts = self.module._name == 'oneview_logical_interconnect_group_facts'
        if self.is_old_facts:
            self.module.deprecate("The 'oneview_logical_interconnect_group_facts' module has been renamed to 'oneview_logical_interconnect_group_info', "
                                  "and the renamed one no longer returns ansible_facts", version='2.13')

    def execute_module(self):
        """Fetch the LIGs: by name when one is given, otherwise all of them."""
        lig_client = self.oneview_client.logical_interconnect_groups
        requested_name = self.module.params.get('name')

        if requested_name:
            ligs = lig_client.get_by('name', requested_name)
        else:
            ligs = lig_client.get_all(**self.facts_params)

        if self.is_old_facts:
            return dict(changed=False, ansible_facts=dict(logical_interconnect_groups=ligs))
        return dict(changed=False, logical_interconnect_groups=ligs)
|
||||
|
||||
|
||||
def main():
    """Module entry point."""
    module = LogicalInterconnectGroupInfoModule()
    module.run()


if __name__ == '__main__':
    main()
|
||||
154
plugins/modules/remote_management/oneview/oneview_network_set.py
Normal file
154
plugins/modules/remote_management/oneview/oneview_network_set.py
Normal file
@@ -0,0 +1,154 @@
|
||||
#!/usr/bin/python
|
||||
# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: oneview_network_set
|
||||
short_description: Manage HPE OneView Network Set resources
|
||||
description:
|
||||
- Provides an interface to manage Network Set resources. Can create, update, or delete.
|
||||
requirements:
|
||||
- hpOneView >= 4.0.0
|
||||
author:
|
||||
- Felipe Bulsoni (@fgbulsoni)
|
||||
- Thiago Miotto (@tmiotto)
|
||||
- Adriane Cardozo (@adriane-cardozo)
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
- Indicates the desired state for the Network Set resource.
|
||||
- C(present) will ensure data properties are compliant with OneView.
|
||||
- C(absent) will remove the resource from OneView, if it exists.
|
||||
default: present
|
||||
choices: ['present', 'absent']
|
||||
data:
|
||||
description:
|
||||
- List with the Network Set properties.
|
||||
required: true
|
||||
|
||||
extends_documentation_fragment:
|
||||
- community.general.oneview
|
||||
- community.general.oneview.validateetag
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create a Network Set
|
||||
oneview_network_set:
|
||||
config: /etc/oneview/oneview_config.json
|
||||
state: present
|
||||
data:
|
||||
name: OneViewSDK Test Network Set
|
||||
networkUris:
|
||||
- Test Ethernet Network_1 # can be a name
|
||||
- /rest/ethernet-networks/e4360c9d-051d-4931-b2aa-7de846450dd8 # or a URI
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Update the Network Set name to 'OneViewSDK Test Network Set - Renamed' and change the associated networks
|
||||
oneview_network_set:
|
||||
config: /etc/oneview/oneview_config.json
|
||||
state: present
|
||||
data:
|
||||
name: OneViewSDK Test Network Set
|
||||
newName: OneViewSDK Test Network Set - Renamed
|
||||
networkUris:
|
||||
- Test Ethernet Network_1
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Delete the Network Set
|
||||
oneview_network_set:
|
||||
config: /etc/oneview/oneview_config.json
|
||||
state: absent
|
||||
data:
|
||||
name: OneViewSDK Test Network Set - Renamed
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Update the Network set with two scopes
|
||||
oneview_network_set:
|
||||
config: /etc/oneview/oneview_config.json
|
||||
state: present
|
||||
data:
|
||||
name: OneViewSDK Test Network Set
|
||||
scopeUris:
|
||||
- /rest/scopes/01SC123456
|
||||
- /rest/scopes/02SC123456
|
||||
delegate_to: localhost
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
network_set:
|
||||
description: Has the facts about the Network Set.
|
||||
returned: On state 'present', but can be null.
|
||||
type: dict
|
||||
'''
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase, OneViewModuleResourceNotFound
|
||||
|
||||
|
||||
class NetworkSetModule(OneViewModuleBase):
    """Ensure presence or absence of a OneView Network Set."""

    MSG_CREATED = 'Network Set created successfully.'
    MSG_UPDATED = 'Network Set updated successfully.'
    MSG_DELETED = 'Network Set deleted successfully.'
    MSG_ALREADY_PRESENT = 'Network Set is already present.'
    MSG_ALREADY_ABSENT = 'Network Set is already absent.'
    MSG_ETHERNET_NETWORK_NOT_FOUND = 'Ethernet Network not found: '
    RESOURCE_FACT_NAME = 'network_set'

    argument_spec = dict(
        state=dict(default='present', choices=['present', 'absent']),
        data=dict(required=True, type='dict'))

    def __init__(self):
        super(NetworkSetModule, self).__init__(additional_arg_spec=self.argument_spec,
                                               validate_etag_support=True)
        self.resource_client = self.oneview_client.network_sets

    def execute_module(self):
        """Look up the network set by name and dispatch on the requested state."""
        existing = self.get_by_name(self.data.get('name'))

        if self.state == 'absent':
            return self.resource_absent(existing)
        if self.state == 'present':
            return self._present(existing)

    def _present(self, resource):
        """Create/update the network set, then apply scopes when requested."""
        requested_scopes = self.data.pop('scopeUris', None)
        self._replace_network_name_by_uri(self.data)
        outcome = self.resource_present(resource, self.RESOURCE_FACT_NAME)
        if requested_scopes is not None:
            outcome = self.resource_scopes_set(outcome, self.RESOURCE_FACT_NAME, requested_scopes)
        return outcome

    def _get_ethernet_network_by_name(self, name):
        """Return the first Ethernet network with the given name, or None."""
        matches = self.oneview_client.ethernet_networks.get_by('name', name)
        return matches[0] if matches else None

    def _get_network_uri(self, network_name_or_uri):
        """Resolve a network name to its URI; URIs are passed through unchanged."""
        if network_name_or_uri.startswith('/rest/ethernet-networks'):
            return network_name_or_uri
        enet_network = self._get_ethernet_network_by_name(network_name_or_uri)
        if not enet_network:
            raise OneViewModuleResourceNotFound(self.MSG_ETHERNET_NETWORK_NOT_FOUND + network_name_or_uri)
        return enet_network['uri']

    def _replace_network_name_by_uri(self, data):
        """Normalize every entry of data['networkUris'] to a URI, in place."""
        if 'networkUris' in data:
            data['networkUris'] = [self._get_network_uri(entry) for entry in data['networkUris']]
|
||||
|
||||
|
||||
def main():
    """Module entry point."""
    module = NetworkSetModule()
    module.run()


if __name__ == '__main__':
    main()
|
||||
@@ -0,0 +1 @@
|
||||
oneview_network_set_info.py
|
||||
@@ -0,0 +1,169 @@
|
||||
#!/usr/bin/python
|
||||
# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: oneview_network_set_info
|
||||
short_description: Retrieve information about the OneView Network Sets
|
||||
description:
|
||||
- Retrieve information about the Network Sets from OneView.
|
||||
- This module was called C(oneview_network_set_facts) before Ansible 2.9, returning C(ansible_facts).
|
||||
Note that the M(oneview_network_set_info) module no longer returns C(ansible_facts)!
|
||||
requirements:
|
||||
- hpOneView >= 2.0.1
|
||||
author:
|
||||
- Felipe Bulsoni (@fgbulsoni)
|
||||
- Thiago Miotto (@tmiotto)
|
||||
- Adriane Cardozo (@adriane-cardozo)
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Network Set name.
|
||||
|
||||
options:
|
||||
description:
|
||||
- "List with options to gather information about Network Set.
|
||||
Option allowed: C(withoutEthernet).
|
||||
The option C(withoutEthernet) retrieves the list of network_sets excluding Ethernet networks."
|
||||
|
||||
extends_documentation_fragment:
|
||||
- community.general.oneview
|
||||
- community.general.oneview.factsparams
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Gather information about all Network Sets
|
||||
oneview_network_set_info:
|
||||
hostname: 172.16.101.48
|
||||
username: administrator
|
||||
password: my_password
|
||||
api_version: 500
|
||||
no_log: true
|
||||
delegate_to: localhost
|
||||
register: result
|
||||
|
||||
- debug:
|
||||
msg: "{{ result.network_sets }}"
|
||||
|
||||
- name: Gather paginated, filtered, and sorted information about Network Sets
|
||||
oneview_network_set_info:
|
||||
hostname: 172.16.101.48
|
||||
username: administrator
|
||||
password: my_password
|
||||
api_version: 500
|
||||
params:
|
||||
start: 0
|
||||
count: 3
|
||||
sort: 'name:descending'
|
||||
filter: name='netset001'
|
||||
no_log: true
|
||||
delegate_to: localhost
|
||||
register: result
|
||||
|
||||
- debug:
|
||||
msg: "{{ result.network_sets }}"
|
||||
|
||||
- name: Gather information about all Network Sets, excluding Ethernet networks
|
||||
oneview_network_set_info:
|
||||
hostname: 172.16.101.48
|
||||
username: administrator
|
||||
password: my_password
|
||||
api_version: 500
|
||||
options:
|
||||
- withoutEthernet
|
||||
no_log: true
|
||||
delegate_to: localhost
|
||||
register: result
|
||||
|
||||
- debug:
|
||||
msg: "{{ result.network_sets }}"
|
||||
|
||||
- name: Gather information about a Network Set by name
|
||||
oneview_network_set_info:
|
||||
hostname: 172.16.101.48
|
||||
username: administrator
|
||||
password: my_password
|
||||
api_version: 500
|
||||
name: Name of the Network Set
|
||||
no_log: true
|
||||
delegate_to: localhost
|
||||
register: result
|
||||
|
||||
- debug:
|
||||
msg: "{{ result.network_sets }}"
|
||||
|
||||
- name: Gather information about a Network Set by name, excluding Ethernet networks
|
||||
oneview_network_set_info:
|
||||
hostname: 172.16.101.48
|
||||
username: administrator
|
||||
password: my_password
|
||||
api_version: 500
|
||||
name: Name of the Network Set
|
||||
options:
|
||||
- withoutEthernet
|
||||
no_log: true
|
||||
delegate_to: localhost
|
||||
register: result
|
||||
|
||||
- debug:
|
||||
msg: "{{ result.network_sets }}"
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
network_sets:
|
||||
description: Has all the OneView information about the Network Sets.
|
||||
returned: Always, but can be empty.
|
||||
type: dict
|
||||
'''
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
|
||||
|
||||
|
||||
class NetworkSetInfoModule(OneViewModuleBase):
    """Collect Network Set information from HPE OneView."""

    argument_spec = dict(
        name=dict(type='str'),
        options=dict(type='list'),
        params=dict(type='dict'),
    )

    def __init__(self):
        super(NetworkSetInfoModule, self).__init__(additional_arg_spec=self.argument_spec)

        # The old *_facts alias keeps returning ansible_facts for compatibility.
        self.is_old_facts = self.module._name == 'oneview_network_set_facts'
        if self.is_old_facts:
            self.module.deprecate("The 'oneview_network_set_facts' module has been renamed to 'oneview_network_set_info', "
                                  "and the renamed one no longer returns ansible_facts", version='2.13')

    def execute_module(self):
        """Fetch network sets, honouring the withoutEthernet option and name filter."""
        requested_name = self.module.params.get('name')
        client = self.oneview_client.network_sets

        if 'withoutEthernet' in self.options:
            # The SDK expects the name filter as a quoted expression string.
            name_filter = ("\"'name'='%s'\"" % requested_name) if requested_name else ''
            network_sets = client.get_all_without_ethernet(filter=name_filter)
        elif requested_name:
            network_sets = client.get_by('name', requested_name)
        else:
            network_sets = client.get_all(**self.facts_params)

        if self.is_old_facts:
            return dict(changed=False, ansible_facts=dict(network_sets=network_sets))
        return dict(changed=False, network_sets=network_sets)
|
||||
|
||||
|
||||
def main():
    """Module entry point."""
    module = NetworkSetInfoModule()
    module.run()


if __name__ == '__main__':
    main()
|
||||
219
plugins/modules/remote_management/oneview/oneview_san_manager.py
Normal file
219
plugins/modules/remote_management/oneview/oneview_san_manager.py
Normal file
@@ -0,0 +1,219 @@
|
||||
#!/usr/bin/python
|
||||
# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: oneview_san_manager
|
||||
short_description: Manage OneView SAN Manager resources
|
||||
description:
|
||||
- Provides an interface to manage SAN Manager resources. Can create, update, or delete.
|
||||
requirements:
|
||||
- hpOneView >= 3.1.1
|
||||
author:
|
||||
- Felipe Bulsoni (@fgbulsoni)
|
||||
- Thiago Miotto (@tmiotto)
|
||||
- Adriane Cardozo (@adriane-cardozo)
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
- Indicates the desired state for the Uplink Set resource.
|
||||
- C(present) ensures data properties are compliant with OneView.
|
||||
- C(absent) removes the resource from OneView, if it exists.
|
||||
- C(connection_information_set) updates the connection information for the SAN Manager. This operation is non-idempotent.
|
||||
default: present
|
||||
choices: [present, absent, connection_information_set]
|
||||
data:
|
||||
description:
|
||||
- List with SAN Manager properties.
|
||||
required: true
|
||||
|
||||
extends_documentation_fragment:
|
||||
- community.general.oneview
|
||||
- community.general.oneview.validateetag
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Creates a Device Manager for the Brocade SAN provider with the given hostname and credentials
|
||||
oneview_san_manager:
|
||||
config: /etc/oneview/oneview_config.json
|
||||
state: present
|
||||
data:
|
||||
providerDisplayName: Brocade Network Advisor
|
||||
connectionInfo:
|
||||
- name: Host
|
||||
value: 172.18.15.1
|
||||
- name: Port
|
||||
value: 5989
|
||||
- name: Username
|
||||
value: username
|
||||
- name: Password
|
||||
value: password
|
||||
- name: UseSsl
|
||||
value: true
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Ensure a Device Manager for the Cisco SAN Provider is present
|
||||
oneview_san_manager:
|
||||
config: /etc/oneview/oneview_config.json
|
||||
state: present
|
||||
data:
|
||||
name: 172.18.20.1
|
||||
providerDisplayName: Cisco
|
||||
connectionInfo:
|
||||
- name: Host
|
||||
value: 172.18.20.1
|
||||
- name: SnmpPort
|
||||
value: 161
|
||||
- name: SnmpUserName
|
||||
value: admin
|
||||
- name: SnmpAuthLevel
|
||||
value: authnopriv
|
||||
- name: SnmpAuthProtocol
|
||||
value: sha
|
||||
- name: SnmpAuthString
|
||||
value: password
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Sets the SAN Manager connection information
|
||||
oneview_san_manager:
|
||||
config: /etc/oneview/oneview_config.json
|
||||
state: connection_information_set
|
||||
data:
|
||||
connectionInfo:
|
||||
- name: Host
|
||||
value: '172.18.15.1'
|
||||
- name: Port
|
||||
value: '5989'
|
||||
- name: Username
|
||||
value: 'username'
|
||||
- name: Password
|
||||
value: 'password'
|
||||
- name: UseSsl
|
||||
value: true
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Refreshes the SAN Manager
|
||||
oneview_san_manager:
|
||||
config: /etc/oneview/oneview_config.json
|
||||
state: present
|
||||
data:
|
||||
name: 172.18.15.1
|
||||
refreshState: RefreshPending
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Delete the SAN Manager recently created
|
||||
oneview_san_manager:
|
||||
config: /etc/oneview/oneview_config.json
|
||||
state: absent
|
||||
data:
|
||||
name: '172.18.15.1'
|
||||
delegate_to: localhost
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
san_manager:
|
||||
description: Has the OneView facts about the SAN Manager.
|
||||
returned: On state 'present'. Can be null.
|
||||
type: dict
|
||||
'''
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase, OneViewModuleValueError
|
||||
|
||||
|
||||
class SanManagerModule(OneViewModuleBase):
    """Manage OneView SAN Manager resources (add, update, set connection info, remove)."""

    MSG_CREATED = 'SAN Manager created successfully.'
    MSG_UPDATED = 'SAN Manager updated successfully.'
    MSG_DELETED = 'SAN Manager deleted successfully.'
    MSG_ALREADY_PRESENT = 'SAN Manager is already present.'
    MSG_ALREADY_ABSENT = 'SAN Manager is already absent.'
    MSG_SAN_MANAGER_PROVIDER_DISPLAY_NAME_NOT_FOUND = "The provider '{0}' was not found."

    argument_spec = dict(
        state=dict(type='str', default='present', choices=['absent', 'present', 'connection_information_set']),
        data=dict(type='dict', required=True)
    )

    def __init__(self):
        super(SanManagerModule, self).__init__(additional_arg_spec=self.argument_spec, validate_etag_support=True)
        self.resource_client = self.oneview_client.san_managers

    def execute_module(self):
        """Resolve the resource name, look the SAN Manager up, and dispatch on state.

        The resource name is taken from the 'Host' entry of data.connectionInfo
        when one exists, falling back to data.name; when neither yields a name,
        OneViewModuleValueError is raised.
        """
        resource_name = None
        # Prefer the 'Host' value from connectionInfo as the resource name.
        for connection_hash in self.data.get('connectionInfo') or []:
            if connection_hash.get('name') == 'Host':
                resource_name = connection_hash.get('value')
        # Fall back to an explicit 'name'.  (Previously, a connectionInfo list
        # without a 'Host' entry crashed here with an UnboundLocalError.)
        if resource_name is None:
            resource_name = self.data.get('name')
        if resource_name is None:
            msg = 'A "name" or "connectionInfo" must be provided inside the "data" field for this operation. '
            msg += 'If a "connectionInfo" is provided, the "Host" name is considered as the "name" for the resource.'
            raise OneViewModuleValueError(msg)

        resource = self.resource_client.get_by_name(resource_name)

        if self.state == 'present':
            changed, msg, san_manager = self._present(resource)
            return dict(changed=changed, msg=msg, ansible_facts=dict(san_manager=san_manager))

        elif self.state == 'absent':
            return self.resource_absent(resource, method='remove')

        elif self.state == 'connection_information_set':
            changed, msg, san_manager = self._connection_information_set(resource)
            return dict(changed=changed, msg=msg, ansible_facts=dict(san_manager=san_manager))

    def _present(self, resource):
        """Add the SAN Manager when missing, otherwise update it if its data differs.

        Returns a (changed, message, san_manager) tuple.
        """
        if not resource:
            provider_uri = self.data.get('providerUri', self._get_provider_uri_by_display_name(self.data))
            return True, self.MSG_CREATED, self.resource_client.add(self.data, provider_uri)

        merged_data = resource.copy()
        merged_data.update(self.data)

        # 'connectionInfo' cannot be validated against the stored resource
        # (credentials are not returned by the API), so exclude it from the
        # idempotency comparison.
        resource.pop('connectionInfo', None)
        merged_data.pop('connectionInfo', None)

        if self.compare(resource, merged_data):
            return False, self.MSG_ALREADY_PRESENT, resource

        updated_san_manager = self.resource_client.update(resource=merged_data, id_or_uri=resource['uri'])
        return True, self.MSG_UPDATED, updated_san_manager

    def _connection_information_set(self, resource):
        """Update the SAN Manager's connection information (non-idempotent).

        Creates the SAN Manager when it does not exist yet.
        Returns a (changed, message, san_manager) tuple.
        """
        if not resource:
            return self._present(resource)

        if not self.data.get('connectionInfo'):
            raise OneViewModuleValueError('A connectionInfo field is required for this operation.')

        merged_data = resource.copy()
        merged_data.update(self.data)
        # refreshState is a one-shot trigger and must not be replayed here.
        merged_data.pop('refreshState', None)
        updated_san_manager = self.resource_client.update(resource=merged_data, id_or_uri=resource['uri'])
        return True, self.MSG_UPDATED, updated_san_manager

    def _get_provider_uri_by_display_name(self, data):
        """Resolve data['providerDisplayName'] to a provider URI, or raise."""
        display_name = data.get('providerDisplayName')
        provider_uri = self.resource_client.get_provider_uri(display_name)

        if not provider_uri:
            raise OneViewModuleValueError(self.MSG_SAN_MANAGER_PROVIDER_DISPLAY_NAME_NOT_FOUND.format(display_name))

        return provider_uri
|
||||
|
||||
|
||||
def main():
    """Module entry point."""
    module = SanManagerModule()
    module.run()


if __name__ == '__main__':
    main()
|
||||
@@ -0,0 +1 @@
|
||||
oneview_san_manager_info.py
|
||||
@@ -0,0 +1,124 @@
|
||||
#!/usr/bin/python
|
||||
# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: oneview_san_manager_info
|
||||
short_description: Retrieve information about one or more of the OneView SAN Managers
|
||||
description:
|
||||
- Retrieve information about one or more of the SAN Managers from OneView
|
||||
- This module was called C(oneview_san_manager_facts) before Ansible 2.9, returning C(ansible_facts).
|
||||
Note that the M(oneview_san_manager_info) module no longer returns C(ansible_facts)!
|
||||
requirements:
|
||||
- hpOneView >= 2.0.1
|
||||
author:
|
||||
- Felipe Bulsoni (@fgbulsoni)
|
||||
- Thiago Miotto (@tmiotto)
|
||||
- Adriane Cardozo (@adriane-cardozo)
|
||||
options:
|
||||
provider_display_name:
|
||||
description:
|
||||
- Provider Display Name.
|
||||
params:
|
||||
description:
|
||||
- List of params to delimit, filter and sort the list of resources.
|
||||
- "params allowed:
|
||||
- C(start): The first item to return, using 0-based indexing.
|
||||
- C(count): The number of resources to return.
|
||||
- C(query): A general query string to narrow the list of resources returned.
|
||||
- C(sort): The sort order of the returned data set."
|
||||
extends_documentation_fragment:
|
||||
- community.general.oneview
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Gather information about all SAN Managers
|
||||
oneview_san_manager_info:
|
||||
config: /etc/oneview/oneview_config.json
|
||||
delegate_to: localhost
|
||||
register: result
|
||||
|
||||
- debug:
|
||||
msg: "{{ result.san_managers }}"
|
||||
|
||||
- name: Gather paginated, filtered and sorted information about SAN Managers
|
||||
oneview_san_manager_info:
|
||||
config: /etc/oneview/oneview_config.json
|
||||
params:
|
||||
start: 0
|
||||
count: 3
|
||||
sort: name:ascending
|
||||
query: isInternal eq false
|
||||
delegate_to: localhost
|
||||
register: result
|
||||
|
||||
- debug:
|
||||
msg: "{{ result.san_managers }}"
|
||||
|
||||
- name: Gather information about a SAN Manager by provider display name
|
||||
oneview_san_manager_info:
|
||||
config: /etc/oneview/oneview_config.json
|
||||
provider_display_name: Brocade Network Advisor
|
||||
delegate_to: localhost
|
||||
register: result
|
||||
|
||||
- debug:
|
||||
msg: "{{ result.san_managers }}"
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
san_managers:
|
||||
description: Has all the OneView information about the SAN Managers.
|
||||
returned: Always, but can be null.
|
||||
type: dict
|
||||
'''
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
|
||||
|
||||
|
||||
class SanManagerInfoModule(OneViewModuleBase):
    """Collect OneView SAN Manager resources, optionally filtered by provider display name."""

    argument_spec = dict(
        provider_display_name=dict(type='str'),
        params=dict(type='dict')
    )

    def __init__(self):
        super(SanManagerInfoModule, self).__init__(additional_arg_spec=self.argument_spec)
        self.resource_client = self.oneview_client.san_managers
        # Detect invocation under the deprecated 'facts' alias so the result
        # shape can match the old contract below.
        self.is_old_facts = self.module._name == 'oneview_san_manager_facts'
        if self.is_old_facts:
            self.module.deprecate("The 'oneview_san_manager_facts' module has been renamed to 'oneview_san_manager_info', "
                                  "and the renamed one no longer returns ansible_facts", version='2.13')

    def execute_module(self):
        """Return the matching SAN Managers wrapped per the (old/new) result contract."""
        display_name = self.module.params.get('provider_display_name')
        if display_name:
            found = self.oneview_client.san_managers.get_by_provider_display_name(display_name)
            resources = [found] if found else []
        else:
            resources = self.oneview_client.san_managers.get_all(**self.facts_params)

        if self.is_old_facts:
            return dict(changed=False, ansible_facts=dict(san_managers=resources))
        return dict(changed=False, san_managers=resources)
|
||||
|
||||
|
||||
def main():
    """Module entry point: build the module object and run it."""
    module = SanManagerInfoModule()
    module.run()
|
||||
|
||||
|
||||
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
|
||||
@@ -0,0 +1,203 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright (c) 2018 Dell EMC Inc.
|
||||
# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['preview'],
|
||||
'supported_by': 'community',
|
||||
'metadata_version': '1.1'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: idrac_redfish_command
|
||||
short_description: Manages Out-Of-Band controllers using iDRAC OEM Redfish APIs
|
||||
description:
|
||||
- Builds Redfish URIs locally and sends them to remote OOB controllers to
|
||||
perform an action.
|
||||
- For use with Dell iDRAC operations that require Redfish OEM extensions
|
||||
options:
|
||||
category:
|
||||
required: true
|
||||
description:
|
||||
- Category to execute on OOB controller
|
||||
type: str
|
||||
command:
|
||||
required: true
|
||||
description:
|
||||
- List of commands to execute on OOB controller
|
||||
type: list
|
||||
baseuri:
|
||||
required: true
|
||||
description:
|
||||
- Base URI of OOB controller
|
||||
type: str
|
||||
username:
|
||||
required: true
|
||||
description:
|
||||
- User for authentication with OOB controller
|
||||
type: str
|
||||
password:
|
||||
required: true
|
||||
description:
|
||||
- Password for authentication with OOB controller
|
||||
type: str
|
||||
timeout:
|
||||
description:
|
||||
- Timeout in seconds for URL requests to OOB controller
|
||||
default: 10
|
||||
type: int
|
||||
resource_id:
|
||||
required: false
|
||||
description:
|
||||
- The ID of the System, Manager or Chassis to modify
|
||||
type: str
|
||||
|
||||
author: "Jose Delarosa (@jose-delarosa)"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create BIOS configuration job (schedule BIOS setting update)
|
||||
idrac_redfish_command:
|
||||
category: Systems
|
||||
command: CreateBiosConfigJob
|
||||
resource_id: System.Embedded.1
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
msg:
|
||||
description: Message with action result or error description
|
||||
returned: always
|
||||
type: str
|
||||
sample: "Action was successful"
|
||||
'''
|
||||
|
||||
import re
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
|
||||
class IdracRedfishUtils(RedfishUtils):
    """iDRAC OEM command helpers layered on the generic RedfishUtils."""

    def create_bios_config_job(self):
        """Schedule a BIOS configuration job on the iDRAC.

        Reads the System's Bios settings URI and POSTs a job targeting it to
        the Manager's Jobs service. Returns a dict with 'ret' (bool) and
        'msg'; intermediate request failures are returned as-is.
        """
        result = {}
        key = "Bios"
        jobs = "Jobs"

        # Search for 'key' entry and extract URI from it
        response = self.get_request(self.root_uri + self.systems_uris[0])
        if response['ret'] is False:
            return response
        result['ret'] = True
        data = response['data']

        if key not in data:
            return {'ret': False, 'msg': "Key %s not found" % key}

        bios_uri = data[key]["@odata.id"]

        # Extract proper URI of the pending-settings object
        response = self.get_request(self.root_uri + bios_uri)
        if response['ret'] is False:
            return response
        result['ret'] = True
        data = response['data']
        set_bios_attr_uri = data["@Redfish.Settings"]["SettingsObject"][
            "@odata.id"]

        payload = {"TargetSettingsURI": set_bios_attr_uri}
        response = self.post_request(
            self.root_uri + self.manager_uri + "/" + jobs, payload)
        if response['ret'] is False:
            return response

        # Fix: guard against a missing Location header or an unexpected job id
        # format instead of raising KeyError/AttributeError on the response.
        headers = response['resp'].__dict__.get("headers", {})
        location = headers.get("Location")
        if location is None:
            return {'ret': False,
                    'msg': "Job creation response did not include a Location header"}
        match = re.search("JID_.+", location)
        if match is None:
            return {'ret': False,
                    'msg': "Could not parse job ID from Location header: %s" % location}
        job_id = match.group()
        # Currently not passing job_id back to user but patch is coming
        return {'ret': True, 'msg': "Config job %s created" % job_id}
|
||||
|
||||
|
||||
# Map of supported Redfish categories to the OEM commands implemented for each.
# Categories with an empty list are accepted names but expose no commands yet.
CATEGORY_COMMANDS_ALL = {
    "Systems": ["CreateBiosConfigJob"],
    "Accounts": [],
    "Manager": []
}
|
||||
|
||||
|
||||
def main():
    """Ansible entry point: validate category/commands, dispatch the iDRAC OEM
    Redfish command(s) and exit via module.exit_json()/fail_json()."""
    result = {}
    module = AnsibleModule(
        argument_spec=dict(
            category=dict(required=True),
            command=dict(required=True, type='list'),
            baseuri=dict(required=True),
            username=dict(required=True),
            password=dict(required=True, no_log=True),
            timeout=dict(type='int', default=10),
            resource_id=dict()
        ),
        supports_check_mode=False
    )

    category = module.params['category']
    command_list = module.params['command']

    # admin credentials used for authentication
    creds = {'user': module.params['username'],
             'pswd': module.params['password']}

    # timeout
    timeout = module.params['timeout']

    # System, Manager or Chassis ID to modify
    resource_id = module.params['resource_id']

    # Build root URI
    root_uri = "https://" + module.params['baseuri']
    # data_modification=True: this module performs write operations on the BMC.
    rf_utils = IdracRedfishUtils(creds, root_uri, timeout, module,
                                 resource_id=resource_id, data_modification=True)

    # Check that Category is valid
    if category not in CATEGORY_COMMANDS_ALL:
        module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, CATEGORY_COMMANDS_ALL.keys())))

    # Check that all commands are valid
    for cmd in command_list:
        # Fail if even one command given is invalid
        if cmd not in CATEGORY_COMMANDS_ALL[category]:
            module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category])))

    # Organize by Categories / Commands

    if category == "Systems":
        # execute only if we find a System resource
        result = rf_utils._find_systems_resource()
        if result['ret'] is False:
            module.fail_json(msg=to_native(result['msg']))

        for command in command_list:
            if command == "CreateBiosConfigJob":
                # execute only if we find a Managers resource
                # (the BIOS config job is posted to the Manager's Jobs service)
                result = rf_utils._find_managers_resource()
                if result['ret'] is False:
                    module.fail_json(msg=to_native(result['msg']))
                result = rf_utils.create_bios_config_job()

    # Return data back or fail with proper message
    if result['ret'] is True:
        del result['ret']
        module.exit_json(changed=True, msg='Action was successful')
    else:
        module.fail_json(msg=to_native(result['msg']))
|
||||
|
||||
|
||||
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
|
||||
@@ -0,0 +1,329 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright (c) 2019 Dell EMC Inc.
|
||||
# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['preview'],
|
||||
'supported_by': 'community',
|
||||
'metadata_version': '1.1'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: idrac_redfish_config
|
||||
short_description: Manages servers through iDRAC using Dell Redfish APIs
|
||||
description:
|
||||
- For use with Dell iDRAC operations that require Redfish OEM extensions
|
||||
- Builds Redfish URIs locally and sends them to remote iDRAC controllers to
|
||||
set or update a configuration attribute.
|
||||
options:
|
||||
category:
|
||||
required: true
|
||||
type: str
|
||||
description:
|
||||
- Category to execute on iDRAC
|
||||
command:
|
||||
required: true
|
||||
description:
|
||||
- List of commands to execute on iDRAC
|
||||
- I(SetManagerAttributes), I(SetLifecycleControllerAttributes) and
|
||||
I(SetSystemAttributes) are mutually exclusive commands when C(category)
|
||||
is I(Manager)
|
||||
type: list
|
||||
baseuri:
|
||||
required: true
|
||||
description:
|
||||
- Base URI of iDRAC
|
||||
type: str
|
||||
username:
|
||||
required: true
|
||||
description:
|
||||
- User for authentication with iDRAC
|
||||
type: str
|
||||
password:
|
||||
required: true
|
||||
description:
|
||||
- Password for authentication with iDRAC
|
||||
type: str
|
||||
manager_attribute_name:
|
||||
required: false
|
||||
description:
|
||||
- (deprecated) name of iDRAC attribute to update
|
||||
type: str
|
||||
manager_attribute_value:
|
||||
required: false
|
||||
description:
|
||||
- (deprecated) value of iDRAC attribute to update
|
||||
type: str
|
||||
manager_attributes:
|
||||
required: false
|
||||
description:
|
||||
- dictionary of iDRAC attribute name and value pairs to update
|
||||
default: {}
|
||||
type: 'dict'
|
||||
timeout:
|
||||
description:
|
||||
- Timeout in seconds for URL requests to iDRAC controller
|
||||
default: 10
|
||||
type: int
|
||||
resource_id:
|
||||
required: false
|
||||
description:
|
||||
- The ID of the System, Manager or Chassis to modify
|
||||
type: str
|
||||
|
||||
author: "Jose Delarosa (@jose-delarosa)"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Enable NTP and set NTP server and Time zone attributes in iDRAC
|
||||
idrac_redfish_config:
|
||||
category: Manager
|
||||
command: SetManagerAttributes
|
||||
resource_id: iDRAC.Embedded.1
|
||||
manager_attributes:
|
||||
NTPConfigGroup.1.NTPEnable: "Enabled"
|
||||
NTPConfigGroup.1.NTP1: "{{ ntpserver1 }}"
|
||||
Time.1.Timezone: "{{ timezone }}"
|
||||
baseuri: "{{ baseuri }}"
|
||||
    username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
|
||||
- name: Enable Syslog and set Syslog servers in iDRAC
|
||||
idrac_redfish_config:
|
||||
category: Manager
|
||||
command: SetManagerAttributes
|
||||
resource_id: iDRAC.Embedded.1
|
||||
manager_attributes:
|
||||
SysLog.1.SysLogEnable: "Enabled"
|
||||
SysLog.1.Server1: "{{ syslog_server1 }}"
|
||||
SysLog.1.Server2: "{{ syslog_server2 }}"
|
||||
baseuri: "{{ baseuri }}"
|
||||
    username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
|
||||
- name: Configure SNMP community string, port, protocol and trap format
|
||||
idrac_redfish_config:
|
||||
category: Manager
|
||||
command: SetManagerAttributes
|
||||
resource_id: iDRAC.Embedded.1
|
||||
manager_attributes:
|
||||
SNMP.1.AgentEnable: "Enabled"
|
||||
SNMP.1.AgentCommunity: "public_community_string"
|
||||
SNMP.1.TrapFormat: "SNMPv1"
|
||||
SNMP.1.SNMPProtocol: "All"
|
||||
SNMP.1.DiscoveryPort: 161
|
||||
SNMP.1.AlertPort: 162
|
||||
baseuri: "{{ baseuri }}"
|
||||
    username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
|
||||
- name: Enable CSIOR
|
||||
idrac_redfish_config:
|
||||
category: Manager
|
||||
command: SetLifecycleControllerAttributes
|
||||
resource_id: iDRAC.Embedded.1
|
||||
manager_attributes:
|
||||
LCAttributes.1.CollectSystemInventoryOnRestart: "Enabled"
|
||||
baseuri: "{{ baseuri }}"
|
||||
    username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
|
||||
- name: Set Power Supply Redundancy Policy to A/B Grid Redundant
|
||||
idrac_redfish_config:
|
||||
category: Manager
|
||||
command: SetSystemAttributes
|
||||
resource_id: iDRAC.Embedded.1
|
||||
manager_attributes:
|
||||
ServerPwr.1.PSRedPolicy: "A/B Grid Redundant"
|
||||
baseuri: "{{ baseuri }}"
|
||||
    username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
msg:
|
||||
description: Message with action result or error description
|
||||
returned: always
|
||||
type: str
|
||||
sample: "Action was successful"
|
||||
'''
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.common.validation import (
|
||||
check_mutually_exclusive,
|
||||
check_required_arguments
|
||||
)
|
||||
from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
|
||||
class IdracRedfishUtils(RedfishUtils):
    """iDRAC-specific configuration helpers layered on the generic RedfishUtils."""

    def set_manager_attributes(self, command):
        """Apply iDRAC/LifecycleController/System attribute changes.

        command: one of SetManagerAttributes, SetLifecycleControllerAttributes
        or SetSystemAttributes; selects which Manager resource gets patched.
        Returns a dict with 'ret' and, on a handled result, 'changed'/'msg';
        intermediate request failures are returned as-is.
        """
        result = {}
        required_arg_spec = {'manager_attributes': {'required': True}}

        try:
            check_required_arguments(required_arg_spec, self.module.params)

        except TypeError as e:
            msg = to_native(e)
            self.module.fail_json(msg=msg)

        key = "Attributes"
        # URI of the Manager resource that holds the attribute set for each command.
        command_manager_attributes_uri_map = {
            "SetManagerAttributes": self.manager_uri,
            "SetLifecycleControllerAttributes": "/redfish/v1/Managers/LifecycleController.Embedded.1",
            "SetSystemAttributes": "/redfish/v1/Managers/System.Embedded.1"
        }
        manager_uri = command_manager_attributes_uri_map.get(command, self.manager_uri)

        # Fix: work on a copy so that merging in the deprecated name/value pair
        # below does not mutate module.params['manager_attributes'] (shared
        # module state also read by main()).
        attributes = dict(self.module.params['manager_attributes'])
        manager_attr_name = self.module.params.get('manager_attribute_name')
        manager_attr_value = self.module.params.get('manager_attribute_value')

        # manager attributes to update (deprecated single name/value pair)
        if manager_attr_name:
            attributes.update({manager_attr_name: manager_attr_value})

        attrs_to_patch = {}
        attrs_skipped = {}

        # Search for key entry and extract URI from it
        response = self.get_request(self.root_uri + manager_uri + "/" + key)
        if response['ret'] is False:
            return response
        result['ret'] = True
        data = response['data']

        if key not in data:
            return {'ret': False,
                    'msg': "%s: Key %s not found" % (command, key)}

        for attr_name, attr_value in attributes.items():
            # Check if attribute exists
            if attr_name not in data[key]:
                return {'ret': False,
                        'msg': "%s: Manager attribute %s not found" % (command, attr_name)}

            # Find out if value is already set to what we want. If yes, exclude
            # those attributes
            if data[key][attr_name] == attr_value:
                attrs_skipped.update({attr_name: attr_value})
            else:
                attrs_to_patch.update({attr_name: attr_value})

        # Nothing left to change: idempotent success.
        if not attrs_to_patch:
            return {'ret': True, 'changed': False,
                    'msg': "Manager attributes already set"}

        payload = {"Attributes": attrs_to_patch}
        response = self.patch_request(self.root_uri + manager_uri + "/" + key, payload)
        if response['ret'] is False:
            return response
        return {'ret': True, 'changed': True,
                'msg': "%s: Modified Manager attributes %s" % (command, attrs_to_patch)}
|
||||
|
||||
|
||||
# Map of supported categories to the attribute commands implemented for each.
CATEGORY_COMMANDS_ALL = {
    "Manager": ["SetManagerAttributes", "SetLifecycleControllerAttributes",
                "SetSystemAttributes"]
}

# list of mutually exclusive commands for a category
CATEGORY_COMMANDS_MUTUALLY_EXCLUSIVE = {
    "Manager": [["SetManagerAttributes", "SetLifecycleControllerAttributes",
                 "SetSystemAttributes"]]
}
|
||||
|
||||
|
||||
def main():
    """Ansible entry point: validate input, apply the requested attribute
    commands on the iDRAC Manager and exit via exit_json()/fail_json()."""
    result = {}
    module = AnsibleModule(
        argument_spec=dict(
            category=dict(required=True),
            command=dict(required=True, type='list'),
            baseuri=dict(required=True),
            username=dict(required=True),
            password=dict(required=True, no_log=True),
            manager_attribute_name=dict(default=None),
            manager_attribute_value=dict(default=None),
            manager_attributes=dict(type='dict', default={}),
            timeout=dict(type='int', default=10),
            resource_id=dict()
        ),
        supports_check_mode=False
    )

    category = module.params['category']
    command_list = module.params['command']

    # admin credentials used for authentication
    creds = {'user': module.params['username'],
             'pswd': module.params['password']}

    # timeout
    timeout = module.params['timeout']

    # System, Manager or Chassis ID to modify
    resource_id = module.params['resource_id']

    # Build root URI
    root_uri = "https://" + module.params['baseuri']
    # data_modification=True: this module performs write operations on the BMC.
    rf_utils = IdracRedfishUtils(creds, root_uri, timeout, module,
                                 resource_id=resource_id, data_modification=True)

    # Check that Category is valid
    if category not in CATEGORY_COMMANDS_ALL:
        module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, CATEGORY_COMMANDS_ALL.keys())))

    # Check that all commands are valid
    for cmd in command_list:
        # Fail if even one command given is invalid
        if cmd not in CATEGORY_COMMANDS_ALL[category]:
            module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category])))

    # check for mutually exclusive commands
    try:
        # check_mutually_exclusive accepts a single list or list of lists that
        # are groups of terms that should be mutually exclusive with one another
        # and checks that against a dictionary
        check_mutually_exclusive(CATEGORY_COMMANDS_MUTUALLY_EXCLUSIVE[category],
                                 dict.fromkeys(command_list, True))

    except TypeError as e:
        module.fail_json(msg=to_native(e))

    # Organize by Categories / Commands

    if category == "Manager":
        # execute only if we find a Manager resource
        result = rf_utils._find_managers_resource()
        if result['ret'] is False:
            module.fail_json(msg=to_native(result['msg']))

        for command in command_list:
            if command in ["SetManagerAttributes", "SetLifecycleControllerAttributes", "SetSystemAttributes"]:
                result = rf_utils.set_manager_attributes(command)

    # Warn users still passing the deprecated single name/value pair arguments.
    if any((module.params['manager_attribute_name'], module.params['manager_attribute_value'])):
        module.deprecate(msg='Arguments `manager_attribute_name` and '
                             '`manager_attribute_value` are deprecated. '
                             'Use `manager_attributes` instead for passing in '
                             'the manager attribute name and value pairs',
                         version='2.13')

    # Return data back or fail with proper message
    if result['ret'] is True:
        module.exit_json(changed=result['changed'], msg=to_native(result['msg']))
    else:
        module.fail_json(msg=to_native(result['msg']))
|
||||
|
||||
|
||||
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
|
||||
1
plugins/modules/remote_management/redfish/idrac_redfish_facts.py
Symbolic link
1
plugins/modules/remote_management/redfish/idrac_redfish_facts.py
Symbolic link
@@ -0,0 +1 @@
|
||||
idrac_redfish_info.py
|
||||
239
plugins/modules/remote_management/redfish/idrac_redfish_info.py
Normal file
239
plugins/modules/remote_management/redfish/idrac_redfish_info.py
Normal file
@@ -0,0 +1,239 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright (c) 2019 Dell EMC Inc.
|
||||
# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['preview'],
|
||||
'supported_by': 'community',
|
||||
'metadata_version': '1.1'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: idrac_redfish_info
|
||||
short_description: Gather PowerEdge server information through iDRAC using Redfish APIs
|
||||
description:
|
||||
- Builds Redfish URIs locally and sends them to remote iDRAC controllers to
|
||||
get information back.
|
||||
- For use with Dell EMC iDRAC operations that require Redfish OEM extensions
|
||||
- This module was called C(idrac_redfish_facts) before Ansible 2.9, returning C(ansible_facts).
|
||||
Note that the M(idrac_redfish_info) module no longer returns C(ansible_facts)!
|
||||
options:
|
||||
category:
|
||||
required: true
|
||||
description:
|
||||
- Category to execute on iDRAC controller
|
||||
type: str
|
||||
command:
|
||||
required: true
|
||||
description:
|
||||
- List of commands to execute on iDRAC controller
|
||||
- C(GetManagerAttributes) returns the list of dicts containing iDRAC,
|
||||
LifecycleController and System attributes
|
||||
type: list
|
||||
baseuri:
|
||||
required: true
|
||||
description:
|
||||
- Base URI of iDRAC controller
|
||||
type: str
|
||||
username:
|
||||
required: true
|
||||
description:
|
||||
- User for authentication with iDRAC controller
|
||||
type: str
|
||||
password:
|
||||
required: true
|
||||
description:
|
||||
- Password for authentication with iDRAC controller
|
||||
type: str
|
||||
timeout:
|
||||
description:
|
||||
- Timeout in seconds for URL requests to OOB controller
|
||||
default: 10
|
||||
type: int
|
||||
|
||||
author: "Jose Delarosa (@jose-delarosa)"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get Manager attributes with a default of 20 seconds
|
||||
idrac_redfish_info:
|
||||
category: Manager
|
||||
command: GetManagerAttributes
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
timeout: 20
|
||||
register: result
|
||||
|
||||
# Examples to display the value of all or a single iDRAC attribute
|
||||
- name: Store iDRAC attributes as a fact variable
|
||||
set_fact:
|
||||
idrac_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'iDRACAttributes') | list | first }}"
|
||||
|
||||
- name: Display all iDRAC attributes
|
||||
debug:
|
||||
var: idrac_attributes
|
||||
|
||||
- name: Display the value of 'Syslog.1.SysLogEnable' iDRAC attribute
|
||||
debug:
|
||||
var: idrac_attributes['Syslog.1.SysLogEnable']
|
||||
|
||||
# Examples to display the value of all or a single LifecycleController attribute
|
||||
- name: Store LifecycleController attributes as a fact variable
|
||||
set_fact:
|
||||
lc_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'LCAttributes') | list | first }}"
|
||||
|
||||
- name: Display LifecycleController attributes
|
||||
debug:
|
||||
var: lc_attributes
|
||||
|
||||
- name: Display the value of 'CollectSystemInventoryOnRestart' attribute
|
||||
debug:
|
||||
var: lc_attributes['LCAttributes.1.CollectSystemInventoryOnRestart']
|
||||
|
||||
# Examples to display the value of all or a single System attribute
|
||||
- name: Store System attributes as a fact variable
|
||||
set_fact:
|
||||
system_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'SystemAttributes') | list | first }}"
|
||||
|
||||
- name: Display System attributes
|
||||
debug:
|
||||
var: system_attributes
|
||||
|
||||
- name: Display the value of 'PSRedPolicy'
|
||||
debug:
|
||||
var: system_attributes['ServerPwr.1.PSRedPolicy']
|
||||
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
msg:
|
||||
description: different results depending on task
|
||||
returned: always
|
||||
type: dict
|
||||
sample: List of Manager attributes
|
||||
'''
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
|
||||
class IdracRedfishUtils(RedfishUtils):
    """iDRAC-specific information helpers layered on the generic RedfishUtils."""

    def get_manager_attributes(self):
        """Fetch the iDRAC/LC/System attribute sets via the Dell OEM links.

        Returns a dict with 'ret', 'entries' (a list of dicts carrying 'Id'
        and 'Attributes' when present) and, on failure, 'msg'.
        """
        result = {}
        entries = []
        wanted_props = ['Attributes', 'Id']

        response = self.get_request(self.root_uri + self.manager_uri)

        if response['ret'] is False:
            return response
        manager_data = response['data']

        # Manager attributes are supported as part of the iDRAC OEM extension
        # and only on iDRAC9; a missing link/key is reported as a failure.
        try:
            for member in manager_data[u'Links'][u'Oem'][u'Dell'][u'DellAttributes']:
                response = self.get_request(self.root_uri + member[u'@odata.id'])
                if response['ret'] is False:
                    return response
                attr_data = response['data']

                entry = {prop: attr_data.get(prop)
                         for prop in wanted_props if prop in attr_data}
                if entry:
                    entries.append(entry)

            result['ret'] = True

        except (AttributeError, KeyError) as e:
            result['ret'] = False
            result['msg'] = "Failed to find attribute/key: " + str(e)

        result["entries"] = entries
        return result
|
||||
|
||||
|
||||
# Map of supported categories to the information commands implemented for each.
CATEGORY_COMMANDS_ALL = {
    "Manager": ["GetManagerAttributes"]
}
|
||||
|
||||
|
||||
def main():
    """Ansible entry point: gather iDRAC manager attribute information and
    return it under redfish_facts (or ansible_facts for the legacy alias)."""
    result = {}
    module = AnsibleModule(
        argument_spec=dict(
            category=dict(required=True),
            command=dict(required=True, type='list'),
            baseuri=dict(required=True),
            username=dict(required=True),
            password=dict(required=True, no_log=True),
            timeout=dict(type='int', default=10)
        ),
        supports_check_mode=False
    )
    # Detect invocation under the deprecated 'facts' alias so the legacy
    # ansible_facts return shape can be preserved below.
    is_old_facts = module._name == 'idrac_redfish_facts'
    if is_old_facts:
        module.deprecate("The 'idrac_redfish_facts' module has been renamed to 'idrac_redfish_info', "
                         "and the renamed one no longer returns ansible_facts", version='2.13')

    category = module.params['category']
    command_list = module.params['command']

    # admin credentials used for authentication
    creds = {'user': module.params['username'],
             'pswd': module.params['password']}

    # timeout
    timeout = module.params['timeout']

    # Build root URI
    root_uri = "https://" + module.params['baseuri']
    rf_utils = IdracRedfishUtils(creds, root_uri, timeout, module)

    # Check that Category is valid
    if category not in CATEGORY_COMMANDS_ALL:
        module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, CATEGORY_COMMANDS_ALL.keys())))

    # Check that all commands are valid
    for cmd in command_list:
        # Fail if even one command given is invalid
        if cmd not in CATEGORY_COMMANDS_ALL[category]:
            module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category])))

    # Organize by Categories / Commands

    if category == "Manager":
        # execute only if we find a Manager resource
        result = rf_utils._find_managers_resource()
        if result['ret'] is False:
            module.fail_json(msg=to_native(result['msg']))

        for command in command_list:
            if command == "GetManagerAttributes":
                result = rf_utils.get_manager_attributes()

    # Return data back or fail with proper message
    if result['ret'] is True:
        del result['ret']
        if is_old_facts:
            module.exit_json(ansible_facts=dict(redfish_facts=result))
        else:
            module.exit_json(redfish_facts=result)
    else:
        module.fail_json(msg=to_native(result['msg']))
|
||||
|
||||
|
||||
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
|
||||
552
plugins/modules/remote_management/redfish/redfish_command.py
Normal file
552
plugins/modules/remote_management/redfish/redfish_command.py
Normal file
@@ -0,0 +1,552 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright (c) 2017-2018 Dell EMC Inc.
|
||||
# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['preview'],
|
||||
'supported_by': 'community',
|
||||
'metadata_version': '1.1'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: redfish_command
|
||||
short_description: Manages Out-Of-Band controllers using Redfish APIs
|
||||
description:
|
||||
- Builds Redfish URIs locally and sends them to remote OOB controllers to
|
||||
perform an action.
|
||||
- Manages OOB controller ex. reboot, log management.
|
||||
- Manages OOB controller users ex. add, remove, update.
|
||||
- Manages system power ex. on, off, graceful and forced reboot.
|
||||
options:
|
||||
category:
|
||||
required: true
|
||||
description:
|
||||
- Category to execute on OOB controller
|
||||
type: str
|
||||
command:
|
||||
required: true
|
||||
description:
|
||||
- List of commands to execute on OOB controller
|
||||
type: list
|
||||
baseuri:
|
||||
required: true
|
||||
description:
|
||||
- Base URI of OOB controller
|
||||
type: str
|
||||
username:
|
||||
required: true
|
||||
description:
|
||||
- Username for authentication with OOB controller
|
||||
type: str
|
||||
password:
|
||||
required: true
|
||||
description:
|
||||
- Password for authentication with OOB controller
|
||||
type: str
|
||||
id:
|
||||
required: false
|
||||
aliases: [ account_id ]
|
||||
description:
|
||||
- ID of account to delete/modify
|
||||
type: str
|
||||
new_username:
|
||||
required: false
|
||||
aliases: [ account_username ]
|
||||
description:
|
||||
- Username of account to add/delete/modify
|
||||
type: str
|
||||
new_password:
|
||||
required: false
|
||||
aliases: [ account_password ]
|
||||
description:
|
||||
- New password of account to add/modify
|
||||
type: str
|
||||
roleid:
|
||||
required: false
|
||||
aliases: [ account_roleid ]
|
||||
description:
|
||||
- Role of account to add/modify
|
||||
type: str
|
||||
bootdevice:
|
||||
required: false
|
||||
description:
|
||||
- bootdevice when setting boot configuration
|
||||
type: str
|
||||
timeout:
|
||||
description:
|
||||
- Timeout in seconds for URL requests to OOB controller
|
||||
default: 10
|
||||
type: int
|
||||
uefi_target:
|
||||
required: false
|
||||
description:
|
||||
- UEFI target when bootdevice is "UefiTarget"
|
||||
type: str
|
||||
boot_next:
|
||||
required: false
|
||||
description:
|
||||
- BootNext target when bootdevice is "UefiBootNext"
|
||||
type: str
|
||||
update_username:
|
||||
required: false
|
||||
aliases: [ account_updatename ]
|
||||
description:
|
||||
- new update user name for account_username
|
||||
type: str
|
||||
account_properties:
|
||||
required: false
|
||||
description:
|
||||
- properties of account service to update
|
||||
type: dict
|
||||
resource_id:
|
||||
required: false
|
||||
description:
|
||||
- The ID of the System, Manager or Chassis to modify
|
||||
type: str
|
||||
update_image_uri:
|
||||
required: false
|
||||
description:
|
||||
- The URI of the image for the update
|
||||
type: str
|
||||
update_protocol:
|
||||
required: false
|
||||
description:
|
||||
- The protocol for the update
|
||||
type: str
|
||||
update_targets:
|
||||
required: false
|
||||
description:
|
||||
- The list of target resource URIs to apply the update to
|
||||
type: list
|
||||
elements: str
|
||||
update_creds:
|
||||
required: false
|
||||
description:
|
||||
- The credentials for retrieving the update image
|
||||
type: dict
|
||||
suboptions:
|
||||
username:
|
||||
required: false
|
||||
description:
|
||||
- The username for retrieving the update image
|
||||
type: str
|
||||
password:
|
||||
required: false
|
||||
description:
|
||||
- The password for retrieving the update image
|
||||
type: str
|
||||
|
||||
author: "Jose Delarosa (@jose-delarosa)"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Restart system power gracefully
|
||||
redfish_command:
|
||||
category: Systems
|
||||
command: PowerGracefulRestart
|
||||
resource_id: 437XR1138R2
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
|
||||
- name: Set one-time boot device to {{ bootdevice }}
|
||||
redfish_command:
|
||||
category: Systems
|
||||
command: SetOneTimeBoot
|
||||
resource_id: 437XR1138R2
|
||||
bootdevice: "{{ bootdevice }}"
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
|
||||
- name: Set one-time boot device to UefiTarget of "/0x31/0x33/0x01/0x01"
|
||||
redfish_command:
|
||||
category: Systems
|
||||
command: SetOneTimeBoot
|
||||
resource_id: 437XR1138R2
|
||||
bootdevice: "UefiTarget"
|
||||
uefi_target: "/0x31/0x33/0x01/0x01"
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
|
||||
- name: Set one-time boot device to BootNext target of "Boot0001"
|
||||
redfish_command:
|
||||
category: Systems
|
||||
command: SetOneTimeBoot
|
||||
resource_id: 437XR1138R2
|
||||
bootdevice: "UefiBootNext"
|
||||
boot_next: "Boot0001"
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
|
||||
- name: Set chassis indicator LED to blink
|
||||
redfish_command:
|
||||
category: Chassis
|
||||
command: IndicatorLedBlink
|
||||
resource_id: 1U
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
|
||||
- name: Add user
|
||||
redfish_command:
|
||||
category: Accounts
|
||||
command: AddUser
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
new_username: "{{ new_username }}"
|
||||
new_password: "{{ new_password }}"
|
||||
roleid: "{{ roleid }}"
|
||||
|
||||
- name: Add user using new option aliases
|
||||
redfish_command:
|
||||
category: Accounts
|
||||
command: AddUser
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
account_username: "{{ account_username }}"
|
||||
account_password: "{{ account_password }}"
|
||||
account_roleid: "{{ account_roleid }}"
|
||||
|
||||
- name: Delete user
|
||||
redfish_command:
|
||||
category: Accounts
|
||||
command: DeleteUser
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
account_username: "{{ account_username }}"
|
||||
|
||||
- name: Disable user
|
||||
redfish_command:
|
||||
category: Accounts
|
||||
command: DisableUser
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
account_username: "{{ account_username }}"
|
||||
|
||||
- name: Enable user
|
||||
redfish_command:
|
||||
category: Accounts
|
||||
command: EnableUser
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
account_username: "{{ account_username }}"
|
||||
|
||||
- name: Add and enable user
|
||||
redfish_command:
|
||||
category: Accounts
|
||||
command: AddUser,EnableUser
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
new_username: "{{ new_username }}"
|
||||
new_password: "{{ new_password }}"
|
||||
roleid: "{{ roleid }}"
|
||||
|
||||
- name: Update user password
|
||||
redfish_command:
|
||||
category: Accounts
|
||||
command: UpdateUserPassword
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
account_username: "{{ account_username }}"
|
||||
account_password: "{{ account_password }}"
|
||||
|
||||
- name: Update user role
|
||||
redfish_command:
|
||||
category: Accounts
|
||||
command: UpdateUserRole
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
account_username: "{{ account_username }}"
|
||||
roleid: "{{ roleid }}"
|
||||
|
||||
- name: Update user name
|
||||
redfish_command:
|
||||
category: Accounts
|
||||
command: UpdateUserName
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
account_username: "{{ account_username }}"
|
||||
account_updatename: "{{ account_updatename }}"
|
||||
|
||||
- name: Update user name
|
||||
redfish_command:
|
||||
category: Accounts
|
||||
command: UpdateUserName
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
account_username: "{{ account_username }}"
|
||||
update_username: "{{ update_username }}"
|
||||
|
||||
- name: Update AccountService properties
|
||||
redfish_command:
|
||||
category: Accounts
|
||||
command: UpdateAccountServiceProperties
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
account_properties:
|
||||
AccountLockoutThreshold: 5
|
||||
AccountLockoutDuration: 600
|
||||
|
||||
- name: Clear Manager Logs with a timeout of 20 seconds
|
||||
redfish_command:
|
||||
category: Manager
|
||||
command: ClearLogs
|
||||
resource_id: BMC
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
timeout: 20
|
||||
|
||||
- name: Clear Sessions
|
||||
redfish_command:
|
||||
category: Sessions
|
||||
command: ClearSessions
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
|
||||
- name: Simple update
|
||||
redfish_command:
|
||||
category: Update
|
||||
command: SimpleUpdate
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
update_image_uri: https://example.com/myupdate.img
|
||||
|
||||
- name: Simple update with additional options
|
||||
redfish_command:
|
||||
category: Update
|
||||
command: SimpleUpdate
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
update_image_uri: //example.com/myupdate.img
|
||||
update_protocol: FTP
|
||||
update_targets:
|
||||
- /redfish/v1/UpdateService/FirmwareInventory/BMC
|
||||
update_creds:
|
||||
username: operator
|
||||
password: supersecretpwd
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
msg:
|
||||
description: Message with action result or error description
|
||||
returned: always
|
||||
type: str
|
||||
sample: "Action was successful"
|
||||
'''
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
|
||||
# Mapping of supported command names, keyed by Redfish resource category.
# More will be added as module features are expanded.
CATEGORY_COMMANDS_ALL = {
    "Systems": [
        "PowerOn",
        "PowerForceOff",
        "PowerForceRestart",
        "PowerGracefulRestart",
        "PowerGracefulShutdown",
        "PowerReboot",
        "SetOneTimeBoot",
    ],
    "Chassis": [
        "IndicatorLedOn",
        "IndicatorLedOff",
        "IndicatorLedBlink",
    ],
    "Accounts": [
        "AddUser",
        "EnableUser",
        "DeleteUser",
        "DisableUser",
        "UpdateUserRole",
        "UpdateUserPassword",
        "UpdateUserName",
        "UpdateAccountServiceProperties",
    ],
    "Sessions": [
        "ClearSessions",
    ],
    "Manager": [
        "GracefulRestart",
        "ClearLogs",
    ],
    "Update": [
        "SimpleUpdate",
    ],
}
|
||||
|
||||
|
||||
def main():
    """Entry point for the redfish_command module.

    Validates the requested category/commands against CATEGORY_COMMANDS_ALL,
    builds a RedfishUtils client for the OOB controller, dispatches each
    command in order and exits with ``changed``/``msg`` on success or fails
    with the error message returned by the utility layer.
    """
    result = {}
    module = AnsibleModule(
        argument_spec=dict(
            category=dict(required=True, type='str'),
            # elements='str' so Ansible validates/coerces each command name
            command=dict(required=True, type='list', elements='str'),
            baseuri=dict(required=True, type='str'),
            username=dict(required=True, type='str'),
            password=dict(required=True, type='str', no_log=True),
            id=dict(type='str', aliases=["account_id"]),
            new_username=dict(type='str', aliases=["account_username"]),
            new_password=dict(type='str', aliases=["account_password"], no_log=True),
            roleid=dict(type='str', aliases=["account_roleid"]),
            update_username=dict(type='str', aliases=["account_updatename"]),
            account_properties=dict(type='dict', default={}),
            bootdevice=dict(type='str'),
            timeout=dict(type='int', default=10),
            uefi_target=dict(type='str'),
            boot_next=dict(type='str'),
            resource_id=dict(type='str'),
            update_image_uri=dict(type='str'),
            update_protocol=dict(type='str'),
            update_targets=dict(type='list', elements='str', default=[]),
            update_creds=dict(
                type='dict',
                options=dict(
                    username=dict(type='str'),
                    # no_log prevents the image-retrieval password from being
                    # echoed back in module invocation output or logs
                    password=dict(type='str', no_log=True)
                )
            )
        ),
        supports_check_mode=False
    )

    category = module.params['category']
    command_list = module.params['command']

    # admin credentials used for authentication with the OOB controller
    creds = {'user': module.params['username'],
             'pswd': module.params['password']}

    # user account to add/modify/delete (Accounts category)
    user = {'account_id': module.params['id'],
            'account_username': module.params['new_username'],
            'account_password': module.params['new_password'],
            'account_roleid': module.params['roleid'],
            'account_updatename': module.params['update_username'],
            'account_properties': module.params['account_properties']}

    # timeout in seconds applied to each URL request
    timeout = module.params['timeout']

    # System, Manager or Chassis ID to modify
    resource_id = module.params['resource_id']

    # options for the Update category (SimpleUpdate)
    update_opts = {
        'update_image_uri': module.params['update_image_uri'],
        'update_protocol': module.params['update_protocol'],
        'update_targets': module.params['update_targets'],
        'update_creds': module.params['update_creds']
    }

    # Build root URI
    root_uri = "https://" + module.params['baseuri']
    rf_utils = RedfishUtils(creds, root_uri, timeout, module,
                            resource_id=resource_id, data_modification=True)

    # Check that Category is valid
    if category not in CATEGORY_COMMANDS_ALL:
        module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, CATEGORY_COMMANDS_ALL.keys())))

    # Check that all commands are valid; fail if even one command is invalid
    for cmd in command_list:
        if cmd not in CATEGORY_COMMANDS_ALL[category]:
            module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category])))

    # Organize by Categories / Commands
    if category == "Accounts":
        ACCOUNTS_COMMANDS = {
            "AddUser": rf_utils.add_user,
            "EnableUser": rf_utils.enable_user,
            "DeleteUser": rf_utils.delete_user,
            "DisableUser": rf_utils.disable_user,
            "UpdateUserRole": rf_utils.update_user_role,
            "UpdateUserPassword": rf_utils.update_user_password,
            "UpdateUserName": rf_utils.update_user_name,
            "UpdateAccountServiceProperties": rf_utils.update_accountservice_properties
        }

        # execute only if we find an Account service resource
        result = rf_utils._find_accountservice_resource()
        if result['ret'] is False:
            module.fail_json(msg=to_native(result['msg']))

        for command in command_list:
            result = ACCOUNTS_COMMANDS[command](user)

    elif category == "Systems":
        # execute only if we find a System resource
        result = rf_utils._find_systems_resource()
        if result['ret'] is False:
            module.fail_json(msg=to_native(result['msg']))

        for command in command_list:
            if "Power" in command:
                result = rf_utils.manage_system_power(command)
            elif command == "SetOneTimeBoot":
                result = rf_utils.set_one_time_boot_device(
                    module.params['bootdevice'],
                    module.params['uefi_target'],
                    module.params['boot_next'])

    elif category == "Chassis":
        # execute only if we find a Chassis resource
        result = rf_utils._find_chassis_resource()
        if result['ret'] is False:
            module.fail_json(msg=to_native(result['msg']))

        led_commands = ["IndicatorLedOn", "IndicatorLedOff", "IndicatorLedBlink"]

        # Check if more than one led_command is present; the LED has a single
        # state, so multiple LED commands in one run would be contradictory
        num_led_commands = sum([command in led_commands for command in command_list])
        if num_led_commands > 1:
            result = {'ret': False, 'msg': "Only one IndicatorLed command should be sent at a time."}
        else:
            for command in command_list:
                if command in led_commands:
                    result = rf_utils.manage_indicator_led(command)

    elif category == "Sessions":
        # execute only if we find SessionService resources
        resource = rf_utils._find_sessionservice_resource()
        if resource['ret'] is False:
            module.fail_json(msg=resource['msg'])

        for command in command_list:
            if command == "ClearSessions":
                result = rf_utils.clear_sessions()

    elif category == "Manager":
        MANAGER_COMMANDS = {
            "GracefulRestart": rf_utils.restart_manager_gracefully,
            "ClearLogs": rf_utils.clear_logs
        }

        # execute only if we find a Manager service resource
        result = rf_utils._find_managers_resource()
        if result['ret'] is False:
            module.fail_json(msg=to_native(result['msg']))

        for command in command_list:
            result = MANAGER_COMMANDS[command]()

    elif category == "Update":
        # execute only if we find UpdateService resources
        resource = rf_utils._find_updateservice_resource()
        if resource['ret'] is False:
            module.fail_json(msg=resource['msg'])

        for command in command_list:
            if command == "SimpleUpdate":
                result = rf_utils.simple_update(update_opts)

    # Return data back or fail with proper message
    if result['ret'] is True:
        del result['ret']
        # commands that do not report 'changed' are assumed to have acted
        changed = result.get('changed', True)
        module.exit_json(changed=changed, msg='Action was successful')
    else:
        module.fail_json(msg=to_native(result['msg']))
|
||||
|
||||
|
||||
# Standard Ansible module entry point: run main() only when executed directly.
if __name__ == '__main__':
    main()
|
||||
332
plugins/modules/remote_management/redfish/redfish_config.py
Normal file
332
plugins/modules/remote_management/redfish/redfish_config.py
Normal file
@@ -0,0 +1,332 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright (c) 2017-2018 Dell EMC Inc.
|
||||
# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['preview'],
|
||||
'supported_by': 'community',
|
||||
'metadata_version': '1.1'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: redfish_config
|
||||
short_description: Manages Out-Of-Band controllers using Redfish APIs
|
||||
description:
|
||||
- Builds Redfish URIs locally and sends them to remote OOB controllers to
|
||||
set or update a configuration attribute.
|
||||
- Manages BIOS configuration settings.
|
||||
- Manages OOB controller configuration settings.
|
||||
options:
|
||||
category:
|
||||
required: true
|
||||
description:
|
||||
- Category to execute on OOB controller
|
||||
type: str
|
||||
command:
|
||||
required: true
|
||||
description:
|
||||
- List of commands to execute on OOB controller
|
||||
type: list
|
||||
baseuri:
|
||||
required: true
|
||||
description:
|
||||
- Base URI of OOB controller
|
||||
type: str
|
||||
username:
|
||||
required: true
|
||||
description:
|
||||
- User for authentication with OOB controller
|
||||
type: str
|
||||
password:
|
||||
required: true
|
||||
description:
|
||||
- Password for authentication with OOB controller
|
||||
type: str
|
||||
bios_attribute_name:
|
||||
required: false
|
||||
description:
|
||||
- name of BIOS attr to update (deprecated - use bios_attributes instead)
|
||||
default: 'null'
|
||||
type: str
|
||||
bios_attribute_value:
|
||||
required: false
|
||||
description:
|
||||
- value of BIOS attr to update (deprecated - use bios_attributes instead)
|
||||
default: 'null'
|
||||
type: str
|
||||
bios_attributes:
|
||||
required: false
|
||||
description:
|
||||
- dictionary of BIOS attributes to update
|
||||
default: {}
|
||||
type: dict
|
||||
timeout:
|
||||
description:
|
||||
- Timeout in seconds for URL requests to OOB controller
|
||||
default: 10
|
||||
type: int
|
||||
boot_order:
|
||||
required: false
|
||||
description:
|
||||
- list of BootOptionReference strings specifying the BootOrder
|
||||
default: []
|
||||
type: list
|
||||
network_protocols:
|
||||
required: false
|
||||
description:
|
||||
- setting dict of manager services to update
|
||||
type: dict
|
||||
resource_id:
|
||||
required: false
|
||||
description:
|
||||
- The ID of the System, Manager or Chassis to modify
|
||||
type: str
|
||||
nic_addr:
|
||||
required: false
|
||||
description:
|
||||
- EthernetInterface Address string on OOB controller
|
||||
default: 'null'
|
||||
type: str
|
||||
nic_config:
|
||||
required: false
|
||||
description:
|
||||
- setting dict of EthernetInterface on OOB controller
|
||||
type: dict
|
||||
|
||||
author: "Jose Delarosa (@jose-delarosa)"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Set BootMode to UEFI
|
||||
redfish_config:
|
||||
category: Systems
|
||||
command: SetBiosAttributes
|
||||
resource_id: 437XR1138R2
|
||||
bios_attributes:
|
||||
BootMode: "Uefi"
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
|
||||
- name: Set multiple BootMode attributes
|
||||
redfish_config:
|
||||
category: Systems
|
||||
command: SetBiosAttributes
|
||||
resource_id: 437XR1138R2
|
||||
bios_attributes:
|
||||
BootMode: "Bios"
|
||||
OneTimeBootMode: "Enabled"
|
||||
BootSeqRetry: "Enabled"
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
|
||||
- name: Enable PXE Boot for NIC1 using deprecated options
|
||||
redfish_config:
|
||||
category: Systems
|
||||
command: SetBiosAttributes
|
||||
resource_id: 437XR1138R2
|
||||
bios_attribute_name: PxeDev1EnDis
|
||||
bios_attribute_value: Enabled
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
|
||||
- name: Set BIOS default settings with a timeout of 20 seconds
|
||||
redfish_config:
|
||||
category: Systems
|
||||
command: SetBiosDefaultSettings
|
||||
resource_id: 437XR1138R2
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
timeout: 20
|
||||
|
||||
- name: Set boot order
|
||||
redfish_config:
|
||||
category: Systems
|
||||
command: SetBootOrder
|
||||
boot_order:
|
||||
- Boot0002
|
||||
- Boot0001
|
||||
- Boot0000
|
||||
- Boot0003
|
||||
- Boot0004
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
|
||||
- name: Set boot order to the default
|
||||
redfish_config:
|
||||
category: Systems
|
||||
command: SetDefaultBootOrder
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
|
||||
- name: Set Manager Network Protocols
|
||||
redfish_config:
|
||||
category: Manager
|
||||
command: SetNetworkProtocols
|
||||
network_protocols:
|
||||
SNMP:
|
||||
ProtocolEnabled: True
|
||||
Port: 161
|
||||
HTTP:
|
||||
ProtocolEnabled: False
|
||||
Port: 8080
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
|
||||
- name: Set Manager NIC
|
||||
redfish_config:
|
||||
category: Manager
|
||||
command: SetManagerNic
|
||||
nic_config:
|
||||
DHCPv4:
|
||||
DHCPEnabled: False
|
||||
IPv4StaticAddresses:
|
||||
Address: 192.168.1.3
|
||||
Gateway: 192.168.1.1
|
||||
SubnetMask: 255.255.255.0
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
msg:
|
||||
description: Message with action result or error description
|
||||
returned: always
|
||||
type: str
|
||||
sample: "Action was successful"
|
||||
'''
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
|
||||
# Mapping of supported command names, keyed by Redfish resource category.
# More will be added as module features are expanded.
CATEGORY_COMMANDS_ALL = {
    "Systems": [
        "SetBiosDefaultSettings",
        "SetBiosAttributes",
        "SetBootOrder",
        "SetDefaultBootOrder",
    ],
    "Manager": [
        "SetNetworkProtocols",
        "SetManagerNic",
    ],
}
|
||||
|
||||
|
||||
def main():
    """Entry point for the redfish_config module.

    Validates the requested category/commands against CATEGORY_COMMANDS_ALL,
    builds a RedfishUtils client for the OOB controller, applies each
    configuration command in order and exits with ``changed``/``msg`` on
    success or fails with the error message from the utility layer.
    """
    result = {}
    module = AnsibleModule(
        argument_spec=dict(
            category=dict(required=True, type='str'),
            # elements='str' so Ansible validates/coerces each command name
            command=dict(required=True, type='list', elements='str'),
            baseuri=dict(required=True, type='str'),
            username=dict(required=True, type='str'),
            password=dict(required=True, type='str', no_log=True),
            # 'null' sentinel means "option not supplied" (deprecated options)
            bios_attribute_name=dict(type='str', default='null'),
            bios_attribute_value=dict(type='str', default='null'),
            bios_attributes=dict(type='dict', default={}),
            timeout=dict(type='int', default=10),
            boot_order=dict(type='list', elements='str', default=[]),
            network_protocols=dict(
                type='dict',
                default={}
            ),
            resource_id=dict(type='str'),
            nic_addr=dict(type='str', default='null'),
            nic_config=dict(
                type='dict',
                default={}
            )
        ),
        supports_check_mode=False
    )

    category = module.params['category']
    command_list = module.params['command']

    # admin credentials used for authentication with the OOB controller
    creds = {'user': module.params['username'],
             'pswd': module.params['password']}

    # timeout in seconds applied to each URL request
    timeout = module.params['timeout']

    # BIOS attributes to update; fold the deprecated single name/value pair
    # into the dict form so both interfaces keep working
    bios_attributes = module.params['bios_attributes']
    if module.params['bios_attribute_name'] != 'null':
        bios_attributes[module.params['bios_attribute_name']] = module.params[
            'bios_attribute_value']
        module.deprecate(msg='The bios_attribute_name/bios_attribute_value '
                             'options are deprecated. Use bios_attributes instead',
                         version='2.14')

    # boot order (list of BootOptionReference strings)
    boot_order = module.params['boot_order']

    # System, Manager or Chassis ID to modify
    resource_id = module.params['resource_id']

    # manager NIC address and settings
    nic_addr = module.params['nic_addr']
    nic_config = module.params['nic_config']

    # Build root URI
    root_uri = "https://" + module.params['baseuri']
    rf_utils = RedfishUtils(creds, root_uri, timeout, module,
                            resource_id=resource_id, data_modification=True)

    # Check that Category is valid
    if category not in CATEGORY_COMMANDS_ALL:
        module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, CATEGORY_COMMANDS_ALL.keys())))

    # Check that all commands are valid; fail if even one command is invalid
    for cmd in command_list:
        if cmd not in CATEGORY_COMMANDS_ALL[category]:
            module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category])))

    # Organize by Categories / Commands
    if category == "Systems":
        # execute only if we find a System resource
        result = rf_utils._find_systems_resource()
        if result['ret'] is False:
            module.fail_json(msg=to_native(result['msg']))

        for command in command_list:
            if command == "SetBiosDefaultSettings":
                result = rf_utils.set_bios_default_settings()
            elif command == "SetBiosAttributes":
                result = rf_utils.set_bios_attributes(bios_attributes)
            elif command == "SetBootOrder":
                result = rf_utils.set_boot_order(boot_order)
            elif command == "SetDefaultBootOrder":
                result = rf_utils.set_default_boot_order()

    elif category == "Manager":
        # execute only if we find a Manager service resource
        result = rf_utils._find_managers_resource()
        if result['ret'] is False:
            module.fail_json(msg=to_native(result['msg']))

        for command in command_list:
            if command == "SetNetworkProtocols":
                result = rf_utils.set_network_protocols(module.params['network_protocols'])
            elif command == "SetManagerNic":
                result = rf_utils.set_manager_nic(nic_addr, nic_config)

    # Return data back or fail with proper message
    if result['ret'] is True:
        module.exit_json(changed=result['changed'], msg=to_native(result['msg']))
    else:
        module.fail_json(msg=to_native(result['msg']))
|
||||
|
||||
|
||||
# Standard Ansible module entry point: run main() only when executed directly.
if __name__ == '__main__':
    main()
|
||||
1
plugins/modules/remote_management/redfish/redfish_facts.py
Symbolic link
1
plugins/modules/remote_management/redfish/redfish_facts.py
Symbolic link
@@ -0,0 +1 @@
|
||||
redfish_info.py
|
||||
469
plugins/modules/remote_management/redfish/redfish_info.py
Normal file
469
plugins/modules/remote_management/redfish/redfish_info.py
Normal file
@@ -0,0 +1,469 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright (c) 2017-2018 Dell EMC Inc.
|
||||
# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['preview'],
|
||||
'supported_by': 'community',
|
||||
'metadata_version': '1.1'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: redfish_info
|
||||
short_description: Manages Out-Of-Band controllers using Redfish APIs
|
||||
description:
|
||||
- Builds Redfish URIs locally and sends them to remote OOB controllers to
|
||||
get information back.
|
||||
- Information retrieved is placed in a location specified by the user.
|
||||
- This module was called C(redfish_facts) before Ansible 2.9, returning C(ansible_facts).
|
||||
Note that the M(redfish_info) module no longer returns C(ansible_facts)!
|
||||
options:
|
||||
category:
|
||||
required: false
|
||||
description:
|
||||
- List of categories to execute on OOB controller
|
||||
default: ['Systems']
|
||||
type: list
|
||||
command:
|
||||
required: false
|
||||
description:
|
||||
- List of commands to execute on OOB controller
|
||||
type: list
|
||||
baseuri:
|
||||
required: true
|
||||
description:
|
||||
- Base URI of OOB controller
|
||||
type: str
|
||||
username:
|
||||
required: true
|
||||
description:
|
||||
- User for authentication with OOB controller
|
||||
type: str
|
||||
password:
|
||||
required: true
|
||||
description:
|
||||
- Password for authentication with OOB controller
|
||||
type: str
|
||||
timeout:
|
||||
description:
|
||||
- Timeout in seconds for URL requests to OOB controller
|
||||
default: 10
|
||||
type: int
|
||||
|
||||
author: "Jose Delarosa (@jose-delarosa)"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get CPU inventory
|
||||
redfish_info:
|
||||
category: Systems
|
||||
command: GetCpuInventory
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
register: result
|
||||
- debug:
|
||||
msg: "{{ result.redfish_facts.cpu.entries | to_nice_json }}"
|
||||
|
||||
- name: Get CPU model
|
||||
redfish_info:
|
||||
category: Systems
|
||||
command: GetCpuInventory
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
register: result
|
||||
- debug:
|
||||
msg: "{{ result.redfish_facts.cpu.entries.0.Model }}"
|
||||
|
||||
- name: Get memory inventory
|
||||
redfish_info:
|
||||
category: Systems
|
||||
command: GetMemoryInventory
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
register: result
|
||||
|
||||
- name: Get fan inventory with a timeout of 20 seconds
|
||||
redfish_info:
|
||||
category: Chassis
|
||||
command: GetFanInventory
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
timeout: 20
|
||||
register: result
|
||||
|
||||
- name: Get Virtual Media information
|
||||
redfish_info:
|
||||
category: Manager
|
||||
command: GetVirtualMedia
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
register: result
|
||||
- debug:
|
||||
msg: "{{ result.redfish_facts.virtual_media.entries | to_nice_json }}"
|
||||
|
||||
- name: Get Volume Inventory
|
||||
redfish_info:
|
||||
category: Systems
|
||||
command: GetVolumeInventory
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
register: result
|
||||
- debug:
|
||||
msg: "{{ result.redfish_facts.volume.entries | to_nice_json }}"
|
||||
|
||||
- name: Get Session information
|
||||
redfish_info:
|
||||
category: Sessions
|
||||
command: GetSessions
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
register: result
|
||||
- debug:
|
||||
msg: "{{ result.redfish_facts.session.entries | to_nice_json }}"
|
||||
|
||||
- name: Get default inventory information
|
||||
redfish_info:
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
register: result
|
||||
- debug:
|
||||
msg: "{{ result.redfish_facts | to_nice_json }}"
|
||||
|
||||
- name: Get several inventories
|
||||
redfish_info:
|
||||
category: Systems
|
||||
command: GetNicInventory,GetBiosAttributes
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
|
||||
- name: Get default system inventory and user information
|
||||
redfish_info:
|
||||
category: Systems,Accounts
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
|
||||
- name: Get default system, user and firmware information
|
||||
redfish_info:
|
||||
category: ["Systems", "Accounts", "Update"]
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
|
||||
- name: Get Manager NIC inventory information
|
||||
redfish_info:
|
||||
category: Manager
|
||||
command: GetManagerNicInventory
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
|
||||
- name: Get boot override information
|
||||
redfish_info:
|
||||
category: Systems
|
||||
command: GetBootOverride
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
|
||||
- name: Get chassis inventory
|
||||
redfish_info:
|
||||
category: Chassis
|
||||
command: GetChassisInventory
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
|
||||
- name: Get all information available in the Manager category
|
||||
redfish_info:
|
||||
category: Manager
|
||||
command: all
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
|
||||
- name: Get firmware update capability information
|
||||
redfish_info:
|
||||
category: Update
|
||||
command: GetFirmwareUpdateCapabilities
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
|
||||
- name: Get firmware inventory
|
||||
redfish_info:
|
||||
category: Update
|
||||
command: GetFirmwareInventory
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
|
||||
- name: Get software inventory
|
||||
redfish_info:
|
||||
category: Update
|
||||
command: GetSoftwareInventory
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
|
||||
- name: Get Manager Services
|
||||
redfish_info:
|
||||
category: Manager
|
||||
command: GetNetworkProtocols
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
|
||||
- name: Get all information available in all categories
|
||||
redfish_info:
|
||||
category: all
|
||||
command: all
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
|
||||
- name: Get system health report
|
||||
redfish_info:
|
||||
category: Systems
|
||||
command: GetHealthReport
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
|
||||
- name: Get chassis health report
|
||||
redfish_info:
|
||||
category: Chassis
|
||||
command: GetHealthReport
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
|
||||
- name: Get manager health report
|
||||
redfish_info:
|
||||
category: Manager
|
||||
command: GetHealthReport
|
||||
baseuri: "{{ baseuri }}"
|
||||
username: "{{ username }}"
|
||||
password: "{{ password }}"
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
result:
|
||||
description: different results depending on task
|
||||
returned: always
|
||||
type: dict
|
||||
sample: List of CPUs on system
|
||||
'''
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
|
||||
|
||||
# Complete set of info-gathering commands supported for each Redfish category.
CATEGORY_COMMANDS_ALL = {
    "Systems": [
        "GetSystemInventory", "GetPsuInventory", "GetCpuInventory",
        "GetMemoryInventory", "GetNicInventory", "GetHealthReport",
        "GetStorageControllerInventory", "GetDiskInventory", "GetVolumeInventory",
        "GetBiosAttributes", "GetBootOrder", "GetBootOverride",
    ],
    "Chassis": [
        "GetFanInventory", "GetPsuInventory", "GetChassisPower",
        "GetChassisThermals", "GetChassisInventory", "GetHealthReport",
    ],
    "Accounts": ["ListUsers"],
    "Sessions": ["GetSessions"],
    "Update": ["GetFirmwareInventory", "GetFirmwareUpdateCapabilities", "GetSoftwareInventory"],
    "Manager": [
        "GetManagerNicInventory", "GetVirtualMedia", "GetLogs",
        "GetNetworkProtocols", "GetHealthReport",
    ],
}

# Command executed for a category when the user does not specify one.
CATEGORY_COMMANDS_DEFAULT = {
    "Systems": "GetSystemInventory",
    "Chassis": "GetFanInventory",
    "Accounts": "ListUsers",
    "Update": "GetFirmwareInventory",
    "Sessions": "GetSessions",
    "Manager": "GetManagerNicInventory",
}
|
||||
|
||||
|
||||
def main():
    """Gather the requested Redfish information and return it as ``redfish_facts``.

    Expands the ``category`` and ``command`` parameters (including the special
    value ``all``), validates them against ``CATEGORY_COMMANDS_ALL``, then
    dispatches every (category, command) pair to the matching RedfishUtils
    getter. Fails the module on the first invalid category/command or on a
    missing Redfish resource.
    """
    result = {}
    module = AnsibleModule(
        argument_spec=dict(
            category=dict(type='list', default=['Systems']),
            command=dict(type='list'),
            baseuri=dict(required=True),
            username=dict(required=True),
            password=dict(required=True, no_log=True),
            timeout=dict(type='int', default=10)
        ),
        supports_check_mode=False
    )
    # Pre-2.9 alias: when invoked as 'redfish_facts', keep returning
    # ansible_facts for backward compatibility.
    is_old_facts = module._name == 'redfish_facts'
    if is_old_facts:
        module.deprecate("The 'redfish_facts' module has been renamed to 'redfish_info', "
                         "and the renamed one no longer returns ansible_facts", version='2.13')

    # admin credentials used for authentication
    creds = {'user': module.params['username'],
             'pswd': module.params['password']}

    # timeout in seconds for each URL request
    timeout = module.params['timeout']

    # Build root URI
    root_uri = "https://" + module.params['baseuri']
    rf_utils = RedfishUtils(creds, root_uri, timeout, module)

    # Build category list; 'all' expands to every supported category.
    if "all" in module.params['category']:
        category_list = list(CATEGORY_COMMANDS_ALL)
    else:
        # one or more categories specified
        category_list = module.params['category']

    for category in category_list:
        # Fail if even one category given is invalid
        if category not in CATEGORY_COMMANDS_ALL:
            module.fail_json(msg="Invalid Category: %s" % category)

        # Build the command list for this category.
        if not module.params['command']:
            # No command specified --> use the category default.
            command_list = [CATEGORY_COMMANDS_DEFAULT[category]]
        elif "all" in module.params['command']:
            command_list = list(CATEGORY_COMMANDS_ALL[category])
        else:
            # one or more commands; fail if even one given command is invalid
            command_list = module.params['command']
            for cmd in command_list:
                if cmd not in CATEGORY_COMMANDS_ALL[category]:
                    module.fail_json(msg="Invalid Command: %s" % cmd)

        # Organize by Categories / Commands
        if category == "Systems":
            # execute only if we find a Systems resource
            resource = rf_utils._find_systems_resource()
            if resource['ret'] is False:
                module.fail_json(msg=resource['msg'])

            for command in command_list:
                if command == "GetSystemInventory":
                    result["system"] = rf_utils.get_multi_system_inventory()
                elif command == "GetCpuInventory":
                    result["cpu"] = rf_utils.get_multi_cpu_inventory()
                elif command == "GetMemoryInventory":
                    result["memory"] = rf_utils.get_multi_memory_inventory()
                elif command == "GetNicInventory":
                    result["nic"] = rf_utils.get_multi_nic_inventory(category)
                elif command == "GetStorageControllerInventory":
                    result["storage_controller"] = rf_utils.get_multi_storage_controller_inventory()
                elif command == "GetDiskInventory":
                    result["disk"] = rf_utils.get_multi_disk_inventory()
                elif command == "GetVolumeInventory":
                    result["volume"] = rf_utils.get_multi_volume_inventory()
                elif command == "GetBiosAttributes":
                    result["bios_attribute"] = rf_utils.get_multi_bios_attributes()
                elif command == "GetBootOrder":
                    result["boot_order"] = rf_utils.get_multi_boot_order()
                elif command == "GetBootOverride":
                    result["boot_override"] = rf_utils.get_multi_boot_override()
                elif command == "GetHealthReport":
                    result["health_report"] = rf_utils.get_multi_system_health_report()

        elif category == "Chassis":
            # execute only if we find Chassis resource
            resource = rf_utils._find_chassis_resource()
            if resource['ret'] is False:
                module.fail_json(msg=resource['msg'])

            for command in command_list:
                if command == "GetFanInventory":
                    result["fan"] = rf_utils.get_fan_inventory()
                elif command == "GetPsuInventory":
                    result["psu"] = rf_utils.get_psu_inventory()
                elif command == "GetChassisThermals":
                    result["thermals"] = rf_utils.get_chassis_thermals()
                elif command == "GetChassisPower":
                    result["chassis_power"] = rf_utils.get_chassis_power()
                elif command == "GetChassisInventory":
                    result["chassis"] = rf_utils.get_chassis_inventory()
                elif command == "GetHealthReport":
                    result["health_report"] = rf_utils.get_multi_chassis_health_report()

        elif category == "Accounts":
            # execute only if we find an Account service resource
            resource = rf_utils._find_accountservice_resource()
            if resource['ret'] is False:
                module.fail_json(msg=resource['msg'])

            for command in command_list:
                if command == "ListUsers":
                    result["user"] = rf_utils.list_users()

        elif category == "Update":
            # execute only if we find UpdateService resources
            resource = rf_utils._find_updateservice_resource()
            if resource['ret'] is False:
                module.fail_json(msg=resource['msg'])

            for command in command_list:
                if command == "GetFirmwareInventory":
                    result["firmware"] = rf_utils.get_firmware_inventory()
                elif command == "GetSoftwareInventory":
                    result["software"] = rf_utils.get_software_inventory()
                elif command == "GetFirmwareUpdateCapabilities":
                    result["firmware_update_capabilities"] = rf_utils.get_firmware_update_capabilities()

        elif category == "Sessions":
            # execute only if we find SessionService resources
            resource = rf_utils._find_sessionservice_resource()
            if resource['ret'] is False:
                module.fail_json(msg=resource['msg'])

            for command in command_list:
                if command == "GetSessions":
                    result["session"] = rf_utils.get_sessions()

        elif category == "Manager":
            # execute only if we find a Manager service resource
            resource = rf_utils._find_managers_resource()
            if resource['ret'] is False:
                module.fail_json(msg=resource['msg'])

            for command in command_list:
                if command == "GetManagerNicInventory":
                    result["manager_nics"] = rf_utils.get_multi_nic_inventory(category)
                elif command == "GetVirtualMedia":
                    result["virtual_media"] = rf_utils.get_multi_virtualmedia()
                elif command == "GetLogs":
                    result["log"] = rf_utils.get_logs()
                elif command == "GetNetworkProtocols":
                    result["network_protocols"] = rf_utils.get_network_protocols()
                elif command == "GetHealthReport":
                    result["health_report"] = rf_utils.get_multi_manager_health_report()

    # Return data back
    if is_old_facts:
        module.exit_json(ansible_facts=dict(redfish_facts=result))
    else:
        module.exit_json(redfish_facts=result)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
269
plugins/modules/remote_management/stacki/stacki_host.py
Normal file
269
plugins/modules/remote_management/stacki/stacki_host.py
Normal file
@@ -0,0 +1,269 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2016, Hugh Ma <Hugh.Ma@flextronics.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: stacki_host
|
||||
short_description: Add or remove host to stacki front-end
|
||||
description:
|
||||
- Use this module to add or remove hosts to a stacki front-end via API.
|
||||
- U(https://github.com/StackIQ/stacki)
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Name of the host to be added to Stacki.
|
||||
required: True
|
||||
stacki_user:
|
||||
description:
|
||||
- Username for authenticating with Stacki API.
  If not specified, the environment variable C(stacki_user) is used instead.
|
||||
required: True
|
||||
stacki_password:
|
||||
description:
|
||||
- Password for authenticating with Stacki API, but if not
|
||||
specified, the environment variable C(stacki_password) is used instead.
|
||||
required: True
|
||||
stacki_endpoint:
|
||||
description:
|
||||
- URL for the Stacki API Endpoint.
|
||||
required: True
|
||||
prim_intf_mac:
|
||||
description:
|
||||
- MAC Address for the primary PXE boot network interface.
|
||||
prim_intf_ip:
|
||||
description:
|
||||
- IP Address for the primary network interface.
|
||||
prim_intf:
|
||||
description:
|
||||
- Name of the primary network interface.
|
||||
force_install:
|
||||
description:
|
||||
- Set value to True to force node into install state if it already exists in stacki.
|
||||
type: bool
|
||||
author:
|
||||
- Hugh Ma (@bbyhuy) <Hugh.Ma@flextronics.com>
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Add a host named test-1
|
||||
stacki_host:
|
||||
name: test-1
|
||||
stacki_user: usr
|
||||
stacki_password: pwd
|
||||
stacki_endpoint: url
|
||||
prim_intf_mac: mac_addr
|
||||
prim_intf_ip: x.x.x.x
|
||||
prim_intf: eth0
|
||||
|
||||
- name: Remove a host named test-1
|
||||
stacki_host:
|
||||
name: test-1
|
||||
stacki_user: usr
|
||||
stacki_password: pwd
|
||||
stacki_endpoint: url
|
||||
state: absent
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
changed:
|
||||
description: response to whether or not the api call completed successfully
|
||||
returned: always
|
||||
type: bool
|
||||
sample: true
|
||||
|
||||
stdout:
|
||||
description: the set of responses from the commands
|
||||
returned: always
|
||||
type: list
|
||||
sample: ['...', '...']
|
||||
|
||||
stdout_lines:
|
||||
description: the value of stdout split into a list
|
||||
returned: always
|
||||
type: list
|
||||
sample: [['...', '...'], ['...'], ['...']]
|
||||
'''
|
||||
|
||||
import json
|
||||
import os
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.six.moves.urllib.parse import urlencode
|
||||
from ansible.module_utils.urls import fetch_url
|
||||
|
||||
|
||||
class StackiHost(object):
    """Minimal client for the Stacki front-end REST API.

    Authentication happens in the constructor: an initial GET against the
    endpoint fetches a CSRF token, then a POST to ``<endpoint>/login`` yields
    the final CSRF token and session id used on every subsequent request.
    """

    def __init__(self, module):
        self.module = module
        self.hostname = module.params['name']
        self.rack = module.params['rack']
        self.rank = module.params['rank']
        self.appliance = module.params['appliance']
        self.prim_intf = module.params['prim_intf']
        self.prim_intf_ip = module.params['prim_intf_ip']
        self.network = module.params['network']
        self.prim_intf_mac = module.params['prim_intf_mac']
        self.endpoint = module.params['stacki_endpoint']

        auth_creds = {'USERNAME': module.params['stacki_user'],
                      'PASSWORD': module.params['stacki_password']}

        # Get initial CSRF token
        cred_a = self.do_request(self.module, self.endpoint, method="GET")
        cookie_a = cred_a.headers.get('Set-Cookie').split(';')
        init_csrftoken = None
        for c in cookie_a:
            if "csrftoken" in c:
                init_csrftoken = c.replace("csrftoken=", "")
                init_csrftoken = init_csrftoken.rstrip("\r\n")
                break

        # Make header dictionary with initial CSRF token
        header = {'csrftoken': init_csrftoken, 'X-CSRFToken': init_csrftoken,
                  'Content-type': 'application/x-www-form-urlencoded', 'Cookie': cred_a.headers.get('Set-Cookie')}

        # Endpoint to get final authentication header
        login_endpoint = self.endpoint + "/login"

        # Get final CSRF token and session id
        login_req = self.do_request(self.module, login_endpoint, headers=header,
                                    payload=urlencode(auth_creds), method='POST')

        cookie_f = login_req.headers.get('Set-Cookie').split(';')
        csrftoken = None
        # Initialize so a missing 'sessionid' cookie yields None instead of a
        # NameError when building self.header below.
        sessionid = None
        for f in cookie_f:
            if "csrftoken" in f:
                csrftoken = f.replace("csrftoken=", "")
            if "sessionid" in f:
                # Bug fix: this previously parsed the stale loop variable 'c'
                # left over from the first cookie scan instead of the current
                # cookie 'f', so the session id was never extracted correctly.
                sessionid = f.split("sessionid=", 1)[-1]
                sessionid = sessionid.rstrip("\r\n")

        self.header = {'csrftoken': csrftoken,
                       'X-CSRFToken': csrftoken,
                       'sessionid': sessionid,
                       'Content-type': 'application/json',
                       'Cookie': login_req.headers.get('Set-Cookie')}

    def do_request(self, module, url, payload=None, headers=None, method=None):
        """Issue a request against *url*; fail the module on any non-200 status."""
        res, info = fetch_url(module, url, data=payload, headers=headers, method=method)

        if info['status'] != 200:
            self.module.fail_json(changed=False, msg=info['msg'])

        return res

    def stack_check_host(self):
        """Return True if this host is already registered with the front-end."""
        res = self.do_request(self.module, self.endpoint, payload=json.dumps({"cmd": "list host"}), headers=self.header, method="POST")
        return self.hostname in res.read()

    def stack_sync(self):
        """Push the pending configuration out to the cluster."""
        self.do_request(self.module, self.endpoint, payload=json.dumps({"cmd": "sync config"}), headers=self.header, method="POST")
        self.do_request(self.module, self.endpoint, payload=json.dumps({"cmd": "sync host config"}), headers=self.header, method="POST")

    def stack_force_install(self, result):
        """Put an already-registered host back into the install state."""
        data = {'cmd': "set host boot {0} action=install".format(self.hostname)}
        self.do_request(self.module, self.endpoint, payload=json.dumps(data), headers=self.header, method="POST")

        self.stack_sync()

        result['changed'] = True
        result['stdout'] = "api call successful"

    def stack_add(self, result):
        """Register a new host with the front-end."""
        data = {'cmd': "add host {0} rack={1} rank={2} appliance={3}".format(
            self.hostname, self.rack, self.rank, self.appliance)}
        self.do_request(self.module, self.endpoint, payload=json.dumps(data), headers=self.header, method="POST")

        self.stack_sync()

        # Bug fix: a successful 'add host' is a change; this used to leave the
        # local flag at False and report changed=False unconditionally.
        result['changed'] = True
        result['stdout'] = "api call successful"

    def stack_remove(self, result):
        """Delete a host from the front-end."""
        data = {'cmd': "remove host {0}".format(self.hostname)}
        self.do_request(self.module, self.endpoint, payload=json.dumps(data), headers=self.header, method="POST")

        self.stack_sync()

        result['changed'] = True
        result['stdout'] = "api call successful"
|
||||
|
||||
|
||||
def main():
    """Module entry point: add, remove, or force-reinstall a host on the Stacki front-end."""
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(type='str', default='present', choices=['absent', 'present']),
            name=dict(type='str', required=True),
            rack=dict(type='int', default=0),
            rank=dict(type='int', default=0),
            appliance=dict(type='str', default='backend'),
            prim_intf=dict(type='str'),
            prim_intf_ip=dict(type='str'),
            network=dict(type='str', default='private'),
            prim_intf_mac=dict(type='str'),
            # NOTE(review): required=True makes these env-var defaults
            # unreachable (Ansible demands an explicit value before defaults
            # apply), which contradicts the documented fallback behaviour --
            # confirm intent; env_fallback would express it correctly.
            stacki_user=dict(type='str', required=True, default=os.environ.get('stacki_user')),
            stacki_password=dict(type='str', required=True, default=os.environ.get('stacki_password'), no_log=True),
            stacki_endpoint=dict(type='str', required=True, default=os.environ.get('stacki_endpoint')),
            force_install=dict(type='bool', default=False),
        ),
        supports_check_mode=False,
    )

    result = {'changed': False}

    stacki = StackiHost(module)
    host_exists = stacki.stack_check_host()

    state = module.params['state']
    force_install = module.params['force_install']

    if state == 'present' and host_exists and force_install:
        # Host already registered: push it back into the install state.
        stacki.stack_force_install(result)
    elif state == 'present' and host_exists and not force_install:
        # Host already registered and no re-install requested: do nothing.
        result['stdout'] = "{0} already exists. Set 'force_install' to true to bootstrap"\
            .format(module.params['name'])
    elif state == 'present' and not host_exists:
        # Registering a new host needs the full interface description.
        missing_params = [param for param in ('appliance', 'prim_intf',
                                              'prim_intf_ip', 'network', 'prim_intf_mac')
                          if not module.params[param]]
        if missing_params:
            module.fail_json(msg="missing required arguments: {0}".format(missing_params))

        stacki.stack_add(result)
    elif state == 'absent' and host_exists:
        # Host exists and state is absent: remove it.
        stacki.stack_remove(result)

    module.exit_json(**result)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
132
plugins/modules/remote_management/wakeonlan.py
Normal file
132
plugins/modules/remote_management/wakeonlan.py
Normal file
@@ -0,0 +1,132 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# (c) 2016, Dag Wieers <dag@wieers.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: wakeonlan
|
||||
short_description: Send a magic Wake-on-LAN (WoL) broadcast packet
|
||||
description:
|
||||
- The C(wakeonlan) module sends magic Wake-on-LAN (WoL) broadcast packets.
|
||||
options:
|
||||
mac:
|
||||
description:
|
||||
- MAC address to send Wake-on-LAN broadcast packet for.
|
||||
required: true
|
||||
broadcast:
|
||||
description:
|
||||
- Network broadcast address to use for broadcasting magic Wake-on-LAN packet.
|
||||
default: 255.255.255.255
|
||||
port:
|
||||
description:
|
||||
- UDP port to use for magic Wake-on-LAN packet.
|
||||
default: 7
|
||||
todo:
|
||||
- Add arping support to check whether the system is up (before and after)
|
||||
- Enable check-mode support (when we have arping support)
|
||||
- Does not have SecureOn password support
|
||||
notes:
|
||||
- This module sends a magic packet, without knowing whether it worked
|
||||
- Only works if the target system was properly configured for Wake-on-LAN (in the BIOS and/or the OS)
|
||||
- Some BIOSes have a different (configurable) Wake-on-LAN boot order (i.e. PXE first).
|
||||
seealso:
|
||||
- module: community.windows.win_wakeonlan
|
||||
author:
|
||||
- Dag Wieers (@dagwieers)
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Send a magic Wake-on-LAN packet to 00:00:5E:00:53:66
|
||||
wakeonlan:
|
||||
mac: '00:00:5E:00:53:66'
|
||||
broadcast: 192.0.2.23
|
||||
delegate_to: localhost
|
||||
|
||||
- wakeonlan:
|
||||
mac: 00:00:5E:00:53:66
|
||||
port: 9
|
||||
delegate_to: localhost
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
# Default return values
|
||||
'''
|
||||
import socket
|
||||
import struct
|
||||
import traceback
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
|
||||
def wakeonlan(module, mac, broadcast, port):
    """Build and broadcast a magic Wake-on-LAN packet for *mac*.

    The packet (six 0xFF bytes followed by the repeated MAC) is sent as a UDP
    datagram to *broadcast*:*port*; in check mode the packet is built but not
    sent. Invalid MAC addresses fail the module.
    """

    original_mac = mac

    # Drop the separator character (e.g. ':' or '-') if the MAC contains one.
    if len(mac) == 12 + 5:
        separator = mac[2]
        mac = mac.replace(separator, '')

    # A valid MAC is exactly 12 hexadecimal digits once separators are gone.
    if len(mac) != 12:
        module.fail_json(msg="Incorrect MAC address length: %s" % original_mac)

    # The digits must parse as hexadecimal, otherwise the address is bogus.
    try:
        int(mac, 16)
    except ValueError:
        module.fail_json(msg="Incorrect MAC address format: %s" % original_mac)

    # Assemble the magic packet payload from the hex string, two digits per byte.
    hex_blob = ''.join(['FFFFFFFFFFFF', mac * 20])
    data = b''.join(struct.pack('B', int(hex_blob[pos:pos + 2], 16))
                    for pos in range(0, len(hex_blob), 2))

    # Broadcast the payload over UDP.
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)

    if not module.check_mode:
        try:
            sock.sendto(data, (broadcast, port))
        except socket.error as e:
            sock.close()
            module.fail_json(msg=to_native(e), exception=traceback.format_exc())

    sock.close()
|
||||
|
||||
|
||||
def main():
    """Module entry point: parse arguments and broadcast the WoL packet."""
    module = AnsibleModule(
        argument_spec=dict(
            mac=dict(type='str', required=True),
            broadcast=dict(type='str', default='255.255.255.255'),
            port=dict(type='int', default=7),
        ),
        supports_check_mode=True,
    )

    params = module.params
    wakeonlan(module, params['mac'], params['broadcast'], params['port'])

    # Fire-and-forget: delivery cannot be verified, so always report changed.
    module.exit_json(changed=True)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
Reference in New Issue
Block a user