mirror of
https://github.com/ansible-collections/community.general.git
synced 2026-05-07 22:02:50 +00:00
Relocating extras into lib/ansible/modules/ after merge
This commit is contained in:
committed by
Matt Clay
parent
c65ba07d2c
commit
011ea55a8f
0
lib/ansible/modules/cloud/misc/__init__.py
Normal file
0
lib/ansible/modules/cloud/misc/__init__.py
Normal file
527
lib/ansible/modules/cloud/misc/ovirt.py
Normal file
527
lib/ansible/modules/cloud/misc/ovirt.py
Normal file
@@ -0,0 +1,527 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
# (c) 2013, Vincent Van der Kussen <vincent at vanderkussen.org>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['preview'],
|
||||
'supported_by': 'community',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: ovirt
|
||||
author: "Vincent Van der Kussen (@vincentvdk)"
|
||||
short_description: oVirt/RHEV platform management
|
||||
description:
|
||||
- allows you to create new instances, either from scratch or an image, in addition to deleting or stopping instances on the oVirt/RHEV platform
|
||||
version_added: "1.4"
|
||||
options:
|
||||
user:
|
||||
description:
|
||||
- the user to authenticate with
|
||||
default: null
|
||||
required: true
|
||||
aliases: []
|
||||
url:
|
||||
description:
|
||||
- the url of the oVirt instance
|
||||
default: null
|
||||
required: true
|
||||
aliases: []
|
||||
instance_name:
|
||||
description:
|
||||
- the name of the instance to use
|
||||
default: null
|
||||
required: true
|
||||
aliases: [ vmname ]
|
||||
password:
|
||||
description:
|
||||
- password of the user to authenticate with
|
||||
default: null
|
||||
required: true
|
||||
aliases: []
|
||||
image:
|
||||
description:
|
||||
- template to use for the instance
|
||||
default: null
|
||||
required: false
|
||||
aliases: []
|
||||
resource_type:
|
||||
description:
|
||||
- whether you want to deploy an image or create an instance from scratch.
|
||||
default: null
|
||||
required: false
|
||||
aliases: []
|
||||
choices: [ 'new', 'template' ]
|
||||
zone:
|
||||
description:
|
||||
- deploy the image to this oVirt cluster
|
||||
default: null
|
||||
required: false
|
||||
aliases: []
|
||||
instance_disksize:
|
||||
description:
|
||||
- size of the instance's disk in GB
|
||||
default: null
|
||||
required: false
|
||||
aliases: [ vm_disksize]
|
||||
instance_cpus:
|
||||
description:
|
||||
- the instance's number of cpu's
|
||||
default: 1
|
||||
required: false
|
||||
aliases: [ vmcpus ]
|
||||
instance_nic:
|
||||
description:
|
||||
- name of the network interface in oVirt/RHEV
|
||||
default: null
|
||||
required: false
|
||||
aliases: [ vmnic ]
|
||||
instance_network:
|
||||
description:
|
||||
- the logical network the machine should belong to
|
||||
default: rhevm
|
||||
required: false
|
||||
aliases: [ vmnetwork ]
|
||||
instance_mem:
|
||||
description:
|
||||
- the instance's amount of memory in MB
|
||||
default: null
|
||||
required: false
|
||||
aliases: [ vmmem ]
|
||||
instance_type:
|
||||
description:
|
||||
- define if the instance is a server or desktop
|
||||
default: server
|
||||
required: false
|
||||
aliases: [ vmtype ]
|
||||
choices: [ 'server', 'desktop' ]
|
||||
disk_alloc:
|
||||
description:
|
||||
- define if disk is thin or preallocated
|
||||
default: thin
|
||||
required: false
|
||||
aliases: []
|
||||
choices: [ 'thin', 'preallocated' ]
|
||||
disk_int:
|
||||
description:
|
||||
- interface type of the disk
|
||||
default: virtio
|
||||
required: false
|
||||
aliases: []
|
||||
choices: [ 'virtio', 'ide' ]
|
||||
instance_os:
|
||||
description:
|
||||
- type of Operating System
|
||||
default: null
|
||||
required: false
|
||||
aliases: [ vmos ]
|
||||
instance_cores:
|
||||
description:
|
||||
- define the instance's number of cores
|
||||
default: 1
|
||||
required: false
|
||||
aliases: [ vmcores ]
|
||||
sdomain:
|
||||
description:
|
||||
- the Storage Domain where you want to create the instance's disk on.
|
||||
default: null
|
||||
required: false
|
||||
aliases: []
|
||||
region:
|
||||
description:
|
||||
- the oVirt/RHEV datacenter where you want to deploy to
|
||||
default: null
|
||||
required: false
|
||||
aliases: []
|
||||
instance_dns:
|
||||
description:
|
||||
- define the instance's Primary DNS server
|
||||
required: false
|
||||
aliases: [ dns ]
|
||||
version_added: "2.1"
|
||||
instance_domain:
|
||||
description:
|
||||
- define the instance's Domain
|
||||
required: false
|
||||
aliases: [ domain ]
|
||||
version_added: "2.1"
|
||||
instance_hostname:
|
||||
description:
|
||||
- define the instance's Hostname
|
||||
required: false
|
||||
aliases: [ hostname ]
|
||||
version_added: "2.1"
|
||||
instance_ip:
|
||||
description:
|
||||
- define the instance's IP
|
||||
required: false
|
||||
aliases: [ ip ]
|
||||
version_added: "2.1"
|
||||
instance_netmask:
|
||||
description:
|
||||
- define the instance's Netmask
|
||||
required: false
|
||||
aliases: [ netmask ]
|
||||
version_added: "2.1"
|
||||
instance_rootpw:
|
||||
description:
|
||||
- define the instance's Root password
|
||||
required: false
|
||||
aliases: [ rootpw ]
|
||||
version_added: "2.1"
|
||||
instance_key:
|
||||
description:
|
||||
- define the instance's Authorized key
|
||||
required: false
|
||||
aliases: [ key ]
|
||||
version_added: "2.1"
|
||||
state:
|
||||
description:
|
||||
- create, terminate or remove instances
|
||||
default: 'present'
|
||||
required: false
|
||||
aliases: []
|
||||
choices: ['present', 'absent', 'shutdown', 'started', 'restarted']
|
||||
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- "ovirt-engine-sdk-python"
|
||||
'''
|
||||
EXAMPLES = '''
|
||||
# Basic example provisioning from image.
|
||||
|
||||
ovirt:
|
||||
user: admin@internal
|
||||
url: https://ovirt.example.com
|
||||
instance_name: ansiblevm04
|
||||
password: secret
|
||||
image: centos_64
|
||||
zone: cluster01
|
||||
resource_type: template
|
||||
|
||||
# Full example to create new instance from scratch
|
||||
ovirt:
|
||||
instance_name: testansible
|
||||
resource_type: new
|
||||
instance_type: server
|
||||
user: admin@internal
|
||||
password: secret
|
||||
url: https://ovirt.example.com
|
||||
instance_disksize: 10
|
||||
zone: cluster01
|
||||
region: datacenter1
|
||||
instance_cpus: 1
|
||||
instance_nic: nic1
|
||||
instance_network: rhevm
|
||||
instance_mem: 1000
|
||||
disk_alloc: thin
|
||||
sdomain: FIBER01
|
||||
instance_cores: 1
|
||||
instance_os: rhel_6x64
|
||||
disk_int: virtio
|
||||
|
||||
# stopping an instance
|
||||
ovirt:
|
||||
instance_name: testansible
|
||||
state: shutdown
|
||||
user: admin@internal
|
||||
password: secret
|
||||
url: https://ovirt.example.com
|
||||
|
||||
# starting an instance
|
||||
ovirt:
|
||||
instance_name: testansible
|
||||
state: started
|
||||
user: admin@internal
|
||||
password: secret
|
||||
url: https://ovirt.example.com
|
||||
|
||||
# starting an instance with cloud init information
|
||||
ovirt:
|
||||
instance_name: testansible
|
||||
state: started
|
||||
user: admin@internal
|
||||
password: secret
|
||||
url: https://ovirt.example.com
|
||||
hostname: testansible
|
||||
domain: ansible.local
|
||||
ip: 192.0.2.100
|
||||
netmask: 255.255.255.0
|
||||
gateway: 192.0.2.1
|
||||
rootpw: bigsecret
|
||||
|
||||
'''
|
||||
|
||||
try:
|
||||
from ovirtsdk.api import API
|
||||
from ovirtsdk.xml import params
|
||||
HAS_OVIRTSDK = True
|
||||
except ImportError:
|
||||
HAS_OVIRTSDK = False
|
||||
|
||||
# ------------------------------------------------------------------- #
|
||||
# create connection with API
|
||||
#
|
||||
def conn(url, user, password):
    """Return an authenticated oVirt API handle.

    Connects to the oVirt REST API at `url` and verifies the connection
    with a probe call before returning the handle.

    :param url: base URL of the oVirt API endpoint
    :param user: user name to authenticate with
    :param password: password for `user`
    :raises Exception: if the probe call against the API fails
    """
    # NOTE(review): insecure=True disables TLS certificate verification.
    api = API(url=url, username=user, password=password, insecure=True)
    try:
        # Probe the connection; only success/failure matters, the return
        # value was never used (dropped the dead `value =` binding).
        api.test()
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are not swallowed.
        raise Exception("error connecting to the oVirt API")
    return api
|
||||
|
||||
# ------------------------------------------------------------------- #
|
||||
# Create VM from scratch
|
||||
def create_vm(conn, vmtype, vmname, zone, vmdisk_size, vmcpus, vmnic, vmnetwork, vmmem, vmdisk_alloc, sdomain, vmcores, vmos, vmdisk_int):
    """Create a new VM from scratch (blank template) with one disk and one NIC.

    :param conn: authenticated oVirt API handle (see conn())
    :param vmtype: 'server' or 'desktop'
    :param vmname: name for the new VM
    :param zone: oVirt cluster to create the VM in
    :param vmdisk_size: system disk size in GB
    :param vmcpus: number of virtual CPUs
    :param vmnic: name of the VM network interface
    :param vmnetwork: logical network to attach the NIC to
    :param vmmem: memory size in MB
    :param vmdisk_alloc: 'thin' (sparse, cow) or 'preallocated' (raw)
    :param sdomain: storage domain to place the disk on
    :param vmcores: number of cores
    :param vmos: guest operating system type
    :param vmdisk_int: disk interface ('virtio' or 'ide')
    :raises Exception: when the VM, its disk or its NIC cannot be created
    """
    # The two allocation modes only differ in sparseness and disk format,
    # so derive those values instead of duplicating every parameter.
    if vmdisk_alloc == 'thin':
        sparse, disk_format = True, 'cow'
    elif vmdisk_alloc == 'preallocated':
        sparse, disk_format = False, 'raw'
    else:
        # Defensive: main() restricts disk_alloc to the two values above,
        # but the original code hit a NameError for anything else.
        raise Exception("Unsupported disk_alloc value: %s" % vmdisk_alloc)

    vmparams = params.VM(name=vmname, cluster=conn.clusters.get(name=zone), os=params.OperatingSystem(type_=vmos),
                         template=conn.templates.get(name="Blank"), memory=1024 * 1024 * int(vmmem),
                         cpu=params.CPU(topology=params.CpuTopology(cores=int(vmcores))), type_=vmtype)
    vmdisk = params.Disk(size=1024 * 1024 * 1024 * int(vmdisk_size), wipe_after_delete=True, sparse=sparse,
                         interface=vmdisk_int, type_="System", format=disk_format,
                         storage_domains=params.StorageDomains(storage_domain=[conn.storagedomains.get(name=sdomain)]))
    network_net = params.Network(name=vmnetwork)
    # BUGFIX: the 'thin' branch hard-coded the NIC name 'nic1' and silently
    # ignored the vmnic parameter; both allocation modes now honour vmnic.
    nic_net1 = params.NIC(name=vmnic, network=network_net, interface='virtio')

    try:
        conn.vms.add(vmparams)
    except Exception:
        raise Exception("Error creating VM with specified parameters")
    vm = conn.vms.get(name=vmname)
    try:
        vm.disks.add(vmdisk)
    except Exception:
        raise Exception("Error attaching disk")
    try:
        vm.nics.add(nic_net1)
    except Exception:
        raise Exception("Error adding nic")
|
||||
|
||||
|
||||
# create an instance from a template
|
||||
def create_vm_template(conn, vmname, image, zone):
    """Create a VM named `vmname` from the template `image` in cluster `zone`,
    cloning the template's disks.

    :param conn: authenticated oVirt API handle (see conn())
    :param vmname: name for the new VM
    :param image: template name to deploy from
    :param zone: oVirt cluster to create the VM in
    :raises Exception: when the VM cannot be created from the template
    """
    vmparams = params.VM(name=vmname, cluster=conn.clusters.get(name=zone),
                         template=conn.templates.get(name=image), disks=params.Disks(clone=True))
    try:
        conn.vms.add(vmparams)
    except Exception:
        # Narrowed from a bare `except:` so interpreter-exit exceptions
        # are not swallowed.
        raise Exception('error adding template %s' % image)
|
||||
|
||||
|
||||
# start instance
|
||||
def vm_start(conn, vmname, hostname=None, ip=None, netmask=None, gateway=None,
             domain=None, dns=None, rootpw=None, key=None):
    # Start the named VM, optionally injecting cloud-init data (hostname,
    # static network configuration, root password, SSH key) into the guest.
    vm = conn.vms.get(name=vmname)
    use_cloud_init = False
    nics = None
    nic = None
    # Any cloud-init-related argument switches cloud-init on for this boot.
    if hostname or ip or netmask or gateway or domain or dns or rootpw or key:
        use_cloud_init = True
    # A complete static configuration (address + netmask + gateway) is
    # required before a guest NIC configuration is built.
    if ip and netmask and gateway:
        ipinfo = params.IP(address=ip, netmask=netmask, gateway=gateway)
        nic = params.GuestNicConfiguration(name='eth0', boot_protocol='STATIC', ip=ipinfo, on_boot=True)
        # NOTE(review): this params.Nics() value is overwritten by the
        # GuestNicsConfiguration assignment below — looks like dead code.
        nics = params.Nics()
    # NOTE(review): runs unconditionally, so without a full static IP config
    # this wraps nic=None — confirm the SDK tolerates a None entry here.
    nics = params.GuestNicsConfiguration(nic_configuration=[nic])
    initialization=params.Initialization(regenerate_ssh_keys=True, host_name=hostname, domain=domain, user_name='root',
                                         root_password=rootpw, nic_configurations=nics, dns_servers=dns,
                                         authorized_ssh_keys=key)
    action = params.Action(use_cloud_init=use_cloud_init, vm=params.VM(initialization=initialization))
    vm.start(action=action)
|
||||
|
||||
# Stop instance
|
||||
def vm_stop(conn, vmname):
    """Stop (power off) the VM named `vmname`."""
    conn.vms.get(name=vmname).stop()
|
||||
|
||||
# restart instance
|
||||
def vm_restart(conn, vmname):
    """Restart the VM named `vmname`: stop it, wait until it is fully down,
    then start it again.

    Polls the VM state every 5 seconds while waiting for shutdown.
    """
    # BUGFIX: this module never imports `time` at file level, so the
    # original sleep call raised NameError; import locally here.
    import time
    vm = conn.vms.get(name=vmname)
    vm.stop()
    # Wait for a clean shutdown before starting again. The original also
    # bound an unused `state = vm_status(...)` — dropped as dead code.
    while conn.vms.get(name=vmname).get_status().get_state() != 'down':
        time.sleep(5)
    vm.start()
|
||||
|
||||
# remove an instance
|
||||
def vm_remove(conn, vmname):
    """Delete the VM named `vmname` from oVirt."""
    conn.vms.get(name=vmname).delete()
|
||||
|
||||
# ------------------------------------------------------------------- #
|
||||
# VM statuses
|
||||
#
|
||||
# Get the VMs status
|
||||
def vm_status(conn, vmname):
    """Return the current state string of the VM (e.g. 'up' or 'down')."""
    return conn.vms.get(name=vmname).status.state
|
||||
|
||||
|
||||
# Get VM object and return it's name if object exists
|
||||
def get_vm(conn, vmname):
    """Return the VM's name, or the sentinel string "empty" if no VM with
    that name exists.

    The "empty" sentinel is what the state handling in main() tests against.
    """
    vm = conn.vms.get(name=vmname)
    # Identity check: `vm == None` replaced with the idiomatic `is None`
    # (equality can be overridden by SDK proxy objects).
    if vm is None:
        return "empty"
    return vm.get_name()
|
||||
|
||||
# ------------------------------------------------------------------- #
|
||||
# Hypervisor operations
|
||||
#
|
||||
# not available yet
|
||||
# ------------------------------------------------------------------- #
|
||||
# Main
|
||||
|
||||
def main():
    """Module entry point: create, remove, start, stop or restart an
    oVirt/RHEV instance according to the requested state."""
    module = AnsibleModule(
        argument_spec=dict(
            # BUGFIX: DOCUMENTATION advertises 'restarted' but only
            # 'restart' was accepted; both are accepted now so existing
            # playbooks keep working (backward compatible).
            state=dict(default='present',
                       choices=['present', 'absent', 'shutdown', 'started', 'restart', 'restarted']),
            user=dict(required=True),
            url=dict(required=True),
            instance_name=dict(required=True, aliases=['vmname']),
            password=dict(required=True, no_log=True),
            image=dict(),
            resource_type=dict(choices=['new', 'template']),
            zone=dict(),
            instance_disksize=dict(aliases=['vm_disksize']),
            instance_cpus=dict(default=1, aliases=['vmcpus']),
            instance_nic=dict(aliases=['vmnic']),
            instance_network=dict(default='rhevm', aliases=['vmnetwork']),
            instance_mem=dict(aliases=['vmmem']),
            instance_type=dict(default='server', aliases=['vmtype'], choices=['server', 'desktop']),
            disk_alloc=dict(default='thin', choices=['thin', 'preallocated']),
            disk_int=dict(default='virtio', choices=['virtio', 'ide']),
            instance_os=dict(aliases=['vmos']),
            instance_cores=dict(default=1, aliases=['vmcores']),
            instance_hostname=dict(aliases=['hostname']),
            instance_ip=dict(aliases=['ip']),
            instance_netmask=dict(aliases=['netmask']),
            instance_gateway=dict(aliases=['gateway']),
            instance_domain=dict(aliases=['domain']),
            instance_dns=dict(aliases=['dns']),
            # no_log added: the root password must not leak into logs.
            instance_rootpw=dict(aliases=['rootpw'], no_log=True),
            instance_key=dict(aliases=['key']),
            sdomain=dict(),
            region=dict(),
        )
    )

    if not HAS_OVIRTSDK:
        module.fail_json(msg='ovirtsdk required for this module')

    state = module.params['state']
    user = module.params['user']
    url = module.params['url']
    vmname = module.params['instance_name']
    password = module.params['password']
    image = module.params['image']                    # name of the image to deploy
    resource_type = module.params['resource_type']    # template or from scratch
    zone = module.params['zone']                      # oVirt cluster
    vmdisk_size = module.params['instance_disksize']  # disksize
    vmcpus = module.params['instance_cpus']           # number of cpu
    vmnic = module.params['instance_nic']             # network interface
    vmnetwork = module.params['instance_network']     # logical network
    vmmem = module.params['instance_mem']             # mem size
    vmdisk_alloc = module.params['disk_alloc']        # thin, preallocated
    vmdisk_int = module.params['disk_int']            # disk interface virtio or ide
    vmos = module.params['instance_os']               # Operating System
    vmtype = module.params['instance_type']           # server or desktop
    vmcores = module.params['instance_cores']         # number of cores
    sdomain = module.params['sdomain']                # storage domain to store disk on
    region = module.params['region']                  # oVirt Datacenter
    hostname = module.params['instance_hostname']
    ip = module.params['instance_ip']
    netmask = module.params['instance_netmask']
    gateway = module.params['instance_gateway']
    domain = module.params['instance_domain']
    dns = module.params['instance_dns']
    rootpw = module.params['instance_rootpw']
    key = module.params['instance_key']

    # initialize connection
    try:
        c = conn(url + "/api", user, password)
    except Exception as e:
        module.fail_json(msg='%s' % e)

    if state == 'present':
        if get_vm(c, vmname) == "empty":
            if resource_type == 'template':
                try:
                    create_vm_template(c, vmname, image, zone)
                except Exception as e:
                    module.fail_json(msg='%s' % e)
                module.exit_json(changed=True, msg="deployed VM %s from template %s" % (vmname, image))
            elif resource_type == 'new':
                # FIXME: refactor, use keyword args.
                try:
                    create_vm(c, vmtype, vmname, zone, vmdisk_size, vmcpus, vmnic, vmnetwork,
                              vmmem, vmdisk_alloc, sdomain, vmcores, vmos, vmdisk_int)
                except Exception as e:
                    module.fail_json(msg='%s' % e)
                module.exit_json(changed=True, msg="deployed VM %s from scratch" % vmname)
            else:
                module.exit_json(changed=False, msg="You did not specify a resource type")
        else:
            module.exit_json(changed=False, msg="VM %s already exists" % vmname)

    if state == 'started':
        if vm_status(c, vmname) == 'up':
            module.exit_json(changed=False, msg="VM %s is already running" % vmname)
        else:
            vm_start(c, vmname, hostname, ip, netmask, gateway, domain, dns, rootpw, key)
            module.exit_json(changed=True, msg="VM %s started" % vmname)

    if state == 'shutdown':
        if vm_status(c, vmname) == 'down':
            module.exit_json(changed=False, msg="VM %s is already shutdown" % vmname)
        else:
            vm_stop(c, vmname)
            module.exit_json(changed=True, msg="VM %s is shutting down" % vmname)

    if state in ('restart', 'restarted'):
        if vm_status(c, vmname) == 'up':
            vm_restart(c, vmname)
            module.exit_json(changed=True, msg="VM %s is restarted" % vmname)
        else:
            module.exit_json(changed=False, msg="VM %s is not running" % vmname)

    if state == 'absent':
        if get_vm(c, vmname) == "empty":
            module.exit_json(changed=False, msg="VM %s does not exist" % vmname)
        else:
            vm_remove(c, vmname)
            module.exit_json(changed=True, msg="VM %s removed" % vmname)
|
||||
|
||||
|
||||
|
||||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
591
lib/ansible/modules/cloud/misc/proxmox.py
Normal file
591
lib/ansible/modules/cloud/misc/proxmox.py
Normal file
@@ -0,0 +1,591 @@
|
||||
#!/usr/bin/python
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['preview'],
|
||||
'supported_by': 'community',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: proxmox
|
||||
short_description: management of instances in Proxmox VE cluster
|
||||
description:
|
||||
- allows you to create/delete/stop instances in Proxmox VE cluster
|
||||
- Starting in Ansible 2.1, it automatically detects containerization type (lxc for PVE 4, openvz for older)
|
||||
version_added: "2.0"
|
||||
options:
|
||||
api_host:
|
||||
description:
|
||||
- the host of the Proxmox VE cluster
|
||||
required: true
|
||||
api_user:
|
||||
description:
|
||||
- the user to authenticate with
|
||||
required: true
|
||||
api_password:
|
||||
description:
|
||||
- the password to authenticate with
|
||||
- you can use PROXMOX_PASSWORD environment variable
|
||||
default: null
|
||||
required: false
|
||||
vmid:
|
||||
description:
|
||||
- the instance id
|
||||
- if not set, the next available VM ID will be fetched from ProxmoxAPI.
|
||||
- if not set, will be fetched from ProxmoxAPI based on the hostname
|
||||
default: null
|
||||
required: false
|
||||
validate_certs:
|
||||
description:
|
||||
- enable / disable https certificate verification
|
||||
default: false
|
||||
required: false
|
||||
type: boolean
|
||||
node:
|
||||
description:
|
||||
- Proxmox VE node, when new VM will be created
|
||||
- required only for C(state=present)
|
||||
- for another states will be autodiscovered
|
||||
default: null
|
||||
required: false
|
||||
pool:
|
||||
description:
|
||||
- Proxmox VE resource pool
|
||||
default: null
|
||||
required: false
|
||||
version_added: "2.3"
|
||||
password:
|
||||
description:
|
||||
- the instance root password
|
||||
- required only for C(state=present)
|
||||
default: null
|
||||
required: false
|
||||
hostname:
|
||||
description:
|
||||
- the instance hostname
|
||||
- required only for C(state=present)
|
||||
- must be unique if vmid is not passed
|
||||
default: null
|
||||
required: false
|
||||
ostemplate:
|
||||
description:
|
||||
- the template for VM creating
|
||||
- required only for C(state=present)
|
||||
default: null
|
||||
required: false
|
||||
disk:
|
||||
description:
|
||||
- hard disk size in GB for instance
|
||||
default: 3
|
||||
required: false
|
||||
cpus:
|
||||
description:
|
||||
- numbers of allocated cpus for instance
|
||||
default: 1
|
||||
required: false
|
||||
memory:
|
||||
description:
|
||||
- memory size in MB for instance
|
||||
default: 512
|
||||
required: false
|
||||
swap:
|
||||
description:
|
||||
- swap memory size in MB for instance
|
||||
default: 0
|
||||
required: false
|
||||
netif:
|
||||
description:
|
||||
- specifies network interfaces for the container
|
||||
default: null
|
||||
required: false
|
||||
type: A hash/dictionary defining interfaces
|
||||
mounts:
|
||||
description:
|
||||
- specifies additional mounts (separate disks) for the container
|
||||
default: null
|
||||
required: false
|
||||
type: A hash/dictionary defining mount points
|
||||
version_added: "2.2"
|
||||
ip_address:
|
||||
description:
|
||||
- specifies the address the container will be assigned
|
||||
default: null
|
||||
required: false
|
||||
type: string
|
||||
onboot:
|
||||
description:
|
||||
- specifies whether a VM will be started during system bootup
|
||||
default: false
|
||||
required: false
|
||||
type: boolean
|
||||
storage:
|
||||
description:
|
||||
- target storage
|
||||
default: 'local'
|
||||
required: false
|
||||
type: string
|
||||
cpuunits:
|
||||
description:
|
||||
- CPU weight for a VM
|
||||
default: 1000
|
||||
required: false
|
||||
type: integer
|
||||
nameserver:
|
||||
description:
|
||||
- sets DNS server IP address for a container
|
||||
default: null
|
||||
required: false
|
||||
type: string
|
||||
searchdomain:
|
||||
description:
|
||||
- sets DNS search domain for a container
|
||||
default: null
|
||||
required: false
|
||||
type: string
|
||||
timeout:
|
||||
description:
|
||||
- timeout for operations
|
||||
default: 30
|
||||
required: false
|
||||
type: integer
|
||||
force:
|
||||
description:
|
||||
- forcing operations
|
||||
- can be used only with states C(present), C(stopped), C(restarted)
|
||||
- with C(state=present) force option allow to overwrite existing container
|
||||
- with states C(stopped) , C(restarted) allow to force stop instance
|
||||
default: false
|
||||
required: false
|
||||
type: boolean
|
||||
state:
|
||||
description:
|
||||
- Indicate desired state of the instance
|
||||
choices: ['present', 'started', 'absent', 'stopped', 'restarted']
|
||||
default: present
|
||||
notes:
|
||||
- Requires proxmoxer and requests modules on host. This modules can be installed with pip.
|
||||
requirements: [ "proxmoxer", "python >= 2.7", "requests" ]
|
||||
author: "Sergei Antipov @UnderGreen"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Create new container with minimal options
|
||||
- proxmox:
|
||||
vmid: 100
|
||||
node: uk-mc02
|
||||
api_user: root@pam
|
||||
api_password: 1q2w3e
|
||||
api_host: node1
|
||||
password: 123456
|
||||
hostname: example.org
|
||||
ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
|
||||
|
||||
# Create new container automatically selecting the next available vmid.
|
||||
- proxmox: node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' password='123456' hostname='example.org' ostemplate='local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
|
||||
|
||||
# Create new container with minimal options with force(it will rewrite existing container)
|
||||
- proxmox:
|
||||
vmid: 100
|
||||
node: uk-mc02
|
||||
api_user: root@pam
|
||||
api_password: 1q2w3e
|
||||
api_host: node1
|
||||
password: 123456
|
||||
hostname: example.org
|
||||
ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
|
||||
force: yes
|
||||
|
||||
# Create new container with minimal options use environment PROXMOX_PASSWORD variable(you should export it before)
|
||||
- proxmox:
|
||||
vmid: 100
|
||||
node: uk-mc02
|
||||
api_user: root@pam
|
||||
api_host: node1
|
||||
password: 123456
|
||||
hostname: example.org
|
||||
ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
|
||||
|
||||
# Create new container with minimal options defining network interface with dhcp
|
||||
- proxmox:
|
||||
vmid: 100
|
||||
node: uk-mc02
|
||||
api_user: root@pam
|
||||
api_password: 1q2w3e
|
||||
api_host: node1
|
||||
password: 123456
|
||||
hostname: example.org
|
||||
ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
|
||||
netif: '{"net0":"name=eth0,ip=dhcp,ip6=dhcp,bridge=vmbr0"}'
|
||||
|
||||
# Create new container with minimal options defining network interface with static ip
|
||||
- proxmox:
|
||||
vmid: 100
|
||||
node: uk-mc02
|
||||
api_user: root@pam
|
||||
api_password: 1q2w3e
|
||||
api_host: node1
|
||||
password: 123456
|
||||
hostname: example.org
|
||||
ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
|
||||
netif: '{"net0":"name=eth0,gw=192.168.0.1,ip=192.168.0.2/24,bridge=vmbr0"}'
|
||||
|
||||
# Create new container with minimal options defining a mount
|
||||
- proxmox:
|
||||
vmid: 100
|
||||
node: uk-mc02
|
||||
api_user: root@pam
|
||||
api_password: 1q2w3e
|
||||
api_host: node1
|
||||
password: 123456
|
||||
hostname: example.org
|
||||
ostemplate: local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
|
||||
mounts: '{"mp0":"local:8,mp=/mnt/test/"}'
|
||||
|
||||
# Start container
|
||||
- proxmox:
|
||||
vmid: 100
|
||||
api_user: root@pam
|
||||
api_password: 1q2w3e
|
||||
api_host: node1
|
||||
state: started
|
||||
|
||||
# Stop container
|
||||
- proxmox:
|
||||
vmid: 100
|
||||
api_user: root@pam
|
||||
api_password: 1q2w3e
|
||||
api_host: node1
|
||||
state: stopped
|
||||
|
||||
# Stop container with force
|
||||
- proxmox:
|
||||
vmid: 100
|
||||
api_user: root@pam
|
||||
api_password: 1q2w3e
|
||||
api_host: node1
|
||||
force: yes
|
||||
state: stopped
|
||||
|
||||
# Restart container(stopped or mounted container you can't restart)
|
||||
- proxmox:
|
||||
vmid: 100
|
||||
api_user: root@pam
|
||||
api_password: 1q2w3e
|
||||
api_host: node1
|
||||
state: restarted
|
||||
|
||||
# Remove container
|
||||
- proxmox:
|
||||
vmid: 100
|
||||
api_user: root@pam
|
||||
api_password: 1q2w3e
|
||||
api_host: node1
|
||||
state: absent
|
||||
'''
|
||||
|
||||
import os
|
||||
import time
|
||||
|
||||
try:
|
||||
from proxmoxer import ProxmoxAPI
|
||||
HAS_PROXMOXER = True
|
||||
except ImportError:
|
||||
HAS_PROXMOXER = False
|
||||
|
||||
VZ_TYPE=None
|
||||
|
||||
def get_nextvmid(proxmox):
    """Return the next free VM ID reported by the Proxmox cluster.

    NOTE(review): the error path calls `module.fail_json`, but `module` is
    a local of main(), not a module-level global — as written the except
    branch would raise NameError. Confirm and pass the module object in.
    """
    try:
        return proxmox.cluster.nextid.get()
    except Exception as e:
        # BUGFIX: the original message contained a '%s' placeholder but
        # never interpolated the exception into it.
        module.fail_json(msg="Unable to get next vmid. Failed with exception: %s" % e)
|
||||
|
||||
def get_vmid(proxmox, hostname):
    """Return the list of VM ids whose name equals `hostname`."""
    matches = []
    for resource in proxmox.cluster.resources.get(type='vm'):
        if resource['name'] == hostname:
            matches.append(resource['vmid'])
    return matches
|
||||
|
||||
def get_instance(proxmox, vmid):
    """Return the cluster resource entries whose vmid matches `vmid`."""
    wanted = int(vmid)
    matches = []
    for resource in proxmox.cluster.resources.get(type='vm'):
        if resource['vmid'] == wanted:
            matches.append(resource)
    return matches
|
||||
|
||||
def content_check(proxmox, node, ostemplate, template_store):
    """Return a non-empty list ([True]) if `ostemplate` exists in the given
    storage on `node`, otherwise an empty list (falsy)."""
    contents = proxmox.nodes(node).storage(template_store).content.get()
    return [True for entry in contents if entry['volid'] == ostemplate]
|
||||
|
||||
def node_check(proxmox, node):
    """Return a non-empty list ([True]) if `node` exists in the cluster,
    otherwise an empty list (falsy)."""
    found = []
    for entry in proxmox.nodes.get():
        if entry['node'] == node:
            found.append(True)
    return found
|
||||
|
||||
def create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout, **kwargs):
    """Create an lxc/openvz container and wait for the creation task.

    Returns True when the task finishes with exitstatus OK, False if the
    polling loop exits; fails the module when `timeout` (seconds) is
    exhausted. Extra container options are passed through **kwargs.
    """
    proxmox_node = proxmox.nodes(node)
    # Drop unset options so they are not sent to the API.
    # BUGFIX: dict.iteritems() is Python-2-only; .items() behaves the same
    # here and also works on Python 3.
    kwargs = dict((k, v) for k, v in kwargs.items() if v is not None)
    if VZ_TYPE == 'lxc':
        # PVE 4 (lxc) uses different option names than openvz; netif and
        # mounts dicts are flattened into top-level API parameters.
        kwargs['cpulimit'] = cpus
        kwargs['rootfs'] = disk
        if 'netif' in kwargs:
            kwargs.update(kwargs['netif'])
            del kwargs['netif']
        if 'mounts' in kwargs:
            kwargs.update(kwargs['mounts'])
            del kwargs['mounts']
    else:
        kwargs['cpus'] = cpus
        kwargs['disk'] = disk
    taskid = getattr(proxmox_node, VZ_TYPE).create(vmid=vmid, storage=storage, memory=memory, swap=swap, **kwargs)

    # Poll the task once per second until it completes or timeout runs out.
    while timeout:
        if (proxmox_node.tasks(taskid).status.get()['status'] == 'stopped'
                and proxmox_node.tasks(taskid).status.get()['exitstatus'] == 'OK'):
            return True
        timeout = timeout - 1
        if timeout == 0:
            module.fail_json(msg='Reached timeout while waiting for creating VM. Last line in task before timeout: %s'
                             % proxmox_node.tasks(taskid).log.get()[:1])
        time.sleep(1)
    return False
|
||||
|
||||
def start_instance(module, proxmox, vm, vmid, timeout):
    """Start the container identified by `vmid` and poll the start task
    once per second until it completes.

    Returns True when the task ends with exitstatus OK, False if the loop
    exits; fails the module when `timeout` (seconds) is exhausted.
    """
    node_name = vm[0]['node']
    taskid = getattr(proxmox.nodes(node_name), VZ_TYPE)(vmid).status.start.post()
    while timeout:
        task_status = proxmox.nodes(node_name).tasks(taskid).status.get()
        if task_status['status'] == 'stopped' and task_status['exitstatus'] == 'OK':
            return True
        timeout = timeout - 1
        if timeout == 0:
            module.fail_json(msg='Reached timeout while waiting for starting VM. Last line in task before timeout: %s'
                             % proxmox.nodes(node_name).tasks(taskid).log.get()[:1])
        time.sleep(1)
    return False
|
||||
|
||||
def stop_instance(module, proxmox, vm, vmid, timeout, force):
    """Shut down the container identified by `vmid` and poll the shutdown
    task once per second until it completes.

    :param force: when truthy, pass forceStop=1 so a hung clean shutdown
        is killed instead of blocking.
    Returns True when the task ends with exitstatus OK, False if the loop
    exits; fails the module when `timeout` (seconds) is exhausted.
    """
    vm_node = proxmox.nodes(vm[0]['node'])
    if force:
        taskid = getattr(vm_node, VZ_TYPE)(vmid).status.shutdown.post(forceStop=1)
    else:
        taskid = getattr(vm_node, VZ_TYPE)(vmid).status.shutdown.post()
    while timeout:
        if (vm_node.tasks(taskid).status.get()['status'] == 'stopped'
                and vm_node.tasks(taskid).status.get()['exitstatus'] == 'OK'):
            return True
        timeout = timeout - 1
        if timeout == 0:
            # BUGFIX: the original referenced the undefined name
            # `proxmox_node` here, raising NameError instead of reporting
            # the timeout.
            module.fail_json(msg='Reached timeout while waiting for stopping VM. Last line in task before timeout: %s'
                             % vm_node.tasks(taskid).log.get()[:1])
        time.sleep(1)
    return False
|
||||
|
||||
def umount_instance(module, proxmox, vm, vmid, timeout):
    """Unmount container *vmid* and wait up to *timeout* seconds for completion.

    Returns True once the umount task reports success; fails the module when
    the timeout is exhausted; returns False if the loop never runs.
    """
    taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.umount.post()
    while timeout:
        if (proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped'
                and proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'):
            return True
        timeout = timeout - 1
        if timeout == 0:
            # BUG FIX: the original referenced the undefined name 'proxmox_node'
            # here, raising NameError instead of the intended failure message.
            module.fail_json(msg='Reached timeout while waiting for unmounting VM. Last line in task before timeout: %s'
                             % proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])

        time.sleep(1)
    return False
|
||||
|
||||
def main():
    """Entry point: manage OpenVZ/LXC containers in a Proxmox VE cluster.

    Dispatches on the 'state' parameter: present (create), started, stopped,
    restarted and absent (delete).  Always exits through
    module.exit_json()/fail_json().
    """
    module = AnsibleModule(
        argument_spec = dict(
            api_host = dict(required=True),
            api_user = dict(required=True),
            api_password = dict(no_log=True),
            vmid = dict(required=False),
            validate_certs = dict(type='bool', default='no'),
            node = dict(),
            pool = dict(),
            password = dict(no_log=True),
            hostname = dict(),
            ostemplate = dict(),
            disk = dict(type='str', default='3'),
            cpus = dict(type='int', default=1),
            memory = dict(type='int', default=512),
            swap = dict(type='int', default=0),
            netif = dict(type='dict'),
            mounts = dict(type='dict'),
            ip_address = dict(),
            onboot = dict(type='bool', default='no'),
            storage = dict(default='local'),
            cpuunits = dict(type='int', default=1000),
            nameserver = dict(),
            searchdomain = dict(),
            timeout = dict(type='int', default=30),
            force = dict(type='bool', default='no'),
            state = dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted']),
        )
    )

    if not HAS_PROXMOXER:
        module.fail_json(msg='proxmoxer required for this module')

    state = module.params['state']
    api_user = module.params['api_user']
    api_host = module.params['api_host']
    api_password = module.params['api_password']
    vmid = module.params['vmid']
    validate_certs = module.params['validate_certs']
    node = module.params['node']
    disk = module.params['disk']
    cpus = module.params['cpus']
    memory = module.params['memory']
    swap = module.params['swap']
    storage = module.params['storage']
    hostname = module.params['hostname']
    if module.params['ostemplate'] is not None:
        # storage part of the 'storage:content/template' volid
        template_store = module.params['ostemplate'].split(":")[0]
    timeout = module.params['timeout']

    # If password not set get it from PROXMOX_PASSWORD env
    if not api_password:
        try:
            api_password = os.environ['PROXMOX_PASSWORD']
        except KeyError as e:
            module.fail_json(msg='You should set api_password param or use PROXMOX_PASSWORD environment variable')

    try:
        proxmox = ProxmoxAPI(api_host, user=api_user, password=api_password, verify_ssl=validate_certs)
        global VZ_TYPE
        # Proxmox VE 4.x replaced OpenVZ containers with LXC
        VZ_TYPE = 'openvz' if float(proxmox.version.get()['version']) < 4.0 else 'lxc'

    except Exception as e:
        module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e)

    # If vmid not set get the Next VM id from ProxmoxAPI
    # If hostname is set get the VM id from ProxmoxAPI
    if not vmid and state == 'present':
        vmid = get_nextvmid(proxmox)
    elif not vmid and hostname:
        vmid = get_vmid(proxmox, hostname)[0]
    elif not vmid:
        module.exit_json(changed=False, msg="Vmid could not be fetched for the following action: %s" % state)

    if state == 'present':
        try:
            if get_instance(proxmox, vmid) and not module.params['force']:
                module.exit_json(changed=False, msg="VM with vmid = %s is already exists" % vmid)
            # If no vmid was passed, there cannot be another VM named 'hostname'
            if not module.params['vmid'] and get_vmid(proxmox, hostname) and not module.params['force']:
                module.exit_json(changed=False, msg="VM with hostname %s already exists and has ID number %s" % (hostname, get_vmid(proxmox, hostname)[0]))
            # BUG FIX: the original wrote 'not (node, ...)'; a non-empty tuple is
            # always truthy, so the mandatory-parameter check could never fire.
            elif not (node and module.params['hostname'] and module.params['password'] and module.params['ostemplate']):
                module.fail_json(msg='node, hostname, password and ostemplate are mandatory for creating vm')
            elif not node_check(proxmox, node):
                module.fail_json(msg="node '%s' not exists in cluster" % node)
            elif not content_check(proxmox, node, module.params['ostemplate'], template_store):
                module.fail_json(msg="ostemplate '%s' not exists on node %s and storage %s"
                                 % (module.params['ostemplate'], node, template_store))

            create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout,
                            pool = module.params['pool'],
                            password = module.params['password'],
                            hostname = module.params['hostname'],
                            ostemplate = module.params['ostemplate'],
                            netif = module.params['netif'],
                            mounts = module.params['mounts'],
                            ip_address = module.params['ip_address'],
                            onboot = int(module.params['onboot']),
                            cpuunits = module.params['cpuunits'],
                            nameserver = module.params['nameserver'],
                            searchdomain = module.params['searchdomain'],
                            force = int(module.params['force']))

            module.exit_json(changed=True, msg="deployed VM %s from template %s" % (vmid, module.params['ostemplate']))
        except Exception as e:
            module.fail_json(msg="creation of %s VM %s failed with exception: %s" % ( VZ_TYPE, vmid, e ))

    elif state == 'started':
        try:
            vm = get_instance(proxmox, vmid)
            if not vm:
                module.fail_json(msg='VM with vmid = %s not exists in cluster' % vmid)
            if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running':
                module.exit_json(changed=False, msg="VM %s is already running" % vmid)

            if start_instance(module, proxmox, vm, vmid, timeout):
                module.exit_json(changed=True, msg="VM %s started" % vmid)
        except Exception as e:
            module.fail_json(msg="starting of VM %s failed with exception: %s" % ( vmid, e ))

    elif state == 'stopped':
        try:
            vm = get_instance(proxmox, vmid)
            if not vm:
                module.fail_json(msg='VM with vmid = %s not exists in cluster' % vmid)

            # A 'mounted' container is already shut down; it can only be
            # unmounted, and only when force=yes.
            if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted':
                if module.params['force']:
                    if umount_instance(module, proxmox, vm, vmid, timeout):
                        module.exit_json(changed=True, msg="VM %s is shutting down" % vmid)
                else:
                    module.exit_json(changed=False, msg=("VM %s is already shutdown, but mounted. "
                                                         "You can use force option to umount it.") % vmid)

            if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped':
                module.exit_json(changed=False, msg="VM %s is already shutdown" % vmid)

            if stop_instance(module, proxmox, vm, vmid, timeout, force = module.params['force']):
                module.exit_json(changed=True, msg="VM %s is shutting down" % vmid)
        except Exception as e:
            module.fail_json(msg="stopping of VM %s failed with exception: %s" % ( vmid, e ))

    elif state == 'restarted':
        try:
            vm = get_instance(proxmox, vmid)
            if not vm:
                module.fail_json(msg='VM with vmid = %s not exists in cluster' % vmid)
            if ( getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped'
                 or getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted' ):
                module.exit_json(changed=False, msg="VM %s is not running" % vmid)

            if ( stop_instance(module, proxmox, vm, vmid, timeout, force = module.params['force']) and
                 start_instance(module, proxmox, vm, vmid, timeout) ):
                module.exit_json(changed=True, msg="VM %s is restarted" % vmid)
        except Exception as e:
            module.fail_json(msg="restarting of VM %s failed with exception: %s" % ( vmid, e ))

    elif state == 'absent':
        try:
            vm = get_instance(proxmox, vmid)
            if not vm:
                module.exit_json(changed=False, msg="VM %s does not exist" % vmid)

            if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running':
                module.exit_json(changed=False, msg="VM %s is running. Stop it before deletion." % vmid)

            if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted':
                module.exit_json(changed=False, msg="VM %s is mounted. Stop it with force option before deletion." % vmid)

            taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE).delete(vmid)
            while timeout:
                if ( proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped'
                     and proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK' ):
                    module.exit_json(changed=True, msg="VM %s removed" % vmid)
                timeout = timeout - 1
                if timeout == 0:
                    # BUG FIX: the original referenced the undefined name
                    # 'proxmox_node' here, raising NameError on timeout.
                    module.fail_json(msg='Reached timeout while waiting for removing VM. Last line in task before timeout: %s'
                                     % proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])

                time.sleep(1)
        except Exception as e:
            module.fail_json(msg="deletion of VM %s failed with exception: %s" % ( vmid, e ))
|
||||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
1058
lib/ansible/modules/cloud/misc/proxmox_kvm.py
Normal file
1058
lib/ansible/modules/cloud/misc/proxmox_kvm.py
Normal file
File diff suppressed because it is too large
Load Diff
261
lib/ansible/modules/cloud/misc/proxmox_template.py
Normal file
261
lib/ansible/modules/cloud/misc/proxmox_template.py
Normal file
@@ -0,0 +1,261 @@
|
||||
#!/usr/bin/python
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['preview'],
|
||||
'supported_by': 'community',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: proxmox_template
|
||||
short_description: management of OS templates in Proxmox VE cluster
|
||||
description:
|
||||
- allows you to upload/delete templates in Proxmox VE cluster
|
||||
version_added: "2.0"
|
||||
options:
|
||||
api_host:
|
||||
description:
|
||||
- the host of the Proxmox VE cluster
|
||||
required: true
|
||||
api_user:
|
||||
description:
|
||||
- the user to authenticate with
|
||||
required: true
|
||||
api_password:
|
||||
description:
|
||||
- the password to authenticate with
|
||||
- you can use PROXMOX_PASSWORD environment variable
|
||||
default: null
|
||||
required: false
|
||||
validate_certs:
|
||||
description:
|
||||
- enable / disable https certificate verification
|
||||
default: false
|
||||
required: false
|
||||
type: boolean
|
||||
node:
|
||||
description:
|
||||
- Proxmox VE node, when you will operate with template
|
||||
default: null
|
||||
required: true
|
||||
src:
|
||||
description:
|
||||
- path to uploaded file
|
||||
- required only for C(state=present)
|
||||
default: null
|
||||
required: false
|
||||
aliases: ['path']
|
||||
template:
|
||||
description:
|
||||
- the template name
|
||||
- required only for states C(absent), C(info)
|
||||
default: null
|
||||
required: false
|
||||
content_type:
|
||||
description:
|
||||
- content type
|
||||
- required only for C(state=present)
|
||||
default: 'vztmpl'
|
||||
required: false
|
||||
choices: ['vztmpl', 'iso']
|
||||
storage:
|
||||
description:
|
||||
- target storage
|
||||
default: 'local'
|
||||
required: false
|
||||
type: string
|
||||
timeout:
|
||||
description:
|
||||
- timeout for operations
|
||||
default: 30
|
||||
required: false
|
||||
type: integer
|
||||
force:
|
||||
description:
|
||||
- can be used only with C(state=present); an existing template will be overwritten
|
||||
default: false
|
||||
required: false
|
||||
type: boolean
|
||||
state:
|
||||
description:
|
||||
- Indicate desired state of the template
|
||||
choices: ['present', 'absent']
|
||||
default: present
|
||||
notes:
|
||||
- Requires proxmoxer and requests modules on host. This modules can be installed with pip.
|
||||
requirements: [ "proxmoxer", "requests" ]
|
||||
author: "Sergei Antipov @UnderGreen"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Upload new openvz template with minimal options
|
||||
- proxmox_template:
|
||||
node: uk-mc02
|
||||
api_user: root@pam
|
||||
api_password: 1q2w3e
|
||||
api_host: node1
|
||||
src: ~/ubuntu-14.04-x86_64.tar.gz
|
||||
|
||||
# Upload new openvz template with minimal options use environment PROXMOX_PASSWORD variable(you should export it before)
|
||||
- proxmox_template:
|
||||
node: uk-mc02
|
||||
api_user: root@pam
|
||||
api_host: node1
|
||||
src: ~/ubuntu-14.04-x86_64.tar.gz
|
||||
|
||||
# Upload new openvz template with all options and force overwrite
|
||||
- proxmox_template:
|
||||
node: uk-mc02
|
||||
api_user: root@pam
|
||||
api_password: 1q2w3e
|
||||
api_host: node1
|
||||
storage: local
|
||||
content_type: vztmpl
|
||||
src: ~/ubuntu-14.04-x86_64.tar.gz
|
||||
force: yes
|
||||
|
||||
# Delete template with minimal options
|
||||
- proxmox_template:
|
||||
node: uk-mc02
|
||||
api_user: root@pam
|
||||
api_password: 1q2w3e
|
||||
api_host: node1
|
||||
template: ubuntu-14.04-x86_64.tar.gz
|
||||
state: absent
|
||||
'''
|
||||
|
||||
import os
|
||||
import time
|
||||
|
||||
try:
|
||||
from proxmoxer import ProxmoxAPI
|
||||
HAS_PROXMOXER = True
|
||||
except ImportError:
|
||||
HAS_PROXMOXER = False
|
||||
|
||||
def get_template(proxmox, node, storage, content_type, template):
    """Return a non-empty list (of True) when *template* exists on *storage* of *node*.

    Callers use the result's truthiness as an existence check.
    """
    volid = '%s:%s/%s' % (storage, content_type, template)
    matches = []
    for entry in proxmox.nodes(node).storage(storage).content.get():
        if entry['volid'] == volid:
            matches.append(True)
    return matches
|
||||
|
||||
def upload_template(module, proxmox, api_host, node, storage, content_type, realpath, timeout):
    """Upload the file at *realpath* to *storage* on *node* and wait for the task.

    The upload task is polled on the node named by the first label of
    *api_host*.  Returns True on success; fails the module when *timeout*
    seconds elapse; returns False if the loop never runs.
    """
    taskid = proxmox.nodes(node).storage(storage).upload.post(content=content_type, filename=open(realpath))
    while timeout:
        task_status = proxmox.nodes(api_host.split('.')[0]).tasks(taskid).status.get()
        if task_status['status'] == 'stopped' and task_status['exitstatus'] == 'OK':
            return True
        timeout = timeout - 1
        if timeout == 0:
            # BUG FIX: the original called proxmox.node(node); the proxmoxer
            # endpoint is 'nodes', so the timeout path raised instead of
            # reporting the task log.
            module.fail_json(msg='Reached timeout while waiting for uploading template. Last line in task before timeout: %s'
                             % proxmox.nodes(node).tasks(taskid).log.get()[:1])

        time.sleep(1)
    return False
|
||||
|
||||
def delete_template(module, proxmox, node, storage, content_type, template, timeout):
    """Delete *template* from *storage* on *node* and wait until it is gone.

    Polls get_template() once a second; returns True when the template has
    disappeared, fails the module when *timeout* seconds elapse, and returns
    False if the loop never runs.
    """
    volid = '%s:%s/%s' % (storage, content_type, template)
    proxmox.nodes(node).storage(storage).content.delete(volid)
    remaining = timeout
    while remaining:
        if not get_template(proxmox, node, storage, content_type, template):
            return True
        remaining -= 1
        if remaining == 0:
            module.fail_json(msg='Reached timeout while waiting for deleting template.')
        time.sleep(1)
    return False
|
||||
|
||||
def main():
    """Entry point: upload (state=present) or delete (state=absent) a template
    on a Proxmox VE storage.

    Always exits through module.exit_json()/fail_json().
    """
    module = AnsibleModule(
        argument_spec = dict(
            api_host = dict(required=True),
            api_user = dict(required=True),
            api_password = dict(no_log=True),
            validate_certs = dict(type='bool', default='no'),
            node = dict(),
            src = dict(),
            template = dict(),
            content_type = dict(default='vztmpl', choices=['vztmpl','iso']),
            storage = dict(default='local'),
            timeout = dict(type='int', default=30),
            force = dict(type='bool', default='no'),
            state = dict(default='present', choices=['present', 'absent']),
        )
    )

    if not HAS_PROXMOXER:
        module.fail_json(msg='proxmoxer required for this module')

    state = module.params['state']
    api_user = module.params['api_user']
    api_host = module.params['api_host']
    api_password = module.params['api_password']
    validate_certs = module.params['validate_certs']
    node = module.params['node']
    storage = module.params['storage']
    timeout = module.params['timeout']

    # If password not set get it from PROXMOX_PASSWORD env
    if not api_password:
        try:
            api_password = os.environ['PROXMOX_PASSWORD']
        except KeyError as e:
            module.fail_json(msg='You should set api_password param or use PROXMOX_PASSWORD environment variable')

    try:
        proxmox = ProxmoxAPI(api_host, user=api_user, password=api_password, verify_ssl=validate_certs)
    except Exception as e:
        module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e)

    if state == 'present':
        try:
            content_type = module.params['content_type']
            src = module.params['src']

            # BUG FIX: validate 'src' before dereferencing it.  The original
            # computed realpath/template from src first, so a missing src
            # crashed before the intended error message could be produced.
            if not src:
                module.fail_json(msg='src param to uploading template file is mandatory')
            # keep 'template' bound for the except handler below even if
            # resolving the path raises
            template = src

            from ansible import utils
            realpath = utils.path_dwim(None, src)
            template = os.path.basename(realpath)
            if get_template(proxmox, node, storage, content_type, template) and not module.params['force']:
                module.exit_json(changed=False, msg='template with volid=%s:%s/%s is already exists' % (storage, content_type, template))
            elif not (os.path.exists(realpath) and os.path.isfile(realpath)):
                module.fail_json(msg='template file on path %s not exists' % realpath)

            if upload_template(module, proxmox, api_host, node, storage, content_type, realpath, timeout):
                module.exit_json(changed=True, msg='template with volid=%s:%s/%s uploaded' % (storage, content_type, template))
        except Exception as e:
            module.fail_json(msg="uploading of template %s failed with exception: %s" % ( template, e ))

    elif state == 'absent':
        try:
            content_type = module.params['content_type']
            template = module.params['template']

            if not template:
                module.fail_json(msg='template param is mandatory')
            elif not get_template(proxmox, node, storage, content_type, template):
                module.exit_json(changed=False, msg='template with volid=%s:%s/%s is already deleted' % (storage, content_type, template))

            if delete_template(module, proxmox, node, storage, content_type, template, timeout):
                module.exit_json(changed=True, msg='template with volid=%s:%s/%s deleted' % (storage, content_type, template))
        except Exception as e:
            module.fail_json(msg="deleting of template %s failed with exception: %s" % ( template, e ))
|
||||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
1534
lib/ansible/modules/cloud/misc/rhevm.py
Normal file
1534
lib/ansible/modules/cloud/misc/rhevm.py
Normal file
File diff suppressed because it is too large
Load Diff
538
lib/ansible/modules/cloud/misc/virt.py
Normal file
538
lib/ansible/modules/cloud/misc/virt.py
Normal file
@@ -0,0 +1,538 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
Virt management features
|
||||
|
||||
Copyright 2007, 2012 Red Hat, Inc
|
||||
Michael DeHaan <michael.dehaan@gmail.com>
|
||||
Seth Vidal <skvidal@fedoraproject.org>
|
||||
|
||||
This software may be freely redistributed under the terms of the GNU
|
||||
general public license.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
"""
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['preview'],
|
||||
'supported_by': 'community',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: virt
|
||||
short_description: Manages virtual machines supported by libvirt
|
||||
description:
|
||||
- Manages virtual machines supported by I(libvirt).
|
||||
version_added: "0.2"
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- name of the guest VM being managed. Note that VM must be previously
|
||||
defined with xml.
|
||||
required: true
|
||||
default: null
|
||||
aliases: []
|
||||
state:
|
||||
description:
|
||||
- Note that there may be some lag for state requests like C(shutdown)
|
||||
since these refer only to VM states. After starting a guest, it may not
|
||||
be immediately accessible.
|
||||
required: false
|
||||
choices: [ "running", "shutdown", "destroyed", "paused" ]
|
||||
default: "no"
|
||||
command:
|
||||
description:
|
||||
- in addition to state management, various non-idempotent commands are available. See examples
|
||||
required: false
|
||||
choices: ["create","status", "start", "stop", "pause", "unpause",
|
||||
"shutdown", "undefine", "destroy", "get_xml", "autostart",
|
||||
"freemem", "list_vms", "info", "nodeinfo", "virttype", "define"]
|
||||
uri:
|
||||
description:
|
||||
- libvirt connection uri
|
||||
required: false
|
||||
defaults: qemu:///system
|
||||
xml:
|
||||
description:
|
||||
- XML document used with the define command
|
||||
required: false
|
||||
default: null
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- "libvirt-python"
|
||||
author:
|
||||
- "Ansible Core Team"
|
||||
- "Michael DeHaan"
|
||||
- "Seth Vidal"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# a playbook task line:
|
||||
- virt:
|
||||
name: alpha
|
||||
state: running
|
||||
|
||||
# /usr/bin/ansible invocations
|
||||
ansible host -m virt -a "name=alpha command=status"
|
||||
ansible host -m virt -a "name=alpha command=get_xml"
|
||||
ansible host -m virt -a "name=alpha command=create uri=lxc:///"
|
||||
|
||||
# a playbook example of defining and launching an LXC guest
|
||||
tasks:
|
||||
- name: define vm
|
||||
virt:
|
||||
name: foo
|
||||
command: define
|
||||
xml: '{{ lookup('template', 'container-template.xml.j2') }}'
|
||||
uri: 'lxc:///'
|
||||
- name: start vm
|
||||
virt:
|
||||
name: foo
|
||||
state: running
|
||||
uri: 'lxc:///'
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
# for list_vms command
|
||||
list_vms:
|
||||
description: The list of vms defined on the remote system
|
||||
type: dictionary
|
||||
returned: success
|
||||
sample: [
|
||||
"build.example.org",
|
||||
"dev.example.org"
|
||||
]
|
||||
# for status command
|
||||
status:
|
||||
description: The status of the VM, among running, crashed, paused and shutdown
|
||||
type: string
|
||||
sample: "success"
|
||||
returned: success
|
||||
'''
|
||||
# Module-level return codes used by the command handlers.
VIRT_FAILED = 1
VIRT_SUCCESS = 0
VIRT_UNAVAILABLE=2

import sys

# libvirt is an optional dependency; the module fails cleanly when missing.
try:
    import libvirt
except ImportError:
    HAS_VIRT = False
else:
    HAS_VIRT = True

ALL_COMMANDS = []
# Commands that operate on a single guest (require the 'name' parameter).
VM_COMMANDS = ['create','status', 'start', 'stop', 'pause', 'unpause',
               'shutdown', 'undefine', 'destroy', 'get_xml', 'autostart', 'define']
# Commands that operate on the hypervisor host as a whole.
HOST_COMMANDS = ['freemem', 'list_vms', 'info', 'nodeinfo', 'virttype']
ALL_COMMANDS.extend(VM_COMMANDS)
ALL_COMMANDS.extend(HOST_COMMANDS)

# Map libvirt numeric domain states to the strings this module reports.
VIRT_STATE_NAME_MAP = {
    0 : "running",
    1 : "running",
    2 : "running",
    3 : "paused",
    4 : "shutdown",
    5 : "shutdown",
    6 : "crashed"
}
||||
|
||||
class VMNotFound(Exception):
    """Raised when no libvirt domain matches the requested guest name."""
|
||||
|
||||
class LibvirtConnection(object):
    """Thin wrapper around a libvirt connection.

    __init__ opens the connection (choosing the URI based on the running
    kernel and the 'esx' scheme); the remaining methods are small helpers
    around domain lookup and lifecycle operations.
    """

    def __init__(self, uri, module):
        self.module = module

        # Xen dom0 kernels are detected via `uname -r`; libvirt.open(None)
        # then lets libvirt pick the default hypervisor URI itself.
        cmd = "uname -r"
        rc, stdout, stderr = self.module.run_command(cmd)

        if "xen" in stdout:
            conn = libvirt.open(None)
        elif "esx" in uri:
            auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT], [], None]
            conn = libvirt.openAuth(uri, auth)
        else:
            conn = libvirt.open(uri)

        if not conn:
            raise Exception("hypervisor connection failure")

        self.conn = conn

    def find_vm(self, vmid):
        """
        Return the domain whose name equals *vmid*.

        Extra bonus feature: vmid = -1 returns a list of everything.
        Raises VMNotFound when no domain matches.
        """
        conn = self.conn

        vms = []

        # this block of code borrowed from virt-manager:
        # get working domain's name
        ids = conn.listDomainsID()
        for id in ids:
            vm = conn.lookupByID(id)
            vms.append(vm)
        # get defined domain
        names = conn.listDefinedDomains()
        for name in names:
            vm = conn.lookupByName(name)
            vms.append(vm)

        if vmid == -1:
            return vms

        for vm in vms:
            if vm.name() == vmid:
                return vm

        raise VMNotFound("virtual machine %s not found" % vmid)

    def shutdown(self, vmid):
        return self.find_vm(vmid).shutdown()

    def pause(self, vmid):
        # BUG FIX: the original called self.suspend(self.conn, vmid), passing
        # an extra positional argument and raising TypeError on every call.
        return self.suspend(vmid)

    def unpause(self, vmid):
        # BUG FIX: same extra-argument problem as pause().
        return self.resume(vmid)

    def suspend(self, vmid):
        return self.find_vm(vmid).suspend()

    def resume(self, vmid):
        return self.find_vm(vmid).resume()

    def create(self, vmid):
        return self.find_vm(vmid).create()

    def destroy(self, vmid):
        return self.find_vm(vmid).destroy()

    def undefine(self, vmid):
        return self.find_vm(vmid).undefine()

    def get_status2(self, vm):
        # Variant of get_status() that takes an already-resolved domain object.
        state = vm.info()[0]
        return VIRT_STATE_NAME_MAP.get(state,"unknown")

    def get_status(self, vmid):
        state = self.find_vm(vmid).info()[0]
        return VIRT_STATE_NAME_MAP.get(state,"unknown")

    def nodeinfo(self):
        return self.conn.getInfo()

    def get_type(self):
        return self.conn.getType()

    def get_xml(self, vmid):
        vm = self.conn.lookupByName(vmid)
        return vm.XMLDesc(0)

    def get_maxVcpus(self, vmid):
        vm = self.conn.lookupByName(vmid)
        return vm.maxVcpus()

    def get_maxMemory(self, vmid):
        vm = self.conn.lookupByName(vmid)
        return vm.maxMemory()

    def getFreeMemory(self):
        return self.conn.getFreeMemory()

    def get_autostart(self, vmid):
        vm = self.conn.lookupByName(vmid)
        return vm.autostart()

    def set_autostart(self, vmid, val):
        vm = self.conn.lookupByName(vmid)
        return vm.setAutostart(val)

    def define_from_xml(self, xml):
        return self.conn.defineXML(xml)
|
||||
|
||||
|
||||
class Virt(object):
|
||||
|
||||
def __init__(self, uri, module):
|
||||
self.module = module
|
||||
self.uri = uri
|
||||
|
||||
def __get_conn(self):
|
||||
self.conn = LibvirtConnection(self.uri, self.module)
|
||||
return self.conn
|
||||
|
||||
def get_vm(self, vmid):
|
||||
self.__get_conn()
|
||||
return self.conn.find_vm(vmid)
|
||||
|
||||
def state(self):
|
||||
vms = self.list_vms()
|
||||
state = []
|
||||
for vm in vms:
|
||||
state_blurb = self.conn.get_status(vm)
|
||||
state.append("%s %s" % (vm,state_blurb))
|
||||
return state
|
||||
|
||||
def info(self):
|
||||
vms = self.list_vms()
|
||||
info = dict()
|
||||
for vm in vms:
|
||||
data = self.conn.find_vm(vm).info()
|
||||
# libvirt returns maxMem, memory, and cpuTime as long()'s, which
|
||||
# xmlrpclib tries to convert to regular int's during serialization.
|
||||
# This throws exceptions, so convert them to strings here and
|
||||
# assume the other end of the xmlrpc connection can figure things
|
||||
# out or doesn't care.
|
||||
info[vm] = {
|
||||
"state" : VIRT_STATE_NAME_MAP.get(data[0],"unknown"),
|
||||
"maxMem" : str(data[1]),
|
||||
"memory" : str(data[2]),
|
||||
"nrVirtCpu" : data[3],
|
||||
"cpuTime" : str(data[4]),
|
||||
}
|
||||
info[vm]["autostart"] = self.conn.get_autostart(vm)
|
||||
|
||||
return info
|
||||
|
||||
def nodeinfo(self):
|
||||
self.__get_conn()
|
||||
info = dict()
|
||||
data = self.conn.nodeinfo()
|
||||
info = {
|
||||
"cpumodel" : str(data[0]),
|
||||
"phymemory" : str(data[1]),
|
||||
"cpus" : str(data[2]),
|
||||
"cpumhz" : str(data[3]),
|
||||
"numanodes" : str(data[4]),
|
||||
"sockets" : str(data[5]),
|
||||
"cpucores" : str(data[6]),
|
||||
"cputhreads" : str(data[7])
|
||||
}
|
||||
return info
|
||||
|
||||
def list_vms(self, state=None):
|
||||
self.conn = self.__get_conn()
|
||||
vms = self.conn.find_vm(-1)
|
||||
results = []
|
||||
for x in vms:
|
||||
try:
|
||||
if state:
|
||||
vmstate = self.conn.get_status2(x)
|
||||
if vmstate == state:
|
||||
results.append(x.name())
|
||||
else:
|
||||
results.append(x.name())
|
||||
except:
|
||||
pass
|
||||
return results
|
||||
|
||||
def virttype(self):
|
||||
return self.__get_conn().get_type()
|
||||
|
||||
def autostart(self, vmid):
|
||||
self.conn = self.__get_conn()
|
||||
return self.conn.set_autostart(vmid, True)
|
||||
|
||||
def freemem(self):
|
||||
self.conn = self.__get_conn()
|
||||
return self.conn.getFreeMemory()
|
||||
|
||||
def shutdown(self, vmid):
|
||||
""" Make the machine with the given vmid stop running. Whatever that takes. """
|
||||
self.__get_conn()
|
||||
self.conn.shutdown(vmid)
|
||||
return 0
|
||||
|
||||
|
||||
def pause(self, vmid):
|
||||
""" Pause the machine with the given vmid. """
|
||||
|
||||
self.__get_conn()
|
||||
return self.conn.suspend(vmid)
|
||||
|
||||
def unpause(self, vmid):
|
||||
""" Unpause the machine with the given vmid. """
|
||||
|
||||
self.__get_conn()
|
||||
return self.conn.resume(vmid)
|
||||
|
||||
def create(self, vmid):
|
||||
""" Start the machine via the given vmid """
|
||||
|
||||
self.__get_conn()
|
||||
return self.conn.create(vmid)
|
||||
|
||||
def start(self, vmid):
|
||||
""" Start the machine via the given id/name """
|
||||
|
||||
self.__get_conn()
|
||||
return self.conn.create(vmid)
|
||||
|
||||
def destroy(self, vmid):
|
||||
""" Pull the virtual power from the virtual domain, giving it virtually no time to virtually shut down. """
|
||||
self.__get_conn()
|
||||
return self.conn.destroy(vmid)
|
||||
|
||||
def undefine(self, vmid):
|
||||
""" Stop a domain, and then wipe it from the face of the earth. (delete disk/config file) """
|
||||
|
||||
self.__get_conn()
|
||||
return self.conn.undefine(vmid)
|
||||
|
||||
def status(self, vmid):
|
||||
"""
|
||||
Return a state suitable for server consumption. Aka, codes.py values, not XM output.
|
||||
"""
|
||||
self.__get_conn()
|
||||
return self.conn.get_status(vmid)
|
||||
|
||||
def get_xml(self, vmid):
|
||||
"""
|
||||
Receive a Vm id as input
|
||||
Return an xml describing vm config returned by a libvirt call
|
||||
"""
|
||||
|
||||
self.__get_conn()
|
||||
return self.conn.get_xml(vmid)
|
||||
|
||||
def get_maxVcpus(self, vmid):
|
||||
"""
|
||||
Gets the max number of VCPUs on a guest
|
||||
"""
|
||||
|
||||
self.__get_conn()
|
||||
return self.conn.get_maxVcpus(vmid)
|
||||
|
||||
def get_max_memory(self, vmid):
|
||||
"""
|
||||
Gets the max memory on a guest
|
||||
"""
|
||||
|
||||
self.__get_conn()
|
||||
return self.conn.get_MaxMemory(vmid)
|
||||
|
||||
    def define(self, xml):
        """
        Define a new guest from the given XML document; returns the
        result of the connection's define_from_xml() call.
        """
        self.__get_conn()
        return self.conn.define_from_xml(xml)
|
||||
|
||||
def core(module):
    """Dispatch an Ansible module invocation to the right Virt operation.

    Handles, in order: the combined state+list_vms case, declarative
    state management, and raw command dispatch.

    Returns a (rc, result) tuple with rc == VIRT_SUCCESS on success;
    fatal problems are reported via module.fail_json(), which does not
    return.
    """

    state = module.params.get('state', None)
    guest = module.params.get('name', None)
    command = module.params.get('command', None)
    uri = module.params.get('uri', None)
    xml = module.params.get('xml', None)

    v = Virt(uri, module)
    res = {}

    # 'list_vms' accepts an optional state filter, so handle the combined
    # state+command case before the generic state handling below.
    if state and command == 'list_vms':
        res = v.list_vms(state=state)
        if not isinstance(res, dict):
            res = {command: res}
        return VIRT_SUCCESS, res

    if state:
        if not guest:
            module.fail_json(msg="state change requires a guest specified")

        res['changed'] = False
        # BUG FIX: the original compared status strings with 'is'/'is not',
        # which tests object identity rather than equality and only works
        # by accident (if at all); use ==/!= instead.
        if state == 'running':
            if v.status(guest) == 'paused':
                res['changed'] = True
                res['msg'] = v.unpause(guest)
            elif v.status(guest) != 'running':
                res['changed'] = True
                res['msg'] = v.start(guest)
        elif state == 'shutdown':
            if v.status(guest) != 'shutdown':
                res['changed'] = True
                res['msg'] = v.shutdown(guest)
        elif state == 'destroyed':
            if v.status(guest) != 'shutdown':
                res['changed'] = True
                res['msg'] = v.destroy(guest)
        elif state == 'paused':
            if v.status(guest) == 'running':
                res['changed'] = True
                res['msg'] = v.pause(guest)
        else:
            module.fail_json(msg="unexpected state")

        return VIRT_SUCCESS, res

    if command:
        if command in VM_COMMANDS:
            if not guest:
                module.fail_json(msg="%s requires 1 argument: guest" % command)
            if command == 'define':
                if not xml:
                    module.fail_json(msg="define requires xml argument")
                # Only define the guest when it does not exist yet; an
                # already-defined guest is reported as unchanged.
                try:
                    v.get_vm(guest)
                except VMNotFound:
                    v.define(xml)
                    res = {'changed': True, 'created': guest}
                return VIRT_SUCCESS, res
            # Generic per-guest command dispatch (start, destroy, ...).
            res = getattr(v, command)(guest)
            if not isinstance(res, dict):
                res = {command: res}
            return VIRT_SUCCESS, res

        elif hasattr(v, command):
            # Host-level command taking no guest argument.
            res = getattr(v, command)()
            if not isinstance(res, dict):
                res = {command: res}
            return VIRT_SUCCESS, res

        else:
            # BUG FIX: the original referenced an undefined name 'basecmd'
            # here, raising NameError instead of reporting the bad command.
            module.fail_json(msg="Command %s not recognized" % command)

    module.fail_json(msg="expected state or command parameter to be specified")
|
||||
|
||||
def main():
    """Module entry point: parse arguments, check requirements, run core()."""

    module = AnsibleModule(argument_spec=dict(
        name = dict(aliases=['guest']),
        state = dict(choices=['running', 'shutdown', 'destroyed', 'paused']),
        command = dict(choices=ALL_COMMANDS),
        uri = dict(default='qemu:///system'),
        xml = dict(),
    ))

    # The module cannot do anything without the libvirt bindings.
    if not HAS_VIRT:
        module.fail_json(
            msg='The `libvirt` module is not importable. Check the requirements.'
        )

    rc = VIRT_SUCCESS
    try:
        rc, result = core(module)
    except Exception:
        # get_exception() keeps this compatible with very old Python 2
        # interpreters that lack 'except ... as e' (pycompat24 helper).
        e = get_exception()
        module.fail_json(msg=str(e))

    if rc != 0:  # something went wrong; emit the msg
        module.fail_json(rc=rc, msg=result)
    else:
        module.exit_json(**result)
|
||||
|
||||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.pycompat24 import get_exception
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
622
lib/ansible/modules/cloud/misc/virt_net.py
Normal file
622
lib/ansible/modules/cloud/misc/virt_net.py
Normal file
@@ -0,0 +1,622 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# (c) 2015, Maciej Delmanowski <drybjed@gmail.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['preview'],
|
||||
'supported_by': 'community',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: virt_net
|
||||
author: "Maciej Delmanowski (@drybjed)"
|
||||
version_added: "2.0"
|
||||
short_description: Manage libvirt network configuration
|
||||
description:
|
||||
- Manage I(libvirt) networks.
|
||||
options:
|
||||
name:
|
||||
required: true
|
||||
aliases: ['network']
|
||||
description:
|
||||
- name of the network being managed. Note that network must be previously
|
||||
defined with xml.
|
||||
state:
|
||||
required: false
|
||||
choices: [ "active", "inactive", "present", "absent" ]
|
||||
description:
|
||||
- specify which state you want a network to be in.
|
||||
If 'active', network will be started.
|
||||
If 'present', ensure that network is present but do not change its
|
||||
state; if it's missing, you need to specify xml argument.
|
||||
If 'inactive', network will be stopped.
|
||||
If 'undefined' or 'absent', network will be removed from I(libvirt) configuration.
|
||||
command:
|
||||
required: false
|
||||
choices: [ "define", "create", "start", "stop", "destroy",
|
||||
"undefine", "get_xml", "list_nets", "facts",
|
||||
"info", "status", "modify"]
|
||||
description:
|
||||
- in addition to state management, various non-idempotent commands are available.
|
||||
See examples.
|
||||
Modify was added in version 2.1
|
||||
autostart:
|
||||
required: false
|
||||
choices: ["yes", "no"]
|
||||
description:
|
||||
- Specify if a given network should be started automatically on system boot.
|
||||
uri:
|
||||
required: false
|
||||
default: "qemu:///system"
|
||||
description:
|
||||
- libvirt connection uri.
|
||||
xml:
|
||||
required: false
|
||||
description:
|
||||
- XML document used with the define command.
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- "python-libvirt"
|
||||
- "python-lxml"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Define a new network
|
||||
- virt_net:
|
||||
command: define
|
||||
name: br_nat
|
||||
xml: '{{ lookup("template", "network/bridge.xml.j2") }}'
|
||||
|
||||
# Start a network
|
||||
- virt_net:
|
||||
command: create
|
||||
name: br_nat
|
||||
|
||||
# List available networks
|
||||
- virt_net:
|
||||
command: list_nets
|
||||
|
||||
# Get XML data of a specified network
|
||||
- virt_net:
|
||||
command: get_xml
|
||||
name: br_nat
|
||||
|
||||
# Stop a network
|
||||
- virt_net:
|
||||
command: destroy
|
||||
name: br_nat
|
||||
|
||||
# Undefine a network
|
||||
- virt_net:
|
||||
command: undefine
|
||||
name: br_nat
|
||||
|
||||
# Gather facts about networks
|
||||
# Facts will be available as 'ansible_libvirt_networks'
|
||||
- virt_net:
|
||||
command: facts
|
||||
|
||||
# Gather information about network managed by 'libvirt' remotely using uri
|
||||
- virt_net:
|
||||
command: info
|
||||
uri: '{{ item }}'
|
||||
with_items: '{{ libvirt_uris }}'
|
||||
register: networks
|
||||
|
||||
# Ensure that a network is active (needs to be defined and built first)
|
||||
- virt_net:
|
||||
state: active
|
||||
name: br_nat
|
||||
|
||||
# Ensure that a network is inactive
|
||||
- virt_net:
|
||||
state: inactive
|
||||
name: br_nat
|
||||
|
||||
# Ensure that a given network will be started at boot
|
||||
- virt_net:
|
||||
autostart: yes
|
||||
name: br_nat
|
||||
|
||||
# Disable autostart for a given network
|
||||
- virt_net:
|
||||
autostart: no
|
||||
name: br_nat
|
||||
'''
|
||||
|
||||
# Module-level return codes used by core()/main().
VIRT_FAILED = 1
VIRT_SUCCESS = 0
VIRT_UNAVAILABLE=2


# Optional third-party dependencies: record availability instead of
# failing at import time, so main() can emit a clean error message.
try:
    import libvirt
except ImportError:
    HAS_VIRT = False
else:
    HAS_VIRT = True

try:
    from lxml import etree
except ImportError:
    HAS_XML = False
else:
    HAS_XML = True

from ansible.module_utils.basic import AnsibleModule


# Command vocabulary: ENTRY_COMMANDS operate on a named network and
# require the 'name' argument; HOST_COMMANDS operate on the hypervisor
# connection as a whole.
ALL_COMMANDS = []
ENTRY_COMMANDS = ['create', 'status', 'start', 'stop',
                  'undefine', 'destroy', 'get_xml', 'define',
                  'modify' ]
HOST_COMMANDS = [ 'list_nets', 'facts', 'info' ]
ALL_COMMANDS.extend(ENTRY_COMMANDS)
ALL_COMMANDS.extend(HOST_COMMANDS)

# libvirt reports these attributes as 0/1 integers; map them to the
# strings the module exposes to playbooks.
ENTRY_STATE_ACTIVE_MAP = {
    0 : "inactive",
    1 : "active"
}

ENTRY_STATE_AUTOSTART_MAP = {
    0 : "no",
    1 : "yes"
}

ENTRY_STATE_PERSISTENT_MAP = {
    0 : "no",
    1 : "yes"
}


class EntryNotFound(Exception):
    """Raised when a named libvirt network cannot be found."""
    pass
|
||||
|
||||
|
||||
class LibvirtConnection(object):
    """Thin wrapper around a libvirt connection, scoped to network objects.

    All lookups go through find_entry(). In Ansible check mode, mutating
    operations short-circuit via module.exit_json(changed=True) instead of
    touching the hypervisor.
    """

    def __init__(self, uri, module):
        """Open a libvirt connection to *uri*; raise on failure."""

        self.module = module

        conn = libvirt.open(uri)

        if not conn:
            raise Exception("hypervisor connection failure")

        self.conn = conn

    def find_entry(self, entryid):
        """Return the network named *entryid*.

        entryid == -1 returns the list of all networks (active and
        defined-but-inactive). Raises EntryNotFound when no network
        matches.
        """

        results = []

        # Get active entries.
        for name in self.conn.listNetworks():
            entry = self.conn.networkLookupByName(name)
            results.append(entry)

        # Get inactive (defined-only) entries.
        for name in self.conn.listDefinedNetworks():
            entry = self.conn.networkLookupByName(name)
            results.append(entry)

        if entryid == -1:
            return results

        for entry in results:
            if entry.name() == entryid:
                return entry

        raise EntryNotFound("network %s not found" % entryid)

    def create(self, entryid):
        """Start (create) the network; in check mode only report a change."""
        if not self.module.check_mode:
            return self.find_entry(entryid).create()
        else:
            try:
                state = self.find_entry(entryid).isActive()
            except Exception:
                # Missing network would have to be created -> a change.
                return self.module.exit_json(changed=True)
            if not state:
                return self.module.exit_json(changed=True)

    def modify(self, entryid, xml):
        """Apply an XML fragment to the network (DHCP <host> entries only).

        Returns True when a change was (or would be) made, False when the
        existing entry already matches. Unsupported fragment types abort
        via module.fail_json().
        """
        network = self.find_entry(entryid)
        # Identify what type of entry is given in the xml.
        new_data = etree.fromstring(xml)
        old_data = etree.fromstring(network.XMLDesc(0))
        if new_data.tag == 'host':
            mac_addr = new_data.get('mac')
            hosts = old_data.xpath('/network/ip/dhcp/host')
            # Find the one MAC we're looking for.
            host = None
            for h in hosts:
                if h.get('mac') == mac_addr:
                    host = h
                    break
            if host is None:
                # Add the host.
                if not self.module.check_mode:
                    res = network.update(libvirt.VIR_NETWORK_UPDATE_COMMAND_ADD_LAST,
                                         libvirt.VIR_NETWORK_SECTION_IP_DHCP_HOST,
                                         -1, xml, libvirt.VIR_NETWORK_UPDATE_AFFECT_CURRENT)
                else:
                    # Pretend there was a change.
                    res = 0
                if res == 0:
                    return True
            else:
                # Change the host only if name/ip actually differ.
                if host.get('name') == new_data.get('name') and host.get('ip') == new_data.get('ip'):
                    return False
                else:
                    if not self.module.check_mode:
                        res = network.update(libvirt.VIR_NETWORK_UPDATE_COMMAND_MODIFY,
                                             libvirt.VIR_NETWORK_SECTION_IP_DHCP_HOST,
                                             -1, xml, libvirt.VIR_NETWORK_UPDATE_AFFECT_CURRENT)
                    else:
                        # Pretend there was a change.
                        res = 0
                    if res == 0:
                        return True
        # network.update signature: command, section, parentIndex, xml, flags=0
        # BUG FIX: the original called unicode(), which does not exist on
        # Python 3 and raised NameError; str() works on both interpreters.
        self.module.fail_json(msg='updating this is not supported yet ' + str(xml))

    def destroy(self, entryid):
        """Stop (destroy) the network; in check mode only report a change."""
        if not self.module.check_mode:
            return self.find_entry(entryid).destroy()
        else:
            if self.find_entry(entryid).isActive():
                return self.module.exit_json(changed=True)

    def undefine(self, entryid):
        """Remove the network definition; in check mode only report a change."""
        if not self.module.check_mode:
            return self.find_entry(entryid).undefine()
        else:
            if not self.find_entry(entryid):
                return self.module.exit_json(changed=True)

    def get_status2(self, entry):
        """Map an already-resolved entry's isActive() to 'active'/'inactive'."""
        state = entry.isActive()
        return ENTRY_STATE_ACTIVE_MAP.get(state, "unknown")

    def get_status(self, entryid):
        """Return 'active'/'inactive'/'unknown' for the named network.

        In check mode a missing network is reported as inactive rather
        than raising, so state comparisons still work.
        """
        if not self.module.check_mode:
            state = self.find_entry(entryid).isActive()
            return ENTRY_STATE_ACTIVE_MAP.get(state, "unknown")
        else:
            try:
                state = self.find_entry(entryid).isActive()
                return ENTRY_STATE_ACTIVE_MAP.get(state, "unknown")
            except Exception:
                return ENTRY_STATE_ACTIVE_MAP.get("inactive", "unknown")

    def get_uuid(self, entryid):
        """Return the network's UUID as a string."""
        return self.find_entry(entryid).UUIDString()

    def get_xml(self, entryid):
        """Return the network's XML description."""
        return self.find_entry(entryid).XMLDesc(0)

    def get_forward(self, entryid):
        """Return the network's forward mode; raise ValueError if unset."""
        xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
        try:
            result = xml.xpath('/network/forward')[0].get('mode')
        except Exception:
            raise ValueError('Forward mode not specified')
        return result

    def get_domain(self, entryid):
        """Return the network's DNS domain name; raise ValueError if unset."""
        xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
        try:
            result = xml.xpath('/network/domain')[0].get('name')
        except Exception:
            raise ValueError('Domain not specified')
        return result

    def get_macaddress(self, entryid):
        """Return the network's MAC address; raise ValueError if unset."""
        xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
        try:
            result = xml.xpath('/network/mac')[0].get('address')
        except Exception:
            raise ValueError('MAC address not specified')
        return result

    def get_autostart(self, entryid):
        """Return the autostart flag mapped to 'yes'/'no'."""
        state = self.find_entry(entryid).autostart()
        return ENTRY_STATE_AUTOSTART_MAP.get(state, "unknown")

    def get_autostart2(self, entryid):
        """Return the raw autostart flag; check mode tolerates a missing entry."""
        if not self.module.check_mode:
            return self.find_entry(entryid).autostart()
        else:
            try:
                return self.find_entry(entryid).autostart()
            except Exception:
                return self.module.exit_json(changed=True)

    def set_autostart(self, entryid, val):
        """Set autostart to *val*; in check mode only report a would-be change."""
        if not self.module.check_mode:
            return self.find_entry(entryid).setAutostart(val)
        else:
            try:
                state = self.find_entry(entryid).autostart()
            except Exception:
                return self.module.exit_json(changed=True)
            if bool(state) != val:
                return self.module.exit_json(changed=True)

    def get_bridge(self, entryid):
        """Return the name of the bridge device backing the network."""
        return self.find_entry(entryid).bridgeName()

    def get_persistent(self, entryid):
        """Return 'yes'/'no' depending on whether the network is persistent."""
        state = self.find_entry(entryid).isPersistent()
        return ENTRY_STATE_PERSISTENT_MAP.get(state, "unknown")

    def define_from_xml(self, entryid, xml):
        """Define a new network from *xml*; check mode reports changed if missing."""
        if not self.module.check_mode:
            return self.conn.networkDefineXML(xml)
        else:
            try:
                self.find_entry(entryid)
            except Exception:
                return self.module.exit_json(changed=True)
|
||||
|
||||
|
||||
class VirtNetwork(object):
    """High-level network operations; a thin facade over LibvirtConnection.

    Method names double as module command names and are invoked via
    getattr() from core().
    """

    def __init__(self, uri, module):
        # Open the hypervisor connection eagerly; failures propagate.
        self.module = module
        self.uri = uri
        self.conn = LibvirtConnection(self.uri, self.module)

    def get_net(self, entryid):
        """Return the network object for *entryid*; raises EntryNotFound."""
        return self.conn.find_entry(entryid)

    def list_nets(self, state=None):
        """Return network names, optionally filtered by state ('active'/'inactive')."""
        results = []
        for entry in self.conn.find_entry(-1):
            if state:
                if state == self.conn.get_status2(entry):
                    results.append(entry.name())
            else:
                results.append(entry.name())
        return results

    def state(self):
        """Return '<name> <state>' strings for every network."""
        results = []
        for entry in self.list_nets():
            state_blurb = self.conn.get_status(entry)
            results.append("%s %s" % (entry,state_blurb))
        return results

    def autostart(self, entryid):
        """Enable autostart for *entryid* (always sets it to True)."""
        return self.conn.set_autostart(entryid, True)

    def get_autostart(self, entryid):
        """Return the raw autostart flag for *entryid*."""
        return self.conn.get_autostart2(entryid)

    def set_autostart(self, entryid, state):
        """Set the autostart flag for *entryid* to *state*."""
        return self.conn.set_autostart(entryid, state)

    def create(self, entryid):
        """Start the network (same operation as start())."""
        return self.conn.create(entryid)

    def modify(self, entryid, xml):
        """Modify the network with the given XML fragment."""
        return self.conn.modify(entryid, xml)

    def start(self, entryid):
        """Start the network (alias for create())."""
        return self.conn.create(entryid)

    def stop(self, entryid):
        """Stop the network (alias for destroy())."""
        return self.conn.destroy(entryid)

    def destroy(self, entryid):
        """Forcibly stop the network."""
        return self.conn.destroy(entryid)

    def undefine(self, entryid):
        """Remove the network definition."""
        return self.conn.undefine(entryid)

    def status(self, entryid):
        """Return 'active'/'inactive'/'unknown' for the network."""
        return self.conn.get_status(entryid)

    def get_xml(self, entryid):
        """Return the network's XML description."""
        return self.conn.get_xml(entryid)

    def define(self, entryid, xml):
        """Define a new network from *xml*."""
        return self.conn.define_from_xml(entryid, xml)

    def info(self):
        """Return per-network details under the 'networks' key."""
        return self.facts(facts_mode='info')

    def facts(self, facts_mode='facts'):
        """Collect per-network details for all networks.

        facts_mode='facts' nests the result under
        ansible_facts.ansible_libvirt_networks; facts_mode='info' returns
        it under the 'networks' key instead.
        """
        results = dict()
        for entry in self.list_nets():
            results[entry] = dict()
            results[entry]["autostart"] = self.conn.get_autostart(entry)
            results[entry]["persistent"] = self.conn.get_persistent(entry)
            results[entry]["state"] = self.conn.get_status(entry)
            results[entry]["bridge"] = self.conn.get_bridge(entry)
            results[entry]["uuid"] = self.conn.get_uuid(entry)

            # These three are optional in the network XML; skip silently
            # when the corresponding element is absent.
            try:
                results[entry]["forward_mode"] = self.conn.get_forward(entry)
            except ValueError:
                pass

            try:
                results[entry]["domain"] = self.conn.get_domain(entry)
            except ValueError:
                pass

            try:
                results[entry]["macaddress"] = self.conn.get_macaddress(entry)
            except ValueError:
                pass

        facts = dict()
        if facts_mode == 'facts':
            facts["ansible_facts"] = dict()
            facts["ansible_facts"]["ansible_libvirt_networks"] = results
        elif facts_mode == 'info':
            facts['networks'] = results
        return facts
|
||||
|
||||
|
||||
def core(module):
    """Dispatch the virt_net invocation: state management, commands, autostart.

    Returns (rc, result) with rc == VIRT_SUCCESS on success; fatal
    problems are reported via module.fail_json(), which does not return.
    """

    state = module.params.get('state', None)
    name = module.params.get('name', None)
    command = module.params.get('command', None)
    uri = module.params.get('uri', None)
    xml = module.params.get('xml', None)
    autostart = module.params.get('autostart', None)

    v = VirtNetwork(uri, module)
    res = {}

    # 'list_nets' honours an optional state filter, so handle the combined
    # state+command case before the generic state handling below.
    if state and command == 'list_nets':
        res = v.list_nets(state=state)
        if not isinstance(res, dict):
            res = {command: res}
        return VIRT_SUCCESS, res

    if state:
        if not name:
            module.fail_json(msg="state change requires a specified name")

        res['changed'] = False
        # BUG FIX: status strings were compared with 'is'/'is not', which
        # tests identity rather than equality; use ==/!= so the state
        # checks actually work.
        if state in [ 'active' ]:
            if v.status(name) != 'active':
                res['changed'] = True
                res['msg'] = v.start(name)
        elif state in [ 'present' ]:
            # Ensure the network exists; define it from xml when missing.
            try:
                v.get_net(name)
            except EntryNotFound:
                if not xml:
                    module.fail_json(msg = "network '" + name + "' not present, but xml not specified")
                v.define(name, xml)
                res = {'changed': True, 'created': name}
        elif state in [ 'inactive' ]:
            entries = v.list_nets()
            if name in entries:
                if v.status(name) != 'inactive':
                    res['changed'] = True
                    res['msg'] = v.destroy(name)
        elif state in [ 'undefined', 'absent' ]:
            entries = v.list_nets()
            if name in entries:
                # Stop the network first if needed, then undefine it.
                if v.status(name) != 'inactive':
                    v.destroy(name)
                res['changed'] = True
                res['msg'] = v.undefine(name)
        else:
            module.fail_json(msg="unexpected state")

        return VIRT_SUCCESS, res

    if command:
        if command in ENTRY_COMMANDS:
            if not name:
                module.fail_json(msg = "%s requires 1 argument: name" % command)
            if command in ('define', 'modify'):
                if not xml:
                    module.fail_json(msg = command+" requires xml argument")
                # Define when missing; for 'modify', apply the fragment to
                # an already-existing network instead.
                try:
                    v.get_net(name)
                except EntryNotFound:
                    v.define(name, xml)
                    res = {'changed': True, 'created': name}
                else:
                    if command == 'modify':
                        mod = v.modify(name, xml)
                        res = {'changed': mod, 'modified': name}
                return VIRT_SUCCESS, res
            # Generic per-network command dispatch (create, destroy, ...).
            res = getattr(v, command)(name)
            if not isinstance(res, dict):
                res = { command: res }
            return VIRT_SUCCESS, res

        elif hasattr(v, command):
            # Host-level command taking no name argument.
            res = getattr(v, command)()
            if not isinstance(res, dict):
                res = { command: res }
            return VIRT_SUCCESS, res

        else:
            module.fail_json(msg="Command %s not recognized" % command)

    if autostart is not None:
        if not name:
            module.fail_json(msg = "state change requires a specified name")

        res['changed'] = False
        if autostart:
            if not v.get_autostart(name):
                res['changed'] = True
                res['msg'] = v.set_autostart(name, True)
        else:
            if v.get_autostart(name):
                res['changed'] = True
                res['msg'] = v.set_autostart(name, False)

        return VIRT_SUCCESS, res

    module.fail_json(msg="expected state or command parameter to be specified")
|
||||
|
||||
|
||||
def main():
    """Module entry point: parse arguments, check requirements, run core()."""

    module = AnsibleModule (
        argument_spec = dict(
            name = dict(aliases=['network']),
            state = dict(choices=['active', 'inactive', 'present', 'absent']),
            command = dict(choices=ALL_COMMANDS),
            uri = dict(default='qemu:///system'),
            xml = dict(),
            autostart = dict(type='bool')
        ),
        supports_check_mode = True
    )

    # Both optional dependencies are mandatory for this module to work.
    if not HAS_VIRT:
        module.fail_json(
            msg='The `libvirt` module is not importable. Check the requirements.'
        )

    if not HAS_XML:
        module.fail_json(
            msg='The `lxml` module is not importable. Check the requirements.'
        )

    rc = VIRT_SUCCESS
    try:
        rc, result = core(module)
    except Exception as e:
        module.fail_json(msg=str(e))

    if rc != 0:  # something went wrong; emit the msg
        module.fail_json(rc=rc, msg=result)
    else:
        module.exit_json(**result)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
721
lib/ansible/modules/cloud/misc/virt_pool.py
Normal file
721
lib/ansible/modules/cloud/misc/virt_pool.py
Normal file
@@ -0,0 +1,721 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# (c) 2015, Maciej Delmanowski <drybjed@gmail.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['preview'],
|
||||
'supported_by': 'community',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: virt_pool
|
||||
author: "Maciej Delmanowski (@drybjed)"
|
||||
version_added: "2.0"
|
||||
short_description: Manage libvirt storage pools
|
||||
description:
|
||||
- Manage I(libvirt) storage pools.
|
||||
options:
|
||||
name:
|
||||
required: false
|
||||
aliases: [ "pool" ]
|
||||
description:
|
||||
- name of the storage pool being managed. Note that pool must be previously
|
||||
defined with xml.
|
||||
state:
|
||||
required: false
|
||||
choices: [ "active", "inactive", "present", "absent", "undefined", "deleted" ]
|
||||
description:
|
||||
- specify which state you want a storage pool to be in.
|
||||
If 'active', pool will be started.
|
||||
If 'present', ensure that pool is present but do not change its
|
||||
state; if it's missing, you need to specify xml argument.
|
||||
If 'inactive', pool will be stopped.
|
||||
If 'undefined' or 'absent', pool will be removed from I(libvirt) configuration.
|
||||
If 'deleted', pool contents will be deleted and then pool undefined.
|
||||
command:
|
||||
required: false
|
||||
choices: [ "define", "build", "create", "start", "stop", "destroy",
|
||||
"delete", "undefine", "get_xml", "list_pools", "facts",
|
||||
"info", "status" ]
|
||||
description:
|
||||
- in addition to state management, various non-idempotent commands are available.
|
||||
See examples.
|
||||
autostart:
|
||||
required: false
|
||||
choices: ["yes", "no"]
|
||||
description:
|
||||
- Specify if a given storage pool should be started automatically on system boot.
|
||||
uri:
|
||||
required: false
|
||||
default: "qemu:///system"
|
||||
description:
|
||||
- I(libvirt) connection uri.
|
||||
xml:
|
||||
required: false
|
||||
description:
|
||||
- XML document used with the define command.
|
||||
mode:
|
||||
required: false
|
||||
choices: [ 'new', 'repair', 'resize', 'no_overwrite', 'overwrite', 'normal', 'zeroed' ]
|
||||
description:
|
||||
- Pass additional parameters to 'build' or 'delete' commands.
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- "python-libvirt"
|
||||
- "python-lxml"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Define a new storage pool
|
||||
- virt_pool:
|
||||
command: define
|
||||
name: vms
|
||||
xml: '{{ lookup("template", "pool/dir.xml.j2") }}'
|
||||
|
||||
# Build a storage pool if it does not exist
|
||||
- virt_pool:
|
||||
command: build
|
||||
name: vms
|
||||
|
||||
# Start a storage pool
|
||||
- virt_pool:
|
||||
command: create
|
||||
name: vms
|
||||
|
||||
# List available pools
|
||||
- virt_pool:
|
||||
command: list_pools
|
||||
|
||||
# Get XML data of a specified pool
|
||||
- virt_pool:
|
||||
command: get_xml
|
||||
name: vms
|
||||
|
||||
# Stop a storage pool
|
||||
- virt_pool:
|
||||
command: destroy
|
||||
name: vms
|
||||
|
||||
# Delete a storage pool (destroys contents)
|
||||
- virt_pool:
|
||||
command: delete
|
||||
name: vms
|
||||
|
||||
# Undefine a storage pool
|
||||
- virt_pool:
|
||||
command: undefine
|
||||
name: vms
|
||||
|
||||
# Gather facts about storage pools
|
||||
# Facts will be available as 'ansible_libvirt_pools'
|
||||
- virt_pool:
|
||||
command: facts
|
||||
|
||||
# Gather information about pools managed by 'libvirt' remotely using uri
|
||||
- virt_pool:
|
||||
command: info
|
||||
uri: '{{ item }}'
|
||||
with_items: '{{ libvirt_uris }}'
|
||||
register: storage_pools
|
||||
|
||||
# Ensure that a pool is active (needs to be defined and built first)
|
||||
- virt_pool:
|
||||
state: active
|
||||
name: vms
|
||||
|
||||
# Ensure that a pool is inactive
|
||||
- virt_pool:
|
||||
state: inactive
|
||||
name: vms
|
||||
|
||||
# Ensure that a given pool will be started at boot
|
||||
- virt_pool:
|
||||
autostart: yes
|
||||
name: vms
|
||||
|
||||
# Disable autostart for a given pool
|
||||
- virt_pool:
|
||||
autostart: no
|
||||
name: vms
|
||||
'''
|
||||
|
||||
VIRT_FAILED = 1
|
||||
VIRT_SUCCESS = 0
|
||||
VIRT_UNAVAILABLE=2
|
||||
|
||||
try:
|
||||
import libvirt
|
||||
except ImportError:
|
||||
HAS_VIRT = False
|
||||
else:
|
||||
HAS_VIRT = True
|
||||
|
||||
try:
|
||||
from lxml import etree
|
||||
except ImportError:
|
||||
HAS_XML = False
|
||||
else:
|
||||
HAS_XML = True
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
|
||||
|
||||
ALL_COMMANDS = []
|
||||
ENTRY_COMMANDS = ['create', 'status', 'start', 'stop', 'build', 'delete',
|
||||
'undefine', 'destroy', 'get_xml', 'define', 'refresh']
|
||||
HOST_COMMANDS = [ 'list_pools', 'facts', 'info' ]
|
||||
ALL_COMMANDS.extend(ENTRY_COMMANDS)
|
||||
ALL_COMMANDS.extend(HOST_COMMANDS)
|
||||
|
||||
ENTRY_STATE_ACTIVE_MAP = {
|
||||
0 : "inactive",
|
||||
1 : "active"
|
||||
}
|
||||
|
||||
ENTRY_STATE_AUTOSTART_MAP = {
|
||||
0 : "no",
|
||||
1 : "yes"
|
||||
}
|
||||
|
||||
ENTRY_STATE_PERSISTENT_MAP = {
|
||||
0 : "no",
|
||||
1 : "yes"
|
||||
}
|
||||
|
||||
ENTRY_STATE_INFO_MAP = {
|
||||
0 : "inactive",
|
||||
1 : "building",
|
||||
2 : "running",
|
||||
3 : "degraded",
|
||||
4 : "inaccessible"
|
||||
}
|
||||
|
||||
ENTRY_BUILD_FLAGS_MAP = {
|
||||
"new" : 0,
|
||||
"repair" : 1,
|
||||
"resize" : 2,
|
||||
"no_overwrite" : 4,
|
||||
"overwrite" : 8
|
||||
}
|
||||
|
||||
ENTRY_DELETE_FLAGS_MAP = {
|
||||
"normal" : 0,
|
||||
"zeroed" : 1
|
||||
}
|
||||
|
||||
ALL_MODES = []
|
||||
ALL_MODES.extend(ENTRY_BUILD_FLAGS_MAP.keys())
|
||||
ALL_MODES.extend(ENTRY_DELETE_FLAGS_MAP.keys())
|
||||
|
||||
|
||||
class EntryNotFound(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class LibvirtConnection(object):
|
||||
|
||||
def __init__(self, uri, module):
|
||||
|
||||
self.module = module
|
||||
|
||||
conn = libvirt.open(uri)
|
||||
|
||||
if not conn:
|
||||
raise Exception("hypervisor connection failure")
|
||||
|
||||
self.conn = conn
|
||||
|
||||
def find_entry(self, entryid):
|
||||
# entryid = -1 returns a list of everything
|
||||
|
||||
results = []
|
||||
|
||||
# Get active entries
|
||||
for name in self.conn.listStoragePools():
|
||||
entry = self.conn.storagePoolLookupByName(name)
|
||||
results.append(entry)
|
||||
|
||||
# Get inactive entries
|
||||
for name in self.conn.listDefinedStoragePools():
|
||||
entry = self.conn.storagePoolLookupByName(name)
|
||||
results.append(entry)
|
||||
|
||||
if entryid == -1:
|
||||
return results
|
||||
|
||||
for entry in results:
|
||||
if entry.name() == entryid:
|
||||
return entry
|
||||
|
||||
raise EntryNotFound("storage pool %s not found" % entryid)
|
||||
|
||||
def create(self, entryid):
|
||||
if not self.module.check_mode:
|
||||
return self.find_entry(entryid).create()
|
||||
else:
|
||||
try:
|
||||
state = self.find_entry(entryid).isActive()
|
||||
except:
|
||||
return self.module.exit_json(changed=True)
|
||||
if not state:
|
||||
return self.module.exit_json(changed=True)
|
||||
|
||||
def destroy(self, entryid):
|
||||
if not self.module.check_mode:
|
||||
return self.find_entry(entryid).destroy()
|
||||
else:
|
||||
if self.find_entry(entryid).isActive():
|
||||
return self.module.exit_json(changed=True)
|
||||
|
||||
def undefine(self, entryid):
|
||||
if not self.module.check_mode:
|
||||
return self.find_entry(entryid).undefine()
|
||||
else:
|
||||
if not self.find_entry(entryid):
|
||||
return self.module.exit_json(changed=True)
|
||||
|
||||
def get_status2(self, entry):
|
||||
state = entry.isActive()
|
||||
return ENTRY_STATE_ACTIVE_MAP.get(state,"unknown")
|
||||
|
||||
def get_status(self, entryid):
|
||||
if not self.module.check_mode:
|
||||
state = self.find_entry(entryid).isActive()
|
||||
return ENTRY_STATE_ACTIVE_MAP.get(state,"unknown")
|
||||
else:
|
||||
try:
|
||||
state = self.find_entry(entryid).isActive()
|
||||
return ENTRY_STATE_ACTIVE_MAP.get(state,"unknown")
|
||||
except:
|
||||
return ENTRY_STATE_ACTIVE_MAP.get("inactive","unknown")
|
||||
|
||||
def get_uuid(self, entryid):
|
||||
return self.find_entry(entryid).UUIDString()
|
||||
|
||||
def get_xml(self, entryid):
|
||||
return self.find_entry(entryid).XMLDesc(0)
|
||||
|
||||
def get_info(self, entryid):
|
||||
return self.find_entry(entryid).info()
|
||||
|
||||
def get_volume_count(self, entryid):
|
||||
return self.find_entry(entryid).numOfVolumes()
|
||||
|
||||
def get_volume_names(self, entryid):
    """Return the list of volume names in pool *entryid*."""
    pool = self.find_entry(entryid)
    return pool.listVolumes()
||||
def get_devices(self, entryid):
    """Return the source device paths of pool *entryid*.

    Raises:
        ValueError: when the pool XML lists no <source><device> elements.
    """
    xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
    devices = [device.get('path') for device in xml.xpath('/pool/source/device')]
    # Raise explicitly on an empty result instead of returning an
    # undefined local and converting the NameError via a bare except.
    if not devices:
        raise ValueError('No devices specified')
    return devices
||||
def get_format(self, entryid):
    """Return the source format type of pool *entryid*.

    Raises:
        ValueError: when the pool XML has no <source><format> element.
    """
    xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
    try:
        result = xml.xpath('/pool/source/format')[0].get('type')
    # xpath() returns a list; only the [0] access can fail here, so
    # catch IndexError instead of the original bare except.
    except IndexError:
        raise ValueError('Format not specified')
    return result
||||
def get_host(self, entryid):
    """Return the source host name of pool *entryid*.

    Raises:
        ValueError: when the pool XML has no <source><host> element.
    """
    xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
    try:
        result = xml.xpath('/pool/source/host')[0].get('name')
    # Only the [0] access can fail; catch IndexError, not a bare except.
    except IndexError:
        raise ValueError('Host not specified')
    return result
||||
def get_source_path(self, entryid):
    """Return the source directory path of pool *entryid*.

    Raises:
        ValueError: when the pool XML has no <source><dir> element.
    """
    xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
    try:
        result = xml.xpath('/pool/source/dir')[0].get('path')
    # Only the [0] access can fail; catch IndexError, not a bare except.
    except IndexError:
        raise ValueError('Source path not specified')
    return result
||||
def get_path(self, entryid):
    """Return the target path of pool *entryid*."""
    pool_xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
    target = pool_xml.xpath('/pool/target/path')[0]
    return target.text
|
||||
def get_type(self, entryid):
    """Return the type attribute of the root <pool> element."""
    pool_xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
    return pool_xml.get('type')
||||
def build(self, entryid, flags):
    """Build the underlying storage of pool *entryid* with *flags*.

    In check mode, report ``changed=True`` when the pool cannot be
    looked up (building it would be a change).
    """
    if not self.module.check_mode:
        return self.find_entry(entryid).build(flags)
    try:
        state = self.find_entry(entryid)
    # Was a bare `except:`; narrowed so Ctrl-C / SystemExit are not swallowed.
    except Exception:
        return self.module.exit_json(changed=True)
    if not state:
        return self.module.exit_json(changed=True)
||||
def delete(self, entryid, flags):
    """Delete the underlying storage of pool *entryid* with *flags*.

    In check mode, report ``changed=True`` both when the lookup fails
    and when the pool exists (deleting it would be a change).
    """
    if not self.module.check_mode:
        return self.find_entry(entryid).delete(flags)
    try:
        state = self.find_entry(entryid)
    # Was a bare `except:`; narrowed so Ctrl-C / SystemExit are not swallowed.
    except Exception:
        return self.module.exit_json(changed=True)
    if state:
        return self.module.exit_json(changed=True)
||||
def get_autostart(self, entryid):
    """Return the autostart flag of pool *entryid*, mapped through ENTRY_STATE_AUTOSTART_MAP."""
    flag = self.find_entry(entryid).autostart()
    return ENTRY_STATE_AUTOSTART_MAP.get(flag, "unknown")
|
||||
def get_autostart2(self, entryid):
    """Return the raw autostart flag of pool *entryid* (check-mode aware).

    In check mode a failed lookup exits with ``changed=True``.
    """
    if not self.module.check_mode:
        return self.find_entry(entryid).autostart()
    try:
        return self.find_entry(entryid).autostart()
    # Was a bare `except:`; narrowed so Ctrl-C / SystemExit are not swallowed.
    except Exception:
        return self.module.exit_json(changed=True)
|
||||
def set_autostart(self, entryid, val):
    """Set the autostart flag of pool *entryid* to boolean *val*.

    In check mode, report ``changed=True`` when the lookup fails or the
    current flag differs from *val*; otherwise do nothing.
    """
    if not self.module.check_mode:
        return self.find_entry(entryid).setAutostart(val)
    try:
        state = self.find_entry(entryid).autostart()
    # Was a bare `except:`; narrowed so Ctrl-C / SystemExit are not swallowed.
    except Exception:
        return self.module.exit_json(changed=True)
    if bool(state) != val:
        return self.module.exit_json(changed=True)
|
||||
def refresh(self, entryid):
    """Ask libvirt to refresh pool *entryid*."""
    pool = self.find_entry(entryid)
    return pool.refresh()
|
||||
def get_persistent(self, entryid):
    """Return the persistence flag of pool *entryid*, mapped through ENTRY_STATE_PERSISTENT_MAP."""
    flag = self.find_entry(entryid).isPersistent()
    return ENTRY_STATE_PERSISTENT_MAP.get(flag, "unknown")
|
||||
def define_from_xml(self, entryid, xml):
    """Define a storage pool from the XML document *xml*.

    In check mode, exit with ``changed=True`` when *entryid* cannot be
    looked up (i.e. the pool would be newly defined); if it already
    exists, fall through (no-op).
    """
    if not self.module.check_mode:
        return self.conn.storagePoolDefineXML(xml)
    try:
        self.find_entry(entryid)
    # Was a bare `except:`; narrowed so Ctrl-C / SystemExit are not swallowed.
    except Exception:
        return self.module.exit_json(changed=True)
|
||||
class VirtStoragePool(object):
    """High-level storage pool operations.

    Thin facade over LibvirtConnection: each public method corresponds to a
    module command or state transition and delegates to the connection layer.
    """

    def __init__(self, uri, module):
        # module: the AnsibleModule (needed for check mode / exit handling)
        # uri:    libvirt connection URI
        self.module = module
        self.uri = uri
        self.conn = LibvirtConnection(self.uri, self.module)

    def get_pool(self, entryid):
        """Return the pool object named *entryid* (raises EntryNotFound if absent)."""
        return self.conn.find_entry(entryid)

    def list_pools(self, state=None):
        """Return pool names; when *state* is given, only pools in that state."""
        results = []
        # find_entry(-1) is the "list everything" sentinel of the connection layer.
        for entry in self.conn.find_entry(-1):
            if state:
                if state == self.conn.get_status2(entry):
                    results.append(entry.name())
            else:
                results.append(entry.name())
        return results

    def state(self):
        """Return "name status" strings, one per pool."""
        results = []
        for entry in self.list_pools():
            state_blurb = self.conn.get_status(entry)
            results.append("%s %s" % (entry,state_blurb))
        return results

    def autostart(self, entryid):
        """Enable autostart for pool *entryid*."""
        return self.conn.set_autostart(entryid, True)

    def get_autostart(self, entryid):
        """Return the raw autostart flag of pool *entryid*."""
        return self.conn.get_autostart2(entryid)

    def set_autostart(self, entryid, state):
        """Set the autostart flag of pool *entryid* to *state*."""
        return self.conn.set_autostart(entryid, state)

    def create(self, entryid):
        """Start (activate) pool *entryid*."""
        return self.conn.create(entryid)

    def start(self, entryid):
        """Alias of create(): start pool *entryid*."""
        return self.conn.create(entryid)

    def stop(self, entryid):
        """Alias of destroy(): stop pool *entryid*."""
        return self.conn.destroy(entryid)

    def destroy(self, entryid):
        """Forcefully stop pool *entryid*."""
        return self.conn.destroy(entryid)

    def undefine(self, entryid):
        """Remove the persistent definition of pool *entryid*."""
        return self.conn.undefine(entryid)

    def status(self, entryid):
        """Return the mapped active/inactive status of pool *entryid*."""
        return self.conn.get_status(entryid)

    def get_xml(self, entryid):
        """Return the libvirt XML description of pool *entryid*."""
        return self.conn.get_xml(entryid)

    def define(self, entryid, xml):
        """Define pool *entryid* from an XML document."""
        return self.conn.define_from_xml(entryid, xml)

    def build(self, entryid, flags):
        """Build pool *entryid*; *flags* is a mode name mapped to libvirt flags."""
        return self.conn.build(entryid, ENTRY_BUILD_FLAGS_MAP.get(flags,0))

    def delete(self, entryid, flags):
        """Delete pool *entryid*'s storage; *flags* is a mode name mapped to libvirt flags."""
        return self.conn.delete(entryid, ENTRY_DELETE_FLAGS_MAP.get(flags,0))

    def refresh(self, entryid):
        """Refresh pool *entryid*."""
        return self.conn.refresh(entryid)

    def info(self):
        """Return pool details keyed as {'pools': ...} (info mode of facts())."""
        return self.facts(facts_mode='info')

    def facts(self, facts_mode='facts'):
        """Collect per-pool details for every pool.

        Returns {"ansible_facts": {"ansible_libvirt_pools": ...}} when
        facts_mode == 'facts', or {"pools": ...} when facts_mode == 'info'.
        """
        results = dict()
        for entry in self.list_pools():
            results[entry] = dict()
            if self.conn.find_entry(entry):
                data = self.conn.get_info(entry)
                # libvirt returns maxMem, memory, and cpuTime as long()'s, which
                # xmlrpclib tries to convert to regular int's during serialization.
                # This throws exceptions, so convert them to strings here and
                # assume the other end of the xmlrpc connection can figure things
                # out or doesn't care.
                results[entry] = {
                    "status" : ENTRY_STATE_INFO_MAP.get(data[0],"unknown"),
                    "size_total" : str(data[1]),
                    "size_used" : str(data[2]),
                    "size_available" : str(data[3]),
                }
                results[entry]["autostart"] = self.conn.get_autostart(entry)
                results[entry]["persistent"] = self.conn.get_persistent(entry)
                results[entry]["state"] = self.conn.get_status(entry)
                results[entry]["path"] = self.conn.get_path(entry)
                results[entry]["type"] = self.conn.get_type(entry)
                results[entry]["uuid"] = self.conn.get_uuid(entry)
                # Volume data is only available on an active pool.
                if self.conn.find_entry(entry).isActive():
                    results[entry]["volume_count"] = self.conn.get_volume_count(entry)
                    results[entry]["volumes"] = list()
                    for volume in self.conn.get_volume_names(entry):
                        results[entry]["volumes"].append(volume)
                else:
                    results[entry]["volume_count"] = -1

                # host / source_path / format / devices only exist for some pool
                # types; the getters signal "not set" by raising ValueError.
                try:
                    results[entry]["host"] = self.conn.get_host(entry)
                except ValueError:
                    pass

                try:
                    results[entry]["source_path"] = self.conn.get_source_path(entry)
                except ValueError:
                    pass

                try:
                    results[entry]["format"] = self.conn.get_format(entry)
                except ValueError:
                    pass

                try:
                    devices = self.conn.get_devices(entry)
                    results[entry]["devices"] = devices
                except ValueError:
                    pass

            else:
                results[entry]["state"] = self.conn.get_status(entry)

        facts = dict()
        if facts_mode == 'facts':
            facts["ansible_facts"] = dict()
            facts["ansible_facts"]["ansible_libvirt_pools"] = results
        elif facts_mode == 'info':
            facts['pools'] = results
        return facts
||||
|
||||
def core(module):
    """Dispatch the requested operation and return ``(rc, result_dict)``.

    Handles, in order of precedence: the state+list_pools combination,
    declarative *state* handling, imperative *command* handling, and the
    *autostart* flag. Fails the module when neither state nor command nor
    autostart is supplied. libvirt errors propagate to the caller (main()
    converts them into a module failure).
    """
    state = module.params.get('state', None)
    name = module.params.get('name', None)
    command = module.params.get('command', None)
    uri = module.params.get('uri', None)
    xml = module.params.get('xml', None)
    autostart = module.params.get('autostart', None)
    mode = module.params.get('mode', None)

    v = VirtStoragePool(uri, module)
    res = {}

    if state and command == 'list_pools':
        res = v.list_pools(state=state)
        if not isinstance(res, dict):
            res = {command: res}
        return VIRT_SUCCESS, res

    if state:
        if not name:
            module.fail_json(msg="state change requires a specified name")

        res['changed'] = False
        # NOTE: status comparisons previously used `is not` against string
        # literals, which only worked via CPython string interning; `!=`
        # is the correct value comparison.
        if state in ['active']:
            if v.status(name) != 'active':
                res['changed'] = True
                res['msg'] = v.start(name)
        elif state in ['present']:
            try:
                v.get_pool(name)
            except EntryNotFound:
                if not xml:
                    module.fail_json(msg="storage pool '" + name + "' not present, but xml not specified")
                v.define(name, xml)
                res = {'changed': True, 'created': name}
        elif state in ['inactive']:
            entries = v.list_pools()
            if name in entries:
                if v.status(name) != 'inactive':
                    res['changed'] = True
                    res['msg'] = v.destroy(name)
        elif state in ['undefined', 'absent']:
            entries = v.list_pools()
            if name in entries:
                # An active pool must be stopped before it can be undefined.
                if v.status(name) != 'inactive':
                    v.destroy(name)
                res['changed'] = True
                res['msg'] = v.undefine(name)
        elif state in ['deleted']:
            entries = v.list_pools()
            if name in entries:
                # Stop, wipe the storage, then remove the definition.
                if v.status(name) != 'inactive':
                    v.destroy(name)
                v.delete(name, mode)
                res['changed'] = True
                res['msg'] = v.undefine(name)
        else:
            module.fail_json(msg="unexpected state")

        return VIRT_SUCCESS, res

    if command:
        if command in ENTRY_COMMANDS:
            if not name:
                module.fail_json(msg="%s requires 1 argument: name" % command)
            if command == 'define':
                if not xml:
                    module.fail_json(msg="define requires xml argument")
                try:
                    v.get_pool(name)
                except EntryNotFound:
                    v.define(name, xml)
                    res = {'changed': True, 'created': name}
                return VIRT_SUCCESS, res
            elif command == 'build':
                res = v.build(name, mode)
                if not isinstance(res, dict):
                    res = {'changed': True, command: res}
                return VIRT_SUCCESS, res
            elif command == 'delete':
                res = v.delete(name, mode)
                if not isinstance(res, dict):
                    res = {'changed': True, command: res}
                return VIRT_SUCCESS, res
            res = getattr(v, command)(name)
            if not isinstance(res, dict):
                res = {command: res}
            return VIRT_SUCCESS, res

        elif hasattr(v, command):
            # No-argument commands (e.g. facts/info/list_pools) map directly
            # to VirtStoragePool methods.
            res = getattr(v, command)()
            if not isinstance(res, dict):
                res = {command: res}
            return VIRT_SUCCESS, res

        else:
            module.fail_json(msg="Command %s not recognized" % command)

    if autostart is not None:
        if not name:
            module.fail_json(msg="state change requires a specified name")

        res['changed'] = False
        if autostart:
            if not v.get_autostart(name):
                res['changed'] = True
                res['msg'] = v.set_autostart(name, True)
        else:
            if v.get_autostart(name):
                res['changed'] = True
                res['msg'] = v.set_autostart(name, False)

        return VIRT_SUCCESS, res

    module.fail_json(msg="expected state or command parameter to be specified")
||||
|
||||
def main():
    """Module entry point: parse arguments, verify dependencies, run core()."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(aliases=['pool']),
            state=dict(choices=['active', 'inactive', 'present', 'absent', 'undefined', 'deleted']),
            command=dict(choices=ALL_COMMANDS),
            uri=dict(default='qemu:///system'),
            xml=dict(),
            autostart=dict(type='bool'),
            mode=dict(choices=ALL_MODES),
        ),
        supports_check_mode=True,
    )

    # Both libvirt and lxml are hard requirements for this module.
    if not HAS_VIRT:
        module.fail_json(
            msg='The `libvirt` module is not importable. Check the requirements.'
        )
    if not HAS_XML:
        module.fail_json(
            msg='The `lxml` module is not importable. Check the requirements.'
        )

    rc = VIRT_SUCCESS
    try:
        rc, result = core(module)
    except Exception as e:
        module.fail_json(msg=str(e))

    if rc == 0:
        module.exit_json(**result)
    else:
        # Non-zero rc from core(): surface the message as a failure.
        module.fail_json(rc=rc, msg=result)
|
||||
Reference in New Issue
Block a user