mirror of
https://opendev.org/openstack/ansible-collections-openstack.git
synced 2026-03-28 22:43:03 +00:00
Compare commits
1 Commits
master
...
experiment
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
20a27c461d |
209
.zuul.yaml
209
.zuul.yaml
@@ -47,8 +47,6 @@
|
|||||||
devstack_services:
|
devstack_services:
|
||||||
designate: true
|
designate: true
|
||||||
neutron-dns: true
|
neutron-dns: true
|
||||||
neutron-trunk: true
|
|
||||||
neutron-segments: true
|
|
||||||
zuul_copy_output:
|
zuul_copy_output:
|
||||||
'{{ devstack_log_dir }}/test_output.log': 'logs'
|
'{{ devstack_log_dir }}/test_output.log': 'logs'
|
||||||
extensions_to_txt:
|
extensions_to_txt:
|
||||||
@@ -96,39 +94,6 @@
|
|||||||
c-bak: false
|
c-bak: false
|
||||||
tox_extra_args: -vv --skip-missing-interpreters=false -- coe_cluster coe_cluster_template
|
tox_extra_args: -vv --skip-missing-interpreters=false -- coe_cluster coe_cluster_template
|
||||||
|
|
||||||
- job:
|
|
||||||
name: ansible-collections-openstack-functional-devstack-manila-base
|
|
||||||
parent: ansible-collections-openstack-functional-devstack-base
|
|
||||||
# Do not restrict branches in base jobs because else Zuul would not find a matching
|
|
||||||
# parent job variant during job freeze when child jobs are on other branches.
|
|
||||||
description: |
|
|
||||||
Run openstack collections functional tests against a devstack with Manila plugin enabled
|
|
||||||
# Do not set job.override-checkout or job.required-projects.override-checkout in base job because
|
|
||||||
# else Zuul will use this branch when matching variants for parent jobs during job freeze
|
|
||||||
required-projects:
|
|
||||||
- openstack/manila
|
|
||||||
- openstack/python-manilaclient
|
|
||||||
files:
|
|
||||||
- ^ci/roles/share_type/.*$
|
|
||||||
- ^plugins/modules/share_type.py
|
|
||||||
- ^plugins/modules/share_type_info.py
|
|
||||||
timeout: 10800
|
|
||||||
vars:
|
|
||||||
devstack_localrc:
|
|
||||||
MANILA_ENABLED_BACKENDS: generic
|
|
||||||
MANILA_OPTGROUP_generic_driver_handles_share_servers: true
|
|
||||||
MANILA_OPTGROUP_generic_connect_share_server_to_tenant_network: true
|
|
||||||
MANILA_USE_SERVICE_INSTANCE_PASSWORD: true
|
|
||||||
devstack_plugins:
|
|
||||||
manila: https://opendev.org/openstack/manila
|
|
||||||
devstack_services:
|
|
||||||
manila: true
|
|
||||||
m-api: true
|
|
||||||
m-sch: true
|
|
||||||
m-shr: true
|
|
||||||
m-dat: true
|
|
||||||
tox_extra_args: -vv --skip-missing-interpreters=false -- share_type share_type_info
|
|
||||||
|
|
||||||
- job:
|
- job:
|
||||||
name: ansible-collections-openstack-functional-devstack-magnum
|
name: ansible-collections-openstack-functional-devstack-magnum
|
||||||
parent: ansible-collections-openstack-functional-devstack-magnum-base
|
parent: ansible-collections-openstack-functional-devstack-magnum-base
|
||||||
@@ -138,15 +103,6 @@
|
|||||||
with Magnum plugin enabled, using master of openstacksdk and latest
|
with Magnum plugin enabled, using master of openstacksdk and latest
|
||||||
ansible release. Run it only on coe_cluster{,_template} changes.
|
ansible release. Run it only on coe_cluster{,_template} changes.
|
||||||
|
|
||||||
- job:
|
|
||||||
name: ansible-collections-openstack-functional-devstack-manila
|
|
||||||
parent: ansible-collections-openstack-functional-devstack-manila-base
|
|
||||||
branches: master
|
|
||||||
description: |
|
|
||||||
Run openstack collections functional tests against a master devstack
|
|
||||||
with Manila plugin enabled, using master of openstacksdk and latest
|
|
||||||
ansible release. Run it only on share_type{,_info} changes.
|
|
||||||
|
|
||||||
- job:
|
- job:
|
||||||
name: ansible-collections-openstack-functional-devstack-octavia-base
|
name: ansible-collections-openstack-functional-devstack-octavia-base
|
||||||
parent: ansible-collections-openstack-functional-devstack-base
|
parent: ansible-collections-openstack-functional-devstack-base
|
||||||
@@ -206,18 +162,45 @@
|
|||||||
tox_constraints_file: '{{ ansible_user_dir }}/{{ zuul.project.src_dir }}/tests/constraints-openstacksdk-1.x.x.txt'
|
tox_constraints_file: '{{ ansible_user_dir }}/{{ zuul.project.src_dir }}/tests/constraints-openstacksdk-1.x.x.txt'
|
||||||
tox_install_siblings: false
|
tox_install_siblings: false
|
||||||
|
|
||||||
|
# Job with Ansible 2.9 for checking backward compatibility
|
||||||
- job:
|
- job:
|
||||||
name: ansible-collections-openstack-functional-devstack-ansible-2.18
|
name: ansible-collections-openstack-functional-devstack-ansible-2.9
|
||||||
parent: ansible-collections-openstack-functional-devstack-base
|
parent: ansible-collections-openstack-functional-devstack-base
|
||||||
branches: master
|
branches: master
|
||||||
description: |
|
description: |
|
||||||
Run openstack collections functional tests against a master devstack
|
Run openstack collections functional tests against a master devstack
|
||||||
using master of openstacksdk and stable 2.16 branch of ansible
|
using master of openstacksdk and stable 2.9 branch of ansible
|
||||||
required-projects:
|
required-projects:
|
||||||
- name: github.com/ansible/ansible
|
- name: github.com/ansible/ansible
|
||||||
override-checkout: stable-2.18
|
override-checkout: stable-2.9
|
||||||
vars:
|
vars:
|
||||||
tox_envlist: ansible_2_18
|
tox_envlist: ansible_2_9
|
||||||
|
|
||||||
|
- job:
|
||||||
|
name: ansible-collections-openstack-functional-devstack-ansible-2.11
|
||||||
|
parent: ansible-collections-openstack-functional-devstack-base
|
||||||
|
branches: master
|
||||||
|
description: |
|
||||||
|
Run openstack collections functional tests against a master devstack
|
||||||
|
using master of openstacksdk and stable 2.12 branch of ansible
|
||||||
|
required-projects:
|
||||||
|
- name: github.com/ansible/ansible
|
||||||
|
override-checkout: stable-2.11
|
||||||
|
vars:
|
||||||
|
tox_envlist: ansible_2_11
|
||||||
|
|
||||||
|
- job:
|
||||||
|
name: ansible-collections-openstack-functional-devstack-ansible-2.12
|
||||||
|
parent: ansible-collections-openstack-functional-devstack-base
|
||||||
|
branches: master
|
||||||
|
description: |
|
||||||
|
Run openstack collections functional tests against a master devstack
|
||||||
|
using master of openstacksdk and stable 2.12 branch of ansible
|
||||||
|
required-projects:
|
||||||
|
- name: github.com/ansible/ansible
|
||||||
|
override-checkout: stable-2.12
|
||||||
|
vars:
|
||||||
|
tox_envlist: ansible_2_12
|
||||||
|
|
||||||
- job:
|
- job:
|
||||||
name: ansible-collections-openstack-functional-devstack-ansible-devel
|
name: ansible-collections-openstack-functional-devstack-ansible-devel
|
||||||
@@ -261,22 +244,24 @@
|
|||||||
bindep_profile: test py310
|
bindep_profile: test py310
|
||||||
|
|
||||||
- job:
|
- job:
|
||||||
name: openstack-tox-linters-ansible-2.18
|
name: openstack-tox-linters-ansible-2.12
|
||||||
parent: openstack-tox-linters-ansible
|
parent: openstack-tox-linters-ansible
|
||||||
|
nodeset: ubuntu-focal
|
||||||
description: |
|
description: |
|
||||||
Run openstack collections linter tests using the 2.18 branch of ansible
|
Run openstack collections linter tests using the 2.12 branch of ansible
|
||||||
required-projects:
|
required-projects:
|
||||||
- name: github.com/ansible/ansible
|
- name: github.com/ansible/ansible
|
||||||
override-checkout: stable-2.18
|
override-checkout: stable-2.12
|
||||||
vars:
|
vars:
|
||||||
tox_envlist: linters_2_18
|
ensure_tox_version: '<4'
|
||||||
python_version: "3.12"
|
tox_envlist: linters_2_12
|
||||||
bindep_profile: test py312
|
python_version: 3.8
|
||||||
|
bindep_profile: test py38
|
||||||
|
|
||||||
# Cross-checks with other projects
|
# Cross-checks with other projects
|
||||||
- job:
|
- job:
|
||||||
name: bifrost-collections-src
|
name: bifrost-collections-src
|
||||||
parent: bifrost-integration-on-ubuntu-jammy
|
parent: bifrost-integration-tinyipa-ubuntu-focal
|
||||||
required-projects:
|
required-projects:
|
||||||
- openstack/ansible-collections-openstack
|
- openstack/ansible-collections-openstack
|
||||||
- # always use master branch when collecting parent job variants, refer to git blame for rationale.
|
- # always use master branch when collecting parent job variants, refer to git blame for rationale.
|
||||||
@@ -287,7 +272,7 @@
|
|||||||
override-checkout: master
|
override-checkout: master
|
||||||
- job:
|
- job:
|
||||||
name: bifrost-keystone-collections-src
|
name: bifrost-keystone-collections-src
|
||||||
parent: bifrost-integration-keystone-on-ubuntu-jammy
|
parent: bifrost-integration-tinyipa-keystone-ubuntu-focal
|
||||||
required-projects:
|
required-projects:
|
||||||
- openstack/ansible-collections-openstack
|
- openstack/ansible-collections-openstack
|
||||||
- # always use master branch when collecting parent job variants, refer to git blame for rationale.
|
- # always use master branch when collecting parent job variants, refer to git blame for rationale.
|
||||||
@@ -297,73 +282,69 @@
|
|||||||
name: openstack/openstacksdk
|
name: openstack/openstacksdk
|
||||||
override-checkout: master
|
override-checkout: master
|
||||||
|
|
||||||
- job:
|
|
||||||
name: ansible-collections-openstack-release
|
|
||||||
parent: openstack-tox-linters-ansible
|
|
||||||
run: ci/publish/publish_collection.yml
|
|
||||||
secrets:
|
|
||||||
- ansible_galaxy_info
|
|
||||||
|
|
||||||
- secret:
|
|
||||||
name: ansible_galaxy_info
|
|
||||||
data:
|
|
||||||
url: https://galaxy.ansible.com
|
|
||||||
token: !encrypted/pkcs1-oaep
|
|
||||||
- K93hOZo1B5z248H04COB1N2HCkGbFPo2EUr+0W7qFzsrdvmbsAI86Hl9bUCfEENGrwvfV
|
|
||||||
0j9CE5iO0tyqal3r6ucMhGT44MgQWL3MBeRvK89yAJpSNMU7R7rEY/zbjZMoC9YElcHEv
|
|
||||||
GEDZSA/0gQHCHpZVDlx4JMGwrnd+Nz9ha3c12BYeZS8rS/dQl7EmZ867OsozmNdG9UkkC
|
|
||||||
0vP/dkenUQNvoZOSWgZztRBlbAyI1nc5iEEw9vvpLh19HcY9+S2iAZkgSq4jOOO4wn7gE
|
|
||||||
XAZPr0HRdwS2m4Hw0Pusrg7SdC3+2O0N/fvFGnvvKXHcSgQk3rPLn6HfKzOJoPWc4WlDX
|
|
||||||
MA79jYloNBXjOaeXOoiwYzzshWK53F6Ci+3leq1cYuFyHSi2ds2mYXat7YndZSsmsk5um
|
|
||||||
hj0+Ddy9Om1uYy3nhHyZLULE7UDUmduA9EPkvdyWlcW0yZL2kXcrDTHlSp4PaJg9iKVys
|
|
||||||
0aOOo9CNMwhyXAOGiFCYF/m7Efbnp50zUQhHN9+7LeVzXZuiH98C8kNvWfE0qrkrrgQ1n
|
|
||||||
78UMqGcGpdw4ZSlWrDTbrbd4v0bRnsJ+IAWISnT5OXaeJgGZwXRuBHtTXqbjoosBeX/8w
|
|
||||||
YKb0lx7E5ZtSw7+Y6LNDGihGTmVg1nkZUWo85CxyF/RiWHuNvpkzzqXmdGS1bg=
|
|
||||||
|
|
||||||
- project:
|
- project:
|
||||||
check:
|
check:
|
||||||
jobs:
|
jobs:
|
||||||
- tox-pep8
|
- tox-pep8
|
||||||
- openstack-tox-linters-ansible-devel
|
# - openstack-tox-linters-ansible-devel
|
||||||
- openstack-tox-linters-ansible-2.18
|
# - openstack-tox-linters-ansible-2.12
|
||||||
- ansible-collections-openstack-functional-devstack
|
# - ansible-collections-openstack-functional-devstack:
|
||||||
- ansible-collections-openstack-functional-devstack-releases
|
# dependencies: &deps_unit_lint
|
||||||
- ansible-collections-openstack-functional-devstack-ansible-2.18
|
# - tox-pep8
|
||||||
- ansible-collections-openstack-functional-devstack-ansible-devel
|
# - openstack-tox-linters-ansible-2.12
|
||||||
- ansible-collections-openstack-functional-devstack-magnum
|
|
||||||
- ansible-collections-openstack-functional-devstack-manila
|
|
||||||
- ansible-collections-openstack-functional-devstack-octavia
|
|
||||||
|
|
||||||
- bifrost-collections-src:
|
# - ansible-collections-openstack-functional-devstack-releases:
|
||||||
voting: false
|
# dependencies: *deps_unit_lint
|
||||||
irrelevant-files: *ignore_files
|
# - ansible-collections-openstack-functional-devstack-ansible-2.9:
|
||||||
- bifrost-keystone-collections-src:
|
# dependencies: *deps_unit_lint
|
||||||
voting: false
|
# - ansible-collections-openstack-functional-devstack-ansible-2.12:
|
||||||
irrelevant-files: *ignore_files
|
# dependencies: *deps_unit_lint
|
||||||
|
# - ansible-collections-openstack-functional-devstack-ansible-devel:
|
||||||
|
# dependencies: *deps_unit_lint
|
||||||
|
# - ansible-collections-openstack-functional-devstack-magnum:
|
||||||
|
# dependencies: *deps_unit_lint
|
||||||
|
# - ansible-collections-openstack-functional-devstack-octavia:
|
||||||
|
# dependencies: *deps_unit_lint
|
||||||
|
|
||||||
|
# - bifrost-collections-src:
|
||||||
|
# voting: false
|
||||||
|
# dependencies: *deps_unit_lint
|
||||||
|
# irrelevant-files: *ignore_files
|
||||||
|
# - bifrost-keystone-collections-src:
|
||||||
|
# voting: false
|
||||||
|
# dependencies: *deps_unit_lint
|
||||||
|
# irrelevant-files: *ignore_files
|
||||||
|
|
||||||
gate:
|
gate:
|
||||||
jobs:
|
jobs:
|
||||||
- tox-pep8
|
- tox-pep8
|
||||||
- openstack-tox-linters-ansible-2.18
|
# - openstack-tox-linters-ansible-2.12
|
||||||
- ansible-collections-openstack-functional-devstack-releases
|
# - ansible-collections-openstack-functional-devstack
|
||||||
- ansible-collections-openstack-functional-devstack-magnum
|
# - ansible-collections-openstack-functional-devstack-releases
|
||||||
- ansible-collections-openstack-functional-devstack-manila
|
# - ansible-collections-openstack-functional-devstack-ansible-2.9
|
||||||
- ansible-collections-openstack-functional-devstack-octavia
|
# - ansible-collections-openstack-functional-devstack-ansible-2.12
|
||||||
|
# - ansible-collections-openstack-functional-devstack-magnum
|
||||||
|
# - ansible-collections-openstack-functional-devstack-octavia
|
||||||
|
|
||||||
periodic:
|
# periodic:
|
||||||
jobs:
|
# jobs:
|
||||||
- openstack-tox-linters-ansible-devel
|
# - openstack-tox-linters-ansible-devel
|
||||||
- openstack-tox-linters-ansible-2.18
|
# - openstack-tox-linters-ansible-2.12
|
||||||
- ansible-collections-openstack-functional-devstack
|
# - ansible-collections-openstack-functional-devstack
|
||||||
- ansible-collections-openstack-functional-devstack-releases
|
# - ansible-collections-openstack-functional-devstack-releases
|
||||||
- ansible-collections-openstack-functional-devstack-ansible-2.18
|
# - ansible-collections-openstack-functional-devstack-ansible-2.9
|
||||||
- ansible-collections-openstack-functional-devstack-ansible-devel
|
# - ansible-collections-openstack-functional-devstack-ansible-2.12
|
||||||
- bifrost-collections-src
|
# - ansible-collections-openstack-functional-devstack-ansible-devel
|
||||||
- bifrost-keystone-collections-src
|
# - bifrost-collections-src
|
||||||
- ansible-collections-openstack-functional-devstack-magnum
|
# - bifrost-keystone-collections-src
|
||||||
- ansible-collections-openstack-functional-devstack-manila
|
# - ansible-collections-openstack-functional-devstack-magnum
|
||||||
- ansible-collections-openstack-functional-devstack-octavia
|
# - ansible-collections-openstack-functional-devstack-octavia
|
||||||
|
|
||||||
tag:
|
# experimental:
|
||||||
jobs:
|
# jobs:
|
||||||
- ansible-collections-openstack-release
|
# - ansible-collections-openstack-functional-devstack-ansible-2.11
|
||||||
|
|
||||||
|
# tag:
|
||||||
|
# jobs:
|
||||||
|
# - ansible-collections-openstack-release
|
||||||
|
|||||||
176
CHANGELOG.rst
176
CHANGELOG.rst
@@ -4,182 +4,6 @@ Ansible OpenStack Collection Release Notes
|
|||||||
|
|
||||||
.. contents:: Topics
|
.. contents:: Topics
|
||||||
|
|
||||||
v2.5.0
|
|
||||||
======
|
|
||||||
|
|
||||||
Release Summary
|
|
||||||
---------------
|
|
||||||
|
|
||||||
Bugfixes and minor changes
|
|
||||||
|
|
||||||
Major Changes
|
|
||||||
-------------
|
|
||||||
|
|
||||||
- Add import_method to module
|
|
||||||
- Add object_containers_info module
|
|
||||||
- Add support for filters in inventory
|
|
||||||
- Add volume_manage module
|
|
||||||
- Introduce share_type modules
|
|
||||||
|
|
||||||
Minor Changes
|
|
||||||
-------------
|
|
||||||
|
|
||||||
- Allow role_assignment module to work cross domain
|
|
||||||
- Don't compare current state for `reboot_*` actions
|
|
||||||
- Fix disable_gateway_ip for subnet
|
|
||||||
- Fix example in the dns_zone_info module doc
|
|
||||||
- Fix router module external IPs when only subnet specified
|
|
||||||
- Fix the bug reporting url
|
|
||||||
- Let clouds_yaml_path behave as documented (Override path to clouds.yaml file)
|
|
||||||
- Shows missing data in `stack_info` module output
|
|
||||||
|
|
||||||
v2.4.1
|
|
||||||
======
|
|
||||||
|
|
||||||
Release Summary
|
|
||||||
---------------
|
|
||||||
|
|
||||||
Bugfixes and minor changes
|
|
||||||
|
|
||||||
Minor Changes
|
|
||||||
-------------
|
|
||||||
|
|
||||||
- Update tags when changing server
|
|
||||||
|
|
||||||
Bugfixes
|
|
||||||
--------
|
|
||||||
|
|
||||||
- Fix missed client_cert in OpenStackModule
|
|
||||||
|
|
||||||
v2.4.0
|
|
||||||
======
|
|
||||||
|
|
||||||
Release Summary
|
|
||||||
---------------
|
|
||||||
|
|
||||||
New trait module and minor changes
|
|
||||||
|
|
||||||
Major Changes
|
|
||||||
-------------
|
|
||||||
|
|
||||||
- Add trait module
|
|
||||||
|
|
||||||
Minor Changes
|
|
||||||
-------------
|
|
||||||
|
|
||||||
- Add loadbalancer quota options
|
|
||||||
- Allow create instance with tags
|
|
||||||
|
|
||||||
New Modules
|
|
||||||
-----------
|
|
||||||
|
|
||||||
- openstack.cloud.trait - Add or Delete a trait from OpenStack
|
|
||||||
|
|
||||||
v2.3.3
|
|
||||||
======
|
|
||||||
|
|
||||||
Release Summary
|
|
||||||
---------------
|
|
||||||
|
|
||||||
Bugfixes and minor changes
|
|
||||||
|
|
||||||
Minor Changes
|
|
||||||
-------------
|
|
||||||
|
|
||||||
- Add test to only_ipv4 in inventory
|
|
||||||
- add an option to use only IPv4 only for ansible_host and ansible_ssh_host
|
|
||||||
|
|
||||||
Bugfixes
|
|
||||||
--------
|
|
||||||
|
|
||||||
- CI - Fix deprecated ANSIBLE_COLLECTIONS_PATHS variable
|
|
||||||
|
|
||||||
v2.3.2
|
|
||||||
======
|
|
||||||
|
|
||||||
Release Summary
|
|
||||||
---------------
|
|
||||||
|
|
||||||
Bugfixes and minor changes
|
|
||||||
|
|
||||||
Minor Changes
|
|
||||||
-------------
|
|
||||||
|
|
||||||
- Drop compat implementations for tests
|
|
||||||
|
|
||||||
Bugfixes
|
|
||||||
--------
|
|
||||||
|
|
||||||
- Fix openstack.cloud.port module failure in check mode
|
|
||||||
|
|
||||||
v2.3.1
|
|
||||||
======
|
|
||||||
|
|
||||||
Release Summary
|
|
||||||
---------------
|
|
||||||
|
|
||||||
Client TLS certificate support
|
|
||||||
|
|
||||||
Minor Changes
|
|
||||||
-------------
|
|
||||||
|
|
||||||
- Add ability to pass client tls certificate
|
|
||||||
|
|
||||||
v2.3.0
|
|
||||||
======
|
|
||||||
|
|
||||||
Release Summary
|
|
||||||
---------------
|
|
||||||
|
|
||||||
Bugfixes and new modules
|
|
||||||
|
|
||||||
Major Changes
|
|
||||||
-------------
|
|
||||||
|
|
||||||
- Add Neutron trunk module
|
|
||||||
- Add application_credential module
|
|
||||||
- Add module to filter available volume services
|
|
||||||
|
|
||||||
Minor Changes
|
|
||||||
-------------
|
|
||||||
|
|
||||||
- Add inactive state for the images
|
|
||||||
- Add insecure_registry property to coe_cluster_templates
|
|
||||||
- Add support for creation of the default external networks
|
|
||||||
- Add target_all_project option
|
|
||||||
- Add vlan_tranparency for creation networks
|
|
||||||
- Allow munch results in server_info module
|
|
||||||
- Allow to specify multiple allocation pools when creating a subnet
|
|
||||||
- CI - Disable auto-discovery for setuptools
|
|
||||||
- CI - Don't create port with binding profile
|
|
||||||
- CI - Fix CI in collection
|
|
||||||
- CI - Fix linters-devel and devstack tests
|
|
||||||
- CI - Fix regression in quota module
|
|
||||||
- CI - Fix test for server shelve
|
|
||||||
- CI - Migrate Bifrost jobs to Ubuntu Jammy
|
|
||||||
- CI - Remove 2.9 jobs from Zuul config
|
|
||||||
- CI - Run functional testing regardless of pep8/linter results
|
|
||||||
- Enable glance-direct interop image import
|
|
||||||
- Ensure coe_cluster_template compare labels properly
|
|
||||||
- Wait for deleted server to disappear from results
|
|
||||||
- router - Allow specifying external network name in a different project
|
|
||||||
|
|
||||||
Bugfixes
|
|
||||||
--------
|
|
||||||
|
|
||||||
- Allow wait false when auto_ip is false
|
|
||||||
- Fix exception when creating object from file
|
|
||||||
- Fix exception when updating container with metadata
|
|
||||||
- Fix typo in openstack.cloud.lb_pool
|
|
||||||
- Fix typo in parameter description
|
|
||||||
- fix subnet module - allow cidr option with subnet_pool
|
|
||||||
|
|
||||||
New Modules
|
|
||||||
-----------
|
|
||||||
|
|
||||||
- openstack.cloud.application_credential - Manage OpenStack Identity (Keystone) application credentials
|
|
||||||
- openstack.cloud.trunk - Add or delete trunks from an OpenStack cloud
|
|
||||||
- openstack.cloud.volume_service_info - Fetch OpenStack Volume (Cinder) services
|
|
||||||
|
|
||||||
v2.2.0
|
v2.2.0
|
||||||
======
|
======
|
||||||
|
|||||||
@@ -211,7 +211,7 @@ Thank you for your interest in our Ansible OpenStack collection ☺️
|
|||||||
There are many ways in which you can participate in the project, for example:
|
There are many ways in which you can participate in the project, for example:
|
||||||
|
|
||||||
- [Report and verify bugs and help with solving issues](
|
- [Report and verify bugs and help with solving issues](
|
||||||
https://bugs.launchpad.net/ansible-collections-openstack).
|
https://storyboard.openstack.org/#!/project/openstack/ansible-collections-openstack).
|
||||||
- [Submit and review patches](
|
- [Submit and review patches](
|
||||||
https://review.opendev.org/#/q/project:openstack/ansible-collections-openstack).
|
https://review.opendev.org/#/q/project:openstack/ansible-collections-openstack).
|
||||||
- Follow OpenStack's [How To Contribute](https://wiki.openstack.org/wiki/How_To_Contribute) guide.
|
- Follow OpenStack's [How To Contribute](https://wiki.openstack.org/wiki/How_To_Contribute) guide.
|
||||||
|
|||||||
@@ -526,112 +526,3 @@ releases:
|
|||||||
- Add volume_type modules
|
- Add volume_type modules
|
||||||
release_summary: New module for volume_type and bugfixes
|
release_summary: New module for volume_type and bugfixes
|
||||||
release_date: '2023-12-01'
|
release_date: '2023-12-01'
|
||||||
2.3.0:
|
|
||||||
changes:
|
|
||||||
bugfixes:
|
|
||||||
- Allow wait false when auto_ip is false
|
|
||||||
- Fix exception when creating object from file
|
|
||||||
- Fix exception when updating container with metadata
|
|
||||||
- Fix typo in openstack.cloud.lb_pool
|
|
||||||
- Fix typo in parameter description
|
|
||||||
- fix subnet module - allow cidr option with subnet_pool
|
|
||||||
major_changes:
|
|
||||||
- Add Neutron trunk module
|
|
||||||
- Add application_credential module
|
|
||||||
- Add module to filter available volume services
|
|
||||||
minor_changes:
|
|
||||||
- Add inactive state for the images
|
|
||||||
- Add insecure_registry property to coe_cluster_templates
|
|
||||||
- Add support for creation of the default external networks
|
|
||||||
- Add target_all_project option
|
|
||||||
- Add vlan_tranparency for creation networks
|
|
||||||
- Allow munch results in server_info module
|
|
||||||
- Allow to specify multiple allocation pools when creating a subnet
|
|
||||||
- CI - Disable auto-discovery for setuptools
|
|
||||||
- CI - Don't create port with binding profile
|
|
||||||
- CI - Fix CI in collection
|
|
||||||
- CI - Fix linters-devel and devstack tests
|
|
||||||
- CI - Fix regression in quota module
|
|
||||||
- CI - Fix test for server shelve
|
|
||||||
- CI - Migrate Bifrost jobs to Ubuntu Jammy
|
|
||||||
- CI - Remove 2.9 jobs from Zuul config
|
|
||||||
- CI - Run functional testing regardless of pep8/linter results
|
|
||||||
- Enable glance-direct interop image import
|
|
||||||
- Ensure coe_cluster_template compare labels properly
|
|
||||||
- Wait for deleted server to disappear from results
|
|
||||||
- router - Allow specifying external network name in a different project
|
|
||||||
release_summary: Bugfixes and new modules
|
|
||||||
modules:
|
|
||||||
- description: Manage OpenStack Identity (Keystone) application credentials
|
|
||||||
name: application_credential
|
|
||||||
namespace: ''
|
|
||||||
- description: Add or delete trunks from an OpenStack cloud
|
|
||||||
name: trunk
|
|
||||||
namespace: ''
|
|
||||||
- description: Fetch OpenStack Volume (Cinder) services
|
|
||||||
name: volume_service_info
|
|
||||||
namespace: ''
|
|
||||||
release_date: '2024-11-28'
|
|
||||||
2.3.1:
|
|
||||||
changes:
|
|
||||||
minor_changes:
|
|
||||||
- Add ability to pass client tls certificate
|
|
||||||
release_summary: Client TLS certificate support
|
|
||||||
release_date: '2024-12-18'
|
|
||||||
2.3.2:
|
|
||||||
changes:
|
|
||||||
bugfixes:
|
|
||||||
- Fix openstack.cloud.port module failure in check mode
|
|
||||||
minor_changes:
|
|
||||||
- Drop compat implementations for tests
|
|
||||||
release_summary: Bugfixes and minor changes
|
|
||||||
release_date: '2024-12-20'
|
|
||||||
2.3.3:
|
|
||||||
changes:
|
|
||||||
bugfixes:
|
|
||||||
- CI - Fix deprecated ANSIBLE_COLLECTIONS_PATHS variable
|
|
||||||
minor_changes:
|
|
||||||
- Add test to only_ipv4 in inventory
|
|
||||||
- add an option to use only IPv4 only for ansible_host and ansible_ssh_host
|
|
||||||
release_summary: Bugfixes and minor changes
|
|
||||||
release_date: '2024-12-22'
|
|
||||||
2.4.0:
|
|
||||||
changes:
|
|
||||||
major_changes:
|
|
||||||
- Add trait module
|
|
||||||
minor_changes:
|
|
||||||
- Add loadbalancer quota options
|
|
||||||
- Allow create instance with tags
|
|
||||||
release_summary: New trait module and minor changes
|
|
||||||
modules:
|
|
||||||
- description: Add or Delete a trait from OpenStack
|
|
||||||
name: trait
|
|
||||||
namespace: ''
|
|
||||||
release_date: '2025-01-15'
|
|
||||||
2.4.1:
|
|
||||||
changes:
|
|
||||||
bugfixes:
|
|
||||||
- Fix missed client_cert in OpenStackModule
|
|
||||||
minor_changes:
|
|
||||||
- Update tags when changing server
|
|
||||||
release_summary: Bugfixes and minor changes
|
|
||||||
release_date: '2024-01-20'
|
|
||||||
2.5.0:
|
|
||||||
changes:
|
|
||||||
major_changes:
|
|
||||||
- Add import_method to module
|
|
||||||
- Add object_containers_info module
|
|
||||||
- Add support for filters in inventory
|
|
||||||
- Add volume_manage module
|
|
||||||
- Introduce share_type modules
|
|
||||||
minor_changes:
|
|
||||||
- Allow role_assignment module to work cross domain
|
|
||||||
- Don't compare current state for `reboot_*` actions
|
|
||||||
- Fix disable_gateway_ip for subnet
|
|
||||||
- Fix example in the dns_zone_info module doc
|
|
||||||
- Fix router module external IPs when only subnet specified
|
|
||||||
- Fix the bug reporting url
|
|
||||||
- Let clouds_yaml_path behave as documented (Override path to clouds.yaml file)
|
|
||||||
- Shows missing data in `stack_info` module output
|
|
||||||
release_summary: Bugfixes and minor changes
|
|
||||||
release_date: '2025-10-24'
|
|
||||||
|
|||||||
0
changelogs/fragments/.keep
Normal file
0
changelogs/fragments/.keep
Normal file
@@ -1,5 +0,0 @@
|
|||||||
---
|
|
||||||
minor_changes:
|
|
||||||
- Added the new ``openstack.cloud.baremetal_port_group`` module to manage
|
|
||||||
Bare Metal port groups (create, update, and delete), including CI role
|
|
||||||
coverage and unit tests.
|
|
||||||
@@ -3,8 +3,7 @@
|
|||||||
vars:
|
vars:
|
||||||
collection_path: "{{ ansible_user_dir }}/{{ zuul.project.src_dir }}"
|
collection_path: "{{ ansible_user_dir }}/{{ zuul.project.src_dir }}"
|
||||||
build_collection_path: /tmp/collection_built/
|
build_collection_path: /tmp/collection_built/
|
||||||
ansible_virtualenv_path: /tmp/ansible_venv
|
ansible_galaxy_path: "~/.local/bin/ansible-galaxy"
|
||||||
ansible_galaxy_path: "{{ ansible_virtualenv_path }}/bin/ansible-galaxy"
|
|
||||||
|
|
||||||
tasks:
|
tasks:
|
||||||
|
|
||||||
@@ -12,15 +11,9 @@
|
|||||||
include_role:
|
include_role:
|
||||||
name: ensure-pip
|
name: ensure-pip
|
||||||
|
|
||||||
- name: Install Ansible in virtualenv
|
- name: Install ansible
|
||||||
pip:
|
pip:
|
||||||
name: ansible-core<2.19
|
name: ansible-core<2.12
|
||||||
virtualenv: "{{ ansible_virtualenv_path }}"
|
|
||||||
virtualenv_command: "{{ ensure_pip_virtualenv_command }}"
|
|
||||||
|
|
||||||
- name: Detect ansible version
|
|
||||||
command: "{{ ansible_virtualenv_path }}/bin/ansible --version"
|
|
||||||
register: ansible_version
|
|
||||||
|
|
||||||
- name: Discover tag version
|
- name: Discover tag version
|
||||||
set_fact:
|
set_fact:
|
||||||
|
|||||||
@@ -1,9 +0,0 @@
|
|||||||
expected_fields:
|
|
||||||
- description
|
|
||||||
- expires_at
|
|
||||||
- id
|
|
||||||
- name
|
|
||||||
- project_id
|
|
||||||
- roles
|
|
||||||
- secret
|
|
||||||
- unrestricted
|
|
||||||
@@ -1,61 +0,0 @@
|
|||||||
---
|
|
||||||
|
|
||||||
- name: Create application credentials
|
|
||||||
openstack.cloud.application_credential:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
state: present
|
|
||||||
name: ansible_creds
|
|
||||||
description: dummy description
|
|
||||||
register: appcred
|
|
||||||
|
|
||||||
- name: Assert return values of application_credential module
|
|
||||||
assert:
|
|
||||||
that:
|
|
||||||
- appcred is changed
|
|
||||||
# allow new fields to be introduced but prevent fields from being removed
|
|
||||||
- expected_fields|difference(appcred.application_credential.keys())|length == 0
|
|
||||||
|
|
||||||
- name: Create the application credential again
|
|
||||||
openstack.cloud.application_credential:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
state: present
|
|
||||||
name: ansible_creds
|
|
||||||
description: dummy description
|
|
||||||
register: appcred
|
|
||||||
|
|
||||||
- name: Assert return values of ansible_credential module
|
|
||||||
assert:
|
|
||||||
that:
|
|
||||||
# credentials are immutable so creating twice will cause delete and create
|
|
||||||
- appcred is changed
|
|
||||||
# allow new fields to be introduced but prevent fields from being removed
|
|
||||||
- expected_fields|difference(appcred.application_credential.keys())|length == 0
|
|
||||||
|
|
||||||
- name: Update the application credential again
|
|
||||||
openstack.cloud.application_credential:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
state: present
|
|
||||||
name: ansible_creds
|
|
||||||
description: new description
|
|
||||||
register: appcred
|
|
||||||
|
|
||||||
- name: Assert application credential changed
|
|
||||||
assert:
|
|
||||||
that:
|
|
||||||
- appcred is changed
|
|
||||||
- appcred.application_credential.description == 'new description'
|
|
||||||
|
|
||||||
- name: Get list of all keypairs using application credential
|
|
||||||
openstack.cloud.keypair_info:
|
|
||||||
cloud: "{{ appcred.cloud }}"
|
|
||||||
|
|
||||||
- name: Delete application credential
|
|
||||||
openstack.cloud.application_credential:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
state: absent
|
|
||||||
name: ansible_creds
|
|
||||||
register: appcred
|
|
||||||
|
|
||||||
- name: Assert application credential changed
|
|
||||||
assert:
|
|
||||||
that: appcred is changed
|
|
||||||
@@ -1,12 +0,0 @@
|
|||||||
expected_fields:
|
|
||||||
- address
|
|
||||||
- created_at
|
|
||||||
- extra
|
|
||||||
- id
|
|
||||||
- links
|
|
||||||
- mode
|
|
||||||
- name
|
|
||||||
- node_id
|
|
||||||
- properties
|
|
||||||
- standalone_ports_supported
|
|
||||||
- updated_at
|
|
||||||
@@ -1,100 +0,0 @@
|
|||||||
---
|
|
||||||
# TODO: Actually run this role in CI. Atm we do not have DevStack's ironic plugin enabled.
|
|
||||||
- name: Create baremetal node
|
|
||||||
openstack.cloud.baremetal_node:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
driver_info:
|
|
||||||
ipmi_address: "1.2.3.4"
|
|
||||||
ipmi_username: "admin"
|
|
||||||
ipmi_password: "secret"
|
|
||||||
name: ansible_baremetal_node
|
|
||||||
nics:
|
|
||||||
- mac: "aa:bb:cc:aa:bb:cc"
|
|
||||||
state: present
|
|
||||||
register: node
|
|
||||||
|
|
||||||
- name: Create baremetal port group
|
|
||||||
openstack.cloud.baremetal_port_group:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
state: present
|
|
||||||
name: ansible_baremetal_port_group
|
|
||||||
node: ansible_baremetal_node
|
|
||||||
address: fa:16:3e:aa:aa:ab
|
|
||||||
mode: active-backup
|
|
||||||
standalone_ports_supported: true
|
|
||||||
extra:
|
|
||||||
test: created
|
|
||||||
properties:
|
|
||||||
miimon: '100'
|
|
||||||
register: port_group
|
|
||||||
|
|
||||||
- debug: var=port_group
|
|
||||||
|
|
||||||
- name: Assert return values of baremetal_port_group module
|
|
||||||
assert:
|
|
||||||
that:
|
|
||||||
# allow new fields to be introduced but prevent fields from being removed
|
|
||||||
- expected_fields|difference(port_group.port_group.keys())|length == 0
|
|
||||||
- port_group.port_group.name == "ansible_baremetal_port_group"
|
|
||||||
- port_group.port_group.node_id == node.node.id
|
|
||||||
|
|
||||||
- name: Update baremetal port group
|
|
||||||
openstack.cloud.baremetal_port_group:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
state: present
|
|
||||||
id: "{{ port_group.port_group.id }}"
|
|
||||||
mode: 802.3ad
|
|
||||||
standalone_ports_supported: false
|
|
||||||
extra:
|
|
||||||
test: updated
|
|
||||||
register: updated_port_group
|
|
||||||
|
|
||||||
- name: Assert return values of updated baremetal port group
|
|
||||||
assert:
|
|
||||||
that:
|
|
||||||
- updated_port_group is changed
|
|
||||||
- updated_port_group.port_group.id == port_group.port_group.id
|
|
||||||
- updated_port_group.port_group.mode == "802.3ad"
|
|
||||||
- not updated_port_group.port_group.standalone_ports_supported
|
|
||||||
- updated_port_group.port_group.extra.test == "updated"
|
|
||||||
|
|
||||||
- name: Update baremetal port group again
|
|
||||||
openstack.cloud.baremetal_port_group:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
state: present
|
|
||||||
id: "{{ port_group.port_group.id }}"
|
|
||||||
mode: 802.3ad
|
|
||||||
standalone_ports_supported: false
|
|
||||||
extra:
|
|
||||||
test: updated
|
|
||||||
register: updated_port_group
|
|
||||||
|
|
||||||
- name: Assert idempotency for baremetal port group module
|
|
||||||
assert:
|
|
||||||
that:
|
|
||||||
- updated_port_group is not changed
|
|
||||||
- updated_port_group.port_group.id == port_group.port_group.id
|
|
||||||
|
|
||||||
- name: Delete baremetal port group
|
|
||||||
openstack.cloud.baremetal_port_group:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
state: absent
|
|
||||||
id: "{{ port_group.port_group.id }}"
|
|
||||||
|
|
||||||
- name: Delete baremetal port group again
|
|
||||||
openstack.cloud.baremetal_port_group:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
state: absent
|
|
||||||
id: "{{ port_group.port_group.id }}"
|
|
||||||
register: deleted_port_group
|
|
||||||
|
|
||||||
- name: Assert idempotency for deleted baremetal port group
|
|
||||||
assert:
|
|
||||||
that:
|
|
||||||
- deleted_port_group is not changed
|
|
||||||
|
|
||||||
- name: Delete baremetal node
|
|
||||||
openstack.cloud.baremetal_node:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
name: ansible_baremetal_node
|
|
||||||
state: absent
|
|
||||||
@@ -72,8 +72,6 @@
|
|||||||
image_id: '{{ image_id }}'
|
image_id: '{{ image_id }}'
|
||||||
is_floating_ip_enabled: true
|
is_floating_ip_enabled: true
|
||||||
keypair_id: '{{ keypair.keypair.id }}'
|
keypair_id: '{{ keypair.keypair.id }}'
|
||||||
flavor_id: 'm1.small'
|
|
||||||
master_flavor_id: 'm1.small'
|
|
||||||
name: k8s
|
name: k8s
|
||||||
state: present
|
state: present
|
||||||
register: coe_cluster_template
|
register: coe_cluster_template
|
||||||
|
|||||||
@@ -26,9 +26,6 @@
|
|||||||
keypair_id: '{{ keypair.keypair.id }}'
|
keypair_id: '{{ keypair.keypair.id }}'
|
||||||
name: k8s
|
name: k8s
|
||||||
state: present
|
state: present
|
||||||
labels:
|
|
||||||
docker_volume_size: 10
|
|
||||||
cloud_provider_tag: v1.23.1
|
|
||||||
register: coe_cluster_template
|
register: coe_cluster_template
|
||||||
|
|
||||||
- name: Assert return values of coe_cluster_template module
|
- name: Assert return values of coe_cluster_template module
|
||||||
@@ -46,9 +43,6 @@
|
|||||||
keypair_id: '{{ keypair.keypair.id }}'
|
keypair_id: '{{ keypair.keypair.id }}'
|
||||||
name: k8s
|
name: k8s
|
||||||
state: present
|
state: present
|
||||||
labels:
|
|
||||||
docker_volume_size: 10
|
|
||||||
cloud_provider_tag: v1.23.1
|
|
||||||
register: coe_cluster_template
|
register: coe_cluster_template
|
||||||
|
|
||||||
- name: Assert return values of coe_cluster_template module
|
- name: Assert return values of coe_cluster_template module
|
||||||
|
|||||||
@@ -241,7 +241,7 @@
|
|||||||
that:
|
that:
|
||||||
- server1_fips is success
|
- server1_fips is success
|
||||||
- server1_fips is not changed
|
- server1_fips is not changed
|
||||||
- server1_fips.floating_ips|length > 0
|
- server1_fips.floating_ips
|
||||||
# allow new fields to be introduced but prevent fields from being removed
|
# allow new fields to be introduced but prevent fields from being removed
|
||||||
- expected_fields|difference(server1_fips.floating_ips[0].keys())|length == 0
|
- expected_fields|difference(server1_fips.floating_ips[0].keys())|length == 0
|
||||||
|
|
||||||
@@ -260,7 +260,7 @@
|
|||||||
- name: Assert return values of floating_ip module
|
- name: Assert return values of floating_ip module
|
||||||
assert:
|
assert:
|
||||||
that:
|
that:
|
||||||
- floating_ip.floating_ip|length > 0
|
- floating_ip.floating_ip
|
||||||
# allow new fields to be introduced but prevent fields from being removed
|
# allow new fields to be introduced but prevent fields from being removed
|
||||||
- expected_fields|difference(floating_ip.floating_ip.keys())|length == 0
|
- expected_fields|difference(floating_ip.floating_ip.keys())|length == 0
|
||||||
|
|
||||||
@@ -312,7 +312,7 @@
|
|||||||
- name: Assert floating ip attached to server 2
|
- name: Assert floating ip attached to server 2
|
||||||
assert:
|
assert:
|
||||||
that:
|
that:
|
||||||
- server2_fip.floating_ip|length > 0
|
- server2_fip.floating_ip
|
||||||
|
|
||||||
- name: Find all floating ips for debugging
|
- name: Find all floating ips for debugging
|
||||||
openstack.cloud.floating_ip_info:
|
openstack.cloud.floating_ip_info:
|
||||||
|
|||||||
@@ -176,34 +176,6 @@
|
|||||||
- image is changed
|
- image is changed
|
||||||
- image.image.name == 'ansible_image-changed'
|
- image.image.name == 'ansible_image-changed'
|
||||||
|
|
||||||
- name: Deactivate raw image
|
|
||||||
openstack.cloud.image:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
state: inactive
|
|
||||||
id: "{{ image.image.id }}"
|
|
||||||
name: 'ansible_image-changed'
|
|
||||||
register: image
|
|
||||||
|
|
||||||
- name: Assert changed
|
|
||||||
assert:
|
|
||||||
that:
|
|
||||||
- image is changed
|
|
||||||
- image.image.status == 'deactivated'
|
|
||||||
|
|
||||||
- name: Reactivate raw image
|
|
||||||
openstack.cloud.image:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
state: present
|
|
||||||
id: "{{ image.image.id }}"
|
|
||||||
name: 'ansible_image-changed'
|
|
||||||
register: image
|
|
||||||
|
|
||||||
- name: Assert changed
|
|
||||||
assert:
|
|
||||||
that:
|
|
||||||
- image is changed
|
|
||||||
- image.image.status == 'active'
|
|
||||||
|
|
||||||
- name: Rename back raw image (defaults)
|
- name: Rename back raw image (defaults)
|
||||||
openstack.cloud.image:
|
openstack.cloud.image:
|
||||||
cloud: "{{ cloud }}"
|
cloud: "{{ cloud }}"
|
||||||
|
|||||||
@@ -279,11 +279,6 @@
|
|||||||
ansible.builtin.set_fact:
|
ansible.builtin.set_fact:
|
||||||
cache: "{{ cache.content | b64decode | from_yaml }}"
|
cache: "{{ cache.content | b64decode | from_yaml }}"
|
||||||
|
|
||||||
- name: Further process Ansible 2.19+ cache
|
|
||||||
ansible.builtin.set_fact:
|
|
||||||
cache: "{{ cache.__payload__ | from_yaml }}"
|
|
||||||
when: cache.__payload__ is defined
|
|
||||||
|
|
||||||
- name: Check Ansible's cache
|
- name: Check Ansible's cache
|
||||||
assert:
|
assert:
|
||||||
that:
|
that:
|
||||||
@@ -308,25 +303,6 @@
|
|||||||
that:
|
that:
|
||||||
- inventory.all.children.RegionOne.hosts.keys() | sort == ['ansible_server1', 'ansible_server2'] | sort
|
- inventory.all.children.RegionOne.hosts.keys() | sort == ['ansible_server1', 'ansible_server2'] | sort
|
||||||
|
|
||||||
- name: List servers with inventory plugin with IPv4 only
|
|
||||||
ansible.builtin.command:
|
|
||||||
cmd: ansible-inventory --list --yaml --extra-vars only_ipv4=true --inventory-file openstack.yaml
|
|
||||||
chdir: "{{ tmp_dir.path }}"
|
|
||||||
environment:
|
|
||||||
ANSIBLE_INVENTORY_CACHE: "True"
|
|
||||||
ANSIBLE_INVENTORY_CACHE_PLUGIN: "jsonfile"
|
|
||||||
ANSIBLE_CACHE_PLUGIN_CONNECTION: "{{ tmp_dir.path }}/.cache/"
|
|
||||||
register: inventory
|
|
||||||
|
|
||||||
- name: Read YAML output from inventory plugin again
|
|
||||||
ansible.builtin.set_fact:
|
|
||||||
inventory: "{{ inventory.stdout | from_yaml }}"
|
|
||||||
|
|
||||||
- name: Check YAML output from inventory plugin again
|
|
||||||
assert:
|
|
||||||
that:
|
|
||||||
- inventory.all.children.RegionOne.hosts.keys() | sort == ['ansible_server1', 'ansible_server2'] | sort
|
|
||||||
|
|
||||||
- name: Delete server 2
|
- name: Delete server 2
|
||||||
openstack.cloud.resource:
|
openstack.cloud.resource:
|
||||||
service: compute
|
service: compute
|
||||||
|
|||||||
@@ -38,7 +38,7 @@
|
|||||||
- name: Ensure public key is returned
|
- name: Ensure public key is returned
|
||||||
assert:
|
assert:
|
||||||
that:
|
that:
|
||||||
- keypair.keypair.public_key is defined and keypair.keypair.public_key|length > 0
|
- keypair.keypair.public_key is defined and keypair.keypair.public_key
|
||||||
|
|
||||||
- name: Create another keypair
|
- name: Create another keypair
|
||||||
openstack.cloud.keypair:
|
openstack.cloud.keypair:
|
||||||
|
|||||||
@@ -11,7 +11,7 @@
|
|||||||
- name: Check output of creating network
|
- name: Check output of creating network
|
||||||
assert:
|
assert:
|
||||||
that:
|
that:
|
||||||
- infonet.network is defined
|
- infonet.network
|
||||||
- item in infonet.network
|
- item in infonet.network
|
||||||
loop: "{{ expected_fields }}"
|
loop: "{{ expected_fields }}"
|
||||||
|
|
||||||
|
|||||||
@@ -1,17 +0,0 @@
|
|||||||
---
|
|
||||||
expected_fields:
|
|
||||||
- description
|
|
||||||
- id
|
|
||||||
- name
|
|
||||||
- network_id
|
|
||||||
- network_type
|
|
||||||
- physical_network
|
|
||||||
- segmentation_id
|
|
||||||
|
|
||||||
network_name: segment_network
|
|
||||||
segment_name: example_segment
|
|
||||||
network_type: vlan
|
|
||||||
segmentation_id: 999
|
|
||||||
physical_network: public
|
|
||||||
initial_description: "example segment description"
|
|
||||||
updated_description: "updated segment description"
|
|
||||||
@@ -1,72 +0,0 @@
|
|||||||
---
|
|
||||||
- name: Create network {{ network_name }}
|
|
||||||
openstack.cloud.network:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
name: "{{ network_name }}"
|
|
||||||
state: present
|
|
||||||
|
|
||||||
- name: Create segment {{ segment_name }}
|
|
||||||
openstack.cloud.network_segment:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
name: "{{ segment_name }}"
|
|
||||||
description: "{{ initial_description }}"
|
|
||||||
network: "{{ network_name }}"
|
|
||||||
network_type: "{{ network_type }}"
|
|
||||||
segmentation_id: "{{ segmentation_id }}"
|
|
||||||
physical_network: "{{ physical_network }}"
|
|
||||||
state: present
|
|
||||||
register: segment
|
|
||||||
|
|
||||||
- name: Assert changed
|
|
||||||
assert:
|
|
||||||
that: segment is changed
|
|
||||||
|
|
||||||
- name: Assert segment fields
|
|
||||||
assert:
|
|
||||||
that: item in segment.network_segment
|
|
||||||
loop: "{{ expected_fields }}"
|
|
||||||
|
|
||||||
- name: Update segment {{ segment_name }} by name - no changes
|
|
||||||
openstack.cloud.network_segment:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
name: "{{ segment_name }}"
|
|
||||||
description: "{{ initial_description }}"
|
|
||||||
state: present
|
|
||||||
register: segment
|
|
||||||
|
|
||||||
- name: Assert not changed
|
|
||||||
assert:
|
|
||||||
that: segment is not changed
|
|
||||||
|
|
||||||
- name: Update segment {{ segment_name }} by all fields - changes
|
|
||||||
openstack.cloud.network_segment:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
name: "{{ segment_name }}"
|
|
||||||
description: "{{ updated_description }}"
|
|
||||||
network: "{{ network_name }}"
|
|
||||||
network_type: "{{ network_type }}"
|
|
||||||
segmentation_id: "{{ segmentation_id }}"
|
|
||||||
physical_network: "{{ physical_network }}"
|
|
||||||
state: present
|
|
||||||
register: segment
|
|
||||||
|
|
||||||
- name: Assert changed
|
|
||||||
assert:
|
|
||||||
that: segment is changed
|
|
||||||
|
|
||||||
- name: Delete segment {{ segment_name }}
|
|
||||||
openstack.cloud.network_segment:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
name: "{{ segment_name }}"
|
|
||||||
state: absent
|
|
||||||
register: segment
|
|
||||||
|
|
||||||
- name: Assert changed
|
|
||||||
assert:
|
|
||||||
that: segment is changed
|
|
||||||
|
|
||||||
- name: Delete network {{ network_name }}
|
|
||||||
openstack.cloud.network:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
name: "{{ network_name }}"
|
|
||||||
state: absent
|
|
||||||
@@ -7,4 +7,3 @@ expected_fields:
|
|||||||
- project_id
|
- project_id
|
||||||
- target_project_id
|
- target_project_id
|
||||||
- tenant_id
|
- tenant_id
|
||||||
all_project_symbol: '*'
|
|
||||||
|
|||||||
@@ -69,29 +69,6 @@
|
|||||||
id: "{{ rbac_policy.rbac_policy.id }}"
|
id: "{{ rbac_policy.rbac_policy.id }}"
|
||||||
state: absent
|
state: absent
|
||||||
|
|
||||||
- name: Create a new network RBAC policy by targeting all projects
|
|
||||||
openstack.cloud.neutron_rbac_policy:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
object_id: "{{ network.network.id }}"
|
|
||||||
object_type: 'network'
|
|
||||||
action: 'access_as_shared'
|
|
||||||
target_all_project: true
|
|
||||||
project_id: "{{ source_project.project.id }}"
|
|
||||||
register: rbac_policy
|
|
||||||
|
|
||||||
- name: Assert return values of neutron_rbac_policy module
|
|
||||||
assert:
|
|
||||||
that:
|
|
||||||
# allow new fields to be introduced but prevent fields from being removed
|
|
||||||
- expected_fields|difference(rbac_policy.rbac_policy.keys())|length == 0
|
|
||||||
- rbac_policy.rbac_policy.target_project_id == all_project_symbol
|
|
||||||
|
|
||||||
- name: Delete RBAC policy
|
|
||||||
openstack.cloud.neutron_rbac_policy:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
id: "{{ rbac_policy.rbac_policy.id }}"
|
|
||||||
state: absent
|
|
||||||
|
|
||||||
- name: Get all rbac policies for {{ source_project.project.name }} - after deletion
|
- name: Get all rbac policies for {{ source_project.project.name }} - after deletion
|
||||||
openstack.cloud.neutron_rbac_policies_info:
|
openstack.cloud.neutron_rbac_policies_info:
|
||||||
cloud: "{{ cloud }}"
|
cloud: "{{ cloud }}"
|
||||||
|
|||||||
@@ -5,7 +5,7 @@
|
|||||||
state: present
|
state: present
|
||||||
name: ansible_container
|
name: ansible_container
|
||||||
|
|
||||||
- name: Create object from data
|
- name: Create object
|
||||||
openstack.cloud.object:
|
openstack.cloud.object:
|
||||||
cloud: "{{ cloud }}"
|
cloud: "{{ cloud }}"
|
||||||
state: present
|
state: present
|
||||||
@@ -28,47 +28,6 @@
|
|||||||
name: ansible_object
|
name: ansible_object
|
||||||
container: ansible_container
|
container: ansible_container
|
||||||
|
|
||||||
- name: Create object from file
|
|
||||||
block:
|
|
||||||
- name: Create temporary data file
|
|
||||||
ansible.builtin.tempfile:
|
|
||||||
register: tmp_file
|
|
||||||
|
|
||||||
- name: Populate data file
|
|
||||||
ansible.builtin.copy:
|
|
||||||
content: "this is a test"
|
|
||||||
dest: "{{ tmp_file.path }}"
|
|
||||||
|
|
||||||
- name: Create object from data file
|
|
||||||
openstack.cloud.object:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
state: present
|
|
||||||
name: ansible_object
|
|
||||||
filename: "{{ tmp_file.path }}"
|
|
||||||
container: ansible_container
|
|
||||||
register: object
|
|
||||||
|
|
||||||
always:
|
|
||||||
- name: Remove temporary data file
|
|
||||||
ansible.builtin.file:
|
|
||||||
path: "{{ tmp_file.path }}"
|
|
||||||
state: absent
|
|
||||||
when: tmp_file is defined and 'path' in tmp_file
|
|
||||||
|
|
||||||
- name: Assert return values of object module
|
|
||||||
assert:
|
|
||||||
that:
|
|
||||||
- object.object.id == "ansible_object"
|
|
||||||
# allow new fields to be introduced but prevent fields from being removed
|
|
||||||
- expected_fields|difference(object.object.keys())|length == 0
|
|
||||||
|
|
||||||
- name: Delete object
|
|
||||||
openstack.cloud.object:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
state: absent
|
|
||||||
name: ansible_object
|
|
||||||
container: ansible_container
|
|
||||||
|
|
||||||
- name: Delete container
|
- name: Delete container
|
||||||
openstack.cloud.object_container:
|
openstack.cloud.object_container:
|
||||||
cloud: "{{ cloud }}"
|
cloud: "{{ cloud }}"
|
||||||
|
|||||||
@@ -31,21 +31,6 @@
|
|||||||
- ('cache-control' in container.container.metadata.keys()|map('lower'))
|
- ('cache-control' in container.container.metadata.keys()|map('lower'))
|
||||||
- container.container.metadata['foo'] == 'bar'
|
- container.container.metadata['foo'] == 'bar'
|
||||||
|
|
||||||
- name: Update container metadata
|
|
||||||
openstack.cloud.object_container:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
name: ansible_container
|
|
||||||
metadata:
|
|
||||||
'foo': 'baz'
|
|
||||||
register: container
|
|
||||||
|
|
||||||
- name: Verify container metadata was updated
|
|
||||||
assert:
|
|
||||||
that:
|
|
||||||
- container is changed
|
|
||||||
- ('cache-control' in container.container.metadata.keys()|map('lower'))
|
|
||||||
- container.container.metadata['foo'] == 'baz'
|
|
||||||
|
|
||||||
- name: Update a container
|
- name: Update a container
|
||||||
openstack.cloud.object_container:
|
openstack.cloud.object_container:
|
||||||
cloud: "{{ cloud }}"
|
cloud: "{{ cloud }}"
|
||||||
@@ -60,7 +45,7 @@
|
|||||||
that:
|
that:
|
||||||
- container is changed
|
- container is changed
|
||||||
- ('cache-control' not in container.container.metadata.keys()|map('lower'))
|
- ('cache-control' not in container.container.metadata.keys()|map('lower'))
|
||||||
- "container.container.metadata == {'foo': 'baz'}"
|
- "container.container.metadata == {'foo': 'bar'}"
|
||||||
- container.container.read_ACL is none or container.container.read_ACL == ""
|
- container.container.read_ACL is none or container.container.read_ACL == ""
|
||||||
|
|
||||||
- name: Delete container
|
- name: Delete container
|
||||||
|
|||||||
@@ -1,37 +0,0 @@
|
|||||||
---
|
|
||||||
|
|
||||||
test_container_unprefixed_name: ansible-test-container
|
|
||||||
test_container_prefixed_prefix: ansible-prefixed-test-container
|
|
||||||
test_container_prefixed_num: 2
|
|
||||||
|
|
||||||
test_object_data: "Hello, world!"
|
|
||||||
|
|
||||||
expected_fields_single:
|
|
||||||
- bytes
|
|
||||||
- bytes_used
|
|
||||||
- content_type
|
|
||||||
- count
|
|
||||||
- history_location
|
|
||||||
- id
|
|
||||||
- if_none_match
|
|
||||||
- is_content_type_detected
|
|
||||||
- is_newest
|
|
||||||
- meta_temp_url_key
|
|
||||||
- meta_temp_url_key_2
|
|
||||||
- name
|
|
||||||
- object_count
|
|
||||||
- read_ACL
|
|
||||||
- storage_policy
|
|
||||||
- sync_key
|
|
||||||
- sync_to
|
|
||||||
- timestamp
|
|
||||||
- versions_location
|
|
||||||
- write_ACL
|
|
||||||
|
|
||||||
expected_fields_multiple:
|
|
||||||
- bytes
|
|
||||||
- bytes_used
|
|
||||||
- count
|
|
||||||
- id
|
|
||||||
- name
|
|
||||||
- object_count
|
|
||||||
@@ -1,124 +0,0 @@
|
|||||||
---
|
|
||||||
|
|
||||||
- name: Generate list of containers to create
|
|
||||||
ansible.builtin.set_fact:
|
|
||||||
all_test_containers: >-
|
|
||||||
{{
|
|
||||||
[test_container_unprefixed_name]
|
|
||||||
+ (
|
|
||||||
[test_container_prefixed_prefix + '-']
|
|
||||||
| product(range(test_container_prefixed_num) | map('string'))
|
|
||||||
| map('join', '')
|
|
||||||
)
|
|
||||||
}}
|
|
||||||
|
|
||||||
- name: Run checks
|
|
||||||
block:
|
|
||||||
|
|
||||||
- name: Create all containers
|
|
||||||
openstack.cloud.object_container:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
name: "{{ item }}"
|
|
||||||
read_ACL: ".r:*,.rlistings"
|
|
||||||
loop: "{{ all_test_containers }}"
|
|
||||||
|
|
||||||
- name: Create an object in all containers
|
|
||||||
openstack.cloud.object:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
container: "{{ item }}"
|
|
||||||
name: hello.txt
|
|
||||||
data: "{{ test_object_data }}"
|
|
||||||
loop: "{{ all_test_containers }}"
|
|
||||||
|
|
||||||
- name: Fetch single containers by name
|
|
||||||
openstack.cloud.object_containers_info:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
name: "{{ item }}"
|
|
||||||
register: single_containers
|
|
||||||
loop: "{{ all_test_containers }}"
|
|
||||||
|
|
||||||
- name: Check that all fields are returned for single containers
|
|
||||||
ansible.builtin.assert:
|
|
||||||
that:
|
|
||||||
- (item.containers | length) == 1
|
|
||||||
- item.containers[0].name == item.item
|
|
||||||
- item.containers[0].bytes == (test_object_data | length)
|
|
||||||
- item.containers[0].read_ACL == ".r:*,.rlistings"
|
|
||||||
# allow new fields to be introduced but prevent fields from being removed
|
|
||||||
- (expected_fields_single | difference(item.containers[0].keys()) | length) == 0
|
|
||||||
quiet: true
|
|
||||||
loop: "{{ single_containers.results }}"
|
|
||||||
loop_control:
|
|
||||||
label: "{{ item.item }}"
|
|
||||||
|
|
||||||
- name: Fetch multiple containers by prefix
|
|
||||||
openstack.cloud.object_containers_info:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
prefix: "{{ test_container_prefixed_prefix }}"
|
|
||||||
register: multiple_containers
|
|
||||||
|
|
||||||
- name: Check that the correct number of prefixed containers were returned
|
|
||||||
ansible.builtin.assert:
|
|
||||||
that:
|
|
||||||
- (multiple_containers.containers | length) == test_container_prefixed_num
|
|
||||||
fail_msg: >-
|
|
||||||
Incorrect number of containers found
|
|
||||||
(found {{ multiple_containers.containers | length }},
|
|
||||||
expected {{ test_container_prefixed_num }})
|
|
||||||
quiet: true
|
|
||||||
|
|
||||||
- name: Check that all prefixed containers exist
|
|
||||||
ansible.builtin.assert:
|
|
||||||
that:
|
|
||||||
- >-
|
|
||||||
(test_container_prefixed_prefix + '-' + (item | string))
|
|
||||||
in (multiple_containers.containers | map(attribute='name'))
|
|
||||||
fail_msg: "Container not found: {{ test_container_prefixed_prefix + '-' + (item | string) }}"
|
|
||||||
quiet: true
|
|
||||||
loop: "{{ range(test_container_prefixed_num) | list }}"
|
|
||||||
loop_control:
|
|
||||||
label: "{{ test_container_prefixed_prefix + '-' + (item | string) }}"
|
|
||||||
|
|
||||||
- name: Check that the expected fields are returned for all prefixed containers
|
|
||||||
ansible.builtin.assert:
|
|
||||||
that:
|
|
||||||
- item.name.startswith(test_container_prefixed_prefix)
|
|
||||||
# allow new fields to be introduced but prevent fields from being removed
|
|
||||||
- (expected_fields_multiple | difference(item.keys()) | length) == 0
|
|
||||||
quiet: true
|
|
||||||
loop: "{{ multiple_containers.containers | sort(attribute='name') }}"
|
|
||||||
loop_control:
|
|
||||||
label: "{{ item.name }}"
|
|
||||||
|
|
||||||
- name: Fetch all containers
|
|
||||||
openstack.cloud.object_containers_info:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
register: all_containers
|
|
||||||
|
|
||||||
- name: Check that all expected containers were returned
|
|
||||||
ansible.builtin.assert:
|
|
||||||
that:
|
|
||||||
- item in (all_containers.containers | map(attribute='name'))
|
|
||||||
fail_msg: "Container not found: {{ item }}"
|
|
||||||
quiet: true
|
|
||||||
loop: "{{ all_test_containers }}"
|
|
||||||
|
|
||||||
- name: Check that the expected fields are returned for all containers
|
|
||||||
ansible.builtin.assert:
|
|
||||||
that:
|
|
||||||
# allow new fields to be introduced but prevent fields from being removed
|
|
||||||
- (expected_fields_multiple | difference(item.keys()) | length) == 0
|
|
||||||
quiet: true
|
|
||||||
loop: "{{ all_containers.containers | selectattr('name', 'in', all_test_containers) }}"
|
|
||||||
loop_control:
|
|
||||||
label: "{{ item.name }}"
|
|
||||||
|
|
||||||
always:
|
|
||||||
|
|
||||||
- name: Delete all containers
|
|
||||||
openstack.cloud.object_container:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
name: "{{ item }}"
|
|
||||||
state: absent
|
|
||||||
delete_with_all_objects: true
|
|
||||||
loop: "{{ all_test_containers }}"
|
|
||||||
@@ -1,3 +1,6 @@
|
|||||||
|
binding_profile:
|
||||||
|
"pci_slot": "0000:03:11.1"
|
||||||
|
"physical_network": "provider"
|
||||||
expected_fields:
|
expected_fields:
|
||||||
- allowed_address_pairs
|
- allowed_address_pairs
|
||||||
- binding_host_id
|
- binding_host_id
|
||||||
|
|||||||
@@ -256,6 +256,27 @@
|
|||||||
state: absent
|
state: absent
|
||||||
name: ansible_security_group
|
name: ansible_security_group
|
||||||
|
|
||||||
|
- name: Create port (with binding profile)
|
||||||
|
openstack.cloud.port:
|
||||||
|
cloud: "{{ cloud }}"
|
||||||
|
state: present
|
||||||
|
name: "{{ port_name }}"
|
||||||
|
network: "{{ network_name }}"
|
||||||
|
binding_profile: "{{ binding_profile }}"
|
||||||
|
register: port
|
||||||
|
|
||||||
|
- name: Assert binding_profile exists in created port
|
||||||
|
assert:
|
||||||
|
that: "port.port['binding_profile']"
|
||||||
|
|
||||||
|
- debug: var=port
|
||||||
|
|
||||||
|
- name: Delete port (with binding profile)
|
||||||
|
openstack.cloud.port:
|
||||||
|
cloud: "{{ cloud }}"
|
||||||
|
state: absent
|
||||||
|
name: "{{ port_name }}"
|
||||||
|
|
||||||
- name: Delete subnet
|
- name: Delete subnet
|
||||||
openstack.cloud.subnet:
|
openstack.cloud.subnet:
|
||||||
cloud: "{{ cloud }}"
|
cloud: "{{ cloud }}"
|
||||||
|
|||||||
@@ -174,38 +174,6 @@
|
|||||||
that:
|
that:
|
||||||
- project.project.is_enabled == True
|
- project.project.is_enabled == True
|
||||||
|
|
||||||
- name: Update project to add new extra_specs
|
|
||||||
openstack.cloud.project:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
state: present
|
|
||||||
name: ansible_project
|
|
||||||
extra_specs:
|
|
||||||
is_enabled: True
|
|
||||||
another_tag: True
|
|
||||||
register: project
|
|
||||||
|
|
||||||
- name: Assert return values of project module
|
|
||||||
assert:
|
|
||||||
that:
|
|
||||||
- project.project.is_enabled == True
|
|
||||||
- project.project.another_tag == True
|
|
||||||
|
|
||||||
- name: Update project to change existing extra_specs
|
|
||||||
openstack.cloud.project:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
state: present
|
|
||||||
name: ansible_project
|
|
||||||
extra_specs:
|
|
||||||
is_enabled: True
|
|
||||||
another_tag: False
|
|
||||||
register: project
|
|
||||||
|
|
||||||
- name: Assert return values of project module
|
|
||||||
assert:
|
|
||||||
that:
|
|
||||||
- project.project.is_enabled == True
|
|
||||||
- project.project.another_tag == False
|
|
||||||
|
|
||||||
- name: Delete project
|
- name: Delete project
|
||||||
openstack.cloud.project:
|
openstack.cloud.project:
|
||||||
cloud: "{{ cloud }}"
|
cloud: "{{ cloud }}"
|
||||||
|
|||||||
@@ -28,9 +28,3 @@ test_compute_quota:
|
|||||||
ram: 5
|
ram: 5
|
||||||
server_group_members: 5
|
server_group_members: 5
|
||||||
server_groups: 5
|
server_groups: 5
|
||||||
test_load_balancer_quota:
|
|
||||||
load_balancers: 5
|
|
||||||
health_monitors: 5
|
|
||||||
listeners: 5
|
|
||||||
pools: 5
|
|
||||||
members: 5
|
|
||||||
|
|||||||
@@ -1,158 +0,0 @@
|
|||||||
---
|
|
||||||
- module_defaults:
|
|
||||||
group/openstack.cloud.openstack:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
name: "{{ test_project }}"
|
|
||||||
# Backward compatibility with Ansible 2.9
|
|
||||||
openstack.cloud.project:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
name: "{{ test_project }}"
|
|
||||||
openstack.cloud.quota:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
name: "{{ test_project }}"
|
|
||||||
block:
|
|
||||||
- name: Create test project
|
|
||||||
openstack.cloud.project:
|
|
||||||
state: present
|
|
||||||
|
|
||||||
- name: Clear quotas before tests
|
|
||||||
openstack.cloud.quota:
|
|
||||||
state: absent
|
|
||||||
register: default_quotas
|
|
||||||
|
|
||||||
- name: Set network quota
|
|
||||||
openstack.cloud.quota: "{{ test_network_quota }}"
|
|
||||||
register: quotas
|
|
||||||
|
|
||||||
- name: Assert changed
|
|
||||||
assert:
|
|
||||||
that: quotas is changed
|
|
||||||
|
|
||||||
- name: Assert field values
|
|
||||||
assert:
|
|
||||||
that: quotas.quotas.network[item.key] == item.value
|
|
||||||
loop: "{{ test_network_quota | dict2items }}"
|
|
||||||
|
|
||||||
- name: Set network quota again
|
|
||||||
openstack.cloud.quota: "{{ test_network_quota }}"
|
|
||||||
register: quotas
|
|
||||||
|
|
||||||
- name: Assert not changed
|
|
||||||
assert:
|
|
||||||
that: quotas is not changed
|
|
||||||
|
|
||||||
- name: Set volume quotas
|
|
||||||
openstack.cloud.quota: "{{ test_volume_quota }}"
|
|
||||||
register: quotas
|
|
||||||
|
|
||||||
- name: Assert changed
|
|
||||||
assert:
|
|
||||||
that: quotas is changed
|
|
||||||
|
|
||||||
- name: Assert field values
|
|
||||||
assert:
|
|
||||||
that: quotas.quotas.volume[item.key] == item.value
|
|
||||||
loop: "{{ test_volume_quota | dict2items }}"
|
|
||||||
|
|
||||||
- name: Set volume quotas again
|
|
||||||
openstack.cloud.quota: "{{ test_volume_quota }}"
|
|
||||||
register: quotas
|
|
||||||
|
|
||||||
- name: Assert not changed
|
|
||||||
assert:
|
|
||||||
that: quotas is not changed
|
|
||||||
|
|
||||||
- name: Set compute quotas
|
|
||||||
openstack.cloud.quota: "{{ test_compute_quota }}"
|
|
||||||
register: quotas
|
|
||||||
|
|
||||||
- name: Assert changed
|
|
||||||
assert:
|
|
||||||
that: quotas is changed
|
|
||||||
|
|
||||||
- name: Assert field values
|
|
||||||
assert:
|
|
||||||
that: quotas.quotas.compute[item.key] == item.value
|
|
||||||
loop: "{{ test_compute_quota | dict2items }}"
|
|
||||||
|
|
||||||
- name: Set compute quotas again
|
|
||||||
openstack.cloud.quota: "{{ test_compute_quota }}"
|
|
||||||
register: quotas
|
|
||||||
|
|
||||||
- name: Set load_balancer quotas
|
|
||||||
openstack.cloud.quota: "{{ test_load_balancer_quota }}"
|
|
||||||
register: quotas
|
|
||||||
|
|
||||||
- name: Assert changed
|
|
||||||
assert:
|
|
||||||
that: quotas is changed
|
|
||||||
|
|
||||||
- name: Assert field values
|
|
||||||
assert:
|
|
||||||
that: quotas.quotas.load_balancer[item.key] == item.value
|
|
||||||
loop: "{{ test_load_balancer_quota | dict2items }}"
|
|
||||||
|
|
||||||
- name: Set load_balancer quotas again
|
|
||||||
openstack.cloud.quota: "{{ test_load_balancer_quota }}"
|
|
||||||
register: quotas
|
|
||||||
|
|
||||||
- name: Assert not changed
|
|
||||||
assert:
|
|
||||||
that: quotas is not changed
|
|
||||||
|
|
||||||
- name: Unset all quotas
|
|
||||||
openstack.cloud.quota:
|
|
||||||
state: absent
|
|
||||||
register: quotas
|
|
||||||
|
|
||||||
- name: Assert defaults restore
|
|
||||||
assert:
|
|
||||||
that: quotas.quotas == default_quotas.quotas
|
|
||||||
|
|
||||||
- name: Set all quotas at once
|
|
||||||
openstack.cloud.quota:
|
|
||||||
"{{ [test_network_quota, test_volume_quota, test_compute_quota, test_load_balancer_quota] | combine }}"
|
|
||||||
register: quotas
|
|
||||||
|
|
||||||
- name: Assert changed
|
|
||||||
assert:
|
|
||||||
that: quotas is changed
|
|
||||||
|
|
||||||
- name: Assert volume values
|
|
||||||
assert:
|
|
||||||
that: quotas.quotas.volume[item.key] == item.value
|
|
||||||
loop: "{{ test_volume_quota | dict2items }}"
|
|
||||||
|
|
||||||
- name: Assert network values
|
|
||||||
assert:
|
|
||||||
that: quotas.quotas.network[item.key] == item.value
|
|
||||||
loop: "{{ test_network_quota | dict2items }}"
|
|
||||||
|
|
||||||
- name: Assert compute values
|
|
||||||
assert:
|
|
||||||
that: quotas.quotas.compute[item.key] == item.value
|
|
||||||
loop: "{{ test_compute_quota | dict2items }}"
|
|
||||||
|
|
||||||
- name: Assert load_balancer values
|
|
||||||
assert:
|
|
||||||
that: quotas.quotas.load_balancer[item.key] == item.value
|
|
||||||
loop: "{{ test_load_balancer_quota | dict2items }}"
|
|
||||||
|
|
||||||
- name: Set all quotas at once again
|
|
||||||
openstack.cloud.quota:
|
|
||||||
"{{ [test_network_quota, test_volume_quota, test_compute_quota, test_load_balancer_quota] | combine }}"
|
|
||||||
register: quotas
|
|
||||||
|
|
||||||
- name: Assert not changed
|
|
||||||
assert:
|
|
||||||
that: quotas is not changed
|
|
||||||
|
|
||||||
- name: Unset all quotas
|
|
||||||
openstack.cloud.quota:
|
|
||||||
state: absent
|
|
||||||
register: quotas
|
|
||||||
|
|
||||||
- name: Delete test project
|
|
||||||
openstack.cloud.project:
|
|
||||||
state: absent
|
|
||||||
|
|
||||||
@@ -128,9 +128,4 @@
|
|||||||
|
|
||||||
- name: Delete test project
|
- name: Delete test project
|
||||||
openstack.cloud.project:
|
openstack.cloud.project:
|
||||||
state: absent
|
state: absent
|
||||||
|
|
||||||
- import_tasks: loadbalancer.yml
|
|
||||||
tags:
|
|
||||||
- loadbalancer
|
|
||||||
|
|
||||||
|
|||||||
@@ -14,15 +14,6 @@
|
|||||||
email: test@example.net
|
email: test@example.net
|
||||||
register: dns_zone
|
register: dns_zone
|
||||||
|
|
||||||
- name: Ensure recordset not present
|
|
||||||
openstack.cloud.recordset:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
zone: "{{ dns_zone.zone.name }}"
|
|
||||||
name: "{{ recordset_name }}"
|
|
||||||
recordset_type: "a"
|
|
||||||
records: "{{ records }}"
|
|
||||||
state: absent
|
|
||||||
|
|
||||||
- name: Create a recordset
|
- name: Create a recordset
|
||||||
openstack.cloud.recordset:
|
openstack.cloud.recordset:
|
||||||
cloud: "{{ cloud }}"
|
cloud: "{{ cloud }}"
|
||||||
@@ -31,13 +22,11 @@
|
|||||||
recordset_type: "a"
|
recordset_type: "a"
|
||||||
records: "{{ records }}"
|
records: "{{ records }}"
|
||||||
register: recordset
|
register: recordset
|
||||||
until: '"PENDING" not in recordset["recordset"].status'
|
|
||||||
retries: 10
|
|
||||||
delay: 5
|
|
||||||
|
|
||||||
- name: Verify recordset info
|
- name: Verify recordset info
|
||||||
assert:
|
assert:
|
||||||
that:
|
that:
|
||||||
|
- recordset is changed
|
||||||
- recordset["recordset"].name == recordset_name
|
- recordset["recordset"].name == recordset_name
|
||||||
- recordset["recordset"].zone_name == dns_zone.zone.name
|
- recordset["recordset"].zone_name == dns_zone.zone.name
|
||||||
- recordset["recordset"].records | list | sort == records | list | sort
|
- recordset["recordset"].records | list | sort == records | list | sort
|
||||||
|
|||||||
@@ -45,6 +45,12 @@
|
|||||||
state: absent
|
state: absent
|
||||||
user: admin
|
user: admin
|
||||||
|
|
||||||
|
- name: Delete project
|
||||||
|
openstack.cloud.project:
|
||||||
|
cloud: "{{ cloud }}"
|
||||||
|
state: absent
|
||||||
|
name: ansible_project
|
||||||
|
|
||||||
- name: Create domain
|
- name: Create domain
|
||||||
openstack.cloud.identity_domain:
|
openstack.cloud.identity_domain:
|
||||||
cloud: "{{ cloud }}"
|
cloud: "{{ cloud }}"
|
||||||
@@ -72,7 +78,6 @@
|
|||||||
state: present
|
state: present
|
||||||
name: ansible_user
|
name: ansible_user
|
||||||
domain: default
|
domain: default
|
||||||
register: specific_user
|
|
||||||
|
|
||||||
- name: Create user in specific domain
|
- name: Create user in specific domain
|
||||||
openstack.cloud.identity_user:
|
openstack.cloud.identity_user:
|
||||||
@@ -133,45 +138,6 @@
|
|||||||
that:
|
that:
|
||||||
- role_assignment is changed
|
- role_assignment is changed
|
||||||
|
|
||||||
- name: Assign role to user in specific domain on default domain project
|
|
||||||
openstack.cloud.role_assignment:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
role: anotherrole
|
|
||||||
user: "{{ specific_user.user.id }}"
|
|
||||||
domain: default
|
|
||||||
project: ansible_project
|
|
||||||
register: role_assignment
|
|
||||||
|
|
||||||
- name: Assert role assignment
|
|
||||||
assert:
|
|
||||||
that:
|
|
||||||
- role_assignment is changed
|
|
||||||
|
|
||||||
- name: Revoke role to user in specific domain
|
|
||||||
openstack.cloud.role_assignment:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
role: anotherrole
|
|
||||||
user: "{{ specific_user.user.id }}"
|
|
||||||
domain: default
|
|
||||||
project: ansible_project
|
|
||||||
state: absent
|
|
||||||
register: role_assignment
|
|
||||||
|
|
||||||
- name: Assert role assignment revoked
|
|
||||||
assert:
|
|
||||||
that:
|
|
||||||
- role_assignment is changed
|
|
||||||
|
|
||||||
- name: Assign role to user in specific domain on default domain project
|
|
||||||
openstack.cloud.role_assignment:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
role: anotherrole
|
|
||||||
user: ansible_user
|
|
||||||
user_domain: "{{ specific_user.user.domain_id }}"
|
|
||||||
project: ansible_project
|
|
||||||
project_domain: default
|
|
||||||
register: role_assignment
|
|
||||||
|
|
||||||
- name: Delete group in default domain
|
- name: Delete group in default domain
|
||||||
openstack.cloud.identity_group:
|
openstack.cloud.identity_group:
|
||||||
cloud: "{{ cloud }}"
|
cloud: "{{ cloud }}"
|
||||||
@@ -205,10 +171,3 @@
|
|||||||
cloud: "{{ cloud }}"
|
cloud: "{{ cloud }}"
|
||||||
state: absent
|
state: absent
|
||||||
name: ansible_domain
|
name: ansible_domain
|
||||||
|
|
||||||
- name: Delete project
|
|
||||||
openstack.cloud.project:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
state: absent
|
|
||||||
name: ansible_project
|
|
||||||
|
|
||||||
|
|||||||
@@ -558,46 +558,6 @@
|
|||||||
assert:
|
assert:
|
||||||
that: router is not changed
|
that: router is not changed
|
||||||
|
|
||||||
- name: Create router without explicit IP address
|
|
||||||
openstack.cloud.router:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
state: present
|
|
||||||
name: "{{ router_name }}"
|
|
||||||
enable_snat: false
|
|
||||||
interfaces:
|
|
||||||
- shade_subnet1
|
|
||||||
network: "{{ external_network_name }}"
|
|
||||||
external_fixed_ips:
|
|
||||||
- subnet_id: shade_subnet5
|
|
||||||
register: router
|
|
||||||
|
|
||||||
- name: Assert idempotent module
|
|
||||||
assert:
|
|
||||||
that: router is changed
|
|
||||||
|
|
||||||
- name: Update router without explicit IP address
|
|
||||||
openstack.cloud.router:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
state: present
|
|
||||||
name: "{{ router_name }}"
|
|
||||||
enable_snat: false
|
|
||||||
interfaces:
|
|
||||||
- shade_subnet1
|
|
||||||
network: "{{ external_network_name }}"
|
|
||||||
external_fixed_ips:
|
|
||||||
- subnet_id: shade_subnet5
|
|
||||||
register: router
|
|
||||||
|
|
||||||
- name: Assert idempotent module
|
|
||||||
assert:
|
|
||||||
that: router is not changed
|
|
||||||
|
|
||||||
- name: Delete router
|
|
||||||
openstack.cloud.router:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
state: absent
|
|
||||||
name: "{{ router_name }}"
|
|
||||||
|
|
||||||
- name: Create router with simple interface
|
- name: Create router with simple interface
|
||||||
openstack.cloud.router:
|
openstack.cloud.router:
|
||||||
cloud: "{{ cloud }}"
|
cloud: "{{ cloud }}"
|
||||||
@@ -760,5 +720,3 @@
|
|||||||
name: "{{ external_network_name }}"
|
name: "{{ external_network_name }}"
|
||||||
|
|
||||||
- include_tasks: shared_network.yml
|
- include_tasks: shared_network.yml
|
||||||
|
|
||||||
- include_tasks: shared_ext_network.yml
|
|
||||||
|
|||||||
@@ -1,99 +0,0 @@
|
|||||||
---
|
|
||||||
# Test the case where we have a shared external network in one project used as
|
|
||||||
# the gateway on a router in a second project.
|
|
||||||
# See https://bugs.launchpad.net/ansible-collections-openstack/+bug/2049658
|
|
||||||
|
|
||||||
- name: Create the first project
|
|
||||||
openstack.cloud.project:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
state: present
|
|
||||||
name: "shared_ext_net_test_1"
|
|
||||||
description: "Project that contains the external network to be shared"
|
|
||||||
domain: default
|
|
||||||
is_enabled: True
|
|
||||||
register: project_1
|
|
||||||
|
|
||||||
- name: Create the external network to be shared
|
|
||||||
openstack.cloud.network:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
state: present
|
|
||||||
name: "{{ external_network_name }}"
|
|
||||||
project: "shared_ext_net_test_1"
|
|
||||||
external: true
|
|
||||||
shared: true
|
|
||||||
register: shared_ext_network
|
|
||||||
|
|
||||||
- name: Create subnet on external network
|
|
||||||
openstack.cloud.subnet:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
state: present
|
|
||||||
network_name: "{{ shared_ext_network.id }}"
|
|
||||||
name: "shared_ext_subnet"
|
|
||||||
project: "shared_ext_net_test_1"
|
|
||||||
cidr: "10.6.6.0/24"
|
|
||||||
register: shared_subnet
|
|
||||||
|
|
||||||
- name: Create the second project
|
|
||||||
openstack.cloud.project:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
state: present
|
|
||||||
name: "shared_ext_net_test_2"
|
|
||||||
description: "Project that contains the subnet to be shared"
|
|
||||||
domain: default
|
|
||||||
is_enabled: True
|
|
||||||
register: project_2
|
|
||||||
|
|
||||||
- name: Create router with gateway on shared external network
|
|
||||||
openstack.cloud.router:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
state: present
|
|
||||||
name: "shared_ext_net_test2_router"
|
|
||||||
project: "shared_ext_net_test_2"
|
|
||||||
network: "{{ external_network_name }}"
|
|
||||||
register: router
|
|
||||||
|
|
||||||
- name: Gather routers info
|
|
||||||
openstack.cloud.routers_info:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
name: "shared_ext_net_test2_router"
|
|
||||||
register: routers
|
|
||||||
|
|
||||||
- name: Verify routers info
|
|
||||||
assert:
|
|
||||||
that:
|
|
||||||
- routers.routers.0.id == router.router.id
|
|
||||||
- routers.routers.0.external_gateway_info.external_fixed_ips|length == 1
|
|
||||||
|
|
||||||
- name: Delete router
|
|
||||||
openstack.cloud.router:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
state: absent
|
|
||||||
name: "shared_ext_net_test2_router"
|
|
||||||
project: "shared_ext_net_test_2"
|
|
||||||
|
|
||||||
- name: Delete subnet
|
|
||||||
openstack.cloud.subnet:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
state: absent
|
|
||||||
network_name: "{{ shared_ext_network.id }}"
|
|
||||||
name: "shared_ext_subnet"
|
|
||||||
project: "shared_ext_net_test_1"
|
|
||||||
|
|
||||||
- name: Delete network
|
|
||||||
openstack.cloud.network:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
state: absent
|
|
||||||
name: "{{ external_network_name }}"
|
|
||||||
project: "shared_ext_net_test_1"
|
|
||||||
|
|
||||||
- name: Delete project 2
|
|
||||||
openstack.cloud.project:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
state: absent
|
|
||||||
name: "shared_ext_net_test_2"
|
|
||||||
|
|
||||||
- name: Delete project 1
|
|
||||||
openstack.cloud.project:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
state: absent
|
|
||||||
name: "shared_ext_net_test_1"
|
|
||||||
@@ -399,9 +399,6 @@
|
|||||||
- port-id: "{{ port.port.id }}"
|
- port-id: "{{ port.port.id }}"
|
||||||
reuse_ips: false
|
reuse_ips: false
|
||||||
state: present
|
state: present
|
||||||
tags:
|
|
||||||
- first
|
|
||||||
- second
|
|
||||||
wait: true
|
wait: true
|
||||||
register: server
|
register: server
|
||||||
|
|
||||||
@@ -416,7 +413,6 @@
|
|||||||
|selectattr('OS-EXT-IPS:type', 'equalto', 'floating')
|
|selectattr('OS-EXT-IPS:type', 'equalto', 'floating')
|
||||||
|map(attribute='addr')
|
|map(attribute='addr')
|
||||||
|list|length == 0
|
|list|length == 0
|
||||||
- server.server.tags == ["first", "second"]
|
|
||||||
|
|
||||||
- name: Find all floating ips for debugging
|
- name: Find all floating ips for debugging
|
||||||
openstack.cloud.floating_ip_info:
|
openstack.cloud.floating_ip_info:
|
||||||
@@ -458,8 +454,6 @@
|
|||||||
- '{{ server_security_group }}'
|
- '{{ server_security_group }}'
|
||||||
- '{{ server_alt_security_group }}'
|
- '{{ server_alt_security_group }}'
|
||||||
state: present
|
state: present
|
||||||
tags:
|
|
||||||
- yellow
|
|
||||||
wait: true
|
wait: true
|
||||||
register: server_updated
|
register: server_updated
|
||||||
|
|
||||||
@@ -481,7 +475,6 @@
|
|||||||
- server_updated.server.addresses[server_network]|length == 2
|
- server_updated.server.addresses[server_network]|length == 2
|
||||||
- port.port.fixed_ips[0].ip_address in
|
- port.port.fixed_ips[0].ip_address in
|
||||||
server_updated.server.addresses[server_network]|map(attribute='addr')
|
server_updated.server.addresses[server_network]|map(attribute='addr')
|
||||||
- server_updated.server.tags == ['yellow']
|
|
||||||
# TODO: Verify networks once openstacksdk's issue #2010352 has been solved
|
# TODO: Verify networks once openstacksdk's issue #2010352 has been solved
|
||||||
# Ref.: https://storyboard.openstack.org/#!/story/2010352
|
# Ref.: https://storyboard.openstack.org/#!/story/2010352
|
||||||
#- server_updated.server.addresses.public|length > 0
|
#- server_updated.server.addresses.public|length > 0
|
||||||
@@ -516,8 +509,6 @@
|
|||||||
- '{{ server_security_group }}'
|
- '{{ server_security_group }}'
|
||||||
- '{{ server_alt_security_group }}'
|
- '{{ server_alt_security_group }}'
|
||||||
state: present
|
state: present
|
||||||
tags:
|
|
||||||
- yellow
|
|
||||||
wait: true
|
wait: true
|
||||||
register: server_updated_again
|
register: server_updated_again
|
||||||
|
|
||||||
@@ -526,7 +517,6 @@
|
|||||||
that:
|
that:
|
||||||
- server.server.id == server_updated_again.server.id
|
- server.server.id == server_updated_again.server.id
|
||||||
- server_updated_again is not changed
|
- server_updated_again is not changed
|
||||||
- server_updated_again.server.tags == ['yellow']
|
|
||||||
|
|
||||||
# TODO: Drop failure test once openstacksdk's issue #2010352 has been solved
|
# TODO: Drop failure test once openstacksdk's issue #2010352 has been solved
|
||||||
# Ref.: https://storyboard.openstack.org/#!/story/2010352
|
# Ref.: https://storyboard.openstack.org/#!/story/2010352
|
||||||
|
|||||||
@@ -460,14 +460,19 @@
|
|||||||
register: server
|
register: server
|
||||||
ignore_errors: true
|
ignore_errors: true
|
||||||
|
|
||||||
|
- name: Assert shelve offload server
|
||||||
|
assert:
|
||||||
|
that:
|
||||||
|
- ((server is success)
|
||||||
|
or (server is not success
|
||||||
|
and "Cannot 'shelveOffload' instance" in server.msg
|
||||||
|
and "while it is in vm_state shelved_offloaded" in server.msg))
|
||||||
|
|
||||||
- name: Get info about server
|
- name: Get info about server
|
||||||
openstack.cloud.server_info:
|
openstack.cloud.server_info:
|
||||||
cloud: "{{ cloud }}"
|
cloud: "{{ cloud }}"
|
||||||
server: ansible_server
|
server: ansible_server
|
||||||
register: servers
|
register: servers
|
||||||
until: servers.servers.0.task_state == none
|
|
||||||
retries: 30
|
|
||||||
delay: 10
|
|
||||||
|
|
||||||
- name: Ensure status for server is SHELVED_OFFLOADED
|
- name: Ensure status for server is SHELVED_OFFLOADED
|
||||||
# no change if server has been offloaded automatically after first shelve command
|
# no change if server has been offloaded automatically after first shelve command
|
||||||
@@ -553,7 +558,7 @@
|
|||||||
assert:
|
assert:
|
||||||
that:
|
that:
|
||||||
- servers.servers.0.status == 'ACTIVE'
|
- servers.servers.0.status == 'ACTIVE'
|
||||||
- server is changed
|
- server is not changed
|
||||||
|
|
||||||
- name: Reboot server (HARD)
|
- name: Reboot server (HARD)
|
||||||
openstack.cloud.server_action:
|
openstack.cloud.server_action:
|
||||||
@@ -573,7 +578,7 @@
|
|||||||
assert:
|
assert:
|
||||||
that:
|
that:
|
||||||
- servers.servers.0.status == 'ACTIVE'
|
- servers.servers.0.status == 'ACTIVE'
|
||||||
- server is changed
|
- server is not changed
|
||||||
|
|
||||||
- name: Delete server
|
- name: Delete server
|
||||||
openstack.cloud.server:
|
openstack.cloud.server:
|
||||||
|
|||||||
@@ -1,5 +0,0 @@
|
|||||||
---
|
|
||||||
share_backend_name: GENERIC_BACKEND
|
|
||||||
share_type_name: test_share_type
|
|
||||||
share_type_description: Test share type for CI
|
|
||||||
share_type_alt_description: Changed test share type
|
|
||||||
@@ -1,130 +0,0 @@
|
|||||||
---
|
|
||||||
- name: Create share type
|
|
||||||
openstack.cloud.share_type:
|
|
||||||
name: "{{ share_type_name }}"
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
state: present
|
|
||||||
extra_specs:
|
|
||||||
share_backend_name: "{{ share_backend_name }}"
|
|
||||||
snapshot_support: true
|
|
||||||
create_share_from_snapshot_support: true
|
|
||||||
description: "{{ share_type_description }}"
|
|
||||||
register: the_result
|
|
||||||
|
|
||||||
- name: Check created share type
|
|
||||||
vars:
|
|
||||||
the_share_type: "{{ the_result.share_type }}"
|
|
||||||
ansible.builtin.assert:
|
|
||||||
that:
|
|
||||||
- "'id' in the_result.share_type"
|
|
||||||
- the_share_type.description == share_type_description
|
|
||||||
- the_share_type.is_public == True
|
|
||||||
- the_share_type.name == share_type_name
|
|
||||||
- the_share_type.extra_specs['share_backend_name'] == share_backend_name
|
|
||||||
- the_share_type.extra_specs['snapshot_support'] == "True"
|
|
||||||
- the_share_type.extra_specs['create_share_from_snapshot_support'] == "True"
|
|
||||||
success_msg: >-
|
|
||||||
Created share type: {{ the_result.share_type.id }},
|
|
||||||
Name: {{ the_result.share_type.name }},
|
|
||||||
Description: {{ the_result.share_type.description }}
|
|
||||||
|
|
||||||
- name: Test share type info module
|
|
||||||
openstack.cloud.share_type_info:
|
|
||||||
name: "{{ share_type_name }}"
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
register: info_result
|
|
||||||
|
|
||||||
- name: Check share type info result
|
|
||||||
ansible.builtin.assert:
|
|
||||||
that:
|
|
||||||
- info_result.share_type.id == the_result.share_type.id
|
|
||||||
- info_result.share_type.name == share_type_name
|
|
||||||
- info_result.share_type.description == share_type_description
|
|
||||||
success_msg: "Share type info retrieved successfully"
|
|
||||||
|
|
||||||
- name: Test, check idempotency
|
|
||||||
openstack.cloud.share_type:
|
|
||||||
name: "{{ share_type_name }}"
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
state: present
|
|
||||||
extra_specs:
|
|
||||||
share_backend_name: "{{ share_backend_name }}"
|
|
||||||
snapshot_support: true
|
|
||||||
create_share_from_snapshot_support: true
|
|
||||||
description: "{{ share_type_description }}"
|
|
||||||
is_public: true
|
|
||||||
register: the_result
|
|
||||||
|
|
||||||
- name: Check result.changed is false
|
|
||||||
ansible.builtin.assert:
|
|
||||||
that:
|
|
||||||
- the_result.changed == false
|
|
||||||
success_msg: "Request with the same details lead to no changes"
|
|
||||||
|
|
||||||
- name: Add extra spec
|
|
||||||
openstack.cloud.share_type:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
name: "{{ share_type_name }}"
|
|
||||||
state: present
|
|
||||||
extra_specs:
|
|
||||||
share_backend_name: "{{ share_backend_name }}"
|
|
||||||
snapshot_support: true
|
|
||||||
create_share_from_snapshot_support: true
|
|
||||||
some_spec: fake_spec
|
|
||||||
description: "{{ share_type_alt_description }}"
|
|
||||||
is_public: true
|
|
||||||
register: the_result
|
|
||||||
|
|
||||||
- name: Check share type extra spec
|
|
||||||
ansible.builtin.assert:
|
|
||||||
that:
|
|
||||||
- "'some_spec' in the_result.share_type.extra_specs"
|
|
||||||
- the_result.share_type.extra_specs["some_spec"] == "fake_spec"
|
|
||||||
- the_result.share_type.description == share_type_alt_description
|
|
||||||
success_msg: >-
|
|
||||||
New extra specs: {{ the_result.share_type.extra_specs }}
|
|
||||||
|
|
||||||
- name: Remove extra spec by updating with reduced set
|
|
||||||
openstack.cloud.share_type:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
name: "{{ share_type_name }}"
|
|
||||||
state: present
|
|
||||||
extra_specs:
|
|
||||||
share_backend_name: "{{ share_backend_name }}"
|
|
||||||
snapshot_support: true
|
|
||||||
create_share_from_snapshot_support: true
|
|
||||||
description: "{{ share_type_alt_description }}"
|
|
||||||
is_public: true
|
|
||||||
register: the_result
|
|
||||||
|
|
||||||
- name: Check extra spec was removed
|
|
||||||
ansible.builtin.assert:
|
|
||||||
that:
|
|
||||||
- "'some_spec' not in the_result.share_type.extra_specs"
|
|
||||||
success_msg: "Extra spec was successfully removed"
|
|
||||||
|
|
||||||
- name: Delete share type
|
|
||||||
openstack.cloud.share_type:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
name: "{{ share_type_name }}"
|
|
||||||
state: absent
|
|
||||||
register: the_result
|
|
||||||
|
|
||||||
- name: Check deletion was successful
|
|
||||||
ansible.builtin.assert:
|
|
||||||
that:
|
|
||||||
- the_result.changed == true
|
|
||||||
success_msg: "Share type deleted successfully"
|
|
||||||
|
|
||||||
- name: Test deletion idempotency
|
|
||||||
openstack.cloud.share_type:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
name: "{{ share_type_name }}"
|
|
||||||
state: absent
|
|
||||||
register: the_result
|
|
||||||
|
|
||||||
- name: Check deletion idempotency
|
|
||||||
ansible.builtin.assert:
|
|
||||||
that:
|
|
||||||
- the_result.changed == false
|
|
||||||
success_msg: "Deletion idempotency works correctly"
|
|
||||||
@@ -25,4 +25,3 @@ expected_fields:
|
|||||||
- updated_at
|
- updated_at
|
||||||
- use_default_subnet_pool
|
- use_default_subnet_pool
|
||||||
subnet_name: shade_subnet
|
subnet_name: shade_subnet
|
||||||
segment_name: example_segment
|
|
||||||
|
|||||||
@@ -17,20 +17,10 @@
|
|||||||
name: "{{ network_name }}"
|
name: "{{ network_name }}"
|
||||||
state: present
|
state: present
|
||||||
|
|
||||||
- name: Create network segment {{ segment_name }}
|
|
||||||
openstack.cloud.network_segment:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
name: "{{ segment_name }}"
|
|
||||||
network: "{{ network_name }}"
|
|
||||||
network_type: "vxlan"
|
|
||||||
segmentation_id: 1000
|
|
||||||
state: present
|
|
||||||
|
|
||||||
- name: Create subnet {{ subnet_name }} on network {{ network_name }}
|
- name: Create subnet {{ subnet_name }} on network {{ network_name }}
|
||||||
openstack.cloud.subnet:
|
openstack.cloud.subnet:
|
||||||
cloud: "{{ cloud }}"
|
cloud: "{{ cloud }}"
|
||||||
network_name: "{{ network_name }}"
|
network_name: "{{ network_name }}"
|
||||||
network_segment: "{{ segment_name }}"
|
|
||||||
name: "{{ subnet_name }}"
|
name: "{{ subnet_name }}"
|
||||||
state: present
|
state: present
|
||||||
enable_dhcp: "{{ enable_subnet_dhcp }}"
|
enable_dhcp: "{{ enable_subnet_dhcp }}"
|
||||||
@@ -152,48 +142,6 @@
|
|||||||
assert:
|
assert:
|
||||||
that: subnet is not changed
|
that: subnet is not changed
|
||||||
|
|
||||||
- name: Create subnet {{ subnet_name }} on network {{ network_name }} without gateway IP
|
|
||||||
openstack.cloud.subnet:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
network_name: "{{ network_name }}"
|
|
||||||
name: "{{ subnet_name }}"
|
|
||||||
state: present
|
|
||||||
cidr: 192.168.0.0/24
|
|
||||||
disable_gateway_ip: true
|
|
||||||
register: subnet
|
|
||||||
|
|
||||||
- name: Assert changed
|
|
||||||
assert:
|
|
||||||
that: subnet is changed
|
|
||||||
|
|
||||||
- name: Create subnet {{ subnet_name }} on network {{ network_name }} without gateway IP
|
|
||||||
openstack.cloud.subnet:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
network_name: "{{ network_name }}"
|
|
||||||
name: "{{ subnet_name }}"
|
|
||||||
state: present
|
|
||||||
cidr: 192.168.0.0/24
|
|
||||||
disable_gateway_ip: true
|
|
||||||
register: subnet
|
|
||||||
|
|
||||||
- name: Assert not changed
|
|
||||||
assert:
|
|
||||||
that: subnet is not changed
|
|
||||||
|
|
||||||
- name: Delete subnet {{ subnet_name }} again
|
|
||||||
openstack.cloud.subnet:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
name: "{{ subnet_name }}"
|
|
||||||
state: absent
|
|
||||||
register: subnet
|
|
||||||
|
|
||||||
- name: Delete network segment {{ segment_name }}
|
|
||||||
openstack.cloud.network_segment:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
name: "{{ segment_name }}"
|
|
||||||
network: "{{ network_name }}"
|
|
||||||
state: absent
|
|
||||||
|
|
||||||
- name: Delete network {{ network_name }}
|
- name: Delete network {{ network_name }}
|
||||||
openstack.cloud.network:
|
openstack.cloud.network:
|
||||||
cloud: "{{ cloud }}"
|
cloud: "{{ cloud }}"
|
||||||
@@ -202,6 +150,3 @@
|
|||||||
|
|
||||||
- name: Subnet Allocation
|
- name: Subnet Allocation
|
||||||
include_tasks: subnet-allocation.yml
|
include_tasks: subnet-allocation.yml
|
||||||
|
|
||||||
- name: Subnet Allocations from Subnet Pool
|
|
||||||
include_tasks: subnet-pool.yaml
|
|
||||||
|
|||||||
@@ -62,81 +62,6 @@
|
|||||||
- subnet_result.subnets[0].allocation_pools.0.start == '192.168.0.2'
|
- subnet_result.subnets[0].allocation_pools.0.start == '192.168.0.2'
|
||||||
- subnet_result.subnets[0].allocation_pools.0.end == '192.168.0.8'
|
- subnet_result.subnets[0].allocation_pools.0.end == '192.168.0.8'
|
||||||
|
|
||||||
- name: Delete subnet {{ subnet_name }}
|
|
||||||
openstack.cloud.subnet:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
name: "{{ subnet_name }}"
|
|
||||||
state: absent
|
|
||||||
|
|
||||||
- name: Create subnet {{ subnet_name }} with multiple allocation pools on network {{ network_name }}
|
|
||||||
openstack.cloud.subnet:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
network_name: "{{ network_name }}"
|
|
||||||
enable_dhcp: "{{ enable_subnet_dhcp }}"
|
|
||||||
name: "{{ subnet_name }}"
|
|
||||||
state: present
|
|
||||||
cidr: 192.168.0.0/24
|
|
||||||
gateway_ip: 192.168.0.1
|
|
||||||
allocation_pools:
|
|
||||||
- start: 192.168.0.2
|
|
||||||
end: 192.168.0.4
|
|
||||||
- start: 192.168.0.10
|
|
||||||
end: 192.168.0.12
|
|
||||||
|
|
||||||
- name: Create subnet {{ subnet_name }} on network {{ network_name }} again
|
|
||||||
openstack.cloud.subnet:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
network_name: "{{ network_name }}"
|
|
||||||
enable_dhcp: "{{ enable_subnet_dhcp }}"
|
|
||||||
name: "{{ subnet_name }}"
|
|
||||||
state: present
|
|
||||||
cidr: 192.168.0.0/24
|
|
||||||
gateway_ip: 192.168.0.1
|
|
||||||
allocation_pools:
|
|
||||||
- start: 192.168.0.2
|
|
||||||
end: 192.168.0.4
|
|
||||||
- start: 192.168.0.10
|
|
||||||
end: 192.168.0.12
|
|
||||||
register: idem2
|
|
||||||
|
|
||||||
- name: Update subnet {{ subnet_name }} allocation pools
|
|
||||||
openstack.cloud.subnet:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
network_name: "{{ network_name }}"
|
|
||||||
name: "{{ subnet_name }}"
|
|
||||||
state: present
|
|
||||||
cidr: 192.168.0.0/24
|
|
||||||
gateway_ip: 192.168.0.1
|
|
||||||
allocation_pools:
|
|
||||||
- start: 192.168.0.2
|
|
||||||
end: 192.168.0.8
|
|
||||||
- start: 192.168.0.10
|
|
||||||
end: 192.168.0.16
|
|
||||||
|
|
||||||
- name: Get Subnet Info
|
|
||||||
openstack.cloud.subnets_info:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
name: "{{ subnet_name }}"
|
|
||||||
register: subnet_result
|
|
||||||
|
|
||||||
# TODO(sshnaidm): Uncomment this section when the issue with allocation_pools is fixed
|
|
||||||
# - name: Verify Subnet Allocation Pools Exist
|
|
||||||
# assert:
|
|
||||||
# that:
|
|
||||||
# - idem2 is not changed
|
|
||||||
# - subnet_result.subnets is defined
|
|
||||||
# - subnet_result.subnets | length == 1
|
|
||||||
# - subnet_result.subnets[0].allocation_pools is defined
|
|
||||||
# - subnet_result.subnets[0].allocation_pools | length == 2
|
|
||||||
|
|
||||||
# - name: Verify Subnet Allocation Pools
|
|
||||||
# assert:
|
|
||||||
# that:
|
|
||||||
# - (subnet_result.subnets[0].allocation_pools.0.start == '192.168.0.2' and subnet_result.subnets[0].allocation_pools.0.end == '192.168.0.8') or
|
|
||||||
# (subnet_result.subnets[0].allocation_pools.0.start == '192.168.0.10' and subnet_result.subnets[0].allocation_pools.0.end == '192.168.0.16')
|
|
||||||
# - (subnet_result.subnets[0].allocation_pools.1.start == '192.168.0.2' and subnet_result.subnets[0].allocation_pools.1.end == '192.168.0.8') or
|
|
||||||
# (subnet_result.subnets[0].allocation_pools.1.start == '192.168.0.10' and subnet_result.subnets[0].allocation_pools.1.end == '192.168.0.16')
|
|
||||||
|
|
||||||
- name: Delete subnet {{ subnet_name }}
|
- name: Delete subnet {{ subnet_name }}
|
||||||
openstack.cloud.subnet:
|
openstack.cloud.subnet:
|
||||||
cloud: "{{ cloud }}"
|
cloud: "{{ cloud }}"
|
||||||
|
|||||||
@@ -1,168 +0,0 @@
|
|||||||
---
|
|
||||||
# This test cover case when subnet is constructed
|
|
||||||
# with few prefixes and neutron API is required
|
|
||||||
# CIDR parameter to be used together with subnet pool.
|
|
||||||
|
|
||||||
- name: Create network {{ network_name }}
|
|
||||||
openstack.cloud.network:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
name: "{{ network_name }}"
|
|
||||||
state: present
|
|
||||||
|
|
||||||
- name: Create address_scope
|
|
||||||
openstack.cloud.address_scope:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
name: "{{ address_scope_name }}"
|
|
||||||
shared: false
|
|
||||||
ip_version: "4"
|
|
||||||
register: create_address_scope
|
|
||||||
|
|
||||||
- name: Create subnet pool
|
|
||||||
openstack.cloud.subnet_pool:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
name: "{{ subnet_pool_name }}"
|
|
||||||
is_shared: false
|
|
||||||
address_scope: "{{ address_scope_name }}"
|
|
||||||
prefixes:
|
|
||||||
- 192.168.0.0/24
|
|
||||||
- 192.168.42.0/24
|
|
||||||
register: subnet_pool
|
|
||||||
|
|
||||||
- name: Create subnet {{ subnet_name }} on network {{ network_name }} from subnet pool {{ subnet_pool_name }}
|
|
||||||
openstack.cloud.subnet:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
network_name: "{{ network_name }}"
|
|
||||||
enable_dhcp: "{{ enable_subnet_dhcp }}"
|
|
||||||
name: "{{ subnet_name }}"
|
|
||||||
state: present
|
|
||||||
cidr: 192.168.42.0/24 # we want specific cidr from subnet pool
|
|
||||||
ip_version: 4
|
|
||||||
subnet_pool: "{{ subnet_pool_name }}"
|
|
||||||
gateway_ip: 192.168.42.1
|
|
||||||
allocation_pool_start: 192.168.42.2
|
|
||||||
allocation_pool_end: 192.168.42.4
|
|
||||||
|
|
||||||
- name: Create subnet {{ subnet_name }} on network {{ network_name }} from subnet pool {{ subnet_pool_name }} again
|
|
||||||
openstack.cloud.subnet:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
network_name: "{{ network_name }}"
|
|
||||||
enable_dhcp: "{{ enable_subnet_dhcp }}"
|
|
||||||
name: "{{ subnet_name }}"
|
|
||||||
state: present
|
|
||||||
cidr: 192.168.42.0/24
|
|
||||||
ip_version: 4
|
|
||||||
subnet_pool: "{{ subnet_pool_name }}"
|
|
||||||
gateway_ip: 192.168.42.1
|
|
||||||
allocation_pool_start: 192.168.42.2
|
|
||||||
allocation_pool_end: 192.168.42.4
|
|
||||||
register: idem1
|
|
||||||
|
|
||||||
- name: Get Subnet Info
|
|
||||||
openstack.cloud.subnets_info:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
name: "{{ subnet_name }}"
|
|
||||||
register: subnet_result
|
|
||||||
|
|
||||||
- name: Verify Subnet Allocation Pools Exist
|
|
||||||
assert:
|
|
||||||
that:
|
|
||||||
- idem1 is not changed
|
|
||||||
- subnet_result.subnets is defined
|
|
||||||
- subnet_result.subnets | length == 1
|
|
||||||
- subnet_result.subnets[0].allocation_pools is defined
|
|
||||||
- subnet_result.subnets[0].allocation_pools | length == 1
|
|
||||||
|
|
||||||
- name: Verify Subnet Allocation Pools
|
|
||||||
assert:
|
|
||||||
that:
|
|
||||||
- subnet_result.subnets[0].allocation_pools.0.start == '192.168.42.2'
|
|
||||||
- subnet_result.subnets[0].allocation_pools.0.end == '192.168.42.4'
|
|
||||||
|
|
||||||
- name: Delete subnet {{ subnet_name }}
|
|
||||||
openstack.cloud.subnet:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
name: "{{ subnet_name }}"
|
|
||||||
state: absent
|
|
||||||
|
|
||||||
- name: Create subnet {{ subnet_name }} with multiple allocation pools on network {{ network_name }} from subnet pool {{ subnet_pool_name }}
|
|
||||||
openstack.cloud.subnet:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
network_name: "{{ network_name }}"
|
|
||||||
enable_dhcp: "{{ enable_subnet_dhcp }}"
|
|
||||||
name: "{{ subnet_name }}"
|
|
||||||
state: present
|
|
||||||
cidr: 192.168.42.0/24 # we want specific cidr from subnet pool
|
|
||||||
ip_version: 4
|
|
||||||
subnet_pool: "{{ subnet_pool_name }}"
|
|
||||||
gateway_ip: 192.168.42.1
|
|
||||||
allocation_pools:
|
|
||||||
- start: 192.168.42.2
|
|
||||||
end: 192.168.42.4
|
|
||||||
- start: 192.168.42.6
|
|
||||||
end: 192.168.42.8
|
|
||||||
|
|
||||||
- name: Create subnet {{ subnet_name }} on network {{ network_name }} from subnet pool {{ subnet_pool_name }} again
|
|
||||||
openstack.cloud.subnet:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
network_name: "{{ network_name }}"
|
|
||||||
enable_dhcp: "{{ enable_subnet_dhcp }}"
|
|
||||||
name: "{{ subnet_name }}"
|
|
||||||
state: present
|
|
||||||
cidr: 192.168.42.0/24
|
|
||||||
ip_version: 4
|
|
||||||
subnet_pool: "{{ subnet_pool_name }}"
|
|
||||||
gateway_ip: 192.168.42.1
|
|
||||||
allocation_pools:
|
|
||||||
- start: 192.168.42.2
|
|
||||||
end: 192.168.42.4
|
|
||||||
- start: 192.168.42.6
|
|
||||||
end: 192.168.42.8
|
|
||||||
register: idem2
|
|
||||||
|
|
||||||
- name: Get Subnet Info
|
|
||||||
openstack.cloud.subnets_info:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
name: "{{ subnet_name }}"
|
|
||||||
register: subnet_result
|
|
||||||
|
|
||||||
# NOT(gtema) Temporarily disable the check to land other gate fix
|
|
||||||
#- name: Verify Subnet Allocation Pools Exist
|
|
||||||
# assert:
|
|
||||||
# that:
|
|
||||||
# - idem2 is not changed
|
|
||||||
# - subnet_result.subnets is defined
|
|
||||||
# - subnet_result.subnets | length == 1
|
|
||||||
# - subnet_result.subnets[0].allocation_pools is defined
|
|
||||||
# - subnet_result.subnets[0].allocation_pools | length == 2
|
|
||||||
#
|
|
||||||
#- name: Verify Subnet Allocation Pools
|
|
||||||
# assert:
|
|
||||||
# that:
|
|
||||||
# - (subnet_result.subnets[0].allocation_pools.0.start == '192.168.42.2' and subnet_result.subnets[0].allocation_pools.0.end == '192.168.42.4') or
|
|
||||||
# (subnet_result.subnets[0].allocation_pools.0.start == '192.168.42.6' and subnet_result.subnets[0].allocation_pools.0.end == '192.168.42.8')
|
|
||||||
# - (subnet_result.subnets[0].allocation_pools.1.start == '192.168.42.2' and subnet_result.subnets[0].allocation_pools.1.end == '192.168.42.4') or
|
|
||||||
# (subnet_result.subnets[0].allocation_pools.1.start == '192.168.42.6' and subnet_result.subnets[0].allocation_pools.1.end == '192.168.42.8')
|
|
||||||
|
|
||||||
- name: Delete subnet {{ subnet_name }}
|
|
||||||
openstack.cloud.subnet:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
name: "{{ subnet_name }}"
|
|
||||||
state: absent
|
|
||||||
|
|
||||||
- name: Delete created subnet pool
|
|
||||||
openstack.cloud.subnet_pool:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
name: "{{ subnet_pool_name }}"
|
|
||||||
state: absent
|
|
||||||
|
|
||||||
- name: Delete created address scope
|
|
||||||
openstack.cloud.address_scope:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
name: "{{ address_scope_name }}"
|
|
||||||
state: absent
|
|
||||||
|
|
||||||
- name: Delete network {{ network_name }}
|
|
||||||
openstack.cloud.network:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
name: "{{ network_name }}"
|
|
||||||
state: absent
|
|
||||||
@@ -1 +0,0 @@
|
|||||||
trait_name: CUSTOM_ANSIBLE_TRAIT
|
|
||||||
@@ -1,28 +0,0 @@
|
|||||||
---
|
|
||||||
- name: Create trait
|
|
||||||
openstack.cloud.trait:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
state: present
|
|
||||||
id: "{{ trait_name }}"
|
|
||||||
until: result is success
|
|
||||||
retries: 5
|
|
||||||
delay: 20
|
|
||||||
register: result
|
|
||||||
|
|
||||||
- name: Assert trait
|
|
||||||
assert:
|
|
||||||
that:
|
|
||||||
- "'name' in result.trait"
|
|
||||||
- "result.trait.id == trait_name"
|
|
||||||
|
|
||||||
- name: Remove trait
|
|
||||||
openstack.cloud.trait:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
state: absent
|
|
||||||
id: "{{ trait_name }}"
|
|
||||||
register: result1
|
|
||||||
|
|
||||||
- name: Assert trait removed
|
|
||||||
assert:
|
|
||||||
that:
|
|
||||||
- "'trait' not in result1"
|
|
||||||
@@ -1,21 +0,0 @@
|
|||||||
expected_fields:
|
|
||||||
- created_at
|
|
||||||
- description
|
|
||||||
- id
|
|
||||||
- is_admin_state_up
|
|
||||||
- name
|
|
||||||
- port_id
|
|
||||||
- project_id
|
|
||||||
- revision_number
|
|
||||||
- status
|
|
||||||
- sub_ports
|
|
||||||
- tags
|
|
||||||
- tenant_id
|
|
||||||
- updated_at
|
|
||||||
trunk_name: ansible_trunk
|
|
||||||
parent_network_name: ansible_parent_port_network
|
|
||||||
parent_subnet_name: ansible_parent_port_subnet
|
|
||||||
parent_port_name: ansible_parent_port
|
|
||||||
subport_network_name: ansible_subport_network
|
|
||||||
subport_subnet_name: ansible_subport_subnet
|
|
||||||
subport_name: ansible_subport
|
|
||||||
@@ -1,131 +0,0 @@
|
|||||||
---
|
|
||||||
- name: Create parent network
|
|
||||||
openstack.cloud.network:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
state: present
|
|
||||||
name: "{{ parent_network_name }}"
|
|
||||||
external: true
|
|
||||||
register: parent_network
|
|
||||||
|
|
||||||
- name: Create parent subnet
|
|
||||||
openstack.cloud.subnet:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
state: present
|
|
||||||
name: "{{ parent_subnet_name }}"
|
|
||||||
network_name: "{{ parent_network_name }}"
|
|
||||||
cidr: 10.5.5.0/24
|
|
||||||
register: parent_subnet
|
|
||||||
|
|
||||||
- name: Create parent port
|
|
||||||
openstack.cloud.port:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
state: present
|
|
||||||
name: "{{ parent_port_name }}"
|
|
||||||
network: "{{ parent_network_name }}"
|
|
||||||
fixed_ips:
|
|
||||||
- ip_address: 10.5.5.69
|
|
||||||
register: parent_port
|
|
||||||
|
|
||||||
- name: Create subport network
|
|
||||||
openstack.cloud.network:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
state: present
|
|
||||||
name: "{{ subport_network_name }}"
|
|
||||||
external: true
|
|
||||||
register: subport_network
|
|
||||||
|
|
||||||
- name: Create subport subnet
|
|
||||||
openstack.cloud.subnet:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
state: present
|
|
||||||
name: "{{ subport_subnet_name }}"
|
|
||||||
network_name: "{{ subport_network_name }}"
|
|
||||||
cidr: 10.5.6.0/24
|
|
||||||
register: subport_subnet
|
|
||||||
|
|
||||||
- name: Create subport
|
|
||||||
openstack.cloud.port:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
state: present
|
|
||||||
name: "{{ subport_name }}"
|
|
||||||
network: "{{ subport_network_name }}"
|
|
||||||
fixed_ips:
|
|
||||||
- ip_address: 10.5.6.55
|
|
||||||
register: subport
|
|
||||||
|
|
||||||
- name: Create trunk
|
|
||||||
openstack.cloud.trunk:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
state: present
|
|
||||||
name: "{{ trunk_name }}"
|
|
||||||
port: "{{ parent_port_name }}"
|
|
||||||
register: trunk
|
|
||||||
|
|
||||||
- debug: var=trunk
|
|
||||||
|
|
||||||
- name: assert return values of trunk module
|
|
||||||
assert:
|
|
||||||
that:
|
|
||||||
# allow new fields to be introduced but prevent fields from being removed
|
|
||||||
- expected_fields|difference(trunk.trunk.keys())|length == 0
|
|
||||||
|
|
||||||
- name: Add subport to trunk
|
|
||||||
openstack.cloud.trunk:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
state: present
|
|
||||||
name: "{{ trunk_name }}"
|
|
||||||
port: "{{ parent_port_name }}"
|
|
||||||
sub_ports:
|
|
||||||
- port: "{{ subport_name }}"
|
|
||||||
segmentation_type: vlan
|
|
||||||
segmentation_id: 123
|
|
||||||
|
|
||||||
- name: Update subport from trunk
|
|
||||||
openstack.cloud.trunk:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
state: present
|
|
||||||
name: "{{ trunk_name }}"
|
|
||||||
port: "{{ parent_port_name }}"
|
|
||||||
sub_ports: []
|
|
||||||
|
|
||||||
- name: Delete trunk
|
|
||||||
openstack.cloud.trunk:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
state: absent
|
|
||||||
name: "{{ trunk_name }}"
|
|
||||||
|
|
||||||
- name: Delete subport
|
|
||||||
openstack.cloud.port:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
state: absent
|
|
||||||
name: "{{ subport_name }}"
|
|
||||||
|
|
||||||
- name: Delete subport subnet
|
|
||||||
openstack.cloud.subnet:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
state: absent
|
|
||||||
name: "{{ subport_subnet_name }}"
|
|
||||||
|
|
||||||
- name: Delete subport network
|
|
||||||
openstack.cloud.network:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
state: absent
|
|
||||||
name: "{{ subport_network_name }}"
|
|
||||||
|
|
||||||
- name: Delete parent port
|
|
||||||
openstack.cloud.port:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
state: absent
|
|
||||||
name: "{{ parent_port_name }}"
|
|
||||||
|
|
||||||
- name: Delete parent subnet
|
|
||||||
openstack.cloud.subnet:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
state: absent
|
|
||||||
name: "{{ parent_subnet_name }}"
|
|
||||||
|
|
||||||
- name: Delete parent network
|
|
||||||
openstack.cloud.network:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
state: absent
|
|
||||||
name: "{{ parent_network_name }}"
|
|
||||||
@@ -37,7 +37,7 @@
|
|||||||
- name: Check info
|
- name: Check info
|
||||||
assert:
|
assert:
|
||||||
that:
|
that:
|
||||||
- info1.volumes | selectattr("id", "equalto", info.volumes.0.id) | list | length == 1
|
- info1.volumes | selectattr("id", "equalto", "{{ info.volumes.0.id }}") | list | length == 1
|
||||||
- info1.volumes.0.name == 'ansible_test'
|
- info1.volumes.0.name == 'ansible_test'
|
||||||
- info1.volumes.0.status == None
|
- info1.volumes.0.status == None
|
||||||
|
|
||||||
|
|||||||
@@ -1,32 +0,0 @@
|
|||||||
test_volume: ansible_test_volume
|
|
||||||
managed_volume: managed_test_volume
|
|
||||||
expected_fields:
|
|
||||||
- attachments
|
|
||||||
- availability_zone
|
|
||||||
- consistency_group_id
|
|
||||||
- created_at
|
|
||||||
- updated_at
|
|
||||||
- description
|
|
||||||
- extended_replication_status
|
|
||||||
- group_id
|
|
||||||
- host
|
|
||||||
- image_id
|
|
||||||
- is_bootable
|
|
||||||
- is_encrypted
|
|
||||||
- is_multiattach
|
|
||||||
- migration_id
|
|
||||||
- migration_status
|
|
||||||
- project_id
|
|
||||||
- replication_driver_data
|
|
||||||
- replication_status
|
|
||||||
- scheduler_hints
|
|
||||||
- size
|
|
||||||
- snapshot_id
|
|
||||||
- source_volume_id
|
|
||||||
- status
|
|
||||||
- user_id
|
|
||||||
- volume_image_metadata
|
|
||||||
- volume_type
|
|
||||||
- id
|
|
||||||
- name
|
|
||||||
- metadata
|
|
||||||
@@ -1,65 +0,0 @@
|
|||||||
---
|
|
||||||
- name: Create volume
|
|
||||||
openstack.cloud.volume:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
state: present
|
|
||||||
size: 1
|
|
||||||
name: "{{ test_volume }}"
|
|
||||||
description: Test volume
|
|
||||||
register: vol
|
|
||||||
|
|
||||||
- assert:
|
|
||||||
that: item in vol.volume
|
|
||||||
loop: "{{ expected_fields }}"
|
|
||||||
|
|
||||||
- name: Unmanage volume
|
|
||||||
openstack.cloud.volume_manage:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
state: absent
|
|
||||||
name: "{{ vol.volume.id }}"
|
|
||||||
|
|
||||||
- name: Unmanage volume again
|
|
||||||
openstack.cloud.volume_manage:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
state: absent
|
|
||||||
name: "{{ vol.volume.id }}"
|
|
||||||
register: unmanage_idempotency
|
|
||||||
|
|
||||||
- assert:
|
|
||||||
that:
|
|
||||||
- unmanage_idempotency is not changed
|
|
||||||
|
|
||||||
- name: Manage volume
|
|
||||||
openstack.cloud.volume_manage:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
state: present
|
|
||||||
source_name: volume-{{ vol.volume.id }}
|
|
||||||
host: "{{ vol.volume.host }}"
|
|
||||||
name: "{{ managed_volume }}"
|
|
||||||
register: new_vol
|
|
||||||
|
|
||||||
- assert:
|
|
||||||
that:
|
|
||||||
- new_vol.volume.name == managed_volume
|
|
||||||
|
|
||||||
- name: Manage volume again
|
|
||||||
openstack.cloud.volume_manage:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
state: present
|
|
||||||
source_name: volume-{{ vol.volume.id }}
|
|
||||||
host: "{{ vol.volume.host }}"
|
|
||||||
name: "{{ managed_volume }}"
|
|
||||||
register: vol_idempotency
|
|
||||||
|
|
||||||
- assert:
|
|
||||||
that:
|
|
||||||
- vol_idempotency is not changed
|
|
||||||
|
|
||||||
- pause:
|
|
||||||
seconds: 10
|
|
||||||
|
|
||||||
- name: Delete volume
|
|
||||||
openstack.cloud.volume:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
state: absent
|
|
||||||
name: "{{ managed_volume }}"
|
|
||||||
@@ -1,9 +0,0 @@
|
|||||||
expected_fields:
|
|
||||||
- availability_zone
|
|
||||||
- binary
|
|
||||||
- disabled_reason
|
|
||||||
- host
|
|
||||||
- name
|
|
||||||
- state
|
|
||||||
- status
|
|
||||||
- updated_at
|
|
||||||
@@ -1,23 +0,0 @@
|
|||||||
---
|
|
||||||
- name: Fetch volume services
|
|
||||||
openstack.cloud.volume_service_info:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
register: volume_services
|
|
||||||
|
|
||||||
- name: Assert return values of volume_service_info module
|
|
||||||
assert:
|
|
||||||
that:
|
|
||||||
- volume_services.volume_services | length > 0
|
|
||||||
# allow new fields to be introduced but prevent fields from being removed
|
|
||||||
- expected_fields|difference(volume_services.volume_services[0].keys())|length == 0
|
|
||||||
|
|
||||||
- name: Fetch volume services with filters
|
|
||||||
openstack.cloud.volume_service_info:
|
|
||||||
cloud: "{{ cloud }}"
|
|
||||||
binary: "cinder-volume"
|
|
||||||
register: volume_services
|
|
||||||
|
|
||||||
- name: Assert return values of volume_service_info module
|
|
||||||
assert:
|
|
||||||
that:
|
|
||||||
- volume_services.volume_services | length > 0
|
|
||||||
@@ -75,10 +75,10 @@ ansible-galaxy collection install --requirements-file ci/requirements.yml
|
|||||||
if [ -z "$PIP_INSTALL" ]; then
|
if [ -z "$PIP_INSTALL" ]; then
|
||||||
tox -ebuild
|
tox -ebuild
|
||||||
ansible-galaxy collection install "$(find build_artifact/ -maxdepth 1 -name 'openstack-cloud-*')" --force
|
ansible-galaxy collection install "$(find build_artifact/ -maxdepth 1 -name 'openstack-cloud-*')" --force
|
||||||
TEST_COLLECTIONS_PATHS=${HOME}/.ansible/collections:$ANSIBLE_COLLECTIONS_PATH
|
TEST_COLLECTIONS_PATHS=${HOME}/.ansible/collections:$ANSIBLE_COLLECTIONS_PATHS
|
||||||
else
|
else
|
||||||
pip freeze | grep ansible-collections-openstack
|
pip freeze | grep ansible-collections-openstack
|
||||||
TEST_COLLECTIONS_PATHS=$VIRTUAL_ENV/share/ansible/collections:$ANSIBLE_COLLECTIONS_PATH
|
TEST_COLLECTIONS_PATHS=$VIRTUAL_ENV/share/ansible/collections:$ANSIBLE_COLLECTIONS_PATHS
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# We need to source the current tox environment so that Ansible will
|
# We need to source the current tox environment so that Ansible will
|
||||||
@@ -124,17 +124,12 @@ if [ ! -e /etc/magnum ]; then
|
|||||||
tag_opt+=" --skip-tags coe_cluster,coe_cluster_template"
|
tag_opt+=" --skip-tags coe_cluster,coe_cluster_template"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if ! systemctl is-enabled devstack@m-api.service 2>&1; then
|
|
||||||
# Skip share_type tasks if Manila is not available
|
|
||||||
tag_opt+=" --skip-tags share_type"
|
|
||||||
fi
|
|
||||||
|
|
||||||
cd ci/
|
cd ci/
|
||||||
|
|
||||||
# Run tests
|
# Run tests
|
||||||
set -o pipefail
|
set -o pipefail
|
||||||
# shellcheck disable=SC2086
|
# shellcheck disable=SC2086
|
||||||
ANSIBLE_COLLECTIONS_PATH=$TEST_COLLECTIONS_PATHS ansible-playbook \
|
ANSIBLE_COLLECTIONS_PATHS=$TEST_COLLECTIONS_PATHS ansible-playbook \
|
||||||
-vvv ./run-collection.yml \
|
-vvv ./run-collection.yml \
|
||||||
-e "sdk_version=${SDK_VER} cloud=${CLOUD} cloud_alt=${CLOUD_ALT} ${ANSIBLE_VARS}" \
|
-e "sdk_version=${SDK_VER} cloud=${CLOUD} cloud_alt=${CLOUD_ALT} ${ANSIBLE_VARS}" \
|
||||||
${tag_opt} 2>&1 | sudo tee /opt/stack/logs/test_output.log
|
${tag_opt} 2>&1 | sudo tee /opt/stack/logs/test_output.log
|
||||||
|
|||||||
@@ -5,7 +5,6 @@
|
|||||||
|
|
||||||
roles:
|
roles:
|
||||||
- { role: address_scope, tags: address_scope }
|
- { role: address_scope, tags: address_scope }
|
||||||
- { role: application_credential, tags: application_credential }
|
|
||||||
- { role: auth, tags: auth }
|
- { role: auth, tags: auth }
|
||||||
- { role: catalog_service, tags: catalog_service }
|
- { role: catalog_service, tags: catalog_service }
|
||||||
- { role: coe_cluster, tags: coe_cluster }
|
- { role: coe_cluster, tags: coe_cluster }
|
||||||
@@ -32,14 +31,10 @@
|
|||||||
- { role: loadbalancer, tags: loadbalancer }
|
- { role: loadbalancer, tags: loadbalancer }
|
||||||
- { role: logging, tags: logging }
|
- { role: logging, tags: logging }
|
||||||
- { role: network, tags: network }
|
- { role: network, tags: network }
|
||||||
- { role: network_segment, tags: network_segment }
|
|
||||||
- { role: neutron_rbac_policy, tags: neutron_rbac_policy }
|
- { role: neutron_rbac_policy, tags: neutron_rbac_policy }
|
||||||
- { role: object, tags: object }
|
- { role: object, tags: object }
|
||||||
- { role: object_container, tags: object_container }
|
- { role: object_container, tags: object_container }
|
||||||
- { role: object_containers_info, tags: object_containers_info }
|
|
||||||
- { role: port, tags: port }
|
- { role: port, tags: port }
|
||||||
- { role: trait, tags: trait }
|
|
||||||
- { role: trunk, tags: trunk }
|
|
||||||
- { role: project, tags: project }
|
- { role: project, tags: project }
|
||||||
- { role: quota, tags: quota }
|
- { role: quota, tags: quota }
|
||||||
- { role: recordset, tags: recordset }
|
- { role: recordset, tags: recordset }
|
||||||
@@ -54,14 +49,11 @@
|
|||||||
- { role: server_group, tags: server_group }
|
- { role: server_group, tags: server_group }
|
||||||
- { role: server_metadata, tags: server_metadata }
|
- { role: server_metadata, tags: server_metadata }
|
||||||
- { role: server_volume, tags: server_volume }
|
- { role: server_volume, tags: server_volume }
|
||||||
- { role: share_type, tags: share_type }
|
|
||||||
- { role: stack, tags: stack }
|
- { role: stack, tags: stack }
|
||||||
- { role: subnet, tags: subnet }
|
- { role: subnet, tags: subnet }
|
||||||
- { role: subnet_pool, tags: subnet_pool }
|
- { role: subnet_pool, tags: subnet_pool }
|
||||||
- { role: volume, tags: volume }
|
- { role: volume, tags: volume }
|
||||||
- { role: volume_type, tags: volume_type }
|
- { role: volume_type, tags: volume_type }
|
||||||
- { role: volume_backup, tags: volume_backup }
|
- { role: volume_backup, tags: volume_backup }
|
||||||
- { role: volume_manage, tags: volume_manage }
|
|
||||||
- { role: volume_service, tags: volume_service }
|
|
||||||
- { role: volume_snapshot, tags: volume_snapshot }
|
- { role: volume_snapshot, tags: volume_snapshot }
|
||||||
- { role: volume_type_access, tags: volume_type_access }
|
- { role: volume_type_access, tags: volume_type_access }
|
||||||
|
|||||||
@@ -11,7 +11,7 @@ For hacking on the Ansible OpenStack collection it helps to [prepare a DevStack
|
|||||||
|
|
||||||
## Hosting
|
## Hosting
|
||||||
|
|
||||||
* [Bug tracker][bugtracker]
|
* [Bug tracker][storyboard]
|
||||||
* [Mailing list `openstack-discuss@lists.openstack.org`][openstack-discuss].
|
* [Mailing list `openstack-discuss@lists.openstack.org`][openstack-discuss].
|
||||||
Prefix subjects with `[aoc]` or `[aco]` for faster responses.
|
Prefix subjects with `[aoc]` or `[aco]` for faster responses.
|
||||||
* [Code Hosting][opendev-a-c-o]
|
* [Code Hosting][opendev-a-c-o]
|
||||||
@@ -188,4 +188,4 @@ Read [Release Guide](releasing.md) on how to publish new releases.
|
|||||||
[openstacksdk-cloud-layer-stays]: https://meetings.opendev.org/irclogs/%23openstack-sdks/%23openstack-sdks.2022-04-27.log.html
|
[openstacksdk-cloud-layer-stays]: https://meetings.opendev.org/irclogs/%23openstack-sdks/%23openstack-sdks.2022-04-27.log.html
|
||||||
[openstacksdk-to-dict]: https://opendev.org/openstack/openstacksdk/src/branch/master/openstack/resource.py
|
[openstacksdk-to-dict]: https://opendev.org/openstack/openstacksdk/src/branch/master/openstack/resource.py
|
||||||
[openstacksdk]: https://opendev.org/openstack/openstacksdk
|
[openstacksdk]: https://opendev.org/openstack/openstacksdk
|
||||||
[bugtracker]: https://bugs.launchpad.net/ansible-collections-openstack
|
[storyboard]: https://storyboard.openstack.org/#!/project/openstack/ansible-collections-openstack
|
||||||
|
|||||||
@@ -11,7 +11,7 @@ dependencies: {}
|
|||||||
repository: https://opendev.org/openstack/ansible-collections-openstack
|
repository: https://opendev.org/openstack/ansible-collections-openstack
|
||||||
documentation: https://docs.ansible.com/ansible/latest/collections/openstack/cloud/index.html
|
documentation: https://docs.ansible.com/ansible/latest/collections/openstack/cloud/index.html
|
||||||
homepage: https://opendev.org/openstack/ansible-collections-openstack
|
homepage: https://opendev.org/openstack/ansible-collections-openstack
|
||||||
issues: https://bugs.launchpad.net/ansible-collections-openstack
|
issues: https://storyboard.openstack.org/#!/project/openstack/ansible-collections-openstack
|
||||||
build_ignore:
|
build_ignore:
|
||||||
- "*.tar.gz"
|
- "*.tar.gz"
|
||||||
- build_artifact
|
- build_artifact
|
||||||
@@ -32,4 +32,4 @@ build_ignore:
|
|||||||
- .vscode
|
- .vscode
|
||||||
- ansible_collections_openstack.egg-info
|
- ansible_collections_openstack.egg-info
|
||||||
- changelogs
|
- changelogs
|
||||||
version: 2.5.0
|
version: 2.2.0
|
||||||
|
|||||||
@@ -11,7 +11,7 @@ dependencies: {}
|
|||||||
repository: https://opendev.org/openstack/ansible-collections-openstack
|
repository: https://opendev.org/openstack/ansible-collections-openstack
|
||||||
documentation: https://docs.ansible.com/ansible/latest/collections/openstack/cloud/index.html
|
documentation: https://docs.ansible.com/ansible/latest/collections/openstack/cloud/index.html
|
||||||
homepage: https://opendev.org/openstack/ansible-collections-openstack
|
homepage: https://opendev.org/openstack/ansible-collections-openstack
|
||||||
issues: https://bugs.launchpad.net/ansible-collections-openstack
|
issues: https://storyboard.openstack.org/#!/project/openstack/ansible-collections-openstack
|
||||||
build_ignore:
|
build_ignore:
|
||||||
- "*.tar.gz"
|
- "*.tar.gz"
|
||||||
- build_artifact
|
- build_artifact
|
||||||
|
|||||||
@@ -2,7 +2,6 @@ requires_ansible: ">=2.8"
|
|||||||
action_groups:
|
action_groups:
|
||||||
openstack:
|
openstack:
|
||||||
- address_scope
|
- address_scope
|
||||||
- application_credential
|
|
||||||
- auth
|
- auth
|
||||||
- baremetal_deploy_template
|
- baremetal_deploy_template
|
||||||
- baremetal_inspect
|
- baremetal_inspect
|
||||||
@@ -10,7 +9,6 @@ action_groups:
|
|||||||
- baremetal_node_action
|
- baremetal_node_action
|
||||||
- baremetal_node_info
|
- baremetal_node_info
|
||||||
- baremetal_port
|
- baremetal_port
|
||||||
- baremetal_port_group
|
|
||||||
- baremetal_port_info
|
- baremetal_port_info
|
||||||
- catalog_service
|
- catalog_service
|
||||||
- catalog_service_info
|
- catalog_service_info
|
||||||
@@ -52,13 +50,11 @@ action_groups:
|
|||||||
- lb_pool
|
- lb_pool
|
||||||
- loadbalancer
|
- loadbalancer
|
||||||
- network
|
- network
|
||||||
- network_segment
|
|
||||||
- networks_info
|
- networks_info
|
||||||
- neutron_rbac_policies_info
|
- neutron_rbac_policies_info
|
||||||
- neutron_rbac_policy
|
- neutron_rbac_policy
|
||||||
- object
|
- object
|
||||||
- object_container
|
- object_container
|
||||||
- object_containers_info
|
|
||||||
- port
|
- port
|
||||||
- port_info
|
- port_info
|
||||||
- project
|
- project
|
||||||
@@ -80,20 +76,15 @@ action_groups:
|
|||||||
- server_info
|
- server_info
|
||||||
- server_metadata
|
- server_metadata
|
||||||
- server_volume
|
- server_volume
|
||||||
- share_type
|
|
||||||
- share_type_info
|
|
||||||
- stack
|
- stack
|
||||||
- stack_info
|
- stack_info
|
||||||
- subnet
|
- subnet
|
||||||
- subnet_pool
|
- subnet_pool
|
||||||
- subnets_info
|
- subnets_info
|
||||||
- trunk
|
|
||||||
- volume
|
- volume
|
||||||
- volume_manage
|
|
||||||
- volume_backup
|
- volume_backup
|
||||||
- volume_backup_info
|
- volume_backup_info
|
||||||
- volume_info
|
- volume_info
|
||||||
- volume_service_info
|
|
||||||
- volume_snapshot
|
- volume_snapshot
|
||||||
- volume_snapshot_info
|
- volume_snapshot_info
|
||||||
- volume_type_access
|
- volume_type_access
|
||||||
|
|||||||
@@ -96,18 +96,6 @@ options:
|
|||||||
only.
|
only.
|
||||||
type: bool
|
type: bool
|
||||||
default: false
|
default: false
|
||||||
only_ipv4:
|
|
||||||
description:
|
|
||||||
- Use only ipv4 addresses for ansible_host and ansible_ssh_host.
|
|
||||||
- Using I(only_ipv4) helps when running Ansible in a ipv4 only setup.
|
|
||||||
type: bool
|
|
||||||
default: false
|
|
||||||
server_filters:
|
|
||||||
description:
|
|
||||||
- A dictionary of server filter value pairs.
|
|
||||||
- Available parameters can be seen under https://docs.openstack.org/api-ref/compute/#list-servers
|
|
||||||
type: dict
|
|
||||||
default: {}
|
|
||||||
show_all:
|
show_all:
|
||||||
description:
|
description:
|
||||||
- Whether all servers should be listed or not.
|
- Whether all servers should be listed or not.
|
||||||
@@ -285,7 +273,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
|
|||||||
clouds_yaml_path = self.get_option('clouds_yaml_path')
|
clouds_yaml_path = self.get_option('clouds_yaml_path')
|
||||||
config_files = openstack.config.loader.CONFIG_FILES
|
config_files = openstack.config.loader.CONFIG_FILES
|
||||||
if clouds_yaml_path:
|
if clouds_yaml_path:
|
||||||
config_files = clouds_yaml_path + config_files
|
config_files += clouds_yaml_path
|
||||||
|
|
||||||
config = openstack.config.loader.OpenStackConfig(
|
config = openstack.config.loader.OpenStackConfig(
|
||||||
config_files=config_files)
|
config_files=config_files)
|
||||||
@@ -315,7 +303,6 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
|
|||||||
|
|
||||||
expand_hostvars = self.get_option('expand_hostvars')
|
expand_hostvars = self.get_option('expand_hostvars')
|
||||||
all_projects = self.get_option('all_projects')
|
all_projects = self.get_option('all_projects')
|
||||||
server_filters = self.get_option('server_filters')
|
|
||||||
servers = []
|
servers = []
|
||||||
|
|
||||||
def _expand_server(server, cloud, volumes):
|
def _expand_server(server, cloud, volumes):
|
||||||
@@ -362,8 +349,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
|
|||||||
all_projects=all_projects,
|
all_projects=all_projects,
|
||||||
# details are required because 'addresses'
|
# details are required because 'addresses'
|
||||||
# attribute must be populated
|
# attribute must be populated
|
||||||
details=True,
|
details=True)
|
||||||
**server_filters)
|
|
||||||
]:
|
]:
|
||||||
servers.append(server)
|
servers.append(server)
|
||||||
except openstack.exceptions.OpenStackCloudException as e:
|
except openstack.exceptions.OpenStackCloudException as e:
|
||||||
@@ -398,17 +384,10 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
|
|||||||
if address['OS-EXT-IPS:type'] == 'floating'),
|
if address['OS-EXT-IPS:type'] == 'floating'),
|
||||||
None)
|
None)
|
||||||
|
|
||||||
if self.get_option('only_ipv4'):
|
fixed_ip = next(
|
||||||
fixed_ip = next(
|
(address['addr'] for address in addresses
|
||||||
(address['addr'] for address in addresses
|
if address['OS-EXT-IPS:type'] == 'fixed'),
|
||||||
if (address['OS-EXT-IPS:type'] == 'fixed' and address['version'] == 4)),
|
None)
|
||||||
None)
|
|
||||||
|
|
||||||
else:
|
|
||||||
fixed_ip = next(
|
|
||||||
(address['addr'] for address in addresses
|
|
||||||
if address['OS-EXT-IPS:type'] == 'fixed'),
|
|
||||||
None)
|
|
||||||
|
|
||||||
ip = floating_ip if floating_ip is not None and not self.get_option('private') else fixed_ip
|
ip = floating_ip if floating_ip is not None and not self.get_option('private') else fixed_ip
|
||||||
|
|
||||||
|
|||||||
@@ -183,7 +183,7 @@ def openstack_cloud_from_module(module, min_version=None, max_version=None):
|
|||||||
" excluded.")
|
" excluded.")
|
||||||
for param in (
|
for param in (
|
||||||
'auth', 'region_name', 'validate_certs',
|
'auth', 'region_name', 'validate_certs',
|
||||||
'ca_cert', 'client_cert', 'client_key', 'api_timeout', 'auth_type'):
|
'ca_cert', 'client_key', 'api_timeout', 'auth_type'):
|
||||||
if module.params[param] is not None:
|
if module.params[param] is not None:
|
||||||
module.fail_json(msg=fail_message.format(param=param))
|
module.fail_json(msg=fail_message.format(param=param))
|
||||||
# For 'interface' parameter, fail if we receive a non-default value
|
# For 'interface' parameter, fail if we receive a non-default value
|
||||||
@@ -199,7 +199,6 @@ def openstack_cloud_from_module(module, min_version=None, max_version=None):
|
|||||||
verify=module.params['validate_certs'],
|
verify=module.params['validate_certs'],
|
||||||
cacert=module.params['ca_cert'],
|
cacert=module.params['ca_cert'],
|
||||||
key=module.params['client_key'],
|
key=module.params['client_key'],
|
||||||
cert=module.params['client_cert'],
|
|
||||||
api_timeout=module.params['api_timeout'],
|
api_timeout=module.params['api_timeout'],
|
||||||
interface=module.params['interface'],
|
interface=module.params['interface'],
|
||||||
)
|
)
|
||||||
@@ -359,7 +358,7 @@ class OpenStackModule:
|
|||||||
" excluded.")
|
" excluded.")
|
||||||
for param in (
|
for param in (
|
||||||
'auth', 'region_name', 'validate_certs',
|
'auth', 'region_name', 'validate_certs',
|
||||||
'ca_cert', 'client_cert', 'client_key', 'api_timeout', 'auth_type'):
|
'ca_cert', 'client_key', 'api_timeout', 'auth_type'):
|
||||||
if self.params[param] is not None:
|
if self.params[param] is not None:
|
||||||
self.fail_json(msg=fail_message.format(param=param))
|
self.fail_json(msg=fail_message.format(param=param))
|
||||||
# For 'interface' parameter, fail if we receive a non-default value
|
# For 'interface' parameter, fail if we receive a non-default value
|
||||||
@@ -374,7 +373,6 @@ class OpenStackModule:
|
|||||||
verify=self.params['validate_certs'],
|
verify=self.params['validate_certs'],
|
||||||
cacert=self.params['ca_cert'],
|
cacert=self.params['ca_cert'],
|
||||||
key=self.params['client_key'],
|
key=self.params['client_key'],
|
||||||
cert=self.params['client_cert'],
|
|
||||||
api_timeout=self.params['api_timeout'],
|
api_timeout=self.params['api_timeout'],
|
||||||
interface=self.params['interface'],
|
interface=self.params['interface'],
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -1,332 +0,0 @@
|
|||||||
#!/usr/bin/python
|
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
|
|
||||||
# Copyright (c) 2024 Red Hat, Inc.
|
|
||||||
# GNU General Public License v3.0+
|
|
||||||
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
|
||||||
|
|
||||||
DOCUMENTATION = r"""
|
|
||||||
---
|
|
||||||
module: application_credential
|
|
||||||
short_description: Manage OpenStack Identity (Keystone) application credentials
|
|
||||||
author: OpenStack Ansible SIG
|
|
||||||
description:
|
|
||||||
- Create or delete an OpenStack Identity (Keystone) application credential.
|
|
||||||
- When the secret parameter is not set a secret will be generated and returned
|
|
||||||
- in the response. Existing credentials cannot be modified so running this module
|
|
||||||
- against an existing credential will result in it being deleted and recreated.
|
|
||||||
- This needs to be taken into account when the secret is generated, as the secret
|
|
||||||
- will change on each run of the module.
|
|
||||||
options:
|
|
||||||
name:
|
|
||||||
description:
|
|
||||||
- Name of the application credential.
|
|
||||||
required: true
|
|
||||||
type: str
|
|
||||||
description:
|
|
||||||
description:
|
|
||||||
- Application credential description.
|
|
||||||
type: str
|
|
||||||
secret:
|
|
||||||
description:
|
|
||||||
- Secret to use for authentication
|
|
||||||
- (if not provided, one will be generated).
|
|
||||||
type: str
|
|
||||||
roles:
|
|
||||||
description:
|
|
||||||
- Roles to authorize (name or ID).
|
|
||||||
type: list
|
|
||||||
elements: dict
|
|
||||||
suboptions:
|
|
||||||
name:
|
|
||||||
description: Name of role
|
|
||||||
type: str
|
|
||||||
id:
|
|
||||||
description: ID of role
|
|
||||||
type: str
|
|
||||||
domain_id:
|
|
||||||
description: Domain ID
|
|
||||||
type: str
|
|
||||||
expires_at:
|
|
||||||
description:
|
|
||||||
- Sets an expiration date for the application credential,
|
|
||||||
- format of YYYY-mm-ddTHH:MM:SS
|
|
||||||
- (if not provided, the application credential will not expire).
|
|
||||||
type: str
|
|
||||||
unrestricted:
|
|
||||||
description:
|
|
||||||
- Enable application credential to create and delete other application
|
|
||||||
- credentials and trusts (this is potentially dangerous behavior and is
|
|
||||||
- disabled by default).
|
|
||||||
default: false
|
|
||||||
type: bool
|
|
||||||
access_rules:
|
|
||||||
description:
|
|
||||||
- List of access rules, each containing a request method, path, and service.
|
|
||||||
type: list
|
|
||||||
elements: dict
|
|
||||||
suboptions:
|
|
||||||
service:
|
|
||||||
description: Name of service endpoint
|
|
||||||
type: str
|
|
||||||
required: true
|
|
||||||
path:
|
|
||||||
description: Path portion of access URL
|
|
||||||
type: str
|
|
||||||
required: true
|
|
||||||
method:
|
|
||||||
description: HTTP method
|
|
||||||
type: str
|
|
||||||
required: true
|
|
||||||
state:
|
|
||||||
description:
|
|
||||||
- Should the resource be present or absent.
|
|
||||||
- Application credentials are immutable so running with an existing present
|
|
||||||
- credential will result in the credential being deleted and recreated.
|
|
||||||
choices: [present, absent]
|
|
||||||
default: present
|
|
||||||
type: str
|
|
||||||
extends_documentation_fragment:
|
|
||||||
- openstack.cloud.openstack
|
|
||||||
"""
|
|
||||||
|
|
||||||
EXAMPLES = r"""
|
|
||||||
- name: Create application credential
|
|
||||||
openstack.cloud.application_credential:
|
|
||||||
cloud: mycloud
|
|
||||||
description: demodescription
|
|
||||||
name: democreds
|
|
||||||
state: present
|
|
||||||
|
|
||||||
- name: Create application credential with expiration, access rules and roles
|
|
||||||
openstack.cloud.application_credential:
|
|
||||||
cloud: mycloud
|
|
||||||
description: demodescription
|
|
||||||
name: democreds
|
|
||||||
access_rules:
|
|
||||||
- service: "compute"
|
|
||||||
path: "/v2.1/servers"
|
|
||||||
method: "GET"
|
|
||||||
expires_at: "2024-02-29T09:29:59"
|
|
||||||
roles:
|
|
||||||
- name: Member
|
|
||||||
state: present
|
|
||||||
|
|
||||||
- name: Delete application credential
|
|
||||||
openstack.cloud.application_credential:
|
|
||||||
cloud: mycloud
|
|
||||||
name: democreds
|
|
||||||
state: absent
|
|
||||||
"""
|
|
||||||
|
|
||||||
RETURN = r"""
|
|
||||||
application_credential:
|
|
||||||
description: Dictionary describing the project.
|
|
||||||
returned: On success when I(state) is C(present).
|
|
||||||
type: dict
|
|
||||||
contains:
|
|
||||||
id:
|
|
||||||
description: The ID of the application credential.
|
|
||||||
type: str
|
|
||||||
sample: "2e73d1b4f0cb473f920bd54dfce3c26d"
|
|
||||||
name:
|
|
||||||
description: The name of the application credential.
|
|
||||||
type: str
|
|
||||||
sample: "appcreds"
|
|
||||||
secret:
|
|
||||||
description: Secret to use for authentication
|
|
||||||
(if not provided, returns the generated value).
|
|
||||||
type: str
|
|
||||||
sample: "JxE7LajLY75NZgDH1hfu0N_6xS9hQ-Af40W3"
|
|
||||||
description:
|
|
||||||
description: A description of the application credential's purpose.
|
|
||||||
type: str
|
|
||||||
sample: "App credential"
|
|
||||||
expires_at:
|
|
||||||
description: The expiration time of the application credential in UTC,
|
|
||||||
if one was specified.
|
|
||||||
type: str
|
|
||||||
sample: "2024-02-29T09:29:59.000000"
|
|
||||||
project_id:
|
|
||||||
description: The ID of the project the application credential was created
|
|
||||||
for and that authentication requests using this application
|
|
||||||
credential will be scoped to.
|
|
||||||
type: str
|
|
||||||
sample: "4b633c451ac74233be3721a3635275e5"
|
|
||||||
roles:
|
|
||||||
description: A list of one or more roles that this application credential
|
|
||||||
has associated with its project. A token using this application
|
|
||||||
credential will have these same roles.
|
|
||||||
type: list
|
|
||||||
elements: dict
|
|
||||||
sample: [{"name": "Member"}]
|
|
||||||
access_rules:
|
|
||||||
description: A list of access_rules objects
|
|
||||||
type: list
|
|
||||||
elements: dict
|
|
||||||
sample:
|
|
||||||
- id: "edecb6c791d541a3b458199858470d20"
|
|
||||||
service: "compute"
|
|
||||||
path: "/v2.1/servers"
|
|
||||||
method: "GET"
|
|
||||||
unrestricted:
|
|
||||||
description: A flag indicating whether the application credential may be
|
|
||||||
used for creation or destruction of other application credentials
|
|
||||||
or trusts.
|
|
||||||
type: bool
|
|
||||||
cloud:
|
|
||||||
description: The current cloud config with the username and password replaced
|
|
||||||
with the name and secret of the application credential. This
|
|
||||||
can be passed to the cloud parameter of other tasks, or written
|
|
||||||
to an openstack cloud config file.
|
|
||||||
returned: On success when I(state) is C(present).
|
|
||||||
type: dict
|
|
||||||
sample:
|
|
||||||
auth_type: "v3applicationcredential"
|
|
||||||
auth:
|
|
||||||
auth_url: "https://192.0.2.1/identity"
|
|
||||||
application_credential_secret: "JxE7LajLY75NZgDH1hfu0N_6xS9hQ-Af40W3"
|
|
||||||
application_credential_id: "3e73d1b4f0cb473f920bd54dfce3c26d"
|
|
||||||
"""
|
|
||||||
|
|
||||||
import copy
|
|
||||||
|
|
||||||
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import (
|
|
||||||
OpenStackModule,
|
|
||||||
)
|
|
||||||
|
|
||||||
try:
|
|
||||||
import openstack.config
|
|
||||||
except ImportError:
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class IdentityApplicationCredentialModule(OpenStackModule):
    """Manage a Keystone application credential for the current user.

    Application credentials cannot be updated in place, so when I(state)
    is C(present) any existing credential with the same name is replaced
    (deleted and re-created).
    """

    argument_spec = dict(
        name=dict(required=True),
        description=dict(),
        secret=dict(no_log=True),
        roles=dict(
            type="list",
            elements="dict",
            options=dict(name=dict(), id=dict(), domain_id=dict()),
        ),
        expires_at=dict(),
        unrestricted=dict(type="bool", default=False),
        access_rules=dict(
            type="list",
            elements="dict",
            options=dict(
                service=dict(required=True),
                path=dict(required=True),
                method=dict(required=True),
            ),
        ),
        state=dict(default="present", choices=["absent", "present"]),
    )
    module_kwargs = dict()

    # Name of the cloud from the module params, captured before the base
    # class pops it; needed later to rebuild a cloud config for the result.
    cloud = None

    def openstack_cloud_from_module(self):
        """Capture the 'cloud' param before the base class consumes it."""
        self.cloud = self.params["cloud"]
        return OpenStackModule.openstack_cloud_from_module(self)

    def run(self):
        state = self.params["state"]
        existing = self._find()

        if state == "present":
            if existing:
                # Credentials are immutable; replace the existing one.
                self._delete(existing)
            created = self._create().to_dict(computed=False)
            self.exit_json(
                changed=True,
                application_credential=created,
                cloud=self._get_cloud_config(created),
            )
        elif existing:
            # state == "absent" with a credential to remove
            self._delete(existing)
            self.exit_json(changed=True)
        else:
            # state == "absent" and nothing exists; no action needed
            self.exit_json(changed=False)

    def _get_user_id(self):
        """Return the ID of the authenticated user."""
        return self.conn.session.get_user_id()

    def _create(self):
        """Create the application credential and return the SDK resource."""
        attrs = {
            key: self.params[key]
            for key in (
                "name",
                "description",
                "secret",
                "expires_at",
                "unrestricted",
                "access_rules",
            )
            if self.params[key] is not None
        }

        roles = self.params["roles"]
        if roles:
            # Drop unset keys from each role so only explicit values are sent.
            attrs["roles"] = [
                {
                    key: role[key]
                    for key in ("name", "id", "domain_id")
                    if role[key] is not None
                }
                for role in roles
            ]

        attrs["user"] = self._get_user_id()
        return self.conn.identity.create_application_credential(**attrs)

    def _get_cloud_config(self, creds):
        """Build a cloud config dict that authenticates with *creds*.

        The current cloud's config is copied and its auth section replaced
        with the new application credential's id and secret.
        """
        region = openstack.config.OpenStackConfig().get_one(self.cloud)

        config = copy.deepcopy(region.config)
        config["auth_type"] = "v3applicationcredential"
        config["auth"] = {
            "application_credential_id": creds["id"],
            "application_credential_secret": creds["secret"],
            "auth_url": region.config["auth"]["auth_url"],
        }

        return config

    def _delete(self, creds):
        """Delete the given application credential."""
        self.conn.identity.delete_application_credential(
            self._get_user_id(), creds.id)

    def _find(self):
        """Return the current user's credential matching I(name), or None."""
        return self.conn.identity.find_application_credential(
            user=self._get_user_id(), name_or_id=self.params["name"])
|
|
||||||
|
|
||||||
|
|
||||||
def main():
    """Module entry point: instantiate and invoke the Ansible module."""
    IdentityApplicationCredentialModule()()


if __name__ == "__main__":
    main()
|
|
||||||
@@ -1,257 +0,0 @@
|
|||||||
#!/usr/bin/python
|
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
|
|
||||||
# Copyright (c) 2026 OpenStack Ansible SIG
|
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
|
||||||
|
|
||||||
DOCUMENTATION = r'''
|
|
||||||
module: baremetal_port_group
|
|
||||||
short_description: Create/Delete Bare Metal port group resources from OpenStack
|
|
||||||
author: OpenStack Ansible SIG
|
|
||||||
description:
|
|
||||||
- Create, update and remove Bare Metal port groups from OpenStack.
|
|
||||||
options:
|
|
||||||
id:
|
|
||||||
description:
|
|
||||||
- ID of the port group.
|
|
||||||
- Will be auto-generated if not specified.
|
|
||||||
type: str
|
|
||||||
aliases: ['uuid']
|
|
||||||
name:
|
|
||||||
description:
|
|
||||||
- Name of the port group.
|
|
||||||
type: str
|
|
||||||
node:
|
|
||||||
description:
|
|
||||||
- ID or Name of the node this resource belongs to.
|
|
||||||
- Required when creating a new port group.
|
|
||||||
type: str
|
|
||||||
address:
|
|
||||||
description:
|
|
||||||
- Physical hardware address of this port group, typically the hardware
|
|
||||||
MAC address.
|
|
||||||
type: str
|
|
||||||
extra:
|
|
||||||
description:
|
|
||||||
- A set of one or more arbitrary metadata key and value pairs.
|
|
||||||
type: dict
|
|
||||||
standalone_ports_supported:
|
|
||||||
description:
|
|
||||||
- Whether the port group supports ports that are not members of this
|
|
||||||
port group.
|
|
||||||
type: bool
|
|
||||||
mode:
|
|
||||||
description:
|
|
||||||
- The port group mode.
|
|
||||||
type: str
|
|
||||||
properties:
|
|
||||||
description:
|
|
||||||
- Key/value properties for the port group.
|
|
||||||
type: dict
|
|
||||||
state:
|
|
||||||
description:
|
|
||||||
- Indicates desired state of the resource.
|
|
||||||
choices: ['present', 'absent']
|
|
||||||
default: present
|
|
||||||
type: str
|
|
||||||
extends_documentation_fragment:
|
|
||||||
- openstack.cloud.openstack
|
|
||||||
'''
|
|
||||||
|
|
||||||
EXAMPLES = r'''
|
|
||||||
- name: Create Bare Metal port group
|
|
||||||
openstack.cloud.baremetal_port_group:
|
|
||||||
cloud: devstack
|
|
||||||
state: present
|
|
||||||
name: bond0
|
|
||||||
node: bm-0
|
|
||||||
address: fa:16:3e:aa:aa:aa
|
|
||||||
mode: '802.3ad'
|
|
||||||
standalone_ports_supported: true
|
|
||||||
register: result
|
|
||||||
|
|
||||||
- name: Update Bare Metal port group
|
|
||||||
openstack.cloud.baremetal_port_group:
|
|
||||||
cloud: devstack
|
|
||||||
state: present
|
|
||||||
id: 1a85ebca-22bf-42eb-ad9e-f640789b8098
|
|
||||||
mode: 'active-backup'
|
|
||||||
properties:
|
|
||||||
miimon: '100'
|
|
||||||
register: result
|
|
||||||
|
|
||||||
- name: Delete Bare Metal port group
|
|
||||||
openstack.cloud.baremetal_port_group:
|
|
||||||
cloud: devstack
|
|
||||||
state: absent
|
|
||||||
id: 1a85ebca-22bf-42eb-ad9e-f640789b8098
|
|
||||||
register: result
|
|
||||||
'''
|
|
||||||
|
|
||||||
RETURN = r'''
|
|
||||||
port_group:
|
|
||||||
description: A port group dictionary, subset of the dictionary keys listed
|
|
||||||
below may be returned, depending on your cloud provider.
|
|
||||||
returned: success
|
|
||||||
type: dict
|
|
||||||
contains:
|
|
||||||
address:
|
|
||||||
description: Physical hardware address of the port group.
|
|
||||||
returned: success
|
|
||||||
type: str
|
|
||||||
created_at:
|
|
||||||
description: Bare Metal port group created at timestamp.
|
|
||||||
returned: success
|
|
||||||
type: str
|
|
||||||
extra:
|
|
||||||
description: A set of one or more arbitrary metadata key and value
|
|
||||||
pairs.
|
|
||||||
returned: success
|
|
||||||
type: dict
|
|
||||||
id:
|
|
||||||
description: The UUID for the Bare Metal port group resource.
|
|
||||||
returned: success
|
|
||||||
type: str
|
|
||||||
links:
|
|
||||||
description: A list of relative links, including the self and
|
|
||||||
bookmark links.
|
|
||||||
returned: success
|
|
||||||
type: list
|
|
||||||
mode:
|
|
||||||
description: The port group mode.
|
|
||||||
returned: success
|
|
||||||
type: str
|
|
||||||
name:
|
|
||||||
description: Bare Metal port group name.
|
|
||||||
returned: success
|
|
||||||
type: str
|
|
||||||
node_id:
|
|
||||||
description: UUID of the Bare Metal node this resource belongs to.
|
|
||||||
returned: success
|
|
||||||
type: str
|
|
||||||
properties:
|
|
||||||
description: Key/value properties for this port group.
|
|
||||||
returned: success
|
|
||||||
type: dict
|
|
||||||
standalone_ports_supported:
|
|
||||||
description: Whether standalone ports are supported.
|
|
||||||
returned: success
|
|
||||||
type: bool
|
|
||||||
updated_at:
|
|
||||||
description: Bare Metal port group updated at timestamp.
|
|
||||||
returned: success
|
|
||||||
type: str
|
|
||||||
'''
|
|
||||||
|
|
||||||
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import (
|
|
||||||
OpenStackModule
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class BaremetalPortGroupModule(OpenStackModule):
    """Create, update or delete an Ironic bare metal port group."""

    argument_spec = dict(
        id=dict(aliases=['uuid']),
        name=dict(),
        node=dict(),
        address=dict(),
        extra=dict(type='dict'),
        standalone_ports_supported=dict(type='bool'),
        mode=dict(),
        properties=dict(type='dict'),
        state=dict(default='present', choices=['present', 'absent']),
    )

    module_kwargs = dict(
        required_one_of=[
            ('id', 'name'),
        ],
        supports_check_mode=True,
    )

    # Attributes that may be passed verbatim when creating a port group.
    _CREATE_KEYS = ('id', 'name', 'address', 'extra',
                    'standalone_ports_supported', 'mode', 'properties')
    # Attributes that may be changed on an existing port group.
    _UPDATE_KEYS = ('name', 'address', 'extra',
                    'standalone_ports_supported', 'mode', 'properties')

    def _find_port_group(self):
        """Return the existing port group (by id, else name) or None."""
        lookup = self.params['id'] or self.params['name']
        if not lookup:
            return None

        try:
            return self.conn.baremetal.find_port_group(lookup)
        except self.sdk.exceptions.ResourceNotFound:
            return None

    def _build_create_attrs(self):
        """Collect creation attributes; fails when 'node' is not given."""
        attrs = {key: self.params[key]
                 for key in self._CREATE_KEYS
                 if self.params[key] is not None}

        node_ref = self.params['node']
        if not node_ref:
            self.fail_json(msg="Parameter 'node' is required when creating a new port group")

        # Resolve the node reference; raises when the node does not exist.
        node = self.conn.baremetal.find_node(node_ref, ignore_missing=False)
        attrs['node_id'] = node['id']
        return attrs

    def _build_update_attrs(self, port_group):
        """Return only the attributes that differ from *port_group*."""
        return {key: self.params[key]
                for key in self._UPDATE_KEYS
                if self.params[key] is not None
                and self.params[key] != port_group.get(key)}

    def _will_change(self, port_group, state):
        """Predict whether applying the module would modify anything."""
        if state == 'absent':
            return bool(port_group)
        if not port_group:
            return True
        return bool(self._build_update_attrs(port_group))

    def run(self):
        state = self.params['state']
        port_group = self._find_port_group()

        if self.ansible.check_mode:
            if state == 'present' and not port_group:
                # Validates input early, e.g. that 'node' was supplied.
                self._build_create_attrs()
            self.exit_json(changed=self._will_change(port_group, state))

        if state == 'present':
            if not port_group:
                created = self.conn.baremetal.create_port_group(
                    **self._build_create_attrs())
                self.exit_json(
                    changed=True,
                    port_group=created.to_dict(computed=False))

            updates = self._build_update_attrs(port_group)
            if updates:
                port_group = self.conn.baremetal.update_port_group(
                    port_group['id'], **updates)

            self.exit_json(
                changed=bool(updates),
                port_group=port_group.to_dict(computed=False))

        # state == 'absent'
        if not port_group:
            self.exit_json(changed=False)

        self.conn.baremetal.delete_port_group(port_group['id'])
        self.exit_json(changed=True)
|
|
||||||
|
|
||||||
|
|
||||||
def main():
    """Module entry point: instantiate and invoke the Ansible module."""
    BaremetalPortGroupModule()()


if __name__ == "__main__":
    main()
|
|
||||||
@@ -80,10 +80,6 @@ options:
|
|||||||
- Magnum's default value for I(is_registry_enabled) is C(false).
|
- Magnum's default value for I(is_registry_enabled) is C(false).
|
||||||
type: bool
|
type: bool
|
||||||
aliases: ['registry_enabled']
|
aliases: ['registry_enabled']
|
||||||
insecure_registry:
|
|
||||||
description:
|
|
||||||
- The URL pointing to users own private insecure docker registry.
|
|
||||||
type: str
|
|
||||||
is_tls_disabled:
|
is_tls_disabled:
|
||||||
description:
|
description:
|
||||||
- Indicates whether the TLS should be disabled.
|
- Indicates whether the TLS should be disabled.
|
||||||
@@ -346,7 +342,6 @@ class COEClusterTemplateModule(OpenStackModule):
|
|||||||
keypair_id=dict(),
|
keypair_id=dict(),
|
||||||
labels=dict(type='raw'),
|
labels=dict(type='raw'),
|
||||||
master_flavor_id=dict(),
|
master_flavor_id=dict(),
|
||||||
insecure_registry=dict(),
|
|
||||||
is_master_lb_enabled=dict(type='bool', default=False,
|
is_master_lb_enabled=dict(type='bool', default=False,
|
||||||
aliases=['master_lb_enabled']),
|
aliases=['master_lb_enabled']),
|
||||||
is_public=dict(type='bool', aliases=['public']),
|
is_public=dict(type='bool', aliases=['public']),
|
||||||
@@ -417,7 +412,6 @@ class COEClusterTemplateModule(OpenStackModule):
|
|||||||
'fixed_subnet', 'flavor_id',
|
'fixed_subnet', 'flavor_id',
|
||||||
'http_proxy', 'https_proxy',
|
'http_proxy', 'https_proxy',
|
||||||
'image_id',
|
'image_id',
|
||||||
'insecure_registry',
|
|
||||||
'is_floating_ip_enabled',
|
'is_floating_ip_enabled',
|
||||||
'is_master_lb_enabled',
|
'is_master_lb_enabled',
|
||||||
'is_public', 'is_registry_enabled',
|
'is_public', 'is_registry_enabled',
|
||||||
@@ -433,9 +427,6 @@ class COEClusterTemplateModule(OpenStackModule):
|
|||||||
if isinstance(labels, str):
|
if isinstance(labels, str):
|
||||||
labels = dict([tuple(kv.split(":"))
|
labels = dict([tuple(kv.split(":"))
|
||||||
for kv in labels.split(",")])
|
for kv in labels.split(",")])
|
||||||
elif isinstance(labels, dict):
|
|
||||||
labels = dict({str(k): str(v)
|
|
||||||
for k, v in labels.items()})
|
|
||||||
if labels != cluster_template['labels']:
|
if labels != cluster_template['labels']:
|
||||||
non_updateable_keys.append('labels')
|
non_updateable_keys.append('labels')
|
||||||
|
|
||||||
@@ -467,7 +458,7 @@ class COEClusterTemplateModule(OpenStackModule):
|
|||||||
'external_network_id', 'fixed_network',
|
'external_network_id', 'fixed_network',
|
||||||
'fixed_subnet', 'flavor_id', 'http_proxy',
|
'fixed_subnet', 'flavor_id', 'http_proxy',
|
||||||
'https_proxy', 'image_id',
|
'https_proxy', 'image_id',
|
||||||
'insecure_registry', 'is_floating_ip_enabled',
|
'is_floating_ip_enabled',
|
||||||
'is_master_lb_enabled', 'is_public',
|
'is_master_lb_enabled', 'is_public',
|
||||||
'is_registry_enabled', 'is_tls_disabled',
|
'is_registry_enabled', 'is_tls_disabled',
|
||||||
'keypair_id', 'master_flavor_id', 'name',
|
'keypair_id', 'master_flavor_id', 'name',
|
||||||
|
|||||||
@@ -41,11 +41,11 @@ extends_documentation_fragment:
|
|||||||
|
|
||||||
EXAMPLES = r'''
|
EXAMPLES = r'''
|
||||||
- name: Fetch all DNS zones
|
- name: Fetch all DNS zones
|
||||||
openstack.cloud.dns_zone_info:
|
openstack.cloud.dns_zones:
|
||||||
cloud: devstack
|
cloud: devstack
|
||||||
|
|
||||||
- name: Fetch DNS zones by name
|
- name: Fetch DNS zones by name
|
||||||
openstack.cloud.dns_zone_info:
|
openstack.cloud.dns_zones:
|
||||||
cloud: devstack
|
cloud: devstack
|
||||||
name: ansible.test.zone.
|
name: ansible.test.zone.
|
||||||
'''
|
'''
|
||||||
|
|||||||
@@ -56,7 +56,7 @@ options:
|
|||||||
description:
|
description:
|
||||||
- When I(update_password) is C(always), then the password will always be
|
- When I(update_password) is C(always), then the password will always be
|
||||||
updated.
|
updated.
|
||||||
- When I(update_password) is C(on_create), then the password is only set
|
- When I(update_password) is C(on_create), then the password is only set
|
||||||
when creating a user.
|
when creating a user.
|
||||||
type: str
|
type: str
|
||||||
extends_documentation_fragment:
|
extends_documentation_fragment:
|
||||||
|
|||||||
@@ -100,8 +100,8 @@ options:
|
|||||||
type: str
|
type: str
|
||||||
state:
|
state:
|
||||||
description:
|
description:
|
||||||
- Should the resource be present, absent or inactive.
|
- Should the resource be present or absent.
|
||||||
choices: [present, absent, inactive]
|
choices: [present, absent]
|
||||||
default: present
|
default: present
|
||||||
type: str
|
type: str
|
||||||
tags:
|
tags:
|
||||||
@@ -122,26 +122,6 @@ options:
|
|||||||
- I(volume) has been deprecated. Use module M(openstack.cloud.volume)
|
- I(volume) has been deprecated. Use module M(openstack.cloud.volume)
|
||||||
instead.
|
instead.
|
||||||
type: str
|
type: str
|
||||||
use_import:
|
|
||||||
description:
|
|
||||||
- Use the 'glance-direct' method of the interoperable image import mechanism.
|
|
||||||
- Should only be used when needed, such as when the user needs the cloud to
|
|
||||||
transform image format.
|
|
||||||
type: bool
|
|
||||||
import_method:
|
|
||||||
description:
|
|
||||||
- Method to use for importing the image. Not all deployments support all methods.
|
|
||||||
- Supports web-download or glance-download.
|
|
||||||
- copy-image is not supported with create actions.
|
|
||||||
- glance-direct is removed from the import method so use_import can be used in that case.
|
|
||||||
type: str
|
|
||||||
choices: [web-download, glance-download]
|
|
||||||
uri:
|
|
||||||
description:
|
|
||||||
- Required only if using the web-download import method.
|
|
||||||
- This url is where the data is made available to the Image service.
|
|
||||||
type: str
|
|
||||||
|
|
||||||
extends_documentation_fragment:
|
extends_documentation_fragment:
|
||||||
- openstack.cloud.openstack
|
- openstack.cloud.openstack
|
||||||
'''
|
'''
|
||||||
@@ -167,7 +147,7 @@ EXAMPLES = r'''
|
|||||||
RETURN = r'''
|
RETURN = r'''
|
||||||
image:
|
image:
|
||||||
description: Dictionary describing the Glance image.
|
description: Dictionary describing the Glance image.
|
||||||
returned: On success when I(state) is C(present) or C(inactive).
|
returned: On success when I(state) is C(present).
|
||||||
type: dict
|
type: dict
|
||||||
contains:
|
contains:
|
||||||
id:
|
id:
|
||||||
@@ -408,18 +388,15 @@ class ImageModule(OpenStackModule):
|
|||||||
owner_domain=dict(aliases=['project_domain']),
|
owner_domain=dict(aliases=['project_domain']),
|
||||||
properties=dict(type='dict', default={}),
|
properties=dict(type='dict', default={}),
|
||||||
ramdisk=dict(),
|
ramdisk=dict(),
|
||||||
state=dict(default='present', choices=['absent', 'present', 'inactive']),
|
state=dict(default='present', choices=['absent', 'present']),
|
||||||
tags=dict(type='list', default=[], elements='str'),
|
tags=dict(type='list', default=[], elements='str'),
|
||||||
visibility=dict(choices=['public', 'private', 'shared', 'community']),
|
visibility=dict(choices=['public', 'private', 'shared', 'community']),
|
||||||
volume=dict(),
|
volume=dict(),
|
||||||
use_import=dict(type='bool'),
|
|
||||||
import_method=dict(choices=['web-download', 'glance-download']),
|
|
||||||
uri=dict()
|
|
||||||
)
|
)
|
||||||
|
|
||||||
module_kwargs = dict(
|
module_kwargs = dict(
|
||||||
mutually_exclusive=[
|
mutually_exclusive=[
|
||||||
('filename', 'volume', 'uri'),
|
('filename', 'volume'),
|
||||||
('visibility', 'is_public'),
|
('visibility', 'is_public'),
|
||||||
],
|
],
|
||||||
)
|
)
|
||||||
@@ -427,8 +404,7 @@ class ImageModule(OpenStackModule):
|
|||||||
# resource attributes obtainable directly from params
|
# resource attributes obtainable directly from params
|
||||||
attr_params = ('id', 'name', 'filename', 'disk_format',
|
attr_params = ('id', 'name', 'filename', 'disk_format',
|
||||||
'container_format', 'wait', 'timeout', 'is_public',
|
'container_format', 'wait', 'timeout', 'is_public',
|
||||||
'is_protected', 'min_disk', 'min_ram', 'volume', 'tags',
|
'is_protected', 'min_disk', 'min_ram', 'volume', 'tags')
|
||||||
'use_import', 'import_method', 'uri')
|
|
||||||
|
|
||||||
def _resolve_visibility(self):
|
def _resolve_visibility(self):
|
||||||
"""resolve a visibility value to be compatible with older versions"""
|
"""resolve a visibility value to be compatible with older versions"""
|
||||||
@@ -526,26 +502,6 @@ class ImageModule(OpenStackModule):
|
|||||||
self.exit_json(changed=changed,
|
self.exit_json(changed=changed,
|
||||||
image=self._return_value(image.id))
|
image=self._return_value(image.id))
|
||||||
|
|
||||||
if image['status'] == 'deactivated':
|
|
||||||
self.conn.image.reactivate_image(image)
|
|
||||||
changed = True
|
|
||||||
elif image['status'] == 'queued':
|
|
||||||
if (
|
|
||||||
self.params['filename']
|
|
||||||
and hasattr(self.conn.image, 'stage_image')):
|
|
||||||
self.conn.image.stage_image(
|
|
||||||
image, filename=self.params['filename'])
|
|
||||||
changed = True
|
|
||||||
elif self.params['filename']:
|
|
||||||
with open(self.params['filename'], 'rb') as image_data:
|
|
||||||
self.conn.image.upload_image(
|
|
||||||
container_format=self.params['container_format'],
|
|
||||||
disk_format=self.params['disk_format'],
|
|
||||||
data=image_data,
|
|
||||||
id=image.id,
|
|
||||||
name=image.name)
|
|
||||||
changed = True
|
|
||||||
|
|
||||||
update_payload = self._build_update(image)
|
update_payload = self._build_update(image)
|
||||||
|
|
||||||
if update_payload:
|
if update_payload:
|
||||||
@@ -561,20 +517,6 @@ class ImageModule(OpenStackModule):
|
|||||||
wait=self.params['wait'],
|
wait=self.params['wait'],
|
||||||
timeout=self.params['timeout'])
|
timeout=self.params['timeout'])
|
||||||
changed = True
|
changed = True
|
||||||
|
|
||||||
elif self.params['state'] == 'inactive' and image is not None:
|
|
||||||
if image['status'] == 'active':
|
|
||||||
self.conn.image.deactivate_image(image)
|
|
||||||
changed = True
|
|
||||||
|
|
||||||
update_payload = self._build_update(image)
|
|
||||||
|
|
||||||
if update_payload:
|
|
||||||
self.conn.image.update_image(image.id, **update_payload)
|
|
||||||
changed = True
|
|
||||||
|
|
||||||
self.exit_json(changed=changed, image=self._return_value(image.id))
|
|
||||||
|
|
||||||
self.exit_json(changed=changed)
|
self.exit_json(changed=changed)
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -142,7 +142,7 @@ pool:
|
|||||||
'''
|
'''
|
||||||
|
|
||||||
EXAMPLES = r'''
|
EXAMPLES = r'''
|
||||||
- name: Create a load-balancer pool
|
- name: Create a load-balancer pool
|
||||||
openstack.cloud.lb_pool:
|
openstack.cloud.lb_pool:
|
||||||
cloud: mycloud
|
cloud: mycloud
|
||||||
lb_algorithm: ROUND_ROBIN
|
lb_algorithm: ROUND_ROBIN
|
||||||
@@ -151,7 +151,7 @@ EXAMPLES = r'''
|
|||||||
protocol: HTTP
|
protocol: HTTP
|
||||||
state: present
|
state: present
|
||||||
|
|
||||||
- name: Delete a load-balancer pool
|
- name: Delete a load-balancer pool
|
||||||
openstack.cloud.lb_pool:
|
openstack.cloud.lb_pool:
|
||||||
cloud: mycloud
|
cloud: mycloud
|
||||||
name: test-pool
|
name: test-pool
|
||||||
|
|||||||
@@ -30,15 +30,6 @@ options:
|
|||||||
description:
|
description:
|
||||||
- Whether this network is externally accessible.
|
- Whether this network is externally accessible.
|
||||||
type: bool
|
type: bool
|
||||||
is_default:
|
|
||||||
description:
|
|
||||||
- Whether this network is default network or not. This is only effective
|
|
||||||
with external networks.
|
|
||||||
type: bool
|
|
||||||
is_vlan_transparent:
|
|
||||||
description:
|
|
||||||
- Whether this network is vlan_transparent or not.
|
|
||||||
type: bool
|
|
||||||
state:
|
state:
|
||||||
description:
|
description:
|
||||||
- Indicate desired state of the resource.
|
- Indicate desired state of the resource.
|
||||||
@@ -199,8 +190,6 @@ class NetworkModule(OpenStackModule):
|
|||||||
shared=dict(type='bool'),
|
shared=dict(type='bool'),
|
||||||
admin_state_up=dict(type='bool'),
|
admin_state_up=dict(type='bool'),
|
||||||
external=dict(type='bool'),
|
external=dict(type='bool'),
|
||||||
is_default=dict(type='bool'),
|
|
||||||
is_vlan_transparent=dict(type='bool'),
|
|
||||||
provider_physical_network=dict(),
|
provider_physical_network=dict(),
|
||||||
provider_network_type=dict(),
|
provider_network_type=dict(),
|
||||||
provider_segmentation_id=dict(type='int'),
|
provider_segmentation_id=dict(type='int'),
|
||||||
@@ -218,8 +207,6 @@ class NetworkModule(OpenStackModule):
|
|||||||
shared = self.params['shared']
|
shared = self.params['shared']
|
||||||
admin_state_up = self.params['admin_state_up']
|
admin_state_up = self.params['admin_state_up']
|
||||||
external = self.params['external']
|
external = self.params['external']
|
||||||
is_default = self.params['is_default']
|
|
||||||
is_vlan_transparent = self.params['is_vlan_transparent']
|
|
||||||
provider_physical_network = self.params['provider_physical_network']
|
provider_physical_network = self.params['provider_physical_network']
|
||||||
provider_network_type = self.params['provider_network_type']
|
provider_network_type = self.params['provider_network_type']
|
||||||
provider_segmentation_id = self.params['provider_segmentation_id']
|
provider_segmentation_id = self.params['provider_segmentation_id']
|
||||||
@@ -257,10 +244,6 @@ class NetworkModule(OpenStackModule):
|
|||||||
kwargs["admin_state_up"] = admin_state_up
|
kwargs["admin_state_up"] = admin_state_up
|
||||||
if external is not None:
|
if external is not None:
|
||||||
kwargs["is_router_external"] = external
|
kwargs["is_router_external"] = external
|
||||||
if is_default is not None:
|
|
||||||
kwargs["is_default"] = is_default
|
|
||||||
if is_vlan_transparent is not None:
|
|
||||||
kwargs["is_vlan_transparent"] = is_vlan_transparent
|
|
||||||
|
|
||||||
if not net:
|
if not net:
|
||||||
net = self.conn.network.create_network(name=name, **kwargs)
|
net = self.conn.network.create_network(name=name, **kwargs)
|
||||||
|
|||||||
@@ -1,183 +0,0 @@
|
|||||||
#!/usr/bin/python
|
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
|
|
||||||
# Copyright (c) 2025 British Broadcasting Corporation
|
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
|
||||||
---
|
|
||||||
module: network_segment
|
|
||||||
short_description: Creates/removes network segments from OpenStack
|
|
||||||
author: OpenStack Ansible SIG
|
|
||||||
description:
|
|
||||||
- Add, update or remove network segments from OpenStack.
|
|
||||||
options:
|
|
||||||
name:
|
|
||||||
description:
|
|
||||||
- Name to be assigned to the segment. Although Neutron allows for
|
|
||||||
non-unique segment names, this module enforces segment name
|
|
||||||
uniqueness.
|
|
||||||
required: true
|
|
||||||
type: str
|
|
||||||
description:
|
|
||||||
description:
|
|
||||||
- Description of the segment
|
|
||||||
type: str
|
|
||||||
network:
|
|
||||||
description:
|
|
||||||
- Name or id of the network to which the segment should be attached
|
|
||||||
type: str
|
|
||||||
network_type:
|
|
||||||
description:
|
|
||||||
- The type of physical network that maps to this segment resource.
|
|
||||||
type: str
|
|
||||||
physical_network:
|
|
||||||
description:
|
|
||||||
- The physical network where this segment object is implemented.
|
|
||||||
type: str
|
|
||||||
segmentation_id:
|
|
||||||
description:
|
|
||||||
- An isolated segment on the physical network. The I(network_type)
|
|
||||||
attribute defines the segmentation model. For example, if the
|
|
||||||
I(network_type) value is vlan, this ID is a vlan identifier. If
|
|
||||||
the I(network_type) value is gre, this ID is a gre key.
|
|
||||||
type: int
|
|
||||||
state:
|
|
||||||
description:
|
|
||||||
- Indicate desired state of the resource.
|
|
||||||
choices: ['present', 'absent']
|
|
||||||
default: present
|
|
||||||
type: str
|
|
||||||
extends_documentation_fragment:
|
|
||||||
- openstack.cloud.openstack
|
|
||||||
'''
|
|
||||||
|
|
||||||
EXAMPLES = '''
|
|
||||||
# Create a VLAN type network segment named 'segment1'.
|
|
||||||
- openstack.cloud.network_segment:
|
|
||||||
cloud: mycloud
|
|
||||||
name: segment1
|
|
||||||
network: my_network
|
|
||||||
network_type: vlan
|
|
||||||
segmentation_id: 2000
|
|
||||||
physical_network: my_physnet
|
|
||||||
state: present
|
|
||||||
'''
|
|
||||||
|
|
||||||
RETURN = '''
|
|
||||||
id:
|
|
||||||
description: Id of segment
|
|
||||||
returned: On success when segment exists.
|
|
||||||
type: str
|
|
||||||
network_segment:
|
|
||||||
description: Dictionary describing the network segment.
|
|
||||||
returned: On success when network segment exists.
|
|
||||||
type: dict
|
|
||||||
contains:
|
|
||||||
description:
|
|
||||||
description: Description
|
|
||||||
type: str
|
|
||||||
id:
|
|
||||||
description: Id
|
|
||||||
type: str
|
|
||||||
name:
|
|
||||||
description: Name
|
|
||||||
type: str
|
|
||||||
network_id:
|
|
||||||
description: Network Id
|
|
||||||
type: str
|
|
||||||
network_type:
|
|
||||||
description: Network type
|
|
||||||
type: str
|
|
||||||
physical_network:
|
|
||||||
description: Physical network
|
|
||||||
type: str
|
|
||||||
segmentation_id:
|
|
||||||
description: Segmentation Id
|
|
||||||
type: int
|
|
||||||
'''
|
|
||||||
|
|
||||||
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
|
|
||||||
|
|
||||||
|
|
||||||
class NetworkSegmentModule(OpenStackModule):
    """Create, update or delete a Neutron network segment."""

    argument_spec = dict(
        name=dict(required=True),
        description=dict(),
        network=dict(),
        network_type=dict(),
        physical_network=dict(),
        segmentation_id=dict(type='int'),
        state=dict(default='present', choices=['absent', 'present'])
    )
|
|
||||||
|
|
||||||
def run(self):
|
|
||||||
|
|
||||||
state = self.params['state']
|
|
||||||
name = self.params['name']
|
|
||||||
network_name_or_id = self.params['network']
|
|
||||||
|
|
||||||
kwargs = {}
|
|
||||||
filters = {}
|
|
||||||
for arg in ('description', 'network_type', 'physical_network', 'segmentation_id'):
|
|
||||||
if self.params[arg] is not None:
|
|
||||||
kwargs[arg] = self.params[arg]
|
|
||||||
|
|
||||||
for arg in ('network_type', 'physical_network'):
|
|
||||||
if self.params[arg] is not None:
|
|
||||||
filters[arg] = self.params[arg]
|
|
||||||
|
|
||||||
if network_name_or_id:
|
|
||||||
network = self.conn.network.find_network(network_name_or_id,
|
|
||||||
ignore_missing=False,
|
|
||||||
**filters)
|
|
||||||
kwargs['network_id'] = network.id
|
|
||||||
filters['network_id'] = network.id
|
|
||||||
|
|
||||||
segment = self.conn.network.find_segment(name, **filters)
|
|
||||||
|
|
||||||
if state == 'present':
|
|
||||||
if not segment:
|
|
||||||
segment = self.conn.network.create_segment(name=name, **kwargs)
|
|
||||||
changed = True
|
|
||||||
else:
|
|
||||||
changed = False
|
|
||||||
update_kwargs = {}
|
|
||||||
|
|
||||||
# As the name is required and all other attributes cannot be
|
|
||||||
# changed (and appear in filters above), we only need to handle
|
|
||||||
# updates to the description here.
|
|
||||||
for arg in ["description"]:
|
|
||||||
if (
|
|
||||||
arg in kwargs
|
|
||||||
# ensure user wants something specific
|
|
||||||
and kwargs[arg] is not None
|
|
||||||
# and this is not what we have right now
|
|
||||||
and kwargs[arg] != segment[arg]
|
|
||||||
):
|
|
||||||
update_kwargs[arg] = kwargs[arg]
|
|
||||||
|
|
||||||
if update_kwargs:
|
|
||||||
segment = self.conn.network.update_segment(
|
|
||||||
segment.id, **update_kwargs
|
|
||||||
)
|
|
||||||
changed = True
|
|
||||||
|
|
||||||
segment = segment.to_dict(computed=False)
|
|
||||||
self.exit(changed=changed, network_segment=segment, id=segment['id'])
|
|
||||||
elif state == 'absent':
|
|
||||||
if not segment:
|
|
||||||
self.exit(changed=False)
|
|
||||||
else:
|
|
||||||
self.conn.network.delete_segment(segment['id'])
|
|
||||||
self.exit(changed=True)
|
|
||||||
|
|
||||||
|
|
||||||
def main():
|
|
||||||
module = NetworkSegmentModule()
|
|
||||||
module()
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
main()
|
|
||||||
@@ -65,12 +65,6 @@ options:
|
|||||||
- Required when creating or updating a RBAC policy rule, ignored when
|
- Required when creating or updating a RBAC policy rule, ignored when
|
||||||
deleting a policy.
|
deleting a policy.
|
||||||
type: str
|
type: str
|
||||||
target_all_project:
|
|
||||||
description:
|
|
||||||
- Whether all projects are targted for access.
|
|
||||||
- If this option set to true, C(target_project_id) is ignored.
|
|
||||||
type: bool
|
|
||||||
default: 'false'
|
|
||||||
state:
|
state:
|
||||||
description:
|
description:
|
||||||
- Whether the RBAC rule should be C(present) or C(absent).
|
- Whether the RBAC rule should be C(present) or C(absent).
|
||||||
@@ -151,8 +145,6 @@ from ansible_collections.openstack.cloud.plugins.module_utils.openstack import O
|
|||||||
|
|
||||||
|
|
||||||
class NeutronRBACPolicy(OpenStackModule):
|
class NeutronRBACPolicy(OpenStackModule):
|
||||||
all_project_symbol = '*'
|
|
||||||
|
|
||||||
argument_spec = dict(
|
argument_spec = dict(
|
||||||
action=dict(choices=['access_as_external', 'access_as_shared']),
|
action=dict(choices=['access_as_external', 'access_as_shared']),
|
||||||
id=dict(aliases=['policy_id']),
|
id=dict(aliases=['policy_id']),
|
||||||
@@ -161,22 +153,17 @@ class NeutronRBACPolicy(OpenStackModule):
|
|||||||
project_id=dict(),
|
project_id=dict(),
|
||||||
state=dict(default='present', choices=['absent', 'present']),
|
state=dict(default='present', choices=['absent', 'present']),
|
||||||
target_project_id=dict(),
|
target_project_id=dict(),
|
||||||
target_all_project=dict(type='bool', default=False),
|
|
||||||
)
|
)
|
||||||
|
|
||||||
module_kwargs = dict(
|
module_kwargs = dict(
|
||||||
required_if=[
|
required_if=[
|
||||||
('state', 'present', ('target_project_id', 'target_all_project',), True),
|
('state', 'present', ('target_project_id',)),
|
||||||
('state', 'absent', ('id',)),
|
('state', 'absent', ('id',)),
|
||||||
],
|
],
|
||||||
supports_check_mode=True,
|
supports_check_mode=True,
|
||||||
)
|
)
|
||||||
|
|
||||||
def run(self):
|
def run(self):
|
||||||
target_all_project = self.params.get('target_all_project')
|
|
||||||
if target_all_project:
|
|
||||||
self.params['target_project_id'] = self.all_project_symbol
|
|
||||||
|
|
||||||
state = self.params['state']
|
state = self.params['state']
|
||||||
|
|
||||||
policy = self._find()
|
policy = self._find()
|
||||||
@@ -275,7 +262,7 @@ class NeutronRBACPolicy(OpenStackModule):
|
|||||||
|
|
||||||
return [p for p in policies
|
return [p for p in policies
|
||||||
if any(p[k] == self.params[k]
|
if any(p[k] == self.params[k]
|
||||||
for k in ['object_id'])]
|
for k in ['object_id', 'target_project_id'])]
|
||||||
|
|
||||||
def _update(self, policy, update):
|
def _update(self, policy, update):
|
||||||
attributes = update.get('attributes')
|
attributes = update.get('attributes')
|
||||||
|
|||||||
@@ -295,11 +295,8 @@ class ObjectModule(OpenStackModule):
|
|||||||
for k in ['data', 'filename']
|
for k in ['data', 'filename']
|
||||||
if self.params[k] is not None)
|
if self.params[k] is not None)
|
||||||
|
|
||||||
object = self.conn.object_store.create_object(container_name, name,
|
return self.conn.object_store.create_object(container_name, name,
|
||||||
**kwargs)
|
**kwargs)
|
||||||
if not object:
|
|
||||||
object = self._find()
|
|
||||||
return object
|
|
||||||
|
|
||||||
def _delete(self, object):
|
def _delete(self, object):
|
||||||
container_name = self.params['container']
|
container_name = self.params['container']
|
||||||
|
|||||||
@@ -269,7 +269,7 @@ class ContainerModule(OpenStackModule):
|
|||||||
if metadata is not None:
|
if metadata is not None:
|
||||||
# Swift metadata keys must be treated as case-insensitive
|
# Swift metadata keys must be treated as case-insensitive
|
||||||
old_metadata = dict((k.lower(), v)
|
old_metadata = dict((k.lower(), v)
|
||||||
for k, v in (container.metadata or {}).items())
|
for k, v in (container.metadata or {}))
|
||||||
new_metadata = dict((k, v) for k, v in metadata.items()
|
new_metadata = dict((k, v) for k, v in metadata.items()
|
||||||
if k.lower() not in old_metadata
|
if k.lower() not in old_metadata
|
||||||
or v != old_metadata[k.lower()])
|
or v != old_metadata[k.lower()])
|
||||||
|
|||||||
@@ -1,202 +0,0 @@
|
|||||||
#!/usr/bin/python
|
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
|
|
||||||
# Copyright (c) 2024 Catalyst Cloud Limited
|
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
|
||||||
|
|
||||||
DOCUMENTATION = r"""
|
|
||||||
---
|
|
||||||
module: object_containers_info
|
|
||||||
short_description: Fetch container info from the OpenStack Swift service.
|
|
||||||
author: OpenStack Ansible SIG
|
|
||||||
description:
|
|
||||||
- Fetch container info from the OpenStack Swift service.
|
|
||||||
options:
|
|
||||||
name:
|
|
||||||
description:
|
|
||||||
- Name of the container
|
|
||||||
type: str
|
|
||||||
aliases: ["container"]
|
|
||||||
prefix:
|
|
||||||
description:
|
|
||||||
- Filter containers by prefix
|
|
||||||
type: str
|
|
||||||
extends_documentation_fragment:
|
|
||||||
- openstack.cloud.openstack
|
|
||||||
"""
|
|
||||||
|
|
||||||
EXAMPLES = r"""
|
|
||||||
- name: List all containers existing on the project
|
|
||||||
openstack.cloud.object_containers_info:
|
|
||||||
|
|
||||||
- name: Retrive a single container by name
|
|
||||||
openstack.cloud.object_containers_info:
|
|
||||||
name: test-container
|
|
||||||
|
|
||||||
- name: Retrieve and filter containers by prefix
|
|
||||||
openstack.cloud.object_containers_info:
|
|
||||||
prefix: test-
|
|
||||||
"""
|
|
||||||
|
|
||||||
RETURN = r"""
|
|
||||||
containers:
|
|
||||||
description: List of dictionaries describing matching containers.
|
|
||||||
returned: always
|
|
||||||
type: list
|
|
||||||
elements: dict
|
|
||||||
contains:
|
|
||||||
bytes:
|
|
||||||
description: The total number of bytes that are stored in Object Storage
|
|
||||||
for the container.
|
|
||||||
type: int
|
|
||||||
sample: 5449
|
|
||||||
bytes_used:
|
|
||||||
description: The count of bytes used in total.
|
|
||||||
type: int
|
|
||||||
sample: 5449
|
|
||||||
content_type:
|
|
||||||
description: The MIME type of the list of names.
|
|
||||||
Only fetched when searching for a container by name.
|
|
||||||
type: str
|
|
||||||
sample: null
|
|
||||||
count:
|
|
||||||
description: The number of objects in the container.
|
|
||||||
type: int
|
|
||||||
sample: 1
|
|
||||||
history_location:
|
|
||||||
description: Enables versioning on the container.
|
|
||||||
Only fetched when searching for a container by name.
|
|
||||||
type: str
|
|
||||||
sample: null
|
|
||||||
id:
|
|
||||||
description: The ID of the container. Equals I(name).
|
|
||||||
type: str
|
|
||||||
sample: "otc"
|
|
||||||
if_none_match:
|
|
||||||
description: "In combination with C(Expect: 100-Continue), specify an
|
|
||||||
C(If-None-Match: *) header to query whether the server
|
|
||||||
already has a copy of the object before any data is sent.
|
|
||||||
Only set when searching for a container by name."
|
|
||||||
type: str
|
|
||||||
sample: null
|
|
||||||
is_content_type_detected:
|
|
||||||
description: If set to C(true), Object Storage guesses the content type
|
|
||||||
based on the file extension and ignores the value sent in
|
|
||||||
the Content-Type header, if present.
|
|
||||||
Only fetched when searching for a container by name.
|
|
||||||
type: bool
|
|
||||||
sample: null
|
|
||||||
is_newest:
|
|
||||||
description: If set to True, Object Storage queries all replicas to
|
|
||||||
return the most recent one. If you omit this header, Object
|
|
||||||
Storage responds faster after it finds one valid replica.
|
|
||||||
Because setting this header to True is more expensive for
|
|
||||||
the back end, use it only when it is absolutely needed.
|
|
||||||
Only fetched when searching for a container by name.
|
|
||||||
type: bool
|
|
||||||
sample: null
|
|
||||||
meta_temp_url_key:
|
|
||||||
description: The secret key value for temporary URLs. If not set,
|
|
||||||
this header is not returned by this operation.
|
|
||||||
Only fetched when searching for a container by name.
|
|
||||||
type: str
|
|
||||||
sample: null
|
|
||||||
meta_temp_url_key_2:
|
|
||||||
description: A second secret key value for temporary URLs. If not set,
|
|
||||||
this header is not returned by this operation.
|
|
||||||
Only fetched when searching for a container by name.
|
|
||||||
type: str
|
|
||||||
sample: null
|
|
||||||
name:
|
|
||||||
description: The name of the container.
|
|
||||||
type: str
|
|
||||||
sample: "otc"
|
|
||||||
object_count:
|
|
||||||
description: The number of objects.
|
|
||||||
type: int
|
|
||||||
sample: 1
|
|
||||||
read_ACL:
|
|
||||||
description: The ACL that grants read access. If not set, this header is
|
|
||||||
not returned by this operation.
|
|
||||||
Only fetched when searching for a container by name.
|
|
||||||
type: str
|
|
||||||
sample: null
|
|
||||||
storage_policy:
|
|
||||||
description: Storage policy used by the container. It is not possible to
|
|
||||||
change policy of an existing container.
|
|
||||||
Only fetched when searching for a container by name.
|
|
||||||
type: str
|
|
||||||
sample: null
|
|
||||||
sync_key:
|
|
||||||
description: The secret key for container synchronization. If not set,
|
|
||||||
this header is not returned by this operation.
|
|
||||||
Only fetched when searching for a container by name.
|
|
||||||
type: str
|
|
||||||
sample: null
|
|
||||||
sync_to:
|
|
||||||
description: The destination for container synchronization. If not set,
|
|
||||||
this header is not returned by this operation.
|
|
||||||
Only fetched when searching for a container by name.
|
|
||||||
type: str
|
|
||||||
sample: null
|
|
||||||
timestamp:
|
|
||||||
description: The timestamp of the transaction.
|
|
||||||
Only fetched when searching for a container by name.
|
|
||||||
type: str
|
|
||||||
sample: null
|
|
||||||
versions_location:
|
|
||||||
description: Enables versioning on this container. The value is the name
|
|
||||||
of another container. You must UTF-8-encode and then
|
|
||||||
URL-encode the name before you include it in the header. To
|
|
||||||
disable versioning, set the header to an empty string.
|
|
||||||
Only fetched when searching for a container by name.
|
|
||||||
type: str
|
|
||||||
sample: null
|
|
||||||
write_ACL:
|
|
||||||
description: The ACL that grants write access. If not set, this header is
|
|
||||||
not returned by this operation.
|
|
||||||
Only fetched when searching for a container by name.
|
|
||||||
type: str
|
|
||||||
sample: null
|
|
||||||
"""
|
|
||||||
|
|
||||||
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
|
|
||||||
|
|
||||||
|
|
||||||
class ObjectContainersInfoModule(OpenStackModule):
|
|
||||||
argument_spec = dict(
|
|
||||||
name=dict(aliases=["container"]),
|
|
||||||
prefix=dict(),
|
|
||||||
)
|
|
||||||
|
|
||||||
module_kwargs = dict(
|
|
||||||
supports_check_mode=True,
|
|
||||||
)
|
|
||||||
|
|
||||||
def run(self):
|
|
||||||
if self.params["name"]:
|
|
||||||
containers = [
|
|
||||||
(
|
|
||||||
self.conn.object_store.get_container_metadata(
|
|
||||||
self.params["name"],
|
|
||||||
).to_dict(computed=False)
|
|
||||||
),
|
|
||||||
]
|
|
||||||
else:
|
|
||||||
query = {}
|
|
||||||
if self.params["prefix"]:
|
|
||||||
query["prefix"] = self.params["prefix"]
|
|
||||||
containers = [
|
|
||||||
c.to_dict(computed=False)
|
|
||||||
for c in self.conn.object_store.containers(**query)
|
|
||||||
]
|
|
||||||
self.exit(changed=False, containers=containers)
|
|
||||||
|
|
||||||
|
|
||||||
def main():
|
|
||||||
module = ObjectContainersInfoModule()
|
|
||||||
module()
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
|
||||||
main()
|
|
||||||
@@ -511,7 +511,7 @@ class PortModule(OpenStackModule):
|
|||||||
**(dict(network_id=network.id) if network else dict()))
|
**(dict(network_id=network.id) if network else dict()))
|
||||||
|
|
||||||
if self.ansible.check_mode:
|
if self.ansible.check_mode:
|
||||||
self.exit_json(changed=self._will_change(port, state))
|
self.exit_json(changed=self._will_change(network, port, state))
|
||||||
|
|
||||||
if state == 'present' and not port:
|
if state == 'present' and not port:
|
||||||
# create port
|
# create port
|
||||||
|
|||||||
@@ -181,7 +181,7 @@ class IdentityProjectModule(OpenStackModule):
|
|||||||
raise ValueError('Duplicate key(s) in extra_specs: {0}'
|
raise ValueError('Duplicate key(s) in extra_specs: {0}'
|
||||||
.format(', '.join(list(duplicate_keys))))
|
.format(', '.join(list(duplicate_keys))))
|
||||||
for k, v in extra_specs.items():
|
for k, v in extra_specs.items():
|
||||||
if k not in project or v != project[k]:
|
if v != project[k]:
|
||||||
attributes[k] = v
|
attributes[k] = v
|
||||||
|
|
||||||
if attributes:
|
if attributes:
|
||||||
|
|||||||
@@ -38,9 +38,6 @@ options:
|
|||||||
groups:
|
groups:
|
||||||
description: Number of groups that are allowed for the project
|
description: Number of groups that are allowed for the project
|
||||||
type: int
|
type: int
|
||||||
health_monitors:
|
|
||||||
description: Maximum number of health monitors that can be created.
|
|
||||||
type: int
|
|
||||||
injected_file_content_bytes:
|
injected_file_content_bytes:
|
||||||
description:
|
description:
|
||||||
- Maximum file size in bytes.
|
- Maximum file size in bytes.
|
||||||
@@ -64,12 +61,6 @@ options:
|
|||||||
key_pairs:
|
key_pairs:
|
||||||
description: Number of key pairs to allow.
|
description: Number of key pairs to allow.
|
||||||
type: int
|
type: int
|
||||||
l7_policies:
|
|
||||||
description: The maximum amount of L7 policies you can create.
|
|
||||||
type: int
|
|
||||||
listeners:
|
|
||||||
description: The maximum number of listeners you can create.
|
|
||||||
type: int
|
|
||||||
load_balancers:
|
load_balancers:
|
||||||
description: The maximum amount of load balancers you can create
|
description: The maximum amount of load balancers you can create
|
||||||
type: int
|
type: int
|
||||||
@@ -77,9 +68,6 @@ options:
|
|||||||
metadata_items:
|
metadata_items:
|
||||||
description: Number of metadata items allowed per instance.
|
description: Number of metadata items allowed per instance.
|
||||||
type: int
|
type: int
|
||||||
members:
|
|
||||||
description: Number of members allowed for loadbalancer.
|
|
||||||
type: int
|
|
||||||
name:
|
name:
|
||||||
description: Name of the OpenStack Project to manage.
|
description: Name of the OpenStack Project to manage.
|
||||||
required: true
|
required: true
|
||||||
@@ -239,33 +227,6 @@ quotas:
|
|||||||
server_groups:
|
server_groups:
|
||||||
description: Number of server groups to allow.
|
description: Number of server groups to allow.
|
||||||
type: int
|
type: int
|
||||||
load_balancer:
|
|
||||||
description: Load_balancer service quotas
|
|
||||||
type: dict
|
|
||||||
contains:
|
|
||||||
health_monitors:
|
|
||||||
description: Maximum number of health monitors that can be
|
|
||||||
created.
|
|
||||||
type: int
|
|
||||||
l7_policies:
|
|
||||||
description: The maximum amount of L7 policies you can
|
|
||||||
create.
|
|
||||||
type: int
|
|
||||||
listeners:
|
|
||||||
description: The maximum number of listeners you can create
|
|
||||||
type: int
|
|
||||||
load_balancers:
|
|
||||||
description: The maximum amount of load balancers one can
|
|
||||||
create
|
|
||||||
type: int
|
|
||||||
members:
|
|
||||||
description: The maximum amount of members for
|
|
||||||
loadbalancer.
|
|
||||||
type: int
|
|
||||||
pools:
|
|
||||||
description: The maximum amount of pools one can create.
|
|
||||||
type: int
|
|
||||||
|
|
||||||
network:
|
network:
|
||||||
description: Network service quotas
|
description: Network service quotas
|
||||||
type: dict
|
type: dict
|
||||||
@@ -273,9 +234,16 @@ quotas:
|
|||||||
floating_ips:
|
floating_ips:
|
||||||
description: Number of floating IP's to allow.
|
description: Number of floating IP's to allow.
|
||||||
type: int
|
type: int
|
||||||
|
load_balancers:
|
||||||
|
description: The maximum amount of load balancers one can
|
||||||
|
create
|
||||||
|
type: int
|
||||||
networks:
|
networks:
|
||||||
description: Number of networks to allow.
|
description: Number of networks to allow.
|
||||||
type: int
|
type: int
|
||||||
|
pools:
|
||||||
|
description: The maximum amount of pools one can create.
|
||||||
|
type: int
|
||||||
ports:
|
ports:
|
||||||
description: Number of Network ports to allow, this needs
|
description: Number of Network ports to allow, this needs
|
||||||
to be greater than the instances limit.
|
to be greater than the instances limit.
|
||||||
@@ -344,7 +312,9 @@ quotas:
|
|||||||
server_groups: 10,
|
server_groups: 10,
|
||||||
network:
|
network:
|
||||||
floating_ips: 50,
|
floating_ips: 50,
|
||||||
|
load_balancers: 10,
|
||||||
networks: 10,
|
networks: 10,
|
||||||
|
pools: 10,
|
||||||
ports: 160,
|
ports: 160,
|
||||||
rbac_policies: 10,
|
rbac_policies: 10,
|
||||||
routers: 10,
|
routers: 10,
|
||||||
@@ -360,13 +330,6 @@ quotas:
|
|||||||
per_volume_gigabytes: -1,
|
per_volume_gigabytes: -1,
|
||||||
snapshots: 10,
|
snapshots: 10,
|
||||||
volumes: 10,
|
volumes: 10,
|
||||||
load_balancer:
|
|
||||||
health_monitors: 10,
|
|
||||||
load_balancers: 10,
|
|
||||||
l7_policies: 10,
|
|
||||||
listeners: 10,
|
|
||||||
pools: 5,
|
|
||||||
members: 5,
|
|
||||||
'''
|
'''
|
||||||
|
|
||||||
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
|
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
|
||||||
@@ -374,8 +337,9 @@ from collections import defaultdict
|
|||||||
|
|
||||||
|
|
||||||
class QuotaModule(OpenStackModule):
|
class QuotaModule(OpenStackModule):
|
||||||
# TODO: Add missing network quota options 'check_limit'
|
# TODO: Add missing network quota options 'check_limit', 'health_monitors',
|
||||||
# to argument_spec, DOCUMENTATION and RETURN docstrings
|
# 'l7_policies', 'listeners' to argument_spec, DOCUMENTATION and
|
||||||
|
# RETURN docstrings
|
||||||
argument_spec = dict(
|
argument_spec = dict(
|
||||||
backup_gigabytes=dict(type='int'),
|
backup_gigabytes=dict(type='int'),
|
||||||
backups=dict(type='int'),
|
backups=dict(type='int'),
|
||||||
@@ -386,7 +350,6 @@ class QuotaModule(OpenStackModule):
|
|||||||
'network_floating_ips']),
|
'network_floating_ips']),
|
||||||
gigabytes=dict(type='int'),
|
gigabytes=dict(type='int'),
|
||||||
groups=dict(type='int'),
|
groups=dict(type='int'),
|
||||||
health_monitors=dict(type='int'),
|
|
||||||
injected_file_content_bytes=dict(type='int',
|
injected_file_content_bytes=dict(type='int',
|
||||||
aliases=['injected_file_size']),
|
aliases=['injected_file_size']),
|
||||||
injected_file_path_bytes=dict(type='int',
|
injected_file_path_bytes=dict(type='int',
|
||||||
@@ -394,11 +357,8 @@ class QuotaModule(OpenStackModule):
|
|||||||
injected_files=dict(type='int'),
|
injected_files=dict(type='int'),
|
||||||
instances=dict(type='int'),
|
instances=dict(type='int'),
|
||||||
key_pairs=dict(type='int', no_log=False),
|
key_pairs=dict(type='int', no_log=False),
|
||||||
l7_policies=dict(type='int'),
|
|
||||||
listeners=dict(type='int'),
|
|
||||||
load_balancers=dict(type='int', aliases=['loadbalancer']),
|
load_balancers=dict(type='int', aliases=['loadbalancer']),
|
||||||
metadata_items=dict(type='int'),
|
metadata_items=dict(type='int'),
|
||||||
members=dict(type='int'),
|
|
||||||
name=dict(required=True),
|
name=dict(required=True),
|
||||||
networks=dict(type='int', aliases=['network']),
|
networks=dict(type='int', aliases=['network']),
|
||||||
per_volume_gigabytes=dict(type='int'),
|
per_volume_gigabytes=dict(type='int'),
|
||||||
@@ -422,9 +382,9 @@ class QuotaModule(OpenStackModule):
|
|||||||
supports_check_mode=True
|
supports_check_mode=True
|
||||||
)
|
)
|
||||||
|
|
||||||
# Some attributes in quota resources don't exist in the api anymore, e.g.
|
# Some attributes in quota resources don't exist in the api anymore, mostly
|
||||||
# compute quotas that were simply network proxies, and pre-Octavia network
|
# compute quotas that were simply network proxies. This map allows marking
|
||||||
# quotas. This map allows marking them to be skipped.
|
# them to be skipped.
|
||||||
exclusion_map = {
|
exclusion_map = {
|
||||||
'compute': {
|
'compute': {
|
||||||
# 'fixed_ips', # Available until Nova API version 2.35
|
# 'fixed_ips', # Available until Nova API version 2.35
|
||||||
@@ -437,39 +397,24 @@ class QuotaModule(OpenStackModule):
|
|||||||
# 'injected_file_path_bytes', # Nova API
|
# 'injected_file_path_bytes', # Nova API
|
||||||
# 'injected_files', # version 2.56
|
# 'injected_files', # version 2.56
|
||||||
},
|
},
|
||||||
'load_balancer': {'name'},
|
'network': {'name'},
|
||||||
'network': {
|
|
||||||
'name',
|
|
||||||
'l7_policies',
|
|
||||||
'load_balancers',
|
|
||||||
'loadbalancer',
|
|
||||||
'health_monitors',
|
|
||||||
'pools',
|
|
||||||
'listeners',
|
|
||||||
},
|
|
||||||
'volume': {'name'},
|
'volume': {'name'},
|
||||||
}
|
}
|
||||||
|
|
||||||
def _get_quotas(self, project):
|
def _get_quotas(self, project):
|
||||||
quota = {}
|
quota = {}
|
||||||
if self.conn.has_service('block-storage'):
|
if self.conn.has_service('block-storage'):
|
||||||
quota['volume'] = self.conn.block_storage.get_quota_set(project.id)
|
quota['volume'] = self.conn.block_storage.get_quota_set(project)
|
||||||
else:
|
else:
|
||||||
self.warn('Block storage service aka volume service is not'
|
self.warn('Block storage service aka volume service is not'
|
||||||
' supported by your cloud. Ignoring volume quotas.')
|
' supported by your cloud. Ignoring volume quotas.')
|
||||||
|
|
||||||
if self.conn.has_service('load-balancer'):
|
|
||||||
quota['load_balancer'] = self.conn.load_balancer.get_quota(
|
|
||||||
project.id)
|
|
||||||
else:
|
|
||||||
self.warn('Loadbalancer service is not supported by your'
|
|
||||||
' cloud. Ignoring loadbalancer quotas.')
|
|
||||||
|
|
||||||
if self.conn.has_service('network'):
|
if self.conn.has_service('network'):
|
||||||
quota['network'] = self.conn.network.get_quota(project.id)
|
quota['network'] = self.conn.network.get_quota(project.id)
|
||||||
else:
|
else:
|
||||||
self.warn('Network service is not supported by your cloud.'
|
self.warn('Network service is not supported by your cloud.'
|
||||||
' Ignoring network quotas.')
|
' Ignoring network quotas.')
|
||||||
|
|
||||||
quota['compute'] = self.conn.compute.get_quota_set(project.id)
|
quota['compute'] = self.conn.compute.get_quota_set(project.id)
|
||||||
|
|
||||||
return quota
|
return quota
|
||||||
@@ -507,6 +452,7 @@ class QuotaModule(OpenStackModule):
|
|||||||
|
|
||||||
# Get current quota values
|
# Get current quota values
|
||||||
quotas = self._get_quotas(project)
|
quotas = self._get_quotas(project)
|
||||||
|
|
||||||
changed = False
|
changed = False
|
||||||
|
|
||||||
if self.ansible.check_mode:
|
if self.ansible.check_mode:
|
||||||
@@ -522,8 +468,6 @@ class QuotaModule(OpenStackModule):
|
|||||||
self.conn.network.delete_quota(project.id)
|
self.conn.network.delete_quota(project.id)
|
||||||
if 'volume' in quotas:
|
if 'volume' in quotas:
|
||||||
self.conn.block_storage.revert_quota_set(project)
|
self.conn.block_storage.revert_quota_set(project)
|
||||||
if 'load_balancer' in quotas:
|
|
||||||
self.conn.load_balancer.delete_quota(project.id)
|
|
||||||
|
|
||||||
# Necessary since we can't tell what the default quotas are
|
# Necessary since we can't tell what the default quotas are
|
||||||
quotas = self._get_quotas(project)
|
quotas = self._get_quotas(project)
|
||||||
@@ -533,18 +477,14 @@ class QuotaModule(OpenStackModule):
|
|||||||
|
|
||||||
if changes:
|
if changes:
|
||||||
if 'volume' in changes:
|
if 'volume' in changes:
|
||||||
quotas['volume'] = self.conn.block_storage.update_quota_set(
|
self.conn.block_storage.update_quota_set(
|
||||||
project.id, **changes['volume'])
|
quotas['volume'], **changes['volume'])
|
||||||
if 'compute' in changes:
|
if 'compute' in changes:
|
||||||
quotas['compute'] = self.conn.compute.update_quota_set(
|
self.conn.compute.update_quota_set(
|
||||||
project.id, **changes['compute'])
|
quotas['compute'], **changes['compute'])
|
||||||
if 'network' in changes:
|
if 'network' in changes:
|
||||||
quotas['network'] = self.conn.network.update_quota(
|
quotas['network'] = self.conn.network.update_quota(
|
||||||
project.id, **changes['network'])
|
project.id, **changes['network'])
|
||||||
if 'load_balancer' in changes:
|
|
||||||
quotas['load_balancer'] = \
|
|
||||||
self.conn.load_balancer.update_quota(
|
|
||||||
project.id, **changes['load_balancer'])
|
|
||||||
changed = True
|
changed = True
|
||||||
|
|
||||||
quotas = {k: v.to_dict(computed=False) for k, v in quotas.items()}
|
quotas = {k: v.to_dict(computed=False) for k, v in quotas.items()}
|
||||||
|
|||||||
@@ -239,11 +239,7 @@ class DnsRecordsetModule(OpenStackModule):
|
|||||||
elif self._needs_update(kwargs, recordset):
|
elif self._needs_update(kwargs, recordset):
|
||||||
recordset = self.conn.dns.update_recordset(recordset, **kwargs)
|
recordset = self.conn.dns.update_recordset(recordset, **kwargs)
|
||||||
changed = True
|
changed = True
|
||||||
# NOTE(gtema): this is a workaround to temporarily bring the
|
self.exit_json(changed=changed, recordset=recordset)
|
||||||
# zone_id param back which may not me populated by SDK
|
|
||||||
rs = recordset.to_dict(computed=False)
|
|
||||||
rs["zone_id"] = zone.id
|
|
||||||
self.exit_json(changed=changed, recordset=rs)
|
|
||||||
elif state == 'absent' and recordset is not None:
|
elif state == 'absent' and recordset is not None:
|
||||||
self.conn.dns.delete_recordset(recordset)
|
self.conn.dns.delete_recordset(recordset)
|
||||||
changed = True
|
changed = True
|
||||||
|
|||||||
@@ -19,9 +19,7 @@ options:
|
|||||||
- Valid only with keystone version 3.
|
- Valid only with keystone version 3.
|
||||||
- Required if I(project) is not specified.
|
- Required if I(project) is not specified.
|
||||||
- When I(project) is specified, then I(domain) will not be used for
|
- When I(project) is specified, then I(domain) will not be used for
|
||||||
scoping the role association, only for finding resources. Deprecated
|
scoping the role association, only for finding resources.
|
||||||
for finding resources, please use I(group_domain), I(project_domain),
|
|
||||||
I(role_domain), or I(user_domain).
|
|
||||||
- "When scoping the role association, I(project) has precedence over
|
- "When scoping the role association, I(project) has precedence over
|
||||||
I(domain) and I(domain) has precedence over I(system): When I(project)
|
I(domain) and I(domain) has precedence over I(system): When I(project)
|
||||||
is specified, then I(domain) and I(system) are not used for role
|
is specified, then I(domain) and I(system) are not used for role
|
||||||
@@ -34,45 +32,24 @@ options:
|
|||||||
- Valid only with keystone version 3.
|
- Valid only with keystone version 3.
|
||||||
- If I(group) is not specified, then I(user) is required. Both may not be
|
- If I(group) is not specified, then I(user) is required. Both may not be
|
||||||
specified at the same time.
|
specified at the same time.
|
||||||
- You can supply I(group_domain) or the deprecated usage of I(domain) to
|
|
||||||
find group resources.
|
|
||||||
type: str
|
|
||||||
group_domain:
|
|
||||||
description:
|
|
||||||
- Name or ID for the domain.
|
|
||||||
- Valid only with keystone version 3.
|
|
||||||
- Only valid for finding group resources.
|
|
||||||
type: str
|
type: str
|
||||||
project:
|
project:
|
||||||
description:
|
description:
|
||||||
- Name or ID of the project to scope the role association to.
|
- Name or ID of the project to scope the role association to.
|
||||||
- If you are using keystone version 2, then this value is required.
|
- If you are using keystone version 2, then this value is required.
|
||||||
- When I(project) is specified, then I(domain) will not be used for
|
- When I(project) is specified, then I(domain) will not be used for
|
||||||
scoping the role association, only for finding resources. Prefer
|
scoping the role association, only for finding resources.
|
||||||
I(group_domain) over I(domain).
|
|
||||||
- "When scoping the role association, I(project) has precedence over
|
- "When scoping the role association, I(project) has precedence over
|
||||||
I(domain) and I(domain) has precedence over I(system): When I(project)
|
I(domain) and I(domain) has precedence over I(system): When I(project)
|
||||||
is specified, then I(domain) and I(system) are not used for role
|
is specified, then I(domain) and I(system) are not used for role
|
||||||
association. When I(domain) is specified, then I(system) will not be
|
association. When I(domain) is specified, then I(system) will not be
|
||||||
used for role association."
|
used for role association."
|
||||||
type: str
|
type: str
|
||||||
project_domain:
|
|
||||||
description:
|
|
||||||
- Name or ID for the domain.
|
|
||||||
- Valid only with keystone version 3.
|
|
||||||
- Only valid for finding project resources.
|
|
||||||
type: str
|
|
||||||
role:
|
role:
|
||||||
description:
|
description:
|
||||||
- Name or ID for the role.
|
- Name or ID for the role.
|
||||||
required: true
|
required: true
|
||||||
type: str
|
type: str
|
||||||
role_domain:
|
|
||||||
description:
|
|
||||||
- Name or ID for the domain.
|
|
||||||
- Valid only with keystone version 3.
|
|
||||||
- Only valid for finding role resources.
|
|
||||||
type: str
|
|
||||||
state:
|
state:
|
||||||
description:
|
description:
|
||||||
- Should the roles be present or absent on the user.
|
- Should the roles be present or absent on the user.
|
||||||
@@ -96,12 +73,6 @@ options:
|
|||||||
- If I(user) is not specified, then I(group) is required. Both may not be
|
- If I(user) is not specified, then I(group) is required. Both may not be
|
||||||
specified at the same time.
|
specified at the same time.
|
||||||
type: str
|
type: str
|
||||||
user_domain:
|
|
||||||
description:
|
|
||||||
- Name or ID for the domain.
|
|
||||||
- Valid only with keystone version 3.
|
|
||||||
- Only valid for finding user resources.
|
|
||||||
type: str
|
|
||||||
extends_documentation_fragment:
|
extends_documentation_fragment:
|
||||||
- openstack.cloud.openstack
|
- openstack.cloud.openstack
|
||||||
'''
|
'''
|
||||||
@@ -130,15 +101,11 @@ class IdentityRoleAssignmentModule(OpenStackModule):
|
|||||||
argument_spec = dict(
|
argument_spec = dict(
|
||||||
domain=dict(),
|
domain=dict(),
|
||||||
group=dict(),
|
group=dict(),
|
||||||
group_domain=dict(type='str'),
|
|
||||||
project=dict(),
|
project=dict(),
|
||||||
project_domain=dict(type='str'),
|
|
||||||
role=dict(required=True),
|
role=dict(required=True),
|
||||||
role_domain=dict(type='str'),
|
|
||||||
state=dict(default='present', choices=['absent', 'present']),
|
state=dict(default='present', choices=['absent', 'present']),
|
||||||
system=dict(),
|
system=dict(),
|
||||||
user=dict(),
|
user=dict(),
|
||||||
user_domain=dict(type='str'),
|
|
||||||
)
|
)
|
||||||
|
|
||||||
module_kwargs = dict(
|
module_kwargs = dict(
|
||||||
@@ -146,33 +113,17 @@ class IdentityRoleAssignmentModule(OpenStackModule):
|
|||||||
('user', 'group'),
|
('user', 'group'),
|
||||||
('domain', 'project', 'system'),
|
('domain', 'project', 'system'),
|
||||||
],
|
],
|
||||||
mutually_exclusive=[
|
|
||||||
('user', 'group'),
|
|
||||||
('project', 'system'), # domain should be part of this
|
|
||||||
],
|
|
||||||
supports_check_mode=True
|
supports_check_mode=True
|
||||||
)
|
)
|
||||||
|
|
||||||
def _find_domain_id(self, domain):
|
|
||||||
if domain is not None:
|
|
||||||
domain = self.conn.identity.find_domain(domain,
|
|
||||||
ignore_missing=False)
|
|
||||||
return dict(domain_id=domain['id'])
|
|
||||||
return dict()
|
|
||||||
|
|
||||||
def run(self):
|
def run(self):
|
||||||
filters = {}
|
filters = {}
|
||||||
group_find_filters = {}
|
find_filters = {}
|
||||||
project_find_filters = {}
|
kwargs = {}
|
||||||
role_find_filters = {}
|
|
||||||
user_find_filters = {}
|
|
||||||
|
|
||||||
role_find_filters.update(self._find_domain_id(
|
|
||||||
self.params['role_domain']))
|
|
||||||
role_name_or_id = self.params['role']
|
role_name_or_id = self.params['role']
|
||||||
role = self.conn.identity.find_role(role_name_or_id,
|
role = self.conn.identity.find_role(role_name_or_id,
|
||||||
ignore_missing=False,
|
ignore_missing=False)
|
||||||
**role_find_filters)
|
|
||||||
filters['role_id'] = role['id']
|
filters['role_id'] = role['id']
|
||||||
|
|
||||||
domain_name_or_id = self.params['domain']
|
domain_name_or_id = self.params['domain']
|
||||||
@@ -180,31 +131,22 @@ class IdentityRoleAssignmentModule(OpenStackModule):
|
|||||||
domain = self.conn.identity.find_domain(
|
domain = self.conn.identity.find_domain(
|
||||||
domain_name_or_id, ignore_missing=False)
|
domain_name_or_id, ignore_missing=False)
|
||||||
filters['scope_domain_id'] = domain['id']
|
filters['scope_domain_id'] = domain['id']
|
||||||
group_find_filters['domain_id'] = domain['id']
|
find_filters['domain_id'] = domain['id']
|
||||||
project_find_filters['domain_id'] = domain['id']
|
kwargs['domain'] = domain['id']
|
||||||
user_find_filters['domain_id'] = domain['id']
|
|
||||||
|
|
||||||
user_name_or_id = self.params['user']
|
user_name_or_id = self.params['user']
|
||||||
if user_name_or_id is not None:
|
if user_name_or_id is not None:
|
||||||
user_find_filters.update(self._find_domain_id(
|
|
||||||
self.params['user_domain']))
|
|
||||||
user = self.conn.identity.find_user(
|
user = self.conn.identity.find_user(
|
||||||
user_name_or_id, ignore_missing=False,
|
user_name_or_id, ignore_missing=False, **find_filters)
|
||||||
**user_find_filters)
|
|
||||||
filters['user_id'] = user['id']
|
filters['user_id'] = user['id']
|
||||||
else:
|
kwargs['user'] = user['id']
|
||||||
user = None
|
|
||||||
|
|
||||||
group_name_or_id = self.params['group']
|
group_name_or_id = self.params['group']
|
||||||
if group_name_or_id is not None:
|
if group_name_or_id is not None:
|
||||||
group_find_filters.update(self._find_domain_id(
|
|
||||||
self.params['group_domain']))
|
|
||||||
group = self.conn.identity.find_group(
|
group = self.conn.identity.find_group(
|
||||||
group_name_or_id, ignore_missing=False,
|
group_name_or_id, ignore_missing=False, **find_filters)
|
||||||
**group_find_filters)
|
|
||||||
filters['group_id'] = group['id']
|
filters['group_id'] = group['id']
|
||||||
else:
|
kwargs['group'] = group['id']
|
||||||
group = None
|
|
||||||
|
|
||||||
system_name = self.params['system']
|
system_name = self.params['system']
|
||||||
if system_name is not None:
|
if system_name is not None:
|
||||||
@@ -212,14 +154,14 @@ class IdentityRoleAssignmentModule(OpenStackModule):
|
|||||||
if 'scope_domain_id' not in filters:
|
if 'scope_domain_id' not in filters:
|
||||||
filters['scope.system'] = system_name
|
filters['scope.system'] = system_name
|
||||||
|
|
||||||
|
kwargs['system'] = system_name
|
||||||
|
|
||||||
project_name_or_id = self.params['project']
|
project_name_or_id = self.params['project']
|
||||||
if project_name_or_id is not None:
|
if project_name_or_id is not None:
|
||||||
project_find_filters.update(self._find_domain_id(
|
|
||||||
self.params['project_domain']))
|
|
||||||
project = self.conn.identity.find_project(
|
project = self.conn.identity.find_project(
|
||||||
project_name_or_id, ignore_missing=False,
|
project_name_or_id, ignore_missing=False, **find_filters)
|
||||||
**project_find_filters)
|
|
||||||
filters['scope_project_id'] = project['id']
|
filters['scope_project_id'] = project['id']
|
||||||
|
kwargs['project'] = project['id']
|
||||||
|
|
||||||
# project has precedence over domain and system
|
# project has precedence over domain and system
|
||||||
filters.pop('scope_domain_id', None)
|
filters.pop('scope_domain_id', None)
|
||||||
@@ -234,50 +176,10 @@ class IdentityRoleAssignmentModule(OpenStackModule):
|
|||||||
or (state == 'absent' and role_assignments)))
|
or (state == 'absent' and role_assignments)))
|
||||||
|
|
||||||
if state == 'present' and not role_assignments:
|
if state == 'present' and not role_assignments:
|
||||||
if 'scope_domain_id' in filters:
|
self.conn.grant_role(role['id'], **kwargs)
|
||||||
if user is not None:
|
|
||||||
self.conn.identity.assign_domain_role_to_user(
|
|
||||||
filters['scope_domain_id'], user, role)
|
|
||||||
else:
|
|
||||||
self.conn.identity.assign_domain_role_to_group(
|
|
||||||
filters['scope_domain_id'], group, role)
|
|
||||||
elif 'scope_project_id' in filters:
|
|
||||||
if user is not None:
|
|
||||||
self.conn.identity.assign_project_role_to_user(
|
|
||||||
filters['scope_project_id'], user, role)
|
|
||||||
else:
|
|
||||||
self.conn.identity.assign_project_role_to_group(
|
|
||||||
filters['scope_project_id'], group, role)
|
|
||||||
elif 'scope.system' in filters:
|
|
||||||
if user is not None:
|
|
||||||
self.conn.identity.assign_system_role_to_user(
|
|
||||||
user, role, filters['scope.system'])
|
|
||||||
else:
|
|
||||||
self.conn.identity.assign_system_role_to_group(
|
|
||||||
group, role, filters['scope.system'])
|
|
||||||
self.exit_json(changed=True)
|
self.exit_json(changed=True)
|
||||||
elif state == 'absent' and role_assignments:
|
elif state == 'absent' and role_assignments:
|
||||||
if 'scope_domain_id' in filters:
|
self.conn.revoke_role(role['id'], **kwargs)
|
||||||
if user is not None:
|
|
||||||
self.conn.identity.unassign_domain_role_from_user(
|
|
||||||
filters['scope_domain_id'], user, role)
|
|
||||||
else:
|
|
||||||
self.conn.identity.unassign_domain_role_from_group(
|
|
||||||
filters['scope_domain_id'], group, role)
|
|
||||||
elif 'scope_project_id' in filters:
|
|
||||||
if user is not None:
|
|
||||||
self.conn.identity.unassign_project_role_from_user(
|
|
||||||
filters['scope_project_id'], user, role)
|
|
||||||
else:
|
|
||||||
self.conn.identity.unassign_project_role_from_group(
|
|
||||||
filters['scope_project_id'], group, role)
|
|
||||||
elif 'scope.system' in filters:
|
|
||||||
if user is not None:
|
|
||||||
self.conn.identity.unassign_system_role_from_user(
|
|
||||||
user, role, filters['scope.system'])
|
|
||||||
else:
|
|
||||||
self.conn.identity.unassign_system_role_from_group(
|
|
||||||
group, role, filters['scope.system'])
|
|
||||||
self.exit_json(changed=True)
|
self.exit_json(changed=True)
|
||||||
else:
|
else:
|
||||||
self.exit_json(changed=False)
|
self.exit_json(changed=False)
|
||||||
|
|||||||
@@ -372,10 +372,6 @@ class RouterModule(OpenStackModule):
|
|||||||
for p in external_fixed_ips:
|
for p in external_fixed_ips:
|
||||||
if 'ip_address' in p:
|
if 'ip_address' in p:
|
||||||
req_fip_map[p['subnet_id']].add(p['ip_address'])
|
req_fip_map[p['subnet_id']].add(p['ip_address'])
|
||||||
elif p['subnet_id'] in cur_fip_map:
|
|
||||||
# handle idempotence of updating with no explicit ip
|
|
||||||
req_fip_map[p['subnet_id']].update(
|
|
||||||
cur_fip_map[p['subnet_id']])
|
|
||||||
|
|
||||||
# Check if external ip addresses need to be added
|
# Check if external ip addresses need to be added
|
||||||
for fip in external_fixed_ips:
|
for fip in external_fixed_ips:
|
||||||
@@ -468,7 +464,7 @@ class RouterModule(OpenStackModule):
|
|||||||
subnet = self.conn.network.find_subnet(
|
subnet = self.conn.network.find_subnet(
|
||||||
iface['subnet_id'], ignore_missing=False, **filters)
|
iface['subnet_id'], ignore_missing=False, **filters)
|
||||||
fip = dict(subnet_id=subnet.id)
|
fip = dict(subnet_id=subnet.id)
|
||||||
if iface.get('ip_address', None) is not None:
|
if 'ip_address' in iface:
|
||||||
fip['ip_address'] = iface['ip_address']
|
fip['ip_address'] = iface['ip_address']
|
||||||
external_fixed_ips.append(fip)
|
external_fixed_ips.append(fip)
|
||||||
|
|
||||||
@@ -620,13 +616,9 @@ class RouterModule(OpenStackModule):
|
|||||||
router = self.conn.network.find_router(name, **query_filters)
|
router = self.conn.network.find_router(name, **query_filters)
|
||||||
network = None
|
network = None
|
||||||
if network_name_or_id:
|
if network_name_or_id:
|
||||||
# First try to find a network in the specified project.
|
|
||||||
network = self.conn.network.find_network(network_name_or_id,
|
network = self.conn.network.find_network(network_name_or_id,
|
||||||
|
ignore_missing=False,
|
||||||
**query_filters)
|
**query_filters)
|
||||||
if not network:
|
|
||||||
# Fall back to a global search for the network.
|
|
||||||
network = self.conn.network.find_network(network_name_or_id,
|
|
||||||
ignore_missing=False)
|
|
||||||
|
|
||||||
# Validate and cache the subnet IDs so we can avoid duplicate checks
|
# Validate and cache the subnet IDs so we can avoid duplicate checks
|
||||||
# and expensive API calls.
|
# and expensive API calls.
|
||||||
|
|||||||
@@ -205,12 +205,6 @@ options:
|
|||||||
choices: [present, absent]
|
choices: [present, absent]
|
||||||
default: present
|
default: present
|
||||||
type: str
|
type: str
|
||||||
tags:
|
|
||||||
description:
|
|
||||||
- A list of tags should be added to instance
|
|
||||||
type: list
|
|
||||||
elements: str
|
|
||||||
default: []
|
|
||||||
terminate_volume:
|
terminate_volume:
|
||||||
description:
|
description:
|
||||||
- If C(true), delete volume when deleting the instance and if it has
|
- If C(true), delete volume when deleting the instance and if it has
|
||||||
@@ -762,7 +756,6 @@ server:
|
|||||||
description: A list of associated tags.
|
description: A list of associated tags.
|
||||||
returned: success
|
returned: success
|
||||||
type: list
|
type: list
|
||||||
elements: str
|
|
||||||
task_state:
|
task_state:
|
||||||
description: The task state of this server.
|
description: The task state of this server.
|
||||||
returned: success
|
returned: success
|
||||||
@@ -832,7 +825,6 @@ class ServerModule(OpenStackModule):
|
|||||||
scheduler_hints=dict(type='dict'),
|
scheduler_hints=dict(type='dict'),
|
||||||
security_groups=dict(default=[], type='list', elements='str'),
|
security_groups=dict(default=[], type='list', elements='str'),
|
||||||
state=dict(default='present', choices=['absent', 'present']),
|
state=dict(default='present', choices=['absent', 'present']),
|
||||||
tags=dict(type='list', default=[], elements='str'),
|
|
||||||
terminate_volume=dict(default=False, type='bool'),
|
terminate_volume=dict(default=False, type='bool'),
|
||||||
userdata=dict(),
|
userdata=dict(),
|
||||||
volume_size=dict(type='int'),
|
volume_size=dict(type='int'),
|
||||||
@@ -898,8 +890,7 @@ class ServerModule(OpenStackModule):
|
|||||||
return {
|
return {
|
||||||
**self._build_update_ips(server),
|
**self._build_update_ips(server),
|
||||||
**self._build_update_security_groups(server),
|
**self._build_update_security_groups(server),
|
||||||
**self._build_update_server(server),
|
**self._build_update_server(server)}
|
||||||
**self._build_update_tags(server)}
|
|
||||||
|
|
||||||
def _build_update_ips(self, server):
|
def _build_update_ips(self, server):
|
||||||
auto_ip = self.params['auto_ip']
|
auto_ip = self.params['auto_ip']
|
||||||
@@ -1039,16 +1030,9 @@ class ServerModule(OpenStackModule):
|
|||||||
|
|
||||||
return update
|
return update
|
||||||
|
|
||||||
def _build_update_tags(self, server):
|
|
||||||
required_tags = self.params.get('tags')
|
|
||||||
if set(server["tags"]) == set(required_tags):
|
|
||||||
return {}
|
|
||||||
update = dict(tags=required_tags)
|
|
||||||
return update
|
|
||||||
|
|
||||||
def _create(self):
|
def _create(self):
|
||||||
for k in ['auto_ip', 'floating_ips', 'floating_ip_pools']:
|
for k in ['auto_ip', 'floating_ips', 'floating_ip_pools']:
|
||||||
if self.params[k] \
|
if self.params[k] is not None \
|
||||||
and self.params['wait'] is False:
|
and self.params['wait'] is False:
|
||||||
# floating ip addresses will only be added if
|
# floating ip addresses will only be added if
|
||||||
# we wait until the server has been created
|
# we wait until the server has been created
|
||||||
@@ -1088,7 +1072,7 @@ class ServerModule(OpenStackModule):
|
|||||||
for k in ['auto_ip', 'availability_zone', 'boot_from_volume',
|
for k in ['auto_ip', 'availability_zone', 'boot_from_volume',
|
||||||
'boot_volume', 'config_drive', 'description', 'key_name',
|
'boot_volume', 'config_drive', 'description', 'key_name',
|
||||||
'name', 'network', 'reuse_ips', 'scheduler_hints',
|
'name', 'network', 'reuse_ips', 'scheduler_hints',
|
||||||
'security_groups', 'tags', 'terminate_volume', 'timeout',
|
'security_groups', 'terminate_volume', 'timeout',
|
||||||
'userdata', 'volume_size', 'volumes', 'wait']:
|
'userdata', 'volume_size', 'volumes', 'wait']:
|
||||||
if self.params[k] is not None:
|
if self.params[k] is not None:
|
||||||
args[k] = self.params[k]
|
args[k] = self.params[k]
|
||||||
@@ -1107,20 +1091,10 @@ class ServerModule(OpenStackModule):
|
|||||||
server.id,
|
server.id,
|
||||||
**dict((k, self.params[k])
|
**dict((k, self.params[k])
|
||||||
for k in ['wait', 'timeout', 'delete_ips']))
|
for k in ['wait', 'timeout', 'delete_ips']))
|
||||||
# Nova returns server for some time with the "DELETED" state. Our tests
|
|
||||||
# are not able to handle this, so wait for server to really disappear.
|
|
||||||
if self.params['wait']:
|
|
||||||
for count in self.sdk.utils.iterate_timeout(
|
|
||||||
timeout=self.params['timeout'],
|
|
||||||
message="Timeout waiting for server to be absent"
|
|
||||||
):
|
|
||||||
if self.conn.compute.find_server(server.id) is None:
|
|
||||||
break
|
|
||||||
|
|
||||||
def _update(self, server, update):
|
def _update(self, server, update):
|
||||||
server = self._update_ips(server, update)
|
server = self._update_ips(server, update)
|
||||||
server = self._update_security_groups(server, update)
|
server = self._update_security_groups(server, update)
|
||||||
server = self._update_tags(server, update)
|
|
||||||
server = self._update_server(server, update)
|
server = self._update_server(server, update)
|
||||||
# Refresh server attributes after security groups etc. have changed
|
# Refresh server attributes after security groups etc. have changed
|
||||||
#
|
#
|
||||||
@@ -1193,16 +1167,6 @@ class ServerModule(OpenStackModule):
|
|||||||
# be postponed until all updates have been applied.
|
# be postponed until all updates have been applied.
|
||||||
return server
|
return server
|
||||||
|
|
||||||
def _update_tags(self, server, update):
|
|
||||||
tags = update.get('tags')
|
|
||||||
|
|
||||||
self.conn.compute.put(
|
|
||||||
"/servers/{server_id}/tags".format(server_id=server['id']),
|
|
||||||
json={"tags": tags},
|
|
||||||
microversion="2.26"
|
|
||||||
)
|
|
||||||
return server
|
|
||||||
|
|
||||||
def _parse_metadata(self, metadata):
|
def _parse_metadata(self, metadata):
|
||||||
if not metadata:
|
if not metadata:
|
||||||
return {}
|
return {}
|
||||||
|
|||||||
@@ -136,9 +136,6 @@ class ServerActionModule(OpenStackModule):
|
|||||||
# rebuild does not depend on state
|
# rebuild does not depend on state
|
||||||
will_change = (
|
will_change = (
|
||||||
(action == 'rebuild')
|
(action == 'rebuild')
|
||||||
# `reboot_*` actions do not change state, servers remain `ACTIVE`
|
|
||||||
or (action == 'reboot_hard')
|
|
||||||
or (action == 'reboot_soft')
|
|
||||||
or (action == 'lock' and not server['is_locked'])
|
or (action == 'lock' and not server['is_locked'])
|
||||||
or (action == 'unlock' and server['is_locked'])
|
or (action == 'unlock' and server['is_locked'])
|
||||||
or server.status.lower() not in [a.lower()
|
or server.status.lower() not in [a.lower()
|
||||||
|
|||||||
@@ -377,9 +377,7 @@ class ServerInfoModule(OpenStackModule):
|
|||||||
kwargs['name_or_id'] = self.params['name']
|
kwargs['name_or_id'] = self.params['name']
|
||||||
|
|
||||||
self.exit(changed=False,
|
self.exit(changed=False,
|
||||||
servers=[server.to_dict(computed=False)
|
servers=[server.to_dict(computed=False) for server in
|
||||||
if hasattr(server, "to_dict") else server
|
|
||||||
for server in
|
|
||||||
self.conn.search_servers(**kwargs)])
|
self.conn.search_servers(**kwargs)])
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -1,520 +0,0 @@
|
|||||||
#!/usr/bin/python
|
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
|
|
||||||
# Copyright (c) 2025 VEXXHOST, Inc.
|
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
|
||||||
|
|
||||||
DOCUMENTATION = r"""
|
|
||||||
---
|
|
||||||
module: share_type
|
|
||||||
short_description: Manage OpenStack share type
|
|
||||||
author: OpenStack Ansible SIG
|
|
||||||
description:
|
|
||||||
- Add, remove or update share types in OpenStack Manila.
|
|
||||||
options:
|
|
||||||
name:
|
|
||||||
description:
|
|
||||||
- Share type name or id.
|
|
||||||
- For private share types, the UUID must be used instead of name.
|
|
||||||
required: true
|
|
||||||
type: str
|
|
||||||
description:
|
|
||||||
description:
|
|
||||||
- Description of the share type.
|
|
||||||
type: str
|
|
||||||
extra_specs:
|
|
||||||
description:
|
|
||||||
- Dictionary of share type extra specifications
|
|
||||||
type: dict
|
|
||||||
is_public:
|
|
||||||
description:
|
|
||||||
- Make share type accessible to the public.
|
|
||||||
- Can be updated after creation using Manila API direct updates.
|
|
||||||
type: bool
|
|
||||||
default: true
|
|
||||||
driver_handles_share_servers:
|
|
||||||
description:
|
|
||||||
- Boolean flag indicating whether share servers are managed by the driver.
|
|
||||||
- Required for share type creation.
|
|
||||||
- This is automatically added to extra_specs as 'driver_handles_share_servers'.
|
|
||||||
type: bool
|
|
||||||
default: true
|
|
||||||
state:
|
|
||||||
description:
|
|
||||||
- Indicate desired state of the resource.
|
|
||||||
choices: ['present', 'absent']
|
|
||||||
default: present
|
|
||||||
type: str
|
|
||||||
extends_documentation_fragment:
|
|
||||||
- openstack.cloud.openstack
|
|
||||||
"""
|
|
||||||
|
|
||||||
EXAMPLES = r"""
|
|
||||||
- name: Delete share type by name
|
|
||||||
openstack.cloud.share_type:
|
|
||||||
name: test_share_type
|
|
||||||
state: absent
|
|
||||||
|
|
||||||
- name: Delete share type by id
|
|
||||||
openstack.cloud.share_type:
|
|
||||||
name: fbadfa6b-5f17-4c26-948e-73b94de57b42
|
|
||||||
state: absent
|
|
||||||
|
|
||||||
- name: Create share type
|
|
||||||
openstack.cloud.share_type:
|
|
||||||
name: manila-generic-share
|
|
||||||
state: present
|
|
||||||
driver_handles_share_servers: true
|
|
||||||
extra_specs:
|
|
||||||
share_backend_name: GENERIC_BACKEND
|
|
||||||
snapshot_support: true
|
|
||||||
create_share_from_snapshot_support: true
|
|
||||||
description: Generic share type
|
|
||||||
is_public: true
|
|
||||||
"""
|
|
||||||
|
|
||||||
RETURN = """
|
|
||||||
share_type:
|
|
||||||
description: Dictionary describing share type
|
|
||||||
returned: On success when I(state) is 'present'
|
|
||||||
type: dict
|
|
||||||
contains:
|
|
||||||
name:
|
|
||||||
description: share type name
|
|
||||||
returned: success
|
|
||||||
type: str
|
|
||||||
sample: manila-generic-share
|
|
||||||
extra_specs:
|
|
||||||
description: share type extra specifications
|
|
||||||
returned: success
|
|
||||||
type: dict
|
|
||||||
sample: {"share_backend_name": "GENERIC_BACKEND", "snapshot_support": "true"}
|
|
||||||
is_public:
|
|
||||||
description: whether the share type is public
|
|
||||||
returned: success
|
|
||||||
type: bool
|
|
||||||
sample: True
|
|
||||||
description:
|
|
||||||
description: share type description
|
|
||||||
returned: success
|
|
||||||
type: str
|
|
||||||
sample: Generic share type
|
|
||||||
driver_handles_share_servers:
|
|
||||||
description: whether driver handles share servers
|
|
||||||
returned: success
|
|
||||||
type: bool
|
|
||||||
sample: true
|
|
||||||
id:
|
|
||||||
description: share type uuid
|
|
||||||
returned: success
|
|
||||||
type: str
|
|
||||||
sample: b75d8c5c-a6d8-4a5d-8c86-ef4f1298525d
|
|
||||||
"""
|
|
||||||
|
|
||||||
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import (
|
|
||||||
OpenStackModule,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Manila API microversion 2.50 provides complete share type information
|
|
||||||
# including is_default field and description
|
|
||||||
# Reference: https://docs.openstack.org/api-ref/shared-file-system/#show-share-type-detail
|
|
||||||
MANILA_MICROVERSION = "2.50"
|
|
||||||
|
|
||||||
|
|
||||||
class ShareTypeModule(OpenStackModule):
|
|
||||||
argument_spec = dict(
|
|
||||||
name=dict(type="str", required=True),
|
|
||||||
description=dict(type="str", required=False),
|
|
||||||
extra_specs=dict(type="dict", required=False),
|
|
||||||
is_public=dict(type="bool", default=True),
|
|
||||||
driver_handles_share_servers=dict(type="bool", default=True),
|
|
||||||
state=dict(type="str", default="present", choices=["absent", "present"]),
|
|
||||||
)
|
|
||||||
module_kwargs = dict(
|
|
||||||
required_if=[("state", "present", ["driver_handles_share_servers"])],
|
|
||||||
supports_check_mode=True,
|
|
||||||
)
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def _extract_result(details):
|
|
||||||
if details is not None:
|
|
||||||
if hasattr(details, "to_dict"):
|
|
||||||
result = details.to_dict(computed=False)
|
|
||||||
elif isinstance(details, dict):
|
|
||||||
result = details.copy()
|
|
||||||
else:
|
|
||||||
result = dict(details) if details else {}
|
|
||||||
|
|
||||||
# Normalize is_public field from API response
|
|
||||||
if result and "os-share-type-access:is_public" in result:
|
|
||||||
result["is_public"] = result["os-share-type-access:is_public"]
|
|
||||||
elif result and "share_type_access:is_public" in result:
|
|
||||||
result["is_public"] = result["share_type_access:is_public"]
|
|
||||||
|
|
||||||
return result
|
|
||||||
return {}
|
|
||||||
|
|
||||||
def _find_share_type(self, name_or_id):
|
|
||||||
"""
|
|
||||||
Find share type by name or ID with comprehensive information.
|
|
||||||
|
|
||||||
Uses direct Manila API calls since SDK methods are not available.
|
|
||||||
Handles both public and private share types.
|
|
||||||
"""
|
|
||||||
# Try direct access first for complete information
|
|
||||||
share_type = self._find_by_direct_access(name_or_id)
|
|
||||||
if share_type:
|
|
||||||
return share_type
|
|
||||||
|
|
||||||
# If direct access fails, try searching in public listing
|
|
||||||
# This handles cases where we have the name but need to find the ID
|
|
||||||
try:
|
|
||||||
response = self.conn.shared_file_system.get("/types")
|
|
||||||
share_types = response.json().get("share_types", [])
|
|
||||||
|
|
||||||
for share_type in share_types:
|
|
||||||
if share_type["name"] == name_or_id or share_type["id"] == name_or_id:
|
|
||||||
# Found by name, now get complete info using the ID
|
|
||||||
result = self._find_by_direct_access(share_type["id"])
|
|
||||||
if result:
|
|
||||||
return result
|
|
||||||
except Exception:
|
|
||||||
pass
|
|
||||||
|
|
||||||
return None
|
|
||||||
|
|
||||||
def _find_by_direct_access(self, name_or_id):
|
|
||||||
"""
|
|
||||||
Find share type by direct access using Manila API.
|
|
||||||
|
|
||||||
Uses microversion to get complete information including description and is_default.
|
|
||||||
Falls back to basic API if microversion is not supported.
|
|
||||||
"""
|
|
||||||
# Try with microversion first for complete information
|
|
||||||
try:
|
|
||||||
response = self.conn.shared_file_system.get(
|
|
||||||
f"/types/{name_or_id}", microversion=MANILA_MICROVERSION
|
|
||||||
)
|
|
||||||
share_type_data = response.json().get("share_type", {})
|
|
||||||
if share_type_data:
|
|
||||||
return share_type_data
|
|
||||||
except Exception:
|
|
||||||
pass
|
|
||||||
|
|
||||||
# Fallback: try without microversion for basic information
|
|
||||||
try:
|
|
||||||
response = self.conn.shared_file_system.get(f"/types/{name_or_id}")
|
|
||||||
share_type_data = response.json().get("share_type", {})
|
|
||||||
if share_type_data:
|
|
||||||
return share_type_data
|
|
||||||
except Exception:
|
|
||||||
pass
|
|
||||||
|
|
||||||
return None
|
|
||||||
|
|
||||||
def run(self):
|
|
||||||
state = self.params["state"]
|
|
||||||
name_or_id = self.params["name"]
|
|
||||||
|
|
||||||
# Find existing share type (similar to volume_type.py pattern)
|
|
||||||
share_type = self._find_share_type(name_or_id)
|
|
||||||
|
|
||||||
if self.ansible.check_mode:
|
|
||||||
self.exit_json(changed=self._will_change(state, share_type))
|
|
||||||
|
|
||||||
if state == "present" and not share_type:
|
|
||||||
# Create type
|
|
||||||
create_result = self._create()
|
|
||||||
share_type = self._extract_result(create_result)
|
|
||||||
self.exit_json(changed=True, share_type=share_type)
|
|
||||||
|
|
||||||
elif state == "present" and share_type:
|
|
||||||
# Update type
|
|
||||||
update = self._build_update(share_type)
|
|
||||||
update_result = self._update(share_type, update)
|
|
||||||
share_type = self._extract_result(update_result)
|
|
||||||
self.exit_json(changed=bool(update), share_type=share_type)
|
|
||||||
|
|
||||||
elif state == "absent" and share_type:
|
|
||||||
# Delete type
|
|
||||||
self._delete(share_type)
|
|
||||||
self.exit_json(changed=True)
|
|
||||||
|
|
||||||
else:
|
|
||||||
# state == 'absent' and not share_type
|
|
||||||
self.exit_json(changed=False)
|
|
||||||
|
|
||||||
def _build_update(self, share_type):
|
|
||||||
return {
|
|
||||||
**self._build_update_extra_specs(share_type),
|
|
||||||
**self._build_update_share_type(share_type),
|
|
||||||
}
|
|
||||||
|
|
||||||
def _build_update_extra_specs(self, share_type):
|
|
||||||
update = {}
|
|
||||||
|
|
||||||
old_extra_specs = share_type.get("extra_specs", {})
|
|
||||||
|
|
||||||
# Build the complete new extra specs including driver_handles_share_servers
|
|
||||||
new_extra_specs = {}
|
|
||||||
|
|
||||||
# Add driver_handles_share_servers (always required)
|
|
||||||
if self.params.get("driver_handles_share_servers") is not None:
|
|
||||||
new_extra_specs["driver_handles_share_servers"] = str(
|
|
||||||
self.params["driver_handles_share_servers"]
|
|
||||||
).title()
|
|
||||||
|
|
||||||
# Add user-defined extra specs
|
|
||||||
if self.params.get("extra_specs"):
|
|
||||||
new_extra_specs.update(
|
|
||||||
{k: str(v) for k, v in self.params["extra_specs"].items()}
|
|
||||||
)
|
|
||||||
|
|
||||||
delete_extra_specs_keys = set(old_extra_specs.keys()) - set(
|
|
||||||
new_extra_specs.keys()
|
|
||||||
)
|
|
||||||
|
|
||||||
if delete_extra_specs_keys:
|
|
||||||
update["delete_extra_specs_keys"] = delete_extra_specs_keys
|
|
||||||
|
|
||||||
if old_extra_specs != new_extra_specs:
|
|
||||||
update["create_extra_specs"] = new_extra_specs
|
|
||||||
|
|
||||||
return update
|
|
||||||
|
|
||||||
def _build_update_share_type(self, share_type):
|
|
||||||
update = {}
|
|
||||||
# Only allow description updates - name is used for identification
|
|
||||||
allowed_attributes = ["description"]
|
|
||||||
|
|
||||||
# Handle is_public updates - CLI supports this, so we should too
|
|
||||||
# Always check is_public since it has a default value of True
|
|
||||||
current_is_public = share_type.get(
|
|
||||||
"os-share-type-access:is_public",
|
|
||||||
share_type.get("share_type_access:is_public"),
|
|
||||||
)
|
|
||||||
requested_is_public = self.params["is_public"] # Will be True by default now
|
|
||||||
if current_is_public != requested_is_public:
|
|
||||||
# Mark this as needing a special access update
|
|
||||||
update["update_access"] = {
|
|
||||||
"is_public": requested_is_public,
|
|
||||||
"share_type_id": share_type.get("id"),
|
|
||||||
}
|
|
||||||
|
|
||||||
type_attributes = {
|
|
||||||
k: self.params[k]
|
|
||||||
for k in allowed_attributes
|
|
||||||
if k in self.params
|
|
||||||
and self.params.get(k) is not None
|
|
||||||
and self.params.get(k) != share_type.get(k)
|
|
||||||
}
|
|
||||||
|
|
||||||
if type_attributes:
|
|
||||||
update["type_attributes"] = type_attributes
|
|
||||||
|
|
||||||
return update
|
|
||||||
|
|
||||||
def _create(self):
|
|
||||||
share_type_attrs = {"name": self.params["name"]}
|
|
||||||
|
|
||||||
if self.params.get("description") is not None:
|
|
||||||
share_type_attrs["description"] = self.params["description"]
|
|
||||||
|
|
||||||
# Handle driver_handles_share_servers - this is the key required parameter
|
|
||||||
extra_specs = {}
|
|
||||||
if self.params.get("driver_handles_share_servers") is not None:
|
|
||||||
extra_specs["driver_handles_share_servers"] = str(
|
|
||||||
self.params["driver_handles_share_servers"]
|
|
||||||
).title()
|
|
||||||
|
|
||||||
# Add user-defined extra specs
|
|
||||||
if self.params.get("extra_specs"):
|
|
||||||
extra_specs.update(
|
|
||||||
{k: str(v) for k, v in self.params["extra_specs"].items()}
|
|
||||||
)
|
|
||||||
|
|
||||||
if extra_specs:
|
|
||||||
share_type_attrs["extra_specs"] = extra_specs
|
|
||||||
|
|
||||||
# Handle is_public parameter - field name depends on API version
|
|
||||||
if self.params.get("is_public") is not None:
|
|
||||||
# For microversion (API 2.7+), use share_type_access:is_public
|
|
||||||
# For older versions, use os-share-type-access:is_public
|
|
||||||
share_type_attrs["share_type_access:is_public"] = self.params["is_public"]
|
|
||||||
# Also include legacy field for compatibility
|
|
||||||
share_type_attrs["os-share-type-access:is_public"] = self.params[
|
|
||||||
"is_public"
|
|
||||||
]
|
|
||||||
|
|
||||||
try:
|
|
||||||
payload = {"share_type": share_type_attrs}
|
|
||||||
|
|
||||||
# Try with microversion first (supports share_type_access:is_public)
|
|
||||||
try:
|
|
||||||
response = self.conn.shared_file_system.post(
|
|
||||||
"/types", json=payload, microversion=MANILA_MICROVERSION
|
|
||||||
)
|
|
||||||
share_type_data = response.json().get("share_type", {})
|
|
||||||
except Exception:
|
|
||||||
# Fallback: try without microversion (uses os-share-type-access:is_public)
|
|
||||||
# Remove the newer field name for older API compatibility
|
|
||||||
if "share_type_access:is_public" in share_type_attrs:
|
|
||||||
del share_type_attrs["share_type_access:is_public"]
|
|
||||||
payload = {"share_type": share_type_attrs}
|
|
||||||
response = self.conn.shared_file_system.post("/types", json=payload)
|
|
||||||
share_type_data = response.json().get("share_type", {})
|
|
||||||
|
|
||||||
return share_type_data
|
|
||||||
|
|
||||||
except Exception as e:
|
|
||||||
self.fail_json(msg=f"Failed to create share type: {str(e)}")
|
|
||||||
|
|
||||||
    def _delete(self, share_type):
        """Delete a share type via the Manila REST API.

        :param share_type: share type to delete; may be a plain dict (from a
            raw API response) or an SDK object exposing an ``id`` attribute.

        Calls ``fail_json`` (which exits the module) on any API error.
        """
        # Use direct API call since SDK method may not exist
        try:
            # Accept both dict payloads and SDK objects when extracting the id.
            share_type_id = (
                share_type.get("id") if isinstance(share_type, dict) else share_type.id
            )
            # Try with microversion first, fallback if not supported
            try:
                self.conn.shared_file_system.delete(
                    f"/types/{share_type_id}", microversion=MANILA_MICROVERSION
                )
            except Exception:
                # Older backends reject the microversion header; retry without it.
                self.conn.shared_file_system.delete(f"/types/{share_type_id}")
        except Exception as e:
            self.fail_json(msg=f"Failed to delete share type: {str(e)}")
|
|
||||||
def _update(self, share_type, update):
|
|
||||||
if not update:
|
|
||||||
return share_type
|
|
||||||
share_type = self._update_share_type(share_type, update)
|
|
||||||
share_type = self._update_extra_specs(share_type, update)
|
|
||||||
share_type = self._update_access(share_type, update)
|
|
||||||
return share_type
|
|
||||||
|
|
||||||
    def _update_extra_specs(self, share_type, update):
        """Delete and/or create extra specs on an existing share type.

        :param share_type: the current share type (dict or SDK object)
        :param update: change set; ``delete_extra_specs_keys`` lists keys to
            remove, ``create_extra_specs`` maps keys to values to set
        :returns: the refreshed share type after the extra-spec changes

        Calls ``fail_json`` (which exits the module) on any API error.
        """
        # Accept both dict payloads and SDK objects when extracting the id.
        share_type_id = (
            share_type.get("id") if isinstance(share_type, dict) else share_type.id
        )

        delete_extra_specs_keys = update.get("delete_extra_specs_keys")
        if delete_extra_specs_keys:
            # Manila only supports deleting extra specs one key at a time.
            for key in delete_extra_specs_keys:
                try:
                    # Try with microversion first, fallback if not supported
                    try:
                        self.conn.shared_file_system.delete(
                            f"/types/{share_type_id}/extra_specs/{key}",
                            microversion=MANILA_MICROVERSION,
                        )
                    except Exception:
                        self.conn.shared_file_system.delete(
                            f"/types/{share_type_id}/extra_specs/{key}"
                        )
                except Exception as e:
                    self.fail_json(msg=f"Failed to delete extra spec '{key}': {str(e)}")
            # refresh share_type information
            share_type = self._find_share_type(share_type_id)

        create_extra_specs = update.get("create_extra_specs")
        if create_extra_specs:
            # Convert values to strings as Manila API expects string values
            string_specs = {k: str(v) for k, v in create_extra_specs.items()}
            try:
                # Try with microversion first, fallback if not supported
                try:
                    self.conn.shared_file_system.post(
                        f"/types/{share_type_id}/extra_specs",
                        json={"extra_specs": string_specs},
                        microversion=MANILA_MICROVERSION,
                    )
                except Exception:
                    self.conn.shared_file_system.post(
                        f"/types/{share_type_id}/extra_specs",
                        json={"extra_specs": string_specs},
                    )
            except Exception as e:
                self.fail_json(msg=f"Failed to update extra specs: {str(e)}")
            # refresh share_type information
            share_type = self._find_share_type(share_type_id)

        return share_type
|
|
||||||
    def _update_access(self, share_type, update):
        """Update share type access (public/private) using direct API update.

        :param share_type: the current share type (dict or SDK object)
        :param update: change set; ``update_access`` (when present) carries
            ``share_type_id`` and the desired ``is_public`` flag
        :returns: the refreshed share type, unchanged when no access update
            was requested

        Calls ``fail_json`` (which exits the module) on any API error.
        """
        access_update = update.get("update_access")
        if not access_update:
            # No visibility change requested.
            return share_type

        share_type_id = access_update["share_type_id"]
        is_public = access_update["is_public"]

        try:
            # Use direct update with share_type_access:is_public (works for both public and private)
            update_payload = {"share_type": {"share_type_access:is_public": is_public}}

            try:
                self.conn.shared_file_system.put(
                    f"/types/{share_type_id}",
                    json=update_payload,
                    microversion=MANILA_MICROVERSION,
                )
            except Exception:
                # Fallback: try with legacy field name for older API versions
                update_payload = {
                    "share_type": {"os-share-type-access:is_public": is_public}
                }
                self.conn.shared_file_system.put(
                    f"/types/{share_type_id}", json=update_payload
                )

            # Refresh share type information after access change
            share_type = self._find_share_type(share_type_id)

        except Exception as e:
            self.fail_json(msg=f"Failed to update share type access: {str(e)}")

        return share_type
|
|
||||||
    def _update_share_type(self, share_type, update):
        """Update top-level share type attributes (e.g. description).

        :param share_type: the current share type (dict or SDK object)
        :param update: change set; ``type_attributes`` (when present) maps
            attribute names to their new values
        :returns: the updated share type payload from the API response, or
            the input ``share_type`` unchanged when nothing was requested

        Calls ``fail_json`` (which exits the module) on any API error.
        """
        type_attributes = update.get("type_attributes")
        if type_attributes:
            # Accept both dict payloads and SDK objects when extracting the id.
            share_type_id = (
                share_type.get("id") if isinstance(share_type, dict) else share_type.id
            )
            try:
                # Try with microversion first, fallback if not supported
                try:
                    response = self.conn.shared_file_system.put(
                        f"/types/{share_type_id}",
                        json={"share_type": type_attributes},
                        microversion=MANILA_MICROVERSION,
                    )
                except Exception:
                    response = self.conn.shared_file_system.put(
                        f"/types/{share_type_id}", json={"share_type": type_attributes}
                    )
                updated_type = response.json().get("share_type", {})
                return updated_type
            except Exception as e:
                # fail_json exits the module; the return below is never
                # reached on this path.
                self.fail_json(msg=f"Failed to update share type: {str(e)}")
        return share_type
|
|
||||||
def _will_change(self, state, share_type):
|
|
||||||
if state == "present" and not share_type:
|
|
||||||
return True
|
|
||||||
if state == "present" and share_type:
|
|
||||||
return bool(self._build_update(share_type))
|
|
||||||
if state == "absent" and share_type:
|
|
||||||
return True
|
|
||||||
return False
|
|
||||||
|
|
||||||
|
|
||||||
def main():
    """Module entry point: build the module object and execute it."""
    module = ShareTypeModule()
    module()


if __name__ == "__main__":
    main()
@@ -1,239 +0,0 @@
|
|||||||
#!/usr/bin/python
|
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
|
|
||||||
# Copyright (c) 2025 VEXXHOST, Inc.
|
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
|
||||||
|
|
||||||
DOCUMENTATION = r"""
|
|
||||||
---
|
|
||||||
module: share_type_info
|
|
||||||
short_description: Get OpenStack share type details
|
|
||||||
author: OpenStack Ansible SIG
|
|
||||||
description:
|
|
||||||
- Get share type details in OpenStack Manila.
|
|
||||||
- Get share type access details for private share types.
|
|
||||||
- Uses Manila API microversion 2.50 to retrieve complete share type information including is_default field.
|
|
||||||
- Safely falls back to basic information if microversion 2.50 is not supported by the backend.
|
|
||||||
- Private share types can only be accessed by UUID.
|
|
||||||
options:
|
|
||||||
name:
|
|
||||||
description:
|
|
||||||
- Share type name or id.
|
|
||||||
- For private share types, the UUID must be used instead of name.
|
|
||||||
required: true
|
|
||||||
type: str
|
|
||||||
extends_documentation_fragment:
|
|
||||||
- openstack.cloud.openstack
|
|
||||||
"""
|
|
||||||
|
|
||||||
EXAMPLES = r"""
|
|
||||||
- name: Get share type details
|
|
||||||
openstack.cloud.share_type_info:
|
|
||||||
name: manila-generic-share
|
|
||||||
|
|
||||||
- name: Get share type details by id
|
|
||||||
openstack.cloud.share_type_info:
|
|
||||||
name: fbadfa6b-5f17-4c26-948e-73b94de57b42
|
|
||||||
"""
|
|
||||||
|
|
||||||
RETURN = """
|
|
||||||
share_type:
|
|
||||||
description: Dictionary describing share type
|
|
||||||
returned: On success
|
|
||||||
type: dict
|
|
||||||
contains:
|
|
||||||
id:
|
|
||||||
description: share type uuid
|
|
||||||
returned: success
|
|
||||||
type: str
|
|
||||||
sample: 59575cfc-3582-4efc-8eee-f47fcb25ea6b
|
|
||||||
name:
|
|
||||||
description: share type name
|
|
||||||
returned: success
|
|
||||||
type: str
|
|
||||||
sample: default
|
|
||||||
description:
|
|
||||||
description:
|
|
||||||
- share type description
|
|
||||||
- Available when Manila API microversion 2.50 is supported
|
|
||||||
- Falls back to empty string if microversion is not available
|
|
||||||
returned: success
|
|
||||||
type: str
|
|
||||||
sample: "Default Manila share type"
|
|
||||||
is_default:
|
|
||||||
description:
|
|
||||||
- whether this is the default share type
|
|
||||||
- Retrieved from the API response when microversion 2.50 is supported
|
|
||||||
- Falls back to null if microversion is not available or field is not present
|
|
||||||
returned: success
|
|
||||||
type: bool
|
|
||||||
sample: true
|
|
||||||
is_public:
|
|
||||||
description: whether the share type is public (true) or private (false)
|
|
||||||
returned: success
|
|
||||||
type: bool
|
|
||||||
sample: true
|
|
||||||
required_extra_specs:
|
|
||||||
description: Required extra specifications for the share type
|
|
||||||
returned: success
|
|
||||||
type: dict
|
|
||||||
sample: {"driver_handles_share_servers": "True"}
|
|
||||||
optional_extra_specs:
|
|
||||||
description: Optional extra specifications for the share type
|
|
||||||
returned: success
|
|
||||||
type: dict
|
|
||||||
sample: {"snapshot_support": "True", "create_share_from_snapshot_support": "True"}
|
|
||||||
"""
|
|
||||||
|
|
||||||
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import (
|
|
||||||
OpenStackModule,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Manila API microversion 2.50 provides complete share type information
|
|
||||||
# including is_default field and description
|
|
||||||
# Reference: https://docs.openstack.org/api-ref/shared-file-system/#show-share-type-detail
|
|
||||||
MANILA_MICROVERSION = "2.50"
|
|
||||||
|
|
||||||
|
|
||||||
class ShareTypeInfoModule(OpenStackModule):
    """Read-only module returning normalized Manila share type details."""

    # Only the share type name (or UUID) is accepted; connection options
    # come from the shared OpenStackModule argument spec.
    argument_spec = dict(name=dict(type="str", required=True))
    module_kwargs = dict(
        supports_check_mode=True,
    )

    def __init__(self, **kwargs):
        # No extra initialization beyond the base class.
        super(ShareTypeInfoModule, self).__init__(**kwargs)

    def _find_share_type(self, name_or_id):
        """
        Find share type by name or ID with comprehensive information.
        """
        # Fast path: direct GET works for UUIDs and for private share types.
        share_type = self._find_by_direct_access(name_or_id)
        if share_type:
            return share_type

        # If direct access fails, try searching in public listing
        # This handles cases where we have the name but need to find the ID
        try:
            response = self.conn.shared_file_system.get("/types")
            share_types = response.json().get("share_types", [])

            for share_type in share_types:
                if share_type["name"] == name_or_id or share_type["id"] == name_or_id:
                    # Found by name, now get complete info using the ID
                    result = self._find_by_direct_access(share_type["id"])
                    if result:
                        return result
        except Exception:
            # Listing is best-effort; a failure simply means "not found".
            pass

        return None

    def _find_by_direct_access(self, name_or_id):
        """
        Find share type by direct access (for private share types).
        """
        # Prefer the microversioned call: it includes description/is_default.
        try:
            response = self.conn.shared_file_system.get(
                f"/types/{name_or_id}", microversion=MANILA_MICROVERSION
            )
            share_type_data = response.json().get("share_type", {})
            if share_type_data:
                return share_type_data
        except Exception:
            pass

        # Fallback: try without microversion for basic information
        try:
            response = self.conn.shared_file_system.get(f"/types/{name_or_id}")
            share_type_data = response.json().get("share_type", {})
            if share_type_data:
                return share_type_data
        except Exception:
            pass

        return None

    def _normalize_share_type_dict(self, share_type_dict):
        """
        Normalize share type dictionary to match CLI output format.
        """
        # Extract extra specs information
        extra_specs = share_type_dict.get("extra_specs", {})
        required_extra_specs = share_type_dict.get("required_extra_specs", {})

        # Optional extra specs are those in extra_specs but not in required_extra_specs
        optional_extra_specs = {
            key: value
            for key, value in extra_specs.items()
            if key not in required_extra_specs
        }

        # Determine if this is the default share type
        # Use the is_default field from API response (available with microversion 2.50)
        # If not available (older API versions), default to None
        is_default = share_type_dict.get("is_default", None)

        # Handle the description field - available through microversion 2.50
        # Convert None to empty string if API returns null
        description = share_type_dict.get("description") or ""

        # Determine visibility - check both new and legacy field names
        # Use the same logic as share_type.py for consistency
        is_public = share_type_dict.get(
            "os-share-type-access:is_public",
            share_type_dict.get("share_type_access:is_public"),
        )

        # Build the normalized dictionary matching CLI output
        normalized = {
            "id": share_type_dict.get("id"),
            "name": share_type_dict.get("name"),
            "is_public": is_public,
            "is_default": is_default,
            "required_extra_specs": required_extra_specs,
            "optional_extra_specs": optional_extra_specs,
            "description": description,
        }

        return normalized

    def run(self):
        """
        Main execution method following OpenStackModule pattern.

        Retrieves share type information using Manila API microversion for complete
        details including description and is_default fields. Falls back gracefully to
        basic API calls if microversion is not supported by the backend.
        """
        name_or_id = self.params["name"]

        share_type = self._find_share_type(name_or_id)
        if not share_type:
            self.fail_json(
                msg=f"Share type '{name_or_id}' not found. "
                f"If this is a private share type, use its UUID instead of name."
            )

        # The lookup may return either an SDK object or a plain dict.
        if hasattr(share_type, "to_dict"):
            share_type_dict = share_type.to_dict()
        elif isinstance(share_type, dict):
            share_type_dict = share_type
        else:
            share_type_dict = dict(share_type) if share_type else {}

        # Normalize the output to match CLI format
        normalized_share_type = self._normalize_share_type_dict(share_type_dict)

        # Return results in the standard format
        result = dict(changed=False, share_type=normalized_share_type)
        return result
|
|
||||||
def main():
    """Module entry point: build the module object and execute it."""
    module = ShareTypeInfoModule()
    module()


if __name__ == "__main__":
    main()
@@ -229,10 +229,8 @@ class StackInfoModule(OpenStackModule):
|
|||||||
if self.params[k] is not None:
|
if self.params[k] is not None:
|
||||||
kwargs[k] = self.params[k]
|
kwargs[k] = self.params[k]
|
||||||
|
|
||||||
stacks = []
|
stacks = [stack.to_dict(computed=False)
|
||||||
for stack in self.conn.orchestration.stacks(**kwargs):
|
for stack in self.conn.orchestration.stacks(**kwargs)]
|
||||||
stack_obj = self.conn.orchestration.get_stack(stack.id)
|
|
||||||
stacks.append(stack_obj.to_dict(computed=False))
|
|
||||||
|
|
||||||
self.exit_json(changed=False, stacks=stacks)
|
self.exit_json(changed=False, stacks=stacks)
|
||||||
|
|
||||||
|
|||||||
@@ -28,12 +28,6 @@ options:
|
|||||||
- From the subnet pool the last IP that should be assigned to the
|
- From the subnet pool the last IP that should be assigned to the
|
||||||
virtual machines.
|
virtual machines.
|
||||||
type: str
|
type: str
|
||||||
allocation_pools:
|
|
||||||
description:
|
|
||||||
- List of allocation pools to assign to the subnet. Each element
|
|
||||||
consists of a 'start' and 'end' value.
|
|
||||||
type: list
|
|
||||||
elements: dict
|
|
||||||
cidr:
|
cidr:
|
||||||
description:
|
description:
|
||||||
- The CIDR representation of the subnet that should be assigned to
|
- The CIDR representation of the subnet that should be assigned to
|
||||||
@@ -115,10 +109,6 @@ options:
|
|||||||
- Required when I(state) is 'present'
|
- Required when I(state) is 'present'
|
||||||
aliases: ['network_name']
|
aliases: ['network_name']
|
||||||
type: str
|
type: str
|
||||||
network_segment:
|
|
||||||
description:
|
|
||||||
- Name or id of the network segment to which the subnet should be associated
|
|
||||||
type: str
|
|
||||||
project:
|
project:
|
||||||
description:
|
description:
|
||||||
- Project name or ID containing the subnet (name admin-only)
|
- Project name or ID containing the subnet (name admin-only)
|
||||||
@@ -298,7 +288,6 @@ class SubnetModule(OpenStackModule):
|
|||||||
argument_spec = dict(
|
argument_spec = dict(
|
||||||
name=dict(required=True),
|
name=dict(required=True),
|
||||||
network=dict(aliases=['network_name']),
|
network=dict(aliases=['network_name']),
|
||||||
network_segment=dict(),
|
|
||||||
cidr=dict(),
|
cidr=dict(),
|
||||||
description=dict(),
|
description=dict(),
|
||||||
ip_version=dict(type='int', default=4, choices=[4, 6]),
|
ip_version=dict(type='int', default=4, choices=[4, 6]),
|
||||||
@@ -310,7 +299,6 @@ class SubnetModule(OpenStackModule):
|
|||||||
dns_nameservers=dict(type='list', elements='str'),
|
dns_nameservers=dict(type='list', elements='str'),
|
||||||
allocation_pool_start=dict(),
|
allocation_pool_start=dict(),
|
||||||
allocation_pool_end=dict(),
|
allocation_pool_end=dict(),
|
||||||
allocation_pools=dict(type='list', elements='dict'),
|
|
||||||
host_routes=dict(type='list', elements='dict'),
|
host_routes=dict(type='list', elements='dict'),
|
||||||
ipv6_ra_mode=dict(choices=ipv6_mode_choices),
|
ipv6_ra_mode=dict(choices=ipv6_mode_choices),
|
||||||
ipv6_address_mode=dict(choices=ipv6_mode_choices),
|
ipv6_address_mode=dict(choices=ipv6_mode_choices),
|
||||||
@@ -333,9 +321,7 @@ class SubnetModule(OpenStackModule):
|
|||||||
('cidr', 'use_default_subnet_pool', 'subnet_pool'), True),
|
('cidr', 'use_default_subnet_pool', 'subnet_pool'), True),
|
||||||
],
|
],
|
||||||
mutually_exclusive=[
|
mutually_exclusive=[
|
||||||
('use_default_subnet_pool', 'subnet_pool'),
|
('cidr', 'use_default_subnet_pool', 'subnet_pool')
|
||||||
('allocation_pool_start', 'allocation_pools'),
|
|
||||||
('allocation_pool_end', 'allocation_pools')
|
|
||||||
]
|
]
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -374,23 +360,16 @@ class SubnetModule(OpenStackModule):
|
|||||||
return [dict(start=pool_start, end=pool_end)]
|
return [dict(start=pool_start, end=pool_end)]
|
||||||
return None
|
return None
|
||||||
|
|
||||||
def _build_params(self, network, segment, project, subnet_pool):
|
def _build_params(self, network, project, subnet_pool):
|
||||||
params = {attr: self.params[attr] for attr in self.attr_params}
|
params = {attr: self.params[attr] for attr in self.attr_params}
|
||||||
params['network_id'] = network.id
|
params['network_id'] = network.id
|
||||||
if segment:
|
|
||||||
params['segment_id'] = segment.id
|
|
||||||
if project:
|
if project:
|
||||||
params['project_id'] = project.id
|
params['project_id'] = project.id
|
||||||
if subnet_pool:
|
if subnet_pool:
|
||||||
params['subnet_pool_id'] = subnet_pool.id
|
params['subnet_pool_id'] = subnet_pool.id
|
||||||
if self.params['allocation_pool_start']:
|
params['allocation_pools'] = self._build_pool()
|
||||||
params['allocation_pools'] = self._build_pool()
|
|
||||||
else:
|
|
||||||
params['allocation_pools'] = self.params['allocation_pools']
|
|
||||||
params = self._add_extra_attrs(params)
|
params = self._add_extra_attrs(params)
|
||||||
params = {k: v for k, v in params.items() if v is not None}
|
params = {k: v for k, v in params.items() if v is not None}
|
||||||
if self.params['disable_gateway_ip']:
|
|
||||||
params['gateway_ip'] = None
|
|
||||||
return params
|
return params
|
||||||
|
|
||||||
def _build_updates(self, subnet, params):
|
def _build_updates(self, subnet, params):
|
||||||
@@ -403,10 +382,6 @@ class SubnetModule(OpenStackModule):
|
|||||||
params['host_routes'].sort(key=lambda r: sorted(r.items()))
|
params['host_routes'].sort(key=lambda r: sorted(r.items()))
|
||||||
subnet['host_routes'].sort(key=lambda r: sorted(r.items()))
|
subnet['host_routes'].sort(key=lambda r: sorted(r.items()))
|
||||||
|
|
||||||
if 'allocation_pools' in params:
|
|
||||||
params['allocation_pools'].sort(key=lambda r: sorted(r.items()))
|
|
||||||
subnet['allocation_pools'].sort(key=lambda r: sorted(r.items()))
|
|
||||||
|
|
||||||
updates = {k: params[k] for k in params if params[k] != subnet[k]}
|
updates = {k: params[k] for k in params if params[k] != subnet[k]}
|
||||||
if self.params['disable_gateway_ip'] and subnet.gateway_ip:
|
if self.params['disable_gateway_ip'] and subnet.gateway_ip:
|
||||||
updates['gateway_ip'] = None
|
updates['gateway_ip'] = None
|
||||||
@@ -423,7 +398,6 @@ class SubnetModule(OpenStackModule):
|
|||||||
def run(self):
|
def run(self):
|
||||||
state = self.params['state']
|
state = self.params['state']
|
||||||
network_name_or_id = self.params['network']
|
network_name_or_id = self.params['network']
|
||||||
network_segment_name_or_id = self.params['network_segment']
|
|
||||||
project_name_or_id = self.params['project']
|
project_name_or_id = self.params['project']
|
||||||
subnet_pool_name_or_id = self.params['subnet_pool']
|
subnet_pool_name_or_id = self.params['subnet_pool']
|
||||||
subnet_name = self.params['name']
|
subnet_name = self.params['name']
|
||||||
@@ -452,13 +426,6 @@ class SubnetModule(OpenStackModule):
|
|||||||
**filters)
|
**filters)
|
||||||
filters['network_id'] = network.id
|
filters['network_id'] = network.id
|
||||||
|
|
||||||
segment = None
|
|
||||||
if network_segment_name_or_id:
|
|
||||||
segment = self.conn.network.find_segment(network_segment_name_or_id,
|
|
||||||
ignore_missing=False,
|
|
||||||
**filters)
|
|
||||||
filters['segment_id'] = segment.id
|
|
||||||
|
|
||||||
subnet_pool = None
|
subnet_pool = None
|
||||||
if subnet_pool_name_or_id:
|
if subnet_pool_name_or_id:
|
||||||
subnet_pool = self.conn.network.find_subnet_pool(
|
subnet_pool = self.conn.network.find_subnet_pool(
|
||||||
@@ -475,7 +442,7 @@ class SubnetModule(OpenStackModule):
|
|||||||
|
|
||||||
changed = False
|
changed = False
|
||||||
if state == 'present':
|
if state == 'present':
|
||||||
params = self._build_params(network, segment, project, subnet_pool)
|
params = self._build_params(network, project, subnet_pool)
|
||||||
if subnet is None:
|
if subnet is None:
|
||||||
subnet = self.conn.network.create_subnet(**params)
|
subnet = self.conn.network.create_subnet(**params)
|
||||||
changed = True
|
changed = True
|
||||||
|
|||||||
@@ -1,110 +0,0 @@
|
|||||||
#!/usr/bin/python
|
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
|
|
||||||
# Copyright (c) 2025, ScaleUp Technologies GmbH & Co. KG
|
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
|
||||||
---
|
|
||||||
module: trait
|
|
||||||
short_description: Add/Delete a trait from OpenStack
|
|
||||||
author: OpenStack Ansible SIG
|
|
||||||
description:
|
|
||||||
- Add or Delete a trait from OpenStack
|
|
||||||
options:
|
|
||||||
id:
|
|
||||||
description:
|
|
||||||
- ID/Name of this trait
|
|
||||||
required: true
|
|
||||||
type: str
|
|
||||||
state:
|
|
||||||
description:
|
|
||||||
- Should the resource be present or absent.
|
|
||||||
choices: [present, absent]
|
|
||||||
default: present
|
|
||||||
type: str
|
|
||||||
extends_documentation_fragment:
|
|
||||||
- openstack.cloud.openstack
|
|
||||||
'''
|
|
||||||
|
|
||||||
EXAMPLES = '''
|
|
||||||
# Creates a trait with the ID CUSTOM_WINDOWS_SPLA
|
|
||||||
- openstack.cloud.trait:
|
|
||||||
cloud: openstack
|
|
||||||
state: present
|
|
||||||
id: CUSTOM_WINDOWS_SPLA
|
|
||||||
'''
|
|
||||||
|
|
||||||
RETURN = '''
|
|
||||||
trait:
|
|
||||||
description: Dictionary describing the trait.
|
|
||||||
returned: On success when I(state) is 'present'
|
|
||||||
type: dict
|
|
||||||
contains:
|
|
||||||
id:
|
|
||||||
description: ID of the trait.
|
|
||||||
returned: success
|
|
||||||
type: str
|
|
||||||
'''
|
|
||||||
|
|
||||||
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import (
|
|
||||||
OpenStackModule)
|
|
||||||
|
|
||||||
|
|
||||||
class TraitModule(OpenStackModule):
    """Create or delete a placement trait in OpenStack."""

    argument_spec = dict(
        id=dict(required=True),
        state=dict(default='present',
                   choices=['absent', 'present']),
    )

    module_kwargs = dict(
        supports_check_mode=True,
    )

    def _system_state_change(self, trait):
        """Return True when applying the requested state would change anything.

        :param trait: the existing trait object, or None when it does not exist
        """
        state = self.params['state']
        if state == 'present' and not trait:
            return True
        if state == 'absent' and trait:
            return True
        return False

    def run(self):
        """Ensure the trait exists or is absent, then exit the module."""
        state = self.params['state']
        # Use a distinct local name so we do not shadow the builtin id().
        trait_id = self.params['id']

        try:
            trait = self.conn.placement.get_trait(trait_id)
        except self.sdk.exceptions.NotFoundException:
            trait = None

        if self.ansible.check_mode:
            # NOTE(review): the raw SDK object (or None) is returned here,
            # while the non-check-mode path returns trait.to_dict(); confirm
            # consumers tolerate both shapes.
            self.exit_json(changed=self._system_state_change(trait), trait=trait)

        changed = False
        if state == 'present':
            if not trait:
                trait = self.conn.placement.create_trait(trait_id)
                changed = True

            self.exit_json(
                changed=changed, trait=trait.to_dict(computed=False))

        elif state == 'absent':
            if trait:
                self.conn.placement.delete_trait(trait_id, ignore_missing=False)
                self.exit_json(changed=True)

            self.exit_json(changed=False)
|
||||||
|
|
||||||
def main():
    """Module entry point: build the module object and execute it."""
    module = TraitModule()
    module()


if __name__ == '__main__':
    main()
@@ -1,306 +0,0 @@
|
|||||||
#!/usr/bin/python
|
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
|
|
||||||
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
|
|
||||||
# Copyright (c) 2024 Binero AB
|
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
|
||||||
---
|
|
||||||
module: trunk
|
|
||||||
short_description: Add or delete trunks from an OpenStack cloud.
|
|
||||||
author: OpenStack Ansible SIG
|
|
||||||
description:
|
|
||||||
- Add or delete trunk from an OpenStack cloud.
|
|
||||||
options:
|
|
||||||
state:
|
|
||||||
description:
|
|
||||||
- Should the resource be present or absent.
|
|
||||||
choices: [present, absent]
|
|
||||||
default: present
|
|
||||||
type: str
|
|
||||||
name:
|
|
||||||
description:
|
|
||||||
- Name that has to be given to the trunk.
|
|
||||||
- This port attribute cannot be updated.
|
|
||||||
type: str
|
|
||||||
required: true
|
|
||||||
port:
|
|
||||||
description:
|
|
||||||
- The name or ID of the port for the trunk.
|
|
||||||
type: str
|
|
||||||
required: false
|
|
||||||
sub_ports:
|
|
||||||
description:
|
|
||||||
- The sub ports on the trunk.
|
|
||||||
type: list
|
|
||||||
required: false
|
|
||||||
elements: dict
|
|
||||||
suboptions:
|
|
||||||
port:
|
|
||||||
description: The ID or name of the port.
|
|
||||||
type: str
|
|
||||||
segmentation_type:
|
|
||||||
description: The segmentation type to use.
|
|
||||||
type: str
|
|
||||||
segmentation_id:
|
|
||||||
description: The segmentation ID to use.
|
|
||||||
type: int
|
|
||||||
extends_documentation_fragment:
|
|
||||||
- openstack.cloud.openstack
|
|
||||||
'''
|
|
||||||
|
|
||||||
EXAMPLES = '''
|
|
||||||
# Create a trunk
|
|
||||||
- openstack.cloud.trunk:
|
|
||||||
state: present
|
|
||||||
auth:
|
|
||||||
auth_url: https://identity.example.com
|
|
||||||
username: admin
|
|
||||||
password: admin
|
|
||||||
project_name: admin
|
|
||||||
name: trunk1
|
|
||||||
port: port1
|
|
||||||
|
|
||||||
# Create a trunk with a subport
|
|
||||||
- openstack.cloud.trunk:
|
|
||||||
state: present
|
|
||||||
cloud: my-cloud
|
|
||||||
name: trunk1
|
|
||||||
port: port1
|
|
||||||
sub_ports:
|
|
||||||
- name: subport1
|
|
||||||
segmentation_type: vlan
|
|
||||||
segmentation_id: 123
|
|
||||||
|
|
||||||
# Remove a trunk
|
|
||||||
- openstack.cloud.trunk:
|
|
||||||
state: absent
|
|
||||||
auth:
|
|
||||||
auth_url: https://identity.example.com
|
|
||||||
username: admin
|
|
||||||
password: admin
|
|
||||||
project_name: admin
|
|
||||||
name: trunk1
|
|
||||||
'''
|
|
||||||
|
|
||||||
RETURN = '''
|
|
||||||
trunk:
|
|
||||||
description: Dictionary describing the trunk.
|
|
||||||
type: dict
|
|
||||||
returned: On success when I(state) is C(present).
|
|
||||||
contains:
|
|
||||||
created_at:
|
|
||||||
description: Timestamp when the trunk was created.
|
|
||||||
returned: success
|
|
||||||
type: str
|
|
||||||
sample: "2022-02-03T13:28:25Z"
|
|
||||||
description:
|
|
||||||
description: The trunk description.
|
|
||||||
returned: success
|
|
||||||
type: str
|
|
||||||
id:
|
|
||||||
description: The trunk ID.
|
|
||||||
returned: success
|
|
||||||
type: str
|
|
||||||
sample: "3ec25c97-7052-4ab8-a8ba-92faf84148de"
|
|
||||||
is_admin_state_up:
|
|
||||||
description: |
|
|
||||||
The administrative state of the trunk, which is up C(True) or
|
|
||||||
down C(False).
|
|
||||||
returned: success
|
|
||||||
type: bool
|
|
||||||
sample: true
|
|
||||||
name:
|
|
||||||
description: The trunk name.
|
|
||||||
returned: success
|
|
||||||
type: str
|
|
||||||
sample: "trunk_name"
|
|
||||||
port_id:
|
|
||||||
description: The ID of the port for the trunk
|
|
||||||
returned: success
|
|
||||||
type: str
|
|
||||||
sample: "5ec25c97-7052-4ab8-a8ba-92faf84148df"
|
|
||||||
project_id:
|
|
||||||
description: The ID of the project who owns the trunk.
|
|
||||||
returned: success
|
|
||||||
type: str
|
|
||||||
sample: "aa1ede4f-3952-4131-aab6-3b8902268c7d"
|
|
||||||
revision_number:
|
|
||||||
description: The revision number of the resource.
|
|
||||||
returned: success
|
|
||||||
type: int
|
|
||||||
sample: 0
|
|
||||||
status:
|
|
||||||
description: The trunk status. Value is C(ACTIVE) or C(DOWN).
|
|
||||||
returned: success
|
|
||||||
type: str
|
|
||||||
sample: "ACTIVE"
|
|
||||||
sub_ports:
|
|
||||||
description: List of sub ports on the trunk.
|
|
||||||
returned: success
|
|
||||||
type: list
|
|
||||||
sample: []
|
|
||||||
tags:
|
|
||||||
description: The list of tags on the resource.
|
|
||||||
returned: success
|
|
||||||
type: list
|
|
||||||
sample: []
|
|
||||||
tenant_id:
|
|
||||||
description: Same as I(project_id). Deprecated.
|
|
||||||
returned: success
|
|
||||||
type: str
|
|
||||||
sample: "51fce036d7984ba6af4f6c849f65ef00"
|
|
||||||
updated_at:
|
|
||||||
description: Timestamp when the trunk was last updated.
|
|
||||||
returned: success
|
|
||||||
type: str
|
|
||||||
sample: "2022-02-03T13:28:25Z"
|
|
||||||
'''
|
|
||||||
|
|
||||||
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
|
|
||||||
|
|
||||||
|
|
||||||
class TrunkModule(OpenStackModule):
    """Manage a Neutron trunk and its sub-ports."""

    argument_spec = dict(
        state=dict(default='present', choices=['absent', 'present']),
        name=dict(required=True),
        port=dict(),
        sub_ports=dict(type='list', elements='dict'),
    )

    module_kwargs = dict(
        required_if=[
            ('state', 'present', ('port',)),
        ],
        supports_check_mode=True
    )

    def run(self):
        """Entry point: create, update or delete the trunk."""
        state = self.params['state']

        parent_port = None
        if self.params['port']:
            parent_port = self.conn.network.find_port(
                self.params['port'], ignore_missing=False)

        trunk = self.conn.network.find_trunk(self.params['name'])

        # Resolve every requested sub-port reference to a port object,
        # failing hard on unknown ports.
        resolved_sub_ports = [
            self.conn.network.find_port(item['port'], ignore_missing=False)
            for item in (self.params['sub_ports'] or [])
        ]

        if self.ansible.check_mode:
            self.exit_json(
                changed=self._will_change(state, trunk, resolved_sub_ports))

        if state == 'present':
            if trunk:
                # Update an existing trunk's sub-ports if needed.
                update = self._build_update(trunk, resolved_sub_ports)
                if update:
                    trunk = self._update(trunk, update)
                self.exit_json(changed=bool(update),
                               trunk=trunk.to_dict(computed=False))
            else:
                trunk = self._create(self.params['name'], parent_port)
                self.exit_json(changed=True,
                               trunk=trunk.to_dict(computed=False))
        else:  # state == 'absent'
            if trunk:
                self._delete(trunk)
                self.exit_json(changed=True)
            else:
                self.exit_json(changed=False)

    def _build_update(self, trunk, sub_ports):
        """Compute add_sub_ports/del_sub_ports deltas.

        An empty dict means the trunk already matches the request.
        """
        requested = self.params['sub_ports'] or []
        attached_ids = set(tsp['port_id'] for tsp in trunk['sub_ports'])
        wanted_ids = set(sp['id'] for sp in sub_ports)

        add_sub_ports = []
        for sp in sub_ports:
            if sp['id'] in attached_ids:
                continue
            # Recover segmentation details from the user-supplied entry.
            # NOTE(review): the lookup matches by port *name*, so entries
            # referenced by ID in `sub_ports` keep the original behavior
            # of not being added.
            for item in requested:
                if item['port'] == sp['name']:
                    add_sub_ports.append({
                        'port_id': sp['id'],
                        'segmentation_type': item['segmentation_type'],
                        'segmentation_id': item['segmentation_id'],
                    })
                    break

        del_sub_ports = [
            {'port_id': tsp['port_id']}
            for tsp in trunk['sub_ports']
            if tsp['port_id'] not in wanted_ids
        ]

        update = {}
        if add_sub_ports:
            update['add_sub_ports'] = add_sub_ports
        if del_sub_ports:
            update['del_sub_ports'] = del_sub_ports
        return update

    def _create(self, name, port):
        """Create a trunk named *name* on parent *port*."""
        return self.conn.network.create_trunk(name=name, port_id=port.id)

    def _delete(self, trunk):
        """Detach all sub-ports, then delete the trunk itself."""
        stale = [{'port_id': sp['port_id']} for sp in trunk['sub_ports']]
        self.conn.network.delete_trunk_subports(trunk.id, stale)
        self.conn.network.delete_trunk(trunk.id)

    def _update(self, trunk, update):
        """Apply sub-port deltas and return the refreshed trunk."""
        if update.get('add_sub_ports'):
            self.conn.network.add_trunk_subports(
                trunk, update['add_sub_ports'])

        if update.get('del_sub_ports'):
            self.conn.network.delete_trunk_subports(
                trunk, update['del_sub_ports'])

        return self.conn.network.find_trunk(trunk.id)

    def _will_change(self, state, trunk, sub_ports):
        """Predict whether run() would change anything (check mode)."""
        if state == 'present':
            return not trunk or bool(self._build_update(trunk, sub_ports))
        # state == 'absent': only a change if the trunk exists.
        return bool(trunk)
|
|
||||||
|
|
||||||
|
|
||||||
def main():
    """Ansible module entry point."""
    TrunkModule()()


if __name__ == '__main__':
    main()
|
|
||||||
@@ -1,309 +0,0 @@
|
|||||||
#!/usr/bin/python
|
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
|
|
||||||
# Copyright (c) 2025 by Pure Storage, Inc.
|
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
|
||||||
|
|
||||||
DOCUMENTATION = r"""
|
|
||||||
---
|
|
||||||
module: volume_manage
|
|
||||||
short_description: Manage/Unmanage Volumes
|
|
||||||
author: OpenStack Ansible SIG
|
|
||||||
description:
|
|
||||||
- Manage or Unmanage Volume in OpenStack.
|
|
||||||
options:
|
|
||||||
description:
|
|
||||||
description:
|
|
||||||
- String describing the volume
|
|
||||||
type: str
|
|
||||||
metadata:
|
|
||||||
description: Metadata for the volume
|
|
||||||
type: dict
|
|
||||||
name:
|
|
||||||
description:
|
|
||||||
- Name of the volume to be unmanaged or
|
|
||||||
the new name of a managed volume
|
|
||||||
- When I(state) is C(absent) this must be
|
|
||||||
the cinder volume ID
|
|
||||||
required: true
|
|
||||||
type: str
|
|
||||||
state:
|
|
||||||
description:
|
|
||||||
- Should the resource be present or absent.
|
|
||||||
choices: [present, absent]
|
|
||||||
default: present
|
|
||||||
type: str
|
|
||||||
bootable:
|
|
||||||
description:
|
|
||||||
- Bootable flag for volume.
|
|
||||||
type: bool
|
|
||||||
default: False
|
|
||||||
volume_type:
|
|
||||||
description:
|
|
||||||
- Volume type for volume
|
|
||||||
type: str
|
|
||||||
availability_zone:
|
|
||||||
description:
|
|
||||||
- The availability zone.
|
|
||||||
type: str
|
|
||||||
host:
|
|
||||||
description:
|
|
||||||
- Cinder host on which the existing volume resides
|
|
||||||
- Takes the form "host@backend-name#pool"
|
|
||||||
- Required when I(state) is C(present).
|
|
||||||
type: str
|
|
||||||
source_name:
|
|
||||||
description:
|
|
||||||
- Name of existing volume
|
|
||||||
type: str
|
|
||||||
source_id:
|
|
||||||
description:
|
|
||||||
- Identifier of existing volume
|
|
||||||
type: str
|
|
||||||
extends_documentation_fragment:
|
|
||||||
- openstack.cloud.openstack
|
|
||||||
"""
|
|
||||||
|
|
||||||
RETURN = r"""
|
|
||||||
volume:
|
|
||||||
description: Cinder's representation of the volume object
|
|
||||||
returned: always
|
|
||||||
type: dict
|
|
||||||
contains:
|
|
||||||
attachments:
|
|
||||||
description: Instance attachment information. For a managed volume, this
|
|
||||||
will always be empty.
|
|
||||||
type: list
|
|
||||||
availability_zone:
|
|
||||||
description: The name of the availability zone.
|
|
||||||
type: str
|
|
||||||
consistency_group_id:
|
|
||||||
description: The UUID of the consistency group.
|
|
||||||
type: str
|
|
||||||
created_at:
|
|
||||||
description: The date and time when the resource was created.
|
|
||||||
type: str
|
|
||||||
description:
|
|
||||||
description: The volume description.
|
|
||||||
type: str
|
|
||||||
extended_replication_status:
|
|
||||||
description: Extended replication status on this volume.
|
|
||||||
type: str
|
|
||||||
group_id:
|
|
||||||
description: The ID of the group.
|
|
||||||
type: str
|
|
||||||
host:
|
|
||||||
description: The volume's current back-end.
|
|
||||||
type: str
|
|
||||||
id:
|
|
||||||
description: The UUID of the volume.
|
|
||||||
type: str
|
|
||||||
image_id:
|
|
||||||
description: Image on which the volume was based
|
|
||||||
type: str
|
|
||||||
is_bootable:
|
|
||||||
description: Enables or disables the bootable attribute. You can boot an
|
|
||||||
instance from a bootable volume.
|
|
||||||
type: str
|
|
||||||
is_encrypted:
|
|
||||||
description: If true, this volume is encrypted.
|
|
||||||
type: bool
|
|
||||||
is_multiattach:
|
|
||||||
description: Whether this volume can be attached to more than one
|
|
||||||
server.
|
|
||||||
type: bool
|
|
||||||
metadata:
|
|
||||||
description: A metadata object. Contains one or more metadata key and
|
|
||||||
value pairs that are associated with the volume.
|
|
||||||
type: dict
|
|
||||||
migration_id:
|
|
||||||
description: The volume ID that this volume name on the backend is
|
|
||||||
based on.
|
|
||||||
type: str
|
|
||||||
migration_status:
|
|
||||||
description: The status of this volume migration (None means that a
|
|
||||||
migration is not currently in progress).
|
|
||||||
type: str
|
|
||||||
name:
|
|
||||||
description: The volume name.
|
|
||||||
type: str
|
|
||||||
project_id:
|
|
||||||
description: The project ID which the volume belongs to.
|
|
||||||
type: str
|
|
||||||
replication_driver_data:
|
|
||||||
description: Data set by the replication driver
|
|
||||||
type: str
|
|
||||||
replication_status:
|
|
||||||
description: The volume replication status.
|
|
||||||
type: str
|
|
||||||
scheduler_hints:
|
|
||||||
description: Scheduler hints for the volume
|
|
||||||
type: dict
|
|
||||||
size:
|
|
||||||
description: The size of the volume, in gibibytes (GiB).
|
|
||||||
type: int
|
|
||||||
snapshot_id:
|
|
||||||
description: To create a volume from an existing snapshot, specify the
|
|
||||||
UUID of the volume snapshot. The volume is created in same
|
|
||||||
availability zone and with same size as the snapshot.
|
|
||||||
type: str
|
|
||||||
source_volume_id:
|
|
||||||
description: The UUID of the source volume. The API creates a new volume
|
|
||||||
with the same size as the source volume unless a larger size
|
|
||||||
is requested.
|
|
||||||
type: str
|
|
||||||
status:
|
|
||||||
description: The volume status.
|
|
||||||
type: str
|
|
||||||
updated_at:
|
|
||||||
description: The date and time when the resource was updated.
|
|
||||||
type: str
|
|
||||||
user_id:
|
|
||||||
description: The UUID of the user.
|
|
||||||
type: str
|
|
||||||
volume_image_metadata:
|
|
||||||
description: List of image metadata entries. Only included for volumes
|
|
||||||
that were created from an image, or from a snapshot of a
|
|
||||||
volume originally created from an image.
|
|
||||||
type: dict
|
|
||||||
volume_type:
|
|
||||||
description: The associated volume type name for the volume.
|
|
||||||
type: str
|
|
||||||
"""
|
|
||||||
|
|
||||||
EXAMPLES = r"""
|
|
||||||
- name: Manage volume
|
|
||||||
openstack.cloud.volume_manage:
|
|
||||||
name: newly-managed-vol
|
|
||||||
source_name: manage-me
|
|
||||||
host: host@backend-name#pool
|
|
||||||
|
|
||||||
- name: Unmanage volume
|
|
||||||
openstack.cloud.volume_manage:
|
|
||||||
name: "5c831866-3bb3-4d67-a7d3-1b90880c9d18"
|
|
||||||
state: absent
|
|
||||||
"""
|
|
||||||
|
|
||||||
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import (
|
|
||||||
OpenStackModule,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class VolumeManageModule(OpenStackModule):
    """Manage (import) or unmanage (release) a Cinder volume."""

    argument_spec = dict(
        description=dict(type="str"),
        metadata=dict(type="dict"),
        source_name=dict(type="str"),
        source_id=dict(type="str"),
        availability_zone=dict(type="str"),
        host=dict(type="str"),
        # Fixed: the default was the *string* "false"; the documented
        # default is the boolean False. Ansible coerced the string anyway,
        # but the spec should match the documentation.
        bootable=dict(default=False, type="bool"),
        volume_type=dict(type="str"),
        name=dict(required=True, type="str"),
        state=dict(
            default="present", choices=["absent", "present"], type="str"
        ),
    )

    module_kwargs = dict(
        required_if=[("state", "present", ["host"])],
        supports_check_mode=True,
    )

    def run(self):
        """Entry point: dispatch on *state* and report via exit_json."""
        if self.params["state"] == "present":
            self._run_present()
        else:
            self._run_absent()

    def _run_present(self):
        """Manage an existing backend volume into Cinder."""
        # In check mode report a change without contacting the backend
        # (preserves the original module's optimistic behavior).
        if self.ansible.check_mode:
            self.exit_json(changed=True)

        manageable = self._manage_list()["manageable-volumes"]
        if not self._is_safe_to_manage(
            manageable, self.params["source_name"]
        ):
            # Volume is not listed as manageable (or already managed).
            self.exit_json(changed=False)

        volume = self._manage()
        if volume:
            self.exit_json(
                changed=True, volume=volume.to_dict(computed=False)
            )
        self.exit_json(changed=False)

    def _run_absent(self):
        """Unmanage a Cinder volume; I(name) must be the volume ID."""
        volume = self.conn.block_storage.find_volume(self.params["name"])
        if not volume:
            self.exit_json(changed=False)
        if not self.ansible.check_mode:
            self._unmanage()
        self.exit_json(changed=True)

    def _is_safe_to_manage(self, manageable_list, target_name):
        """Return the backend's safe_to_manage flag for *target_name*.

        Returns False when the volume does not appear in the manageable
        list at all.
        """
        for entry in manageable_list:
            reference = entry.get("reference")
            if not isinstance(reference, dict):
                continue
            if target_name in (reference.get("name"),
                               reference.get("source-name")):
                return entry.get("safe_to_manage", False)
        return False

    def _manage(self):
        """Call Cinder's manage API and return the new volume (or None)."""
        kwargs = {
            key: self.params[key]
            for key in (
                "description",
                "bootable",
                "volume_type",
                "availability_zone",
                "host",
                "metadata",
                "name",
            )
            if self.params.get(key) is not None
        }
        # Backend reference: the backend-local name and/or identifier.
        kwargs["ref"] = {}
        if self.params["source_name"]:
            kwargs["ref"]["source-name"] = self.params["source_name"]
        if self.params["source_id"]:
            kwargs["ref"]["source-id"] = self.params["source_id"]

        return self.conn.block_storage.manage_volume(**kwargs)

    def _manage_list(self):
        """Fetch the backend's manageable volumes (API microversion 3.8)."""
        response = self.conn.block_storage.get(
            "/manageable_volumes?host=" + self.params["host"],
            microversion="3.8",
        )
        response.raise_for_status()
        return response.json()

    def _unmanage(self):
        """Release the volume from Cinder without touching backend data."""
        self.conn.block_storage.unmanage_volume(self.params["name"])
|
|
||||||
|
|
||||||
|
|
||||||
def main():
    """Ansible module entry point."""
    VolumeManageModule()()


if __name__ == "__main__":
    main()
|
|
||||||
@@ -1,103 +0,0 @@
|
|||||||
#!/usr/bin/python
|
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
|
|
||||||
# Copyright (c) 2023 Bitswalk, inc.
|
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
|
||||||
|
|
||||||
DOCUMENTATION = r'''
|
|
||||||
---
|
|
||||||
module: volume_service_info
|
|
||||||
short_description: Fetch OpenStack Volume (Cinder) services
|
|
||||||
author: OpenStack Ansible SIG
|
|
||||||
description:
|
|
||||||
- Fetch OpenStack Volume (Cinder) services.
|
|
||||||
options:
|
|
||||||
binary:
|
|
||||||
description:
|
|
||||||
- Filter the service list result by binary name of the service.
|
|
||||||
type: str
|
|
||||||
host:
|
|
||||||
description:
|
|
||||||
- Filter the service list result by the host name.
|
|
||||||
type: str
|
|
||||||
extends_documentation_fragment:
|
|
||||||
- openstack.cloud.openstack
|
|
||||||
'''
|
|
||||||
|
|
||||||
EXAMPLES = r'''
|
|
||||||
- name: Fetch all OpenStack Volume (Cinder) services
|
|
||||||
openstack.cloud.volume_service_info:
|
|
||||||
cloud: awesomecloud
|
|
||||||
|
|
||||||
- name: Fetch a subset of OpenStack Volume (Cinder) services
|
|
||||||
openstack.cloud.volume_service_info:
|
|
||||||
cloud: awesomecloud
|
|
||||||
binary: "cinder-volume"
|
|
||||||
host: "localhost"
|
|
||||||
'''
|
|
||||||
|
|
||||||
RETURN = r'''
|
|
||||||
volume_services:
|
|
||||||
description: List of dictionaries describing Volume (Cinder) services.
|
|
||||||
returned: always
|
|
||||||
type: list
|
|
||||||
elements: dict
|
|
||||||
contains:
|
|
||||||
availability_zone:
|
|
||||||
description: The availability zone name.
|
|
||||||
type: str
|
|
||||||
binary:
|
|
||||||
description: The binary name of the service.
|
|
||||||
type: str
|
|
||||||
disabled_reason:
|
|
||||||
description: The reason why the service is disabled
|
|
||||||
type: str
|
|
||||||
host:
|
|
||||||
description: The name of the host.
|
|
||||||
type: str
|
|
||||||
name:
|
|
||||||
description: Service name
|
|
||||||
type: str
|
|
||||||
state:
|
|
||||||
description: The state of the service. One of up or down.
|
|
||||||
type: str
|
|
||||||
status:
|
|
||||||
description: The status of the service. One of enabled or disabled.
|
|
||||||
type: str
|
|
||||||
updated_at:
|
|
||||||
description: The date and time when the resource was updated
|
|
||||||
type: str
|
|
||||||
'''
|
|
||||||
|
|
||||||
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
|
|
||||||
|
|
||||||
|
|
||||||
class VolumeServiceInfoModule(OpenStackModule):
    """Fetch Cinder services, optionally filtered by binary and host."""

    argument_spec = dict(
        binary=dict(),
        host=dict(),
    )

    module_kwargs = dict(
        supports_check_mode=True
    )

    def run(self):
        """List volume services and return them as plain dicts."""
        # Forward only the filters the user actually supplied.
        filters = {}
        for key in ('binary', 'host'):
            value = self.params[key]
            if value is not None:
                filters[key] = value

        services = self.conn.block_storage.services(**filters)

        self.exit_json(
            changed=False,
            volume_services=[svc.to_dict(computed=False)
                             for svc in services])
|
|
||||||
|
|
||||||
|
|
||||||
def main():
    """Ansible module entry point."""
    VolumeServiceInfoModule()()


if __name__ == '__main__':
    main()
|
|
||||||
5
setup.py
5
setup.py
@@ -4,6 +4,5 @@
|
|||||||
import setuptools
|
import setuptools
|
||||||
|
|
||||||
setuptools.setup(
|
setuptools.setup(
|
||||||
setup_requires=['pbr', 'setuptools'],
|
setup_requires=['pbr'],
|
||||||
pbr=True,
|
pbr=True)
|
||||||
py_modules=[])
|
|
||||||
|
|||||||
@@ -1,12 +0,0 @@
|
|||||||
ansible-core>=2.16.0,<2.17.0
|
|
||||||
flake8
|
|
||||||
galaxy-importer
|
|
||||||
openstacksdk
|
|
||||||
pycodestyle
|
|
||||||
pylint
|
|
||||||
rstcheck
|
|
||||||
ruamel.yaml
|
|
||||||
tox
|
|
||||||
voluptuous
|
|
||||||
yamllint
|
|
||||||
setuptools
|
|
||||||
@@ -1,12 +0,0 @@
|
|||||||
ansible-core>=2.18.0,<2.19.0
|
|
||||||
flake8
|
|
||||||
galaxy-importer
|
|
||||||
openstacksdk
|
|
||||||
pycodestyle
|
|
||||||
pylint
|
|
||||||
rstcheck
|
|
||||||
ruamel.yaml
|
|
||||||
tox
|
|
||||||
voluptuous
|
|
||||||
yamllint
|
|
||||||
setuptools
|
|
||||||
0
tests/unit/compat/__init__.py
Normal file
0
tests/unit/compat/__init__.py
Normal file
31
tests/unit/compat/builtins.py
Normal file
31
tests/unit/compat/builtins.py
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
# (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
|
||||||
|
#
|
||||||
|
# This file is part of Ansible
|
||||||
|
#
|
||||||
|
# Ansible is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
#
|
||||||
|
# Ansible is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU General Public License
|
||||||
|
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
# Make coding more python3-ish
|
||||||
|
|
||||||
|
#
|
||||||
|
# Compat for python2.7
|
||||||
|
#
|
||||||
|
|
||||||
|
# One unittest needs to import builtins via __import__() so we need the
# module-name string that represents the builtin namespace on this Python.
try:
    import __builtin__  # noqa
    BUILTINS = '__builtin__'  # Python 2
except ImportError:
    BUILTINS = 'builtins'  # Python 3
|
||||||
120
tests/unit/compat/mock.py
Normal file
120
tests/unit/compat/mock.py
Normal file
@@ -0,0 +1,120 @@
|
|||||||
|
# (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
|
||||||
|
#
|
||||||
|
# This file is part of Ansible
|
||||||
|
#
|
||||||
|
# Ansible is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
#
|
||||||
|
# Ansible is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU General Public License
|
||||||
|
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
# Make coding more python3-ish
|
||||||
|
|
||||||
|
'''
|
||||||
|
Compat module for Python3.x's unittest.mock module
|
||||||
|
'''
|
||||||
|
import sys
|
||||||
|
|
||||||
|
# Python 2.7
|
||||||
|
|
||||||
|
# Note: Could use the pypi mock library on python3.x as well as python2.x. It
|
||||||
|
# is the same as the python3 stdlib mock library
|
||||||
|
|
||||||
|
try:
|
||||||
|
# Allow wildcard import because we really do want to import all of mock's
|
||||||
|
# symbols into this compat shim
|
||||||
|
# pylint: disable=wildcard-import,unused-wildcard-import
|
||||||
|
from unittest.mock import * # noqa
|
||||||
|
except ImportError:
|
||||||
|
# Python 2
|
||||||
|
# pylint: disable=wildcard-import,unused-wildcard-import
|
||||||
|
try:
|
||||||
|
from mock import * # noqa
|
||||||
|
except ImportError:
|
||||||
|
print('You need the mock library installed on python2.x to run tests')
|
||||||
|
|
||||||
|
|
||||||
|
# Prior to 3.4.4, mock_open cannot handle binary read_data
|
||||||
|
if sys.version_info >= (3,) and sys.version_info < (3, 4, 4):
|
||||||
|
file_spec = None
|
||||||
|
|
||||||
|
def _iterate_read_data(read_data):
|
||||||
|
# Helper for mock_open:
|
||||||
|
# Retrieve lines from read_data via a generator so that separate calls to
|
||||||
|
# readline, read, and readlines are properly interleaved
|
||||||
|
sep = b'\n' if isinstance(read_data, bytes) else '\n'
|
||||||
|
data_as_list = [li + sep for li in read_data.split(sep)]
|
||||||
|
|
||||||
|
if data_as_list[-1] == sep:
|
||||||
|
# If the last line ended in a newline, the list comprehension will have an
|
||||||
|
# extra entry that's just a newline. Remove this.
|
||||||
|
data_as_list = data_as_list[:-1]
|
||||||
|
else:
|
||||||
|
# If there wasn't an extra newline by itself, then the file being
|
||||||
|
# emulated doesn't have a newline to end the last line remove the
|
||||||
|
# newline that our naive format() added
|
||||||
|
data_as_list[-1] = data_as_list[-1][:-1]
|
||||||
|
|
||||||
|
for line in data_as_list:
|
||||||
|
yield line
|
||||||
|
|
||||||
|
def mock_open(mock=None, read_data=''):
    """
    A helper function to create a mock to replace the use of `open`. It works
    for `open` called directly or used as a context manager.

    The `mock` argument is the mock object to configure. If `None` (the
    default) then a `MagicMock` will be created for you, with the API limited
    to methods or attributes available on standard file handles.

    `read_data` is a string for the `read`, `readline`, and `readlines`
    methods of the file handle to return. This is an empty string by default.
    """
    def _readlines_side_effect(*args, **kwargs):
        if handle.readlines.return_value is not None:
            return handle.readlines.return_value
        return list(_data)

    def _read_side_effect(*args, **kwargs):
        if handle.read.return_value is not None:
            return handle.read.return_value
        # Join with an empty str or bytes, matching read_data's type.
        return type(read_data)().join(_data)

    def _readline_side_effect():
        if handle.readline.return_value is not None:
            while True:
                yield handle.readline.return_value
        for line in _data:
            yield line

    # Build the file-handle spec lazily, once, from the union of text and
    # binary stream attributes.
    global file_spec
    if file_spec is None:
        import _io  # noqa
        file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO))))

    if mock is None:
        mock = MagicMock(name='open', spec=open)  # noqa

    handle = MagicMock(spec=file_spec)  # noqa
    handle.__enter__.return_value = handle

    # Shared generator so read/readline/readlines consume the same stream.
    _data = _iterate_read_data(read_data)

    handle.write.return_value = None
    handle.read.return_value = None
    handle.readline.return_value = None
    handle.readlines.return_value = None

    handle.read.side_effect = _read_side_effect
    handle.readline.side_effect = _readline_side_effect()
    handle.readlines.side_effect = _readlines_side_effect

    mock.return_value = handle
    return mock
|
||||||
36
tests/unit/compat/unittest.py
Normal file
36
tests/unit/compat/unittest.py
Normal file
@@ -0,0 +1,36 @@
|
|||||||
|
# (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
|
||||||
|
#
|
||||||
|
# This file is part of Ansible
|
||||||
|
#
|
||||||
|
# Ansible is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
#
|
||||||
|
# Ansible is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU General Public License
|
||||||
|
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
# Make coding more python3-ish
|
||||||
|
|
||||||
|
'''
|
||||||
|
Compat module for Python2.7's unittest module
|
||||||
|
'''
|
||||||
|
|
||||||
|
import sys
|
||||||
|
|
||||||
|
# Allow wildcard import because we really do want to import all of
|
||||||
|
# unittests's symbols into this compat shim
|
||||||
|
# pylint: disable=wildcard-import,unused-wildcard-import
|
||||||
|
if sys.version_info < (2, 7):
|
||||||
|
try:
|
||||||
|
# Need unittest2 on python2.6
|
||||||
|
from unittest2 import * # noqa
|
||||||
|
except ImportError:
|
||||||
|
print('You need unittest2 installed on python2.6.x to run tests')
|
||||||
|
else:
|
||||||
|
from unittest import * # noqa
|
||||||
@@ -1,5 +1,4 @@
|
|||||||
from unittest.mock import MagicMock
|
from ansible_collections.openstack.cloud.tests.unit.compat.mock import MagicMock
|
||||||
|
|
||||||
from ansible.utils.path import unfrackpath
|
from ansible.utils.path import unfrackpath
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -20,10 +20,10 @@
|
|||||||
|
|
||||||
import sys
|
import sys
|
||||||
import json
|
import json
|
||||||
import unittest
|
|
||||||
|
|
||||||
from contextlib import contextmanager
|
from contextlib import contextmanager
|
||||||
from io import BytesIO, StringIO
|
from io import BytesIO, StringIO
|
||||||
|
from ansible_collections.openstack.cloud.tests.unit.compat import unittest
|
||||||
from ansible.module_utils.six import PY3
|
from ansible.module_utils.six import PY3
|
||||||
from ansible.module_utils._text import to_bytes
|
from ansible.module_utils._text import to_bytes
|
||||||
|
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user