1 Commits

Author SHA1 Message Date
Sagi Shnaidman
20a27c461d Remove secret from experimental branch
Change-Id: I521069b356a14839720de954959a35b49fa59117
2024-12-17 13:01:13 +02:00
126 changed files with 611 additions and 7280 deletions

View File

@@ -47,8 +47,6 @@
devstack_services:
designate: true
neutron-dns: true
neutron-trunk: true
neutron-segments: true
zuul_copy_output:
'{{ devstack_log_dir }}/test_output.log': 'logs'
extensions_to_txt:
@@ -96,39 +94,6 @@
c-bak: false
tox_extra_args: -vv --skip-missing-interpreters=false -- coe_cluster coe_cluster_template
- job:
name: ansible-collections-openstack-functional-devstack-manila-base
parent: ansible-collections-openstack-functional-devstack-base
# Do not restrict branches in base jobs because else Zuul would not find a matching
# parent job variant during job freeze when child jobs are on other branches.
description: |
Run openstack collections functional tests against a devstack with Manila plugin enabled
# Do not set job.override-checkout or job.required-projects.override-checkout in base job because
# else Zuul will use this branch when matching variants for parent jobs during job freeze
required-projects:
- openstack/manila
- openstack/python-manilaclient
files:
- ^ci/roles/share_type/.*$
- ^plugins/modules/share_type.py
- ^plugins/modules/share_type_info.py
timeout: 10800
vars:
devstack_localrc:
MANILA_ENABLED_BACKENDS: generic
MANILA_OPTGROUP_generic_driver_handles_share_servers: true
MANILA_OPTGROUP_generic_connect_share_server_to_tenant_network: true
MANILA_USE_SERVICE_INSTANCE_PASSWORD: true
devstack_plugins:
manila: https://opendev.org/openstack/manila
devstack_services:
manila: true
m-api: true
m-sch: true
m-shr: true
m-dat: true
tox_extra_args: -vv --skip-missing-interpreters=false -- share_type share_type_info
- job:
name: ansible-collections-openstack-functional-devstack-magnum
parent: ansible-collections-openstack-functional-devstack-magnum-base
@@ -138,15 +103,6 @@
with Magnum plugin enabled, using master of openstacksdk and latest
ansible release. Run it only on coe_cluster{,_template} changes.
- job:
name: ansible-collections-openstack-functional-devstack-manila
parent: ansible-collections-openstack-functional-devstack-manila-base
branches: master
description: |
Run openstack collections functional tests against a master devstack
with Manila plugin enabled, using master of openstacksdk and latest
ansible release. Run it only on share_type{,_info} changes.
- job:
name: ansible-collections-openstack-functional-devstack-octavia-base
parent: ansible-collections-openstack-functional-devstack-base
@@ -206,48 +162,50 @@
tox_constraints_file: '{{ ansible_user_dir }}/{{ zuul.project.src_dir }}/tests/constraints-openstacksdk-1.x.x.txt'
tox_install_siblings: false
# Job with Ansible 2.9 for checking backward compatibility
- job:
name: ansible-collections-openstack-functional-devstack-ansible-2.18
name: ansible-collections-openstack-functional-devstack-ansible-2.9
parent: ansible-collections-openstack-functional-devstack-base
branches: master
description: |
Run openstack collections functional tests against a master devstack
using master of openstacksdk and stable 2.18 branch of ansible
using master of openstacksdk and stable 2.9 branch of ansible
required-projects:
- name: github.com/ansible/ansible
override-checkout: stable-2.18
override-checkout: stable-2.9
vars:
tox_envlist: ansible_2_18
tox_envlist: ansible_2_9
- job:
name: ansible-collections-openstack-functional-devstack-ansible-2.19
name: ansible-collections-openstack-functional-devstack-ansible-2.11
parent: ansible-collections-openstack-functional-devstack-base
branches: master
description: |
Run openstack collections functional tests against a master devstack
using master of openstacksdk and stable 2.19 branch of ansible
using master of openstacksdk and stable 2.11 branch of ansible
required-projects:
- name: github.com/ansible/ansible
override-checkout: stable-2.19
override-checkout: stable-2.11
vars:
tox_envlist: ansible_2_19
tox_envlist: ansible_2_11
- job:
name: ansible-collections-openstack-functional-devstack-ansible-2.20
name: ansible-collections-openstack-functional-devstack-ansible-2.12
parent: ansible-collections-openstack-functional-devstack-base
branches: master
description: |
Run openstack collections functional tests against a master devstack
using master of openstacksdk and stable 2.20 branch of ansible
using master of openstacksdk and stable 2.12 branch of ansible
required-projects:
- name: github.com/ansible/ansible
override-checkout: stable-2.20
override-checkout: stable-2.12
vars:
tox_envlist: ansible_2_20
tox_envlist: ansible_2_12
- job:
name: ansible-collections-openstack-functional-devstack-ansible-devel
parent: ansible-collections-openstack-functional-devstack-base
nodeset: openstack-single-node-jammy
branches: master
description: |
Run openstack collections functional tests against a master devstack
@@ -276,58 +234,34 @@
- job:
name: openstack-tox-linters-ansible-devel
parent: openstack-tox-linters-ansible
nodeset: ubuntu-noble
nodeset: ubuntu-jammy
description: |
Run openstack collections linter tests using the devel branch of ansible
# non-voting because we can't prevent ansible devel from breaking us
voting: false
vars:
python_version: '3.12'
python_version: '3.10'
bindep_profile: test py310
- job:
name: openstack-tox-linters-ansible-2.18
name: openstack-tox-linters-ansible-2.12
parent: openstack-tox-linters-ansible
nodeset: ubuntu-focal
description: |
Run openstack collections linter tests using the 2.18 branch of ansible
Run openstack collections linter tests using the 2.12 branch of ansible
required-projects:
- name: github.com/ansible/ansible
override-checkout: stable-2.18
override-checkout: stable-2.12
vars:
tox_envlist: linters_2_18
python_version: "3.12"
bindep_profile: test py312
- job:
name: openstack-tox-linters-ansible-2.19
parent: openstack-tox-linters-ansible
description: |
Run openstack collections linter tests using the 2.19 branch of ansible
required-projects:
- name: github.com/ansible/ansible
override-checkout: stable-2.19
vars:
tox_envlist: linters_2_19
python_version: "3.12"
bindep_profile: test py312
- job:
name: openstack-tox-linters-ansible-2.20
parent: openstack-tox-linters-ansible
description: |
Run openstack collections linter tests using the 2.20 branch of ansible
required-projects:
- name: github.com/ansible/ansible
override-checkout: stable-2.20
vars:
tox_envlist: linters_2_20
python_version: "3.12"
bindep_profile: test py312
ensure_tox_version: '<4'
tox_envlist: linters_2_12
python_version: 3.8
bindep_profile: test py38
# Cross-checks with other projects
- job:
name: bifrost-collections-src
parent: bifrost-integration-on-ubuntu-noble
parent: bifrost-integration-tinyipa-ubuntu-focal
required-projects:
- openstack/ansible-collections-openstack
- # always use master branch when collecting parent job variants, refer to git blame for rationale.
@@ -338,7 +272,7 @@
override-checkout: master
- job:
name: bifrost-keystone-collections-src
parent: bifrost-integration-keystone-on-ubuntu-noble
parent: bifrost-integration-tinyipa-keystone-ubuntu-focal
required-projects:
- openstack/ansible-collections-openstack
- # always use master branch when collecting parent job variants, refer to git blame for rationale.
@@ -348,83 +282,69 @@
name: openstack/openstacksdk
override-checkout: master
- job:
name: ansible-collections-openstack-release
parent: openstack-tox-linters-ansible
run: ci/publish/publish_collection.yml
secrets:
- ansible_galaxy_info
- secret:
name: ansible_galaxy_info
data:
url: https://galaxy.ansible.com
token: !encrypted/pkcs1-oaep
- K93hOZo1B5z248H04COB1N2HCkGbFPo2EUr+0W7qFzsrdvmbsAI86Hl9bUCfEENGrwvfV
0j9CE5iO0tyqal3r6ucMhGT44MgQWL3MBeRvK89yAJpSNMU7R7rEY/zbjZMoC9YElcHEv
GEDZSA/0gQHCHpZVDlx4JMGwrnd+Nz9ha3c12BYeZS8rS/dQl7EmZ867OsozmNdG9UkkC
0vP/dkenUQNvoZOSWgZztRBlbAyI1nc5iEEw9vvpLh19HcY9+S2iAZkgSq4jOOO4wn7gE
XAZPr0HRdwS2m4Hw0Pusrg7SdC3+2O0N/fvFGnvvKXHcSgQk3rPLn6HfKzOJoPWc4WlDX
MA79jYloNBXjOaeXOoiwYzzshWK53F6Ci+3leq1cYuFyHSi2ds2mYXat7YndZSsmsk5um
hj0+Ddy9Om1uYy3nhHyZLULE7UDUmduA9EPkvdyWlcW0yZL2kXcrDTHlSp4PaJg9iKVys
0aOOo9CNMwhyXAOGiFCYF/m7Efbnp50zUQhHN9+7LeVzXZuiH98C8kNvWfE0qrkrrgQ1n
78UMqGcGpdw4ZSlWrDTbrbd4v0bRnsJ+IAWISnT5OXaeJgGZwXRuBHtTXqbjoosBeX/8w
YKb0lx7E5ZtSw7+Y6LNDGihGTmVg1nkZUWo85CxyF/RiWHuNvpkzzqXmdGS1bg=
- project:
check:
jobs:
- tox-pep8
- openstack-tox-linters-ansible-devel
- openstack-tox-linters-ansible-2.18
- openstack-tox-linters-ansible-2.19
- openstack-tox-linters-ansible-2.20
- ansible-collections-openstack-functional-devstack
- ansible-collections-openstack-functional-devstack-releases
- ansible-collections-openstack-functional-devstack-ansible-2.18
- ansible-collections-openstack-functional-devstack-ansible-2.19
- ansible-collections-openstack-functional-devstack-ansible-2.20
- ansible-collections-openstack-functional-devstack-ansible-devel
- ansible-collections-openstack-functional-devstack-magnum
- ansible-collections-openstack-functional-devstack-manila
- ansible-collections-openstack-functional-devstack-octavia
# - openstack-tox-linters-ansible-devel
# - openstack-tox-linters-ansible-2.12
# - ansible-collections-openstack-functional-devstack:
# dependencies: &deps_unit_lint
# - tox-pep8
# - openstack-tox-linters-ansible-2.12
- bifrost-collections-src:
voting: false
irrelevant-files: *ignore_files
- bifrost-keystone-collections-src:
voting: false
irrelevant-files: *ignore_files
# - ansible-collections-openstack-functional-devstack-releases:
# dependencies: *deps_unit_lint
# - ansible-collections-openstack-functional-devstack-ansible-2.9:
# dependencies: *deps_unit_lint
# - ansible-collections-openstack-functional-devstack-ansible-2.12:
# dependencies: *deps_unit_lint
# - ansible-collections-openstack-functional-devstack-ansible-devel:
# dependencies: *deps_unit_lint
# - ansible-collections-openstack-functional-devstack-magnum:
# dependencies: *deps_unit_lint
# - ansible-collections-openstack-functional-devstack-octavia:
# dependencies: *deps_unit_lint
# - bifrost-collections-src:
# voting: false
# dependencies: *deps_unit_lint
# irrelevant-files: *ignore_files
# - bifrost-keystone-collections-src:
# voting: false
# dependencies: *deps_unit_lint
# irrelevant-files: *ignore_files
gate:
jobs:
- tox-pep8
- openstack-tox-linters-ansible-2.18
- openstack-tox-linters-ansible-2.19
- openstack-tox-linters-ansible-2.20
- ansible-collections-openstack-functional-devstack-releases
- ansible-collections-openstack-functional-devstack-magnum
- ansible-collections-openstack-functional-devstack-manila
- ansible-collections-openstack-functional-devstack-octavia
# - openstack-tox-linters-ansible-2.12
# - ansible-collections-openstack-functional-devstack
# - ansible-collections-openstack-functional-devstack-releases
# - ansible-collections-openstack-functional-devstack-ansible-2.9
# - ansible-collections-openstack-functional-devstack-ansible-2.12
# - ansible-collections-openstack-functional-devstack-magnum
# - ansible-collections-openstack-functional-devstack-octavia
periodic:
jobs:
- openstack-tox-linters-ansible-devel
- openstack-tox-linters-ansible-2.18
- openstack-tox-linters-ansible-2.19
- openstack-tox-linters-ansible-2.20
- ansible-collections-openstack-functional-devstack
- ansible-collections-openstack-functional-devstack-releases
- ansible-collections-openstack-functional-devstack-ansible-2.18
- ansible-collections-openstack-functional-devstack-ansible-2.19
- ansible-collections-openstack-functional-devstack-ansible-2.20
- ansible-collections-openstack-functional-devstack-ansible-devel
- bifrost-collections-src
- bifrost-keystone-collections-src
- ansible-collections-openstack-functional-devstack-magnum
- ansible-collections-openstack-functional-devstack-manila
- ansible-collections-openstack-functional-devstack-octavia
# periodic:
# jobs:
# - openstack-tox-linters-ansible-devel
# - openstack-tox-linters-ansible-2.12
# - ansible-collections-openstack-functional-devstack
# - ansible-collections-openstack-functional-devstack-releases
# - ansible-collections-openstack-functional-devstack-ansible-2.9
# - ansible-collections-openstack-functional-devstack-ansible-2.12
# - ansible-collections-openstack-functional-devstack-ansible-devel
# - bifrost-collections-src
# - bifrost-keystone-collections-src
# - ansible-collections-openstack-functional-devstack-magnum
# - ansible-collections-openstack-functional-devstack-octavia
tag:
jobs:
- ansible-collections-openstack-release
# experimental:
# jobs:
# - ansible-collections-openstack-functional-devstack-ansible-2.11
# tag:
# jobs:
# - ansible-collections-openstack-release

View File

@@ -4,182 +4,6 @@ Ansible OpenStack Collection Release Notes
.. contents:: Topics
v2.5.0
======
Release Summary
---------------
Bugfixes and minor changes
Major Changes
-------------
- Add import_method to module
- Add object_containers_info module
- Add support for filters in inventory
- Add volume_manage module
- Introduce share_type modules
Minor Changes
-------------
- Allow role_assignment module to work cross domain
- Don't compare current state for `reboot_*` actions
- Fix disable_gateway_ip for subnet
- Fix example in the dns_zone_info module doc
- Fix router module external IPs when only subnet specified
- Fix the bug reporting url
- Let clouds_yaml_path behave as documented (Override path to clouds.yaml file)
- Shows missing data in `stack_info` module output
v2.4.1
======
Release Summary
---------------
Bugfixes and minor changes
Minor Changes
-------------
- Update tags when changing server
Bugfixes
--------
- Fix missed client_cert in OpenStackModule
v2.4.0
======
Release Summary
---------------
New trait module and minor changes
Major Changes
-------------
- Add trait module
Minor Changes
-------------
- Add loadbalancer quota options
- Allow create instance with tags
New Modules
-----------
- openstack.cloud.trait - Add or Delete a trait from OpenStack
v2.3.3
======
Release Summary
---------------
Bugfixes and minor changes
Minor Changes
-------------
- Add test to only_ipv4 in inventory
- add an option to use IPv4 only for ansible_host and ansible_ssh_host
Bugfixes
--------
- CI - Fix deprecated ANSIBLE_COLLECTIONS_PATHS variable
v2.3.2
======
Release Summary
---------------
Bugfixes and minor changes
Minor Changes
-------------
- Drop compat implementations for tests
Bugfixes
--------
- Fix openstack.cloud.port module failure in check mode
v2.3.1
======
Release Summary
---------------
Client TLS certificate support
Minor Changes
-------------
- Add ability to pass client tls certificate
v2.3.0
======
Release Summary
---------------
Bugfixes and new modules
Major Changes
-------------
- Add Neutron trunk module
- Add application_credential module
- Add module to filter available volume services
Minor Changes
-------------
- Add inactive state for the images
- Add insecure_registry property to coe_cluster_templates
- Add support for creation of the default external networks
- Add target_all_project option
- Add vlan_transparency for creation networks
- Allow munch results in server_info module
- Allow to specify multiple allocation pools when creating a subnet
- CI - Disable auto-discovery for setuptools
- CI - Don't create port with binding profile
- CI - Fix CI in collection
- CI - Fix linters-devel and devstack tests
- CI - Fix regression in quota module
- CI - Fix test for server shelve
- CI - Migrate Bifrost jobs to Ubuntu Jammy
- CI - Remove 2.9 jobs from Zuul config
- CI - Run functional testing regardless of pep8/linter results
- Enable glance-direct interop image import
- Ensure coe_cluster_template compare labels properly
- Wait for deleted server to disappear from results
- router - Allow specifying external network name in a different project
Bugfixes
--------
- Allow wait false when auto_ip is false
- Fix exception when creating object from file
- Fix exception when updating container with metadata
- Fix typo in openstack.cloud.lb_pool
- Fix typo in parameter description
- fix subnet module - allow cidr option with subnet_pool
New Modules
-----------
- openstack.cloud.application_credential - Manage OpenStack Identity (Keystone) application credentials
- openstack.cloud.trunk - Add or delete trunks from an OpenStack cloud
- openstack.cloud.volume_service_info - Fetch OpenStack Volume (Cinder) services
v2.2.0
======

View File

@@ -211,7 +211,7 @@ Thank you for your interest in our Ansible OpenStack collection ☺️
There are many ways in which you can participate in the project, for example:
- [Report and verify bugs and help with solving issues](
https://bugs.launchpad.net/ansible-collections-openstack).
https://storyboard.openstack.org/#!/project/openstack/ansible-collections-openstack).
- [Submit and review patches](
https://review.opendev.org/#/q/project:openstack/ansible-collections-openstack).
- Follow OpenStack's [How To Contribute](https://wiki.openstack.org/wiki/How_To_Contribute) guide.

View File

@@ -526,112 +526,3 @@ releases:
- Add volume_type modules
release_summary: New module for volume_type and bugfixes
release_date: '2023-12-01'
2.3.0:
changes:
bugfixes:
- Allow wait false when auto_ip is false
- Fix exception when creating object from file
- Fix exception when updating container with metadata
- Fix typo in openstack.cloud.lb_pool
- Fix typo in parameter description
- fix subnet module - allow cidr option with subnet_pool
major_changes:
- Add Neutron trunk module
- Add application_credential module
- Add module to filter available volume services
minor_changes:
- Add inactive state for the images
- Add insecure_registry property to coe_cluster_templates
- Add support for creation of the default external networks
- Add target_all_project option
- Add vlan_transparency for creation networks
- Allow munch results in server_info module
- Allow to specify multiple allocation pools when creating a subnet
- CI - Disable auto-discovery for setuptools
- CI - Don't create port with binding profile
- CI - Fix CI in collection
- CI - Fix linters-devel and devstack tests
- CI - Fix regression in quota module
- CI - Fix test for server shelve
- CI - Migrate Bifrost jobs to Ubuntu Jammy
- CI - Remove 2.9 jobs from Zuul config
- CI - Run functional testing regardless of pep8/linter results
- Enable glance-direct interop image import
- Ensure coe_cluster_template compare labels properly
- Wait for deleted server to disappear from results
- router - Allow specifying external network name in a different project
release_summary: Bugfixes and new modules
modules:
- description: Manage OpenStack Identity (Keystone) application credentials
name: application_credential
namespace: ''
- description: Add or delete trunks from an OpenStack cloud
name: trunk
namespace: ''
- description: Fetch OpenStack Volume (Cinder) services
name: volume_service_info
namespace: ''
release_date: '2024-11-28'
2.3.1:
changes:
minor_changes:
- Add ability to pass client tls certificate
release_summary: Client TLS certificate support
release_date: '2024-12-18'
2.3.2:
changes:
bugfixes:
- Fix openstack.cloud.port module failure in check mode
minor_changes:
- Drop compat implementations for tests
release_summary: Bugfixes and minor changes
release_date: '2024-12-20'
2.3.3:
changes:
bugfixes:
- CI - Fix deprecated ANSIBLE_COLLECTIONS_PATHS variable
minor_changes:
- Add test to only_ipv4 in inventory
- add an option to use IPv4 only for ansible_host and ansible_ssh_host
release_summary: Bugfixes and minor changes
release_date: '2024-12-22'
2.4.0:
changes:
major_changes:
- Add trait module
minor_changes:
- Add loadbalancer quota options
- Allow create instance with tags
release_summary: New trait module and minor changes
modules:
- description: Add or Delete a trait from OpenStack
name: trait
namespace: ''
release_date: '2025-01-15'
2.4.1:
changes:
bugfixes:
- Fix missed client_cert in OpenStackModule
minor_changes:
- Update tags when changing server
release_summary: Bugfixes and minor changes
release_date: '2024-01-20'
2.5.0:
changes:
major_changes:
- Add import_method to module
- Add object_containers_info module
- Add support for filters in inventory
- Add volume_manage module
- Introduce share_type modules
minor_changes:
- Allow role_assignment module to work cross domain
- Don't compare current state for `reboot_*` actions
- Fix disable_gateway_ip for subnet
- Fix example in the dns_zone_info module doc
- Fix router module external IPs when only subnet specified
- Fix the bug reporting url
- Let clouds_yaml_path behave as documented (Override path to clouds.yaml file)
- Shows missing data in `stack_info` module output
release_summary: Bugfixes and minor changes
release_date: '2025-10-24'

View File

View File

@@ -1,3 +0,0 @@
---
minor_changes:
- Add support for setting the shard key on a baremetal node.

View File

@@ -1,5 +0,0 @@
---
minor_changes:
- Added the new ``openstack.cloud.baremetal_port_group`` module to manage
Bare Metal port groups (create, update, and delete), including CI role
coverage and unit tests.

View File

@@ -3,8 +3,7 @@
vars:
collection_path: "{{ ansible_user_dir }}/{{ zuul.project.src_dir }}"
build_collection_path: /tmp/collection_built/
ansible_virtualenv_path: /tmp/ansible_venv
ansible_galaxy_path: "{{ ansible_virtualenv_path }}/bin/ansible-galaxy"
ansible_galaxy_path: "~/.local/bin/ansible-galaxy"
tasks:
@@ -12,15 +11,9 @@
include_role:
name: ensure-pip
- name: Install Ansible in virtualenv
- name: Install ansible
pip:
name: ansible-core<2.19
virtualenv: "{{ ansible_virtualenv_path }}"
virtualenv_command: "{{ ensure_pip_virtualenv_command }}"
- name: Detect ansible version
command: "{{ ansible_virtualenv_path }}/bin/ansible --version"
register: ansible_version
name: ansible-core<2.12
- name: Discover tag version
set_fact:

View File

@@ -1,9 +0,0 @@
expected_fields:
- description
- expires_at
- id
- name
- project_id
- roles
- secret
- unrestricted

View File

@@ -1,61 +0,0 @@
---
- name: Create application credentials
openstack.cloud.application_credential:
cloud: "{{ cloud }}"
state: present
name: ansible_creds
description: dummy description
register: appcred
- name: Assert return values of application_credential module
assert:
that:
- appcred is changed
# allow new fields to be introduced but prevent fields from being removed
- expected_fields|difference(appcred.application_credential.keys())|length == 0
- name: Create the application credential again
openstack.cloud.application_credential:
cloud: "{{ cloud }}"
state: present
name: ansible_creds
description: dummy description
register: appcred
- name: Assert return values of ansible_credential module
assert:
that:
# credentials are immutable so creating twice will cause delete and create
- appcred is changed
# allow new fields to be introduced but prevent fields from being removed
- expected_fields|difference(appcred.application_credential.keys())|length == 0
- name: Update the application credential again
openstack.cloud.application_credential:
cloud: "{{ cloud }}"
state: present
name: ansible_creds
description: new description
register: appcred
- name: Assert application credential changed
assert:
that:
- appcred is changed
- appcred.application_credential.description == 'new description'
- name: Get list of all keypairs using application credential
openstack.cloud.keypair_info:
cloud: "{{ appcred.cloud }}"
- name: Delete application credential
openstack.cloud.application_credential:
cloud: "{{ cloud }}"
state: absent
name: ansible_creds
register: appcred
- name: Assert application credential changed
assert:
that: appcred is changed

View File

@@ -46,7 +46,6 @@ expected_fields:
- reservation
- resource_class
- retired_reason
- shard
- states
- storage_interface
- target_power_state

View File

@@ -46,7 +46,6 @@ expected_fields:
- reservation
- resource_class
- retired_reason
- shard
- states
- storage_interface
- target_power_state

View File

@@ -1,12 +0,0 @@
expected_fields:
- address
- created_at
- extra
- id
- links
- mode
- name
- node_id
- properties
- standalone_ports_supported
- updated_at

View File

@@ -1,100 +0,0 @@
---
# TODO: Actually run this role in CI. Atm we do not have DevStack's ironic plugin enabled.
- name: Create baremetal node
openstack.cloud.baremetal_node:
cloud: "{{ cloud }}"
driver_info:
ipmi_address: "1.2.3.4"
ipmi_username: "admin"
ipmi_password: "secret"
name: ansible_baremetal_node
nics:
- mac: "aa:bb:cc:aa:bb:cc"
state: present
register: node
- name: Create baremetal port group
openstack.cloud.baremetal_port_group:
cloud: "{{ cloud }}"
state: present
name: ansible_baremetal_port_group
node: ansible_baremetal_node
address: fa:16:3e:aa:aa:ab
mode: active-backup
standalone_ports_supported: true
extra:
test: created
properties:
miimon: '100'
register: port_group
- debug: var=port_group
- name: Assert return values of baremetal_port_group module
assert:
that:
# allow new fields to be introduced but prevent fields from being removed
- expected_fields|difference(port_group.port_group.keys())|length == 0
- port_group.port_group.name == "ansible_baremetal_port_group"
- port_group.port_group.node_id == node.node.id
- name: Update baremetal port group
openstack.cloud.baremetal_port_group:
cloud: "{{ cloud }}"
state: present
id: "{{ port_group.port_group.id }}"
mode: 802.3ad
standalone_ports_supported: false
extra:
test: updated
register: updated_port_group
- name: Assert return values of updated baremetal port group
assert:
that:
- updated_port_group is changed
- updated_port_group.port_group.id == port_group.port_group.id
- updated_port_group.port_group.mode == "802.3ad"
- not updated_port_group.port_group.standalone_ports_supported
- updated_port_group.port_group.extra.test == "updated"
- name: Update baremetal port group again
openstack.cloud.baremetal_port_group:
cloud: "{{ cloud }}"
state: present
id: "{{ port_group.port_group.id }}"
mode: 802.3ad
standalone_ports_supported: false
extra:
test: updated
register: updated_port_group
- name: Assert idempotency for baremetal port group module
assert:
that:
- updated_port_group is not changed
- updated_port_group.port_group.id == port_group.port_group.id
- name: Delete baremetal port group
openstack.cloud.baremetal_port_group:
cloud: "{{ cloud }}"
state: absent
id: "{{ port_group.port_group.id }}"
- name: Delete baremetal port group again
openstack.cloud.baremetal_port_group:
cloud: "{{ cloud }}"
state: absent
id: "{{ port_group.port_group.id }}"
register: deleted_port_group
- name: Assert idempotency for deleted baremetal port group
assert:
that:
- deleted_port_group is not changed
- name: Delete baremetal node
openstack.cloud.baremetal_node:
cloud: "{{ cloud }}"
name: ansible_baremetal_node
state: absent

View File

@@ -72,8 +72,6 @@
image_id: '{{ image_id }}'
is_floating_ip_enabled: true
keypair_id: '{{ keypair.keypair.id }}'
flavor_id: 'm1.small'
master_flavor_id: 'm1.small'
name: k8s
state: present
register: coe_cluster_template

View File

@@ -26,9 +26,6 @@
keypair_id: '{{ keypair.keypair.id }}'
name: k8s
state: present
labels:
docker_volume_size: 10
cloud_provider_tag: v1.23.1
register: coe_cluster_template
- name: Assert return values of coe_cluster_template module
@@ -46,9 +43,6 @@
keypair_id: '{{ keypair.keypair.id }}'
name: k8s
state: present
labels:
docker_volume_size: 10
cloud_provider_tag: v1.23.1
register: coe_cluster_template
- name: Assert return values of coe_cluster_template module

View File

@@ -27,12 +27,6 @@
name: ansible_external
external: true
- name: Gather information about external network
openstack.cloud.networks_info:
cloud: "{{ cloud }}"
name: ansible_external
register: external_network
- name: Create external subnet
openstack.cloud.subnet:
cloud: "{{ cloud }}"
@@ -104,17 +98,6 @@
- ip_address: 10.7.7.102
register: port3
- name: Create internal port 4
openstack.cloud.port:
cloud: "{{ cloud }}"
state: present
name: ansible_internal_port4
network: ansible_internal
fixed_ips:
- ip_address: 10.7.7.103
- ip_address: 10.7.7.104
register: port4
- name: Create router 1
openstack.cloud.router:
cloud: "{{ cloud }}"
@@ -153,31 +136,10 @@
selectattr('floating_network_id', '==', public_network.networks.0.id)|
list|length > 0 }}"
# TODO: Replace with appropriate Ansible module once available
- name: Create a floating ip on public network (required for simplest, first floating ip test)
command: openstack --os-cloud={{ cloud }} floating ip create public
when: not public_network_had_fips
block:
- name: Create a floating ip on public network
openstack.cloud.floating_ip:
cloud: "{{ cloud }}"
state: present
network: public
register: public_fip_result
- name: Verify floating ip got created
assert:
that:
- public_fip_result.floating_ip.floating_network_id == public_network.networks.0.id
- name: Create a floating ip on public network again
openstack.cloud.floating_ip:
cloud: "{{ cloud }}"
state: present
network: public
register: public_fip_result
- name: Verify idempotency
assert:
that: public_fip_result is not changed
# TODO: Replace with appropriate Ansible module once available
- name: Create floating ip 1 on external network
@@ -189,90 +151,6 @@
when: fips.floating_ips|length == 0 or
"10.6.6.150" not in fips.floating_ips|map(attribute="floating_ip_address")|list
- name: Create floating ip 2 on external network
openstack.cloud.floating_ip:
cloud: "{{ cloud }}"
state: present
network: ansible_external
floating_ip_address: 10.6.6.151
register: external_fip2_result
- name: Verify floating ip got created
assert:
that:
- external_fip2_result.floating_ip.floating_network_id == external_network.networks.0.id
- name: Update floating ip 2 on external network
openstack.cloud.floating_ip:
cloud: "{{ cloud }}"
state: present
network: ansible_external
floating_ip_address: 10.6.6.151
nat_destination: ansible_internal
fixed_address: 10.7.7.104
register: external_fip2_result
- name: Verify floating ip got updated
assert:
that:
- external_fip2_result is changed
- external_fip2_result.floating_ip.floating_ip_address == "10.6.6.151"
- external_fip2_result.floating_ip.port_id == port4.port.id
- external_fip2_result.floating_ip.fixed_ip_address == "10.7.7.104"
- name: Update floating ip 2 on external network again
openstack.cloud.floating_ip:
cloud: "{{ cloud }}"
state: present
network: ansible_external
floating_ip_address: 10.6.6.151
nat_destination: ansible_internal
fixed_address: 10.7.7.104
register: external_fip2_result
- name: Verify idempotency
assert:
that:
- external_fip2_result is not changed
- name: Detach floating ip 2 on external network from port
openstack.cloud.floating_ip:
cloud: "{{ cloud }}"
state: absent
network: ansible_external
floating_ip_address: 10.6.6.151
- name: Get floating ip 2 info
openstack.cloud.floating_ip_info:
cloud: "{{ cloud }}"
floating_ip_address: 10.6.6.151
register: external_fip2_result
- name: Verify floating ip got detached
assert:
that:
- external_fip2_result.floating_ips.0.floating_ip_address == "10.6.6.151"
- external_fip2_result.floating_ips.0.fixed_ip_address == none
- external_fip2_result.floating_ips.0.port_id == none
- name: Detach floating ip 2 on external network from port again
openstack.cloud.floating_ip:
cloud: "{{ cloud }}"
state: absent
network: ansible_external
floating_ip_address: 10.6.6.151
- name: Get floating ip 2 info
openstack.cloud.floating_ip_info:
cloud: "{{ cloud }}"
floating_ip_address: 10.6.6.151
register: external_fip2_result
- name: Verify idempotency
assert:
that:
- external_fip2_result is not changed
- name: Create server 1 with one nic
openstack.cloud.server:
cloud: "{{ cloud }}"
@@ -363,7 +241,7 @@
that:
- server1_fips is success
- server1_fips is not changed
- server1_fips.floating_ips|length > 0
- server1_fips.floating_ips
# allow new fields to be introduced but prevent fields from being removed
- expected_fields|difference(server1_fips.floating_ips[0].keys())|length == 0
@@ -382,7 +260,7 @@
- name: Assert return values of floating_ip module
assert:
that:
- floating_ip.floating_ip|length > 0
- floating_ip.floating_ip
# allow new fields to be introduced but prevent fields from being removed
- expected_fields|difference(floating_ip.floating_ip.keys())|length == 0
@@ -434,7 +312,7 @@
- name: Assert floating ip attached to server 2
assert:
that:
- server2_fip.floating_ip|length > 0
- server2_fip.floating_ip
- name: Find all floating ips for debugging
openstack.cloud.floating_ip_info:
@@ -555,41 +433,18 @@
cloud: "{{ cloud }}"
register: fips
# TODO: Replace with appropriate Ansible module once available
- name: Delete floating ip on public network if we created it
when: not public_network_had_fips
openstack.cloud.floating_ip:
cloud: "{{ cloud }}"
state: absent
purge: true
floating_ip_address: "{{ public_fip }}"
network: public
loop: >-
{{
fips.floating_ips |
selectattr('floating_network_id', '==', public_network.networks.0.id) |
map(attribute="floating_ip_address") |
list
}}
loop_control:
loop_var: public_fip
command: >
openstack --os-cloud={{ cloud }} floating ip delete
{{ fips.floating_ips|selectattr('floating_network_id', '==', public_network.networks.0.id)|
map(attribute="floating_ip_address")|list|join(' ') }}
# TODO: Replace with appropriate Ansible module once available
- name: Delete floating ip 1
command: openstack --os-cloud={{ cloud }} floating ip delete 10.6.6.150
when: fips.floating_ips|length > 0 and "10.6.6.150" in fips.floating_ips|map(attribute="floating_ip_address")|list
openstack.cloud.floating_ip:
cloud: "{{ cloud }}"
state: absent
purge: true
floating_ip_address: 10.6.6.150
network: ansible_external
- name: Delete floating ip 2
when: fips.floating_ips|length > 0 and "10.6.6.151" in fips.floating_ips|map(attribute="floating_ip_address")|list
openstack.cloud.floating_ip:
cloud: "{{ cloud }}"
state: absent
purge: true
floating_ip_address: 10.6.6.151
network: ansible_external
- name: Get remaining floating ips on external network
openstack.cloud.floating_ip_info:
@@ -597,24 +452,14 @@
floating_network: ansible_external
register: fips
# TODO: Replace with appropriate Ansible module once available
# The first, simple floating ip test might have allocated a floating ip on the external network.
# This floating ip must be removed before external network can be deleted.
- name: Delete remaining floating ips on external network
when: fips.floating_ips|length > 0
openstack.cloud.floating_ip:
cloud: "{{ cloud }}"
state: absent
purge: true
floating_ip_address: "{{ external_fip }}"
network: ansible_external
loop: >-
{{
fips.floating_ips |
map(attribute="floating_ip_address") |
list
}}
loop_control:
loop_var: external_fip
command: >
openstack --os-cloud={{ cloud }} floating ip delete
{{ fips.floating_ips|map(attribute="floating_ip_address")|list|join(' ') }}
# Remove routers after floating ips have been detached and disassociated else removal fails with
# Error detaching interface from router ***: Client Error for url: ***,
@@ -633,12 +478,6 @@
state: absent
name: ansible_router1
- name: Delete internal port 4
openstack.cloud.port:
cloud: "{{ cloud }}"
state: absent
name: ansible_internal_port4
- name: Delete internal port 3
openstack.cloud.port:
cloud: "{{ cloud }}"

View File

@@ -176,34 +176,6 @@
- image is changed
- image.image.name == 'ansible_image-changed'
- name: Deactivate raw image
openstack.cloud.image:
cloud: "{{ cloud }}"
state: inactive
id: "{{ image.image.id }}"
name: 'ansible_image-changed'
register: image
- name: Assert changed
assert:
that:
- image is changed
- image.image.status == 'deactivated'
- name: Reactivate raw image
openstack.cloud.image:
cloud: "{{ cloud }}"
state: present
id: "{{ image.image.id }}"
name: 'ansible_image-changed'
register: image
- name: Assert changed
assert:
that:
- image is changed
- image.image.status == 'active'
- name: Rename back raw image (defaults)
openstack.cloud.image:
cloud: "{{ cloud }}"

View File

@@ -279,11 +279,6 @@
ansible.builtin.set_fact:
cache: "{{ cache.content | b64decode | from_yaml }}"
- name: Further process Ansible 2.19+ cache
ansible.builtin.set_fact:
cache: "{{ cache.__payload__ | from_yaml }}"
when: cache.__payload__ is defined
- name: Check Ansible's cache
assert:
that:
@@ -308,25 +303,6 @@
that:
- inventory.all.children.RegionOne.hosts.keys() | sort == ['ansible_server1', 'ansible_server2'] | sort
- name: List servers with inventory plugin with IPv4 only
ansible.builtin.command:
cmd: ansible-inventory --list --yaml --extra-vars only_ipv4=true --inventory-file openstack.yaml
chdir: "{{ tmp_dir.path }}"
environment:
ANSIBLE_INVENTORY_CACHE: "True"
ANSIBLE_INVENTORY_CACHE_PLUGIN: "jsonfile"
ANSIBLE_CACHE_PLUGIN_CONNECTION: "{{ tmp_dir.path }}/.cache/"
register: inventory
- name: Read YAML output from inventory plugin again
ansible.builtin.set_fact:
inventory: "{{ inventory.stdout | from_yaml }}"
- name: Check YAML output from inventory plugin again
assert:
that:
- inventory.all.children.RegionOne.hosts.keys() | sort == ['ansible_server1', 'ansible_server2'] | sort
- name: Delete server 2
openstack.cloud.resource:
service: compute

View File

@@ -38,7 +38,7 @@
- name: Ensure public key is returned
assert:
that:
- keypair.keypair.public_key is defined and keypair.keypair.public_key|length > 0
- keypair.keypair.public_key is defined and keypair.keypair.public_key
- name: Create another keypair
openstack.cloud.keypair:

View File

@@ -11,7 +11,7 @@
- name: Check output of creating network
assert:
that:
- infonet.network is defined
- infonet.network
- item in infonet.network
loop: "{{ expected_fields }}"

View File

@@ -1,17 +0,0 @@
---
expected_fields:
- description
- id
- name
- network_id
- network_type
- physical_network
- segmentation_id
network_name: segment_network
segment_name: example_segment
network_type: vlan
segmentation_id: 999
physical_network: public
initial_description: "example segment description"
updated_description: "updated segment description"

View File

@@ -1,72 +0,0 @@
---
- name: Create network {{ network_name }}
openstack.cloud.network:
cloud: "{{ cloud }}"
name: "{{ network_name }}"
state: present
- name: Create segment {{ segment_name }}
openstack.cloud.network_segment:
cloud: "{{ cloud }}"
name: "{{ segment_name }}"
description: "{{ initial_description }}"
network: "{{ network_name }}"
network_type: "{{ network_type }}"
segmentation_id: "{{ segmentation_id }}"
physical_network: "{{ physical_network }}"
state: present
register: segment
- name: Assert changed
assert:
that: segment is changed
- name: Assert segment fields
assert:
that: item in segment.network_segment
loop: "{{ expected_fields }}"
- name: Update segment {{ segment_name }} by name - no changes
openstack.cloud.network_segment:
cloud: "{{ cloud }}"
name: "{{ segment_name }}"
description: "{{ initial_description }}"
state: present
register: segment
- name: Assert not changed
assert:
that: segment is not changed
- name: Update segment {{ segment_name }} by all fields - changes
openstack.cloud.network_segment:
cloud: "{{ cloud }}"
name: "{{ segment_name }}"
description: "{{ updated_description }}"
network: "{{ network_name }}"
network_type: "{{ network_type }}"
segmentation_id: "{{ segmentation_id }}"
physical_network: "{{ physical_network }}"
state: present
register: segment
- name: Assert changed
assert:
that: segment is changed
- name: Delete segment {{ segment_name }}
openstack.cloud.network_segment:
cloud: "{{ cloud }}"
name: "{{ segment_name }}"
state: absent
register: segment
- name: Assert changed
assert:
that: segment is changed
- name: Delete network {{ network_name }}
openstack.cloud.network:
cloud: "{{ cloud }}"
name: "{{ network_name }}"
state: absent

View File

@@ -7,4 +7,3 @@ expected_fields:
- project_id
- target_project_id
- tenant_id
all_project_symbol: '*'

View File

@@ -69,29 +69,6 @@
id: "{{ rbac_policy.rbac_policy.id }}"
state: absent
- name: Create a new network RBAC policy by targeting all projects
openstack.cloud.neutron_rbac_policy:
cloud: "{{ cloud }}"
object_id: "{{ network.network.id }}"
object_type: 'network'
action: 'access_as_shared'
target_all_project: true
project_id: "{{ source_project.project.id }}"
register: rbac_policy
- name: Assert return values of neutron_rbac_policy module
assert:
that:
# allow new fields to be introduced but prevent fields from being removed
- expected_fields|difference(rbac_policy.rbac_policy.keys())|length == 0
- rbac_policy.rbac_policy.target_project_id == all_project_symbol
- name: Delete RBAC policy
openstack.cloud.neutron_rbac_policy:
cloud: "{{ cloud }}"
id: "{{ rbac_policy.rbac_policy.id }}"
state: absent
- name: Get all rbac policies for {{ source_project.project.name }} - after deletion
openstack.cloud.neutron_rbac_policies_info:
cloud: "{{ cloud }}"

View File

@@ -5,7 +5,7 @@
state: present
name: ansible_container
- name: Create object from data
- name: Create object
openstack.cloud.object:
cloud: "{{ cloud }}"
state: present
@@ -28,47 +28,6 @@
name: ansible_object
container: ansible_container
- name: Create object from file
block:
- name: Create temporary data file
ansible.builtin.tempfile:
register: tmp_file
- name: Populate data file
ansible.builtin.copy:
content: "this is a test"
dest: "{{ tmp_file.path }}"
- name: Create object from data file
openstack.cloud.object:
cloud: "{{ cloud }}"
state: present
name: ansible_object
filename: "{{ tmp_file.path }}"
container: ansible_container
register: object
always:
- name: Remove temporary data file
ansible.builtin.file:
path: "{{ tmp_file.path }}"
state: absent
when: tmp_file is defined and 'path' in tmp_file
- name: Assert return values of object module
assert:
that:
- object.object.id == "ansible_object"
# allow new fields to be introduced but prevent fields from being removed
- expected_fields|difference(object.object.keys())|length == 0
- name: Delete object
openstack.cloud.object:
cloud: "{{ cloud }}"
state: absent
name: ansible_object
container: ansible_container
- name: Delete container
openstack.cloud.object_container:
cloud: "{{ cloud }}"

View File

@@ -31,21 +31,6 @@
- ('cache-control' in container.container.metadata.keys()|map('lower'))
- container.container.metadata['foo'] == 'bar'
- name: Update container metadata
openstack.cloud.object_container:
cloud: "{{ cloud }}"
name: ansible_container
metadata:
'foo': 'baz'
register: container
- name: Verify container metadata was updated
assert:
that:
- container is changed
- ('cache-control' in container.container.metadata.keys()|map('lower'))
- container.container.metadata['foo'] == 'baz'
- name: Update a container
openstack.cloud.object_container:
cloud: "{{ cloud }}"
@@ -60,7 +45,7 @@
that:
- container is changed
- ('cache-control' not in container.container.metadata.keys()|map('lower'))
- "container.container.metadata == {'foo': 'baz'}"
- "container.container.metadata == {'foo': 'bar'}"
- container.container.read_ACL is none or container.container.read_ACL == ""
- name: Delete container

View File

@@ -1,37 +0,0 @@
---
test_container_unprefixed_name: ansible-test-container
test_container_prefixed_prefix: ansible-prefixed-test-container
test_container_prefixed_num: 2
test_object_data: "Hello, world!"
expected_fields_single:
- bytes
- bytes_used
- content_type
- count
- history_location
- id
- if_none_match
- is_content_type_detected
- is_newest
- meta_temp_url_key
- meta_temp_url_key_2
- name
- object_count
- read_ACL
- storage_policy
- sync_key
- sync_to
- timestamp
- versions_location
- write_ACL
expected_fields_multiple:
- bytes
- bytes_used
- count
- id
- name
- object_count

View File

@@ -1,124 +0,0 @@
---
- name: Generate list of containers to create
ansible.builtin.set_fact:
all_test_containers: >-
{{
[test_container_unprefixed_name]
+ (
[test_container_prefixed_prefix + '-']
| product(range(test_container_prefixed_num) | map('string'))
| map('join', '')
)
}}
- name: Run checks
block:
- name: Create all containers
openstack.cloud.object_container:
cloud: "{{ cloud }}"
name: "{{ item }}"
read_ACL: ".r:*,.rlistings"
loop: "{{ all_test_containers }}"
- name: Create an object in all containers
openstack.cloud.object:
cloud: "{{ cloud }}"
container: "{{ item }}"
name: hello.txt
data: "{{ test_object_data }}"
loop: "{{ all_test_containers }}"
- name: Fetch single containers by name
openstack.cloud.object_containers_info:
cloud: "{{ cloud }}"
name: "{{ item }}"
register: single_containers
loop: "{{ all_test_containers }}"
- name: Check that all fields are returned for single containers
ansible.builtin.assert:
that:
- (item.containers | length) == 1
- item.containers[0].name == item.item
- item.containers[0].bytes == (test_object_data | length)
- item.containers[0].read_ACL == ".r:*,.rlistings"
# allow new fields to be introduced but prevent fields from being removed
- (expected_fields_single | difference(item.containers[0].keys()) | length) == 0
quiet: true
loop: "{{ single_containers.results }}"
loop_control:
label: "{{ item.item }}"
- name: Fetch multiple containers by prefix
openstack.cloud.object_containers_info:
cloud: "{{ cloud }}"
prefix: "{{ test_container_prefixed_prefix }}"
register: multiple_containers
- name: Check that the correct number of prefixed containers were returned
ansible.builtin.assert:
that:
- (multiple_containers.containers | length) == test_container_prefixed_num
fail_msg: >-
Incorrect number of containers found
(found {{ multiple_containers.containers | length }},
expected {{ test_container_prefixed_num }})
quiet: true
- name: Check that all prefixed containers exist
ansible.builtin.assert:
that:
- >-
(test_container_prefixed_prefix + '-' + (item | string))
in (multiple_containers.containers | map(attribute='name'))
fail_msg: "Container not found: {{ test_container_prefixed_prefix + '-' + (item | string) }}"
quiet: true
loop: "{{ range(test_container_prefixed_num) | list }}"
loop_control:
label: "{{ test_container_prefixed_prefix + '-' + (item | string) }}"
- name: Check that the expected fields are returned for all prefixed containers
ansible.builtin.assert:
that:
- item.name.startswith(test_container_prefixed_prefix)
# allow new fields to be introduced but prevent fields from being removed
- (expected_fields_multiple | difference(item.keys()) | length) == 0
quiet: true
loop: "{{ multiple_containers.containers | sort(attribute='name') }}"
loop_control:
label: "{{ item.name }}"
- name: Fetch all containers
openstack.cloud.object_containers_info:
cloud: "{{ cloud }}"
register: all_containers
- name: Check that all expected containers were returned
ansible.builtin.assert:
that:
- item in (all_containers.containers | map(attribute='name'))
fail_msg: "Container not found: {{ item }}"
quiet: true
loop: "{{ all_test_containers }}"
- name: Check that the expected fields are returned for all containers
ansible.builtin.assert:
that:
# allow new fields to be introduced but prevent fields from being removed
- (expected_fields_multiple | difference(item.keys()) | length) == 0
quiet: true
loop: "{{ all_containers.containers | selectattr('name', 'in', all_test_containers) }}"
loop_control:
label: "{{ item.name }}"
always:
- name: Delete all containers
openstack.cloud.object_container:
cloud: "{{ cloud }}"
name: "{{ item }}"
state: absent
delete_with_all_objects: true
loop: "{{ all_test_containers }}"

View File

@@ -1,3 +1,6 @@
binding_profile:
"pci_slot": "0000:03:11.1"
"physical_network": "provider"
expected_fields:
- allowed_address_pairs
- binding_host_id

View File

@@ -256,6 +256,27 @@
state: absent
name: ansible_security_group
- name: Create port (with binding profile)
openstack.cloud.port:
cloud: "{{ cloud }}"
state: present
name: "{{ port_name }}"
network: "{{ network_name }}"
binding_profile: "{{ binding_profile }}"
register: port
- name: Assert binding_profile exists in created port
assert:
that: "port.port['binding_profile']"
- debug: var=port
- name: Delete port (with binding profile)
openstack.cloud.port:
cloud: "{{ cloud }}"
state: absent
name: "{{ port_name }}"
- name: Delete subnet
openstack.cloud.subnet:
cloud: "{{ cloud }}"

View File

@@ -1,10 +0,0 @@
expected_fields:
- description
- external_port
- floatingip_id
- id
- internal_ip_address
- internal_port
- internal_port_id
- name
- protocol

View File

@@ -1,272 +0,0 @@
---
- name: Create test network
openstack.cloud.network:
cloud: "{{ cloud }}"
state: present
name: test_internal_network
- name: Create test subnet
openstack.cloud.subnet:
cloud: "{{ cloud }}"
state: present
name: test_internal_subnet
network_name: test_internal_network
cidr: 192.168.100.0/24
gateway_ip: 192.168.100.1
- name: Create test port
openstack.cloud.port:
cloud: "{{ cloud }}"
state: present
name: test_internal_port
network: test_internal_network
fixed_ips:
- ip_address: 192.168.100.10
register: test_internal_port
- name: Create test external network
openstack.cloud.network:
cloud: "{{ cloud }}"
state: present
name: test_external_network
external: true
- name: Create test external subnet
openstack.cloud.subnet:
cloud: "{{ cloud }}"
state: present
network_name: test_external_network
name: test_external_subnet
cidr: 10.6.6.0/24
- name: Create router
openstack.cloud.router:
cloud: "{{ cloud }}"
state: present
name: test_router
network: test_external_network
external_fixed_ips:
- subnet: test_external_subnet
interfaces:
- test_internal_subnet
- name: Create test floating IP
openstack.cloud.floating_ip:
cloud: "{{ cloud }}"
state: present
network: test_external_network
register: test_floating_ip
- name: Test - Create port forwarding rule
openstack.cloud.port_forwarding:
cloud: "{{ cloud }}"
state: present
floating_ip: "{{ test_floating_ip.floating_ip.id }}"
network_port: test_internal_port
internal_ip: 192.168.100.10
external_protocol_port: 8080
internal_protocol_port: 80
protocol: tcp
register: pf_create
- name: Get port forwarding info
openstack.cloud.port_forwarding_info:
cloud: "{{ cloud }}"
floating_ip: "{{ test_floating_ip.floating_ip.id }}"
port_forwarding_id: "{{ pf_create.port_forwarding.id }}"
register: pf_create_info
- name: Verify - Port forwarding created successfully
assert:
that:
- pf_create is changed
- pf_create.port_forwarding is defined
- pf_create.port_forwarding.external_port == 8080
- pf_create.port_forwarding.internal_port == 80
- pf_create.port_forwarding.protocol == "tcp"
- pf_create_info.port_forwardings | length == 1
- pf_create_info.port_forwardings.0.id == pf_create.port_forwarding.id
- name: Test - Create port forwarding rule again (idempotency)
openstack.cloud.port_forwarding:
cloud: "{{ cloud }}"
state: present
floating_ip: "{{ test_floating_ip.floating_ip.id }}"
network_port: test_internal_port
internal_ip: 192.168.100.10
external_protocol_port: 8080
internal_protocol_port: 80
protocol: tcp
register: pf_idempotent
- name: Verify - No changes
assert:
that:
- pf_idempotent is not changed
- name: Test - Update port forwarding internal port
openstack.cloud.port_forwarding:
cloud: "{{ cloud }}"
state: present
floating_ip: "{{ test_floating_ip.floating_ip.id }}"
network_port: test_internal_port
internal_ip: 192.168.100.10
external_protocol_port: 8080
internal_protocol_port: 8080 # Changed from 80 to 8080
protocol: tcp
register: pf_update
- name: Get port forwarding info
openstack.cloud.port_forwarding_info:
cloud: "{{ cloud }}"
floating_ip: "{{ test_floating_ip.floating_ip.id }}"
port_forwarding_id: "{{ pf_update.port_forwarding.id }}"
register: pf_update_info
- name: Verify - Port forwarding updated successfully
assert:
that:
- pf_update is changed
- pf_update.port_forwarding.internal_port == 8080
- pf_update_info.port_forwardings | length == 1
- pf_update_info.port_forwardings.0.id == pf_update.port_forwarding.id
- name: Test - Update with same values (idempotency)
openstack.cloud.port_forwarding:
cloud: "{{ cloud }}"
state: present
floating_ip: "{{ test_floating_ip.floating_ip.id }}"
network_port: test_internal_port
internal_ip: 192.168.100.10
external_protocol_port: 8080
internal_protocol_port: 8080
protocol: tcp
register: pf_update_idempotent
- name: Verify - No changes
assert:
that:
- pf_update_idempotent is not changed
- name: Test - Change just one attribute
openstack.cloud.port_forwarding:
cloud: "{{ cloud }}"
state: present
port_forwarding_id: "{{ pf_create.port_forwarding.id }}"
floating_ip: "{{ test_floating_ip.floating_ip.id }}"
internal_protocol_port: 9090 # Different internal port
register: pf_update_by_id
- name: Verify - Port forwarding updated by ID
assert:
that:
- pf_update_by_id.changed == true
- pf_update_by_id.port_forwarding.id == pf_create.port_forwarding.id
- pf_update_by_id.port_forwarding.internal_port_id == test_internal_port.port.id
- pf_update_by_id.port_forwarding.internal_ip_address == "192.168.100.10"
- pf_update_by_id.port_forwarding.external_port == 8080
- pf_update_by_id.port_forwarding.internal_port == 9090
- name: Test - Create port forwarding without specifying internal IP
openstack.cloud.port_forwarding:
cloud: "{{ cloud }}"
state: present
floating_ip: "{{ test_floating_ip.floating_ip.id }}"
network_port: test_internal_port
external_protocol_port: 2222
internal_protocol_port: 22
protocol: tcp
register: pf_auto_internal_ip
- name: Verify - Port forwarding created with auto internal IP
assert:
that:
- pf_auto_internal_ip.changed == true
- pf_auto_internal_ip.port_forwarding.internal_ip_address == "192.168.100.10"
- name: Test - Delete port forwarding
openstack.cloud.port_forwarding:
cloud: "{{ cloud }}"
state: absent
floating_ip: "{{ test_floating_ip.floating_ip.id }}"
network_port: test_internal_port
external_protocol_port: 8080
internal_protocol_port: 9090
protocol: tcp
register: pf_delete
- name: Verify - Port forwarding deleted successfully
assert:
that:
- pf_delete.changed == true
- name: Test - Delete port forwarding by ID
openstack.cloud.port_forwarding:
cloud: "{{ cloud }}"
state: absent
floating_ip: "{{ test_floating_ip.floating_ip.id }}"
port_forwarding_id: "{{ pf_auto_internal_ip.port_forwarding.id }}"
register: pf_delete_by_id
- name: Verify - Port forwarding deleted by ID
assert:
that:
- pf_delete_by_id.changed == true
- name: Test - Delete already deleted port forwarding (idempotency)
openstack.cloud.port_forwarding:
cloud: "{{ cloud }}"
state: absent
port_forwarding_id: "{{ pf_auto_internal_ip.port_forwarding.id }}"
floating_ip: "{{ test_floating_ip.floating_ip.id }}"
register: pf_delete_idempotent
- name: Verify - No errors on deleting non-existent rule (idempotency)
assert:
that:
- pf_delete_idempotent is not changed
- pf_delete_idempotent is not failed
- name: Clean up - Delete test floating IP
openstack.cloud.floating_ip:
cloud: "{{ cloud }}"
state: absent
floating_ip_address: "{{ test_floating_ip.floating_ip.floating_ip_address }}"
network: test_external_network
purge: true
- name: Clean up - Delete router
openstack.cloud.router:
cloud: "{{ cloud }}"
state: absent
name: test_router
- name: Clean up - Delete test external subnet
openstack.cloud.subnet:
cloud: "{{ cloud }}"
state: absent
name: test_external_subnet
- name: Clean up - Delete test external network
openstack.cloud.network:
cloud: "{{ cloud }}"
state: absent
name: test_external_network
- name: Clean up - Delete test port
openstack.cloud.port:
cloud: "{{ cloud }}"
state: absent
name: test_internal_port
- name: Clean up - Delete test subnet
openstack.cloud.subnet:
cloud: "{{ cloud }}"
state: absent
name: test_internal_subnet
- name: Clean up - Delete test network
openstack.cloud.network:
cloud: "{{ cloud }}"
state: absent
name: test_internal_network

View File

@@ -174,38 +174,6 @@
that:
- project.project.is_enabled == True
- name: Update project to add new extra_specs
openstack.cloud.project:
cloud: "{{ cloud }}"
state: present
name: ansible_project
extra_specs:
is_enabled: True
another_tag: True
register: project
- name: Assert return values of project module
assert:
that:
- project.project.is_enabled == True
- project.project.another_tag == True
- name: Update project to change existing extra_specs
openstack.cloud.project:
cloud: "{{ cloud }}"
state: present
name: ansible_project
extra_specs:
is_enabled: True
another_tag: False
register: project
- name: Assert return values of project module
assert:
that:
- project.project.is_enabled == True
- project.project.another_tag == False
- name: Delete project
openstack.cloud.project:
cloud: "{{ cloud }}"

View File

@@ -28,9 +28,3 @@ test_compute_quota:
ram: 5
server_group_members: 5
server_groups: 5
test_load_balancer_quota:
load_balancers: 5
health_monitors: 5
listeners: 5
pools: 5
members: 5

View File

@@ -1,158 +0,0 @@
---
- module_defaults:
group/openstack.cloud.openstack:
cloud: "{{ cloud }}"
name: "{{ test_project }}"
# Backward compatibility with Ansible 2.9
openstack.cloud.project:
cloud: "{{ cloud }}"
name: "{{ test_project }}"
openstack.cloud.quota:
cloud: "{{ cloud }}"
name: "{{ test_project }}"
block:
- name: Create test project
openstack.cloud.project:
state: present
- name: Clear quotas before tests
openstack.cloud.quota:
state: absent
register: default_quotas
- name: Set network quota
openstack.cloud.quota: "{{ test_network_quota }}"
register: quotas
- name: Assert changed
assert:
that: quotas is changed
- name: Assert field values
assert:
that: quotas.quotas.network[item.key] == item.value
loop: "{{ test_network_quota | dict2items }}"
- name: Set network quota again
openstack.cloud.quota: "{{ test_network_quota }}"
register: quotas
- name: Assert not changed
assert:
that: quotas is not changed
- name: Set volume quotas
openstack.cloud.quota: "{{ test_volume_quota }}"
register: quotas
- name: Assert changed
assert:
that: quotas is changed
- name: Assert field values
assert:
that: quotas.quotas.volume[item.key] == item.value
loop: "{{ test_volume_quota | dict2items }}"
- name: Set volume quotas again
openstack.cloud.quota: "{{ test_volume_quota }}"
register: quotas
- name: Assert not changed
assert:
that: quotas is not changed
- name: Set compute quotas
openstack.cloud.quota: "{{ test_compute_quota }}"
register: quotas
- name: Assert changed
assert:
that: quotas is changed
- name: Assert field values
assert:
that: quotas.quotas.compute[item.key] == item.value
loop: "{{ test_compute_quota | dict2items }}"
- name: Set compute quotas again
openstack.cloud.quota: "{{ test_compute_quota }}"
register: quotas
- name: Set load_balancer quotas
openstack.cloud.quota: "{{ test_load_balancer_quota }}"
register: quotas
- name: Assert changed
assert:
that: quotas is changed
- name: Assert field values
assert:
that: quotas.quotas.load_balancer[item.key] == item.value
loop: "{{ test_load_balancer_quota | dict2items }}"
- name: Set load_balancer quotas again
openstack.cloud.quota: "{{ test_load_balancer_quota }}"
register: quotas
- name: Assert not changed
assert:
that: quotas is not changed
- name: Unset all quotas
openstack.cloud.quota:
state: absent
register: quotas
- name: Assert defaults restore
assert:
that: quotas.quotas == default_quotas.quotas
- name: Set all quotas at once
openstack.cloud.quota:
"{{ [test_network_quota, test_volume_quota, test_compute_quota, test_load_balancer_quota] | combine }}"
register: quotas
- name: Assert changed
assert:
that: quotas is changed
- name: Assert volume values
assert:
that: quotas.quotas.volume[item.key] == item.value
loop: "{{ test_volume_quota | dict2items }}"
- name: Assert network values
assert:
that: quotas.quotas.network[item.key] == item.value
loop: "{{ test_network_quota | dict2items }}"
- name: Assert compute values
assert:
that: quotas.quotas.compute[item.key] == item.value
loop: "{{ test_compute_quota | dict2items }}"
- name: Assert load_balancer values
assert:
that: quotas.quotas.load_balancer[item.key] == item.value
loop: "{{ test_load_balancer_quota | dict2items }}"
- name: Set all quotas at once again
openstack.cloud.quota:
"{{ [test_network_quota, test_volume_quota, test_compute_quota, test_load_balancer_quota] | combine }}"
register: quotas
- name: Assert not changed
assert:
that: quotas is not changed
- name: Unset all quotas
openstack.cloud.quota:
state: absent
register: quotas
- name: Delete test project
openstack.cloud.project:
state: absent

View File

@@ -128,9 +128,4 @@
- name: Delete test project
openstack.cloud.project:
state: absent
- import_tasks: loadbalancer.yml
tags:
- loadbalancer
state: absent

View File

@@ -14,15 +14,6 @@
email: test@example.net
register: dns_zone
- name: Ensure recordset not present
openstack.cloud.recordset:
cloud: "{{ cloud }}"
zone: "{{ dns_zone.zone.name }}"
name: "{{ recordset_name }}"
recordset_type: "a"
records: "{{ records }}"
state: absent
- name: Create a recordset
openstack.cloud.recordset:
cloud: "{{ cloud }}"
@@ -31,13 +22,11 @@
recordset_type: "a"
records: "{{ records }}"
register: recordset
until: '"PENDING" not in recordset["recordset"].status'
retries: 10
delay: 5
- name: Verify recordset info
assert:
that:
- recordset is changed
- recordset["recordset"].name == recordset_name
- recordset["recordset"].zone_name == dns_zone.zone.name
- recordset["recordset"].records | list | sort == records | list | sort

View File

@@ -45,6 +45,12 @@
state: absent
user: admin
- name: Delete project
openstack.cloud.project:
cloud: "{{ cloud }}"
state: absent
name: ansible_project
- name: Create domain
openstack.cloud.identity_domain:
cloud: "{{ cloud }}"
@@ -72,7 +78,6 @@
state: present
name: ansible_user
domain: default
register: specific_user
- name: Create user in specific domain
openstack.cloud.identity_user:
@@ -133,45 +138,6 @@
that:
- role_assignment is changed
- name: Assign role to user in specific domain on default domain project
openstack.cloud.role_assignment:
cloud: "{{ cloud }}"
role: anotherrole
user: "{{ specific_user.user.id }}"
domain: default
project: ansible_project
register: role_assignment
- name: Assert role assignment
assert:
that:
- role_assignment is changed
- name: Revoke role to user in specific domain
openstack.cloud.role_assignment:
cloud: "{{ cloud }}"
role: anotherrole
user: "{{ specific_user.user.id }}"
domain: default
project: ansible_project
state: absent
register: role_assignment
- name: Assert role assignment revoked
assert:
that:
- role_assignment is changed
- name: Assign role to user in specific domain on default domain project
openstack.cloud.role_assignment:
cloud: "{{ cloud }}"
role: anotherrole
user: ansible_user
user_domain: "{{ specific_user.user.domain_id }}"
project: ansible_project
project_domain: default
register: role_assignment
- name: Delete group in default domain
openstack.cloud.identity_group:
cloud: "{{ cloud }}"
@@ -205,10 +171,3 @@
cloud: "{{ cloud }}"
state: absent
name: ansible_domain
- name: Delete project
openstack.cloud.project:
cloud: "{{ cloud }}"
state: absent
name: ansible_project

View File

@@ -558,46 +558,6 @@
assert:
that: router is not changed
- name: Create router without explicit IP address
openstack.cloud.router:
cloud: "{{ cloud }}"
state: present
name: "{{ router_name }}"
enable_snat: false
interfaces:
- shade_subnet1
network: "{{ external_network_name }}"
external_fixed_ips:
- subnet_id: shade_subnet5
register: router
- name: Assert idempotent module
assert:
that: router is changed
- name: Update router without explicit IP address
openstack.cloud.router:
cloud: "{{ cloud }}"
state: present
name: "{{ router_name }}"
enable_snat: false
interfaces:
- shade_subnet1
network: "{{ external_network_name }}"
external_fixed_ips:
- subnet_id: shade_subnet5
register: router
- name: Assert idempotent module
assert:
that: router is not changed
- name: Delete router
openstack.cloud.router:
cloud: "{{ cloud }}"
state: absent
name: "{{ router_name }}"
- name: Create router with simple interface
openstack.cloud.router:
cloud: "{{ cloud }}"
@@ -760,5 +720,3 @@
name: "{{ external_network_name }}"
- include_tasks: shared_network.yml
- include_tasks: shared_ext_network.yml

View File

@@ -1,99 +0,0 @@
---
# Test the case where we have a shared external network in one project used as
# the gateway on a router in a second project.
# See https://bugs.launchpad.net/ansible-collections-openstack/+bug/2049658
- name: Create the first project
openstack.cloud.project:
cloud: "{{ cloud }}"
state: present
name: "shared_ext_net_test_1"
description: "Project that contains the external network to be shared"
domain: default
is_enabled: True
register: project_1
- name: Create the external network to be shared
openstack.cloud.network:
cloud: "{{ cloud }}"
state: present
name: "{{ external_network_name }}"
project: "shared_ext_net_test_1"
external: true
shared: true
register: shared_ext_network
- name: Create subnet on external network
openstack.cloud.subnet:
cloud: "{{ cloud }}"
state: present
network_name: "{{ shared_ext_network.id }}"
name: "shared_ext_subnet"
project: "shared_ext_net_test_1"
cidr: "10.6.6.0/24"
register: shared_subnet
- name: Create the second project
openstack.cloud.project:
cloud: "{{ cloud }}"
state: present
name: "shared_ext_net_test_2"
description: "Project that contains the subnet to be shared"
domain: default
is_enabled: True
register: project_2
- name: Create router with gateway on shared external network
openstack.cloud.router:
cloud: "{{ cloud }}"
state: present
name: "shared_ext_net_test2_router"
project: "shared_ext_net_test_2"
network: "{{ external_network_name }}"
register: router
- name: Gather routers info
openstack.cloud.routers_info:
cloud: "{{ cloud }}"
name: "shared_ext_net_test2_router"
register: routers
- name: Verify routers info
assert:
that:
- routers.routers.0.id == router.router.id
- routers.routers.0.external_gateway_info.external_fixed_ips|length == 1
- name: Delete router
openstack.cloud.router:
cloud: "{{ cloud }}"
state: absent
name: "shared_ext_net_test2_router"
project: "shared_ext_net_test_2"
- name: Delete subnet
openstack.cloud.subnet:
cloud: "{{ cloud }}"
state: absent
network_name: "{{ shared_ext_network.id }}"
name: "shared_ext_subnet"
project: "shared_ext_net_test_1"
- name: Delete network
openstack.cloud.network:
cloud: "{{ cloud }}"
state: absent
name: "{{ external_network_name }}"
project: "shared_ext_net_test_1"
- name: Delete project 2
openstack.cloud.project:
cloud: "{{ cloud }}"
state: absent
name: "shared_ext_net_test_2"
- name: Delete project 1
openstack.cloud.project:
cloud: "{{ cloud }}"
state: absent
name: "shared_ext_net_test_1"

View File

@@ -399,9 +399,6 @@
- port-id: "{{ port.port.id }}"
reuse_ips: false
state: present
tags:
- first
- second
wait: true
register: server
@@ -416,7 +413,6 @@
|selectattr('OS-EXT-IPS:type', 'equalto', 'floating')
|map(attribute='addr')
|list|length == 0
- server.server.tags == ["first", "second"]
- name: Find all floating ips for debugging
openstack.cloud.floating_ip_info:
@@ -458,8 +454,6 @@
- '{{ server_security_group }}'
- '{{ server_alt_security_group }}'
state: present
tags:
- yellow
wait: true
register: server_updated
@@ -481,7 +475,6 @@
- server_updated.server.addresses[server_network]|length == 2
- port.port.fixed_ips[0].ip_address in
server_updated.server.addresses[server_network]|map(attribute='addr')
- server_updated.server.tags == ['yellow']
# TODO: Verify networks once openstacksdk's issue #2010352 has been solved
# Ref.: https://storyboard.openstack.org/#!/story/2010352
#- server_updated.server.addresses.public|length > 0
@@ -516,8 +509,6 @@
- '{{ server_security_group }}'
- '{{ server_alt_security_group }}'
state: present
tags:
- yellow
wait: true
register: server_updated_again
@@ -526,7 +517,6 @@
that:
- server.server.id == server_updated_again.server.id
- server_updated_again is not changed
- server_updated_again.server.tags == ['yellow']
# TODO: Drop failure test once openstacksdk's issue #2010352 has been solved
# Ref.: https://storyboard.openstack.org/#!/story/2010352

View File

@@ -460,14 +460,19 @@
register: server
ignore_errors: true
- name: Assert shelve offload server
assert:
that:
- ((server is success)
or (server is not success
and "Cannot 'shelveOffload' instance" in server.msg
and "while it is in vm_state shelved_offloaded" in server.msg))
- name: Get info about server
openstack.cloud.server_info:
cloud: "{{ cloud }}"
server: ansible_server
register: servers
until: servers.servers.0.task_state == none
retries: 30
delay: 10
- name: Ensure status for server is SHELVED_OFFLOADED
# no change if server has been offloaded automatically after first shelve command
@@ -553,7 +558,7 @@
assert:
that:
- servers.servers.0.status == 'ACTIVE'
- server is changed
- server is not changed
- name: Reboot server (HARD)
openstack.cloud.server_action:
@@ -573,7 +578,7 @@
assert:
that:
- servers.servers.0.status == 'ACTIVE'
- server is changed
- server is not changed
- name: Delete server
openstack.cloud.server:

View File

@@ -1,5 +0,0 @@
---
share_backend_name: GENERIC_BACKEND
share_type_name: test_share_type
share_type_description: Test share type for CI
share_type_alt_description: Changed test share type

View File

@@ -1,130 +0,0 @@
---
- name: Create share type
openstack.cloud.share_type:
name: "{{ share_type_name }}"
cloud: "{{ cloud }}"
state: present
extra_specs:
share_backend_name: "{{ share_backend_name }}"
snapshot_support: true
create_share_from_snapshot_support: true
description: "{{ share_type_description }}"
register: the_result
- name: Check created share type
vars:
the_share_type: "{{ the_result.share_type }}"
ansible.builtin.assert:
that:
- "'id' in the_result.share_type"
- the_share_type.description == share_type_description
- the_share_type.is_public == True
- the_share_type.name == share_type_name
- the_share_type.extra_specs['share_backend_name'] == share_backend_name
- the_share_type.extra_specs['snapshot_support'] == "True"
- the_share_type.extra_specs['create_share_from_snapshot_support'] == "True"
success_msg: >-
Created share type: {{ the_result.share_type.id }},
Name: {{ the_result.share_type.name }},
Description: {{ the_result.share_type.description }}
- name: Test share type info module
openstack.cloud.share_type_info:
name: "{{ share_type_name }}"
cloud: "{{ cloud }}"
register: info_result
- name: Check share type info result
ansible.builtin.assert:
that:
- info_result.share_type.id == the_result.share_type.id
- info_result.share_type.name == share_type_name
- info_result.share_type.description == share_type_description
success_msg: "Share type info retrieved successfully"
- name: Test, check idempotency
openstack.cloud.share_type:
name: "{{ share_type_name }}"
cloud: "{{ cloud }}"
state: present
extra_specs:
share_backend_name: "{{ share_backend_name }}"
snapshot_support: true
create_share_from_snapshot_support: true
description: "{{ share_type_description }}"
is_public: true
register: the_result
- name: Check result.changed is false
ansible.builtin.assert:
that:
- the_result.changed == false
success_msg: "Request with the same details lead to no changes"
- name: Add extra spec
openstack.cloud.share_type:
cloud: "{{ cloud }}"
name: "{{ share_type_name }}"
state: present
extra_specs:
share_backend_name: "{{ share_backend_name }}"
snapshot_support: true
create_share_from_snapshot_support: true
some_spec: fake_spec
description: "{{ share_type_alt_description }}"
is_public: true
register: the_result
- name: Check share type extra spec
ansible.builtin.assert:
that:
- "'some_spec' in the_result.share_type.extra_specs"
- the_result.share_type.extra_specs["some_spec"] == "fake_spec"
- the_result.share_type.description == share_type_alt_description
success_msg: >-
New extra specs: {{ the_result.share_type.extra_specs }}
- name: Remove extra spec by updating with reduced set
openstack.cloud.share_type:
cloud: "{{ cloud }}"
name: "{{ share_type_name }}"
state: present
extra_specs:
share_backend_name: "{{ share_backend_name }}"
snapshot_support: true
create_share_from_snapshot_support: true
description: "{{ share_type_alt_description }}"
is_public: true
register: the_result
- name: Check extra spec was removed
ansible.builtin.assert:
that:
- "'some_spec' not in the_result.share_type.extra_specs"
success_msg: "Extra spec was successfully removed"
- name: Delete share type
openstack.cloud.share_type:
cloud: "{{ cloud }}"
name: "{{ share_type_name }}"
state: absent
register: the_result
- name: Check deletion was successful
ansible.builtin.assert:
that:
- the_result.changed == true
success_msg: "Share type deleted successfully"
- name: Test deletion idempotency
openstack.cloud.share_type:
cloud: "{{ cloud }}"
name: "{{ share_type_name }}"
state: absent
register: the_result
- name: Check deletion idempotency
ansible.builtin.assert:
that:
- the_result.changed == false
success_msg: "Deletion idempotency works correctly"

View File

@@ -25,4 +25,3 @@ expected_fields:
- updated_at
- use_default_subnet_pool
subnet_name: shade_subnet
segment_name: example_segment

View File

@@ -17,20 +17,10 @@
name: "{{ network_name }}"
state: present
- name: Create network segment {{ segment_name }}
openstack.cloud.network_segment:
cloud: "{{ cloud }}"
name: "{{ segment_name }}"
network: "{{ network_name }}"
network_type: "vxlan"
segmentation_id: 1000
state: present
- name: Create subnet {{ subnet_name }} on network {{ network_name }}
openstack.cloud.subnet:
cloud: "{{ cloud }}"
network_name: "{{ network_name }}"
network_segment: "{{ segment_name }}"
name: "{{ subnet_name }}"
state: present
enable_dhcp: "{{ enable_subnet_dhcp }}"
@@ -152,48 +142,6 @@
assert:
that: subnet is not changed
- name: Create subnet {{ subnet_name }} on network {{ network_name }} without gateway IP
openstack.cloud.subnet:
cloud: "{{ cloud }}"
network_name: "{{ network_name }}"
name: "{{ subnet_name }}"
state: present
cidr: 192.168.0.0/24
disable_gateway_ip: true
register: subnet
- name: Assert changed
assert:
that: subnet is changed
- name: Create subnet {{ subnet_name }} on network {{ network_name }} without gateway IP
openstack.cloud.subnet:
cloud: "{{ cloud }}"
network_name: "{{ network_name }}"
name: "{{ subnet_name }}"
state: present
cidr: 192.168.0.0/24
disable_gateway_ip: true
register: subnet
- name: Assert not changed
assert:
that: subnet is not changed
- name: Delete subnet {{ subnet_name }} again
openstack.cloud.subnet:
cloud: "{{ cloud }}"
name: "{{ subnet_name }}"
state: absent
register: subnet
- name: Delete network segment {{ segment_name }}
openstack.cloud.network_segment:
cloud: "{{ cloud }}"
name: "{{ segment_name }}"
network: "{{ network_name }}"
state: absent
- name: Delete network {{ network_name }}
openstack.cloud.network:
cloud: "{{ cloud }}"
@@ -202,6 +150,3 @@
- name: Subnet Allocation
include_tasks: subnet-allocation.yml
- name: Subnet Allocations from Subnet Pool
include_tasks: subnet-pool.yaml

View File

@@ -62,81 +62,6 @@
- subnet_result.subnets[0].allocation_pools.0.start == '192.168.0.2'
- subnet_result.subnets[0].allocation_pools.0.end == '192.168.0.8'
- name: Delete subnet {{ subnet_name }}
openstack.cloud.subnet:
cloud: "{{ cloud }}"
name: "{{ subnet_name }}"
state: absent
- name: Create subnet {{ subnet_name }} with multiple allocation pools on network {{ network_name }}
openstack.cloud.subnet:
cloud: "{{ cloud }}"
network_name: "{{ network_name }}"
enable_dhcp: "{{ enable_subnet_dhcp }}"
name: "{{ subnet_name }}"
state: present
cidr: 192.168.0.0/24
gateway_ip: 192.168.0.1
allocation_pools:
- start: 192.168.0.2
end: 192.168.0.4
- start: 192.168.0.10
end: 192.168.0.12
- name: Create subnet {{ subnet_name }} on network {{ network_name }} again
openstack.cloud.subnet:
cloud: "{{ cloud }}"
network_name: "{{ network_name }}"
enable_dhcp: "{{ enable_subnet_dhcp }}"
name: "{{ subnet_name }}"
state: present
cidr: 192.168.0.0/24
gateway_ip: 192.168.0.1
allocation_pools:
- start: 192.168.0.2
end: 192.168.0.4
- start: 192.168.0.10
end: 192.168.0.12
register: idem2
- name: Update subnet {{ subnet_name }} allocation pools
openstack.cloud.subnet:
cloud: "{{ cloud }}"
network_name: "{{ network_name }}"
name: "{{ subnet_name }}"
state: present
cidr: 192.168.0.0/24
gateway_ip: 192.168.0.1
allocation_pools:
- start: 192.168.0.2
end: 192.168.0.8
- start: 192.168.0.10
end: 192.168.0.16
- name: Get Subnet Info
openstack.cloud.subnets_info:
cloud: "{{ cloud }}"
name: "{{ subnet_name }}"
register: subnet_result
# TODO(sshnaidm): Uncomment this section when the issue with allocation_pools is fixed
# - name: Verify Subnet Allocation Pools Exist
# assert:
# that:
# - idem2 is not changed
# - subnet_result.subnets is defined
# - subnet_result.subnets | length == 1
# - subnet_result.subnets[0].allocation_pools is defined
# - subnet_result.subnets[0].allocation_pools | length == 2
# - name: Verify Subnet Allocation Pools
# assert:
# that:
# - (subnet_result.subnets[0].allocation_pools.0.start == '192.168.0.2' and subnet_result.subnets[0].allocation_pools.0.end == '192.168.0.8') or
# (subnet_result.subnets[0].allocation_pools.0.start == '192.168.0.10' and subnet_result.subnets[0].allocation_pools.0.end == '192.168.0.16')
# - (subnet_result.subnets[0].allocation_pools.1.start == '192.168.0.2' and subnet_result.subnets[0].allocation_pools.1.end == '192.168.0.8') or
# (subnet_result.subnets[0].allocation_pools.1.start == '192.168.0.10' and subnet_result.subnets[0].allocation_pools.1.end == '192.168.0.16')
- name: Delete subnet {{ subnet_name }}
openstack.cloud.subnet:
cloud: "{{ cloud }}"

View File

@@ -1,168 +0,0 @@
---
# This test cover case when subnet is constructed
# with few prefixes and neutron API is required
# CIDR parameter to be used together with subnet pool.
- name: Create network {{ network_name }}
openstack.cloud.network:
cloud: "{{ cloud }}"
name: "{{ network_name }}"
state: present
- name: Create address_scope
openstack.cloud.address_scope:
cloud: "{{ cloud }}"
name: "{{ address_scope_name }}"
shared: false
ip_version: "4"
register: create_address_scope
- name: Create subnet pool
openstack.cloud.subnet_pool:
cloud: "{{ cloud }}"
name: "{{ subnet_pool_name }}"
is_shared: false
address_scope: "{{ address_scope_name }}"
prefixes:
- 192.168.0.0/24
- 192.168.42.0/24
register: subnet_pool
- name: Create subnet {{ subnet_name }} on network {{ network_name }} from subnet pool {{ subnet_pool_name }}
openstack.cloud.subnet:
cloud: "{{ cloud }}"
network_name: "{{ network_name }}"
enable_dhcp: "{{ enable_subnet_dhcp }}"
name: "{{ subnet_name }}"
state: present
cidr: 192.168.42.0/24 # we want specific cidr from subnet pool
ip_version: 4
subnet_pool: "{{ subnet_pool_name }}"
gateway_ip: 192.168.42.1
allocation_pool_start: 192.168.42.2
allocation_pool_end: 192.168.42.4
- name: Create subnet {{ subnet_name }} on network {{ network_name }} from subnet pool {{ subnet_pool_name }} again
openstack.cloud.subnet:
cloud: "{{ cloud }}"
network_name: "{{ network_name }}"
enable_dhcp: "{{ enable_subnet_dhcp }}"
name: "{{ subnet_name }}"
state: present
cidr: 192.168.42.0/24
ip_version: 4
subnet_pool: "{{ subnet_pool_name }}"
gateway_ip: 192.168.42.1
allocation_pool_start: 192.168.42.2
allocation_pool_end: 192.168.42.4
register: idem1
- name: Get Subnet Info
openstack.cloud.subnets_info:
cloud: "{{ cloud }}"
name: "{{ subnet_name }}"
register: subnet_result
- name: Verify Subnet Allocation Pools Exist
assert:
that:
- idem1 is not changed
- subnet_result.subnets is defined
- subnet_result.subnets | length == 1
- subnet_result.subnets[0].allocation_pools is defined
- subnet_result.subnets[0].allocation_pools | length == 1
- name: Verify Subnet Allocation Pools
assert:
that:
- subnet_result.subnets[0].allocation_pools.0.start == '192.168.42.2'
- subnet_result.subnets[0].allocation_pools.0.end == '192.168.42.4'
- name: Delete subnet {{ subnet_name }}
openstack.cloud.subnet:
cloud: "{{ cloud }}"
name: "{{ subnet_name }}"
state: absent
- name: Create subnet {{ subnet_name }} with multiple allocation pools on network {{ network_name }} from subnet pool {{ subnet_pool_name }}
openstack.cloud.subnet:
cloud: "{{ cloud }}"
network_name: "{{ network_name }}"
enable_dhcp: "{{ enable_subnet_dhcp }}"
name: "{{ subnet_name }}"
state: present
cidr: 192.168.42.0/24 # we want specific cidr from subnet pool
ip_version: 4
subnet_pool: "{{ subnet_pool_name }}"
gateway_ip: 192.168.42.1
allocation_pools:
- start: 192.168.42.2
end: 192.168.42.4
- start: 192.168.42.6
end: 192.168.42.8
- name: Create subnet {{ subnet_name }} on network {{ network_name }} from subnet pool {{ subnet_pool_name }} again
openstack.cloud.subnet:
cloud: "{{ cloud }}"
network_name: "{{ network_name }}"
enable_dhcp: "{{ enable_subnet_dhcp }}"
name: "{{ subnet_name }}"
state: present
cidr: 192.168.42.0/24
ip_version: 4
subnet_pool: "{{ subnet_pool_name }}"
gateway_ip: 192.168.42.1
allocation_pools:
- start: 192.168.42.2
end: 192.168.42.4
- start: 192.168.42.6
end: 192.168.42.8
register: idem2
- name: Get Subnet Info
openstack.cloud.subnets_info:
cloud: "{{ cloud }}"
name: "{{ subnet_name }}"
register: subnet_result
# NOT(gtema) Temporarily disable the check to land other gate fix
#- name: Verify Subnet Allocation Pools Exist
# assert:
# that:
# - idem2 is not changed
# - subnet_result.subnets is defined
# - subnet_result.subnets | length == 1
# - subnet_result.subnets[0].allocation_pools is defined
# - subnet_result.subnets[0].allocation_pools | length == 2
#
#- name: Verify Subnet Allocation Pools
# assert:
# that:
# - (subnet_result.subnets[0].allocation_pools.0.start == '192.168.42.2' and subnet_result.subnets[0].allocation_pools.0.end == '192.168.42.4') or
# (subnet_result.subnets[0].allocation_pools.0.start == '192.168.42.6' and subnet_result.subnets[0].allocation_pools.0.end == '192.168.42.8')
# - (subnet_result.subnets[0].allocation_pools.1.start == '192.168.42.2' and subnet_result.subnets[0].allocation_pools.1.end == '192.168.42.4') or
# (subnet_result.subnets[0].allocation_pools.1.start == '192.168.42.6' and subnet_result.subnets[0].allocation_pools.1.end == '192.168.42.8')
- name: Delete subnet {{ subnet_name }}
openstack.cloud.subnet:
cloud: "{{ cloud }}"
name: "{{ subnet_name }}"
state: absent
- name: Delete created subnet pool
openstack.cloud.subnet_pool:
cloud: "{{ cloud }}"
name: "{{ subnet_pool_name }}"
state: absent
- name: Delete created address scope
openstack.cloud.address_scope:
cloud: "{{ cloud }}"
name: "{{ address_scope_name }}"
state: absent
- name: Delete network {{ network_name }}
openstack.cloud.network:
cloud: "{{ cloud }}"
name: "{{ network_name }}"
state: absent

View File

@@ -1 +0,0 @@
trait_name: CUSTOM_ANSIBLE_TRAIT

View File

@@ -1,28 +0,0 @@
---
- name: Create trait
openstack.cloud.trait:
cloud: "{{ cloud }}"
state: present
id: "{{ trait_name }}"
until: result is success
retries: 5
delay: 20
register: result
- name: Assert trait
assert:
that:
- "'name' in result.trait"
- "result.trait.id == trait_name"
- name: Remove trait
openstack.cloud.trait:
cloud: "{{ cloud }}"
state: absent
id: "{{ trait_name }}"
register: result1
- name: Assert trait removed
assert:
that:
- "'trait' not in result1"

View File

@@ -1,21 +0,0 @@
expected_fields:
- created_at
- description
- id
- is_admin_state_up
- name
- port_id
- project_id
- revision_number
- status
- sub_ports
- tags
- tenant_id
- updated_at
trunk_name: ansible_trunk
parent_network_name: ansible_parent_port_network
parent_subnet_name: ansible_parent_port_subnet
parent_port_name: ansible_parent_port
subport_network_name: ansible_subport_network
subport_subnet_name: ansible_subport_subnet
subport_name: ansible_subport

View File

@@ -1,185 +0,0 @@
---
- name: Create parent network
openstack.cloud.network:
cloud: "{{ cloud }}"
state: present
name: "{{ parent_network_name }}"
external: true
register: parent_network
- name: Create parent subnet
openstack.cloud.subnet:
cloud: "{{ cloud }}"
state: present
name: "{{ parent_subnet_name }}"
network_name: "{{ parent_network_name }}"
cidr: 10.5.5.0/24
register: parent_subnet
- name: Create parent port
openstack.cloud.port:
cloud: "{{ cloud }}"
state: present
name: "{{ parent_port_name }}"
network: "{{ parent_network_name }}"
fixed_ips:
- ip_address: 10.5.5.69
register: parent_port
- name: Create subport network
openstack.cloud.network:
cloud: "{{ cloud }}"
state: present
name: "{{ subport_network_name }}"
external: true
register: subport_network
- name: Create subport subnet
openstack.cloud.subnet:
cloud: "{{ cloud }}"
state: present
name: "{{ subport_subnet_name }}"
network_name: "{{ subport_network_name }}"
cidr: 10.5.6.0/24
register: subport_subnet
- name: Create subport
openstack.cloud.port:
cloud: "{{ cloud }}"
state: present
name: "{{ subport_name }}"
network: "{{ subport_network_name }}"
fixed_ips:
- ip_address: 10.5.6.55
register: subport
- name: Create trunk without subports
openstack.cloud.trunk:
cloud: "{{ cloud }}"
state: present
name: "{{ trunk_name }}"
port: "{{ parent_port_name }}"
register: trunk
- name: Display return values of trunk module
ansible.builtin.debug:
var: trunk
- name: Assert return values of trunk module
ansible.builtin.assert:
that:
# allow new fields to be introduced but prevent fields from being removed
- expected_fields|difference(trunk.trunk.keys())|length == 0
- name: Add subport to trunk by name
openstack.cloud.trunk:
cloud: "{{ cloud }}"
state: present
name: "{{ trunk_name }}"
port: "{{ parent_port_name }}"
sub_ports:
- port: "{{ subport_name }}"
segmentation_type: vlan
segmentation_id: 123
register: trunk_subport_by_name
- name: Assert the subport is part of the trunk
ansible.builtin.assert:
that:
- trunk_subport_by_name.trunk.sub_ports|length == 1
- name: Remove subport from trunk
openstack.cloud.trunk:
cloud: "{{ cloud }}"
state: present
name: "{{ trunk_name }}"
port: "{{ parent_port_name }}"
sub_ports: []
register: trunk_subport_removed
- name: Assert no subports are part of the trunk
ansible.builtin.assert:
that:
- trunk_subport_removed.trunk.sub_ports|length == 0
- name: Add subport to trunk by ID
openstack.cloud.trunk:
cloud: "{{ cloud }}"
state: present
name: "{{ trunk_name }}"
port: "{{ parent_port_name }}"
sub_ports:
- port: "{{ subport.port.id }}"
segmentation_type: vlan
segmentation_id: 123
register: trunk_subport_by_id
- name: Assert the subport is part of the trunk
ansible.builtin.assert:
that:
- trunk_subport_by_id.trunk.sub_ports|length == 1
- name: Delete trunk
openstack.cloud.trunk:
cloud: "{{ cloud }}"
state: absent
name: "{{ trunk_name }}"
- name: Create trunk without subports
openstack.cloud.trunk:
cloud: "{{ cloud }}"
state: present
name: "{{ trunk_name }}"
port: "{{ parent_port_name }}"
sub_ports:
- port: "{{ subport.port.id }}"
segmentation_type: vlan
segmentation_id: 123
register: trunk_with_subports
- name: Assert the subport is part of the trunk
ansible.builtin.assert:
that:
- trunk_with_subports.trunk.sub_ports|length == 1
- name: Delete trunk
openstack.cloud.trunk:
cloud: "{{ cloud }}"
state: absent
name: "{{ trunk_name }}"
- name: Delete subport
openstack.cloud.port:
cloud: "{{ cloud }}"
state: absent
name: "{{ subport_name }}"
- name: Delete subport subnet
openstack.cloud.subnet:
cloud: "{{ cloud }}"
state: absent
name: "{{ subport_subnet_name }}"
- name: Delete subport network
openstack.cloud.network:
cloud: "{{ cloud }}"
state: absent
name: "{{ subport_network_name }}"
- name: Delete parent port
openstack.cloud.port:
cloud: "{{ cloud }}"
state: absent
name: "{{ parent_port_name }}"
- name: Delete parent subnet
openstack.cloud.subnet:
cloud: "{{ cloud }}"
state: absent
name: "{{ parent_subnet_name }}"
- name: Delete parent network
openstack.cloud.network:
cloud: "{{ cloud }}"
state: absent
name: "{{ parent_network_name }}"

View File

@@ -12,35 +12,14 @@
that: item in vol.volume
loop: "{{ expected_fields }}"
- assert:
that: not vol.volume.is_bootable
- name: Create bootable volume from existing volume
- name: Create volume from existing volume
openstack.cloud.volume:
cloud: "{{ cloud }}"
state: present
size: 1
volume: "{{ vol.volume.id }}"
name: ansible_volume1
is_bootable: true
description: Test volume
register: vol
- assert:
that: vol.volume.is_bootable
- name: Make the first volume bootable
openstack.cloud.volume:
cloud: "{{ cloud }}"
state: present
size: 1
name: ansible_volume
is_bootable: true
description: Test volume
register: vol
- assert:
that: vol.volume.is_bootable
- name: Delete volume
openstack.cloud.volume:

View File

@@ -37,7 +37,7 @@
- name: Check info
assert:
that:
- info1.volumes | selectattr("id", "equalto", info.volumes.0.id) | list | length == 1
- info1.volumes | selectattr("id", "equalto", "{{ info.volumes.0.id }}") | list | length == 1
- info1.volumes.0.name == 'ansible_test'
- info1.volumes.0.status == None

View File

@@ -1,7 +0,0 @@
---
volume_image_metadata_cloud: "{{ cloud | default(omit) }}"
volume_image_metadata_volume_name: test-image-metadata-volume
volume_image_metadata_size: 1
volume_image_metadata:
disk_format: qcow2
container_format: bare

View File

@@ -1,103 +0,0 @@
---
- name: Get available images
openstack.cloud.image_info:
cloud: "{{ volume_image_metadata_cloud }}"
register: image_info
- name: Select test image
set_fact:
volume_image_metadata_image_id: >-
{{
image_info.images
| selectattr('status', 'equalto', 'active')
| list
| first
| default({})
}}
- name: Assert an image is available for testing
assert:
that:
- volume_image_metadata_image_id.id is defined
fail_msg: "No active images available in the cloud for volume_image_metadata CI test"
- name: Create a test volume from image
openstack.cloud.volume:
cloud: "{{ volume_image_metadata_cloud }}"
state: present
name: "{{ volume_image_metadata_volume_name }}"
image: "{{ volume_image_metadata_image_id.id }}"
size: "{{ volume_image_metadata_size }}"
register: created_volume
- name: Assert volume was created
assert:
that:
- created_volume.volume is defined
- created_volume.volume.id is defined
- name: Get volume details
openstack.cloud.volume_info:
cloud: "{{ volume_image_metadata_cloud }}"
name: "{{ volume_image_metadata_volume_name }}"
register: volume_info
- name: Assert volume has image metadata
assert:
that:
- volume_info.volumes[0].volume_image_metadata is defined
- volume_info.volumes[0].volume_image_metadata | length > 0
# --------------------------------------------------------------------
# Exercise new module
# --------------------------------------------------------------------
- name: Set volume image metadata
openstack.cloud.volume_image_metadata:
cloud: "{{ volume_image_metadata_cloud }}"
volume: "{{ created_volume.volume.id }}"
image_metadata: "{{ volume_image_metadata }}"
register: image_meta_result
- name: Assert image metadata changed
assert:
that:
- image_meta_result.changed | bool
# --------------------------------------------------------------------
# Idempotency check
# --------------------------------------------------------------------
- name: Set volume image metadata again (idempotent)
openstack.cloud.volume_image_metadata:
cloud: "{{ volume_image_metadata_cloud }}"
volume: "{{ created_volume.volume.id }}"
image_metadata: "{{ volume_image_metadata }}"
register: image_meta_idempotent
- name: Assert idempotent behavior
assert:
that:
- not image_meta_idempotent.changed | bool
# --------------------------------------------------------------------
# Verify metadata persisted
# --------------------------------------------------------------------
- name: Re-fetch volume details
openstack.cloud.volume_info:
cloud: "{{ volume_image_metadata_cloud }}"
name: "{{ volume_image_metadata_volume_name }}"
register: final_volume_info
- name: Verify image metadata values
assert:
that:
- final_volume_info.volumes[0].volume_image_metadata.disk_format == "qcow2"
- final_volume_info.volumes[0].volume_image_metadata.container_format == "bare"
# --------------------------------------------------------------------
# Cleanup
# --------------------------------------------------------------------
- name: Delete test volume
openstack.cloud.volume:
cloud: "{{ volume_image_metadata_cloud }}"
state: absent
name: "{{ volume_image_metadata_volume_name }}"

View File

@@ -1,32 +0,0 @@
test_volume: ansible_test_volume
managed_volume: managed_test_volume
expected_fields:
- attachments
- availability_zone
- consistency_group_id
- created_at
- updated_at
- description
- extended_replication_status
- group_id
- host
- image_id
- is_bootable
- is_encrypted
- is_multiattach
- migration_id
- migration_status
- project_id
- replication_driver_data
- replication_status
- scheduler_hints
- size
- snapshot_id
- source_volume_id
- status
- user_id
- volume_image_metadata
- volume_type
- id
- name
- metadata

View File

@@ -1,65 +0,0 @@
---
- name: Create volume
openstack.cloud.volume:
cloud: "{{ cloud }}"
state: present
size: 1
name: "{{ test_volume }}"
description: Test volume
register: vol
- assert:
that: item in vol.volume
loop: "{{ expected_fields }}"
- name: Unmanage volume
openstack.cloud.volume_manage:
cloud: "{{ cloud }}"
state: absent
name: "{{ vol.volume.id }}"
- name: Unmanage volume again
openstack.cloud.volume_manage:
cloud: "{{ cloud }}"
state: absent
name: "{{ vol.volume.id }}"
register: unmanage_idempotency
- assert:
that:
- unmanage_idempotency is not changed
- name: Manage volume
openstack.cloud.volume_manage:
cloud: "{{ cloud }}"
state: present
source_name: volume-{{ vol.volume.id }}
host: "{{ vol.volume.host }}"
name: "{{ managed_volume }}"
register: new_vol
- assert:
that:
- new_vol.volume.name == managed_volume
- name: Manage volume again
openstack.cloud.volume_manage:
cloud: "{{ cloud }}"
state: present
source_name: volume-{{ vol.volume.id }}
host: "{{ vol.volume.host }}"
name: "{{ managed_volume }}"
register: vol_idempotency
- assert:
that:
- vol_idempotency is not changed
- pause:
seconds: 10
- name: Delete volume
openstack.cloud.volume:
cloud: "{{ cloud }}"
state: absent
name: "{{ managed_volume }}"

View File

@@ -1,9 +0,0 @@
expected_fields:
- availability_zone
- binary
- disabled_reason
- host
- name
- state
- status
- updated_at

View File

@@ -1,23 +0,0 @@
---
- name: Fetch volume services
openstack.cloud.volume_service_info:
cloud: "{{ cloud }}"
register: volume_services
- name: Assert return values of volume_service_info module
assert:
that:
- volume_services.volume_services | length > 0
# allow new fields to be introduced but prevent fields from being removed
- expected_fields|difference(volume_services.volume_services[0].keys())|length == 0
- name: Fetch volume services with filters
openstack.cloud.volume_service_info:
cloud: "{{ cloud }}"
binary: "cinder-volume"
register: volume_services
- name: Assert return values of volume_service_info module
assert:
that:
- volume_services.volume_services | length > 0

View File

@@ -75,10 +75,10 @@ ansible-galaxy collection install --requirements-file ci/requirements.yml
if [ -z "$PIP_INSTALL" ]; then
tox -ebuild
ansible-galaxy collection install "$(find build_artifact/ -maxdepth 1 -name 'openstack-cloud-*')" --force
TEST_COLLECTIONS_PATHS=${HOME}/.ansible/collections:$ANSIBLE_COLLECTIONS_PATH
TEST_COLLECTIONS_PATHS=${HOME}/.ansible/collections:$ANSIBLE_COLLECTIONS_PATHS
else
pip freeze | grep ansible-collections-openstack
TEST_COLLECTIONS_PATHS=$VIRTUAL_ENV/share/ansible/collections:$ANSIBLE_COLLECTIONS_PATH
TEST_COLLECTIONS_PATHS=$VIRTUAL_ENV/share/ansible/collections:$ANSIBLE_COLLECTIONS_PATHS
fi
# We need to source the current tox environment so that Ansible will
@@ -124,17 +124,12 @@ if [ ! -e /etc/magnum ]; then
tag_opt+=" --skip-tags coe_cluster,coe_cluster_template"
fi
if ! systemctl is-enabled devstack@m-api.service 2>&1; then
# Skip share_type tasks if Manila is not available
tag_opt+=" --skip-tags share_type"
fi
cd ci/
# Run tests
set -o pipefail
# shellcheck disable=SC2086
ANSIBLE_COLLECTIONS_PATH=$TEST_COLLECTIONS_PATHS ansible-playbook \
ANSIBLE_COLLECTIONS_PATHS=$TEST_COLLECTIONS_PATHS ansible-playbook \
-vvv ./run-collection.yml \
-e "sdk_version=${SDK_VER} cloud=${CLOUD} cloud_alt=${CLOUD_ALT} ${ANSIBLE_VARS}" \
${tag_opt} 2>&1 | sudo tee /opt/stack/logs/test_output.log

View File

@@ -5,7 +5,6 @@
roles:
- { role: address_scope, tags: address_scope }
- { role: application_credential, tags: application_credential }
- { role: auth, tags: auth }
- { role: catalog_service, tags: catalog_service }
- { role: coe_cluster, tags: coe_cluster }
@@ -32,15 +31,10 @@
- { role: loadbalancer, tags: loadbalancer }
- { role: logging, tags: logging }
- { role: network, tags: network }
- { role: network_segment, tags: network_segment }
- { role: neutron_rbac_policy, tags: neutron_rbac_policy }
- { role: object, tags: object }
- { role: object_container, tags: object_container }
- { role: object_containers_info, tags: object_containers_info }
- { role: port, tags: port }
- { role: port_forwarding, tags: port_forwarding }
- { role: trait, tags: trait }
- { role: trunk, tags: trunk }
- { role: project, tags: project }
- { role: quota, tags: quota }
- { role: recordset, tags: recordset }
@@ -55,15 +49,11 @@
- { role: server_group, tags: server_group }
- { role: server_metadata, tags: server_metadata }
- { role: server_volume, tags: server_volume }
- { role: share_type, tags: share_type }
- { role: stack, tags: stack }
- { role: subnet, tags: subnet }
- { role: subnet_pool, tags: subnet_pool }
- { role: volume, tags: volume }
- { role: volume_type, tags: volume_type }
- { role: volume_backup, tags: volume_backup }
- { role: volume_manage, tags: volume_manage }
- { role: volume_service, tags: volume_service }
- { role: volume_snapshot, tags: volume_snapshot }
- { role: volume_type_access, tags: volume_type_access }
- { role: volume_image_metadata, tags: volume_image_metadata }

View File

@@ -11,7 +11,7 @@ For hacking on the Ansible OpenStack collection it helps to [prepare a DevStack
## Hosting
* [Bug tracker][bugtracker]
* [Bug tracker][storyboard]
* [Mailing list `openstack-discuss@lists.openstack.org`][openstack-discuss].
Prefix subjects with `[aoc]` or `[aco]` for faster responses.
* [Code Hosting][opendev-a-c-o]
@@ -188,4 +188,4 @@ Read [Release Guide](releasing.md) on how to publish new releases.
[openstacksdk-cloud-layer-stays]: https://meetings.opendev.org/irclogs/%23openstack-sdks/%23openstack-sdks.2022-04-27.log.html
[openstacksdk-to-dict]: https://opendev.org/openstack/openstacksdk/src/branch/master/openstack/resource.py
[openstacksdk]: https://opendev.org/openstack/openstacksdk
[bugtracker]: https://bugs.launchpad.net/ansible-collections-openstack
[storyboard]: https://storyboard.openstack.org/#!/project/openstack/ansible-collections-openstack

View File

@@ -11,7 +11,7 @@ dependencies: {}
repository: https://opendev.org/openstack/ansible-collections-openstack
documentation: https://docs.ansible.com/ansible/latest/collections/openstack/cloud/index.html
homepage: https://opendev.org/openstack/ansible-collections-openstack
issues: https://bugs.launchpad.net/ansible-collections-openstack
issues: https://storyboard.openstack.org/#!/project/openstack/ansible-collections-openstack
build_ignore:
- "*.tar.gz"
- build_artifact
@@ -32,4 +32,4 @@ build_ignore:
- .vscode
- ansible_collections_openstack.egg-info
- changelogs
version: 2.5.0
version: 2.2.0

View File

@@ -11,7 +11,7 @@ dependencies: {}
repository: https://opendev.org/openstack/ansible-collections-openstack
documentation: https://docs.ansible.com/ansible/latest/collections/openstack/cloud/index.html
homepage: https://opendev.org/openstack/ansible-collections-openstack
issues: https://bugs.launchpad.net/ansible-collections-openstack
issues: https://storyboard.openstack.org/#!/project/openstack/ansible-collections-openstack
build_ignore:
- "*.tar.gz"
- build_artifact

View File

@@ -2,7 +2,6 @@ requires_ansible: ">=2.8"
action_groups:
openstack:
- address_scope
- application_credential
- auth
- baremetal_deploy_template
- baremetal_inspect
@@ -10,7 +9,6 @@ action_groups:
- baremetal_node_action
- baremetal_node_info
- baremetal_port
- baremetal_port_group
- baremetal_port_info
- catalog_service
- catalog_service_info
@@ -52,16 +50,12 @@ action_groups:
- lb_pool
- loadbalancer
- network
- network_segment
- networks_info
- neutron_rbac_policies_info
- neutron_rbac_policy
- object
- object_container
- object_containers_info
- port
- port_forwarding
- port_forwarding_info
- port_info
- project
- project_info
@@ -82,21 +76,15 @@ action_groups:
- server_info
- server_metadata
- server_volume
- share_type
- share_type_info
- stack
- stack_info
- subnet
- subnet_pool
- subnets_info
- trunk
- volume
- volume_manage
- volume_backup
- volume_backup_info
- volume_info
- volume_service_info
- volume_snapshot
- volume_snapshot_info
- volume_type_access
- volume_image_metadata

View File

@@ -96,18 +96,6 @@ options:
only.
type: bool
default: false
only_ipv4:
description:
- Use only ipv4 addresses for ansible_host and ansible_ssh_host.
- Using I(only_ipv4) helps when running Ansible in a ipv4 only setup.
type: bool
default: false
server_filters:
description:
- A dictionary of server filter value pairs.
- Available parameters can be seen under https://docs.openstack.org/api-ref/compute/#list-servers
type: dict
default: {}
show_all:
description:
- Whether all servers should be listed or not.
@@ -285,7 +273,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
clouds_yaml_path = self.get_option('clouds_yaml_path')
config_files = openstack.config.loader.CONFIG_FILES
if clouds_yaml_path:
config_files = clouds_yaml_path + config_files
config_files += clouds_yaml_path
config = openstack.config.loader.OpenStackConfig(
config_files=config_files)
@@ -315,7 +303,6 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
expand_hostvars = self.get_option('expand_hostvars')
all_projects = self.get_option('all_projects')
server_filters = self.get_option('server_filters')
servers = []
def _expand_server(server, cloud, volumes):
@@ -362,8 +349,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
all_projects=all_projects,
# details are required because 'addresses'
# attribute must be populated
details=True,
**server_filters)
details=True)
]:
servers.append(server)
except openstack.exceptions.OpenStackCloudException as e:
@@ -398,17 +384,10 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
if address['OS-EXT-IPS:type'] == 'floating'),
None)
if self.get_option('only_ipv4'):
fixed_ip = next(
(address['addr'] for address in addresses
if (address['OS-EXT-IPS:type'] == 'fixed' and address['version'] == 4)),
None)
else:
fixed_ip = next(
(address['addr'] for address in addresses
if address['OS-EXT-IPS:type'] == 'fixed'),
None)
fixed_ip = next(
(address['addr'] for address in addresses
if address['OS-EXT-IPS:type'] == 'fixed'),
None)
ip = floating_ip if floating_ip is not None and not self.get_option('private') else fixed_ip

View File

@@ -32,15 +32,16 @@
import abc
import copy
from ansible.module_utils.six import raise_from
try:
from ansible.module_utils.compat.version import StrictVersion
except ImportError:
try:
from distutils.version import StrictVersion
except ImportError as exc:
raise ImportError(f'To use this plugin or module with ansible-core'
f' < 2.11, you need to use Python < 3.12 with '
f'distutils.version present. {exc}')
raise_from(ImportError('To use this plugin or module with ansible-core'
' < 2.11, you need to use Python < 3.12 with '
'distutils.version present'), exc)
import importlib
import os
@@ -182,7 +183,7 @@ def openstack_cloud_from_module(module, min_version=None, max_version=None):
" excluded.")
for param in (
'auth', 'region_name', 'validate_certs',
'ca_cert', 'client_cert', 'client_key', 'api_timeout', 'auth_type'):
'ca_cert', 'client_key', 'api_timeout', 'auth_type'):
if module.params[param] is not None:
module.fail_json(msg=fail_message.format(param=param))
# For 'interface' parameter, fail if we receive a non-default value
@@ -198,7 +199,6 @@ def openstack_cloud_from_module(module, min_version=None, max_version=None):
verify=module.params['validate_certs'],
cacert=module.params['ca_cert'],
key=module.params['client_key'],
cert=module.params['client_cert'],
api_timeout=module.params['api_timeout'],
interface=module.params['interface'],
)
@@ -358,7 +358,7 @@ class OpenStackModule:
" excluded.")
for param in (
'auth', 'region_name', 'validate_certs',
'ca_cert', 'client_cert', 'client_key', 'api_timeout', 'auth_type'):
'ca_cert', 'client_key', 'api_timeout', 'auth_type'):
if self.params[param] is not None:
self.fail_json(msg=fail_message.format(param=param))
# For 'interface' parameter, fail if we receive a non-default value
@@ -373,7 +373,6 @@ class OpenStackModule:
verify=self.params['validate_certs'],
cacert=self.params['ca_cert'],
key=self.params['client_key'],
cert=self.params['client_cert'],
api_timeout=self.params['api_timeout'],
interface=self.params['interface'],
)

View File

@@ -1,332 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2024 Red Hat, Inc.
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Ansible-doc metadata: each entry under a "description:" key must be a
# complete sentence/paragraph; continuation lines are indented, not new
# "- " list items (previously each wrapped line started with "- ", which
# rendered as disjoint sentence fragments).
DOCUMENTATION = r"""
---
module: application_credential
short_description: Manage OpenStack Identity (Keystone) application credentials
author: OpenStack Ansible SIG
description:
  - Create or delete an OpenStack Identity (Keystone) application credential.
  - When the secret parameter is not set, a secret will be generated and
    returned in the response. Existing credentials cannot be modified, so
    running this module against an existing credential will result in it
    being deleted and recreated. This needs to be taken into account when
    the secret is generated, as the secret will change on each run of the
    module.
options:
  name:
    description:
      - Name of the application credential.
    required: true
    type: str
  description:
    description:
      - Application credential description.
    type: str
  secret:
    description:
      - Secret to use for authentication
        (if not provided, one will be generated).
    type: str
  roles:
    description:
      - Roles to authorize (name or ID).
    type: list
    elements: dict
    suboptions:
      name:
        description: Name of role
        type: str
      id:
        description: ID of role
        type: str
      domain_id:
        description: Domain ID
        type: str
  expires_at:
    description:
      - Sets an expiration date for the application credential,
        format of YYYY-mm-ddTHH:MM:SS
        (if not provided, the application credential will not expire).
    type: str
  unrestricted:
    description:
      - Enable application credential to create and delete other application
        credentials and trusts (this is potentially dangerous behavior and
        is disabled by default).
    default: false
    type: bool
  access_rules:
    description:
      - List of access rules, each containing a request method, path, and
        service.
    type: list
    elements: dict
    suboptions:
      service:
        description: Name of service endpoint
        type: str
        required: true
      path:
        description: Path portion of access URL
        type: str
        required: true
      method:
        description: HTTP method
        type: str
        required: true
  state:
    description:
      - Should the resource be present or absent.
      - Application credentials are immutable, so running with an existing
        present credential will result in the credential being deleted and
        recreated.
    choices: [present, absent]
    default: present
    type: str
extends_documentation_fragment:
  - openstack.cloud.openstack
"""
EXAMPLES = r"""
- name: Create application credential
openstack.cloud.application_credential:
cloud: mycloud
description: demodescription
name: democreds
state: present
- name: Create application credential with expiration, access rules and roles
openstack.cloud.application_credential:
cloud: mycloud
description: demodescription
name: democreds
access_rules:
- service: "compute"
path: "/v2.1/servers"
method: "GET"
expires_at: "2024-02-29T09:29:59"
roles:
- name: Member
state: present
- name: Delete application credential
openstack.cloud.application_credential:
cloud: mycloud
name: democreds
state: absent
"""
# Return-value documentation rendered by ansible-doc. Fixed copy-paste
# defect: the top-level description said "Dictionary describing the
# project." although the dict describes the application credential.
RETURN = r"""
application_credential:
  description: Dictionary describing the application credential.
  returned: On success when I(state) is C(present).
  type: dict
  contains:
    id:
      description: The ID of the application credential.
      type: str
      sample: "2e73d1b4f0cb473f920bd54dfce3c26d"
    name:
      description: The name of the application credential.
      type: str
      sample: "appcreds"
    secret:
      description: Secret to use for authentication
        (if not provided, returns the generated value).
      type: str
      sample: "JxE7LajLY75NZgDH1hfu0N_6xS9hQ-Af40W3"
    description:
      description: A description of the application credential's purpose.
      type: str
      sample: "App credential"
    expires_at:
      description: The expiration time of the application credential in UTC,
        if one was specified.
      type: str
      sample: "2024-02-29T09:29:59.000000"
    project_id:
      description: The ID of the project the application credential was created
        for and that authentication requests using this application
        credential will be scoped to.
      type: str
      sample: "4b633c451ac74233be3721a3635275e5"
    roles:
      description: A list of one or more roles that this application credential
        has associated with its project. A token using this application
        credential will have these same roles.
      type: list
      elements: dict
      sample: [{"name": "Member"}]
    access_rules:
      description: A list of access_rules objects
      type: list
      elements: dict
      sample:
        - id: "edecb6c791d541a3b458199858470d20"
          service: "compute"
          path: "/v2.1/servers"
          method: "GET"
    unrestricted:
      description: A flag indicating whether the application credential may be
        used for creation or destruction of other application credentials
        or trusts.
      type: bool
cloud:
  description: The current cloud config with the username and password replaced
    with the name and secret of the application credential. This
    can be passed to the cloud parameter of other tasks, or written
    to an openstack cloud config file.
  returned: On success when I(state) is C(present).
  type: dict
  sample:
    auth_type: "v3applicationcredential"
    auth:
      auth_url: "https://192.0.2.1/identity"
      application_credential_secret: "JxE7LajLY75NZgDH1hfu0N_6xS9hQ-Af40W3"
      application_credential_id: "3e73d1b4f0cb473f920bd54dfce3c26d"
"""
import copy
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import (
OpenStackModule,
)
try:
import openstack.config
except ImportError:
pass
class IdentityApplicationCredentialModule(OpenStackModule):
    """Ansible module class managing Keystone application credentials.

    Application credentials cannot be updated in place: run() deletes and
    recreates an existing credential when state is 'present', which means
    the secret changes on every run unless one is supplied explicitly.
    """

    argument_spec = dict(
        name=dict(required=True),
        description=dict(),
        # no_log keeps the secret out of Ansible's logs and result output
        secret=dict(no_log=True),
        roles=dict(
            type="list",
            elements="dict",
            options=dict(name=dict(), id=dict(), domain_id=dict()),
        ),
        expires_at=dict(),
        unrestricted=dict(type="bool", default=False),
        access_rules=dict(
            type="list",
            elements="dict",
            options=dict(
                service=dict(required=True),
                path=dict(required=True),
                method=dict(required=True),
            ),
        ),
        state=dict(default="present", choices=["absent", "present"]),
    )

    module_kwargs = dict()

    # Name of the cloud from the module's 'cloud' parameter; captured in
    # openstack_cloud_from_module() so _get_cloud_config() can resolve the
    # same cloud again later.
    cloud = None

    def openstack_cloud_from_module(self):
        """Capture the 'cloud' parameter, then delegate to the base class."""
        # Fetch cloud param before it is popped
        self.cloud = self.params["cloud"]
        return OpenStackModule.openstack_cloud_from_module(self)

    def run(self):
        """Create, recreate or delete the application credential.

        Every branch exits via exit_json(); 'changed' is False only when
        state is 'absent' and no matching credential exists.
        """
        state = self.params["state"]
        creds = self._find()

        if state == "present" and not creds:
            # Create creds
            creds = self._create().to_dict(computed=False)
            cloud_config = self._get_cloud_config(creds)
            self.exit_json(
                changed=True, application_credential=creds, cloud=cloud_config
            )

        elif state == "present" and creds:
            # Recreate immutable creds
            self._delete(creds)
            creds = self._create().to_dict(computed=False)
            cloud_config = self._get_cloud_config(creds)
            self.exit_json(
                changed=True, application_credential=creds, cloud=cloud_config
            )

        elif state == "absent" and creds:
            # Delete creds
            self._delete(creds)
            self.exit_json(changed=True)

        elif state == "absent" and not creds:
            # Do nothing
            self.exit_json(changed=False)

    def _get_user_id(self):
        """Return the ID of the user this session is authenticated as."""
        return self.conn.session.get_user_id()

    def _create(self):
        """Create the application credential and return the SDK resource."""
        # Forward only the parameters the user actually provided.
        kwargs = dict(
            (k, self.params[k])
            for k in [
                "name",
                "description",
                "secret",
                "expires_at",
                "unrestricted",
                "access_rules",
            ]
            if self.params[k] is not None
        )

        roles = self.params["roles"]
        if roles:
            # Strip unset keys from each role entry before passing to the SDK.
            kwroles = []
            for role in roles:
                kwroles.append(
                    dict(
                        (k, role[k])
                        for k in ["name", "id", "domain_id"]
                        if role[k] is not None
                    )
                )
            kwargs["roles"] = kwroles

        # The credential is always created for the authenticated user.
        kwargs["user"] = self._get_user_id()
        creds = self.conn.identity.create_application_credential(**kwargs)
        return creds

    def _get_cloud_config(self, creds):
        """Build a clouds.yaml-style config dict that authenticates with creds.

        Copies the current cloud's config and replaces the auth section with
        v3applicationcredential auth using the new credential's id and secret.
        """
        cloud_region = openstack.config.OpenStackConfig().get_one(self.cloud)
        conf = cloud_region.config
        # Deep copy so the returned dict can be mutated without touching the
        # loaded cloud configuration.
        cloud_config = copy.deepcopy(conf)
        cloud_config["auth_type"] = "v3applicationcredential"
        cloud_config["auth"] = {
            "application_credential_id": creds["id"],
            "application_credential_secret": creds["secret"],
            "auth_url": conf["auth"]["auth_url"],
        }
        return cloud_config

    def _delete(self, creds):
        """Delete the given application credential for the current user."""
        user = self._get_user_id()
        self.conn.identity.delete_application_credential(user, creds.id)

    def _find(self):
        """Look up the current user's application credential by name.

        Returns the SDK resource if found; presumably None when missing
        (find_* semantics — TODO confirm it does not raise on absence).
        """
        name = self.params["name"]
        user = self._get_user_id()
        return self.conn.identity.find_application_credential(
            user=user, name_or_id=name
        )
def main():
    """Instantiate the module class and execute it."""
    IdentityApplicationCredentialModule()()


if __name__ == "__main__":
    main()

View File

@@ -243,10 +243,6 @@ node:
retired_reason:
description: TODO
type: str
shard:
description: The shard key for a node.
returned: success
type: str
states:
description: |
Links to the collection of states. Note that this resource is also

View File

@@ -437,10 +437,6 @@ node:
description: The reason the node is marked as retired.
returned: success
type: str
shard:
description: The shard key for a node.
returned: success
type: str
states:
description: Links to the collection of states.
returned: success

View File

@@ -289,10 +289,6 @@ nodes:
description: The reason the node is marked as retired.
returned: success
type: str
shard:
description: The shard key for a node.
returned: success
type: str
states:
description: Links to the collection of states.
returned: success

View File

@@ -1,257 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2026 OpenStack Ansible SIG
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
DOCUMENTATION = r'''
module: baremetal_port_group
short_description: Create/Delete Bare Metal port group resources from OpenStack
author: OpenStack Ansible SIG
description:
- Create, update and remove Bare Metal port groups from OpenStack.
options:
id:
description:
- ID of the port group.
- Will be auto-generated if not specified.
type: str
aliases: ['uuid']
name:
description:
- Name of the port group.
type: str
node:
description:
- ID or Name of the node this resource belongs to.
- Required when creating a new port group.
type: str
address:
description:
- Physical hardware address of this port group, typically the hardware
MAC address.
type: str
extra:
description:
- A set of one or more arbitrary metadata key and value pairs.
type: dict
standalone_ports_supported:
description:
- Whether the port group supports ports that are not members of this
port group.
type: bool
mode:
description:
- The port group mode.
type: str
properties:
description:
- Key/value properties for the port group.
type: dict
state:
description:
- Indicates desired state of the resource.
choices: ['present', 'absent']
default: present
type: str
extends_documentation_fragment:
- openstack.cloud.openstack
'''
EXAMPLES = r'''
- name: Create Bare Metal port group
openstack.cloud.baremetal_port_group:
cloud: devstack
state: present
name: bond0
node: bm-0
address: fa:16:3e:aa:aa:aa
mode: '802.3ad'
standalone_ports_supported: true
register: result
- name: Update Bare Metal port group
openstack.cloud.baremetal_port_group:
cloud: devstack
state: present
id: 1a85ebca-22bf-42eb-ad9e-f640789b8098
mode: 'active-backup'
properties:
miimon: '100'
register: result
- name: Delete Bare Metal port group
openstack.cloud.baremetal_port_group:
cloud: devstack
state: absent
id: 1a85ebca-22bf-42eb-ad9e-f640789b8098
register: result
'''
RETURN = r'''
port_group:
description: A port group dictionary, subset of the dictionary keys listed
below may be returned, depending on your cloud provider.
returned: success
type: dict
contains:
address:
description: Physical hardware address of the port group.
returned: success
type: str
created_at:
description: Bare Metal port group created at timestamp.
returned: success
type: str
extra:
description: A set of one or more arbitrary metadata key and value
pairs.
returned: success
type: dict
id:
description: The UUID for the Bare Metal port group resource.
returned: success
type: str
links:
description: A list of relative links, including the self and
bookmark links.
returned: success
type: list
mode:
description: The port group mode.
returned: success
type: str
name:
description: Bare Metal port group name.
returned: success
type: str
node_id:
description: UUID of the Bare Metal node this resource belongs to.
returned: success
type: str
properties:
description: Key/value properties for this port group.
returned: success
type: dict
standalone_ports_supported:
description: Whether standalone ports are supported.
returned: success
type: bool
updated_at:
description: Bare Metal port group updated at timestamp.
returned: success
type: str
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import (
OpenStackModule
)
class BaremetalPortGroupModule(OpenStackModule):
    """Ansible module class to create, update or delete Ironic port groups."""

    argument_spec = dict(
        id=dict(aliases=['uuid']),
        name=dict(),
        node=dict(),
        address=dict(),
        extra=dict(type='dict'),
        standalone_ports_supported=dict(type='bool'),
        mode=dict(),
        properties=dict(type='dict'),
        state=dict(default='present', choices=['present', 'absent']),
    )
    module_kwargs = dict(
        # A port group is addressed either by id or by name.
        required_one_of=[
            ('id', 'name'),
        ],
        supports_check_mode=True,
    )

    def _find_port_group(self):
        """Return the existing port group matching id or name, else None."""
        # Prefer 'id' when both 'id' and 'name' are given.
        id_or_name = self.params['id'] if self.params['id'] else self.params['name']
        if not id_or_name:
            return None
        try:
            return self.conn.baremetal.find_port_group(id_or_name)
        except self.sdk.exceptions.ResourceNotFound:
            # Treat a missing resource as "not found" rather than an error.
            return None

    def _build_create_attrs(self):
        """Collect creation attributes from module params.

        Fails the module when 'node' is missing, since a port group must
        belong to a node; resolves the node name/ID to its UUID.
        """
        attrs = {}
        for key in ['id', 'name', 'address', 'extra',
                    'standalone_ports_supported', 'mode', 'properties']:
            if self.params[key] is not None:
                attrs[key] = self.params[key]

        node_name_or_id = self.params['node']
        if not node_name_or_id:
            self.fail_json(msg="Parameter 'node' is required when creating a new port group")

        # ignore_missing=False makes the SDK raise when the node does not exist.
        node = self.conn.baremetal.find_node(node_name_or_id, ignore_missing=False)
        attrs['node_id'] = node['id']

        return attrs

    def _build_update_attrs(self, port_group):
        """Return only the attributes whose requested value differs."""
        attrs = {}
        for key in ['name', 'address', 'extra',
                    'standalone_ports_supported', 'mode', 'properties']:
            if self.params[key] is not None and self.params[key] != port_group.get(key):
                attrs[key] = self.params[key]
        return attrs

    def _will_change(self, port_group, state):
        """Predict whether a real run would change anything (check mode)."""
        if state == 'absent':
            return bool(port_group)
        if not port_group:
            return True
        return bool(self._build_update_attrs(port_group))

    def run(self):
        """Reconcile the port group with the requested state.

        Every path exits via exit_json(); in check mode nothing is modified
        and only the predicted 'changed' flag is reported.
        """
        state = self.params['state']
        port_group = self._find_port_group()

        if self.ansible.check_mode:
            if state == 'present' and not port_group:
                # Validate create parameters (e.g. that 'node' is set and
                # resolvable) even though nothing will be created.
                self._build_create_attrs()
            self.exit_json(changed=self._will_change(port_group, state))

        if state == 'present':
            if not port_group:
                port_group = self.conn.baremetal.create_port_group(
                    **self._build_create_attrs())
                self.exit_json(
                    changed=True,
                    port_group=port_group.to_dict(computed=False))

            update_attrs = self._build_update_attrs(port_group)
            changed = bool(update_attrs)
            if changed:
                port_group = self.conn.baremetal.update_port_group(
                    port_group['id'], **update_attrs)
            self.exit_json(
                changed=changed,
                port_group=port_group.to_dict(computed=False))

        # state == 'absent' from here on.
        if not port_group:
            self.exit_json(changed=False)
        self.conn.baremetal.delete_port_group(port_group['id'])
        self.exit_json(changed=True)
def main():
    """Instantiate the module class and execute it."""
    BaremetalPortGroupModule()()


if __name__ == "__main__":
    main()

View File

@@ -80,10 +80,6 @@ options:
- Magnum's default value for I(is_registry_enabled) is C(false).
type: bool
aliases: ['registry_enabled']
insecure_registry:
description:
- The URL pointing to users own private insecure docker registry.
type: str
is_tls_disabled:
description:
- Indicates whether the TLS should be disabled.
@@ -346,7 +342,6 @@ class COEClusterTemplateModule(OpenStackModule):
keypair_id=dict(),
labels=dict(type='raw'),
master_flavor_id=dict(),
insecure_registry=dict(),
is_master_lb_enabled=dict(type='bool', default=False,
aliases=['master_lb_enabled']),
is_public=dict(type='bool', aliases=['public']),
@@ -417,7 +412,6 @@ class COEClusterTemplateModule(OpenStackModule):
'fixed_subnet', 'flavor_id',
'http_proxy', 'https_proxy',
'image_id',
'insecure_registry',
'is_floating_ip_enabled',
'is_master_lb_enabled',
'is_public', 'is_registry_enabled',
@@ -433,9 +427,6 @@ class COEClusterTemplateModule(OpenStackModule):
if isinstance(labels, str):
labels = dict([tuple(kv.split(":"))
for kv in labels.split(",")])
elif isinstance(labels, dict):
labels = dict({str(k): str(v)
for k, v in labels.items()})
if labels != cluster_template['labels']:
non_updateable_keys.append('labels')
@@ -467,7 +458,7 @@ class COEClusterTemplateModule(OpenStackModule):
'external_network_id', 'fixed_network',
'fixed_subnet', 'flavor_id', 'http_proxy',
'https_proxy', 'image_id',
'insecure_registry', 'is_floating_ip_enabled',
'is_floating_ip_enabled',
'is_master_lb_enabled', 'is_public',
'is_registry_enabled', 'is_tls_disabled',
'keypair_id', 'master_flavor_id', 'name',

View File

@@ -41,11 +41,11 @@ extends_documentation_fragment:
EXAMPLES = r'''
- name: Fetch all DNS zones
openstack.cloud.dns_zone_info:
openstack.cloud.dns_zones:
cloud: devstack
- name: Fetch DNS zones by name
openstack.cloud.dns_zone_info:
openstack.cloud.dns_zones:
cloud: devstack
name: ansible.test.zone.
'''

View File

@@ -12,11 +12,6 @@ description:
- Create, update or delete an identity provider of the OpenStack
identity (Keystone) service.
options:
authorization_ttl:
description:
- Time to keep the role assignments for users authenticating via this identity provider.
- When not provided, global default configured in the Identity service will be used.
type: int
description:
description:
- The description of the identity provider.
@@ -63,7 +58,6 @@ EXAMPLES = r'''
name: example_provider
domain_id: 0123456789abcdef0123456789abcdef
description: 'My example IDP'
authorization_ttl: 300
remote_ids:
- 'https://auth.example.com/auth/realms/ExampleRealm'
@@ -80,10 +74,6 @@ identity_provider:
returned: On success when I(state) is C(present).
type: dict
contains:
authorization_ttl:
description: Time to keep the role assignments for users authenticating via this identity provider.
type: int
sample: 300
description:
description: Identity provider description
type: str
@@ -114,7 +104,6 @@ from ansible_collections.openstack.cloud.plugins.module_utils.resource import St
class IdentityProviderModule(OpenStackModule):
argument_spec = dict(
authorization_ttl=dict(type='int'),
description=dict(),
domain_id=dict(),
id=dict(required=True, aliases=['name']),
@@ -138,7 +127,7 @@ class IdentityProviderModule(OpenStackModule):
kwargs['attributes'] = \
dict((k, self.params[k])
for k in ['authorization_ttl', 'description', 'domain_id', 'id', 'is_enabled',
for k in ['description', 'domain_id', 'id', 'is_enabled',
'remote_ids']
if self.params[k] is not None)

View File

@@ -40,13 +40,6 @@ options:
required: true
type: list
elements: dict
schema_version:
description:
- The federated attribute mapping schema version.
The default value on the client side is 'None';
however, that will lead the backend to set the default according
to 'attribute_mapping_default_schema_version' option.
type: str
state:
description:
- Whether the mapping should be C(present) or C(absent).
@@ -76,7 +69,6 @@ EXAMPLES = r'''
any_one_of:
- Contractor
- SubContractor
schema_version: '1.0'
- name: Delete a mapping
openstack.cloud.federation_mapping:
@@ -101,9 +93,6 @@ mapping:
rules:
description: List of rules for the mapping
type: list
schema_version:
description: Schema version of the mapping
type: str
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
@@ -119,7 +108,6 @@ class IdentityFederationMappingModule(OpenStackModule):
local=dict(required=True, type='list', elements='dict'),
remote=dict(required=True, type='list', elements='dict')
)),
schema_version=dict(default=None),
state=dict(default='present', choices=['absent', 'present']),
)
@@ -167,7 +155,7 @@ class IdentityFederationMappingModule(OpenStackModule):
if len(self.params['rules']) < 1:
self.fail_json(msg='At least one rule must be passed')
attributes = dict((k, self.params[k]) for k in ['rules', 'schema_version']
attributes = dict((k, self.params[k]) for k in ['rules']
if k in self.params and self.params[k] is not None
and self.params[k] != mapping[k])
@@ -178,8 +166,7 @@ class IdentityFederationMappingModule(OpenStackModule):
def _create(self):
return self.conn.identity.create_mapping(id=self.params['name'],
rules=self.params['rules'],
schema_version=self.params['schema_version'])
rules=self.params['rules'])
def _delete(self, mapping):
self.conn.identity.delete_mapping(mapping.id)

View File

@@ -17,9 +17,8 @@ description:
options:
fixed_address:
description:
- To which fixed IP of attached port the floating IP address should be
- To which fixed IP of server the floating IP address should be
attached to.
aliases: ["fixed_ip_address"]
type: str
floating_ip_address:
description:
@@ -36,7 +35,6 @@ options:
network:
description:
- The name or ID of a neutron external network or a nova pool name.
- When I(server) is not defined, I(network) is required
type: str
purge:
description:
@@ -59,6 +57,7 @@ options:
description:
- The name or ID of the server to which the IP address
should be assigned.
required: true
type: str
state:
description:
@@ -184,24 +183,23 @@ from ansible_collections.openstack.cloud.plugins.module_utils.openstack import O
class NetworkingFloatingIPModule(OpenStackModule):
argument_spec = dict(
fixed_address=dict(aliases=['fixed_ip_address']),
fixed_address=dict(),
floating_ip_address=dict(),
nat_destination=dict(aliases=['fixed_network', 'internal_network']),
network=dict(),
purge=dict(type='bool', default=False),
reuse=dict(type='bool', default=False),
server=dict(),
server=dict(required=True),
state=dict(default='present', choices=['absent', 'present']),
)
module_kwargs = dict(
required_if=[
['state', 'present', ['server', 'network'], True],
['state', 'absent', ['floating_ip_address'], False],
['state', 'absent', ['floating_ip_address']]
],
required_by={
'floating_ip_address': ('network'),
},
}
)
def run(self):
@@ -216,174 +214,139 @@ class NetworkingFloatingIPModule(OpenStackModule):
changed = False
fixed_address = self.params['fixed_address']
floating_ip_address = self.params['floating_ip_address']
nat_destination_id = (
self.nat_destination['id'] if self.nat_destination else None
)
nat_destination_name_or_id = self.params['nat_destination']
network_id = self.network['id'] if self.network else None
server = self.server
ips = self._find_ips(
server=server,
server=self.server,
floating_ip_address=floating_ip_address,
network_id=network_id,
fixed_address=fixed_address,
nat_destination_id=nat_destination_id
)
nat_destination_name_or_id=nat_destination_name_or_id)
ip = None
# First floating ip satisfies our requirements
ip = ips[0] if ips else None
if not ips:
if server:
if floating_ip_address:
# Requested floating ip address does not exist
self.conn.add_ip_list(
server=server,
ips=[floating_ip_address],
wait=self.params['wait'],
timeout=self.params['timeout'],
fixed_address=fixed_address
)
changed = True
if floating_ip_address:
# A specific floating ip address has been requested
else:
# No specific floating ip has been requested and none of the
# floating ips which have been assigned to the server matches
# requirements
if not ip:
# If a specific floating ip address has been requested
# and it does not exist yet then create it
# add_ips_to_server() will handle several scenarios:
#
# If a specific floating ip address has been requested then it
# will be attached to the server. The floating ip address has
# either been created in previous steps or it already existed.
# Ref.: https://github.com/openstack/openstacksdk/blob/
# 9d3ee1d32149ba2a8bb3dc894295e180746cdddc/openstack/cloud
# /_floating_ip.py#L985
#
# If no specific floating ip address has been requested, reuse
# is allowed and a network has been given (with ip_pool) from
# which floating ip addresses will be drawn, then any existing
# floating ip address from ip_pool=network which is not
# attached to any other server will be attached to the server.
# If no such floating ip address exists or if reuse is not
# allowed, then a new floating ip address will be created
# within ip_pool=network and attached to the server.
# Ref.: https://github.com/openstack/openstacksdk/blob/
# 9d3ee1d32149ba2a8bb3dc894295e180746cdddc/openstack/cloud/
# _floating_ip.py#L981
#
# If no specific floating ip address has been requested and no
# network has been given (with ip_pool) from which floating ip
# addresses will be taken, then a floating ip address might be
# added to the server, refer to _needs_floating_ip() for
# details.
# Ref.:
# * https://github.com/openstack/openstacksdk/blob/
# 9d3ee1d32149ba2a8bb3dc894295e180746cdddc/openstack/cloud/\
# _floating_ip.py#L989
# * https://github.com/openstack/openstacksdk/blob/
# 9d3ee1d32149ba2a8bb3dc894295e180746cdddc/openstack/cloud/
# _floating_ip.py#L995
#
# Both floating_ip_address and network are mutually exclusive
# in add_ips_to_server(), i.e. add_ips_to_server will ignore
# floating_ip_address if network is not None. To prefer
# attaching a specific floating ip address over assigning any
# fip, ip_pool is only defined if floating_ip_address is None.
# Ref.: https://github.com/openstack/openstacksdk/blob/
# a6b0ece2821ea79330c4067100295f6bdcbe456e/openstack/cloud/
# _floating_ip.py#L987
self.conn.add_ips_to_server(
server=server,
ip_pool=network_id,
ips=None, # No specific floating ip requested
reuse=self.params['reuse'],
fixed_address=fixed_address,
wait=self.params['wait'],
timeout=self.params['timeout'],
nat_destination=nat_destination_id
)
changed = True
else: # not server
kwargs = self._params_to_kwargs(
floating_ip_address,
network_id,
fixed_address,
self.nat_destination
)
# create the ip
ip = self.conn.network.create_ip(**kwargs)
# openstacksdk's create_ip requires floating_ip_address
# and floating_network_id to be set
self.conn.network.create_ip(
floating_ip_address=floating_ip_address,
floating_network_id=network_id)
changed = True
else: # ips
ip = ips[0]
else: # ip
# Requested floating ip address exists already
if server:
server_ips = self._filter_ips(server)
if ip.floating_ip_address not in server_ips:
port_details = ip.port_details
if (port_details
and port_details['status'] == 'ACTIVE'):
# Requested ip has been attached to different server
self.fail_json(
msg="Floating ip {0} has been attached to "
"different server".format(
floating_ip_address))
if ip.port_details and (ip.port_details['status'] == 'ACTIVE') \
and (floating_ip_address not in self._filter_ips(
self.server)):
# Floating ip address exists and has been attached
# but to a different server
else:
# Requested floating ip address has not been
# assigned to server
self.conn.add_ip_list(
server=server,
ips=[ip.floating_ip_address],
wait=self.params['wait'],
timeout=self.params['timeout'],
fixed_address=fixed_address
)
changed = True
# Requested ip has been attached to different server
self.fail_json(
msg="Floating ip {0} has been attached to different "
"server".format(floating_ip_address))
else:
# floating ip is already assigned to the server
pass
elif len(ips) > 1: # not server
self.fail_json(msg='Found more than one floating ip')
if not ip \
or floating_ip_address not in self._filter_ips(self.server):
# Requested floating ip address does not exist or has not been
# assigned to server
self.conn.add_ip_list(
server=self.server,
ips=[floating_ip_address],
wait=self.params['wait'],
timeout=self.params['timeout'],
fixed_address=fixed_address)
changed = True
else:
kwargs = self._params_to_kwargs(
floating_ip_address,
network_id,
fixed_address,
self.nat_destination
)
for key, value in kwargs.items():
if ip[key] != value:
self.conn.network.update_ip(ip, **kwargs)
changed = True
break
# Requested floating ip address has been assigned to server
pass
if changed and server:
elif not ips: # and not floating_ip_address
# No specific floating ip has been requested and none of the
# floating ips which have been assigned to the server matches
# requirements
# add_ips_to_server() will handle several scenarios:
#
# If a specific floating ip address has been requested then it
# will be attached to the server. The floating ip address has
# either been created in previous steps or it already existed.
# Ref.: https://github.com/openstack/openstacksdk/blob/
# 9d3ee1d32149ba2a8bb3dc894295e180746cdddc/openstack/cloud
# /_floating_ip.py#L985
#
# If no specific floating ip address has been requested, reuse
# is allowed and a network has been given (with ip_pool) from
# which floating ip addresses will be drawn, then any existing
# floating ip address from ip_pool=network which is not
# attached to any other server will be attached to the server.
# If no such floating ip address exists or if reuse is not
# allowed, then a new floating ip address will be created
# within ip_pool=network and attached to the server.
# Ref.: https://github.com/openstack/openstacksdk/blob/
# 9d3ee1d32149ba2a8bb3dc894295e180746cdddc/openstack/cloud/
# _floating_ip.py#L981
#
# If no specific floating ip address has been requested and no
# network has been given (with ip_pool) from which floating ip
# addresses will be taken, then a floating ip address might be
# added to the server, refer to _needs_floating_ip() for
# details.
# Ref.:
# * https://github.com/openstack/openstacksdk/blob/
# 9d3ee1d32149ba2a8bb3dc894295e180746cdddc/openstack/cloud/\
# _floating_ip.py#L989
# * https://github.com/openstack/openstacksdk/blob/
# 9d3ee1d32149ba2a8bb3dc894295e180746cdddc/openstack/cloud/
# _floating_ip.py#L995
#
# Both floating_ip_address and network are mutually exclusive
# in add_ips_to_server(), i.e. add_ips_to_server will ignore
# floating_ip_address if network is not None. To prefer
# attaching a specific floating ip address over assigning any
# fip, ip_pool is only defined if floating_ip_address is None.
# Ref.: https://github.com/openstack/openstacksdk/blob/
# a6b0ece2821ea79330c4067100295f6bdcbe456e/openstack/cloud/
# _floating_ip.py#L987
self.conn.add_ips_to_server(
server=self.server,
ip_pool=network_id,
ips=None, # No specific floating ip requested
reuse=self.params['reuse'],
fixed_address=fixed_address,
wait=self.params['wait'],
timeout=self.params['timeout'],
nat_destination=nat_destination_name_or_id)
changed = True
else:
# Found one or more floating ips which satisfy requirements
pass
if changed:
# update server details such as addresses
server = self.conn.compute.get_server(server)
self.server = self.conn.compute.get_server(self.server)
# Update the floating ip resource
ips = self._find_ips(
server,
floating_ip_address,
network_id,
fixed_address,
nat_destination_id
)
self.server, floating_ip_address, network_id,
fixed_address, nat_destination_name_or_id)
# ips can be empty, e.g. when server has no private ipv4
# address to which a floating ip address can be attached
ip = ips[0] if ips else None
# ips can be empty, e.g. when server has no private ipv4
# address to which a floating ip address can be attached
self.exit_json(
changed=changed,
floating_ip=ip.to_dict(computed=False)
)
floating_ip=ips[0].to_dict(computed=False) if ips else None)
def _detach_and_delete(self):
ips = self._find_ips(
@@ -391,7 +354,7 @@ class NetworkingFloatingIPModule(OpenStackModule):
floating_ip_address=self.params['floating_ip_address'],
network_id=self.network['id'] if self.network else None,
fixed_address=self.params['fixed_address'],
nat_destination_id=self.nat_destination['id'] if self.nat_destination else None)
nat_destination_name_or_id=self.params['nat_destination'])
if not ips:
# Nothing to detach
@@ -399,22 +362,19 @@ class NetworkingFloatingIPModule(OpenStackModule):
changed = False
for ip in ips:
if self.server:
if ip['fixed_ip_address']:
# Silently ignore that ip might not be attached to server
#
# self.conn.network.update_ip(ip_id, port_id=None) does not
# handle nova network but self.conn.detach_ip_from_server()
# does so
changed = self.conn.detach_ip_from_server(server_id=self.server['id'],
floating_ip_id=ip['id'])
# OpenStackSDK sets {"port_id": None} to detach a floating
# ip from a device, but there might be a delay until a
# server does not list it in addresses any more.
else: # not self.server
if ip['port_id']:
changed = True
self.conn.network.update_ip(floating_ip=ip['id'], port_id=None)
if ip['fixed_ip_address']:
# Silently ignore that ip might not be attached to server
#
# self.conn.network.update_ip(ip_id, port_id=None) does not
# handle nova network but self.conn.detach_ip_from_server()
# does so
self.conn.detach_ip_from_server(server_id=self.server['id'],
floating_ip_id=ip['id'])
# OpenStackSDK sets {"port_id": None} to detach a floating
# ip from a device, but there might be a delay until a
# server does not list it in addresses any more.
changed = True
if self.params['purge']:
self.conn.network.delete_ip(ip['id'])
@@ -437,56 +397,39 @@ class NetworkingFloatingIPModule(OpenStackModule):
# Returns a list not an iterator here because
# it is iterated several times below
addresses = _flatten(server['addresses'].values())
return [
address['addr']
for address in addresses
if address['OS-EXT-IPS:type'] == 'floating'
]
return [address['addr']
for address in _flatten(server['addresses'].values())
if address['OS-EXT-IPS:type'] == 'floating']
def _find_ips(self,
server,
floating_ip_address,
network_id,
fixed_address,
nat_destination_id):
nat_destination_name_or_id):
# Check which floating ips matches our requirements.
# They might or might not be attached to our server.
if floating_ip_address:
# A specific floating ip address has been requested
ip = self.conn.network.find_ip(floating_ip_address)
return [ip] if ip else []
elif server:
if (not fixed_address and nat_destination_id):
# No specific floating ip and no specific fixed ip have been
# requested but a private network (nat_destination) has been
# given where the floating ip should be attached to.
return self._find_ips_by_nat_destination(
server, nat_destination_id)
else:
# not floating_ip_address
# and (fixed_address or not nat_destination_id)
# An analysis of all floating ips of server is required
return self._find_ips_by_network_id_and_fixed_address(
server, fixed_address, network_id)
elif fixed_address or nat_destination_id:
ports = self._find_ports_by_fixed_address_or_nat_destination(fixed_address, nat_destination_id)
floating_ips = []
for port in ports:
ips = list(self.conn.network.ips(port_id=port.id))
floating_ips.extend(ips)
return floating_ips
elif network_id:
return list(self.conn.network.ips(floating_network_id=network_id))
elif (not fixed_address and nat_destination_name_or_id):
# No specific floating ip and no specific fixed ip have been
# requested but a private network (nat_destination) has been
# given where the floating ip should be attached to.
return self._find_ips_by_nat_destination(
server, nat_destination_name_or_id)
else:
return []
# not floating_ip_address
# and (fixed_address or not nat_destination_name_or_id)
# An analysis of all floating ips of server is required
return self._find_ips_by_network_id_and_fixed_address(
server, fixed_address, network_id)
def _find_ips_by_nat_destination(self,
server,
nat_destination_id):
nat_destination_name_or_id):
if not server['addresses']:
return None
@@ -494,7 +437,7 @@ class NetworkingFloatingIPModule(OpenStackModule):
# Check if we have any floating ip on
# the given nat_destination network
nat_destination = self.conn.network.find_network(
nat_destination_id, ignore_missing=False)
nat_destination_name_or_id, ignore_missing=False)
fips_with_nat_destination = [
addr for addr
@@ -524,7 +467,7 @@ class NetworkingFloatingIPModule(OpenStackModule):
# match network of floating ip
continue
if not fixed_address: # and not nat_destination_id
if not fixed_address: # and not nat_destination_name_or_id
# Any floating ip will fulfill these requirements
matching_ips.append(ip)
@@ -535,84 +478,20 @@ class NetworkingFloatingIPModule(OpenStackModule):
return matching_ips
def _params_to_kwargs(self,
floating_ip_address,
network_id,
fixed_address,
nat_destination):
kwargs = {}
kwargs['floating_network_id'] = network_id
if fixed_address:
# must indicate internal port identifier
ports = self._find_ports_by_fixed_address_or_nat_destination(
fixed_address, nat_destination
)
if len(ports) > 1:
self.fail_json(
msg='There are multiple subnets with the fixed ip '
'address {0}'.format(fixed_address)
)
elif len(ports) == 0:
self.fail_json(
msg='No port found with fixed ip address {0}'.format(
fixed_address)
)
else:
kwargs['fixed_ip_address'] = fixed_address
kwargs['port_id'] = ports[0].id
if floating_ip_address:
kwargs['floating_ip_address'] = floating_ip_address
return kwargs
def _find_ports_by_fixed_address_or_nat_destination(self,
fixed_address,
nat_destination):
port_kwargs = {}
if fixed_address:
port_kwargs['fixed_ips'] = f'ip_address={fixed_address}'
if nat_destination:
port_kwargs['network_id'] = nat_destination.id
ports = self.conn.network.ports(**port_kwargs)
return list(ports)
def _init(self):
server_name_or_id = self.params['server']
if server_name_or_id:
self.server = self.conn.compute.find_server(
name_or_id=server_name_or_id, ignore_missing=False
)
else:
self.server = None
if (self.server is None and self.params['fixed_address']
and self.params['nat_destination'] is None):
self.fail_json(
msg='fixed_address requires nat_destination to be defined '
'when server isn\'t'
)
server = self.conn.compute.find_server(server_name_or_id,
ignore_missing=False)
# fetch server details such as addresses
self.server = self.conn.compute.get_server(server)
network_name_or_id = self.params['network']
if network_name_or_id:
self.network = self.conn.network.find_network(
name_or_id=network_name_or_id, ignore_missing=False
)
name_or_id=network_name_or_id, ignore_missing=False)
else:
self.network = None
nat_destination_name_or_id = self.params['nat_destination']
if nat_destination_name_or_id:
self.nat_destination = self.conn.network.find_network(
name_or_id=nat_destination_name_or_id, ignore_missing=False
)
else:
self.nat_destination = None
def main():
module = NetworkingFloatingIPModule()

View File

@@ -56,7 +56,7 @@ options:
description:
- When I(update_password) is C(always), then the password will always be
updated.
- When I(update_password) is C(on_create), then the password is only set
- When I(update_password) is C(on_create), then the password is only set
when creating a user.
type: str
extends_documentation_fragment:

View File

@@ -100,8 +100,8 @@ options:
type: str
state:
description:
- Should the resource be present, absent or inactive.
choices: [present, absent, inactive]
- Should the resource be present or absent.
choices: [present, absent]
default: present
type: str
tags:
@@ -122,26 +122,6 @@ options:
- I(volume) has been deprecated. Use module M(openstack.cloud.volume)
instead.
type: str
use_import:
description:
- Use the 'glance-direct' method of the interoperable image import mechanism.
- Should only be used when needed, such as when the user needs the cloud to
transform image format.
type: bool
import_method:
description:
- Method to use for importing the image. Not all deployments support all methods.
- Supports web-download or glance-download.
- copy-image is not supported with create actions.
- glance-direct is removed from the import method so use_import can be used in that case.
type: str
choices: [web-download, glance-download]
uri:
description:
- Required only if using the web-download import method.
- This url is where the data is made available to the Image service.
type: str
extends_documentation_fragment:
- openstack.cloud.openstack
'''
@@ -167,7 +147,7 @@ EXAMPLES = r'''
RETURN = r'''
image:
description: Dictionary describing the Glance image.
returned: On success when I(state) is C(present) or C(inactive).
returned: On success when I(state) is C(present).
type: dict
contains:
id:
@@ -408,18 +388,15 @@ class ImageModule(OpenStackModule):
owner_domain=dict(aliases=['project_domain']),
properties=dict(type='dict', default={}),
ramdisk=dict(),
state=dict(default='present', choices=['absent', 'present', 'inactive']),
state=dict(default='present', choices=['absent', 'present']),
tags=dict(type='list', default=[], elements='str'),
visibility=dict(choices=['public', 'private', 'shared', 'community']),
volume=dict(),
use_import=dict(type='bool'),
import_method=dict(choices=['web-download', 'glance-download']),
uri=dict()
)
module_kwargs = dict(
mutually_exclusive=[
('filename', 'volume', 'uri'),
('filename', 'volume'),
('visibility', 'is_public'),
],
)
@@ -427,8 +404,7 @@ class ImageModule(OpenStackModule):
# resource attributes obtainable directly from params
attr_params = ('id', 'name', 'filename', 'disk_format',
'container_format', 'wait', 'timeout', 'is_public',
'is_protected', 'min_disk', 'min_ram', 'volume', 'tags',
'use_import', 'import_method', 'uri')
'is_protected', 'min_disk', 'min_ram', 'volume', 'tags')
def _resolve_visibility(self):
"""resolve a visibility value to be compatible with older versions"""
@@ -485,28 +461,6 @@ class ImageModule(OpenStackModule):
return update_payload
def _wait_for_image_active(self, image):
if not self.params['wait']:
return image
return self.sdk.resource.wait_for_status(
self.conn.image,
image,
status='active',
failures=['error', 'deleted', 'killed'],
wait=self.params['timeout'],
attribute='status')
def _import_uploaded_image(self, image):
if not hasattr(self.conn.image, 'import_image'):
self.fail_json(
msg="The installed openstacksdk library does not support "
"image import operations required for images in the "
"'uploading' state.")
self.conn.image.import_image(image, method='glance-direct')
return self._wait_for_image_active(self.conn.get_image(image.id))
def run(self):
changed = False
image_name_or_id = self.params['id'] or self.params['name']
@@ -548,33 +502,6 @@ class ImageModule(OpenStackModule):
self.exit_json(changed=changed,
image=self._return_value(image.id))
if image['status'] == 'deactivated':
self.conn.image.reactivate_image(image)
changed = True
elif image['status'] == 'queued':
if (
self.params['filename']
and hasattr(self.conn.image, 'stage_image')):
self.conn.image.stage_image(
image, filename=self.params['filename'])
changed = True
elif self.params['filename']:
with open(self.params['filename'], 'rb') as image_data:
self.conn.image.upload_image(
container_format=self.params['container_format'],
disk_format=self.params['disk_format'],
data=image_data,
id=image.id,
name=image.name)
changed = True
image = self.conn.get_image(image.id)
if image['status'] == 'uploading' and self.params['use_import']:
image = self._import_uploaded_image(image)
changed = True
elif image['status'] == 'importing':
image = self._wait_for_image_active(image)
update_payload = self._build_update(image)
if update_payload:
@@ -590,20 +517,6 @@ class ImageModule(OpenStackModule):
wait=self.params['wait'],
timeout=self.params['timeout'])
changed = True
elif self.params['state'] == 'inactive' and image is not None:
if image['status'] == 'active':
self.conn.image.deactivate_image(image)
changed = True
update_payload = self._build_update(image)
if update_payload:
self.conn.image.update_image(image.id, **update_payload)
changed = True
self.exit_json(changed=changed, image=self._return_value(image.id))
self.exit_json(changed=changed)

View File

@@ -142,7 +142,7 @@ pool:
'''
EXAMPLES = r'''
- name: Create a load-balancer pool
- name: Create a load-balancer pool
openstack.cloud.lb_pool:
cloud: mycloud
lb_algorithm: ROUND_ROBIN
@@ -151,7 +151,7 @@ EXAMPLES = r'''
protocol: HTTP
state: present
- name: Delete a load-balancer pool
- name: Delete a load-balancer pool
openstack.cloud.lb_pool:
cloud: mycloud
name: test-pool

View File

@@ -30,15 +30,6 @@ options:
description:
- Whether this network is externally accessible.
type: bool
is_default:
description:
- Whether this network is default network or not. This is only effective
with external networks.
type: bool
is_vlan_transparent:
description:
- Whether this network is vlan_transparent or not.
type: bool
state:
description:
- Indicate desired state of the resource.
@@ -199,8 +190,6 @@ class NetworkModule(OpenStackModule):
shared=dict(type='bool'),
admin_state_up=dict(type='bool'),
external=dict(type='bool'),
is_default=dict(type='bool'),
is_vlan_transparent=dict(type='bool'),
provider_physical_network=dict(),
provider_network_type=dict(),
provider_segmentation_id=dict(type='int'),
@@ -218,8 +207,6 @@ class NetworkModule(OpenStackModule):
shared = self.params['shared']
admin_state_up = self.params['admin_state_up']
external = self.params['external']
is_default = self.params['is_default']
is_vlan_transparent = self.params['is_vlan_transparent']
provider_physical_network = self.params['provider_physical_network']
provider_network_type = self.params['provider_network_type']
provider_segmentation_id = self.params['provider_segmentation_id']
@@ -257,10 +244,6 @@ class NetworkModule(OpenStackModule):
kwargs["admin_state_up"] = admin_state_up
if external is not None:
kwargs["is_router_external"] = external
if is_default is not None:
kwargs["is_default"] = is_default
if is_vlan_transparent is not None:
kwargs["is_vlan_transparent"] = is_vlan_transparent
if not net:
net = self.conn.network.create_network(name=name, **kwargs)

View File

@@ -1,183 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2025 British Broadcasting Corporation
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
DOCUMENTATION = '''
---
module: network_segment
short_description: Creates/removes network segments from OpenStack
author: OpenStack Ansible SIG
description:
- Add, update or remove network segments from OpenStack.
options:
name:
description:
- Name to be assigned to the segment. Although Neutron allows for
non-unique segment names, this module enforces segment name
uniqueness.
required: true
type: str
description:
description:
- Description of the segment
type: str
network:
description:
- Name or id of the network to which the segment should be attached
type: str
network_type:
description:
- The type of physical network that maps to this segment resource.
type: str
physical_network:
description:
- The physical network where this segment object is implemented.
type: str
segmentation_id:
description:
- An isolated segment on the physical network. The I(network_type)
attribute defines the segmentation model. For example, if the
I(network_type) value is vlan, this ID is a vlan identifier. If
the I(network_type) value is gre, this ID is a gre key.
type: int
state:
description:
- Indicate desired state of the resource.
choices: ['present', 'absent']
default: present
type: str
extends_documentation_fragment:
- openstack.cloud.openstack
'''
EXAMPLES = '''
# Create a VLAN type network segment named 'segment1'.
- openstack.cloud.network_segment:
cloud: mycloud
name: segment1
network: my_network
network_type: vlan
segmentation_id: 2000
physical_network: my_physnet
state: present
'''
RETURN = '''
id:
description: Id of segment
returned: On success when segment exists.
type: str
network_segment:
description: Dictionary describing the network segment.
returned: On success when network segment exists.
type: dict
contains:
description:
description: Description
type: str
id:
description: Id
type: str
name:
description: Name
type: str
network_id:
description: Network Id
type: str
network_type:
description: Network type
type: str
physical_network:
description: Physical network
type: str
segmentation_id:
description: Segmentation Id
type: int
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
class NetworkSegmentModule(OpenStackModule):
    """Create, update or delete a Neutron network segment.

    Looks up an existing segment by name (optionally narrowed by network,
    network_type and physical_network) and converges it towards the
    requested state.
    """

    # Ansible argument spec; the common OpenStack arguments (cloud, auth,
    # wait, timeout, ...) are merged in by OpenStackModule.
    argument_spec = dict(
        name=dict(required=True),
        description=dict(),
        network=dict(),
        network_type=dict(),
        physical_network=dict(),
        segmentation_id=dict(type='int'),
        state=dict(default='present', choices=['absent', 'present'])
    )

    def run(self):
        """Entry point invoked by OpenStackModule.

        Exits via self.exit(): with state=present the result carries the
        segment dict (network_segment) and its id; with state=absent only
        the changed flag.
        """
        state = self.params['state']
        name = self.params['name']
        network_name_or_id = self.params['network']

        # kwargs: attributes forwarded to create_segment().
        # filters: attributes used to narrow the lookup of an existing
        # segment.  Only parameters the user actually supplied (non-None)
        # are included in either.
        kwargs = {}
        filters = {}
        for arg in ('description', 'network_type', 'physical_network', 'segmentation_id'):
            if self.params[arg] is not None:
                kwargs[arg] = self.params[arg]
        for arg in ('network_type', 'physical_network'):
            if self.params[arg] is not None:
                filters[arg] = self.params[arg]

        if network_name_or_id:
            # Resolve the parent network; ignore_missing=False makes the
            # SDK raise (and the module fail) when it does not exist.
            # NOTE(review): filters are also forwarded to find_network —
            # presumably to match provider attributes; confirm the SDK
            # accepts network_type/physical_network as network query args.
            network = self.conn.network.find_network(network_name_or_id,
                                                     ignore_missing=False,
                                                     **filters)
            kwargs['network_id'] = network.id
            filters['network_id'] = network.id

        segment = self.conn.network.find_segment(name, **filters)

        if state == 'present':
            if not segment:
                segment = self.conn.network.create_segment(name=name, **kwargs)
                changed = True
            else:
                changed = False
                update_kwargs = {}
                # As the name is required and all other attributes cannot be
                # changed (and appear in filters above), we only need to handle
                # updates to the description here.
                for arg in ["description"]:
                    if (
                        arg in kwargs
                        # ensure user wants something specific
                        and kwargs[arg] is not None
                        # and this is not what we have right now
                        and kwargs[arg] != segment[arg]
                    ):
                        update_kwargs[arg] = kwargs[arg]

                if update_kwargs:
                    segment = self.conn.network.update_segment(
                        segment.id, **update_kwargs
                    )
                    changed = True

            # Return a plain dict without SDK-computed attributes.
            segment = segment.to_dict(computed=False)
            self.exit(changed=changed, network_segment=segment, id=segment['id'])
        elif state == 'absent':
            if not segment:
                # Nothing to delete.
                self.exit(changed=False)
            else:
                self.conn.network.delete_segment(segment['id'])
                self.exit(changed=True)
def main():
    """Module entry point: build the Ansible module wrapper and run it."""
    NetworkSegmentModule()()


if __name__ == '__main__':
    main()

View File

@@ -65,12 +65,6 @@ options:
- Required when creating or updating a RBAC policy rule, ignored when
deleting a policy.
type: str
target_all_project:
description:
- Whether all projects are targeted for access.
- If this option set to true, C(target_project_id) is ignored.
type: bool
default: 'false'
state:
description:
- Whether the RBAC rule should be C(present) or C(absent).
@@ -151,8 +145,6 @@ from ansible_collections.openstack.cloud.plugins.module_utils.openstack import O
class NeutronRBACPolicy(OpenStackModule):
all_project_symbol = '*'
argument_spec = dict(
action=dict(choices=['access_as_external', 'access_as_shared']),
id=dict(aliases=['policy_id']),
@@ -161,22 +153,17 @@ class NeutronRBACPolicy(OpenStackModule):
project_id=dict(),
state=dict(default='present', choices=['absent', 'present']),
target_project_id=dict(),
target_all_project=dict(type='bool', default=False),
)
module_kwargs = dict(
required_if=[
('state', 'present', ('target_project_id', 'target_all_project',), True),
('state', 'present', ('target_project_id',)),
('state', 'absent', ('id',)),
],
supports_check_mode=True,
)
def run(self):
target_all_project = self.params.get('target_all_project')
if target_all_project:
self.params['target_project_id'] = self.all_project_symbol
state = self.params['state']
policy = self._find()
@@ -275,7 +262,7 @@ class NeutronRBACPolicy(OpenStackModule):
return [p for p in policies
if any(p[k] == self.params[k]
for k in ['object_id'])]
for k in ['object_id', 'target_project_id'])]
def _update(self, policy, update):
attributes = update.get('attributes')

View File

@@ -295,11 +295,8 @@ class ObjectModule(OpenStackModule):
for k in ['data', 'filename']
if self.params[k] is not None)
object = self.conn.object_store.create_object(container_name, name,
**kwargs)
if not object:
object = self._find()
return object
return self.conn.object_store.create_object(container_name, name,
**kwargs)
def _delete(self, object):
container_name = self.params['container']

View File

@@ -269,7 +269,7 @@ class ContainerModule(OpenStackModule):
if metadata is not None:
# Swift metadata keys must be treated as case-insensitive
old_metadata = dict((k.lower(), v)
for k, v in (container.metadata or {}).items())
for k, v in (container.metadata or {}))
new_metadata = dict((k, v) for k, v in metadata.items()
if k.lower() not in old_metadata
or v != old_metadata[k.lower()])

View File

@@ -1,202 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2024 Catalyst Cloud Limited
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
DOCUMENTATION = r"""
---
module: object_containers_info
short_description: Fetch container info from the OpenStack Swift service.
author: OpenStack Ansible SIG
description:
- Fetch container info from the OpenStack Swift service.
options:
name:
description:
- Name of the container
type: str
aliases: ["container"]
prefix:
description:
- Filter containers by prefix
type: str
extends_documentation_fragment:
- openstack.cloud.openstack
"""
EXAMPLES = r"""
- name: List all containers existing on the project
openstack.cloud.object_containers_info:
- name: Retrieve a single container by name
openstack.cloud.object_containers_info:
name: test-container
- name: Retrieve and filter containers by prefix
openstack.cloud.object_containers_info:
prefix: test-
"""
RETURN = r"""
containers:
description: List of dictionaries describing matching containers.
returned: always
type: list
elements: dict
contains:
bytes:
description: The total number of bytes that are stored in Object Storage
for the container.
type: int
sample: 5449
bytes_used:
description: The count of bytes used in total.
type: int
sample: 5449
content_type:
description: The MIME type of the list of names.
Only fetched when searching for a container by name.
type: str
sample: null
count:
description: The number of objects in the container.
type: int
sample: 1
history_location:
description: Enables versioning on the container.
Only fetched when searching for a container by name.
type: str
sample: null
id:
description: The ID of the container. Equals I(name).
type: str
sample: "otc"
if_none_match:
description: "In combination with C(Expect: 100-Continue), specify an
C(If-None-Match: *) header to query whether the server
already has a copy of the object before any data is sent.
Only set when searching for a container by name."
type: str
sample: null
is_content_type_detected:
description: If set to C(true), Object Storage guesses the content type
based on the file extension and ignores the value sent in
the Content-Type header, if present.
Only fetched when searching for a container by name.
type: bool
sample: null
is_newest:
description: If set to True, Object Storage queries all replicas to
return the most recent one. If you omit this header, Object
Storage responds faster after it finds one valid replica.
Because setting this header to True is more expensive for
the back end, use it only when it is absolutely needed.
Only fetched when searching for a container by name.
type: bool
sample: null
meta_temp_url_key:
description: The secret key value for temporary URLs. If not set,
this header is not returned by this operation.
Only fetched when searching for a container by name.
type: str
sample: null
meta_temp_url_key_2:
description: A second secret key value for temporary URLs. If not set,
this header is not returned by this operation.
Only fetched when searching for a container by name.
type: str
sample: null
name:
description: The name of the container.
type: str
sample: "otc"
object_count:
description: The number of objects.
type: int
sample: 1
read_ACL:
description: The ACL that grants read access. If not set, this header is
not returned by this operation.
Only fetched when searching for a container by name.
type: str
sample: null
storage_policy:
description: Storage policy used by the container. It is not possible to
change policy of an existing container.
Only fetched when searching for a container by name.
type: str
sample: null
sync_key:
description: The secret key for container synchronization. If not set,
this header is not returned by this operation.
Only fetched when searching for a container by name.
type: str
sample: null
sync_to:
description: The destination for container synchronization. If not set,
this header is not returned by this operation.
Only fetched when searching for a container by name.
type: str
sample: null
timestamp:
description: The timestamp of the transaction.
Only fetched when searching for a container by name.
type: str
sample: null
versions_location:
description: Enables versioning on this container. The value is the name
of another container. You must UTF-8-encode and then
URL-encode the name before you include it in the header. To
disable versioning, set the header to an empty string.
Only fetched when searching for a container by name.
type: str
sample: null
write_ACL:
description: The ACL that grants write access. If not set, this header is
not returned by this operation.
Only fetched when searching for a container by name.
type: str
sample: null
"""
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
class ObjectContainersInfoModule(OpenStackModule):
    """Fetch Swift object-store container info.

    With ``name`` set, returns metadata for that single container;
    otherwise lists all containers, optionally filtered by ``prefix``.
    """

    argument_spec = dict(
        # Name of a single container to look up (alias: container).
        name=dict(aliases=["container"]),
        # List-mode filter: only containers whose name starts with this.
        prefix=dict(),
    )
    module_kwargs = dict(
        # Read-only module, so check mode is trivially supported.
        supports_check_mode=True,
    )

    def run(self):
        if self.params["name"]:
            # Single-container lookup (container metadata request).
            # NOTE(review): get_container_metadata presumably raises when the
            # container does not exist, surfacing as a module failure rather
            # than an empty result -- confirm this is the intended behavior.
            containers = [
                (
                    self.conn.object_store.get_container_metadata(
                        self.params["name"],
                    ).to_dict(computed=False)
                ),
            ]
        else:
            # List all containers, optionally narrowed by a name prefix.
            query = {}
            if self.params["prefix"]:
                query["prefix"] = self.params["prefix"]
            containers = [
                c.to_dict(computed=False)
                for c in self.conn.object_store.containers(**query)
            ]
        # Info module: never changes cloud state.
        self.exit(changed=False, containers=containers)
def main():
    """Instantiate the Ansible module and execute it."""
    ObjectContainersInfoModule()()


if __name__ == "__main__":
    main()

View File

@@ -511,7 +511,7 @@ class PortModule(OpenStackModule):
**(dict(network_id=network.id) if network else dict()))
if self.ansible.check_mode:
self.exit_json(changed=self._will_change(port, state))
self.exit_json(changed=self._will_change(network, port, state))
if state == 'present' and not port:
# create port

View File

@@ -1,249 +0,0 @@
# Copyright: (c) 2018, Terry Jones <terry.jones@example.org>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
DOCUMENTATION = r'''
---
module: port_forwarding
short_description: Create/Update/Delete port forwarding resources from OpenStack
description:
- Create, Update and Remove Neutron floating IP port forwarding resources from OpenStack
- Port forwarding allows external traffic to reach instances behind a floating IP
author: OpenStack Ansible SIG
options:
external_protocol_port:
description:
- The external port number on the floating IP that will be forwarded
- Must be between 1 and 65535
- Required if C(port_forwarding_id) is set
type: int
aliases: ['external_port']
floating_ip:
description:
- The floating IP address or ID to create port forwarding on
type: str
required: true
aliases: ['floating_ip_address']
internal_ip:
description:
- The internal IP address to forward traffic to
- Must be one of the fixed IPs on the specified port
- If not specified, uses the first fixed IP of the port
- Requires C(network_port)
type: str
aliases: ['internal_ip_address']
internal_protocol_port:
description:
- The internal port number to forward traffic to
- Must be between 1 and 65535
- Required if C(port_forwarding_id) is set
type: int
aliases: ['internal_port']
network_port:
description:
- The Neutron port name or ID that contains the internal IP
- Required if C(port_forwarding_id) is set
type: str
port_forwarding_id:
description:
- ID of an existing port forwarding resource
- Used for updates and deletions when ID is known
type: str
protocol:
description:
- The IP protocol for the port forwarding resource
- Supports tcp and udp protocols
- Required if C(port_forwarding_id) is set
type: str
state:
description:
- Whether the port forwarding resource should exist or not
type: str
choices: ['present', 'absent']
default: present
extends_documentation_fragment:
- openstack.cloud.openstack
'''
EXAMPLES = r'''
- name: Create new port forwarding
openstack.cloud.port_forwarding:
state: present
floating_ip: 192.168.150.67
external_protocol_port: 80
internal_protocol_port: 8080
network_port: example_http_port
protocol: tcp
- name: Update previously created port forwarding
openstack.cloud.port_forwarding:
state: present
port_forwarding_id: existing_port_forwarding
floating_ip: 192.168.150.67
internal_protocol_port: 9090
- name: Delete port forwarding
openstack.cloud.port_forwarding:
state: absent
port_forwarding_id: "resource-id"
floating_ip: "203.0.113.100"
'''
RETURN = r'''
port_forwarding:
description: Dictionary describing the port forwarding resource.
type: list
elements: dict
returned: success
contains:
description:
description: The description of the port forwarding.
type: str
external_port:
description: The external port number.
type: int
floatingip_id:
description: The floating IP id associated with the port forwarding.
type: str
id:
description: The id of the port forwarding.
type: str
internal_ip_address:
description: The internal IP address associated with the port forwarding.
type: str
internal_port:
description: The internal port number.
type: int
internal_port_id:
description: The ID of the network port associated with the port forwarding.
type: str
protocol:
description: The IP protocol used for port forwarding.
type: str
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import (
OpenStackModule
)
class PortForwardingModule(OpenStackModule):
    """Create, update or delete a Neutron floating-IP port forwarding."""

    argument_spec = dict(
        # External (floating IP side) port number.
        external_protocol_port=dict(type='int', aliases=['external_port']),
        # Address or ID of the floating IP carrying the forwarding.
        floating_ip=dict(required=True, aliases=['floating_ip_address']),
        # Internal fixed IP to forward to; must belong to network_port.
        internal_ip=dict(aliases=['internal_ip_address']),
        # Internal (instance side) port number.
        internal_protocol_port=dict(type='int', aliases=['internal_port']),
        # Name or ID of the Neutron port holding the internal IP.
        network_port=dict(),
        # ID of an existing forwarding, for update/delete by ID.
        port_forwarding_id=dict(),
        # IP protocol of the forwarding (e.g. tcp, udp).
        protocol=dict(),
        state=dict(default='present', choices=['present', 'absent']),
    )
    module_kwargs = dict(
        required_if=[
            # Without a known forwarding ID, all attributes identifying a
            # forwarding must be supplied.
            ['port_forwarding_id', None, ['external_protocol_port',
                                          'internal_protocol_port',
                                          'network_port',
                                          'protocol'], False],
        ],
        required_by={
            # internal_ip is only meaningful relative to a network port.
            'internal_ip': ['network_port'],
        },
    )

    def run(self):
        """Reconcile the requested forwarding state against the cloud."""
        port_forwarding_id = self.params['port_forwarding_id']
        floating_ip = self.conn.network.find_ip(self.params['floating_ip'],
                                                ignore_missing=False)
        port = self.conn.network.find_port(self.params['network_port']) \
            if self.params['network_port'] else None
        internal_ip = self._find_internal_ip(port) if port else None
        external_port = self.params['external_protocol_port']
        internal_port = self.params['internal_protocol_port']
        protocol = self.params['protocol']
        state = self.params['state']
        # Collect only the attributes the user actually supplied so that
        # omitted parameters are not sent (and not treated as changes).
        attrs = {}
        if port is not None:
            attrs['internal_port_id'] = port.id
        if internal_ip is not None:
            attrs['internal_ip_address'] = internal_ip
        if external_port is not None:
            attrs['external_port'] = external_port
        if protocol is not None:
            attrs['protocol'] = protocol
        port_forwarding = self._find_port_forwarding(floating_ip.id,
                                                     port_forwarding_id,
                                                     attrs)
        # internal_port is deliberately added after the lookup, so it is
        # used for create/update but not for matching an existing
        # forwarding -- presumably so the internal port of an existing
        # forwarding can be updated.
        if internal_port is not None:
            attrs['internal_port'] = internal_port
        changed = False
        if state == 'present':
            if port_forwarding:
                # found valid pfwd_id or pfwd with matching attributes;
                # send only the attributes that actually differ.
                new_attrs = {k: v for k, v in attrs.items() if port_forwarding[k] != v}
                if new_attrs:
                    port_forwarding = self.conn.network.update_port_forwarding(
                        port_forwarding.id, floating_ip.id, **new_attrs)
                    changed = True
            elif not port_forwarding_id:
                # pfwd_id not given, so create new pfwd
                attrs['floatingip_id'] = floating_ip.id
                port_forwarding = self.conn.network.create_port_forwarding(**attrs)
                changed = True
            # NOTE(review): port_forwarding may be an SDK resource object
            # here rather than a plain dict -- confirm serialization is
            # handled by exit_json as intended.
            self.exit_json(changed=changed, port_forwarding=port_forwarding)
        else:
            if port_forwarding:
                self.conn.network.delete_port_forwarding(port_forwarding.id, floating_ip.id)
                changed = True
            self.exit_json(changed=changed)

    def _find_internal_ip(self, port):
        """Resolve the internal IP to use on ``port``.

        Returns the user-supplied internal_ip if it is one of the port's
        fixed IPs, otherwise the port's first fixed IP; fails the module
        when neither is available.
        """
        internal_ip = self.params['internal_ip']
        if internal_ip:
            for fixed_ip in port.fixed_ips:
                if fixed_ip['ip_address'] == internal_ip:
                    return internal_ip
            self.fail_json(
                msg='Internal IP %s not found in port %s fixed IPs' % (internal_ip, port.id))
        else:
            if port.fixed_ips:
                return port.fixed_ips[0]['ip_address']
            else:
                self.fail_json(msg='Port %s has no fixed IPs available' % port.id)

    def _find_port_forwarding(self, fip_id, pf_id, attrs):
        """Locate an existing forwarding by ID or by matching attributes.

        Returns None when nothing matches; fails the module when the
        attribute query is ambiguous (more than one match).
        """
        try:
            if pf_id:
                return self.conn.network.find_port_forwarding(pf_id, fip_id, ignore_missing=False)
            port_forwardings = list(self.conn.network.port_forwardings(fip_id, **attrs))
            if len(port_forwardings) > 1:
                self.fail_json(
                    msg='Found more than one port forwarding resources with matching attributes')
            return port_forwardings[0] if len(port_forwardings) == 1 else None
        except self.sdk.exceptions.NotFoundException:
            # A missing forwarding is not an error here; caller decides.
            return None
def main():
    """Instantiate the Ansible module and execute it."""
    PortForwardingModule()()


if __name__ == '__main__':
    main()

View File

@@ -1,148 +0,0 @@
# Copyright: (c) 2018, Terry Jones <terry.jones@example.org>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
DOCUMENTATION = r'''
---
module: port_forwarding_info
short_description: Retrieve port forwarding resources from OpenStack.
description:
- Retrieve Neutron floating IP port forwarding resources from OpenStack.
author: OpenStack Ansible SIG
options:
external_port:
description:
- The external port number on the floating IP that will be forwarded.
type: int
floating_ip:
description:
- The address or ID of a floating IP that contains a port forwarding.
type: str
internal_port_id:
description:
- The Neutron port ID.
type: str
port_forwarding_id:
description:
- ID of an existing port forwarding resource.
type: str
protocol:
description:
- The IP protocol for the port forwarding resource.
type: str
extends_documentation_fragment:
- openstack.cloud.openstack
'''
EXAMPLES = r'''
# Getting all port forwardings
- openstack.cloud.port_forwarding_info:
register: pfwds
# Getting port forwardings by associated floating ip
- openstack.cloud.port_forwarding_info:
floating_ip: 192.168.42.67
register: pfwds
# Getting port forwarding by port forwarding id
- openstack.cloud.port_forwarding_info:
port_forwarding_id: d09f88d6-bb20-4268-9139-27c1b82c51d0
register: pfwd
'''
RETURN = r'''
port_forwardings:
description: The port forwarding objects list.
type: list
elements: dict
returned: success
contains:
description:
description: The description of the port forwarding.
type: str
external_port:
description: The external port number.
type: int
floatingip_id:
description: The floating IP id associated with the port forwarding.
type: str
id:
description: The id of the port forwarding.
type: str
internal_ip_address:
description: The internal IP address associated with the port forwarding.
type: str
internal_port:
description: The internal port number.
type: int
internal_port_id:
description: The ID of the network port associated with the port forwarding.
type: str
protocol:
description: The IP protocol used for port forwarding.
type: str
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import (
OpenStackModule
)
class PortForwardingInfoModule(OpenStackModule):
    """Retrieve Neutron floating-IP port forwarding resources."""

    argument_spec = dict(
        # External (floating IP side) port number to filter on.
        external_port=dict(type='int'),
        # Address or ID of the floating IP to restrict the search to.
        floating_ip=dict(),
        # Neutron port ID on the internal side to filter on.
        internal_port_id=dict(),
        # ID of a specific port forwarding resource to look up.
        port_forwarding_id=dict(),
        # IP protocol (e.g. tcp, udp) to filter on.
        protocol=dict(),
    )
    module_kwargs = dict(
        # Read-only module, so check mode is trivially supported.
        supports_check_mode=True
    )

    def _find_port_forwardings(self):
        """Return port forwarding resources matching the module parameters.

        When port_forwarding_id is given, scan the candidate floating IPs
        and return the first forwarding with that ID; otherwise list all
        forwardings on those floating IPs, filtered by the query parameters.
        """
        port_forwarding_id = self.params['port_forwarding_id']
        floating_ip = self.params['floating_ip']
        # Server-side filters built only from parameters the user supplied.
        query_kwargs = {k: self.params[k]
                        for k in ['external_port',
                                  'internal_port_id',
                                  'protocol']
                        if self.params[k] is not None}
        floating_ips = None
        if floating_ip:
            fip = self.conn.network.find_ip(floating_ip)
            floating_ips = [fip] if fip else []
        else:
            # No floating IP given: consider every floating IP visible.
            floating_ips = self.conn.network.ips()
        port_forwardings = []
        if port_forwarding_id is None:
            for fip in floating_ips:
                pfwds = self.conn.network.port_forwardings(fip.id, **query_kwargs)
                port_forwardings.extend(list(pfwds))
        else:
            for fip in floating_ips:
                # Fix: query_kwargs was previously passed positionally, which
                # bound it to find_port_forwarding()'s ignore_missing
                # parameter instead of the query filters. With no filters
                # (empty dict, falsy) that raised an uncaught
                # NotFoundException on the first floating IP lacking the
                # forwarding, and in all cases the filters were dropped.
                # Expand it as keyword filters and keep the default
                # ignore_missing=True so the scan continues across IPs.
                pfwd = self.conn.network.find_port_forwarding(
                    port_forwarding_id, fip.id, **query_kwargs)
                if pfwd:
                    return [pfwd]
        return port_forwardings

    def run(self):
        """Module entry point: emit matching forwardings; never changes state."""
        port_forwardings = [pfwd.to_dict(computed=False)
                            for pfwd in self._find_port_forwardings()]
        self.exit(changed=False, port_forwardings=port_forwardings)
def main():
    """Instantiate the Ansible module and execute it."""
    PortForwardingInfoModule()()


if __name__ == '__main__':
    main()

View File

@@ -181,7 +181,7 @@ class IdentityProjectModule(OpenStackModule):
raise ValueError('Duplicate key(s) in extra_specs: {0}'
.format(', '.join(list(duplicate_keys))))
for k, v in extra_specs.items():
if k not in project or v != project[k]:
if v != project[k]:
attributes[k] = v
if attributes:

View File

@@ -38,9 +38,6 @@ options:
groups:
description: Number of groups that are allowed for the project
type: int
health_monitors:
description: Maximum number of health monitors that can be created.
type: int
injected_file_content_bytes:
description:
- Maximum file size in bytes.
@@ -64,12 +61,6 @@ options:
key_pairs:
description: Number of key pairs to allow.
type: int
l7_policies:
description: The maximum amount of L7 policies you can create.
type: int
listeners:
description: The maximum number of listeners you can create.
type: int
load_balancers:
description: The maximum amount of load balancers you can create
type: int
@@ -77,9 +68,6 @@ options:
metadata_items:
description: Number of metadata items allowed per instance.
type: int
members:
description: Number of members allowed for loadbalancer.
type: int
name:
description: Name of the OpenStack Project to manage.
required: true
@@ -239,33 +227,6 @@ quotas:
server_groups:
description: Number of server groups to allow.
type: int
load_balancer:
description: Load_balancer service quotas
type: dict
contains:
health_monitors:
description: Maximum number of health monitors that can be
created.
type: int
l7_policies:
description: The maximum amount of L7 policies you can
create.
type: int
listeners:
description: The maximum number of listeners you can create
type: int
load_balancers:
description: The maximum amount of load balancers one can
create
type: int
members:
description: The maximum amount of members for
loadbalancer.
type: int
pools:
description: The maximum amount of pools one can create.
type: int
network:
description: Network service quotas
type: dict
@@ -273,9 +234,16 @@ quotas:
floating_ips:
description: Number of floating IP's to allow.
type: int
load_balancers:
description: The maximum amount of load balancers one can
create
type: int
networks:
description: Number of networks to allow.
type: int
pools:
description: The maximum amount of pools one can create.
type: int
ports:
description: Number of Network ports to allow, this needs
to be greater than the instances limit.
@@ -344,7 +312,9 @@ quotas:
server_groups: 10,
network:
floating_ips: 50,
load_balancers: 10,
networks: 10,
pools: 10,
ports: 160,
rbac_policies: 10,
routers: 10,
@@ -360,13 +330,6 @@ quotas:
per_volume_gigabytes: -1,
snapshots: 10,
volumes: 10,
load_balancer:
health_monitors: 10,
load_balancers: 10,
l7_policies: 10,
listeners: 10,
pools: 5,
members: 5,
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
@@ -374,8 +337,9 @@ from collections import defaultdict
class QuotaModule(OpenStackModule):
# TODO: Add missing network quota options 'check_limit'
# to argument_spec, DOCUMENTATION and RETURN docstrings
# TODO: Add missing network quota options 'check_limit', 'health_monitors',
# 'l7_policies', 'listeners' to argument_spec, DOCUMENTATION and
# RETURN docstrings
argument_spec = dict(
backup_gigabytes=dict(type='int'),
backups=dict(type='int'),
@@ -386,7 +350,6 @@ class QuotaModule(OpenStackModule):
'network_floating_ips']),
gigabytes=dict(type='int'),
groups=dict(type='int'),
health_monitors=dict(type='int'),
injected_file_content_bytes=dict(type='int',
aliases=['injected_file_size']),
injected_file_path_bytes=dict(type='int',
@@ -394,11 +357,8 @@ class QuotaModule(OpenStackModule):
injected_files=dict(type='int'),
instances=dict(type='int'),
key_pairs=dict(type='int', no_log=False),
l7_policies=dict(type='int'),
listeners=dict(type='int'),
load_balancers=dict(type='int', aliases=['loadbalancer']),
metadata_items=dict(type='int'),
members=dict(type='int'),
name=dict(required=True),
networks=dict(type='int', aliases=['network']),
per_volume_gigabytes=dict(type='int'),
@@ -422,9 +382,9 @@ class QuotaModule(OpenStackModule):
supports_check_mode=True
)
# Some attributes in quota resources don't exist in the api anymore, e.g.
# compute quotas that were simply network proxies, and pre-Octavia network
# quotas. This map allows marking them to be skipped.
# Some attributes in quota resources don't exist in the api anymore, mostly
# compute quotas that were simply network proxies. This map allows marking
# them to be skipped.
exclusion_map = {
'compute': {
# 'fixed_ips', # Available until Nova API version 2.35
@@ -437,39 +397,24 @@ class QuotaModule(OpenStackModule):
# 'injected_file_path_bytes', # Nova API
# 'injected_files', # version 2.56
},
'load_balancer': {'name'},
'network': {
'name',
'l7_policies',
'load_balancers',
'loadbalancer',
'health_monitors',
'pools',
'listeners',
},
'network': {'name'},
'volume': {'name'},
}
def _get_quotas(self, project):
quota = {}
if self.conn.has_service('block-storage'):
quota['volume'] = self.conn.block_storage.get_quota_set(project.id)
quota['volume'] = self.conn.block_storage.get_quota_set(project)
else:
self.warn('Block storage service aka volume service is not'
' supported by your cloud. Ignoring volume quotas.')
if self.conn.has_service('load-balancer'):
quota['load_balancer'] = self.conn.load_balancer.get_quota(
project.id)
else:
self.warn('Loadbalancer service is not supported by your'
' cloud. Ignoring loadbalancer quotas.')
if self.conn.has_service('network'):
quota['network'] = self.conn.network.get_quota(project.id)
else:
self.warn('Network service is not supported by your cloud.'
' Ignoring network quotas.')
quota['compute'] = self.conn.compute.get_quota_set(project.id)
return quota
@@ -507,6 +452,7 @@ class QuotaModule(OpenStackModule):
# Get current quota values
quotas = self._get_quotas(project)
changed = False
if self.ansible.check_mode:
@@ -522,8 +468,6 @@ class QuotaModule(OpenStackModule):
self.conn.network.delete_quota(project.id)
if 'volume' in quotas:
self.conn.block_storage.revert_quota_set(project)
if 'load_balancer' in quotas:
self.conn.load_balancer.delete_quota(project.id)
# Necessary since we can't tell what the default quotas are
quotas = self._get_quotas(project)
@@ -533,18 +477,14 @@ class QuotaModule(OpenStackModule):
if changes:
if 'volume' in changes:
quotas['volume'] = self.conn.block_storage.update_quota_set(
project.id, **changes['volume'])
self.conn.block_storage.update_quota_set(
quotas['volume'], **changes['volume'])
if 'compute' in changes:
quotas['compute'] = self.conn.compute.update_quota_set(
project.id, **changes['compute'])
self.conn.compute.update_quota_set(
quotas['compute'], **changes['compute'])
if 'network' in changes:
quotas['network'] = self.conn.network.update_quota(
project.id, **changes['network'])
if 'load_balancer' in changes:
quotas['load_balancer'] = \
self.conn.load_balancer.update_quota(
project.id, **changes['load_balancer'])
changed = True
quotas = {k: v.to_dict(computed=False) for k, v in quotas.items()}

View File

@@ -239,11 +239,7 @@ class DnsRecordsetModule(OpenStackModule):
elif self._needs_update(kwargs, recordset):
recordset = self.conn.dns.update_recordset(recordset, **kwargs)
changed = True
# NOTE(gtema): this is a workaround to temporarily bring the
# zone_id param back which may not me populated by SDK
rs = recordset.to_dict(computed=False)
rs["zone_id"] = zone.id
self.exit_json(changed=changed, recordset=rs)
self.exit_json(changed=changed, recordset=recordset)
elif state == 'absent' and recordset is not None:
self.conn.dns.delete_recordset(recordset)
changed = True

View File

@@ -19,9 +19,7 @@ options:
- Valid only with keystone version 3.
- Required if I(project) is not specified.
- When I(project) is specified, then I(domain) will not be used for
scoping the role association, only for finding resources. Deprecated
for finding resources, please use I(group_domain), I(project_domain),
I(role_domain), or I(user_domain).
scoping the role association, only for finding resources.
- "When scoping the role association, I(project) has precedence over
I(domain) and I(domain) has precedence over I(system): When I(project)
is specified, then I(domain) and I(system) are not used for role
@@ -34,45 +32,24 @@ options:
- Valid only with keystone version 3.
- If I(group) is not specified, then I(user) is required. Both may not be
specified at the same time.
- You can supply I(group_domain) or the deprecated usage of I(domain) to
find group resources.
type: str
group_domain:
description:
- Name or ID for the domain.
- Valid only with keystone version 3.
- Only valid for finding group resources.
type: str
project:
description:
- Name or ID of the project to scope the role association to.
- If you are using keystone version 2, then this value is required.
- When I(project) is specified, then I(domain) will not be used for
scoping the role association, only for finding resources. Prefer
I(group_domain) over I(domain).
scoping the role association, only for finding resources.
- "When scoping the role association, I(project) has precedence over
I(domain) and I(domain) has precedence over I(system): When I(project)
is specified, then I(domain) and I(system) are not used for role
association. When I(domain) is specified, then I(system) will not be
used for role association."
type: str
project_domain:
description:
- Name or ID for the domain.
- Valid only with keystone version 3.
- Only valid for finding project resources.
type: str
role:
description:
- Name or ID for the role.
required: true
type: str
role_domain:
description:
- Name or ID for the domain.
- Valid only with keystone version 3.
- Only valid for finding role resources.
type: str
state:
description:
- Should the roles be present or absent on the user.
@@ -96,12 +73,6 @@ options:
- If I(user) is not specified, then I(group) is required. Both may not be
specified at the same time.
type: str
user_domain:
description:
- Name or ID for the domain.
- Valid only with keystone version 3.
- Only valid for finding user resources.
type: str
extends_documentation_fragment:
- openstack.cloud.openstack
'''
@@ -130,15 +101,11 @@ class IdentityRoleAssignmentModule(OpenStackModule):
argument_spec = dict(
domain=dict(),
group=dict(),
group_domain=dict(type='str'),
project=dict(),
project_domain=dict(type='str'),
role=dict(required=True),
role_domain=dict(type='str'),
state=dict(default='present', choices=['absent', 'present']),
system=dict(),
user=dict(),
user_domain=dict(type='str'),
)
module_kwargs = dict(
@@ -146,33 +113,17 @@ class IdentityRoleAssignmentModule(OpenStackModule):
('user', 'group'),
('domain', 'project', 'system'),
],
mutually_exclusive=[
('user', 'group'),
('project', 'system'), # domain should be part of this
],
supports_check_mode=True
)
def _find_domain_id(self, domain):
if domain is not None:
domain = self.conn.identity.find_domain(domain,
ignore_missing=False)
return dict(domain_id=domain['id'])
return dict()
def run(self):
filters = {}
group_find_filters = {}
project_find_filters = {}
role_find_filters = {}
user_find_filters = {}
find_filters = {}
kwargs = {}
role_find_filters.update(self._find_domain_id(
self.params['role_domain']))
role_name_or_id = self.params['role']
role = self.conn.identity.find_role(role_name_or_id,
ignore_missing=False,
**role_find_filters)
ignore_missing=False)
filters['role_id'] = role['id']
domain_name_or_id = self.params['domain']
@@ -180,31 +131,22 @@ class IdentityRoleAssignmentModule(OpenStackModule):
domain = self.conn.identity.find_domain(
domain_name_or_id, ignore_missing=False)
filters['scope_domain_id'] = domain['id']
group_find_filters['domain_id'] = domain['id']
project_find_filters['domain_id'] = domain['id']
user_find_filters['domain_id'] = domain['id']
find_filters['domain_id'] = domain['id']
kwargs['domain'] = domain['id']
user_name_or_id = self.params['user']
if user_name_or_id is not None:
user_find_filters.update(self._find_domain_id(
self.params['user_domain']))
user = self.conn.identity.find_user(
user_name_or_id, ignore_missing=False,
**user_find_filters)
user_name_or_id, ignore_missing=False, **find_filters)
filters['user_id'] = user['id']
else:
user = None
kwargs['user'] = user['id']
group_name_or_id = self.params['group']
if group_name_or_id is not None:
group_find_filters.update(self._find_domain_id(
self.params['group_domain']))
group = self.conn.identity.find_group(
group_name_or_id, ignore_missing=False,
**group_find_filters)
group_name_or_id, ignore_missing=False, **find_filters)
filters['group_id'] = group['id']
else:
group = None
kwargs['group'] = group['id']
system_name = self.params['system']
if system_name is not None:
@@ -212,14 +154,14 @@ class IdentityRoleAssignmentModule(OpenStackModule):
if 'scope_domain_id' not in filters:
filters['scope.system'] = system_name
kwargs['system'] = system_name
project_name_or_id = self.params['project']
if project_name_or_id is not None:
project_find_filters.update(self._find_domain_id(
self.params['project_domain']))
project = self.conn.identity.find_project(
project_name_or_id, ignore_missing=False,
**project_find_filters)
project_name_or_id, ignore_missing=False, **find_filters)
filters['scope_project_id'] = project['id']
kwargs['project'] = project['id']
# project has precedence over domain and system
filters.pop('scope_domain_id', None)
@@ -234,50 +176,10 @@ class IdentityRoleAssignmentModule(OpenStackModule):
or (state == 'absent' and role_assignments)))
if state == 'present' and not role_assignments:
if 'scope_domain_id' in filters:
if user is not None:
self.conn.identity.assign_domain_role_to_user(
filters['scope_domain_id'], user, role)
else:
self.conn.identity.assign_domain_role_to_group(
filters['scope_domain_id'], group, role)
elif 'scope_project_id' in filters:
if user is not None:
self.conn.identity.assign_project_role_to_user(
filters['scope_project_id'], user, role)
else:
self.conn.identity.assign_project_role_to_group(
filters['scope_project_id'], group, role)
elif 'scope.system' in filters:
if user is not None:
self.conn.identity.assign_system_role_to_user(
user, role, filters['scope.system'])
else:
self.conn.identity.assign_system_role_to_group(
group, role, filters['scope.system'])
self.conn.grant_role(role['id'], **kwargs)
self.exit_json(changed=True)
elif state == 'absent' and role_assignments:
if 'scope_domain_id' in filters:
if user is not None:
self.conn.identity.unassign_domain_role_from_user(
filters['scope_domain_id'], user, role)
else:
self.conn.identity.unassign_domain_role_from_group(
filters['scope_domain_id'], group, role)
elif 'scope_project_id' in filters:
if user is not None:
self.conn.identity.unassign_project_role_from_user(
filters['scope_project_id'], user, role)
else:
self.conn.identity.unassign_project_role_from_group(
filters['scope_project_id'], group, role)
elif 'scope.system' in filters:
if user is not None:
self.conn.identity.unassign_system_role_from_user(
user, role, filters['scope.system'])
else:
self.conn.identity.unassign_system_role_from_group(
group, role, filters['scope.system'])
self.conn.revoke_role(role['id'], **kwargs)
self.exit_json(changed=True)
else:
self.exit_json(changed=False)

View File

@@ -372,10 +372,6 @@ class RouterModule(OpenStackModule):
for p in external_fixed_ips:
if 'ip_address' in p:
req_fip_map[p['subnet_id']].add(p['ip_address'])
elif p['subnet_id'] in cur_fip_map:
# handle idempotence of updating with no explicit ip
req_fip_map[p['subnet_id']].update(
cur_fip_map[p['subnet_id']])
# Check if external ip addresses need to be added
for fip in external_fixed_ips:
@@ -468,7 +464,7 @@ class RouterModule(OpenStackModule):
subnet = self.conn.network.find_subnet(
iface['subnet_id'], ignore_missing=False, **filters)
fip = dict(subnet_id=subnet.id)
if iface.get('ip_address', None) is not None:
if 'ip_address' in iface:
fip['ip_address'] = iface['ip_address']
external_fixed_ips.append(fip)
@@ -620,13 +616,9 @@ class RouterModule(OpenStackModule):
router = self.conn.network.find_router(name, **query_filters)
network = None
if network_name_or_id:
# First try to find a network in the specified project.
network = self.conn.network.find_network(network_name_or_id,
ignore_missing=False,
**query_filters)
if not network:
# Fall back to a global search for the network.
network = self.conn.network.find_network(network_name_or_id,
ignore_missing=False)
# Validate and cache the subnet IDs so we can avoid duplicate checks
# and expensive API calls.

View File

@@ -195,24 +195,16 @@ options:
added.
- On server creation, if I(security_groups) is omitted, the API creates
the server in the default security group.
- On server creation, requested security groups are not applied to
pre-existing ports.
- On update, if I(security_groups) is set, the security groups are
applied to all attached ports.
- Requested security groups are not applied to pre-existing ports.
type: list
elements: str
default: []
state:
description:
- Should the resource be C(present) or C(absent).
choices: [present, absent]
default: present
type: str
tags:
description:
- A list of tags that should be added to the instance.
type: list
elements: str
default: []
terminate_volume:
description:
- If C(true), delete volume when deleting the instance and if it has
@@ -764,7 +756,6 @@ server:
description: A list of associated tags.
returned: success
type: list
elements: str
task_state:
description: The task state of this server.
returned: success
@@ -832,9 +823,8 @@ class ServerModule(OpenStackModule):
nics=dict(default=[], type='list', elements='raw'),
reuse_ips=dict(default=True, type='bool'),
scheduler_hints=dict(type='dict'),
security_groups=dict(type='list', elements='str'),
security_groups=dict(default=[], type='list', elements='str'),
state=dict(default='present', choices=['absent', 'present']),
tags=dict(type='list', default=[], elements='str'),
terminate_volume=dict(default=False, type='bool'),
userdata=dict(),
volume_size=dict(type='int'),
@@ -900,8 +890,7 @@ class ServerModule(OpenStackModule):
return {
**self._build_update_ips(server),
**self._build_update_security_groups(server),
**self._build_update_server(server),
**self._build_update_tags(server)}
**self._build_update_server(server)}
def _build_update_ips(self, server):
auto_ip = self.params['auto_ip']
@@ -954,9 +943,6 @@ class ServerModule(OpenStackModule):
def _build_update_security_groups(self, server):
update = {}
if self.params['security_groups'] is None:
return update
required_security_groups = dict(
(sg['id'], sg) for sg in [
self.conn.network.find_security_group(
@@ -1044,16 +1030,9 @@ class ServerModule(OpenStackModule):
return update
def _build_update_tags(self, server):
required_tags = self.params.get('tags')
if set(server["tags"]) == set(required_tags):
return {}
update = dict(tags=required_tags)
return update
def _create(self):
for k in ['auto_ip', 'floating_ips', 'floating_ip_pools']:
if self.params[k] \
if self.params[k] is not None \
and self.params['wait'] is False:
# floating ip addresses will only be added if
# we wait until the server has been created
@@ -1093,7 +1072,7 @@ class ServerModule(OpenStackModule):
for k in ['auto_ip', 'availability_zone', 'boot_from_volume',
'boot_volume', 'config_drive', 'description', 'key_name',
'name', 'network', 'reuse_ips', 'scheduler_hints',
'security_groups', 'tags', 'terminate_volume', 'timeout',
'security_groups', 'terminate_volume', 'timeout',
'userdata', 'volume_size', 'volumes', 'wait']:
if self.params[k] is not None:
args[k] = self.params[k]
@@ -1112,20 +1091,10 @@ class ServerModule(OpenStackModule):
server.id,
**dict((k, self.params[k])
for k in ['wait', 'timeout', 'delete_ips']))
# Nova returns server for some time with the "DELETED" state. Our tests
# are not able to handle this, so wait for server to really disappear.
if self.params['wait']:
for count in self.sdk.utils.iterate_timeout(
timeout=self.params['timeout'],
message="Timeout waiting for server to be absent"
):
if self.conn.compute.find_server(server.id) is None:
break
def _update(self, server, update):
server = self._update_ips(server, update)
server = self._update_security_groups(server, update)
server = self._update_tags(server, update)
server = self._update_server(server, update)
# Refresh server attributes after security groups etc. have changed
#
@@ -1198,16 +1167,6 @@ class ServerModule(OpenStackModule):
# be postponed until all updates have been applied.
return server
def _update_tags(self, server, update):
tags = update.get('tags')
self.conn.compute.put(
"/servers/{server_id}/tags".format(server_id=server['id']),
json={"tags": tags},
microversion="2.26"
)
return server
def _parse_metadata(self, metadata):
if not metadata:
return {}

View File

@@ -136,9 +136,6 @@ class ServerActionModule(OpenStackModule):
# rebuild does not depend on state
will_change = (
(action == 'rebuild')
# `reboot_*` actions do not change state, servers remain `ACTIVE`
or (action == 'reboot_hard')
or (action == 'reboot_soft')
or (action == 'lock' and not server['is_locked'])
or (action == 'unlock' and server['is_locked'])
or server.status.lower() not in [a.lower()

View File

@@ -377,9 +377,7 @@ class ServerInfoModule(OpenStackModule):
kwargs['name_or_id'] = self.params['name']
self.exit(changed=False,
servers=[server.to_dict(computed=False)
if hasattr(server, "to_dict") else server
for server in
servers=[server.to_dict(computed=False) for server in
self.conn.search_servers(**kwargs)])

View File

@@ -1,520 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2025 VEXXHOST, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
DOCUMENTATION = r"""
---
module: share_type
short_description: Manage OpenStack share type
author: OpenStack Ansible SIG
description:
- Add, remove or update share types in OpenStack Manila.
options:
name:
description:
- Share type name or id.
- For private share types, the UUID must be used instead of name.
required: true
type: str
description:
description:
- Description of the share type.
type: str
extra_specs:
description:
- Dictionary of share type extra specifications.
type: dict
is_public:
description:
- Make share type accessible to the public.
- Can be updated after creation using Manila API direct updates.
type: bool
default: true
driver_handles_share_servers:
description:
- Boolean flag indicating whether share servers are managed by the driver.
- Required for share type creation.
- This is automatically added to extra_specs as 'driver_handles_share_servers'.
type: bool
default: true
state:
description:
- Indicate desired state of the resource.
choices: ['present', 'absent']
default: present
type: str
extends_documentation_fragment:
- openstack.cloud.openstack
"""
EXAMPLES = r"""
- name: Delete share type by name
openstack.cloud.share_type:
name: test_share_type
state: absent
- name: Delete share type by id
openstack.cloud.share_type:
name: fbadfa6b-5f17-4c26-948e-73b94de57b42
state: absent
- name: Create share type
openstack.cloud.share_type:
name: manila-generic-share
state: present
driver_handles_share_servers: true
extra_specs:
share_backend_name: GENERIC_BACKEND
snapshot_support: true
create_share_from_snapshot_support: true
description: Generic share type
is_public: true
"""
RETURN = """
share_type:
description: Dictionary describing share type
returned: On success when I(state) is 'present'
type: dict
contains:
name:
description: share type name
returned: success
type: str
sample: manila-generic-share
extra_specs:
description: share type extra specifications
returned: success
type: dict
sample: {"share_backend_name": "GENERIC_BACKEND", "snapshot_support": "true"}
is_public:
description: whether the share type is public
returned: success
type: bool
sample: True
description:
description: share type description
returned: success
type: str
sample: Generic share type
driver_handles_share_servers:
description: whether driver handles share servers
returned: success
type: bool
sample: true
id:
description: share type uuid
returned: success
type: str
sample: b75d8c5c-a6d8-4a5d-8c86-ef4f1298525d
"""
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import (
OpenStackModule,
)
# Manila API microversion 2.50 provides complete share type information
# including is_default field and description
# Reference: https://docs.openstack.org/api-ref/shared-file-system/#show-share-type-detail
MANILA_MICROVERSION = "2.50"
class ShareTypeModule(OpenStackModule):
    """Manage OpenStack Manila share types.

    Implements create/update/delete of share types through direct REST
    calls against the Shared File System (Manila) API, because the SDK
    proxy does not expose dedicated share-type methods (see
    _find_share_type). Every API call is attempted first with
    MANILA_MICROVERSION and falls back to the un-versioned endpoint.
    """

    # Ansible argument specification for this module.
    argument_spec = dict(
        name=dict(type="str", required=True),
        description=dict(type="str", required=False),
        extra_specs=dict(type="dict", required=False),
        is_public=dict(type="bool", default=True),
        driver_handles_share_servers=dict(type="bool", default=True),
        state=dict(type="str", default="present", choices=["absent", "present"]),
    )
    # NOTE(review): this required_if entry is effectively a no-op because
    # driver_handles_share_servers has a default value and is therefore
    # always present; kept as-is to preserve behavior.
    module_kwargs = dict(
        required_if=[("state", "present", ["driver_handles_share_servers"])],
        supports_check_mode=True,
    )

    @staticmethod
    def _extract_result(details):
        """Normalize a share-type object into a plain dict.

        Accepts SDK resources (via to_dict), plain dicts, or any other
        mapping-like value; returns {} when details is None. Copies the
        vendor-prefixed public-access flag into a plain 'is_public' key
        so callers get a stable field name regardless of API version.
        """
        if details is not None:
            if hasattr(details, "to_dict"):
                result = details.to_dict(computed=False)
            elif isinstance(details, dict):
                result = details.copy()
            else:
                result = dict(details) if details else {}
            # Normalize is_public field from API response
            if result and "os-share-type-access:is_public" in result:
                result["is_public"] = result["os-share-type-access:is_public"]
            elif result and "share_type_access:is_public" in result:
                result["is_public"] = result["share_type_access:is_public"]
            return result
        return {}

    def _find_share_type(self, name_or_id):
        """
        Find share type by name or ID with comprehensive information.

        Uses direct Manila API calls since SDK methods are not available.
        Handles both public and private share types. Returns the share
        type dict on success, or None when nothing matches.
        """
        # Try direct access first for complete information
        share_type = self._find_by_direct_access(name_or_id)
        if share_type:
            return share_type
        # If direct access fails, try searching in public listing
        # This handles cases where we have the name but need to find the ID
        try:
            response = self.conn.shared_file_system.get("/types")
            share_types = response.json().get("share_types", [])
            for share_type in share_types:
                if share_type["name"] == name_or_id or share_type["id"] == name_or_id:
                    # Found by name, now get complete info using the ID
                    result = self._find_by_direct_access(share_type["id"])
                    if result:
                        return result
        except Exception:
            # Best-effort lookup: listing failures are treated as "not found".
            pass
        return None

    def _find_by_direct_access(self, name_or_id):
        """
        Find share type by direct access using Manila API.

        Uses microversion to get complete information including description
        and is_default. Falls back to basic API if microversion is not
        supported. Returns the share type dict, or None on any failure.
        """
        # Try with microversion first for complete information
        try:
            response = self.conn.shared_file_system.get(
                f"/types/{name_or_id}", microversion=MANILA_MICROVERSION
            )
            share_type_data = response.json().get("share_type", {})
            if share_type_data:
                return share_type_data
        except Exception:
            pass
        # Fallback: try without microversion for basic information
        try:
            response = self.conn.shared_file_system.get(f"/types/{name_or_id}")
            share_type_data = response.json().get("share_type", {})
            if share_type_data:
                return share_type_data
        except Exception:
            pass
        return None

    def run(self):
        """Module entry point: reconcile desired state with the API.

        Dispatches on (state, existing share type) to create, update,
        delete, or do nothing; honors Ansible check mode.
        """
        state = self.params["state"]
        name_or_id = self.params["name"]
        # Find existing share type (similar to volume_type.py pattern)
        share_type = self._find_share_type(name_or_id)
        if self.ansible.check_mode:
            self.exit_json(changed=self._will_change(state, share_type))
        if state == "present" and not share_type:
            # Create type
            create_result = self._create()
            share_type = self._extract_result(create_result)
            self.exit_json(changed=True, share_type=share_type)
        elif state == "present" and share_type:
            # Update type
            update = self._build_update(share_type)
            update_result = self._update(share_type, update)
            share_type = self._extract_result(update_result)
            self.exit_json(changed=bool(update), share_type=share_type)
        elif state == "absent" and share_type:
            # Delete type
            self._delete(share_type)
            self.exit_json(changed=True)
        else:
            # state == 'absent' and not share_type
            self.exit_json(changed=False)

    def _build_update(self, share_type):
        """Compute the full update plan (extra specs + attributes/access).

        Returns an empty dict when the share type already matches the
        requested parameters.
        """
        return {
            **self._build_update_extra_specs(share_type),
            **self._build_update_share_type(share_type),
        }

    def _build_update_extra_specs(self, share_type):
        """Diff current vs requested extra specs.

        Returns a dict with optional keys 'delete_extra_specs_keys'
        (specs present on the server but not requested) and
        'create_extra_specs' (the full requested spec set, as strings).
        """
        update = {}
        old_extra_specs = share_type.get("extra_specs", {})
        # Build the complete new extra specs including driver_handles_share_servers
        new_extra_specs = {}
        # Add driver_handles_share_servers (always required)
        if self.params.get("driver_handles_share_servers") is not None:
            # .title() maps Python bools to the strings "True"/"False"
            # that the Manila API expects for this spec.
            new_extra_specs["driver_handles_share_servers"] = str(
                self.params["driver_handles_share_servers"]
            ).title()
        # Add user-defined extra specs
        if self.params.get("extra_specs"):
            new_extra_specs.update(
                {k: str(v) for k, v in self.params["extra_specs"].items()}
            )
        # NOTE(review): when the task omits extra_specs, every existing
        # spec other than driver_handles_share_servers ends up in the
        # delete set — i.e. re-running without extra_specs strips specs
        # previously set. Confirm this destructive behavior is intended.
        delete_extra_specs_keys = set(old_extra_specs.keys()) - set(
            new_extra_specs.keys()
        )
        if delete_extra_specs_keys:
            update["delete_extra_specs_keys"] = delete_extra_specs_keys
        if old_extra_specs != new_extra_specs:
            update["create_extra_specs"] = new_extra_specs
        return update

    def _build_update_share_type(self, share_type):
        """Diff mutable share type attributes and public-access flag.

        Returns a dict with optional keys 'update_access' (is_public
        change) and 'type_attributes' (description change).
        """
        update = {}
        # Only allow description updates - name is used for identification
        allowed_attributes = ["description"]
        # Handle is_public updates - CLI supports this, so we should too
        # Always check is_public since it has a default value of True
        current_is_public = share_type.get(
            "os-share-type-access:is_public",
            share_type.get("share_type_access:is_public"),
        )
        # NOTE(review): assumes the API returns a real boolean here; if a
        # backend returns the string "true"/"True" this comparison would
        # always report a change — verify against the target API.
        requested_is_public = self.params["is_public"]  # Will be True by default now
        if current_is_public != requested_is_public:
            # Mark this as needing a special access update
            update["update_access"] = {
                "is_public": requested_is_public,
                "share_type_id": share_type.get("id"),
            }
        type_attributes = {
            k: self.params[k]
            for k in allowed_attributes
            if k in self.params
            and self.params.get(k) is not None
            and self.params.get(k) != share_type.get(k)
        }
        if type_attributes:
            update["type_attributes"] = type_attributes
        return update

    def _create(self):
        """Create a new share type from module parameters.

        Builds the request payload (including extra specs and both the
        new and legacy is_public field names), POSTs it with the
        microversion and falls back to the legacy endpoint on failure.
        Returns the created share type dict; fails the module on error.
        """
        share_type_attrs = {"name": self.params["name"]}
        if self.params.get("description") is not None:
            share_type_attrs["description"] = self.params["description"]
        # Handle driver_handles_share_servers - this is the key required parameter
        extra_specs = {}
        if self.params.get("driver_handles_share_servers") is not None:
            extra_specs["driver_handles_share_servers"] = str(
                self.params["driver_handles_share_servers"]
            ).title()
        # Add user-defined extra specs
        if self.params.get("extra_specs"):
            extra_specs.update(
                {k: str(v) for k, v in self.params["extra_specs"].items()}
            )
        if extra_specs:
            share_type_attrs["extra_specs"] = extra_specs
        # Handle is_public parameter - field name depends on API version
        if self.params.get("is_public") is not None:
            # For microversion (API 2.7+), use share_type_access:is_public
            # For older versions, use os-share-type-access:is_public
            share_type_attrs["share_type_access:is_public"] = self.params["is_public"]
            # Also include legacy field for compatibility
            share_type_attrs["os-share-type-access:is_public"] = self.params[
                "is_public"
            ]
        try:
            payload = {"share_type": share_type_attrs}
            # Try with microversion first (supports share_type_access:is_public)
            try:
                response = self.conn.shared_file_system.post(
                    "/types", json=payload, microversion=MANILA_MICROVERSION
                )
                share_type_data = response.json().get("share_type", {})
            except Exception:
                # Fallback: try without microversion (uses os-share-type-access:is_public)
                # Remove the newer field name for older API compatibility
                if "share_type_access:is_public" in share_type_attrs:
                    del share_type_attrs["share_type_access:is_public"]
                payload = {"share_type": share_type_attrs}
                response = self.conn.shared_file_system.post("/types", json=payload)
                share_type_data = response.json().get("share_type", {})
            return share_type_data
        except Exception as e:
            self.fail_json(msg=f"Failed to create share type: {str(e)}")

    def _delete(self, share_type):
        """Delete the given share type (dict or SDK object) by its id.

        Fails the module if both the microversioned and legacy DELETE
        calls raise.
        """
        # Use direct API call since SDK method may not exist
        try:
            share_type_id = (
                share_type.get("id") if isinstance(share_type, dict) else share_type.id
            )
            # Try with microversion first, fallback if not supported
            try:
                self.conn.shared_file_system.delete(
                    f"/types/{share_type_id}", microversion=MANILA_MICROVERSION
                )
            except Exception:
                self.conn.shared_file_system.delete(f"/types/{share_type_id}")
        except Exception as e:
            self.fail_json(msg=f"Failed to delete share type: {str(e)}")

    def _update(self, share_type, update):
        """Apply an update plan produced by _build_update.

        Order matters: attributes first, then extra specs, then access;
        each step refreshes and returns the share type it operated on.
        Returns share_type unchanged when the plan is empty.
        """
        if not update:
            return share_type
        share_type = self._update_share_type(share_type, update)
        share_type = self._update_extra_specs(share_type, update)
        share_type = self._update_access(share_type, update)
        return share_type

    def _update_extra_specs(self, share_type, update):
        """Delete removed extra specs keys and POST the requested set.

        Refreshes the share type from the API after each mutation so the
        returned dict reflects server state; fails the module on error.
        """
        share_type_id = (
            share_type.get("id") if isinstance(share_type, dict) else share_type.id
        )
        delete_extra_specs_keys = update.get("delete_extra_specs_keys")
        if delete_extra_specs_keys:
            for key in delete_extra_specs_keys:
                try:
                    # Try with microversion first, fallback if not supported
                    try:
                        self.conn.shared_file_system.delete(
                            f"/types/{share_type_id}/extra_specs/{key}",
                            microversion=MANILA_MICROVERSION,
                        )
                    except Exception:
                        self.conn.shared_file_system.delete(
                            f"/types/{share_type_id}/extra_specs/{key}"
                        )
                except Exception as e:
                    self.fail_json(msg=f"Failed to delete extra spec '{key}': {str(e)}")
            # refresh share_type information
            share_type = self._find_share_type(share_type_id)
        create_extra_specs = update.get("create_extra_specs")
        if create_extra_specs:
            # Convert values to strings as Manila API expects string values
            string_specs = {k: str(v) for k, v in create_extra_specs.items()}
            try:
                # Try with microversion first, fallback if not supported
                try:
                    self.conn.shared_file_system.post(
                        f"/types/{share_type_id}/extra_specs",
                        json={"extra_specs": string_specs},
                        microversion=MANILA_MICROVERSION,
                    )
                except Exception:
                    self.conn.shared_file_system.post(
                        f"/types/{share_type_id}/extra_specs",
                        json={"extra_specs": string_specs},
                    )
            except Exception as e:
                self.fail_json(msg=f"Failed to update extra specs: {str(e)}")
            # refresh share_type information
            share_type = self._find_share_type(share_type_id)
        return share_type

    def _update_access(self, share_type, update):
        """Update share type access (public/private) using direct API update"""
        access_update = update.get("update_access")
        if not access_update:
            return share_type
        share_type_id = access_update["share_type_id"]
        is_public = access_update["is_public"]
        try:
            # Use direct update with share_type_access:is_public (works for both public and private)
            update_payload = {"share_type": {"share_type_access:is_public": is_public}}
            try:
                self.conn.shared_file_system.put(
                    f"/types/{share_type_id}",
                    json=update_payload,
                    microversion=MANILA_MICROVERSION,
                )
            except Exception:
                # Fallback: try with legacy field name for older API versions
                update_payload = {
                    "share_type": {"os-share-type-access:is_public": is_public}
                }
                self.conn.shared_file_system.put(
                    f"/types/{share_type_id}", json=update_payload
                )
            # Refresh share type information after access change
            share_type = self._find_share_type(share_type_id)
        except Exception as e:
            self.fail_json(msg=f"Failed to update share type access: {str(e)}")
        return share_type

    def _update_share_type(self, share_type, update):
        """PUT changed mutable attributes (currently only description).

        Returns the updated share type from the API response, or the
        input unchanged when there is nothing to update; fails the
        module on error.
        """
        type_attributes = update.get("type_attributes")
        if type_attributes:
            share_type_id = (
                share_type.get("id") if isinstance(share_type, dict) else share_type.id
            )
            try:
                # Try with microversion first, fallback if not supported
                try:
                    response = self.conn.shared_file_system.put(
                        f"/types/{share_type_id}",
                        json={"share_type": type_attributes},
                        microversion=MANILA_MICROVERSION,
                    )
                except Exception:
                    response = self.conn.shared_file_system.put(
                        f"/types/{share_type_id}", json={"share_type": type_attributes}
                    )
                updated_type = response.json().get("share_type", {})
                return updated_type
            except Exception as e:
                self.fail_json(msg=f"Failed to update share type: {str(e)}")
        return share_type

    def _will_change(self, state, share_type):
        """Predict whether applying the module would change anything.

        Used for check mode: True when a create, update, or delete would
        occur; False otherwise.
        """
        if state == "present" and not share_type:
            return True
        if state == "present" and share_type:
            return bool(self._build_update(share_type))
        if state == "absent" and share_type:
            return True
        return False
def main():
    """Instantiate the share type module and run it."""
    ShareTypeModule()()


if __name__ == "__main__":
    main()

Some files were not shown because too many files have changed in this diff Show More