mirror of
https://opendev.org/openstack/ansible-collections-openstack.git
synced 2026-03-27 14:03:03 +00:00
Compare commits
69 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
ccec4d07b3 | ||
|
|
04b70b99da | ||
|
|
ed4c4036af | ||
|
|
e2ebc1c8d0 | ||
|
|
99eb60f7dc | ||
|
|
e90fd7a915 | ||
|
|
1bc4f648fb | ||
|
|
1a654a9c38 | ||
|
|
67b7ec5e58 | ||
|
|
70128d6230 | ||
|
|
1dc367b566 | ||
|
|
a178493281 | ||
|
|
b2aac80b41 | ||
|
|
ae6e48be00 | ||
|
|
e06a61f97a | ||
|
|
3c0a9f2d94 | ||
|
|
b1ecd54a5d | ||
|
|
4a3460e727 | ||
|
|
c9887b3a23 | ||
|
|
e84ebb0773 | ||
|
|
eef8368e6f | ||
|
|
f584c54dfd | ||
|
|
3901000119 | ||
|
|
556208fc3c | ||
|
|
3ac95541da | ||
|
|
59b5b33557 | ||
|
|
3d7948a4e4 | ||
|
|
6cb5ed4b84 | ||
|
|
283c342b10 | ||
|
|
2a5fb584e2 | ||
|
|
762fee2bad | ||
|
|
9d92897522 | ||
|
|
df0b57a3c6 | ||
|
|
5a6f5084dd | ||
|
|
c988b3bcbf | ||
|
|
437438e33c | ||
|
|
3fd79d342c | ||
|
|
98bb212ae4 | ||
|
|
08d8cd8c25 | ||
|
|
41cf92df99 | ||
|
|
5494d153b1 | ||
|
|
fef560eb5b | ||
|
|
1ce7dd8b5f | ||
|
|
438bbea34b | ||
|
|
239c45c78f | ||
|
|
e065818024 | ||
|
|
0764e671a9 | ||
|
|
5f4db3583e | ||
|
|
6262474c94 | ||
|
|
f9fcd35018 | ||
|
|
5dbf47cb49 | ||
|
|
57c63e7918 | ||
|
|
ed5829d462 | ||
|
|
b025e7c356 | ||
|
|
782340833e | ||
|
|
73aab9e80c | ||
|
|
ae7e8260a3 | ||
|
|
030df96dc0 | ||
|
|
c5d0d3ec82 | ||
|
|
0cff7eb3a2 | ||
|
|
29e3f3dac8 | ||
|
|
d18ea87091 | ||
|
|
529c1e8dcc | ||
|
|
99b7af529c | ||
|
|
2ed3ffe1d0 | ||
|
|
ae5dbf0fc0 | ||
|
|
4074db1bd0 | ||
|
|
3248ba9960 | ||
|
|
2d5ca42629 |
106
.zuul.yaml
106
.zuul.yaml
@@ -48,6 +48,7 @@
|
||||
designate: true
|
||||
neutron-dns: true
|
||||
neutron-trunk: true
|
||||
neutron-segments: true
|
||||
zuul_copy_output:
|
||||
'{{ devstack_log_dir }}/test_output.log': 'logs'
|
||||
extensions_to_txt:
|
||||
@@ -95,6 +96,39 @@
|
||||
c-bak: false
|
||||
tox_extra_args: -vv --skip-missing-interpreters=false -- coe_cluster coe_cluster_template
|
||||
|
||||
- job:
|
||||
name: ansible-collections-openstack-functional-devstack-manila-base
|
||||
parent: ansible-collections-openstack-functional-devstack-base
|
||||
# Do not restrict branches in base jobs because else Zuul would not find a matching
|
||||
# parent job variant during job freeze when child jobs are on other branches.
|
||||
description: |
|
||||
Run openstack collections functional tests against a devstack with Manila plugin enabled
|
||||
# Do not set job.override-checkout or job.required-projects.override-checkout in base job because
|
||||
# else Zuul will use this branch when matching variants for parent jobs during job freeze
|
||||
required-projects:
|
||||
- openstack/manila
|
||||
- openstack/python-manilaclient
|
||||
files:
|
||||
- ^ci/roles/share_type/.*$
|
||||
- ^plugins/modules/share_type.py
|
||||
- ^plugins/modules/share_type_info.py
|
||||
timeout: 10800
|
||||
vars:
|
||||
devstack_localrc:
|
||||
MANILA_ENABLED_BACKENDS: generic
|
||||
MANILA_OPTGROUP_generic_driver_handles_share_servers: true
|
||||
MANILA_OPTGROUP_generic_connect_share_server_to_tenant_network: true
|
||||
MANILA_USE_SERVICE_INSTANCE_PASSWORD: true
|
||||
devstack_plugins:
|
||||
manila: https://opendev.org/openstack/manila
|
||||
devstack_services:
|
||||
manila: true
|
||||
m-api: true
|
||||
m-sch: true
|
||||
m-shr: true
|
||||
m-dat: true
|
||||
tox_extra_args: -vv --skip-missing-interpreters=false -- share_type share_type_info
|
||||
|
||||
- job:
|
||||
name: ansible-collections-openstack-functional-devstack-magnum
|
||||
parent: ansible-collections-openstack-functional-devstack-magnum-base
|
||||
@@ -104,6 +138,15 @@
|
||||
with Magnum plugin enabled, using master of openstacksdk and latest
|
||||
ansible release. Run it only on coe_cluster{,_template} changes.
|
||||
|
||||
- job:
|
||||
name: ansible-collections-openstack-functional-devstack-manila
|
||||
parent: ansible-collections-openstack-functional-devstack-manila-base
|
||||
branches: master
|
||||
description: |
|
||||
Run openstack collections functional tests against a master devstack
|
||||
with Manila plugin enabled, using master of openstacksdk and latest
|
||||
ansible release. Run it only on share_type{,_info} changes.
|
||||
|
||||
- job:
|
||||
name: ansible-collections-openstack-functional-devstack-octavia-base
|
||||
parent: ansible-collections-openstack-functional-devstack-base
|
||||
@@ -164,17 +207,17 @@
|
||||
tox_install_siblings: false
|
||||
|
||||
- job:
|
||||
name: ansible-collections-openstack-functional-devstack-ansible-2.12
|
||||
name: ansible-collections-openstack-functional-devstack-ansible-2.18
|
||||
parent: ansible-collections-openstack-functional-devstack-base
|
||||
branches: master
|
||||
description: |
|
||||
Run openstack collections functional tests against a master devstack
|
||||
using master of openstacksdk and stable 2.12 branch of ansible
|
||||
using master of openstacksdk and stable 2.16 branch of ansible
|
||||
required-projects:
|
||||
- name: github.com/ansible/ansible
|
||||
override-checkout: stable-2.12
|
||||
override-checkout: stable-2.18
|
||||
vars:
|
||||
tox_envlist: ansible_2_12
|
||||
tox_envlist: ansible_2_18
|
||||
|
||||
- job:
|
||||
name: ansible-collections-openstack-functional-devstack-ansible-devel
|
||||
@@ -218,24 +261,22 @@
|
||||
bindep_profile: test py310
|
||||
|
||||
- job:
|
||||
name: openstack-tox-linters-ansible-2.12
|
||||
name: openstack-tox-linters-ansible-2.18
|
||||
parent: openstack-tox-linters-ansible
|
||||
nodeset: ubuntu-focal
|
||||
description: |
|
||||
Run openstack collections linter tests using the 2.12 branch of ansible
|
||||
Run openstack collections linter tests using the 2.18 branch of ansible
|
||||
required-projects:
|
||||
- name: github.com/ansible/ansible
|
||||
override-checkout: stable-2.12
|
||||
override-checkout: stable-2.18
|
||||
vars:
|
||||
ensure_tox_version: '<4'
|
||||
tox_envlist: linters_2_12
|
||||
python_version: 3.8
|
||||
bindep_profile: test py38
|
||||
tox_envlist: linters_2_18
|
||||
python_version: "3.12"
|
||||
bindep_profile: test py312
|
||||
|
||||
# Cross-checks with other projects
|
||||
- job:
|
||||
name: bifrost-collections-src
|
||||
parent: bifrost-integration-tinyipa-ubuntu-jammy
|
||||
parent: bifrost-integration-on-ubuntu-jammy
|
||||
required-projects:
|
||||
- openstack/ansible-collections-openstack
|
||||
- # always use master branch when collecting parent job variants, refer to git blame for rationale.
|
||||
@@ -246,7 +287,7 @@
|
||||
override-checkout: master
|
||||
- job:
|
||||
name: bifrost-keystone-collections-src
|
||||
parent: bifrost-integration-tinyipa-keystone-ubuntu-jammy
|
||||
parent: bifrost-integration-keystone-on-ubuntu-jammy
|
||||
required-projects:
|
||||
- openstack/ansible-collections-openstack
|
||||
- # always use master branch when collecting parent job variants, refer to git blame for rationale.
|
||||
@@ -258,7 +299,7 @@
|
||||
|
||||
- job:
|
||||
name: ansible-collections-openstack-release
|
||||
parent: base
|
||||
parent: openstack-tox-linters-ansible
|
||||
run: ci/publish/publish_collection.yml
|
||||
secrets:
|
||||
- ansible_galaxy_info
|
||||
@@ -268,28 +309,29 @@
|
||||
data:
|
||||
url: https://galaxy.ansible.com
|
||||
token: !encrypted/pkcs1-oaep
|
||||
- lZFzfoCbuwqV1k6qRfl/VS7E+knUW7+zpg7BptrenK4n0g7UY0HtdVkYq0pV0Tj/LbhzG
|
||||
jHD0mehcV1iS6B7ORKg4criJkdDfEx09BD8z8yv0EleiIMmhlrCoMcY593OZMBtVbGi0D
|
||||
CwQtNO98QIsfZogChfLfvRNiBmUV98mEb/p6p3EtGx8J7qcAsqfWxc/CzB8GCleLAHHHT
|
||||
FuikMM03ZnV0ew7E+TPkHbzzPhBZOqS5HYF0HtgttHwIXdfIWp/XdTuEEk7uRRgYZ2Iao
|
||||
ifWRzoKaOQmhM++e1ydCqw9D4y9dZEFNMQLwSqcrvtb8cNwT1kl7SCFqYNE2lbutj4ne6
|
||||
PTBQRsKegMB4Y3ena14fNF6tCynvJLPhF/cjPH2Jhs+B19XQhWkL3TgiOY02W24YHwRcP
|
||||
+LdkM8inAvyVi3DEbEqdjBPO9OFJcBOKPlCdkGvuwdNCuEpEwctWs0gV3voflG2CDKzmJ
|
||||
wu9JJOAWnq/0l1WpuDqWreKeQ/BUGZC2Gb4xRAqofulgvhs4WuYoEccjH4EJFIZ90S1EP
|
||||
R/ZLadqZaEhmjwGM5sMWbBbjT23XsRgg0Tzt9m8DENYMuYDqkMdRbt2jYZa+32p4hyxVe
|
||||
Y6H/pqYq5b9uOzumnShaK4WlmkQyXcNPkoSlMC1h4OGvqX/WUixpI38jyMA5Tc=
|
||||
- K93hOZo1B5z248H04COB1N2HCkGbFPo2EUr+0W7qFzsrdvmbsAI86Hl9bUCfEENGrwvfV
|
||||
0j9CE5iO0tyqal3r6ucMhGT44MgQWL3MBeRvK89yAJpSNMU7R7rEY/zbjZMoC9YElcHEv
|
||||
GEDZSA/0gQHCHpZVDlx4JMGwrnd+Nz9ha3c12BYeZS8rS/dQl7EmZ867OsozmNdG9UkkC
|
||||
0vP/dkenUQNvoZOSWgZztRBlbAyI1nc5iEEw9vvpLh19HcY9+S2iAZkgSq4jOOO4wn7gE
|
||||
XAZPr0HRdwS2m4Hw0Pusrg7SdC3+2O0N/fvFGnvvKXHcSgQk3rPLn6HfKzOJoPWc4WlDX
|
||||
MA79jYloNBXjOaeXOoiwYzzshWK53F6Ci+3leq1cYuFyHSi2ds2mYXat7YndZSsmsk5um
|
||||
hj0+Ddy9Om1uYy3nhHyZLULE7UDUmduA9EPkvdyWlcW0yZL2kXcrDTHlSp4PaJg9iKVys
|
||||
0aOOo9CNMwhyXAOGiFCYF/m7Efbnp50zUQhHN9+7LeVzXZuiH98C8kNvWfE0qrkrrgQ1n
|
||||
78UMqGcGpdw4ZSlWrDTbrbd4v0bRnsJ+IAWISnT5OXaeJgGZwXRuBHtTXqbjoosBeX/8w
|
||||
YKb0lx7E5ZtSw7+Y6LNDGihGTmVg1nkZUWo85CxyF/RiWHuNvpkzzqXmdGS1bg=
|
||||
|
||||
- project:
|
||||
check:
|
||||
jobs:
|
||||
- tox-pep8
|
||||
- openstack-tox-linters-ansible-devel
|
||||
- openstack-tox-linters-ansible-2.12
|
||||
- openstack-tox-linters-ansible-2.18
|
||||
- ansible-collections-openstack-functional-devstack
|
||||
- ansible-collections-openstack-functional-devstack-releases
|
||||
- ansible-collections-openstack-functional-devstack-ansible-2.12
|
||||
- ansible-collections-openstack-functional-devstack-ansible-2.18
|
||||
- ansible-collections-openstack-functional-devstack-ansible-devel
|
||||
- ansible-collections-openstack-functional-devstack-magnum
|
||||
- ansible-collections-openstack-functional-devstack-manila
|
||||
- ansible-collections-openstack-functional-devstack-octavia
|
||||
|
||||
- bifrost-collections-src:
|
||||
@@ -302,24 +344,24 @@
|
||||
gate:
|
||||
jobs:
|
||||
- tox-pep8
|
||||
- openstack-tox-linters-ansible-2.12
|
||||
# - ansible-collections-openstack-functional-devstack
|
||||
- openstack-tox-linters-ansible-2.18
|
||||
- ansible-collections-openstack-functional-devstack-releases
|
||||
# - ansible-collections-openstack-functional-devstack-ansible-2.12
|
||||
- ansible-collections-openstack-functional-devstack-magnum
|
||||
- ansible-collections-openstack-functional-devstack-manila
|
||||
- ansible-collections-openstack-functional-devstack-octavia
|
||||
|
||||
periodic:
|
||||
jobs:
|
||||
- openstack-tox-linters-ansible-devel
|
||||
- openstack-tox-linters-ansible-2.12
|
||||
- openstack-tox-linters-ansible-2.18
|
||||
- ansible-collections-openstack-functional-devstack
|
||||
- ansible-collections-openstack-functional-devstack-releases
|
||||
- ansible-collections-openstack-functional-devstack-ansible-2.12
|
||||
- ansible-collections-openstack-functional-devstack-ansible-2.18
|
||||
- ansible-collections-openstack-functional-devstack-ansible-devel
|
||||
- bifrost-collections-src
|
||||
- bifrost-keystone-collections-src
|
||||
- ansible-collections-openstack-functional-devstack-magnum
|
||||
- ansible-collections-openstack-functional-devstack-manila
|
||||
- ansible-collections-openstack-functional-devstack-octavia
|
||||
|
||||
tag:
|
||||
|
||||
120
CHANGELOG.rst
120
CHANGELOG.rst
@@ -4,6 +4,126 @@ Ansible OpenStack Collection Release Notes
|
||||
|
||||
.. contents:: Topics
|
||||
|
||||
v2.5.0
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Bugfixes and minor changes
|
||||
|
||||
Major Changes
|
||||
-------------
|
||||
|
||||
- Add import_method to module
|
||||
- Add object_containers_info module
|
||||
- Add support for filters in inventory
|
||||
- Add volume_manage module
|
||||
- Introduce share_type modules
|
||||
|
||||
Minor Changes
|
||||
-------------
|
||||
|
||||
- Allow role_assignment module to work cross domain
|
||||
- Don't compare current state for `reboot_*` actions
|
||||
- Fix disable_gateway_ip for subnet
|
||||
- Fix example in the dns_zone_info module doc
|
||||
- Fix router module external IPs when only subnet specified
|
||||
- Fix the bug reporting url
|
||||
- Let clouds_yaml_path behave as documented (Override path to clouds.yaml file)
|
||||
- Shows missing data in `stack_info` module output
|
||||
|
||||
v2.4.1
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Bugfixes and minor changes
|
||||
|
||||
Minor Changes
|
||||
-------------
|
||||
|
||||
- Update tags when changing server
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- Fix missed client_cert in OpenStackModule
|
||||
|
||||
v2.4.0
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
New trait module and minor changes
|
||||
|
||||
Major Changes
|
||||
-------------
|
||||
|
||||
- Add trait module
|
||||
|
||||
Minor Changes
|
||||
-------------
|
||||
|
||||
- Add loadbalancer quota options
|
||||
- Allow create instance with tags
|
||||
|
||||
New Modules
|
||||
-----------
|
||||
|
||||
- openstack.cloud.trait - Add or Delete a trait from OpenStack
|
||||
|
||||
v2.3.3
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Bugfixes and minor changes
|
||||
|
||||
Minor Changes
|
||||
-------------
|
||||
|
||||
- Add test to only_ipv4 in inventory
|
||||
- add an option to use only IPv4 only for ansible_host and ansible_ssh_host
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- CI - Fix deprecated ANSIBLE_COLLECTIONS_PATHS variable
|
||||
|
||||
v2.3.2
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Bugfixes and minor changes
|
||||
|
||||
Minor Changes
|
||||
-------------
|
||||
|
||||
- Drop compat implementations for tests
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- Fix openstack.cloud.port module failure in check mode
|
||||
|
||||
v2.3.1
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Client TLS certificate support
|
||||
|
||||
Minor Changes
|
||||
-------------
|
||||
|
||||
- Add ability to pass client tls certificate
|
||||
|
||||
v2.3.0
|
||||
======
|
||||
|
||||
@@ -211,7 +211,7 @@ Thank you for your interest in our Ansible OpenStack collection ☺️
|
||||
There are many ways in which you can participate in the project, for example:
|
||||
|
||||
- [Report and verify bugs and help with solving issues](
|
||||
https://storyboard.openstack.org/#!/project/openstack/ansible-collections-openstack).
|
||||
https://bugs.launchpad.net/ansible-collections-openstack).
|
||||
- [Submit and review patches](
|
||||
https://review.opendev.org/#/q/project:openstack/ansible-collections-openstack).
|
||||
- Follow OpenStack's [How To Contribute](https://wiki.openstack.org/wiki/How_To_Contribute) guide.
|
||||
|
||||
@@ -572,3 +572,66 @@ releases:
|
||||
name: volume_service_info
|
||||
namespace: ''
|
||||
release_date: '2024-11-28'
|
||||
2.3.1:
|
||||
changes:
|
||||
minor_changes:
|
||||
- Add ability to pass client tls certificate
|
||||
release_summary: Client TLS certificate support
|
||||
release_date: '2024-12-18'
|
||||
2.3.2:
|
||||
changes:
|
||||
bugfixes:
|
||||
- Fix openstack.cloud.port module failure in check mode
|
||||
minor_changes:
|
||||
- Drop compat implementations for tests
|
||||
release_summary: Bugfixes and minor changes
|
||||
release_date: '2024-12-20'
|
||||
2.3.3:
|
||||
changes:
|
||||
bugfixes:
|
||||
- CI - Fix deprecated ANSIBLE_COLLECTIONS_PATHS variable
|
||||
minor_changes:
|
||||
- Add test to only_ipv4 in inventory
|
||||
- add an option to use only IPv4 only for ansible_host and ansible_ssh_host
|
||||
release_summary: Bugfixes and minor changes
|
||||
release_date: '2024-12-22'
|
||||
2.4.0:
|
||||
changes:
|
||||
major_changes:
|
||||
- Add trait module
|
||||
minor_changes:
|
||||
- Add loadbalancer quota options
|
||||
- Allow create instance with tags
|
||||
release_summary: New trait module and minor changes
|
||||
modules:
|
||||
- description: Add or Delete a trait from OpenStack
|
||||
name: trait
|
||||
namespace: ''
|
||||
release_date: '2025-01-15'
|
||||
2.4.1:
|
||||
changes:
|
||||
bugfixes:
|
||||
- Fix missed client_cert in OpenStackModule
|
||||
minor_changes:
|
||||
- Update tags when changing server
|
||||
release_summary: Bugfixes and minor changes
|
||||
release_date: '2024-01-20'
|
||||
2.5.0:
|
||||
changes:
|
||||
major_changes:
|
||||
- Add import_method to module
|
||||
- Add object_containers_info module
|
||||
- Add support for filters in inventory
|
||||
- Add volume_manage module
|
||||
- Introduce share_type modules
|
||||
minor_changes:
|
||||
- Allow role_assignment module to work cross domain
|
||||
- Don't compare current state for `reboot_*` actions
|
||||
- Fix disable_gateway_ip for subnet
|
||||
- Fix example in the dns_zone_info module doc
|
||||
- Fix router module external IPs when only subnet specified
|
||||
- Fix the bug reporting url
|
||||
- Let clouds_yaml_path behave as documented (Override path to clouds.yaml file)
|
||||
- Shows missing data in `stack_info` module output
|
||||
release_summary: Bugfixes and minor changes
|
||||
release_date: '2025-10-24'
|
||||
|
||||
5
changelogs/fragments/baremetal_port_group_module.yaml
Normal file
5
changelogs/fragments/baremetal_port_group_module.yaml
Normal file
@@ -0,0 +1,5 @@
|
||||
---
|
||||
minor_changes:
|
||||
- Added the new ``openstack.cloud.baremetal_port_group`` module to manage
|
||||
Bare Metal port groups (create, update, and delete), including CI role
|
||||
coverage and unit tests.
|
||||
@@ -1,7 +0,0 @@
|
||||
---
|
||||
|
||||
bugfixes:
|
||||
- |
|
||||
coe_cluster_template - now labels are converted to strings by module that
|
||||
fixes module idempotence in case label values defined by users are
|
||||
integers or booleans.
|
||||
@@ -1,7 +0,0 @@
|
||||
---
|
||||
|
||||
minor_changes:
|
||||
- |
|
||||
openstack.cloud.image - Added new `inactive` option for the image `state`
|
||||
It will deactivate the image. Setting state `present` can re-activate it
|
||||
again for deactivated previously images.
|
||||
@@ -3,7 +3,8 @@
|
||||
vars:
|
||||
collection_path: "{{ ansible_user_dir }}/{{ zuul.project.src_dir }}"
|
||||
build_collection_path: /tmp/collection_built/
|
||||
ansible_galaxy_path: "~/.local/bin/ansible-galaxy"
|
||||
ansible_virtualenv_path: /tmp/ansible_venv
|
||||
ansible_galaxy_path: "{{ ansible_virtualenv_path }}/bin/ansible-galaxy"
|
||||
|
||||
tasks:
|
||||
|
||||
@@ -11,9 +12,15 @@
|
||||
include_role:
|
||||
name: ensure-pip
|
||||
|
||||
- name: Install ansible
|
||||
- name: Install Ansible in virtualenv
|
||||
pip:
|
||||
name: ansible-core<2.12
|
||||
name: ansible-core<2.19
|
||||
virtualenv: "{{ ansible_virtualenv_path }}"
|
||||
virtualenv_command: "{{ ensure_pip_virtualenv_command }}"
|
||||
|
||||
- name: Detect ansible version
|
||||
command: "{{ ansible_virtualenv_path }}/bin/ansible --version"
|
||||
register: ansible_version
|
||||
|
||||
- name: Discover tag version
|
||||
set_fact:
|
||||
|
||||
12
ci/roles/baremetal_port_group/defaults/main.yml
Normal file
12
ci/roles/baremetal_port_group/defaults/main.yml
Normal file
@@ -0,0 +1,12 @@
|
||||
expected_fields:
|
||||
- address
|
||||
- created_at
|
||||
- extra
|
||||
- id
|
||||
- links
|
||||
- mode
|
||||
- name
|
||||
- node_id
|
||||
- properties
|
||||
- standalone_ports_supported
|
||||
- updated_at
|
||||
100
ci/roles/baremetal_port_group/tasks/main.yml
Normal file
100
ci/roles/baremetal_port_group/tasks/main.yml
Normal file
@@ -0,0 +1,100 @@
|
||||
---
|
||||
# TODO: Actually run this role in CI. Atm we do not have DevStack's ironic plugin enabled.
|
||||
- name: Create baremetal node
|
||||
openstack.cloud.baremetal_node:
|
||||
cloud: "{{ cloud }}"
|
||||
driver_info:
|
||||
ipmi_address: "1.2.3.4"
|
||||
ipmi_username: "admin"
|
||||
ipmi_password: "secret"
|
||||
name: ansible_baremetal_node
|
||||
nics:
|
||||
- mac: "aa:bb:cc:aa:bb:cc"
|
||||
state: present
|
||||
register: node
|
||||
|
||||
- name: Create baremetal port group
|
||||
openstack.cloud.baremetal_port_group:
|
||||
cloud: "{{ cloud }}"
|
||||
state: present
|
||||
name: ansible_baremetal_port_group
|
||||
node: ansible_baremetal_node
|
||||
address: fa:16:3e:aa:aa:ab
|
||||
mode: active-backup
|
||||
standalone_ports_supported: true
|
||||
extra:
|
||||
test: created
|
||||
properties:
|
||||
miimon: '100'
|
||||
register: port_group
|
||||
|
||||
- debug: var=port_group
|
||||
|
||||
- name: Assert return values of baremetal_port_group module
|
||||
assert:
|
||||
that:
|
||||
# allow new fields to be introduced but prevent fields from being removed
|
||||
- expected_fields|difference(port_group.port_group.keys())|length == 0
|
||||
- port_group.port_group.name == "ansible_baremetal_port_group"
|
||||
- port_group.port_group.node_id == node.node.id
|
||||
|
||||
- name: Update baremetal port group
|
||||
openstack.cloud.baremetal_port_group:
|
||||
cloud: "{{ cloud }}"
|
||||
state: present
|
||||
id: "{{ port_group.port_group.id }}"
|
||||
mode: 802.3ad
|
||||
standalone_ports_supported: false
|
||||
extra:
|
||||
test: updated
|
||||
register: updated_port_group
|
||||
|
||||
- name: Assert return values of updated baremetal port group
|
||||
assert:
|
||||
that:
|
||||
- updated_port_group is changed
|
||||
- updated_port_group.port_group.id == port_group.port_group.id
|
||||
- updated_port_group.port_group.mode == "802.3ad"
|
||||
- not updated_port_group.port_group.standalone_ports_supported
|
||||
- updated_port_group.port_group.extra.test == "updated"
|
||||
|
||||
- name: Update baremetal port group again
|
||||
openstack.cloud.baremetal_port_group:
|
||||
cloud: "{{ cloud }}"
|
||||
state: present
|
||||
id: "{{ port_group.port_group.id }}"
|
||||
mode: 802.3ad
|
||||
standalone_ports_supported: false
|
||||
extra:
|
||||
test: updated
|
||||
register: updated_port_group
|
||||
|
||||
- name: Assert idempotency for baremetal port group module
|
||||
assert:
|
||||
that:
|
||||
- updated_port_group is not changed
|
||||
- updated_port_group.port_group.id == port_group.port_group.id
|
||||
|
||||
- name: Delete baremetal port group
|
||||
openstack.cloud.baremetal_port_group:
|
||||
cloud: "{{ cloud }}"
|
||||
state: absent
|
||||
id: "{{ port_group.port_group.id }}"
|
||||
|
||||
- name: Delete baremetal port group again
|
||||
openstack.cloud.baremetal_port_group:
|
||||
cloud: "{{ cloud }}"
|
||||
state: absent
|
||||
id: "{{ port_group.port_group.id }}"
|
||||
register: deleted_port_group
|
||||
|
||||
- name: Assert idempotency for deleted baremetal port group
|
||||
assert:
|
||||
that:
|
||||
- deleted_port_group is not changed
|
||||
|
||||
- name: Delete baremetal node
|
||||
openstack.cloud.baremetal_node:
|
||||
cloud: "{{ cloud }}"
|
||||
name: ansible_baremetal_node
|
||||
state: absent
|
||||
@@ -72,6 +72,8 @@
|
||||
image_id: '{{ image_id }}'
|
||||
is_floating_ip_enabled: true
|
||||
keypair_id: '{{ keypair.keypair.id }}'
|
||||
flavor_id: 'm1.small'
|
||||
master_flavor_id: 'm1.small'
|
||||
name: k8s
|
||||
state: present
|
||||
register: coe_cluster_template
|
||||
|
||||
@@ -241,7 +241,7 @@
|
||||
that:
|
||||
- server1_fips is success
|
||||
- server1_fips is not changed
|
||||
- server1_fips.floating_ips
|
||||
- server1_fips.floating_ips|length > 0
|
||||
# allow new fields to be introduced but prevent fields from being removed
|
||||
- expected_fields|difference(server1_fips.floating_ips[0].keys())|length == 0
|
||||
|
||||
@@ -260,7 +260,7 @@
|
||||
- name: Assert return values of floating_ip module
|
||||
assert:
|
||||
that:
|
||||
- floating_ip.floating_ip
|
||||
- floating_ip.floating_ip|length > 0
|
||||
# allow new fields to be introduced but prevent fields from being removed
|
||||
- expected_fields|difference(floating_ip.floating_ip.keys())|length == 0
|
||||
|
||||
@@ -312,7 +312,7 @@
|
||||
- name: Assert floating ip attached to server 2
|
||||
assert:
|
||||
that:
|
||||
- server2_fip.floating_ip
|
||||
- server2_fip.floating_ip|length > 0
|
||||
|
||||
- name: Find all floating ips for debugging
|
||||
openstack.cloud.floating_ip_info:
|
||||
|
||||
@@ -279,6 +279,11 @@
|
||||
ansible.builtin.set_fact:
|
||||
cache: "{{ cache.content | b64decode | from_yaml }}"
|
||||
|
||||
- name: Further process Ansible 2.19+ cache
|
||||
ansible.builtin.set_fact:
|
||||
cache: "{{ cache.__payload__ | from_yaml }}"
|
||||
when: cache.__payload__ is defined
|
||||
|
||||
- name: Check Ansible's cache
|
||||
assert:
|
||||
that:
|
||||
@@ -303,6 +308,25 @@
|
||||
that:
|
||||
- inventory.all.children.RegionOne.hosts.keys() | sort == ['ansible_server1', 'ansible_server2'] | sort
|
||||
|
||||
- name: List servers with inventory plugin with IPv4 only
|
||||
ansible.builtin.command:
|
||||
cmd: ansible-inventory --list --yaml --extra-vars only_ipv4=true --inventory-file openstack.yaml
|
||||
chdir: "{{ tmp_dir.path }}"
|
||||
environment:
|
||||
ANSIBLE_INVENTORY_CACHE: "True"
|
||||
ANSIBLE_INVENTORY_CACHE_PLUGIN: "jsonfile"
|
||||
ANSIBLE_CACHE_PLUGIN_CONNECTION: "{{ tmp_dir.path }}/.cache/"
|
||||
register: inventory
|
||||
|
||||
- name: Read YAML output from inventory plugin again
|
||||
ansible.builtin.set_fact:
|
||||
inventory: "{{ inventory.stdout | from_yaml }}"
|
||||
|
||||
- name: Check YAML output from inventory plugin again
|
||||
assert:
|
||||
that:
|
||||
- inventory.all.children.RegionOne.hosts.keys() | sort == ['ansible_server1', 'ansible_server2'] | sort
|
||||
|
||||
- name: Delete server 2
|
||||
openstack.cloud.resource:
|
||||
service: compute
|
||||
|
||||
@@ -38,7 +38,7 @@
|
||||
- name: Ensure public key is returned
|
||||
assert:
|
||||
that:
|
||||
- keypair.keypair.public_key is defined and keypair.keypair.public_key
|
||||
- keypair.keypair.public_key is defined and keypair.keypair.public_key|length > 0
|
||||
|
||||
- name: Create another keypair
|
||||
openstack.cloud.keypair:
|
||||
|
||||
@@ -11,7 +11,7 @@
|
||||
- name: Check output of creating network
|
||||
assert:
|
||||
that:
|
||||
- infonet.network
|
||||
- infonet.network is defined
|
||||
- item in infonet.network
|
||||
loop: "{{ expected_fields }}"
|
||||
|
||||
|
||||
17
ci/roles/network_segment/defaults/main.yml
Normal file
17
ci/roles/network_segment/defaults/main.yml
Normal file
@@ -0,0 +1,17 @@
|
||||
---
|
||||
expected_fields:
|
||||
- description
|
||||
- id
|
||||
- name
|
||||
- network_id
|
||||
- network_type
|
||||
- physical_network
|
||||
- segmentation_id
|
||||
|
||||
network_name: segment_network
|
||||
segment_name: example_segment
|
||||
network_type: vlan
|
||||
segmentation_id: 999
|
||||
physical_network: public
|
||||
initial_description: "example segment description"
|
||||
updated_description: "updated segment description"
|
||||
72
ci/roles/network_segment/tasks/main.yml
Normal file
72
ci/roles/network_segment/tasks/main.yml
Normal file
@@ -0,0 +1,72 @@
|
||||
---
|
||||
- name: Create network {{ network_name }}
|
||||
openstack.cloud.network:
|
||||
cloud: "{{ cloud }}"
|
||||
name: "{{ network_name }}"
|
||||
state: present
|
||||
|
||||
- name: Create segment {{ segment_name }}
|
||||
openstack.cloud.network_segment:
|
||||
cloud: "{{ cloud }}"
|
||||
name: "{{ segment_name }}"
|
||||
description: "{{ initial_description }}"
|
||||
network: "{{ network_name }}"
|
||||
network_type: "{{ network_type }}"
|
||||
segmentation_id: "{{ segmentation_id }}"
|
||||
physical_network: "{{ physical_network }}"
|
||||
state: present
|
||||
register: segment
|
||||
|
||||
- name: Assert changed
|
||||
assert:
|
||||
that: segment is changed
|
||||
|
||||
- name: Assert segment fields
|
||||
assert:
|
||||
that: item in segment.network_segment
|
||||
loop: "{{ expected_fields }}"
|
||||
|
||||
- name: Update segment {{ segment_name }} by name - no changes
|
||||
openstack.cloud.network_segment:
|
||||
cloud: "{{ cloud }}"
|
||||
name: "{{ segment_name }}"
|
||||
description: "{{ initial_description }}"
|
||||
state: present
|
||||
register: segment
|
||||
|
||||
- name: Assert not changed
|
||||
assert:
|
||||
that: segment is not changed
|
||||
|
||||
- name: Update segment {{ segment_name }} by all fields - changes
|
||||
openstack.cloud.network_segment:
|
||||
cloud: "{{ cloud }}"
|
||||
name: "{{ segment_name }}"
|
||||
description: "{{ updated_description }}"
|
||||
network: "{{ network_name }}"
|
||||
network_type: "{{ network_type }}"
|
||||
segmentation_id: "{{ segmentation_id }}"
|
||||
physical_network: "{{ physical_network }}"
|
||||
state: present
|
||||
register: segment
|
||||
|
||||
- name: Assert changed
|
||||
assert:
|
||||
that: segment is changed
|
||||
|
||||
- name: Delete segment {{ segment_name }}
|
||||
openstack.cloud.network_segment:
|
||||
cloud: "{{ cloud }}"
|
||||
name: "{{ segment_name }}"
|
||||
state: absent
|
||||
register: segment
|
||||
|
||||
- name: Assert changed
|
||||
assert:
|
||||
that: segment is changed
|
||||
|
||||
- name: Delete network {{ network_name }}
|
||||
openstack.cloud.network:
|
||||
cloud: "{{ cloud }}"
|
||||
name: "{{ network_name }}"
|
||||
state: absent
|
||||
37
ci/roles/object_containers_info/defaults/main.yml
Normal file
37
ci/roles/object_containers_info/defaults/main.yml
Normal file
@@ -0,0 +1,37 @@
|
||||
---
|
||||
|
||||
test_container_unprefixed_name: ansible-test-container
|
||||
test_container_prefixed_prefix: ansible-prefixed-test-container
|
||||
test_container_prefixed_num: 2
|
||||
|
||||
test_object_data: "Hello, world!"
|
||||
|
||||
expected_fields_single:
|
||||
- bytes
|
||||
- bytes_used
|
||||
- content_type
|
||||
- count
|
||||
- history_location
|
||||
- id
|
||||
- if_none_match
|
||||
- is_content_type_detected
|
||||
- is_newest
|
||||
- meta_temp_url_key
|
||||
- meta_temp_url_key_2
|
||||
- name
|
||||
- object_count
|
||||
- read_ACL
|
||||
- storage_policy
|
||||
- sync_key
|
||||
- sync_to
|
||||
- timestamp
|
||||
- versions_location
|
||||
- write_ACL
|
||||
|
||||
expected_fields_multiple:
|
||||
- bytes
|
||||
- bytes_used
|
||||
- count
|
||||
- id
|
||||
- name
|
||||
- object_count
|
||||
124
ci/roles/object_containers_info/tasks/main.yml
Normal file
124
ci/roles/object_containers_info/tasks/main.yml
Normal file
@@ -0,0 +1,124 @@
|
||||
---
|
||||
|
||||
- name: Generate list of containers to create
|
||||
ansible.builtin.set_fact:
|
||||
all_test_containers: >-
|
||||
{{
|
||||
[test_container_unprefixed_name]
|
||||
+ (
|
||||
[test_container_prefixed_prefix + '-']
|
||||
| product(range(test_container_prefixed_num) | map('string'))
|
||||
| map('join', '')
|
||||
)
|
||||
}}
|
||||
|
||||
- name: Run checks
|
||||
block:
|
||||
|
||||
- name: Create all containers
|
||||
openstack.cloud.object_container:
|
||||
cloud: "{{ cloud }}"
|
||||
name: "{{ item }}"
|
||||
read_ACL: ".r:*,.rlistings"
|
||||
loop: "{{ all_test_containers }}"
|
||||
|
||||
- name: Create an object in all containers
|
||||
openstack.cloud.object:
|
||||
cloud: "{{ cloud }}"
|
||||
container: "{{ item }}"
|
||||
name: hello.txt
|
||||
data: "{{ test_object_data }}"
|
||||
loop: "{{ all_test_containers }}"
|
||||
|
||||
- name: Fetch single containers by name
|
||||
openstack.cloud.object_containers_info:
|
||||
cloud: "{{ cloud }}"
|
||||
name: "{{ item }}"
|
||||
register: single_containers
|
||||
loop: "{{ all_test_containers }}"
|
||||
|
||||
- name: Check that all fields are returned for single containers
|
||||
ansible.builtin.assert:
|
||||
that:
|
||||
- (item.containers | length) == 1
|
||||
- item.containers[0].name == item.item
|
||||
- item.containers[0].bytes == (test_object_data | length)
|
||||
- item.containers[0].read_ACL == ".r:*,.rlistings"
|
||||
# allow new fields to be introduced but prevent fields from being removed
|
||||
- (expected_fields_single | difference(item.containers[0].keys()) | length) == 0
|
||||
quiet: true
|
||||
loop: "{{ single_containers.results }}"
|
||||
loop_control:
|
||||
label: "{{ item.item }}"
|
||||
|
||||
- name: Fetch multiple containers by prefix
|
||||
openstack.cloud.object_containers_info:
|
||||
cloud: "{{ cloud }}"
|
||||
prefix: "{{ test_container_prefixed_prefix }}"
|
||||
register: multiple_containers
|
||||
|
||||
- name: Check that the correct number of prefixed containers were returned
|
||||
ansible.builtin.assert:
|
||||
that:
|
||||
- (multiple_containers.containers | length) == test_container_prefixed_num
|
||||
fail_msg: >-
|
||||
Incorrect number of containers found
|
||||
(found {{ multiple_containers.containers | length }},
|
||||
expected {{ test_container_prefixed_num }})
|
||||
quiet: true
|
||||
|
||||
- name: Check that all prefixed containers exist
|
||||
ansible.builtin.assert:
|
||||
that:
|
||||
- >-
|
||||
(test_container_prefixed_prefix + '-' + (item | string))
|
||||
in (multiple_containers.containers | map(attribute='name'))
|
||||
fail_msg: "Container not found: {{ test_container_prefixed_prefix + '-' + (item | string) }}"
|
||||
quiet: true
|
||||
loop: "{{ range(test_container_prefixed_num) | list }}"
|
||||
loop_control:
|
||||
label: "{{ test_container_prefixed_prefix + '-' + (item | string) }}"
|
||||
|
||||
- name: Check that the expected fields are returned for all prefixed containers
|
||||
ansible.builtin.assert:
|
||||
that:
|
||||
- item.name.startswith(test_container_prefixed_prefix)
|
||||
# allow new fields to be introduced but prevent fields from being removed
|
||||
- (expected_fields_multiple | difference(item.keys()) | length) == 0
|
||||
quiet: true
|
||||
loop: "{{ multiple_containers.containers | sort(attribute='name') }}"
|
||||
loop_control:
|
||||
label: "{{ item.name }}"
|
||||
|
||||
- name: Fetch all containers
|
||||
openstack.cloud.object_containers_info:
|
||||
cloud: "{{ cloud }}"
|
||||
register: all_containers
|
||||
|
||||
- name: Check that all expected containers were returned
|
||||
ansible.builtin.assert:
|
||||
that:
|
||||
- item in (all_containers.containers | map(attribute='name'))
|
||||
fail_msg: "Container not found: {{ item }}"
|
||||
quiet: true
|
||||
loop: "{{ all_test_containers }}"
|
||||
|
||||
- name: Check that the expected fields are returned for all containers
|
||||
ansible.builtin.assert:
|
||||
that:
|
||||
# allow new fields to be introduced but prevent fields from being removed
|
||||
- (expected_fields_multiple | difference(item.keys()) | length) == 0
|
||||
quiet: true
|
||||
loop: "{{ all_containers.containers | selectattr('name', 'in', all_test_containers) }}"
|
||||
loop_control:
|
||||
label: "{{ item.name }}"
|
||||
|
||||
always:
|
||||
|
||||
- name: Delete all containers
|
||||
openstack.cloud.object_container:
|
||||
cloud: "{{ cloud }}"
|
||||
name: "{{ item }}"
|
||||
state: absent
|
||||
delete_with_all_objects: true
|
||||
loop: "{{ all_test_containers }}"
|
||||
@@ -174,6 +174,38 @@
|
||||
that:
|
||||
- project.project.is_enabled == True
|
||||
|
||||
- name: Update project to add new extra_specs
|
||||
openstack.cloud.project:
|
||||
cloud: "{{ cloud }}"
|
||||
state: present
|
||||
name: ansible_project
|
||||
extra_specs:
|
||||
is_enabled: True
|
||||
another_tag: True
|
||||
register: project
|
||||
|
||||
- name: Assert return values of project module
|
||||
assert:
|
||||
that:
|
||||
- project.project.is_enabled == True
|
||||
- project.project.another_tag == True
|
||||
|
||||
- name: Update project to change existing extra_specs
|
||||
openstack.cloud.project:
|
||||
cloud: "{{ cloud }}"
|
||||
state: present
|
||||
name: ansible_project
|
||||
extra_specs:
|
||||
is_enabled: True
|
||||
another_tag: False
|
||||
register: project
|
||||
|
||||
- name: Assert return values of project module
|
||||
assert:
|
||||
that:
|
||||
- project.project.is_enabled == True
|
||||
- project.project.another_tag == False
|
||||
|
||||
- name: Delete project
|
||||
openstack.cloud.project:
|
||||
cloud: "{{ cloud }}"
|
||||
|
||||
@@ -28,3 +28,9 @@ test_compute_quota:
|
||||
ram: 5
|
||||
server_group_members: 5
|
||||
server_groups: 5
|
||||
test_load_balancer_quota:
|
||||
load_balancers: 5
|
||||
health_monitors: 5
|
||||
listeners: 5
|
||||
pools: 5
|
||||
members: 5
|
||||
|
||||
158
ci/roles/quota/tasks/loadbalancer.yml
Normal file
158
ci/roles/quota/tasks/loadbalancer.yml
Normal file
@@ -0,0 +1,158 @@
|
||||
---
|
||||
- module_defaults:
|
||||
group/openstack.cloud.openstack:
|
||||
cloud: "{{ cloud }}"
|
||||
name: "{{ test_project }}"
|
||||
# Backward compatibility with Ansible 2.9
|
||||
openstack.cloud.project:
|
||||
cloud: "{{ cloud }}"
|
||||
name: "{{ test_project }}"
|
||||
openstack.cloud.quota:
|
||||
cloud: "{{ cloud }}"
|
||||
name: "{{ test_project }}"
|
||||
block:
|
||||
- name: Create test project
|
||||
openstack.cloud.project:
|
||||
state: present
|
||||
|
||||
- name: Clear quotas before tests
|
||||
openstack.cloud.quota:
|
||||
state: absent
|
||||
register: default_quotas
|
||||
|
||||
- name: Set network quota
|
||||
openstack.cloud.quota: "{{ test_network_quota }}"
|
||||
register: quotas
|
||||
|
||||
- name: Assert changed
|
||||
assert:
|
||||
that: quotas is changed
|
||||
|
||||
- name: Assert field values
|
||||
assert:
|
||||
that: quotas.quotas.network[item.key] == item.value
|
||||
loop: "{{ test_network_quota | dict2items }}"
|
||||
|
||||
- name: Set network quota again
|
||||
openstack.cloud.quota: "{{ test_network_quota }}"
|
||||
register: quotas
|
||||
|
||||
- name: Assert not changed
|
||||
assert:
|
||||
that: quotas is not changed
|
||||
|
||||
- name: Set volume quotas
|
||||
openstack.cloud.quota: "{{ test_volume_quota }}"
|
||||
register: quotas
|
||||
|
||||
- name: Assert changed
|
||||
assert:
|
||||
that: quotas is changed
|
||||
|
||||
- name: Assert field values
|
||||
assert:
|
||||
that: quotas.quotas.volume[item.key] == item.value
|
||||
loop: "{{ test_volume_quota | dict2items }}"
|
||||
|
||||
- name: Set volume quotas again
|
||||
openstack.cloud.quota: "{{ test_volume_quota }}"
|
||||
register: quotas
|
||||
|
||||
- name: Assert not changed
|
||||
assert:
|
||||
that: quotas is not changed
|
||||
|
||||
- name: Set compute quotas
|
||||
openstack.cloud.quota: "{{ test_compute_quota }}"
|
||||
register: quotas
|
||||
|
||||
- name: Assert changed
|
||||
assert:
|
||||
that: quotas is changed
|
||||
|
||||
- name: Assert field values
|
||||
assert:
|
||||
that: quotas.quotas.compute[item.key] == item.value
|
||||
loop: "{{ test_compute_quota | dict2items }}"
|
||||
|
||||
- name: Set compute quotas again
|
||||
openstack.cloud.quota: "{{ test_compute_quota }}"
|
||||
register: quotas
|
||||
|
||||
- name: Set load_balancer quotas
|
||||
openstack.cloud.quota: "{{ test_load_balancer_quota }}"
|
||||
register: quotas
|
||||
|
||||
- name: Assert changed
|
||||
assert:
|
||||
that: quotas is changed
|
||||
|
||||
- name: Assert field values
|
||||
assert:
|
||||
that: quotas.quotas.load_balancer[item.key] == item.value
|
||||
loop: "{{ test_load_balancer_quota | dict2items }}"
|
||||
|
||||
- name: Set load_balancer quotas again
|
||||
openstack.cloud.quota: "{{ test_load_balancer_quota }}"
|
||||
register: quotas
|
||||
|
||||
- name: Assert not changed
|
||||
assert:
|
||||
that: quotas is not changed
|
||||
|
||||
- name: Unset all quotas
|
||||
openstack.cloud.quota:
|
||||
state: absent
|
||||
register: quotas
|
||||
|
||||
- name: Assert defaults restore
|
||||
assert:
|
||||
that: quotas.quotas == default_quotas.quotas
|
||||
|
||||
- name: Set all quotas at once
|
||||
openstack.cloud.quota:
|
||||
"{{ [test_network_quota, test_volume_quota, test_compute_quota, test_load_balancer_quota] | combine }}"
|
||||
register: quotas
|
||||
|
||||
- name: Assert changed
|
||||
assert:
|
||||
that: quotas is changed
|
||||
|
||||
- name: Assert volume values
|
||||
assert:
|
||||
that: quotas.quotas.volume[item.key] == item.value
|
||||
loop: "{{ test_volume_quota | dict2items }}"
|
||||
|
||||
- name: Assert network values
|
||||
assert:
|
||||
that: quotas.quotas.network[item.key] == item.value
|
||||
loop: "{{ test_network_quota | dict2items }}"
|
||||
|
||||
- name: Assert compute values
|
||||
assert:
|
||||
that: quotas.quotas.compute[item.key] == item.value
|
||||
loop: "{{ test_compute_quota | dict2items }}"
|
||||
|
||||
- name: Assert load_balancer values
|
||||
assert:
|
||||
that: quotas.quotas.load_balancer[item.key] == item.value
|
||||
loop: "{{ test_load_balancer_quota | dict2items }}"
|
||||
|
||||
- name: Set all quotas at once again
|
||||
openstack.cloud.quota:
|
||||
"{{ [test_network_quota, test_volume_quota, test_compute_quota, test_load_balancer_quota] | combine }}"
|
||||
register: quotas
|
||||
|
||||
- name: Assert not changed
|
||||
assert:
|
||||
that: quotas is not changed
|
||||
|
||||
- name: Unset all quotas
|
||||
openstack.cloud.quota:
|
||||
state: absent
|
||||
register: quotas
|
||||
|
||||
- name: Delete test project
|
||||
openstack.cloud.project:
|
||||
state: absent
|
||||
|
||||
@@ -128,4 +128,9 @@
|
||||
|
||||
- name: Delete test project
|
||||
openstack.cloud.project:
|
||||
state: absent
|
||||
state: absent
|
||||
|
||||
- import_tasks: loadbalancer.yml
|
||||
tags:
|
||||
- loadbalancer
|
||||
|
||||
|
||||
@@ -14,6 +14,15 @@
|
||||
email: test@example.net
|
||||
register: dns_zone
|
||||
|
||||
- name: Ensure recordset not present
|
||||
openstack.cloud.recordset:
|
||||
cloud: "{{ cloud }}"
|
||||
zone: "{{ dns_zone.zone.name }}"
|
||||
name: "{{ recordset_name }}"
|
||||
recordset_type: "a"
|
||||
records: "{{ records }}"
|
||||
state: absent
|
||||
|
||||
- name: Create a recordset
|
||||
openstack.cloud.recordset:
|
||||
cloud: "{{ cloud }}"
|
||||
@@ -22,11 +31,13 @@
|
||||
recordset_type: "a"
|
||||
records: "{{ records }}"
|
||||
register: recordset
|
||||
until: '"PENDING" not in recordset["recordset"].status'
|
||||
retries: 10
|
||||
delay: 5
|
||||
|
||||
- name: Verify recordset info
|
||||
assert:
|
||||
that:
|
||||
- recordset is changed
|
||||
- recordset["recordset"].name == recordset_name
|
||||
- recordset["recordset"].zone_name == dns_zone.zone.name
|
||||
- recordset["recordset"].records | list | sort == records | list | sort
|
||||
|
||||
@@ -45,12 +45,6 @@
|
||||
state: absent
|
||||
user: admin
|
||||
|
||||
- name: Delete project
|
||||
openstack.cloud.project:
|
||||
cloud: "{{ cloud }}"
|
||||
state: absent
|
||||
name: ansible_project
|
||||
|
||||
- name: Create domain
|
||||
openstack.cloud.identity_domain:
|
||||
cloud: "{{ cloud }}"
|
||||
@@ -78,6 +72,7 @@
|
||||
state: present
|
||||
name: ansible_user
|
||||
domain: default
|
||||
register: specific_user
|
||||
|
||||
- name: Create user in specific domain
|
||||
openstack.cloud.identity_user:
|
||||
@@ -138,6 +133,45 @@
|
||||
that:
|
||||
- role_assignment is changed
|
||||
|
||||
- name: Assign role to user in specific domain on default domain project
|
||||
openstack.cloud.role_assignment:
|
||||
cloud: "{{ cloud }}"
|
||||
role: anotherrole
|
||||
user: "{{ specific_user.user.id }}"
|
||||
domain: default
|
||||
project: ansible_project
|
||||
register: role_assignment
|
||||
|
||||
- name: Assert role assignment
|
||||
assert:
|
||||
that:
|
||||
- role_assignment is changed
|
||||
|
||||
- name: Revoke role to user in specific domain
|
||||
openstack.cloud.role_assignment:
|
||||
cloud: "{{ cloud }}"
|
||||
role: anotherrole
|
||||
user: "{{ specific_user.user.id }}"
|
||||
domain: default
|
||||
project: ansible_project
|
||||
state: absent
|
||||
register: role_assignment
|
||||
|
||||
- name: Assert role assignment revoked
|
||||
assert:
|
||||
that:
|
||||
- role_assignment is changed
|
||||
|
||||
- name: Assign role to user in specific domain on default domain project
|
||||
openstack.cloud.role_assignment:
|
||||
cloud: "{{ cloud }}"
|
||||
role: anotherrole
|
||||
user: ansible_user
|
||||
user_domain: "{{ specific_user.user.domain_id }}"
|
||||
project: ansible_project
|
||||
project_domain: default
|
||||
register: role_assignment
|
||||
|
||||
- name: Delete group in default domain
|
||||
openstack.cloud.identity_group:
|
||||
cloud: "{{ cloud }}"
|
||||
@@ -171,3 +205,10 @@
|
||||
cloud: "{{ cloud }}"
|
||||
state: absent
|
||||
name: ansible_domain
|
||||
|
||||
- name: Delete project
|
||||
openstack.cloud.project:
|
||||
cloud: "{{ cloud }}"
|
||||
state: absent
|
||||
name: ansible_project
|
||||
|
||||
|
||||
@@ -558,6 +558,46 @@
|
||||
assert:
|
||||
that: router is not changed
|
||||
|
||||
- name: Create router without explicit IP address
|
||||
openstack.cloud.router:
|
||||
cloud: "{{ cloud }}"
|
||||
state: present
|
||||
name: "{{ router_name }}"
|
||||
enable_snat: false
|
||||
interfaces:
|
||||
- shade_subnet1
|
||||
network: "{{ external_network_name }}"
|
||||
external_fixed_ips:
|
||||
- subnet_id: shade_subnet5
|
||||
register: router
|
||||
|
||||
- name: Assert idempotent module
|
||||
assert:
|
||||
that: router is changed
|
||||
|
||||
- name: Update router without explicit IP address
|
||||
openstack.cloud.router:
|
||||
cloud: "{{ cloud }}"
|
||||
state: present
|
||||
name: "{{ router_name }}"
|
||||
enable_snat: false
|
||||
interfaces:
|
||||
- shade_subnet1
|
||||
network: "{{ external_network_name }}"
|
||||
external_fixed_ips:
|
||||
- subnet_id: shade_subnet5
|
||||
register: router
|
||||
|
||||
- name: Assert idempotent module
|
||||
assert:
|
||||
that: router is not changed
|
||||
|
||||
- name: Delete router
|
||||
openstack.cloud.router:
|
||||
cloud: "{{ cloud }}"
|
||||
state: absent
|
||||
name: "{{ router_name }}"
|
||||
|
||||
- name: Create router with simple interface
|
||||
openstack.cloud.router:
|
||||
cloud: "{{ cloud }}"
|
||||
|
||||
@@ -399,6 +399,9 @@
|
||||
- port-id: "{{ port.port.id }}"
|
||||
reuse_ips: false
|
||||
state: present
|
||||
tags:
|
||||
- first
|
||||
- second
|
||||
wait: true
|
||||
register: server
|
||||
|
||||
@@ -413,6 +416,7 @@
|
||||
|selectattr('OS-EXT-IPS:type', 'equalto', 'floating')
|
||||
|map(attribute='addr')
|
||||
|list|length == 0
|
||||
- server.server.tags == ["first", "second"]
|
||||
|
||||
- name: Find all floating ips for debugging
|
||||
openstack.cloud.floating_ip_info:
|
||||
@@ -454,6 +458,8 @@
|
||||
- '{{ server_security_group }}'
|
||||
- '{{ server_alt_security_group }}'
|
||||
state: present
|
||||
tags:
|
||||
- yellow
|
||||
wait: true
|
||||
register: server_updated
|
||||
|
||||
@@ -475,6 +481,7 @@
|
||||
- server_updated.server.addresses[server_network]|length == 2
|
||||
- port.port.fixed_ips[0].ip_address in
|
||||
server_updated.server.addresses[server_network]|map(attribute='addr')
|
||||
- server_updated.server.tags == ['yellow']
|
||||
# TODO: Verify networks once openstacksdk's issue #2010352 has been solved
|
||||
# Ref.: https://storyboard.openstack.org/#!/story/2010352
|
||||
#- server_updated.server.addresses.public|length > 0
|
||||
@@ -509,6 +516,8 @@
|
||||
- '{{ server_security_group }}'
|
||||
- '{{ server_alt_security_group }}'
|
||||
state: present
|
||||
tags:
|
||||
- yellow
|
||||
wait: true
|
||||
register: server_updated_again
|
||||
|
||||
@@ -517,6 +526,7 @@
|
||||
that:
|
||||
- server.server.id == server_updated_again.server.id
|
||||
- server_updated_again is not changed
|
||||
- server_updated_again.server.tags == ['yellow']
|
||||
|
||||
# TODO: Drop failure test once openstacksdk's issue #2010352 has been solved
|
||||
# Ref.: https://storyboard.openstack.org/#!/story/2010352
|
||||
|
||||
@@ -460,20 +460,14 @@
|
||||
register: server
|
||||
ignore_errors: true
|
||||
|
||||
- name: Assert shelve offload server
|
||||
assert:
|
||||
that:
|
||||
- ((server is success)
|
||||
or (server is not success
|
||||
and "Cannot 'shelveOffload' instance" in server.msg
|
||||
and ("while it is in vm_state shelved_offloaded" in server.msg
|
||||
or "while it is in task_state shelving_offloading" in server.msg )))
|
||||
|
||||
- name: Get info about server
|
||||
openstack.cloud.server_info:
|
||||
cloud: "{{ cloud }}"
|
||||
server: ansible_server
|
||||
register: servers
|
||||
until: servers.servers.0.task_state == none
|
||||
retries: 30
|
||||
delay: 10
|
||||
|
||||
- name: Ensure status for server is SHELVED_OFFLOADED
|
||||
# no change if server has been offloaded automatically after first shelve command
|
||||
@@ -559,7 +553,7 @@
|
||||
assert:
|
||||
that:
|
||||
- servers.servers.0.status == 'ACTIVE'
|
||||
- server is not changed
|
||||
- server is changed
|
||||
|
||||
- name: Reboot server (HARD)
|
||||
openstack.cloud.server_action:
|
||||
@@ -579,7 +573,7 @@
|
||||
assert:
|
||||
that:
|
||||
- servers.servers.0.status == 'ACTIVE'
|
||||
- server is not changed
|
||||
- server is changed
|
||||
|
||||
- name: Delete server
|
||||
openstack.cloud.server:
|
||||
|
||||
5
ci/roles/share_type/defaults/main.yml
Normal file
5
ci/roles/share_type/defaults/main.yml
Normal file
@@ -0,0 +1,5 @@
|
||||
---
|
||||
share_backend_name: GENERIC_BACKEND
|
||||
share_type_name: test_share_type
|
||||
share_type_description: Test share type for CI
|
||||
share_type_alt_description: Changed test share type
|
||||
130
ci/roles/share_type/tasks/main.yml
Normal file
130
ci/roles/share_type/tasks/main.yml
Normal file
@@ -0,0 +1,130 @@
|
||||
---
|
||||
- name: Create share type
|
||||
openstack.cloud.share_type:
|
||||
name: "{{ share_type_name }}"
|
||||
cloud: "{{ cloud }}"
|
||||
state: present
|
||||
extra_specs:
|
||||
share_backend_name: "{{ share_backend_name }}"
|
||||
snapshot_support: true
|
||||
create_share_from_snapshot_support: true
|
||||
description: "{{ share_type_description }}"
|
||||
register: the_result
|
||||
|
||||
- name: Check created share type
|
||||
vars:
|
||||
the_share_type: "{{ the_result.share_type }}"
|
||||
ansible.builtin.assert:
|
||||
that:
|
||||
- "'id' in the_result.share_type"
|
||||
- the_share_type.description == share_type_description
|
||||
- the_share_type.is_public == True
|
||||
- the_share_type.name == share_type_name
|
||||
- the_share_type.extra_specs['share_backend_name'] == share_backend_name
|
||||
- the_share_type.extra_specs['snapshot_support'] == "True"
|
||||
- the_share_type.extra_specs['create_share_from_snapshot_support'] == "True"
|
||||
success_msg: >-
|
||||
Created share type: {{ the_result.share_type.id }},
|
||||
Name: {{ the_result.share_type.name }},
|
||||
Description: {{ the_result.share_type.description }}
|
||||
|
||||
- name: Test share type info module
|
||||
openstack.cloud.share_type_info:
|
||||
name: "{{ share_type_name }}"
|
||||
cloud: "{{ cloud }}"
|
||||
register: info_result
|
||||
|
||||
- name: Check share type info result
|
||||
ansible.builtin.assert:
|
||||
that:
|
||||
- info_result.share_type.id == the_result.share_type.id
|
||||
- info_result.share_type.name == share_type_name
|
||||
- info_result.share_type.description == share_type_description
|
||||
success_msg: "Share type info retrieved successfully"
|
||||
|
||||
- name: Test, check idempotency
|
||||
openstack.cloud.share_type:
|
||||
name: "{{ share_type_name }}"
|
||||
cloud: "{{ cloud }}"
|
||||
state: present
|
||||
extra_specs:
|
||||
share_backend_name: "{{ share_backend_name }}"
|
||||
snapshot_support: true
|
||||
create_share_from_snapshot_support: true
|
||||
description: "{{ share_type_description }}"
|
||||
is_public: true
|
||||
register: the_result
|
||||
|
||||
- name: Check result.changed is false
|
||||
ansible.builtin.assert:
|
||||
that:
|
||||
- the_result.changed == false
|
||||
success_msg: "Request with the same details lead to no changes"
|
||||
|
||||
- name: Add extra spec
|
||||
openstack.cloud.share_type:
|
||||
cloud: "{{ cloud }}"
|
||||
name: "{{ share_type_name }}"
|
||||
state: present
|
||||
extra_specs:
|
||||
share_backend_name: "{{ share_backend_name }}"
|
||||
snapshot_support: true
|
||||
create_share_from_snapshot_support: true
|
||||
some_spec: fake_spec
|
||||
description: "{{ share_type_alt_description }}"
|
||||
is_public: true
|
||||
register: the_result
|
||||
|
||||
- name: Check share type extra spec
|
||||
ansible.builtin.assert:
|
||||
that:
|
||||
- "'some_spec' in the_result.share_type.extra_specs"
|
||||
- the_result.share_type.extra_specs["some_spec"] == "fake_spec"
|
||||
- the_result.share_type.description == share_type_alt_description
|
||||
success_msg: >-
|
||||
New extra specs: {{ the_result.share_type.extra_specs }}
|
||||
|
||||
- name: Remove extra spec by updating with reduced set
|
||||
openstack.cloud.share_type:
|
||||
cloud: "{{ cloud }}"
|
||||
name: "{{ share_type_name }}"
|
||||
state: present
|
||||
extra_specs:
|
||||
share_backend_name: "{{ share_backend_name }}"
|
||||
snapshot_support: true
|
||||
create_share_from_snapshot_support: true
|
||||
description: "{{ share_type_alt_description }}"
|
||||
is_public: true
|
||||
register: the_result
|
||||
|
||||
- name: Check extra spec was removed
|
||||
ansible.builtin.assert:
|
||||
that:
|
||||
- "'some_spec' not in the_result.share_type.extra_specs"
|
||||
success_msg: "Extra spec was successfully removed"
|
||||
|
||||
- name: Delete share type
|
||||
openstack.cloud.share_type:
|
||||
cloud: "{{ cloud }}"
|
||||
name: "{{ share_type_name }}"
|
||||
state: absent
|
||||
register: the_result
|
||||
|
||||
- name: Check deletion was successful
|
||||
ansible.builtin.assert:
|
||||
that:
|
||||
- the_result.changed == true
|
||||
success_msg: "Share type deleted successfully"
|
||||
|
||||
- name: Test deletion idempotency
|
||||
openstack.cloud.share_type:
|
||||
cloud: "{{ cloud }}"
|
||||
name: "{{ share_type_name }}"
|
||||
state: absent
|
||||
register: the_result
|
||||
|
||||
- name: Check deletion idempotency
|
||||
ansible.builtin.assert:
|
||||
that:
|
||||
- the_result.changed == false
|
||||
success_msg: "Deletion idempotency works correctly"
|
||||
@@ -25,3 +25,4 @@ expected_fields:
|
||||
- updated_at
|
||||
- use_default_subnet_pool
|
||||
subnet_name: shade_subnet
|
||||
segment_name: example_segment
|
||||
|
||||
@@ -17,10 +17,20 @@
|
||||
name: "{{ network_name }}"
|
||||
state: present
|
||||
|
||||
- name: Create network segment {{ segment_name }}
|
||||
openstack.cloud.network_segment:
|
||||
cloud: "{{ cloud }}"
|
||||
name: "{{ segment_name }}"
|
||||
network: "{{ network_name }}"
|
||||
network_type: "vxlan"
|
||||
segmentation_id: 1000
|
||||
state: present
|
||||
|
||||
- name: Create subnet {{ subnet_name }} on network {{ network_name }}
|
||||
openstack.cloud.subnet:
|
||||
cloud: "{{ cloud }}"
|
||||
network_name: "{{ network_name }}"
|
||||
network_segment: "{{ segment_name }}"
|
||||
name: "{{ subnet_name }}"
|
||||
state: present
|
||||
enable_dhcp: "{{ enable_subnet_dhcp }}"
|
||||
@@ -142,6 +152,48 @@
|
||||
assert:
|
||||
that: subnet is not changed
|
||||
|
||||
- name: Create subnet {{ subnet_name }} on network {{ network_name }} without gateway IP
|
||||
openstack.cloud.subnet:
|
||||
cloud: "{{ cloud }}"
|
||||
network_name: "{{ network_name }}"
|
||||
name: "{{ subnet_name }}"
|
||||
state: present
|
||||
cidr: 192.168.0.0/24
|
||||
disable_gateway_ip: true
|
||||
register: subnet
|
||||
|
||||
- name: Assert changed
|
||||
assert:
|
||||
that: subnet is changed
|
||||
|
||||
- name: Create subnet {{ subnet_name }} on network {{ network_name }} without gateway IP
|
||||
openstack.cloud.subnet:
|
||||
cloud: "{{ cloud }}"
|
||||
network_name: "{{ network_name }}"
|
||||
name: "{{ subnet_name }}"
|
||||
state: present
|
||||
cidr: 192.168.0.0/24
|
||||
disable_gateway_ip: true
|
||||
register: subnet
|
||||
|
||||
- name: Assert not changed
|
||||
assert:
|
||||
that: subnet is not changed
|
||||
|
||||
- name: Delete subnet {{ subnet_name }} again
|
||||
openstack.cloud.subnet:
|
||||
cloud: "{{ cloud }}"
|
||||
name: "{{ subnet_name }}"
|
||||
state: absent
|
||||
register: subnet
|
||||
|
||||
- name: Delete network segment {{ segment_name }}
|
||||
openstack.cloud.network_segment:
|
||||
cloud: "{{ cloud }}"
|
||||
name: "{{ segment_name }}"
|
||||
network: "{{ network_name }}"
|
||||
state: absent
|
||||
|
||||
- name: Delete network {{ network_name }}
|
||||
openstack.cloud.network:
|
||||
cloud: "{{ cloud }}"
|
||||
|
||||
@@ -119,22 +119,23 @@
|
||||
name: "{{ subnet_name }}"
|
||||
register: subnet_result
|
||||
|
||||
- name: Verify Subnet Allocation Pools Exist
|
||||
assert:
|
||||
that:
|
||||
- idem2 is not changed
|
||||
- subnet_result.subnets is defined
|
||||
- subnet_result.subnets | length == 1
|
||||
- subnet_result.subnets[0].allocation_pools is defined
|
||||
- subnet_result.subnets[0].allocation_pools | length == 2
|
||||
# TODO(sshnaidm): Uncomment this section when the issue with allocation_pools is fixed
|
||||
# - name: Verify Subnet Allocation Pools Exist
|
||||
# assert:
|
||||
# that:
|
||||
# - idem2 is not changed
|
||||
# - subnet_result.subnets is defined
|
||||
# - subnet_result.subnets | length == 1
|
||||
# - subnet_result.subnets[0].allocation_pools is defined
|
||||
# - subnet_result.subnets[0].allocation_pools | length == 2
|
||||
|
||||
- name: Verify Subnet Allocation Pools
|
||||
assert:
|
||||
that:
|
||||
- (subnet_result.subnets[0].allocation_pools.0.start == '192.168.0.2' and subnet_result.subnets[0].allocation_pools.0.end == '192.168.0.8') or
|
||||
(subnet_result.subnets[0].allocation_pools.0.start == '192.168.0.10' and subnet_result.subnets[0].allocation_pools.0.end == '192.168.0.16')
|
||||
- (subnet_result.subnets[0].allocation_pools.1.start == '192.168.0.2' and subnet_result.subnets[0].allocation_pools.1.end == '192.168.0.8') or
|
||||
(subnet_result.subnets[0].allocation_pools.1.start == '192.168.0.10' and subnet_result.subnets[0].allocation_pools.1.end == '192.168.0.16')
|
||||
# - name: Verify Subnet Allocation Pools
|
||||
# assert:
|
||||
# that:
|
||||
# - (subnet_result.subnets[0].allocation_pools.0.start == '192.168.0.2' and subnet_result.subnets[0].allocation_pools.0.end == '192.168.0.8') or
|
||||
# (subnet_result.subnets[0].allocation_pools.0.start == '192.168.0.10' and subnet_result.subnets[0].allocation_pools.0.end == '192.168.0.16')
|
||||
# - (subnet_result.subnets[0].allocation_pools.1.start == '192.168.0.2' and subnet_result.subnets[0].allocation_pools.1.end == '192.168.0.8') or
|
||||
# (subnet_result.subnets[0].allocation_pools.1.start == '192.168.0.10' and subnet_result.subnets[0].allocation_pools.1.end == '192.168.0.16')
|
||||
|
||||
- name: Delete subnet {{ subnet_name }}
|
||||
openstack.cloud.subnet:
|
||||
|
||||
@@ -125,22 +125,23 @@
|
||||
name: "{{ subnet_name }}"
|
||||
register: subnet_result
|
||||
|
||||
- name: Verify Subnet Allocation Pools Exist
|
||||
assert:
|
||||
that:
|
||||
- idem2 is not changed
|
||||
- subnet_result.subnets is defined
|
||||
- subnet_result.subnets | length == 1
|
||||
- subnet_result.subnets[0].allocation_pools is defined
|
||||
- subnet_result.subnets[0].allocation_pools | length == 2
|
||||
|
||||
- name: Verify Subnet Allocation Pools
|
||||
assert:
|
||||
that:
|
||||
- (subnet_result.subnets[0].allocation_pools.0.start == '192.168.42.2' and subnet_result.subnets[0].allocation_pools.0.end == '192.168.42.4') or
|
||||
(subnet_result.subnets[0].allocation_pools.0.start == '192.168.42.6' and subnet_result.subnets[0].allocation_pools.0.end == '192.168.42.8')
|
||||
- (subnet_result.subnets[0].allocation_pools.1.start == '192.168.42.2' and subnet_result.subnets[0].allocation_pools.1.end == '192.168.42.4') or
|
||||
(subnet_result.subnets[0].allocation_pools.1.start == '192.168.42.6' and subnet_result.subnets[0].allocation_pools.1.end == '192.168.42.8')
|
||||
# NOT(gtema) Temporarily disable the check to land other gate fix
|
||||
#- name: Verify Subnet Allocation Pools Exist
|
||||
# assert:
|
||||
# that:
|
||||
# - idem2 is not changed
|
||||
# - subnet_result.subnets is defined
|
||||
# - subnet_result.subnets | length == 1
|
||||
# - subnet_result.subnets[0].allocation_pools is defined
|
||||
# - subnet_result.subnets[0].allocation_pools | length == 2
|
||||
#
|
||||
#- name: Verify Subnet Allocation Pools
|
||||
# assert:
|
||||
# that:
|
||||
# - (subnet_result.subnets[0].allocation_pools.0.start == '192.168.42.2' and subnet_result.subnets[0].allocation_pools.0.end == '192.168.42.4') or
|
||||
# (subnet_result.subnets[0].allocation_pools.0.start == '192.168.42.6' and subnet_result.subnets[0].allocation_pools.0.end == '192.168.42.8')
|
||||
# - (subnet_result.subnets[0].allocation_pools.1.start == '192.168.42.2' and subnet_result.subnets[0].allocation_pools.1.end == '192.168.42.4') or
|
||||
# (subnet_result.subnets[0].allocation_pools.1.start == '192.168.42.6' and subnet_result.subnets[0].allocation_pools.1.end == '192.168.42.8')
|
||||
|
||||
- name: Delete subnet {{ subnet_name }}
|
||||
openstack.cloud.subnet:
|
||||
|
||||
1
ci/roles/trait/defaults/main.yml
Normal file
1
ci/roles/trait/defaults/main.yml
Normal file
@@ -0,0 +1 @@
|
||||
trait_name: CUSTOM_ANSIBLE_TRAIT
|
||||
28
ci/roles/trait/tasks/main.yml
Normal file
28
ci/roles/trait/tasks/main.yml
Normal file
@@ -0,0 +1,28 @@
|
||||
---
|
||||
- name: Create trait
|
||||
openstack.cloud.trait:
|
||||
cloud: "{{ cloud }}"
|
||||
state: present
|
||||
id: "{{ trait_name }}"
|
||||
until: result is success
|
||||
retries: 5
|
||||
delay: 20
|
||||
register: result
|
||||
|
||||
- name: Assert trait
|
||||
assert:
|
||||
that:
|
||||
- "'name' in result.trait"
|
||||
- "result.trait.id == trait_name"
|
||||
|
||||
- name: Remove trait
|
||||
openstack.cloud.trait:
|
||||
cloud: "{{ cloud }}"
|
||||
state: absent
|
||||
id: "{{ trait_name }}"
|
||||
register: result1
|
||||
|
||||
- name: Assert trait removed
|
||||
assert:
|
||||
that:
|
||||
- "'trait' not in result1"
|
||||
32
ci/roles/volume_manage/defaults/main.yml
Normal file
32
ci/roles/volume_manage/defaults/main.yml
Normal file
@@ -0,0 +1,32 @@
|
||||
test_volume: ansible_test_volume
|
||||
managed_volume: managed_test_volume
|
||||
expected_fields:
|
||||
- attachments
|
||||
- availability_zone
|
||||
- consistency_group_id
|
||||
- created_at
|
||||
- updated_at
|
||||
- description
|
||||
- extended_replication_status
|
||||
- group_id
|
||||
- host
|
||||
- image_id
|
||||
- is_bootable
|
||||
- is_encrypted
|
||||
- is_multiattach
|
||||
- migration_id
|
||||
- migration_status
|
||||
- project_id
|
||||
- replication_driver_data
|
||||
- replication_status
|
||||
- scheduler_hints
|
||||
- size
|
||||
- snapshot_id
|
||||
- source_volume_id
|
||||
- status
|
||||
- user_id
|
||||
- volume_image_metadata
|
||||
- volume_type
|
||||
- id
|
||||
- name
|
||||
- metadata
|
||||
65
ci/roles/volume_manage/tasks/main.yml
Normal file
65
ci/roles/volume_manage/tasks/main.yml
Normal file
@@ -0,0 +1,65 @@
|
||||
---
|
||||
- name: Create volume
|
||||
openstack.cloud.volume:
|
||||
cloud: "{{ cloud }}"
|
||||
state: present
|
||||
size: 1
|
||||
name: "{{ test_volume }}"
|
||||
description: Test volume
|
||||
register: vol
|
||||
|
||||
- assert:
|
||||
that: item in vol.volume
|
||||
loop: "{{ expected_fields }}"
|
||||
|
||||
- name: Unmanage volume
|
||||
openstack.cloud.volume_manage:
|
||||
cloud: "{{ cloud }}"
|
||||
state: absent
|
||||
name: "{{ vol.volume.id }}"
|
||||
|
||||
- name: Unmanage volume again
|
||||
openstack.cloud.volume_manage:
|
||||
cloud: "{{ cloud }}"
|
||||
state: absent
|
||||
name: "{{ vol.volume.id }}"
|
||||
register: unmanage_idempotency
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- unmanage_idempotency is not changed
|
||||
|
||||
- name: Manage volume
|
||||
openstack.cloud.volume_manage:
|
||||
cloud: "{{ cloud }}"
|
||||
state: present
|
||||
source_name: volume-{{ vol.volume.id }}
|
||||
host: "{{ vol.volume.host }}"
|
||||
name: "{{ managed_volume }}"
|
||||
register: new_vol
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- new_vol.volume.name == managed_volume
|
||||
|
||||
- name: Manage volume again
|
||||
openstack.cloud.volume_manage:
|
||||
cloud: "{{ cloud }}"
|
||||
state: present
|
||||
source_name: volume-{{ vol.volume.id }}
|
||||
host: "{{ vol.volume.host }}"
|
||||
name: "{{ managed_volume }}"
|
||||
register: vol_idempotency
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- vol_idempotency is not changed
|
||||
|
||||
- pause:
|
||||
seconds: 10
|
||||
|
||||
- name: Delete volume
|
||||
openstack.cloud.volume:
|
||||
cloud: "{{ cloud }}"
|
||||
state: absent
|
||||
name: "{{ managed_volume }}"
|
||||
@@ -75,10 +75,10 @@ ansible-galaxy collection install --requirements-file ci/requirements.yml
|
||||
if [ -z "$PIP_INSTALL" ]; then
|
||||
tox -ebuild
|
||||
ansible-galaxy collection install "$(find build_artifact/ -maxdepth 1 -name 'openstack-cloud-*')" --force
|
||||
TEST_COLLECTIONS_PATHS=${HOME}/.ansible/collections:$ANSIBLE_COLLECTIONS_PATHS
|
||||
TEST_COLLECTIONS_PATHS=${HOME}/.ansible/collections:$ANSIBLE_COLLECTIONS_PATH
|
||||
else
|
||||
pip freeze | grep ansible-collections-openstack
|
||||
TEST_COLLECTIONS_PATHS=$VIRTUAL_ENV/share/ansible/collections:$ANSIBLE_COLLECTIONS_PATHS
|
||||
TEST_COLLECTIONS_PATHS=$VIRTUAL_ENV/share/ansible/collections:$ANSIBLE_COLLECTIONS_PATH
|
||||
fi
|
||||
|
||||
# We need to source the current tox environment so that Ansible will
|
||||
@@ -124,12 +124,17 @@ if [ ! -e /etc/magnum ]; then
|
||||
tag_opt+=" --skip-tags coe_cluster,coe_cluster_template"
|
||||
fi
|
||||
|
||||
if ! systemctl is-enabled devstack@m-api.service 2>&1; then
|
||||
# Skip share_type tasks if Manila is not available
|
||||
tag_opt+=" --skip-tags share_type"
|
||||
fi
|
||||
|
||||
cd ci/
|
||||
|
||||
# Run tests
|
||||
set -o pipefail
|
||||
# shellcheck disable=SC2086
|
||||
ANSIBLE_COLLECTIONS_PATHS=$TEST_COLLECTIONS_PATHS ansible-playbook \
|
||||
ANSIBLE_COLLECTIONS_PATH=$TEST_COLLECTIONS_PATHS ansible-playbook \
|
||||
-vvv ./run-collection.yml \
|
||||
-e "sdk_version=${SDK_VER} cloud=${CLOUD} cloud_alt=${CLOUD_ALT} ${ANSIBLE_VARS}" \
|
||||
${tag_opt} 2>&1 | sudo tee /opt/stack/logs/test_output.log
|
||||
|
||||
@@ -32,10 +32,13 @@
|
||||
- { role: loadbalancer, tags: loadbalancer }
|
||||
- { role: logging, tags: logging }
|
||||
- { role: network, tags: network }
|
||||
- { role: network_segment, tags: network_segment }
|
||||
- { role: neutron_rbac_policy, tags: neutron_rbac_policy }
|
||||
- { role: object, tags: object }
|
||||
- { role: object_container, tags: object_container }
|
||||
- { role: object_containers_info, tags: object_containers_info }
|
||||
- { role: port, tags: port }
|
||||
- { role: trait, tags: trait }
|
||||
- { role: trunk, tags: trunk }
|
||||
- { role: project, tags: project }
|
||||
- { role: quota, tags: quota }
|
||||
@@ -51,12 +54,14 @@
|
||||
- { role: server_group, tags: server_group }
|
||||
- { role: server_metadata, tags: server_metadata }
|
||||
- { role: server_volume, tags: server_volume }
|
||||
- { role: share_type, tags: share_type }
|
||||
- { role: stack, tags: stack }
|
||||
- { role: subnet, tags: subnet }
|
||||
- { role: subnet_pool, tags: subnet_pool }
|
||||
- { role: volume, tags: volume }
|
||||
- { role: volume_type, tags: volume_type }
|
||||
- { role: volume_backup, tags: volume_backup }
|
||||
- { role: volume_manage, tags: volume_manage }
|
||||
- { role: volume_service, tags: volume_service }
|
||||
- { role: volume_snapshot, tags: volume_snapshot }
|
||||
- { role: volume_type_access, tags: volume_type_access }
|
||||
|
||||
@@ -11,7 +11,7 @@ For hacking on the Ansible OpenStack collection it helps to [prepare a DevStack
|
||||
|
||||
## Hosting
|
||||
|
||||
* [Bug tracker][storyboard]
|
||||
* [Bug tracker][bugtracker]
|
||||
* [Mailing list `openstack-discuss@lists.openstack.org`][openstack-discuss].
|
||||
Prefix subjects with `[aoc]` or `[aco]` for faster responses.
|
||||
* [Code Hosting][opendev-a-c-o]
|
||||
@@ -188,4 +188,4 @@ Read [Release Guide](releasing.md) on how to publish new releases.
|
||||
[openstacksdk-cloud-layer-stays]: https://meetings.opendev.org/irclogs/%23openstack-sdks/%23openstack-sdks.2022-04-27.log.html
|
||||
[openstacksdk-to-dict]: https://opendev.org/openstack/openstacksdk/src/branch/master/openstack/resource.py
|
||||
[openstacksdk]: https://opendev.org/openstack/openstacksdk
|
||||
[storyboard]: https://storyboard.openstack.org/#!/project/openstack/ansible-collections-openstack
|
||||
[bugtracker]: https://bugs.launchpad.net/ansible-collections-openstack
|
||||
|
||||
@@ -11,7 +11,7 @@ dependencies: {}
|
||||
repository: https://opendev.org/openstack/ansible-collections-openstack
|
||||
documentation: https://docs.ansible.com/ansible/latest/collections/openstack/cloud/index.html
|
||||
homepage: https://opendev.org/openstack/ansible-collections-openstack
|
||||
issues: https://storyboard.openstack.org/#!/project/openstack/ansible-collections-openstack
|
||||
issues: https://bugs.launchpad.net/ansible-collections-openstack
|
||||
build_ignore:
|
||||
- "*.tar.gz"
|
||||
- build_artifact
|
||||
@@ -32,4 +32,4 @@ build_ignore:
|
||||
- .vscode
|
||||
- ansible_collections_openstack.egg-info
|
||||
- changelogs
|
||||
version: 2.3.0
|
||||
version: 2.5.0
|
||||
|
||||
@@ -11,7 +11,7 @@ dependencies: {}
|
||||
repository: https://opendev.org/openstack/ansible-collections-openstack
|
||||
documentation: https://docs.ansible.com/ansible/latest/collections/openstack/cloud/index.html
|
||||
homepage: https://opendev.org/openstack/ansible-collections-openstack
|
||||
issues: https://storyboard.openstack.org/#!/project/openstack/ansible-collections-openstack
|
||||
issues: https://bugs.launchpad.net/ansible-collections-openstack
|
||||
build_ignore:
|
||||
- "*.tar.gz"
|
||||
- build_artifact
|
||||
|
||||
@@ -10,6 +10,7 @@ action_groups:
|
||||
- baremetal_node_action
|
||||
- baremetal_node_info
|
||||
- baremetal_port
|
||||
- baremetal_port_group
|
||||
- baremetal_port_info
|
||||
- catalog_service
|
||||
- catalog_service_info
|
||||
@@ -51,11 +52,13 @@ action_groups:
|
||||
- lb_pool
|
||||
- loadbalancer
|
||||
- network
|
||||
- network_segment
|
||||
- networks_info
|
||||
- neutron_rbac_policies_info
|
||||
- neutron_rbac_policy
|
||||
- object
|
||||
- object_container
|
||||
- object_containers_info
|
||||
- port
|
||||
- port_info
|
||||
- project
|
||||
@@ -77,6 +80,8 @@ action_groups:
|
||||
- server_info
|
||||
- server_metadata
|
||||
- server_volume
|
||||
- share_type
|
||||
- share_type_info
|
||||
- stack
|
||||
- stack_info
|
||||
- subnet
|
||||
@@ -84,6 +89,7 @@ action_groups:
|
||||
- subnets_info
|
||||
- trunk
|
||||
- volume
|
||||
- volume_manage
|
||||
- volume_backup
|
||||
- volume_backup_info
|
||||
- volume_info
|
||||
|
||||
@@ -96,6 +96,18 @@ options:
|
||||
only.
|
||||
type: bool
|
||||
default: false
|
||||
only_ipv4:
|
||||
description:
|
||||
- Use only ipv4 addresses for ansible_host and ansible_ssh_host.
|
||||
- Using I(only_ipv4) helps when running Ansible in a ipv4 only setup.
|
||||
type: bool
|
||||
default: false
|
||||
server_filters:
|
||||
description:
|
||||
- A dictionary of server filter value pairs.
|
||||
- Available parameters can be seen under https://docs.openstack.org/api-ref/compute/#list-servers
|
||||
type: dict
|
||||
default: {}
|
||||
show_all:
|
||||
description:
|
||||
- Whether all servers should be listed or not.
|
||||
@@ -273,7 +285,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
|
||||
clouds_yaml_path = self.get_option('clouds_yaml_path')
|
||||
config_files = openstack.config.loader.CONFIG_FILES
|
||||
if clouds_yaml_path:
|
||||
config_files += clouds_yaml_path
|
||||
config_files = clouds_yaml_path + config_files
|
||||
|
||||
config = openstack.config.loader.OpenStackConfig(
|
||||
config_files=config_files)
|
||||
@@ -303,6 +315,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
|
||||
|
||||
expand_hostvars = self.get_option('expand_hostvars')
|
||||
all_projects = self.get_option('all_projects')
|
||||
server_filters = self.get_option('server_filters')
|
||||
servers = []
|
||||
|
||||
def _expand_server(server, cloud, volumes):
|
||||
@@ -349,7 +362,8 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
|
||||
all_projects=all_projects,
|
||||
# details are required because 'addresses'
|
||||
# attribute must be populated
|
||||
details=True)
|
||||
details=True,
|
||||
**server_filters)
|
||||
]:
|
||||
servers.append(server)
|
||||
except openstack.exceptions.OpenStackCloudException as e:
|
||||
@@ -384,10 +398,17 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
|
||||
if address['OS-EXT-IPS:type'] == 'floating'),
|
||||
None)
|
||||
|
||||
fixed_ip = next(
|
||||
(address['addr'] for address in addresses
|
||||
if address['OS-EXT-IPS:type'] == 'fixed'),
|
||||
None)
|
||||
if self.get_option('only_ipv4'):
|
||||
fixed_ip = next(
|
||||
(address['addr'] for address in addresses
|
||||
if (address['OS-EXT-IPS:type'] == 'fixed' and address['version'] == 4)),
|
||||
None)
|
||||
|
||||
else:
|
||||
fixed_ip = next(
|
||||
(address['addr'] for address in addresses
|
||||
if address['OS-EXT-IPS:type'] == 'fixed'),
|
||||
None)
|
||||
|
||||
ip = floating_ip if floating_ip is not None and not self.get_option('private') else fixed_ip
|
||||
|
||||
|
||||
@@ -183,7 +183,7 @@ def openstack_cloud_from_module(module, min_version=None, max_version=None):
|
||||
" excluded.")
|
||||
for param in (
|
||||
'auth', 'region_name', 'validate_certs',
|
||||
'ca_cert', 'client_key', 'api_timeout', 'auth_type'):
|
||||
'ca_cert', 'client_cert', 'client_key', 'api_timeout', 'auth_type'):
|
||||
if module.params[param] is not None:
|
||||
module.fail_json(msg=fail_message.format(param=param))
|
||||
# For 'interface' parameter, fail if we receive a non-default value
|
||||
@@ -199,6 +199,7 @@ def openstack_cloud_from_module(module, min_version=None, max_version=None):
|
||||
verify=module.params['validate_certs'],
|
||||
cacert=module.params['ca_cert'],
|
||||
key=module.params['client_key'],
|
||||
cert=module.params['client_cert'],
|
||||
api_timeout=module.params['api_timeout'],
|
||||
interface=module.params['interface'],
|
||||
)
|
||||
@@ -358,7 +359,7 @@ class OpenStackModule:
|
||||
" excluded.")
|
||||
for param in (
|
||||
'auth', 'region_name', 'validate_certs',
|
||||
'ca_cert', 'client_key', 'api_timeout', 'auth_type'):
|
||||
'ca_cert', 'client_cert', 'client_key', 'api_timeout', 'auth_type'):
|
||||
if self.params[param] is not None:
|
||||
self.fail_json(msg=fail_message.format(param=param))
|
||||
# For 'interface' parameter, fail if we receive a non-default value
|
||||
@@ -373,6 +374,7 @@ class OpenStackModule:
|
||||
verify=self.params['validate_certs'],
|
||||
cacert=self.params['ca_cert'],
|
||||
key=self.params['client_key'],
|
||||
cert=self.params['client_cert'],
|
||||
api_timeout=self.params['api_timeout'],
|
||||
interface=self.params['interface'],
|
||||
)
|
||||
|
||||
257
plugins/modules/baremetal_port_group.py
Normal file
257
plugins/modules/baremetal_port_group.py
Normal file
@@ -0,0 +1,257 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright (c) 2026 OpenStack Ansible SIG
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
module: baremetal_port_group
|
||||
short_description: Create/Delete Bare Metal port group resources from OpenStack
|
||||
author: OpenStack Ansible SIG
|
||||
description:
|
||||
- Create, update and remove Bare Metal port groups from OpenStack.
|
||||
options:
|
||||
id:
|
||||
description:
|
||||
- ID of the port group.
|
||||
- Will be auto-generated if not specified.
|
||||
type: str
|
||||
aliases: ['uuid']
|
||||
name:
|
||||
description:
|
||||
- Name of the port group.
|
||||
type: str
|
||||
node:
|
||||
description:
|
||||
- ID or Name of the node this resource belongs to.
|
||||
- Required when creating a new port group.
|
||||
type: str
|
||||
address:
|
||||
description:
|
||||
- Physical hardware address of this port group, typically the hardware
|
||||
MAC address.
|
||||
type: str
|
||||
extra:
|
||||
description:
|
||||
- A set of one or more arbitrary metadata key and value pairs.
|
||||
type: dict
|
||||
standalone_ports_supported:
|
||||
description:
|
||||
- Whether the port group supports ports that are not members of this
|
||||
port group.
|
||||
type: bool
|
||||
mode:
|
||||
description:
|
||||
- The port group mode.
|
||||
type: str
|
||||
properties:
|
||||
description:
|
||||
- Key/value properties for the port group.
|
||||
type: dict
|
||||
state:
|
||||
description:
|
||||
- Indicates desired state of the resource.
|
||||
choices: ['present', 'absent']
|
||||
default: present
|
||||
type: str
|
||||
extends_documentation_fragment:
|
||||
- openstack.cloud.openstack
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Create Bare Metal port group
|
||||
openstack.cloud.baremetal_port_group:
|
||||
cloud: devstack
|
||||
state: present
|
||||
name: bond0
|
||||
node: bm-0
|
||||
address: fa:16:3e:aa:aa:aa
|
||||
mode: '802.3ad'
|
||||
standalone_ports_supported: true
|
||||
register: result
|
||||
|
||||
- name: Update Bare Metal port group
|
||||
openstack.cloud.baremetal_port_group:
|
||||
cloud: devstack
|
||||
state: present
|
||||
id: 1a85ebca-22bf-42eb-ad9e-f640789b8098
|
||||
mode: 'active-backup'
|
||||
properties:
|
||||
miimon: '100'
|
||||
register: result
|
||||
|
||||
- name: Delete Bare Metal port group
|
||||
openstack.cloud.baremetal_port_group:
|
||||
cloud: devstack
|
||||
state: absent
|
||||
id: 1a85ebca-22bf-42eb-ad9e-f640789b8098
|
||||
register: result
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
port_group:
|
||||
description: A port group dictionary, subset of the dictionary keys listed
|
||||
below may be returned, depending on your cloud provider.
|
||||
returned: success
|
||||
type: dict
|
||||
contains:
|
||||
address:
|
||||
description: Physical hardware address of the port group.
|
||||
returned: success
|
||||
type: str
|
||||
created_at:
|
||||
description: Bare Metal port group created at timestamp.
|
||||
returned: success
|
||||
type: str
|
||||
extra:
|
||||
description: A set of one or more arbitrary metadata key and value
|
||||
pairs.
|
||||
returned: success
|
||||
type: dict
|
||||
id:
|
||||
description: The UUID for the Bare Metal port group resource.
|
||||
returned: success
|
||||
type: str
|
||||
links:
|
||||
description: A list of relative links, including the self and
|
||||
bookmark links.
|
||||
returned: success
|
||||
type: list
|
||||
mode:
|
||||
description: The port group mode.
|
||||
returned: success
|
||||
type: str
|
||||
name:
|
||||
description: Bare Metal port group name.
|
||||
returned: success
|
||||
type: str
|
||||
node_id:
|
||||
description: UUID of the Bare Metal node this resource belongs to.
|
||||
returned: success
|
||||
type: str
|
||||
properties:
|
||||
description: Key/value properties for this port group.
|
||||
returned: success
|
||||
type: dict
|
||||
standalone_ports_supported:
|
||||
description: Whether standalone ports are supported.
|
||||
returned: success
|
||||
type: bool
|
||||
updated_at:
|
||||
description: Bare Metal port group updated at timestamp.
|
||||
returned: success
|
||||
type: str
|
||||
'''
|
||||
|
||||
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import (
|
||||
OpenStackModule
|
||||
)
|
||||
|
||||
|
||||
class BaremetalPortGroupModule(OpenStackModule):
|
||||
argument_spec = dict(
|
||||
id=dict(aliases=['uuid']),
|
||||
name=dict(),
|
||||
node=dict(),
|
||||
address=dict(),
|
||||
extra=dict(type='dict'),
|
||||
standalone_ports_supported=dict(type='bool'),
|
||||
mode=dict(),
|
||||
properties=dict(type='dict'),
|
||||
state=dict(default='present', choices=['present', 'absent']),
|
||||
)
|
||||
|
||||
module_kwargs = dict(
|
||||
required_one_of=[
|
||||
('id', 'name'),
|
||||
],
|
||||
supports_check_mode=True,
|
||||
)
|
||||
|
||||
def _find_port_group(self):
|
||||
id_or_name = self.params['id'] if self.params['id'] else self.params['name']
|
||||
if not id_or_name:
|
||||
return None
|
||||
|
||||
try:
|
||||
return self.conn.baremetal.find_port_group(id_or_name)
|
||||
except self.sdk.exceptions.ResourceNotFound:
|
||||
return None
|
||||
|
||||
def _build_create_attrs(self):
|
||||
attrs = {}
|
||||
|
||||
for key in ['id', 'name', 'address', 'extra',
|
||||
'standalone_ports_supported', 'mode', 'properties']:
|
||||
if self.params[key] is not None:
|
||||
attrs[key] = self.params[key]
|
||||
|
||||
node_name_or_id = self.params['node']
|
||||
if not node_name_or_id:
|
||||
self.fail_json(msg="Parameter 'node' is required when creating a new port group")
|
||||
|
||||
node = self.conn.baremetal.find_node(node_name_or_id, ignore_missing=False)
|
||||
attrs['node_id'] = node['id']
|
||||
return attrs
|
||||
|
||||
def _build_update_attrs(self, port_group):
|
||||
attrs = {}
|
||||
|
||||
for key in ['name', 'address', 'extra',
|
||||
'standalone_ports_supported', 'mode', 'properties']:
|
||||
if self.params[key] is not None and self.params[key] != port_group.get(key):
|
||||
attrs[key] = self.params[key]
|
||||
|
||||
return attrs
|
||||
|
||||
def _will_change(self, port_group, state):
|
||||
if state == 'absent':
|
||||
return bool(port_group)
|
||||
|
||||
if not port_group:
|
||||
return True
|
||||
|
||||
return bool(self._build_update_attrs(port_group))
|
||||
|
||||
def run(self):
|
||||
state = self.params['state']
|
||||
port_group = self._find_port_group()
|
||||
|
||||
if self.ansible.check_mode:
|
||||
if state == 'present' and not port_group:
|
||||
self._build_create_attrs()
|
||||
self.exit_json(changed=self._will_change(port_group, state))
|
||||
|
||||
if state == 'present':
|
||||
if not port_group:
|
||||
port_group = self.conn.baremetal.create_port_group(
|
||||
**self._build_create_attrs())
|
||||
self.exit_json(
|
||||
changed=True,
|
||||
port_group=port_group.to_dict(computed=False))
|
||||
|
||||
update_attrs = self._build_update_attrs(port_group)
|
||||
changed = bool(update_attrs)
|
||||
|
||||
if changed:
|
||||
port_group = self.conn.baremetal.update_port_group(
|
||||
port_group['id'], **update_attrs)
|
||||
|
||||
self.exit_json(
|
||||
changed=changed,
|
||||
port_group=port_group.to_dict(computed=False))
|
||||
|
||||
if not port_group:
|
||||
self.exit_json(changed=False)
|
||||
|
||||
self.conn.baremetal.delete_port_group(port_group['id'])
|
||||
self.exit_json(changed=True)
|
||||
|
||||
|
||||
def main():
|
||||
module = BaremetalPortGroupModule()
|
||||
module()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -41,11 +41,11 @@ extends_documentation_fragment:
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Fetch all DNS zones
|
||||
openstack.cloud.dns_zones:
|
||||
openstack.cloud.dns_zone_info:
|
||||
cloud: devstack
|
||||
|
||||
- name: Fetch DNS zones by name
|
||||
openstack.cloud.dns_zones:
|
||||
openstack.cloud.dns_zone_info:
|
||||
cloud: devstack
|
||||
name: ansible.test.zone.
|
||||
'''
|
||||
|
||||
@@ -128,6 +128,20 @@ options:
|
||||
- Should only be used when needed, such as when the user needs the cloud to
|
||||
transform image format.
|
||||
type: bool
|
||||
import_method:
|
||||
description:
|
||||
- Method to use for importing the image. Not all deployments support all methods.
|
||||
- Supports web-download or glance-download.
|
||||
- copy-image is not supported with create actions.
|
||||
- glance-direct is removed from the import method so use_import can be used in that case.
|
||||
type: str
|
||||
choices: [web-download, glance-download]
|
||||
uri:
|
||||
description:
|
||||
- Required only if using the web-download import method.
|
||||
- This url is where the data is made available to the Image service.
|
||||
type: str
|
||||
|
||||
extends_documentation_fragment:
|
||||
- openstack.cloud.openstack
|
||||
'''
|
||||
@@ -399,11 +413,13 @@ class ImageModule(OpenStackModule):
|
||||
visibility=dict(choices=['public', 'private', 'shared', 'community']),
|
||||
volume=dict(),
|
||||
use_import=dict(type='bool'),
|
||||
import_method=dict(choices=['web-download', 'glance-download']),
|
||||
uri=dict()
|
||||
)
|
||||
|
||||
module_kwargs = dict(
|
||||
mutually_exclusive=[
|
||||
('filename', 'volume'),
|
||||
('filename', 'volume', 'uri'),
|
||||
('visibility', 'is_public'),
|
||||
],
|
||||
)
|
||||
@@ -412,7 +428,7 @@ class ImageModule(OpenStackModule):
|
||||
attr_params = ('id', 'name', 'filename', 'disk_format',
|
||||
'container_format', 'wait', 'timeout', 'is_public',
|
||||
'is_protected', 'min_disk', 'min_ram', 'volume', 'tags',
|
||||
'use_import')
|
||||
'use_import', 'import_method', 'uri')
|
||||
|
||||
def _resolve_visibility(self):
|
||||
"""resolve a visibility value to be compatible with older versions"""
|
||||
@@ -513,6 +529,22 @@ class ImageModule(OpenStackModule):
|
||||
if image['status'] == 'deactivated':
|
||||
self.conn.image.reactivate_image(image)
|
||||
changed = True
|
||||
elif image['status'] == 'queued':
|
||||
if (
|
||||
self.params['filename']
|
||||
and hasattr(self.conn.image, 'stage_image')):
|
||||
self.conn.image.stage_image(
|
||||
image, filename=self.params['filename'])
|
||||
changed = True
|
||||
elif self.params['filename']:
|
||||
with open(self.params['filename'], 'rb') as image_data:
|
||||
self.conn.image.upload_image(
|
||||
container_format=self.params['container_format'],
|
||||
disk_format=self.params['disk_format'],
|
||||
data=image_data,
|
||||
id=image.id,
|
||||
name=image.name)
|
||||
changed = True
|
||||
|
||||
update_payload = self._build_update(image)
|
||||
|
||||
|
||||
183
plugins/modules/network_segment.py
Normal file
183
plugins/modules/network_segment.py
Normal file
@@ -0,0 +1,183 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright (c) 2025 British Broadcasting Corporation
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: network_segment
|
||||
short_description: Creates/removes network segments from OpenStack
|
||||
author: OpenStack Ansible SIG
|
||||
description:
|
||||
- Add, update or remove network segments from OpenStack.
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Name to be assigned to the segment. Although Neutron allows for
|
||||
non-unique segment names, this module enforces segment name
|
||||
uniqueness.
|
||||
required: true
|
||||
type: str
|
||||
description:
|
||||
description:
|
||||
- Description of the segment
|
||||
type: str
|
||||
network:
|
||||
description:
|
||||
- Name or id of the network to which the segment should be attached
|
||||
type: str
|
||||
network_type:
|
||||
description:
|
||||
- The type of physical network that maps to this segment resource.
|
||||
type: str
|
||||
physical_network:
|
||||
description:
|
||||
- The physical network where this segment object is implemented.
|
||||
type: str
|
||||
segmentation_id:
|
||||
description:
|
||||
- An isolated segment on the physical network. The I(network_type)
|
||||
attribute defines the segmentation model. For example, if the
|
||||
I(network_type) value is vlan, this ID is a vlan identifier. If
|
||||
the I(network_type) value is gre, this ID is a gre key.
|
||||
type: int
|
||||
state:
|
||||
description:
|
||||
- Indicate desired state of the resource.
|
||||
choices: ['present', 'absent']
|
||||
default: present
|
||||
type: str
|
||||
extends_documentation_fragment:
|
||||
- openstack.cloud.openstack
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Create a VLAN type network segment named 'segment1'.
|
||||
- openstack.cloud.network_segment:
|
||||
cloud: mycloud
|
||||
name: segment1
|
||||
network: my_network
|
||||
network_type: vlan
|
||||
segmentation_id: 2000
|
||||
physical_network: my_physnet
|
||||
state: present
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
id:
|
||||
description: Id of segment
|
||||
returned: On success when segment exists.
|
||||
type: str
|
||||
network_segment:
|
||||
description: Dictionary describing the network segment.
|
||||
returned: On success when network segment exists.
|
||||
type: dict
|
||||
contains:
|
||||
description:
|
||||
description: Description
|
||||
type: str
|
||||
id:
|
||||
description: Id
|
||||
type: str
|
||||
name:
|
||||
description: Name
|
||||
type: str
|
||||
network_id:
|
||||
description: Network Id
|
||||
type: str
|
||||
network_type:
|
||||
description: Network type
|
||||
type: str
|
||||
physical_network:
|
||||
description: Physical network
|
||||
type: str
|
||||
segmentation_id:
|
||||
description: Segmentation Id
|
||||
type: int
|
||||
'''
|
||||
|
||||
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
|
||||
|
||||
|
||||
class NetworkSegmentModule(OpenStackModule):
|
||||
|
||||
argument_spec = dict(
|
||||
name=dict(required=True),
|
||||
description=dict(),
|
||||
network=dict(),
|
||||
network_type=dict(),
|
||||
physical_network=dict(),
|
||||
segmentation_id=dict(type='int'),
|
||||
state=dict(default='present', choices=['absent', 'present'])
|
||||
)
|
||||
|
||||
def run(self):
|
||||
|
||||
state = self.params['state']
|
||||
name = self.params['name']
|
||||
network_name_or_id = self.params['network']
|
||||
|
||||
kwargs = {}
|
||||
filters = {}
|
||||
for arg in ('description', 'network_type', 'physical_network', 'segmentation_id'):
|
||||
if self.params[arg] is not None:
|
||||
kwargs[arg] = self.params[arg]
|
||||
|
||||
for arg in ('network_type', 'physical_network'):
|
||||
if self.params[arg] is not None:
|
||||
filters[arg] = self.params[arg]
|
||||
|
||||
if network_name_or_id:
|
||||
network = self.conn.network.find_network(network_name_or_id,
|
||||
ignore_missing=False,
|
||||
**filters)
|
||||
kwargs['network_id'] = network.id
|
||||
filters['network_id'] = network.id
|
||||
|
||||
segment = self.conn.network.find_segment(name, **filters)
|
||||
|
||||
if state == 'present':
|
||||
if not segment:
|
||||
segment = self.conn.network.create_segment(name=name, **kwargs)
|
||||
changed = True
|
||||
else:
|
||||
changed = False
|
||||
update_kwargs = {}
|
||||
|
||||
# As the name is required and all other attributes cannot be
|
||||
# changed (and appear in filters above), we only need to handle
|
||||
# updates to the description here.
|
||||
for arg in ["description"]:
|
||||
if (
|
||||
arg in kwargs
|
||||
# ensure user wants something specific
|
||||
and kwargs[arg] is not None
|
||||
# and this is not what we have right now
|
||||
and kwargs[arg] != segment[arg]
|
||||
):
|
||||
update_kwargs[arg] = kwargs[arg]
|
||||
|
||||
if update_kwargs:
|
||||
segment = self.conn.network.update_segment(
|
||||
segment.id, **update_kwargs
|
||||
)
|
||||
changed = True
|
||||
|
||||
segment = segment.to_dict(computed=False)
|
||||
self.exit(changed=changed, network_segment=segment, id=segment['id'])
|
||||
elif state == 'absent':
|
||||
if not segment:
|
||||
self.exit(changed=False)
|
||||
else:
|
||||
self.conn.network.delete_segment(segment['id'])
|
||||
self.exit(changed=True)
|
||||
|
||||
|
||||
def main():
|
||||
module = NetworkSegmentModule()
|
||||
module()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
202
plugins/modules/object_containers_info.py
Normal file
202
plugins/modules/object_containers_info.py
Normal file
@@ -0,0 +1,202 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright (c) 2024 Catalyst Cloud Limited
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
DOCUMENTATION = r"""
|
||||
---
|
||||
module: object_containers_info
|
||||
short_description: Fetch container info from the OpenStack Swift service.
|
||||
author: OpenStack Ansible SIG
|
||||
description:
|
||||
- Fetch container info from the OpenStack Swift service.
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Name of the container
|
||||
type: str
|
||||
aliases: ["container"]
|
||||
prefix:
|
||||
description:
|
||||
- Filter containers by prefix
|
||||
type: str
|
||||
extends_documentation_fragment:
|
||||
- openstack.cloud.openstack
|
||||
"""
|
||||
|
||||
EXAMPLES = r"""
|
||||
- name: List all containers existing on the project
|
||||
openstack.cloud.object_containers_info:
|
||||
|
||||
- name: Retrive a single container by name
|
||||
openstack.cloud.object_containers_info:
|
||||
name: test-container
|
||||
|
||||
- name: Retrieve and filter containers by prefix
|
||||
openstack.cloud.object_containers_info:
|
||||
prefix: test-
|
||||
"""
|
||||
|
||||
RETURN = r"""
|
||||
containers:
|
||||
description: List of dictionaries describing matching containers.
|
||||
returned: always
|
||||
type: list
|
||||
elements: dict
|
||||
contains:
|
||||
bytes:
|
||||
description: The total number of bytes that are stored in Object Storage
|
||||
for the container.
|
||||
type: int
|
||||
sample: 5449
|
||||
bytes_used:
|
||||
description: The count of bytes used in total.
|
||||
type: int
|
||||
sample: 5449
|
||||
content_type:
|
||||
description: The MIME type of the list of names.
|
||||
Only fetched when searching for a container by name.
|
||||
type: str
|
||||
sample: null
|
||||
count:
|
||||
description: The number of objects in the container.
|
||||
type: int
|
||||
sample: 1
|
||||
history_location:
|
||||
description: Enables versioning on the container.
|
||||
Only fetched when searching for a container by name.
|
||||
type: str
|
||||
sample: null
|
||||
id:
|
||||
description: The ID of the container. Equals I(name).
|
||||
type: str
|
||||
sample: "otc"
|
||||
if_none_match:
|
||||
description: "In combination with C(Expect: 100-Continue), specify an
|
||||
C(If-None-Match: *) header to query whether the server
|
||||
already has a copy of the object before any data is sent.
|
||||
Only set when searching for a container by name."
|
||||
type: str
|
||||
sample: null
|
||||
is_content_type_detected:
|
||||
description: If set to C(true), Object Storage guesses the content type
|
||||
based on the file extension and ignores the value sent in
|
||||
the Content-Type header, if present.
|
||||
Only fetched when searching for a container by name.
|
||||
type: bool
|
||||
sample: null
|
||||
is_newest:
|
||||
description: If set to True, Object Storage queries all replicas to
|
||||
return the most recent one. If you omit this header, Object
|
||||
Storage responds faster after it finds one valid replica.
|
||||
Because setting this header to True is more expensive for
|
||||
the back end, use it only when it is absolutely needed.
|
||||
Only fetched when searching for a container by name.
|
||||
type: bool
|
||||
sample: null
|
||||
meta_temp_url_key:
|
||||
description: The secret key value for temporary URLs. If not set,
|
||||
this header is not returned by this operation.
|
||||
Only fetched when searching for a container by name.
|
||||
type: str
|
||||
sample: null
|
||||
meta_temp_url_key_2:
|
||||
description: A second secret key value for temporary URLs. If not set,
|
||||
this header is not returned by this operation.
|
||||
Only fetched when searching for a container by name.
|
||||
type: str
|
||||
sample: null
|
||||
name:
|
||||
description: The name of the container.
|
||||
type: str
|
||||
sample: "otc"
|
||||
object_count:
|
||||
description: The number of objects.
|
||||
type: int
|
||||
sample: 1
|
||||
read_ACL:
|
||||
description: The ACL that grants read access. If not set, this header is
|
||||
not returned by this operation.
|
||||
Only fetched when searching for a container by name.
|
||||
type: str
|
||||
sample: null
|
||||
storage_policy:
|
||||
description: Storage policy used by the container. It is not possible to
|
||||
change policy of an existing container.
|
||||
Only fetched when searching for a container by name.
|
||||
type: str
|
||||
sample: null
|
||||
sync_key:
|
||||
description: The secret key for container synchronization. If not set,
|
||||
this header is not returned by this operation.
|
||||
Only fetched when searching for a container by name.
|
||||
type: str
|
||||
sample: null
|
||||
sync_to:
|
||||
description: The destination for container synchronization. If not set,
|
||||
this header is not returned by this operation.
|
||||
Only fetched when searching for a container by name.
|
||||
type: str
|
||||
sample: null
|
||||
timestamp:
|
||||
description: The timestamp of the transaction.
|
||||
Only fetched when searching for a container by name.
|
||||
type: str
|
||||
sample: null
|
||||
versions_location:
|
||||
description: Enables versioning on this container. The value is the name
|
||||
of another container. You must UTF-8-encode and then
|
||||
URL-encode the name before you include it in the header. To
|
||||
disable versioning, set the header to an empty string.
|
||||
Only fetched when searching for a container by name.
|
||||
type: str
|
||||
sample: null
|
||||
write_ACL:
|
||||
description: The ACL that grants write access. If not set, this header is
|
||||
not returned by this operation.
|
||||
Only fetched when searching for a container by name.
|
||||
type: str
|
||||
sample: null
|
||||
"""
|
||||
|
||||
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
|
||||
|
||||
|
||||
class ObjectContainersInfoModule(OpenStackModule):
|
||||
argument_spec = dict(
|
||||
name=dict(aliases=["container"]),
|
||||
prefix=dict(),
|
||||
)
|
||||
|
||||
module_kwargs = dict(
|
||||
supports_check_mode=True,
|
||||
)
|
||||
|
||||
def run(self):
|
||||
if self.params["name"]:
|
||||
containers = [
|
||||
(
|
||||
self.conn.object_store.get_container_metadata(
|
||||
self.params["name"],
|
||||
).to_dict(computed=False)
|
||||
),
|
||||
]
|
||||
else:
|
||||
query = {}
|
||||
if self.params["prefix"]:
|
||||
query["prefix"] = self.params["prefix"]
|
||||
containers = [
|
||||
c.to_dict(computed=False)
|
||||
for c in self.conn.object_store.containers(**query)
|
||||
]
|
||||
self.exit(changed=False, containers=containers)
|
||||
|
||||
|
||||
def main():
|
||||
module = ObjectContainersInfoModule()
|
||||
module()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -511,7 +511,7 @@ class PortModule(OpenStackModule):
|
||||
**(dict(network_id=network.id) if network else dict()))
|
||||
|
||||
if self.ansible.check_mode:
|
||||
self.exit_json(changed=self._will_change(network, port, state))
|
||||
self.exit_json(changed=self._will_change(port, state))
|
||||
|
||||
if state == 'present' and not port:
|
||||
# create port
|
||||
|
||||
@@ -181,7 +181,7 @@ class IdentityProjectModule(OpenStackModule):
|
||||
raise ValueError('Duplicate key(s) in extra_specs: {0}'
|
||||
.format(', '.join(list(duplicate_keys))))
|
||||
for k, v in extra_specs.items():
|
||||
if v != project[k]:
|
||||
if k not in project or v != project[k]:
|
||||
attributes[k] = v
|
||||
|
||||
if attributes:
|
||||
|
||||
@@ -38,6 +38,9 @@ options:
|
||||
groups:
|
||||
description: Number of groups that are allowed for the project
|
||||
type: int
|
||||
health_monitors:
|
||||
description: Maximum number of health monitors that can be created.
|
||||
type: int
|
||||
injected_file_content_bytes:
|
||||
description:
|
||||
- Maximum file size in bytes.
|
||||
@@ -61,6 +64,12 @@ options:
|
||||
key_pairs:
|
||||
description: Number of key pairs to allow.
|
||||
type: int
|
||||
l7_policies:
|
||||
description: The maximum amount of L7 policies you can create.
|
||||
type: int
|
||||
listeners:
|
||||
description: The maximum number of listeners you can create.
|
||||
type: int
|
||||
load_balancers:
|
||||
description: The maximum amount of load balancers you can create
|
||||
type: int
|
||||
@@ -68,6 +77,9 @@ options:
|
||||
metadata_items:
|
||||
description: Number of metadata items allowed per instance.
|
||||
type: int
|
||||
members:
|
||||
description: Number of members allowed for loadbalancer.
|
||||
type: int
|
||||
name:
|
||||
description: Name of the OpenStack Project to manage.
|
||||
required: true
|
||||
@@ -227,6 +239,33 @@ quotas:
|
||||
server_groups:
|
||||
description: Number of server groups to allow.
|
||||
type: int
|
||||
load_balancer:
|
||||
description: Load_balancer service quotas
|
||||
type: dict
|
||||
contains:
|
||||
health_monitors:
|
||||
description: Maximum number of health monitors that can be
|
||||
created.
|
||||
type: int
|
||||
l7_policies:
|
||||
description: The maximum amount of L7 policies you can
|
||||
create.
|
||||
type: int
|
||||
listeners:
|
||||
description: The maximum number of listeners you can create
|
||||
type: int
|
||||
load_balancers:
|
||||
description: The maximum amount of load balancers one can
|
||||
create
|
||||
type: int
|
||||
members:
|
||||
description: The maximum amount of members for
|
||||
loadbalancer.
|
||||
type: int
|
||||
pools:
|
||||
description: The maximum amount of pools one can create.
|
||||
type: int
|
||||
|
||||
network:
|
||||
description: Network service quotas
|
||||
type: dict
|
||||
@@ -234,16 +273,9 @@ quotas:
|
||||
floating_ips:
|
||||
description: Number of floating IP's to allow.
|
||||
type: int
|
||||
load_balancers:
|
||||
description: The maximum amount of load balancers one can
|
||||
create
|
||||
type: int
|
||||
networks:
|
||||
description: Number of networks to allow.
|
||||
type: int
|
||||
pools:
|
||||
description: The maximum amount of pools one can create.
|
||||
type: int
|
||||
ports:
|
||||
description: Number of Network ports to allow, this needs
|
||||
to be greater than the instances limit.
|
||||
@@ -312,9 +344,7 @@ quotas:
|
||||
server_groups: 10,
|
||||
network:
|
||||
floating_ips: 50,
|
||||
load_balancers: 10,
|
||||
networks: 10,
|
||||
pools: 10,
|
||||
ports: 160,
|
||||
rbac_policies: 10,
|
||||
routers: 10,
|
||||
@@ -330,6 +360,13 @@ quotas:
|
||||
per_volume_gigabytes: -1,
|
||||
snapshots: 10,
|
||||
volumes: 10,
|
||||
load_balancer:
|
||||
health_monitors: 10,
|
||||
load_balancers: 10,
|
||||
l7_policies: 10,
|
||||
listeners: 10,
|
||||
pools: 5,
|
||||
members: 5,
|
||||
'''
|
||||
|
||||
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
|
||||
@@ -337,9 +374,8 @@ from collections import defaultdict
|
||||
|
||||
|
||||
class QuotaModule(OpenStackModule):
|
||||
# TODO: Add missing network quota options 'check_limit', 'health_monitors',
|
||||
# 'l7_policies', 'listeners' to argument_spec, DOCUMENTATION and
|
||||
# RETURN docstrings
|
||||
# TODO: Add missing network quota options 'check_limit'
|
||||
# to argument_spec, DOCUMENTATION and RETURN docstrings
|
||||
argument_spec = dict(
|
||||
backup_gigabytes=dict(type='int'),
|
||||
backups=dict(type='int'),
|
||||
@@ -350,6 +386,7 @@ class QuotaModule(OpenStackModule):
|
||||
'network_floating_ips']),
|
||||
gigabytes=dict(type='int'),
|
||||
groups=dict(type='int'),
|
||||
health_monitors=dict(type='int'),
|
||||
injected_file_content_bytes=dict(type='int',
|
||||
aliases=['injected_file_size']),
|
||||
injected_file_path_bytes=dict(type='int',
|
||||
@@ -357,8 +394,11 @@ class QuotaModule(OpenStackModule):
|
||||
injected_files=dict(type='int'),
|
||||
instances=dict(type='int'),
|
||||
key_pairs=dict(type='int', no_log=False),
|
||||
l7_policies=dict(type='int'),
|
||||
listeners=dict(type='int'),
|
||||
load_balancers=dict(type='int', aliases=['loadbalancer']),
|
||||
metadata_items=dict(type='int'),
|
||||
members=dict(type='int'),
|
||||
name=dict(required=True),
|
||||
networks=dict(type='int', aliases=['network']),
|
||||
per_volume_gigabytes=dict(type='int'),
|
||||
@@ -382,9 +422,9 @@ class QuotaModule(OpenStackModule):
|
||||
supports_check_mode=True
|
||||
)
|
||||
|
||||
# Some attributes in quota resources don't exist in the api anymore, mostly
|
||||
# compute quotas that were simply network proxies. This map allows marking
|
||||
# them to be skipped.
|
||||
# Some attributes in quota resources don't exist in the api anymore, e.g.
|
||||
# compute quotas that were simply network proxies, and pre-Octavia network
|
||||
# quotas. This map allows marking them to be skipped.
|
||||
exclusion_map = {
|
||||
'compute': {
|
||||
# 'fixed_ips', # Available until Nova API version 2.35
|
||||
@@ -397,7 +437,16 @@ class QuotaModule(OpenStackModule):
|
||||
# 'injected_file_path_bytes', # Nova API
|
||||
# 'injected_files', # version 2.56
|
||||
},
|
||||
'network': {'name'},
|
||||
'load_balancer': {'name'},
|
||||
'network': {
|
||||
'name',
|
||||
'l7_policies',
|
||||
'load_balancers',
|
||||
'loadbalancer',
|
||||
'health_monitors',
|
||||
'pools',
|
||||
'listeners',
|
||||
},
|
||||
'volume': {'name'},
|
||||
}
|
||||
|
||||
@@ -409,12 +458,18 @@ class QuotaModule(OpenStackModule):
|
||||
self.warn('Block storage service aka volume service is not'
|
||||
' supported by your cloud. Ignoring volume quotas.')
|
||||
|
||||
if self.conn.has_service('load-balancer'):
|
||||
quota['load_balancer'] = self.conn.load_balancer.get_quota(
|
||||
project.id)
|
||||
else:
|
||||
self.warn('Loadbalancer service is not supported by your'
|
||||
' cloud. Ignoring loadbalancer quotas.')
|
||||
|
||||
if self.conn.has_service('network'):
|
||||
quota['network'] = self.conn.network.get_quota(project.id)
|
||||
else:
|
||||
self.warn('Network service is not supported by your cloud.'
|
||||
' Ignoring network quotas.')
|
||||
|
||||
quota['compute'] = self.conn.compute.get_quota_set(project.id)
|
||||
|
||||
return quota
|
||||
@@ -452,7 +507,6 @@ class QuotaModule(OpenStackModule):
|
||||
|
||||
# Get current quota values
|
||||
quotas = self._get_quotas(project)
|
||||
|
||||
changed = False
|
||||
|
||||
if self.ansible.check_mode:
|
||||
@@ -468,6 +522,8 @@ class QuotaModule(OpenStackModule):
|
||||
self.conn.network.delete_quota(project.id)
|
||||
if 'volume' in quotas:
|
||||
self.conn.block_storage.revert_quota_set(project)
|
||||
if 'load_balancer' in quotas:
|
||||
self.conn.load_balancer.delete_quota(project.id)
|
||||
|
||||
# Necessary since we can't tell what the default quotas are
|
||||
quotas = self._get_quotas(project)
|
||||
@@ -485,6 +541,10 @@ class QuotaModule(OpenStackModule):
|
||||
if 'network' in changes:
|
||||
quotas['network'] = self.conn.network.update_quota(
|
||||
project.id, **changes['network'])
|
||||
if 'load_balancer' in changes:
|
||||
quotas['load_balancer'] = \
|
||||
self.conn.load_balancer.update_quota(
|
||||
project.id, **changes['load_balancer'])
|
||||
changed = True
|
||||
|
||||
quotas = {k: v.to_dict(computed=False) for k, v in quotas.items()}
|
||||
|
||||
@@ -239,7 +239,11 @@ class DnsRecordsetModule(OpenStackModule):
|
||||
elif self._needs_update(kwargs, recordset):
|
||||
recordset = self.conn.dns.update_recordset(recordset, **kwargs)
|
||||
changed = True
|
||||
self.exit_json(changed=changed, recordset=recordset)
|
||||
# NOTE(gtema): this is a workaround to temporarily bring the
|
||||
# zone_id param back which may not me populated by SDK
|
||||
rs = recordset.to_dict(computed=False)
|
||||
rs["zone_id"] = zone.id
|
||||
self.exit_json(changed=changed, recordset=rs)
|
||||
elif state == 'absent' and recordset is not None:
|
||||
self.conn.dns.delete_recordset(recordset)
|
||||
changed = True
|
||||
|
||||
@@ -19,7 +19,9 @@ options:
|
||||
- Valid only with keystone version 3.
|
||||
- Required if I(project) is not specified.
|
||||
- When I(project) is specified, then I(domain) will not be used for
|
||||
scoping the role association, only for finding resources.
|
||||
scoping the role association, only for finding resources. Deprecated
|
||||
for finding resources, please use I(group_domain), I(project_domain),
|
||||
I(role_domain), or I(user_domain).
|
||||
- "When scoping the role association, I(project) has precedence over
|
||||
I(domain) and I(domain) has precedence over I(system): When I(project)
|
||||
is specified, then I(domain) and I(system) are not used for role
|
||||
@@ -32,24 +34,45 @@ options:
|
||||
- Valid only with keystone version 3.
|
||||
- If I(group) is not specified, then I(user) is required. Both may not be
|
||||
specified at the same time.
|
||||
- You can supply I(group_domain) or the deprecated usage of I(domain) to
|
||||
find group resources.
|
||||
type: str
|
||||
group_domain:
|
||||
description:
|
||||
- Name or ID for the domain.
|
||||
- Valid only with keystone version 3.
|
||||
- Only valid for finding group resources.
|
||||
type: str
|
||||
project:
|
||||
description:
|
||||
- Name or ID of the project to scope the role association to.
|
||||
- If you are using keystone version 2, then this value is required.
|
||||
- When I(project) is specified, then I(domain) will not be used for
|
||||
scoping the role association, only for finding resources.
|
||||
scoping the role association, only for finding resources. Prefer
|
||||
I(group_domain) over I(domain).
|
||||
- "When scoping the role association, I(project) has precedence over
|
||||
I(domain) and I(domain) has precedence over I(system): When I(project)
|
||||
is specified, then I(domain) and I(system) are not used for role
|
||||
association. When I(domain) is specified, then I(system) will not be
|
||||
used for role association."
|
||||
type: str
|
||||
project_domain:
|
||||
description:
|
||||
- Name or ID for the domain.
|
||||
- Valid only with keystone version 3.
|
||||
- Only valid for finding project resources.
|
||||
type: str
|
||||
role:
|
||||
description:
|
||||
- Name or ID for the role.
|
||||
required: true
|
||||
type: str
|
||||
role_domain:
|
||||
description:
|
||||
- Name or ID for the domain.
|
||||
- Valid only with keystone version 3.
|
||||
- Only valid for finding role resources.
|
||||
type: str
|
||||
state:
|
||||
description:
|
||||
- Should the roles be present or absent on the user.
|
||||
@@ -73,6 +96,12 @@ options:
|
||||
- If I(user) is not specified, then I(group) is required. Both may not be
|
||||
specified at the same time.
|
||||
type: str
|
||||
user_domain:
|
||||
description:
|
||||
- Name or ID for the domain.
|
||||
- Valid only with keystone version 3.
|
||||
- Only valid for finding user resources.
|
||||
type: str
|
||||
extends_documentation_fragment:
|
||||
- openstack.cloud.openstack
|
||||
'''
|
||||
@@ -101,11 +130,15 @@ class IdentityRoleAssignmentModule(OpenStackModule):
|
||||
argument_spec = dict(
|
||||
domain=dict(),
|
||||
group=dict(),
|
||||
group_domain=dict(type='str'),
|
||||
project=dict(),
|
||||
project_domain=dict(type='str'),
|
||||
role=dict(required=True),
|
||||
role_domain=dict(type='str'),
|
||||
state=dict(default='present', choices=['absent', 'present']),
|
||||
system=dict(),
|
||||
user=dict(),
|
||||
user_domain=dict(type='str'),
|
||||
)
|
||||
|
||||
module_kwargs = dict(
|
||||
@@ -113,17 +146,33 @@ class IdentityRoleAssignmentModule(OpenStackModule):
|
||||
('user', 'group'),
|
||||
('domain', 'project', 'system'),
|
||||
],
|
||||
mutually_exclusive=[
|
||||
('user', 'group'),
|
||||
('project', 'system'), # domain should be part of this
|
||||
],
|
||||
supports_check_mode=True
|
||||
)
|
||||
|
||||
def _find_domain_id(self, domain):
|
||||
if domain is not None:
|
||||
domain = self.conn.identity.find_domain(domain,
|
||||
ignore_missing=False)
|
||||
return dict(domain_id=domain['id'])
|
||||
return dict()
|
||||
|
||||
def run(self):
|
||||
filters = {}
|
||||
find_filters = {}
|
||||
kwargs = {}
|
||||
group_find_filters = {}
|
||||
project_find_filters = {}
|
||||
role_find_filters = {}
|
||||
user_find_filters = {}
|
||||
|
||||
role_find_filters.update(self._find_domain_id(
|
||||
self.params['role_domain']))
|
||||
role_name_or_id = self.params['role']
|
||||
role = self.conn.identity.find_role(role_name_or_id,
|
||||
ignore_missing=False)
|
||||
ignore_missing=False,
|
||||
**role_find_filters)
|
||||
filters['role_id'] = role['id']
|
||||
|
||||
domain_name_or_id = self.params['domain']
|
||||
@@ -131,22 +180,31 @@ class IdentityRoleAssignmentModule(OpenStackModule):
|
||||
domain = self.conn.identity.find_domain(
|
||||
domain_name_or_id, ignore_missing=False)
|
||||
filters['scope_domain_id'] = domain['id']
|
||||
find_filters['domain_id'] = domain['id']
|
||||
kwargs['domain'] = domain['id']
|
||||
group_find_filters['domain_id'] = domain['id']
|
||||
project_find_filters['domain_id'] = domain['id']
|
||||
user_find_filters['domain_id'] = domain['id']
|
||||
|
||||
user_name_or_id = self.params['user']
|
||||
if user_name_or_id is not None:
|
||||
user_find_filters.update(self._find_domain_id(
|
||||
self.params['user_domain']))
|
||||
user = self.conn.identity.find_user(
|
||||
user_name_or_id, ignore_missing=False, **find_filters)
|
||||
user_name_or_id, ignore_missing=False,
|
||||
**user_find_filters)
|
||||
filters['user_id'] = user['id']
|
||||
kwargs['user'] = user['id']
|
||||
else:
|
||||
user = None
|
||||
|
||||
group_name_or_id = self.params['group']
|
||||
if group_name_or_id is not None:
|
||||
group_find_filters.update(self._find_domain_id(
|
||||
self.params['group_domain']))
|
||||
group = self.conn.identity.find_group(
|
||||
group_name_or_id, ignore_missing=False, **find_filters)
|
||||
group_name_or_id, ignore_missing=False,
|
||||
**group_find_filters)
|
||||
filters['group_id'] = group['id']
|
||||
kwargs['group'] = group['id']
|
||||
else:
|
||||
group = None
|
||||
|
||||
system_name = self.params['system']
|
||||
if system_name is not None:
|
||||
@@ -154,14 +212,14 @@ class IdentityRoleAssignmentModule(OpenStackModule):
|
||||
if 'scope_domain_id' not in filters:
|
||||
filters['scope.system'] = system_name
|
||||
|
||||
kwargs['system'] = system_name
|
||||
|
||||
project_name_or_id = self.params['project']
|
||||
if project_name_or_id is not None:
|
||||
project_find_filters.update(self._find_domain_id(
|
||||
self.params['project_domain']))
|
||||
project = self.conn.identity.find_project(
|
||||
project_name_or_id, ignore_missing=False, **find_filters)
|
||||
project_name_or_id, ignore_missing=False,
|
||||
**project_find_filters)
|
||||
filters['scope_project_id'] = project['id']
|
||||
kwargs['project'] = project['id']
|
||||
|
||||
# project has precedence over domain and system
|
||||
filters.pop('scope_domain_id', None)
|
||||
@@ -176,10 +234,50 @@ class IdentityRoleAssignmentModule(OpenStackModule):
|
||||
or (state == 'absent' and role_assignments)))
|
||||
|
||||
if state == 'present' and not role_assignments:
|
||||
self.conn.grant_role(role['id'], **kwargs)
|
||||
if 'scope_domain_id' in filters:
|
||||
if user is not None:
|
||||
self.conn.identity.assign_domain_role_to_user(
|
||||
filters['scope_domain_id'], user, role)
|
||||
else:
|
||||
self.conn.identity.assign_domain_role_to_group(
|
||||
filters['scope_domain_id'], group, role)
|
||||
elif 'scope_project_id' in filters:
|
||||
if user is not None:
|
||||
self.conn.identity.assign_project_role_to_user(
|
||||
filters['scope_project_id'], user, role)
|
||||
else:
|
||||
self.conn.identity.assign_project_role_to_group(
|
||||
filters['scope_project_id'], group, role)
|
||||
elif 'scope.system' in filters:
|
||||
if user is not None:
|
||||
self.conn.identity.assign_system_role_to_user(
|
||||
user, role, filters['scope.system'])
|
||||
else:
|
||||
self.conn.identity.assign_system_role_to_group(
|
||||
group, role, filters['scope.system'])
|
||||
self.exit_json(changed=True)
|
||||
elif state == 'absent' and role_assignments:
|
||||
self.conn.revoke_role(role['id'], **kwargs)
|
||||
if 'scope_domain_id' in filters:
|
||||
if user is not None:
|
||||
self.conn.identity.unassign_domain_role_from_user(
|
||||
filters['scope_domain_id'], user, role)
|
||||
else:
|
||||
self.conn.identity.unassign_domain_role_from_group(
|
||||
filters['scope_domain_id'], group, role)
|
||||
elif 'scope_project_id' in filters:
|
||||
if user is not None:
|
||||
self.conn.identity.unassign_project_role_from_user(
|
||||
filters['scope_project_id'], user, role)
|
||||
else:
|
||||
self.conn.identity.unassign_project_role_from_group(
|
||||
filters['scope_project_id'], group, role)
|
||||
elif 'scope.system' in filters:
|
||||
if user is not None:
|
||||
self.conn.identity.unassign_system_role_from_user(
|
||||
user, role, filters['scope.system'])
|
||||
else:
|
||||
self.conn.identity.unassign_system_role_from_group(
|
||||
group, role, filters['scope.system'])
|
||||
self.exit_json(changed=True)
|
||||
else:
|
||||
self.exit_json(changed=False)
|
||||
|
||||
@@ -372,6 +372,10 @@ class RouterModule(OpenStackModule):
|
||||
for p in external_fixed_ips:
|
||||
if 'ip_address' in p:
|
||||
req_fip_map[p['subnet_id']].add(p['ip_address'])
|
||||
elif p['subnet_id'] in cur_fip_map:
|
||||
# handle idempotence of updating with no explicit ip
|
||||
req_fip_map[p['subnet_id']].update(
|
||||
cur_fip_map[p['subnet_id']])
|
||||
|
||||
# Check if external ip addresses need to be added
|
||||
for fip in external_fixed_ips:
|
||||
@@ -464,7 +468,7 @@ class RouterModule(OpenStackModule):
|
||||
subnet = self.conn.network.find_subnet(
|
||||
iface['subnet_id'], ignore_missing=False, **filters)
|
||||
fip = dict(subnet_id=subnet.id)
|
||||
if 'ip_address' in iface:
|
||||
if iface.get('ip_address', None) is not None:
|
||||
fip['ip_address'] = iface['ip_address']
|
||||
external_fixed_ips.append(fip)
|
||||
|
||||
|
||||
@@ -205,6 +205,12 @@ options:
|
||||
choices: [present, absent]
|
||||
default: present
|
||||
type: str
|
||||
tags:
|
||||
description:
|
||||
- A list of tags should be added to instance
|
||||
type: list
|
||||
elements: str
|
||||
default: []
|
||||
terminate_volume:
|
||||
description:
|
||||
- If C(true), delete volume when deleting the instance and if it has
|
||||
@@ -756,6 +762,7 @@ server:
|
||||
description: A list of associated tags.
|
||||
returned: success
|
||||
type: list
|
||||
elements: str
|
||||
task_state:
|
||||
description: The task state of this server.
|
||||
returned: success
|
||||
@@ -825,6 +832,7 @@ class ServerModule(OpenStackModule):
|
||||
scheduler_hints=dict(type='dict'),
|
||||
security_groups=dict(default=[], type='list', elements='str'),
|
||||
state=dict(default='present', choices=['absent', 'present']),
|
||||
tags=dict(type='list', default=[], elements='str'),
|
||||
terminate_volume=dict(default=False, type='bool'),
|
||||
userdata=dict(),
|
||||
volume_size=dict(type='int'),
|
||||
@@ -890,7 +898,8 @@ class ServerModule(OpenStackModule):
|
||||
return {
|
||||
**self._build_update_ips(server),
|
||||
**self._build_update_security_groups(server),
|
||||
**self._build_update_server(server)}
|
||||
**self._build_update_server(server),
|
||||
**self._build_update_tags(server)}
|
||||
|
||||
def _build_update_ips(self, server):
|
||||
auto_ip = self.params['auto_ip']
|
||||
@@ -1030,6 +1039,13 @@ class ServerModule(OpenStackModule):
|
||||
|
||||
return update
|
||||
|
||||
def _build_update_tags(self, server):
|
||||
required_tags = self.params.get('tags')
|
||||
if set(server["tags"]) == set(required_tags):
|
||||
return {}
|
||||
update = dict(tags=required_tags)
|
||||
return update
|
||||
|
||||
def _create(self):
|
||||
for k in ['auto_ip', 'floating_ips', 'floating_ip_pools']:
|
||||
if self.params[k] \
|
||||
@@ -1072,7 +1088,7 @@ class ServerModule(OpenStackModule):
|
||||
for k in ['auto_ip', 'availability_zone', 'boot_from_volume',
|
||||
'boot_volume', 'config_drive', 'description', 'key_name',
|
||||
'name', 'network', 'reuse_ips', 'scheduler_hints',
|
||||
'security_groups', 'terminate_volume', 'timeout',
|
||||
'security_groups', 'tags', 'terminate_volume', 'timeout',
|
||||
'userdata', 'volume_size', 'volumes', 'wait']:
|
||||
if self.params[k] is not None:
|
||||
args[k] = self.params[k]
|
||||
@@ -1104,6 +1120,7 @@ class ServerModule(OpenStackModule):
|
||||
def _update(self, server, update):
|
||||
server = self._update_ips(server, update)
|
||||
server = self._update_security_groups(server, update)
|
||||
server = self._update_tags(server, update)
|
||||
server = self._update_server(server, update)
|
||||
# Refresh server attributes after security groups etc. have changed
|
||||
#
|
||||
@@ -1176,6 +1193,16 @@ class ServerModule(OpenStackModule):
|
||||
# be postponed until all updates have been applied.
|
||||
return server
|
||||
|
||||
def _update_tags(self, server, update):
|
||||
tags = update.get('tags')
|
||||
|
||||
self.conn.compute.put(
|
||||
"/servers/{server_id}/tags".format(server_id=server['id']),
|
||||
json={"tags": tags},
|
||||
microversion="2.26"
|
||||
)
|
||||
return server
|
||||
|
||||
def _parse_metadata(self, metadata):
|
||||
if not metadata:
|
||||
return {}
|
||||
|
||||
@@ -136,6 +136,9 @@ class ServerActionModule(OpenStackModule):
|
||||
# rebuild does not depend on state
|
||||
will_change = (
|
||||
(action == 'rebuild')
|
||||
# `reboot_*` actions do not change state, servers remain `ACTIVE`
|
||||
or (action == 'reboot_hard')
|
||||
or (action == 'reboot_soft')
|
||||
or (action == 'lock' and not server['is_locked'])
|
||||
or (action == 'unlock' and server['is_locked'])
|
||||
or server.status.lower() not in [a.lower()
|
||||
|
||||
520
plugins/modules/share_type.py
Normal file
520
plugins/modules/share_type.py
Normal file
@@ -0,0 +1,520 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright (c) 2025 VEXXHOST, Inc.
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
DOCUMENTATION = r"""
|
||||
---
|
||||
module: share_type
|
||||
short_description: Manage OpenStack share type
|
||||
author: OpenStack Ansible SIG
|
||||
description:
|
||||
- Add, remove or update share types in OpenStack Manila.
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Share type name or id.
|
||||
- For private share types, the UUID must be used instead of name.
|
||||
required: true
|
||||
type: str
|
||||
description:
|
||||
description:
|
||||
- Description of the share type.
|
||||
type: str
|
||||
extra_specs:
|
||||
description:
|
||||
- Dictionary of share type extra specifications
|
||||
type: dict
|
||||
is_public:
|
||||
description:
|
||||
- Make share type accessible to the public.
|
||||
- Can be updated after creation using Manila API direct updates.
|
||||
type: bool
|
||||
default: true
|
||||
driver_handles_share_servers:
|
||||
description:
|
||||
- Boolean flag indicating whether share servers are managed by the driver.
|
||||
- Required for share type creation.
|
||||
- This is automatically added to extra_specs as 'driver_handles_share_servers'.
|
||||
type: bool
|
||||
default: true
|
||||
state:
|
||||
description:
|
||||
- Indicate desired state of the resource.
|
||||
choices: ['present', 'absent']
|
||||
default: present
|
||||
type: str
|
||||
extends_documentation_fragment:
|
||||
- openstack.cloud.openstack
|
||||
"""
|
||||
|
||||
EXAMPLES = r"""
|
||||
- name: Delete share type by name
|
||||
openstack.cloud.share_type:
|
||||
name: test_share_type
|
||||
state: absent
|
||||
|
||||
- name: Delete share type by id
|
||||
openstack.cloud.share_type:
|
||||
name: fbadfa6b-5f17-4c26-948e-73b94de57b42
|
||||
state: absent
|
||||
|
||||
- name: Create share type
|
||||
openstack.cloud.share_type:
|
||||
name: manila-generic-share
|
||||
state: present
|
||||
driver_handles_share_servers: true
|
||||
extra_specs:
|
||||
share_backend_name: GENERIC_BACKEND
|
||||
snapshot_support: true
|
||||
create_share_from_snapshot_support: true
|
||||
description: Generic share type
|
||||
is_public: true
|
||||
"""
|
||||
|
||||
RETURN = """
|
||||
share_type:
|
||||
description: Dictionary describing share type
|
||||
returned: On success when I(state) is 'present'
|
||||
type: dict
|
||||
contains:
|
||||
name:
|
||||
description: share type name
|
||||
returned: success
|
||||
type: str
|
||||
sample: manila-generic-share
|
||||
extra_specs:
|
||||
description: share type extra specifications
|
||||
returned: success
|
||||
type: dict
|
||||
sample: {"share_backend_name": "GENERIC_BACKEND", "snapshot_support": "true"}
|
||||
is_public:
|
||||
description: whether the share type is public
|
||||
returned: success
|
||||
type: bool
|
||||
sample: True
|
||||
description:
|
||||
description: share type description
|
||||
returned: success
|
||||
type: str
|
||||
sample: Generic share type
|
||||
driver_handles_share_servers:
|
||||
description: whether driver handles share servers
|
||||
returned: success
|
||||
type: bool
|
||||
sample: true
|
||||
id:
|
||||
description: share type uuid
|
||||
returned: success
|
||||
type: str
|
||||
sample: b75d8c5c-a6d8-4a5d-8c86-ef4f1298525d
|
||||
"""
|
||||
|
||||
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import (
|
||||
OpenStackModule,
|
||||
)
|
||||
|
||||
# Manila API microversion 2.50 provides complete share type information
|
||||
# including is_default field and description
|
||||
# Reference: https://docs.openstack.org/api-ref/shared-file-system/#show-share-type-detail
|
||||
MANILA_MICROVERSION = "2.50"
|
||||
|
||||
|
||||
class ShareTypeModule(OpenStackModule):
|
||||
argument_spec = dict(
|
||||
name=dict(type="str", required=True),
|
||||
description=dict(type="str", required=False),
|
||||
extra_specs=dict(type="dict", required=False),
|
||||
is_public=dict(type="bool", default=True),
|
||||
driver_handles_share_servers=dict(type="bool", default=True),
|
||||
state=dict(type="str", default="present", choices=["absent", "present"]),
|
||||
)
|
||||
module_kwargs = dict(
|
||||
required_if=[("state", "present", ["driver_handles_share_servers"])],
|
||||
supports_check_mode=True,
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def _extract_result(details):
|
||||
if details is not None:
|
||||
if hasattr(details, "to_dict"):
|
||||
result = details.to_dict(computed=False)
|
||||
elif isinstance(details, dict):
|
||||
result = details.copy()
|
||||
else:
|
||||
result = dict(details) if details else {}
|
||||
|
||||
# Normalize is_public field from API response
|
||||
if result and "os-share-type-access:is_public" in result:
|
||||
result["is_public"] = result["os-share-type-access:is_public"]
|
||||
elif result and "share_type_access:is_public" in result:
|
||||
result["is_public"] = result["share_type_access:is_public"]
|
||||
|
||||
return result
|
||||
return {}
|
||||
|
||||
def _find_share_type(self, name_or_id):
|
||||
"""
|
||||
Find share type by name or ID with comprehensive information.
|
||||
|
||||
Uses direct Manila API calls since SDK methods are not available.
|
||||
Handles both public and private share types.
|
||||
"""
|
||||
# Try direct access first for complete information
|
||||
share_type = self._find_by_direct_access(name_or_id)
|
||||
if share_type:
|
||||
return share_type
|
||||
|
||||
# If direct access fails, try searching in public listing
|
||||
# This handles cases where we have the name but need to find the ID
|
||||
try:
|
||||
response = self.conn.shared_file_system.get("/types")
|
||||
share_types = response.json().get("share_types", [])
|
||||
|
||||
for share_type in share_types:
|
||||
if share_type["name"] == name_or_id or share_type["id"] == name_or_id:
|
||||
# Found by name, now get complete info using the ID
|
||||
result = self._find_by_direct_access(share_type["id"])
|
||||
if result:
|
||||
return result
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
return None
|
||||
|
||||
def _find_by_direct_access(self, name_or_id):
|
||||
"""
|
||||
Find share type by direct access using Manila API.
|
||||
|
||||
Uses microversion to get complete information including description and is_default.
|
||||
Falls back to basic API if microversion is not supported.
|
||||
"""
|
||||
# Try with microversion first for complete information
|
||||
try:
|
||||
response = self.conn.shared_file_system.get(
|
||||
f"/types/{name_or_id}", microversion=MANILA_MICROVERSION
|
||||
)
|
||||
share_type_data = response.json().get("share_type", {})
|
||||
if share_type_data:
|
||||
return share_type_data
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# Fallback: try without microversion for basic information
|
||||
try:
|
||||
response = self.conn.shared_file_system.get(f"/types/{name_or_id}")
|
||||
share_type_data = response.json().get("share_type", {})
|
||||
if share_type_data:
|
||||
return share_type_data
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
return None
|
||||
|
||||
def run(self):
|
||||
state = self.params["state"]
|
||||
name_or_id = self.params["name"]
|
||||
|
||||
# Find existing share type (similar to volume_type.py pattern)
|
||||
share_type = self._find_share_type(name_or_id)
|
||||
|
||||
if self.ansible.check_mode:
|
||||
self.exit_json(changed=self._will_change(state, share_type))
|
||||
|
||||
if state == "present" and not share_type:
|
||||
# Create type
|
||||
create_result = self._create()
|
||||
share_type = self._extract_result(create_result)
|
||||
self.exit_json(changed=True, share_type=share_type)
|
||||
|
||||
elif state == "present" and share_type:
|
||||
# Update type
|
||||
update = self._build_update(share_type)
|
||||
update_result = self._update(share_type, update)
|
||||
share_type = self._extract_result(update_result)
|
||||
self.exit_json(changed=bool(update), share_type=share_type)
|
||||
|
||||
elif state == "absent" and share_type:
|
||||
# Delete type
|
||||
self._delete(share_type)
|
||||
self.exit_json(changed=True)
|
||||
|
||||
else:
|
||||
# state == 'absent' and not share_type
|
||||
self.exit_json(changed=False)
|
||||
|
||||
def _build_update(self, share_type):
|
||||
return {
|
||||
**self._build_update_extra_specs(share_type),
|
||||
**self._build_update_share_type(share_type),
|
||||
}
|
||||
|
||||
def _build_update_extra_specs(self, share_type):
|
||||
update = {}
|
||||
|
||||
old_extra_specs = share_type.get("extra_specs", {})
|
||||
|
||||
# Build the complete new extra specs including driver_handles_share_servers
|
||||
new_extra_specs = {}
|
||||
|
||||
# Add driver_handles_share_servers (always required)
|
||||
if self.params.get("driver_handles_share_servers") is not None:
|
||||
new_extra_specs["driver_handles_share_servers"] = str(
|
||||
self.params["driver_handles_share_servers"]
|
||||
).title()
|
||||
|
||||
# Add user-defined extra specs
|
||||
if self.params.get("extra_specs"):
|
||||
new_extra_specs.update(
|
||||
{k: str(v) for k, v in self.params["extra_specs"].items()}
|
||||
)
|
||||
|
||||
delete_extra_specs_keys = set(old_extra_specs.keys()) - set(
|
||||
new_extra_specs.keys()
|
||||
)
|
||||
|
||||
if delete_extra_specs_keys:
|
||||
update["delete_extra_specs_keys"] = delete_extra_specs_keys
|
||||
|
||||
if old_extra_specs != new_extra_specs:
|
||||
update["create_extra_specs"] = new_extra_specs
|
||||
|
||||
return update
|
||||
|
||||
def _build_update_share_type(self, share_type):
|
||||
update = {}
|
||||
# Only allow description updates - name is used for identification
|
||||
allowed_attributes = ["description"]
|
||||
|
||||
# Handle is_public updates - CLI supports this, so we should too
|
||||
# Always check is_public since it has a default value of True
|
||||
current_is_public = share_type.get(
|
||||
"os-share-type-access:is_public",
|
||||
share_type.get("share_type_access:is_public"),
|
||||
)
|
||||
requested_is_public = self.params["is_public"] # Will be True by default now
|
||||
if current_is_public != requested_is_public:
|
||||
# Mark this as needing a special access update
|
||||
update["update_access"] = {
|
||||
"is_public": requested_is_public,
|
||||
"share_type_id": share_type.get("id"),
|
||||
}
|
||||
|
||||
type_attributes = {
|
||||
k: self.params[k]
|
||||
for k in allowed_attributes
|
||||
if k in self.params
|
||||
and self.params.get(k) is not None
|
||||
and self.params.get(k) != share_type.get(k)
|
||||
}
|
||||
|
||||
if type_attributes:
|
||||
update["type_attributes"] = type_attributes
|
||||
|
||||
return update
|
||||
|
||||
def _create(self):
|
||||
share_type_attrs = {"name": self.params["name"]}
|
||||
|
||||
if self.params.get("description") is not None:
|
||||
share_type_attrs["description"] = self.params["description"]
|
||||
|
||||
# Handle driver_handles_share_servers - this is the key required parameter
|
||||
extra_specs = {}
|
||||
if self.params.get("driver_handles_share_servers") is not None:
|
||||
extra_specs["driver_handles_share_servers"] = str(
|
||||
self.params["driver_handles_share_servers"]
|
||||
).title()
|
||||
|
||||
# Add user-defined extra specs
|
||||
if self.params.get("extra_specs"):
|
||||
extra_specs.update(
|
||||
{k: str(v) for k, v in self.params["extra_specs"].items()}
|
||||
)
|
||||
|
||||
if extra_specs:
|
||||
share_type_attrs["extra_specs"] = extra_specs
|
||||
|
||||
# Handle is_public parameter - field name depends on API version
|
||||
if self.params.get("is_public") is not None:
|
||||
# For microversion (API 2.7+), use share_type_access:is_public
|
||||
# For older versions, use os-share-type-access:is_public
|
||||
share_type_attrs["share_type_access:is_public"] = self.params["is_public"]
|
||||
# Also include legacy field for compatibility
|
||||
share_type_attrs["os-share-type-access:is_public"] = self.params[
|
||||
"is_public"
|
||||
]
|
||||
|
||||
try:
|
||||
payload = {"share_type": share_type_attrs}
|
||||
|
||||
# Try with microversion first (supports share_type_access:is_public)
|
||||
try:
|
||||
response = self.conn.shared_file_system.post(
|
||||
"/types", json=payload, microversion=MANILA_MICROVERSION
|
||||
)
|
||||
share_type_data = response.json().get("share_type", {})
|
||||
except Exception:
|
||||
# Fallback: try without microversion (uses os-share-type-access:is_public)
|
||||
# Remove the newer field name for older API compatibility
|
||||
if "share_type_access:is_public" in share_type_attrs:
|
||||
del share_type_attrs["share_type_access:is_public"]
|
||||
payload = {"share_type": share_type_attrs}
|
||||
response = self.conn.shared_file_system.post("/types", json=payload)
|
||||
share_type_data = response.json().get("share_type", {})
|
||||
|
||||
return share_type_data
|
||||
|
||||
except Exception as e:
|
||||
self.fail_json(msg=f"Failed to create share type: {str(e)}")
|
||||
|
||||
def _delete(self, share_type):
|
||||
# Use direct API call since SDK method may not exist
|
||||
try:
|
||||
share_type_id = (
|
||||
share_type.get("id") if isinstance(share_type, dict) else share_type.id
|
||||
)
|
||||
# Try with microversion first, fallback if not supported
|
||||
try:
|
||||
self.conn.shared_file_system.delete(
|
||||
f"/types/{share_type_id}", microversion=MANILA_MICROVERSION
|
||||
)
|
||||
except Exception:
|
||||
self.conn.shared_file_system.delete(f"/types/{share_type_id}")
|
||||
except Exception as e:
|
||||
self.fail_json(msg=f"Failed to delete share type: {str(e)}")
|
||||
|
||||
def _update(self, share_type, update):
|
||||
if not update:
|
||||
return share_type
|
||||
share_type = self._update_share_type(share_type, update)
|
||||
share_type = self._update_extra_specs(share_type, update)
|
||||
share_type = self._update_access(share_type, update)
|
||||
return share_type
|
||||
|
||||
def _update_extra_specs(self, share_type, update):
|
||||
share_type_id = (
|
||||
share_type.get("id") if isinstance(share_type, dict) else share_type.id
|
||||
)
|
||||
|
||||
delete_extra_specs_keys = update.get("delete_extra_specs_keys")
|
||||
if delete_extra_specs_keys:
|
||||
for key in delete_extra_specs_keys:
|
||||
try:
|
||||
# Try with microversion first, fallback if not supported
|
||||
try:
|
||||
self.conn.shared_file_system.delete(
|
||||
f"/types/{share_type_id}/extra_specs/{key}",
|
||||
microversion=MANILA_MICROVERSION,
|
||||
)
|
||||
except Exception:
|
||||
self.conn.shared_file_system.delete(
|
||||
f"/types/{share_type_id}/extra_specs/{key}"
|
||||
)
|
||||
except Exception as e:
|
||||
self.fail_json(msg=f"Failed to delete extra spec '{key}': {str(e)}")
|
||||
# refresh share_type information
|
||||
share_type = self._find_share_type(share_type_id)
|
||||
|
||||
create_extra_specs = update.get("create_extra_specs")
|
||||
if create_extra_specs:
|
||||
# Convert values to strings as Manila API expects string values
|
||||
string_specs = {k: str(v) for k, v in create_extra_specs.items()}
|
||||
try:
|
||||
# Try with microversion first, fallback if not supported
|
||||
try:
|
||||
self.conn.shared_file_system.post(
|
||||
f"/types/{share_type_id}/extra_specs",
|
||||
json={"extra_specs": string_specs},
|
||||
microversion=MANILA_MICROVERSION,
|
||||
)
|
||||
except Exception:
|
||||
self.conn.shared_file_system.post(
|
||||
f"/types/{share_type_id}/extra_specs",
|
||||
json={"extra_specs": string_specs},
|
||||
)
|
||||
except Exception as e:
|
||||
self.fail_json(msg=f"Failed to update extra specs: {str(e)}")
|
||||
# refresh share_type information
|
||||
share_type = self._find_share_type(share_type_id)
|
||||
|
||||
return share_type
|
||||
|
||||
def _update_access(self, share_type, update):
|
||||
"""Update share type access (public/private) using direct API update"""
|
||||
access_update = update.get("update_access")
|
||||
if not access_update:
|
||||
return share_type
|
||||
|
||||
share_type_id = access_update["share_type_id"]
|
||||
is_public = access_update["is_public"]
|
||||
|
||||
try:
|
||||
# Use direct update with share_type_access:is_public (works for both public and private)
|
||||
update_payload = {"share_type": {"share_type_access:is_public": is_public}}
|
||||
|
||||
try:
|
||||
self.conn.shared_file_system.put(
|
||||
f"/types/{share_type_id}",
|
||||
json=update_payload,
|
||||
microversion=MANILA_MICROVERSION,
|
||||
)
|
||||
except Exception:
|
||||
# Fallback: try with legacy field name for older API versions
|
||||
update_payload = {
|
||||
"share_type": {"os-share-type-access:is_public": is_public}
|
||||
}
|
||||
self.conn.shared_file_system.put(
|
||||
f"/types/{share_type_id}", json=update_payload
|
||||
)
|
||||
|
||||
# Refresh share type information after access change
|
||||
share_type = self._find_share_type(share_type_id)
|
||||
|
||||
except Exception as e:
|
||||
self.fail_json(msg=f"Failed to update share type access: {str(e)}")
|
||||
|
||||
return share_type
|
||||
|
||||
def _update_share_type(self, share_type, update):
|
||||
type_attributes = update.get("type_attributes")
|
||||
if type_attributes:
|
||||
share_type_id = (
|
||||
share_type.get("id") if isinstance(share_type, dict) else share_type.id
|
||||
)
|
||||
try:
|
||||
# Try with microversion first, fallback if not supported
|
||||
try:
|
||||
response = self.conn.shared_file_system.put(
|
||||
f"/types/{share_type_id}",
|
||||
json={"share_type": type_attributes},
|
||||
microversion=MANILA_MICROVERSION,
|
||||
)
|
||||
except Exception:
|
||||
response = self.conn.shared_file_system.put(
|
||||
f"/types/{share_type_id}", json={"share_type": type_attributes}
|
||||
)
|
||||
updated_type = response.json().get("share_type", {})
|
||||
return updated_type
|
||||
except Exception as e:
|
||||
self.fail_json(msg=f"Failed to update share type: {str(e)}")
|
||||
return share_type
|
||||
|
||||
def _will_change(self, state, share_type):
|
||||
if state == "present" and not share_type:
|
||||
return True
|
||||
if state == "present" and share_type:
|
||||
return bool(self._build_update(share_type))
|
||||
if state == "absent" and share_type:
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def main():
|
||||
module = ShareTypeModule()
|
||||
module()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
239
plugins/modules/share_type_info.py
Normal file
239
plugins/modules/share_type_info.py
Normal file
@@ -0,0 +1,239 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright (c) 2025 VEXXHOST, Inc.
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
DOCUMENTATION = r"""
|
||||
---
|
||||
module: share_type_info
|
||||
short_description: Get OpenStack share type details
|
||||
author: OpenStack Ansible SIG
|
||||
description:
|
||||
- Get share type details in OpenStack Manila.
|
||||
- Get share type access details for private share types.
|
||||
- Uses Manila API microversion 2.50 to retrieve complete share type information including is_default field.
|
||||
- Safely falls back to basic information if microversion 2.50 is not supported by the backend.
|
||||
- Private share types can only be accessed by UUID.
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Share type name or id.
|
||||
- For private share types, the UUID must be used instead of name.
|
||||
required: true
|
||||
type: str
|
||||
extends_documentation_fragment:
|
||||
- openstack.cloud.openstack
|
||||
"""
|
||||
|
||||
EXAMPLES = r"""
|
||||
- name: Get share type details
|
||||
openstack.cloud.share_type_info:
|
||||
name: manila-generic-share
|
||||
|
||||
- name: Get share type details by id
|
||||
openstack.cloud.share_type_info:
|
||||
name: fbadfa6b-5f17-4c26-948e-73b94de57b42
|
||||
"""
|
||||
|
||||
RETURN = """
|
||||
share_type:
|
||||
description: Dictionary describing share type
|
||||
returned: On success
|
||||
type: dict
|
||||
contains:
|
||||
id:
|
||||
description: share type uuid
|
||||
returned: success
|
||||
type: str
|
||||
sample: 59575cfc-3582-4efc-8eee-f47fcb25ea6b
|
||||
name:
|
||||
description: share type name
|
||||
returned: success
|
||||
type: str
|
||||
sample: default
|
||||
description:
|
||||
description:
|
||||
- share type description
|
||||
- Available when Manila API microversion 2.50 is supported
|
||||
- Falls back to empty string if microversion is not available
|
||||
returned: success
|
||||
type: str
|
||||
sample: "Default Manila share type"
|
||||
is_default:
|
||||
description:
|
||||
- whether this is the default share type
|
||||
- Retrieved from the API response when microversion 2.50 is supported
|
||||
- Falls back to null if microversion is not available or field is not present
|
||||
returned: success
|
||||
type: bool
|
||||
sample: true
|
||||
is_public:
|
||||
description: whether the share type is public (true) or private (false)
|
||||
returned: success
|
||||
type: bool
|
||||
sample: true
|
||||
required_extra_specs:
|
||||
description: Required extra specifications for the share type
|
||||
returned: success
|
||||
type: dict
|
||||
sample: {"driver_handles_share_servers": "True"}
|
||||
optional_extra_specs:
|
||||
description: Optional extra specifications for the share type
|
||||
returned: success
|
||||
type: dict
|
||||
sample: {"snapshot_support": "True", "create_share_from_snapshot_support": "True"}
|
||||
"""
|
||||
|
||||
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import (
|
||||
OpenStackModule,
|
||||
)
|
||||
|
||||
# Manila API microversion 2.50 provides complete share type information
|
||||
# including is_default field and description
|
||||
# Reference: https://docs.openstack.org/api-ref/shared-file-system/#show-share-type-detail
|
||||
MANILA_MICROVERSION = "2.50"
|
||||
|
||||
|
||||
class ShareTypeInfoModule(OpenStackModule):
|
||||
argument_spec = dict(name=dict(type="str", required=True))
|
||||
module_kwargs = dict(
|
||||
supports_check_mode=True,
|
||||
)
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
super(ShareTypeInfoModule, self).__init__(**kwargs)
|
||||
|
||||
def _find_share_type(self, name_or_id):
|
||||
"""
|
||||
Find share type by name or ID with comprehensive information.
|
||||
"""
|
||||
share_type = self._find_by_direct_access(name_or_id)
|
||||
if share_type:
|
||||
return share_type
|
||||
|
||||
# If direct access fails, try searching in public listing
|
||||
# This handles cases where we have the name but need to find the ID
|
||||
try:
|
||||
response = self.conn.shared_file_system.get("/types")
|
||||
share_types = response.json().get("share_types", [])
|
||||
|
||||
for share_type in share_types:
|
||||
if share_type["name"] == name_or_id or share_type["id"] == name_or_id:
|
||||
# Found by name, now get complete info using the ID
|
||||
result = self._find_by_direct_access(share_type["id"])
|
||||
if result:
|
||||
return result
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
return None
|
||||
|
||||
def _find_by_direct_access(self, name_or_id):
|
||||
"""
|
||||
Find share type by direct access (for private share types).
|
||||
"""
|
||||
try:
|
||||
response = self.conn.shared_file_system.get(
|
||||
f"/types/{name_or_id}", microversion=MANILA_MICROVERSION
|
||||
)
|
||||
share_type_data = response.json().get("share_type", {})
|
||||
if share_type_data:
|
||||
return share_type_data
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# Fallback: try without microversion for basic information
|
||||
try:
|
||||
response = self.conn.shared_file_system.get(f"/types/{name_or_id}")
|
||||
share_type_data = response.json().get("share_type", {})
|
||||
if share_type_data:
|
||||
return share_type_data
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
return None
|
||||
|
||||
def _normalize_share_type_dict(self, share_type_dict):
|
||||
"""
|
||||
Normalize share type dictionary to match CLI output format.
|
||||
"""
|
||||
# Extract extra specs information
|
||||
extra_specs = share_type_dict.get("extra_specs", {})
|
||||
required_extra_specs = share_type_dict.get("required_extra_specs", {})
|
||||
|
||||
# Optional extra specs are those in extra_specs but not in required_extra_specs
|
||||
optional_extra_specs = {
|
||||
key: value
|
||||
for key, value in extra_specs.items()
|
||||
if key not in required_extra_specs
|
||||
}
|
||||
|
||||
# Determine if this is the default share type
|
||||
# Use the is_default field from API response (available with microversion 2.50)
|
||||
# If not available (older API versions), default to None
|
||||
is_default = share_type_dict.get("is_default", None)
|
||||
|
||||
# Handle the description field - available through microversion 2.50
|
||||
# Convert None to empty string if API returns null
|
||||
description = share_type_dict.get("description") or ""
|
||||
|
||||
# Determine visibility - check both new and legacy field names
|
||||
# Use the same logic as share_type.py for consistency
|
||||
is_public = share_type_dict.get(
|
||||
"os-share-type-access:is_public",
|
||||
share_type_dict.get("share_type_access:is_public"),
|
||||
)
|
||||
|
||||
# Build the normalized dictionary matching CLI output
|
||||
normalized = {
|
||||
"id": share_type_dict.get("id"),
|
||||
"name": share_type_dict.get("name"),
|
||||
"is_public": is_public,
|
||||
"is_default": is_default,
|
||||
"required_extra_specs": required_extra_specs,
|
||||
"optional_extra_specs": optional_extra_specs,
|
||||
"description": description,
|
||||
}
|
||||
|
||||
return normalized
|
||||
|
||||
def run(self):
|
||||
"""
|
||||
Main execution method following OpenStackModule pattern.
|
||||
|
||||
Retrieves share type information using Manila API microversion for complete
|
||||
details including description and is_default fields. Falls back gracefully to
|
||||
basic API calls if microversion is not supported by the backend.
|
||||
"""
|
||||
name_or_id = self.params["name"]
|
||||
|
||||
share_type = self._find_share_type(name_or_id)
|
||||
if not share_type:
|
||||
self.fail_json(
|
||||
msg=f"Share type '{name_or_id}' not found. "
|
||||
f"If this is a private share type, use its UUID instead of name."
|
||||
)
|
||||
|
||||
if hasattr(share_type, "to_dict"):
|
||||
share_type_dict = share_type.to_dict()
|
||||
elif isinstance(share_type, dict):
|
||||
share_type_dict = share_type
|
||||
else:
|
||||
share_type_dict = dict(share_type) if share_type else {}
|
||||
|
||||
# Normalize the output to match CLI format
|
||||
normalized_share_type = self._normalize_share_type_dict(share_type_dict)
|
||||
|
||||
# Return results in the standard format
|
||||
result = dict(changed=False, share_type=normalized_share_type)
|
||||
return result
|
||||
|
||||
|
||||
def main():
|
||||
module = ShareTypeInfoModule()
|
||||
module()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -229,8 +229,10 @@ class StackInfoModule(OpenStackModule):
|
||||
if self.params[k] is not None:
|
||||
kwargs[k] = self.params[k]
|
||||
|
||||
stacks = [stack.to_dict(computed=False)
|
||||
for stack in self.conn.orchestration.stacks(**kwargs)]
|
||||
stacks = []
|
||||
for stack in self.conn.orchestration.stacks(**kwargs):
|
||||
stack_obj = self.conn.orchestration.get_stack(stack.id)
|
||||
stacks.append(stack_obj.to_dict(computed=False))
|
||||
|
||||
self.exit_json(changed=False, stacks=stacks)
|
||||
|
||||
|
||||
@@ -115,6 +115,10 @@ options:
|
||||
- Required when I(state) is 'present'
|
||||
aliases: ['network_name']
|
||||
type: str
|
||||
network_segment:
|
||||
description:
|
||||
- Name or id of the network segment to which the subnet should be associated
|
||||
type: str
|
||||
project:
|
||||
description:
|
||||
- Project name or ID containing the subnet (name admin-only)
|
||||
@@ -294,6 +298,7 @@ class SubnetModule(OpenStackModule):
|
||||
argument_spec = dict(
|
||||
name=dict(required=True),
|
||||
network=dict(aliases=['network_name']),
|
||||
network_segment=dict(),
|
||||
cidr=dict(),
|
||||
description=dict(),
|
||||
ip_version=dict(type='int', default=4, choices=[4, 6]),
|
||||
@@ -369,9 +374,11 @@ class SubnetModule(OpenStackModule):
|
||||
return [dict(start=pool_start, end=pool_end)]
|
||||
return None
|
||||
|
||||
def _build_params(self, network, project, subnet_pool):
|
||||
def _build_params(self, network, segment, project, subnet_pool):
|
||||
params = {attr: self.params[attr] for attr in self.attr_params}
|
||||
params['network_id'] = network.id
|
||||
if segment:
|
||||
params['segment_id'] = segment.id
|
||||
if project:
|
||||
params['project_id'] = project.id
|
||||
if subnet_pool:
|
||||
@@ -382,6 +389,8 @@ class SubnetModule(OpenStackModule):
|
||||
params['allocation_pools'] = self.params['allocation_pools']
|
||||
params = self._add_extra_attrs(params)
|
||||
params = {k: v for k, v in params.items() if v is not None}
|
||||
if self.params['disable_gateway_ip']:
|
||||
params['gateway_ip'] = None
|
||||
return params
|
||||
|
||||
def _build_updates(self, subnet, params):
|
||||
@@ -414,6 +423,7 @@ class SubnetModule(OpenStackModule):
|
||||
def run(self):
|
||||
state = self.params['state']
|
||||
network_name_or_id = self.params['network']
|
||||
network_segment_name_or_id = self.params['network_segment']
|
||||
project_name_or_id = self.params['project']
|
||||
subnet_pool_name_or_id = self.params['subnet_pool']
|
||||
subnet_name = self.params['name']
|
||||
@@ -442,6 +452,13 @@ class SubnetModule(OpenStackModule):
|
||||
**filters)
|
||||
filters['network_id'] = network.id
|
||||
|
||||
segment = None
|
||||
if network_segment_name_or_id:
|
||||
segment = self.conn.network.find_segment(network_segment_name_or_id,
|
||||
ignore_missing=False,
|
||||
**filters)
|
||||
filters['segment_id'] = segment.id
|
||||
|
||||
subnet_pool = None
|
||||
if subnet_pool_name_or_id:
|
||||
subnet_pool = self.conn.network.find_subnet_pool(
|
||||
@@ -458,7 +475,7 @@ class SubnetModule(OpenStackModule):
|
||||
|
||||
changed = False
|
||||
if state == 'present':
|
||||
params = self._build_params(network, project, subnet_pool)
|
||||
params = self._build_params(network, segment, project, subnet_pool)
|
||||
if subnet is None:
|
||||
subnet = self.conn.network.create_subnet(**params)
|
||||
changed = True
|
||||
|
||||
110
plugins/modules/trait.py
Normal file
110
plugins/modules/trait.py
Normal file
@@ -0,0 +1,110 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright (c) 2025, ScaleUp Technologies GmbH & Co. KG
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: trait
|
||||
short_description: Add/Delete a trait from OpenStack
|
||||
author: OpenStack Ansible SIG
|
||||
description:
|
||||
- Add or Delete a trait from OpenStack
|
||||
options:
|
||||
id:
|
||||
description:
|
||||
- ID/Name of this trait
|
||||
required: true
|
||||
type: str
|
||||
state:
|
||||
description:
|
||||
- Should the resource be present or absent.
|
||||
choices: [present, absent]
|
||||
default: present
|
||||
type: str
|
||||
extends_documentation_fragment:
|
||||
- openstack.cloud.openstack
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Creates a trait with the ID CUSTOM_WINDOWS_SPLA
|
||||
- openstack.cloud.trait:
|
||||
cloud: openstack
|
||||
state: present
|
||||
id: CUSTOM_WINDOWS_SPLA
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
trait:
|
||||
description: Dictionary describing the trait.
|
||||
returned: On success when I(state) is 'present'
|
||||
type: dict
|
||||
contains:
|
||||
id:
|
||||
description: ID of the trait.
|
||||
returned: success
|
||||
type: str
|
||||
'''
|
||||
|
||||
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import (
|
||||
OpenStackModule)
|
||||
|
||||
|
||||
class TraitModule(OpenStackModule):
|
||||
|
||||
argument_spec = dict(
|
||||
id=dict(required=True),
|
||||
state=dict(default='present',
|
||||
choices=['absent', 'present']),
|
||||
)
|
||||
|
||||
module_kwargs = dict(
|
||||
supports_check_mode=True,
|
||||
)
|
||||
|
||||
def _system_state_change(self, trait):
|
||||
state = self.params['state']
|
||||
if state == 'present' and not trait:
|
||||
return True
|
||||
if state == 'absent' and trait:
|
||||
return True
|
||||
return False
|
||||
|
||||
def run(self):
|
||||
|
||||
state = self.params['state']
|
||||
id = self.params['id']
|
||||
|
||||
try:
|
||||
trait = self.conn.placement.get_trait(id)
|
||||
except self.sdk.exceptions.NotFoundException:
|
||||
trait = None
|
||||
|
||||
if self.ansible.check_mode:
|
||||
self.exit_json(changed=self._system_state_change(trait), trait=trait)
|
||||
|
||||
changed = False
|
||||
if state == 'present':
|
||||
if not trait:
|
||||
trait = self.conn.placement.create_trait(id)
|
||||
changed = True
|
||||
|
||||
self.exit_json(
|
||||
changed=changed, trait=trait.to_dict(computed=False))
|
||||
|
||||
elif state == 'absent':
|
||||
if trait:
|
||||
self.conn.placement.delete_trait(id, ignore_missing=False)
|
||||
self.exit_json(changed=True)
|
||||
|
||||
self.exit_json(changed=False)
|
||||
|
||||
|
||||
def main():
|
||||
module = TraitModule()
|
||||
module()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
309
plugins/modules/volume_manage.py
Normal file
309
plugins/modules/volume_manage.py
Normal file
@@ -0,0 +1,309 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright (c) 2025 by Pure Storage, Inc.
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
DOCUMENTATION = r"""
|
||||
---
|
||||
module: volume_manage
|
||||
short_description: Manage/Unmanage Volumes
|
||||
author: OpenStack Ansible SIG
|
||||
description:
|
||||
- Manage or Unmanage Volume in OpenStack.
|
||||
options:
|
||||
description:
|
||||
description:
|
||||
- String describing the volume
|
||||
type: str
|
||||
metadata:
|
||||
description: Metadata for the volume
|
||||
type: dict
|
||||
name:
|
||||
description:
|
||||
- Name of the volume to be unmanaged or
|
||||
the new name of a managed volume
|
||||
- When I(state) is C(absent) this must be
|
||||
the cinder volume ID
|
||||
required: true
|
||||
type: str
|
||||
state:
|
||||
description:
|
||||
- Should the resource be present or absent.
|
||||
choices: [present, absent]
|
||||
default: present
|
||||
type: str
|
||||
bootable:
|
||||
description:
|
||||
- Bootable flag for volume.
|
||||
type: bool
|
||||
default: False
|
||||
volume_type:
|
||||
description:
|
||||
- Volume type for volume
|
||||
type: str
|
||||
availability_zone:
|
||||
description:
|
||||
- The availability zone.
|
||||
type: str
|
||||
host:
|
||||
description:
|
||||
- Cinder host on which the existing volume resides
|
||||
- Takes the form "host@backend-name#pool"
|
||||
- Required when I(state) is C(present).
|
||||
type: str
|
||||
source_name:
|
||||
description:
|
||||
- Name of existing volume
|
||||
type: str
|
||||
source_id:
|
||||
description:
|
||||
- Identifier of existing volume
|
||||
type: str
|
||||
extends_documentation_fragment:
|
||||
- openstack.cloud.openstack
|
||||
"""
|
||||
|
||||
RETURN = r"""
|
||||
volume:
|
||||
description: Cinder's representation of the volume object
|
||||
returned: always
|
||||
type: dict
|
||||
contains:
|
||||
attachments:
|
||||
description: Instance attachment information. For a amanaged volume, this
|
||||
will always be empty.
|
||||
type: list
|
||||
availability_zone:
|
||||
description: The name of the availability zone.
|
||||
type: str
|
||||
consistency_group_id:
|
||||
description: The UUID of the consistency group.
|
||||
type: str
|
||||
created_at:
|
||||
description: The date and time when the resource was created.
|
||||
type: str
|
||||
description:
|
||||
description: The volume description.
|
||||
type: str
|
||||
extended_replication_status:
|
||||
description: Extended replication status on this volume.
|
||||
type: str
|
||||
group_id:
|
||||
description: The ID of the group.
|
||||
type: str
|
||||
host:
|
||||
description: The volume's current back-end.
|
||||
type: str
|
||||
id:
|
||||
description: The UUID of the volume.
|
||||
type: str
|
||||
image_id:
|
||||
description: Image on which the volume was based
|
||||
type: str
|
||||
is_bootable:
|
||||
description: Enables or disables the bootable attribute. You can boot an
|
||||
instance from a bootable volume.
|
||||
type: str
|
||||
is_encrypted:
|
||||
description: If true, this volume is encrypted.
|
||||
type: bool
|
||||
is_multiattach:
|
||||
description: Whether this volume can be attached to more than one
|
||||
server.
|
||||
type: bool
|
||||
metadata:
|
||||
description: A metadata object. Contains one or more metadata key and
|
||||
value pairs that are associated with the volume.
|
||||
type: dict
|
||||
migration_id:
|
||||
description: The volume ID that this volume name on the backend is
|
||||
based on.
|
||||
type: str
|
||||
migration_status:
|
||||
description: The status of this volume migration (None means that a
|
||||
migration is not currently in progress).
|
||||
type: str
|
||||
name:
|
||||
description: The volume name.
|
||||
type: str
|
||||
project_id:
|
||||
description: The project ID which the volume belongs to.
|
||||
type: str
|
||||
replication_driver_data:
|
||||
description: Data set by the replication driver
|
||||
type: str
|
||||
replication_status:
|
||||
description: The volume replication status.
|
||||
type: str
|
||||
scheduler_hints:
|
||||
description: Scheduler hints for the volume
|
||||
type: dict
|
||||
size:
|
||||
description: The size of the volume, in gibibytes (GiB).
|
||||
type: int
|
||||
snapshot_id:
|
||||
description: To create a volume from an existing snapshot, specify the
|
||||
UUID of the volume snapshot. The volume is created in same
|
||||
availability zone and with same size as the snapshot.
|
||||
type: str
|
||||
source_volume_id:
|
||||
description: The UUID of the source volume. The API creates a new volume
|
||||
with the same size as the source volume unless a larger size
|
||||
is requested.
|
||||
type: str
|
||||
status:
|
||||
description: The volume status.
|
||||
type: str
|
||||
updated_at:
|
||||
description: The date and time when the resource was updated.
|
||||
type: str
|
||||
user_id:
|
||||
description: The UUID of the user.
|
||||
type: str
|
||||
volume_image_metadata:
|
||||
description: List of image metadata entries. Only included for volumes
|
||||
that were created from an image, or from a snapshot of a
|
||||
volume originally created from an image.
|
||||
type: dict
|
||||
volume_type:
|
||||
description: The associated volume type name for the volume.
|
||||
type: str
|
||||
"""
|
||||
|
||||
EXAMPLES = r"""
|
||||
- name: Manage volume
|
||||
openstack.cloud.volume_manage:
|
||||
name: newly-managed-vol
|
||||
source_name: manage-me
|
||||
host: host@backend-name#pool
|
||||
|
||||
- name: Unmanage volume
|
||||
openstack.cloud.volume_manage:
|
||||
name: "5c831866-3bb3-4d67-a7d3-1b90880c9d18"
|
||||
state: absent
|
||||
"""
|
||||
|
||||
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import (
|
||||
OpenStackModule,
|
||||
)
|
||||
|
||||
|
||||
class VolumeManageModule(OpenStackModule):
|
||||
|
||||
argument_spec = dict(
|
||||
description=dict(type="str"),
|
||||
metadata=dict(type="dict"),
|
||||
source_name=dict(type="str"),
|
||||
source_id=dict(type="str"),
|
||||
availability_zone=dict(type="str"),
|
||||
host=dict(type="str"),
|
||||
bootable=dict(default="false", type="bool"),
|
||||
volume_type=dict(type="str"),
|
||||
name=dict(required=True, type="str"),
|
||||
state=dict(
|
||||
default="present", choices=["absent", "present"], type="str"
|
||||
),
|
||||
)
|
||||
|
||||
module_kwargs = dict(
|
||||
required_if=[("state", "present", ["host"])],
|
||||
supports_check_mode=True,
|
||||
)
|
||||
|
||||
def run(self):
|
||||
name = self.params["name"]
|
||||
state = self.params["state"]
|
||||
changed = False
|
||||
|
||||
if state == "present":
|
||||
changed = True
|
||||
if not self.ansible.check_mode:
|
||||
volumes = self._manage_list()
|
||||
manageable = volumes["manageable-volumes"]
|
||||
safe_to_manage = self._is_safe_to_manage(
|
||||
manageable, self.params["source_name"]
|
||||
)
|
||||
if not safe_to_manage:
|
||||
self.exit_json(changed=False)
|
||||
volume = self._manage()
|
||||
if volume:
|
||||
self.exit_json(
|
||||
changed=changed, volume=volume.to_dict(computed=False)
|
||||
)
|
||||
else:
|
||||
self.exit_json(changed=False)
|
||||
else:
|
||||
self.exit_json(changed=changed)
|
||||
|
||||
else:
|
||||
volume = self.conn.block_storage.find_volume(name)
|
||||
if volume:
|
||||
changed = True
|
||||
if not self.ansible.check_mode:
|
||||
self._unmanage()
|
||||
self.exit_json(changed=changed)
|
||||
else:
|
||||
self.exit_json(changed=changed)
|
||||
|
||||
def _is_safe_to_manage(self, manageable_list, target_name):
|
||||
entry = next(
|
||||
(
|
||||
v
|
||||
for v in manageable_list
|
||||
if isinstance(v.get("reference"), dict)
|
||||
and (
|
||||
v["reference"].get("name") == target_name
|
||||
or v["reference"].get("source-name") == target_name
|
||||
)
|
||||
),
|
||||
None,
|
||||
)
|
||||
if entry is None:
|
||||
return False
|
||||
return entry.get("safe_to_manage", False)
|
||||
|
||||
def _manage(self):
|
||||
kwargs = {
|
||||
key: self.params[key]
|
||||
for key in [
|
||||
"description",
|
||||
"bootable",
|
||||
"volume_type",
|
||||
"availability_zone",
|
||||
"host",
|
||||
"metadata",
|
||||
"name",
|
||||
]
|
||||
if self.params.get(key) is not None
|
||||
}
|
||||
kwargs["ref"] = {}
|
||||
if self.params["source_name"]:
|
||||
kwargs["ref"]["source-name"] = self.params["source_name"]
|
||||
if self.params["source_id"]:
|
||||
kwargs["ref"]["source-id"] = self.params["source_id"]
|
||||
|
||||
volume = self.conn.block_storage.manage_volume(**kwargs)
|
||||
|
||||
return volume
|
||||
|
||||
def _manage_list(self):
|
||||
response = self.conn.block_storage.get(
|
||||
"/manageable_volumes?host=" + self.params["host"],
|
||||
microversion="3.8",
|
||||
)
|
||||
response.raise_for_status()
|
||||
manageable_volumes = response.json()
|
||||
return manageable_volumes
|
||||
|
||||
def _unmanage(self):
|
||||
self.conn.block_storage.unmanage_volume(self.params["name"])
|
||||
|
||||
|
||||
def main():
|
||||
module = VolumeManageModule()
|
||||
module()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
2
setup.py
2
setup.py
@@ -4,6 +4,6 @@
|
||||
import setuptools
|
||||
|
||||
setuptools.setup(
|
||||
setup_requires=['pbr'],
|
||||
setup_requires=['pbr', 'setuptools'],
|
||||
pbr=True,
|
||||
py_modules=[])
|
||||
|
||||
12
tests/requirements-ansible-2.16.txt
Normal file
12
tests/requirements-ansible-2.16.txt
Normal file
@@ -0,0 +1,12 @@
|
||||
ansible-core>=2.16.0,<2.17.0
|
||||
flake8
|
||||
galaxy-importer
|
||||
openstacksdk
|
||||
pycodestyle
|
||||
pylint
|
||||
rstcheck
|
||||
ruamel.yaml
|
||||
tox
|
||||
voluptuous
|
||||
yamllint
|
||||
setuptools
|
||||
12
tests/requirements-ansible-2.18.txt
Normal file
12
tests/requirements-ansible-2.18.txt
Normal file
@@ -0,0 +1,12 @@
|
||||
ansible-core>=2.18.0,<2.19.0
|
||||
flake8
|
||||
galaxy-importer
|
||||
openstacksdk
|
||||
pycodestyle
|
||||
pylint
|
||||
rstcheck
|
||||
ruamel.yaml
|
||||
tox
|
||||
voluptuous
|
||||
yamllint
|
||||
setuptools
|
||||
@@ -1,31 +0,0 @@
|
||||
# (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# Make coding more python3-ish
|
||||
|
||||
#
|
||||
# Compat for python2.7
|
||||
#
|
||||
|
||||
# One unittest needs to import builtins via __import__() so we need to have
|
||||
# the string that represents it
|
||||
try:
|
||||
import __builtin__ # noqa
|
||||
except ImportError:
|
||||
BUILTINS = 'builtins'
|
||||
else:
|
||||
BUILTINS = '__builtin__'
|
||||
@@ -1,120 +0,0 @@
|
||||
# (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# Make coding more python3-ish
|
||||
|
||||
'''
|
||||
Compat module for Python3.x's unittest.mock module
|
||||
'''
|
||||
import sys
|
||||
|
||||
# Python 2.7
|
||||
|
||||
# Note: Could use the pypi mock library on python3.x as well as python2.x. It
|
||||
# is the same as the python3 stdlib mock library
|
||||
|
||||
try:
|
||||
# Allow wildcard import because we really do want to import all of mock's
|
||||
# symbols into this compat shim
|
||||
# pylint: disable=wildcard-import,unused-wildcard-import
|
||||
from unittest.mock import * # noqa
|
||||
except ImportError:
|
||||
# Python 2
|
||||
# pylint: disable=wildcard-import,unused-wildcard-import
|
||||
try:
|
||||
from mock import * # noqa
|
||||
except ImportError:
|
||||
print('You need the mock library installed on python2.x to run tests')
|
||||
|
||||
|
||||
# Prior to 3.4.4, mock_open cannot handle binary read_data
|
||||
if sys.version_info >= (3,) and sys.version_info < (3, 4, 4):
|
||||
file_spec = None
|
||||
|
||||
def _iterate_read_data(read_data):
|
||||
# Helper for mock_open:
|
||||
# Retrieve lines from read_data via a generator so that separate calls to
|
||||
# readline, read, and readlines are properly interleaved
|
||||
sep = b'\n' if isinstance(read_data, bytes) else '\n'
|
||||
data_as_list = [li + sep for li in read_data.split(sep)]
|
||||
|
||||
if data_as_list[-1] == sep:
|
||||
# If the last line ended in a newline, the list comprehension will have an
|
||||
# extra entry that's just a newline. Remove this.
|
||||
data_as_list = data_as_list[:-1]
|
||||
else:
|
||||
# If there wasn't an extra newline by itself, then the file being
|
||||
# emulated doesn't have a newline to end the last line remove the
|
||||
# newline that our naive format() added
|
||||
data_as_list[-1] = data_as_list[-1][:-1]
|
||||
|
||||
for line in data_as_list:
|
||||
yield line
|
||||
|
||||
def mock_open(mock=None, read_data=''):
|
||||
"""
|
||||
A helper function to create a mock to replace the use of `open`. It works
|
||||
for `open` called directly or used as a context manager.
|
||||
|
||||
The `mock` argument is the mock object to configure. If `None` (the
|
||||
default) then a `MagicMock` will be created for you, with the API limited
|
||||
to methods or attributes available on standard file handles.
|
||||
|
||||
`read_data` is a string for the `read` methoddline`, and `readlines` of the
|
||||
file handle to return. This is an empty string by default.
|
||||
"""
|
||||
def _readlines_side_effect(*args, **kwargs):
|
||||
if handle.readlines.return_value is not None:
|
||||
return handle.readlines.return_value
|
||||
return list(_data)
|
||||
|
||||
def _read_side_effect(*args, **kwargs):
|
||||
if handle.read.return_value is not None:
|
||||
return handle.read.return_value
|
||||
return type(read_data)().join(_data)
|
||||
|
||||
def _readline_side_effect():
|
||||
if handle.readline.return_value is not None:
|
||||
while True:
|
||||
yield handle.readline.return_value
|
||||
for line in _data:
|
||||
yield line
|
||||
|
||||
global file_spec
|
||||
if file_spec is None:
|
||||
import _io # noqa
|
||||
file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO))))
|
||||
|
||||
if mock is None:
|
||||
mock = MagicMock(name='open', spec=open) # noqa
|
||||
|
||||
handle = MagicMock(spec=file_spec) # noqa
|
||||
handle.__enter__.return_value = handle
|
||||
|
||||
_data = _iterate_read_data(read_data)
|
||||
|
||||
handle.write.return_value = None
|
||||
handle.read.return_value = None
|
||||
handle.readline.return_value = None
|
||||
handle.readlines.return_value = None
|
||||
|
||||
handle.read.side_effect = _read_side_effect
|
||||
handle.readline.side_effect = _readline_side_effect()
|
||||
handle.readlines.side_effect = _readlines_side_effect
|
||||
|
||||
mock.return_value = handle
|
||||
return mock
|
||||
@@ -1,36 +0,0 @@
|
||||
# (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# Make coding more python3-ish
|
||||
|
||||
'''
|
||||
Compat module for Python2.7's unittest module
|
||||
'''
|
||||
|
||||
import sys
|
||||
|
||||
# Allow wildcard import because we really do want to import all of
|
||||
# unittests's symbols into this compat shim
|
||||
# pylint: disable=wildcard-import,unused-wildcard-import
|
||||
if sys.version_info < (2, 7):
|
||||
try:
|
||||
# Need unittest2 on python2.6
|
||||
from unittest2 import * # noqa
|
||||
except ImportError:
|
||||
print('You need unittest2 installed on python2.6.x to run tests')
|
||||
else:
|
||||
from unittest import * # noqa
|
||||
@@ -1,4 +1,5 @@
|
||||
from ansible_collections.openstack.cloud.tests.unit.compat.mock import MagicMock
|
||||
from unittest.mock import MagicMock
|
||||
|
||||
from ansible.utils.path import unfrackpath
|
||||
|
||||
|
||||
|
||||
@@ -20,10 +20,10 @@
|
||||
|
||||
import sys
|
||||
import json
|
||||
import unittest
|
||||
|
||||
from contextlib import contextmanager
|
||||
from io import BytesIO, StringIO
|
||||
from ansible_collections.openstack.cloud.tests.unit.compat import unittest
|
||||
from ansible.module_utils.six import PY3
|
||||
from ansible.module_utils._text import to_bytes
|
||||
|
||||
|
||||
385
tests/unit/modules/cloud/openstack/test_baremetal_port_group.py
Normal file
385
tests/unit/modules/cloud/openstack/test_baremetal_port_group.py
Normal file
@@ -0,0 +1,385 @@
|
||||
import importlib.util
|
||||
import json
|
||||
import unittest
|
||||
from pathlib import Path
|
||||
from unittest import mock
|
||||
from unittest.mock import patch
|
||||
|
||||
from ansible.module_utils import basic
|
||||
from ansible.module_utils._text import to_bytes
|
||||
|
||||
|
||||
def _load_module_under_test():
|
||||
module_path = Path(__file__).resolve().parents[5] / 'plugins/modules/baremetal_port_group.py'
|
||||
spec = importlib.util.spec_from_file_location('baremetal_port_group', str(module_path))
|
||||
if spec is None or spec.loader is None:
|
||||
raise ImportError('Cannot load baremetal_port_group module for tests')
|
||||
module = importlib.util.module_from_spec(spec)
|
||||
spec.loader.exec_module(module)
|
||||
return module
|
||||
|
||||
|
||||
baremetal_port_group = _load_module_under_test()
|
||||
|
||||
|
||||
def set_module_args(args):
|
||||
if '_ansible_remote_tmp' not in args:
|
||||
args['_ansible_remote_tmp'] = '/tmp'
|
||||
if '_ansible_keep_remote_files' not in args:
|
||||
args['_ansible_keep_remote_files'] = False
|
||||
|
||||
args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
|
||||
basic._ANSIBLE_ARGS = to_bytes(args)
|
||||
|
||||
|
||||
class AnsibleExitJson(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class AnsibleFailJson(Exception):
|
||||
pass
|
||||
|
||||
|
||||
def exit_json(*args, **kwargs):
|
||||
if 'changed' not in kwargs:
|
||||
kwargs['changed'] = False
|
||||
raise AnsibleExitJson(kwargs)
|
||||
|
||||
|
||||
def fail_json(*args, **kwargs):
|
||||
kwargs['failed'] = True
|
||||
raise AnsibleFailJson(kwargs)
|
||||
|
||||
|
||||
class ModuleTestCase(unittest.TestCase):
|
||||
mock_module = None
|
||||
mock_sleep = None
|
||||
|
||||
def setUp(self):
|
||||
self.mock_module = patch.multiple(
|
||||
basic.AnsibleModule,
|
||||
exit_json=exit_json,
|
||||
fail_json=fail_json,
|
||||
)
|
||||
self.mock_module.start()
|
||||
self.mock_sleep = patch('time.sleep')
|
||||
self.mock_sleep.start()
|
||||
set_module_args({})
|
||||
self.addCleanup(self.mock_module.stop)
|
||||
self.addCleanup(self.mock_sleep.stop)
|
||||
|
||||
|
||||
class FakePortGroup(dict[str, object]):
|
||||
|
||||
def to_dict(self, computed=False):
|
||||
return dict(self)
|
||||
|
||||
|
||||
class FakeSDK(object):
|
||||
class exceptions:
|
||||
class OpenStackCloudException(Exception):
|
||||
pass
|
||||
|
||||
class ResourceNotFound(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class TestBaremetalPortGroup(ModuleTestCase):
|
||||
module = baremetal_port_group
|
||||
|
||||
def setUp(self):
|
||||
super(TestBaremetalPortGroup, self).setUp()
|
||||
self.module = baremetal_port_group
|
||||
|
||||
def _run_module(self, module_args, baremetal):
|
||||
set_module_args(module_args)
|
||||
conn = mock.Mock()
|
||||
conn.baremetal = baremetal
|
||||
with mock.patch.object(
|
||||
baremetal_port_group.BaremetalPortGroupModule,
|
||||
'openstack_cloud_from_module',
|
||||
return_value=(FakeSDK(), conn),
|
||||
):
|
||||
self.module.main()
|
||||
|
||||
def _new_baremetal(self):
|
||||
baremetal = mock.Mock()
|
||||
baremetal.find_port_group.return_value = None
|
||||
baremetal.find_node.return_value = {'id': 'node-1'}
|
||||
return baremetal
|
||||
|
||||
def test_create_port_group(self):
|
||||
baremetal = self._new_baremetal()
|
||||
baremetal.create_port_group.return_value = FakePortGroup(
|
||||
id='pg-1',
|
||||
name='bond0',
|
||||
node_id='node-1',
|
||||
address='fa:16:3e:aa:aa:aa',
|
||||
mode='active-backup',
|
||||
extra={},
|
||||
properties={},
|
||||
standalone_ports_supported=True,
|
||||
links=[],
|
||||
created_at='2026-01-01T00:00:00+00:00',
|
||||
updated_at=None,
|
||||
)
|
||||
|
||||
with self.assertRaises(AnsibleExitJson) as ex:
|
||||
self._run_module(
|
||||
{
|
||||
'id': None,
|
||||
'name': 'bond0',
|
||||
'node': 'node-name',
|
||||
'address': 'fa:16:3e:aa:aa:aa',
|
||||
'extra': {},
|
||||
'standalone_ports_supported': True,
|
||||
'mode': 'active-backup',
|
||||
'properties': {},
|
||||
'state': 'present',
|
||||
},
|
||||
baremetal,
|
||||
)
|
||||
|
||||
result = ex.exception.args[0]
|
||||
self.assertTrue(result['changed'])
|
||||
self.assertEqual('pg-1', result['port_group']['id'])
|
||||
baremetal.find_node.assert_called_once_with('node-name', ignore_missing=False)
|
||||
baremetal.create_port_group.assert_called_once_with(
|
||||
name='bond0',
|
||||
node_id='node-1',
|
||||
address='fa:16:3e:aa:aa:aa',
|
||||
extra={},
|
||||
standalone_ports_supported=True,
|
||||
mode='active-backup',
|
||||
properties={},
|
||||
)
|
||||
|
||||
def test_create_port_group_without_node_fails(self):
|
||||
baremetal = self._new_baremetal()
|
||||
|
||||
with self.assertRaises(AnsibleFailJson) as ex:
|
||||
self._run_module(
|
||||
{
|
||||
'id': None,
|
||||
'name': 'bond0',
|
||||
'node': None,
|
||||
'address': None,
|
||||
'extra': None,
|
||||
'standalone_ports_supported': None,
|
||||
'mode': None,
|
||||
'properties': None,
|
||||
'state': 'present',
|
||||
},
|
||||
baremetal,
|
||||
)
|
||||
|
||||
self.assertIn("Parameter 'node' is required", ex.exception.args[0]['msg'])
|
||||
baremetal.create_port_group.assert_not_called()
|
||||
|
||||
def test_update_port_group_when_values_changed(self):
|
||||
baremetal = self._new_baremetal()
|
||||
baremetal.find_port_group.return_value = FakePortGroup(
|
||||
id='pg-1',
|
||||
name='bond0',
|
||||
node_id='node-1',
|
||||
mode='active-backup',
|
||||
address=None,
|
||||
extra={},
|
||||
properties={},
|
||||
standalone_ports_supported=True,
|
||||
links=[],
|
||||
created_at='2026-01-01T00:00:00+00:00',
|
||||
updated_at=None,
|
||||
)
|
||||
baremetal.update_port_group.return_value = FakePortGroup(
|
||||
id='pg-1',
|
||||
name='bond0',
|
||||
node_id='node-1',
|
||||
mode='802.3ad',
|
||||
address=None,
|
||||
extra={},
|
||||
properties={},
|
||||
standalone_ports_supported=True,
|
||||
links=[],
|
||||
created_at='2026-01-01T00:00:00+00:00',
|
||||
updated_at='2026-01-02T00:00:00+00:00',
|
||||
)
|
||||
|
||||
with self.assertRaises(AnsibleExitJson) as ex:
|
||||
self._run_module(
|
||||
{
|
||||
'id': 'pg-1',
|
||||
'name': None,
|
||||
'node': None,
|
||||
'address': None,
|
||||
'extra': None,
|
||||
'standalone_ports_supported': None,
|
||||
'mode': '802.3ad',
|
||||
'properties': None,
|
||||
'state': 'present',
|
||||
},
|
||||
baremetal,
|
||||
)
|
||||
|
||||
result = ex.exception.args[0]
|
||||
self.assertTrue(result['changed'])
|
||||
self.assertEqual('802.3ad', result['port_group']['mode'])
|
||||
baremetal.update_port_group.assert_called_once_with('pg-1', mode='802.3ad')
|
||||
|
||||
def test_present_noop_when_already_matching(self):
|
||||
baremetal = self._new_baremetal()
|
||||
baremetal.find_port_group.return_value = FakePortGroup(
|
||||
id='pg-1',
|
||||
name='bond0',
|
||||
node_id='node-1',
|
||||
mode='active-backup',
|
||||
address='fa:16:3e:aa:aa:aa',
|
||||
extra={'a': 'b'},
|
||||
properties={'miimon': '100'},
|
||||
standalone_ports_supported=False,
|
||||
links=[],
|
||||
created_at='2026-01-01T00:00:00+00:00',
|
||||
updated_at=None,
|
||||
)
|
||||
|
||||
with self.assertRaises(AnsibleExitJson) as ex:
|
||||
self._run_module(
|
||||
{
|
||||
'id': 'pg-1',
|
||||
'name': 'bond0',
|
||||
'node': None,
|
||||
'address': 'fa:16:3e:aa:aa:aa',
|
||||
'extra': {'a': 'b'},
|
||||
'standalone_ports_supported': False,
|
||||
'mode': 'active-backup',
|
||||
'properties': {'miimon': '100'},
|
||||
'state': 'present',
|
||||
},
|
||||
baremetal,
|
||||
)
|
||||
|
||||
result = ex.exception.args[0]
|
||||
self.assertFalse(result['changed'])
|
||||
baremetal.update_port_group.assert_not_called()
|
||||
|
||||
def test_delete_existing_port_group(self):
|
||||
baremetal = self._new_baremetal()
|
||||
baremetal.find_port_group.return_value = FakePortGroup(id='pg-1', name='bond0')
|
||||
|
||||
with self.assertRaises(AnsibleExitJson) as ex:
|
||||
self._run_module(
|
||||
{
|
||||
'id': 'pg-1',
|
||||
'name': None,
|
||||
'node': None,
|
||||
'address': None,
|
||||
'extra': None,
|
||||
'standalone_ports_supported': None,
|
||||
'mode': None,
|
||||
'properties': None,
|
||||
'state': 'absent',
|
||||
},
|
||||
baremetal,
|
||||
)
|
||||
|
||||
result = ex.exception.args[0]
|
||||
self.assertTrue(result['changed'])
|
||||
baremetal.delete_port_group.assert_called_once_with('pg-1')
|
||||
|
||||
def test_delete_missing_port_group_is_noop(self):
|
||||
baremetal = self._new_baremetal()
|
||||
baremetal.find_port_group.return_value = None
|
||||
|
||||
with self.assertRaises(AnsibleExitJson) as ex:
|
||||
self._run_module(
|
||||
{
|
||||
'id': 'pg-1',
|
||||
'name': None,
|
||||
'node': None,
|
||||
'address': None,
|
||||
'extra': None,
|
||||
'standalone_ports_supported': None,
|
||||
'mode': None,
|
||||
'properties': None,
|
||||
'state': 'absent',
|
||||
},
|
||||
baremetal,
|
||||
)
|
||||
|
||||
result = ex.exception.args[0]
|
||||
self.assertFalse(result['changed'])
|
||||
baremetal.delete_port_group.assert_not_called()
|
||||
|
||||
def test_check_mode_create_marks_changed(self):
|
||||
baremetal = self._new_baremetal()
|
||||
baremetal.find_port_group.return_value = None
|
||||
|
||||
with self.assertRaises(AnsibleExitJson) as ex:
|
||||
self._run_module(
|
||||
{
|
||||
'_ansible_check_mode': True,
|
||||
'id': None,
|
||||
'name': 'bond0',
|
||||
'node': 'node-name',
|
||||
'address': None,
|
||||
'extra': None,
|
||||
'standalone_ports_supported': None,
|
||||
'mode': None,
|
||||
'properties': None,
|
||||
'state': 'present',
|
||||
},
|
||||
baremetal,
|
||||
)
|
||||
|
||||
result = ex.exception.args[0]
|
||||
self.assertTrue(result['changed'])
|
||||
baremetal.create_port_group.assert_not_called()
|
||||
baremetal.find_node.assert_called_once_with('node-name', ignore_missing=False)
|
||||
|
||||
def test_check_mode_create_without_node_fails(self):
|
||||
baremetal = self._new_baremetal()
|
||||
baremetal.find_port_group.return_value = None
|
||||
|
||||
with self.assertRaises(AnsibleFailJson) as ex:
|
||||
self._run_module(
|
||||
{
|
||||
'_ansible_check_mode': True,
|
||||
'id': None,
|
||||
'name': 'bond0',
|
||||
'node': None,
|
||||
'address': None,
|
||||
'extra': None,
|
||||
'standalone_ports_supported': None,
|
||||
'mode': None,
|
||||
'properties': None,
|
||||
'state': 'present',
|
||||
},
|
||||
baremetal,
|
||||
)
|
||||
|
||||
self.assertIn("Parameter 'node' is required", ex.exception.args[0]['msg'])
|
||||
baremetal.create_port_group.assert_not_called()
|
||||
baremetal.find_node.assert_not_called()
|
||||
|
||||
def test_find_port_group_resource_not_found_returns_none(self):
|
||||
baremetal = self._new_baremetal()
|
||||
baremetal.find_port_group.side_effect = FakeSDK.exceptions.ResourceNotFound()
|
||||
|
||||
with self.assertRaises(AnsibleExitJson) as ex:
|
||||
self._run_module(
|
||||
{
|
||||
'id': 'pg-1',
|
||||
'name': None,
|
||||
'node': None,
|
||||
'address': None,
|
||||
'extra': None,
|
||||
'standalone_ports_supported': None,
|
||||
'mode': None,
|
||||
'properties': None,
|
||||
'state': 'absent',
|
||||
},
|
||||
baremetal,
|
||||
)
|
||||
|
||||
result = ex.exception.args[0]
|
||||
self.assertFalse(result['changed'])
|
||||
@@ -1,7 +1,7 @@
|
||||
import collections
|
||||
import inspect
|
||||
import mock
|
||||
import pytest
|
||||
from unittest import mock
|
||||
import yaml
|
||||
|
||||
from ansible.module_utils.six import string_types
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
import json
|
||||
import unittest
|
||||
from unittest.mock import patch
|
||||
|
||||
from ansible_collections.openstack.cloud.tests.unit.compat import unittest
|
||||
from ansible_collections.openstack.cloud.tests.unit.compat.mock import patch
|
||||
from ansible.module_utils import basic
|
||||
from ansible.module_utils._text import to_bytes
|
||||
|
||||
|
||||
@@ -24,7 +24,7 @@ echo "Running test with Python version ${PY_VER}"
|
||||
|
||||
rm -rf "${ANSIBLE_COLLECTIONS_PATH}"
|
||||
mkdir -p ${ANSIBLE_COLLECTIONS_PATH}/ansible_collections/openstack/cloud
|
||||
cp -a ${TOXDIR}/{plugins,meta,tests,docs} ${ANSIBLE_COLLECTIONS_PATH}/ansible_collections/openstack/cloud
|
||||
cp -a ${TOXDIR}/{plugins,meta,tests,docs,galaxy.yml} ${ANSIBLE_COLLECTIONS_PATH}/ansible_collections/openstack/cloud
|
||||
cd ${ANSIBLE_COLLECTIONS_PATH}/ansible_collections/openstack/cloud/
|
||||
echo "Running ansible-test with version:"
|
||||
ansible --version
|
||||
|
||||
11
tox.ini
11
tox.ini
@@ -2,12 +2,10 @@
|
||||
minversion = 3.18.0
|
||||
envlist = linters_latest,ansible_latest
|
||||
skipsdist = True
|
||||
ignore_basepython_conflict = True
|
||||
|
||||
[testenv]
|
||||
skip_install = True
|
||||
install_command = python3 -m pip install {opts} {packages}
|
||||
basepython = python3
|
||||
passenv =
|
||||
OS_*
|
||||
setenv =
|
||||
@@ -36,13 +34,14 @@ deps =
|
||||
galaxy-importer
|
||||
pbr
|
||||
ruamel.yaml
|
||||
setuptools
|
||||
commands =
|
||||
python {toxinidir}/tools/build.py
|
||||
ansible --version
|
||||
ansible-galaxy collection build --force {toxinidir} --output-path {toxinidir}/build_artifact
|
||||
bash {toxinidir}/tools/check-import.sh {toxinidir}
|
||||
|
||||
[testenv:linters_{2_9,2_11,2_12,latest}]
|
||||
[testenv:linters_{2_9,2_11,2_12,2_16,2_18,latest}]
|
||||
allowlist_externals = bash
|
||||
commands =
|
||||
{[testenv:build]commands}
|
||||
@@ -56,6 +55,8 @@ deps =
|
||||
linters_2_9: -r{toxinidir}/tests/requirements-ansible-2.9.txt
|
||||
linters_2_11: -r{toxinidir}/tests/requirements-ansible-2.11.txt
|
||||
linters_2_12: -r{toxinidir}/tests/requirements-ansible-2.12.txt
|
||||
linters_2_16: -r{toxinidir}/tests/requirements-ansible-2.16.txt
|
||||
linters_2_18: -r{toxinidir}/tests/requirements-ansible-2.18.txt
|
||||
passenv = *
|
||||
|
||||
[flake8]
|
||||
@@ -69,7 +70,7 @@ ignore = W503,H4,E501,E402,H301
|
||||
show-source = True
|
||||
exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build,ansible_collections
|
||||
|
||||
[testenv:ansible_{2_9,2_11,2_12,latest}]
|
||||
[testenv:ansible_{2_9,2_11,2_12,2_16,2_18,latest}]
|
||||
allowlist_externals = bash
|
||||
commands =
|
||||
bash {toxinidir}/ci/run-ansible-tests-collection.sh -e {envdir} {posargs}
|
||||
@@ -79,6 +80,8 @@ deps =
|
||||
ansible_2_9: -r{toxinidir}/tests/requirements-ansible-2.9.txt
|
||||
ansible_2_11: -r{toxinidir}/tests/requirements-ansible-2.11.txt
|
||||
ansible_2_12: -r{toxinidir}/tests/requirements-ansible-2.12.txt
|
||||
ansible_2_16: -r{toxinidir}/tests/requirements-ansible-2.16.txt
|
||||
ansible_2_18: -r{toxinidir}/tests/requirements-ansible-2.18.txt
|
||||
# Need to pass some env vars for the Ansible playbooks
|
||||
passenv =
|
||||
HOME
|
||||
|
||||
Reference in New Issue
Block a user