1 Commits

Author SHA1 Message Date
Sagi Shnaidman
20a27c461d Remove secret from experimental branch
Change-Id: I521069b356a14839720de954959a35b49fa59117
2024-12-17 13:01:13 +02:00
51 changed files with 163 additions and 1870 deletions

View File

@@ -47,7 +47,6 @@
devstack_services:
designate: true
neutron-dns: true
neutron-trunk: true
zuul_copy_output:
'{{ devstack_log_dir }}/test_output.log': 'logs'
extensions_to_txt:
@@ -163,18 +162,45 @@
tox_constraints_file: '{{ ansible_user_dir }}/{{ zuul.project.src_dir }}/tests/constraints-openstacksdk-1.x.x.txt'
tox_install_siblings: false
# Job with Ansible 2.9 for checking backward compatibility
- job:
name: ansible-collections-openstack-functional-devstack-ansible-2.18
name: ansible-collections-openstack-functional-devstack-ansible-2.9
parent: ansible-collections-openstack-functional-devstack-base
branches: master
description: |
Run openstack collections functional tests against a master devstack
using master of openstacksdk and stable 2.16 branch of ansible
using master of openstacksdk and stable 2.9 branch of ansible
required-projects:
- name: github.com/ansible/ansible
override-checkout: stable-2.18
override-checkout: stable-2.9
vars:
tox_envlist: ansible_2_18
tox_envlist: ansible_2_9
- job:
name: ansible-collections-openstack-functional-devstack-ansible-2.11
parent: ansible-collections-openstack-functional-devstack-base
branches: master
description: |
Run openstack collections functional tests against a master devstack
using master of openstacksdk and stable 2.12 branch of ansible
required-projects:
- name: github.com/ansible/ansible
override-checkout: stable-2.11
vars:
tox_envlist: ansible_2_11
- job:
name: ansible-collections-openstack-functional-devstack-ansible-2.12
parent: ansible-collections-openstack-functional-devstack-base
branches: master
description: |
Run openstack collections functional tests against a master devstack
using master of openstacksdk and stable 2.12 branch of ansible
required-projects:
- name: github.com/ansible/ansible
override-checkout: stable-2.12
vars:
tox_envlist: ansible_2_12
- job:
name: ansible-collections-openstack-functional-devstack-ansible-devel
@@ -218,22 +244,24 @@
bindep_profile: test py310
- job:
name: openstack-tox-linters-ansible-2.18
name: openstack-tox-linters-ansible-2.12
parent: openstack-tox-linters-ansible
nodeset: ubuntu-focal
description: |
Run openstack collections linter tests using the 2.18 branch of ansible
Run openstack collections linter tests using the 2.12 branch of ansible
required-projects:
- name: github.com/ansible/ansible
override-checkout: stable-2.18
override-checkout: stable-2.12
vars:
tox_envlist: linters_2_18
python_version: "3.12"
bindep_profile: test py312
ensure_tox_version: '<4'
tox_envlist: linters_2_12
python_version: 3.8
bindep_profile: test py38
# Cross-checks with other projects
- job:
name: bifrost-collections-src
parent: bifrost-integration-tinyipa-ubuntu-jammy
parent: bifrost-integration-tinyipa-ubuntu-focal
required-projects:
- openstack/ansible-collections-openstack
- # always use master branch when collecting parent job variants, refer to git blame for rationale.
@@ -244,7 +272,7 @@
override-checkout: master
- job:
name: bifrost-keystone-collections-src
parent: bifrost-integration-tinyipa-keystone-ubuntu-jammy
parent: bifrost-integration-tinyipa-keystone-ubuntu-focal
required-projects:
- openstack/ansible-collections-openstack
- # always use master branch when collecting parent job variants, refer to git blame for rationale.
@@ -254,70 +282,69 @@
name: openstack/openstacksdk
override-checkout: master
- job:
name: ansible-collections-openstack-release
parent: base
run: ci/publish/publish_collection.yml
secrets:
- ansible_galaxy_info
- secret:
name: ansible_galaxy_info
data:
url: https://galaxy.ansible.com
token: !encrypted/pkcs1-oaep
- QJ3c5LfmM4YmqwwLKv4wK5lroWDLGeMyPkmHXhvf0ry3vGjKZvZxVpbIhFXJHXevHov/r
nvlqwmG8D5msynQKZDFg2ZwSMIQWRKfSbsSLe7A6NWI2wC+QtZSPiRiBcBcHY1QbNNW21
84cssYa1oHOA0WXpomBz1qXuPV48aKLjMnWysgFhNSx3Oog+ZOSCczyyVVuXP1lIWIO26
AtRTrEcr37K3JY9usE2PCbZKFOq/+IDPz9fbS7PtBOv7iXOHOf3AfBiJiaJe3q/ecoaaq
ejk2WTKWfvq/3rY4pU1976kUcxgcd+jj9ReFyw8edCsc1ecL0qmZFbdHmC03jEcVo4p8I
WJQ0D5wk4/u2Fu9texNuBvb62Yu3Y028Zhm5rz8Zl/ISsdaA3losn5S7C7iAH/yKlGQEI
N/1X4M0tVPaMtsIhZyyz+JMbeNyVR9ZarqbtpzRtVhjxL7KOiAQbEzAmZcBbCJ2Z5iI+P
bTp03f9Y/tZNtkohARvx1TKhv8CvsmyGkMm+r5Y8aWz3SNy8LL6bSwtGun/ifbnadHmw/
TD5/UUXHHjBGkeAu9HTtwUZ5Qdkfg92PnPgruAAuOkF1Y4RyRS9qvwhtqyHO8TwU0INRY
5MHEzeOQWemoQb/qdENp+J/Q9oMEbpFYv9TkrWkxVoKop6Str8e3FF5sxmN/SE=
- project:
check:
jobs:
- tox-pep8
- openstack-tox-linters-ansible-devel
- openstack-tox-linters-ansible-2.18
- ansible-collections-openstack-functional-devstack
- ansible-collections-openstack-functional-devstack-releases
- ansible-collections-openstack-functional-devstack-ansible-2.18
- ansible-collections-openstack-functional-devstack-ansible-devel
- ansible-collections-openstack-functional-devstack-magnum
- ansible-collections-openstack-functional-devstack-octavia
# - openstack-tox-linters-ansible-devel
# - openstack-tox-linters-ansible-2.12
# - ansible-collections-openstack-functional-devstack:
# dependencies: &deps_unit_lint
# - tox-pep8
# - openstack-tox-linters-ansible-2.12
- bifrost-collections-src:
voting: false
irrelevant-files: *ignore_files
- bifrost-keystone-collections-src:
voting: false
irrelevant-files: *ignore_files
# - ansible-collections-openstack-functional-devstack-releases:
# dependencies: *deps_unit_lint
# - ansible-collections-openstack-functional-devstack-ansible-2.9:
# dependencies: *deps_unit_lint
# - ansible-collections-openstack-functional-devstack-ansible-2.12:
# dependencies: *deps_unit_lint
# - ansible-collections-openstack-functional-devstack-ansible-devel:
# dependencies: *deps_unit_lint
# - ansible-collections-openstack-functional-devstack-magnum:
# dependencies: *deps_unit_lint
# - ansible-collections-openstack-functional-devstack-octavia:
# dependencies: *deps_unit_lint
# - bifrost-collections-src:
# voting: false
# dependencies: *deps_unit_lint
# irrelevant-files: *ignore_files
# - bifrost-keystone-collections-src:
# voting: false
# dependencies: *deps_unit_lint
# irrelevant-files: *ignore_files
gate:
jobs:
- tox-pep8
- openstack-tox-linters-ansible-2.18
- ansible-collections-openstack-functional-devstack-releases
- ansible-collections-openstack-functional-devstack-magnum
- ansible-collections-openstack-functional-devstack-octavia
# - openstack-tox-linters-ansible-2.12
# - ansible-collections-openstack-functional-devstack
# - ansible-collections-openstack-functional-devstack-releases
# - ansible-collections-openstack-functional-devstack-ansible-2.9
# - ansible-collections-openstack-functional-devstack-ansible-2.12
# - ansible-collections-openstack-functional-devstack-magnum
# - ansible-collections-openstack-functional-devstack-octavia
periodic:
jobs:
- openstack-tox-linters-ansible-devel
- openstack-tox-linters-ansible-2.18
- ansible-collections-openstack-functional-devstack
- ansible-collections-openstack-functional-devstack-releases
- ansible-collections-openstack-functional-devstack-ansible-2.18
- ansible-collections-openstack-functional-devstack-ansible-devel
- bifrost-collections-src
- bifrost-keystone-collections-src
- ansible-collections-openstack-functional-devstack-magnum
- ansible-collections-openstack-functional-devstack-octavia
# periodic:
# jobs:
# - openstack-tox-linters-ansible-devel
# - openstack-tox-linters-ansible-2.12
# - ansible-collections-openstack-functional-devstack
# - ansible-collections-openstack-functional-devstack-releases
# - ansible-collections-openstack-functional-devstack-ansible-2.9
# - ansible-collections-openstack-functional-devstack-ansible-2.12
# - ansible-collections-openstack-functional-devstack-ansible-devel
# - bifrost-collections-src
# - bifrost-keystone-collections-src
# - ansible-collections-openstack-functional-devstack-magnum
# - ansible-collections-openstack-functional-devstack-octavia
tag:
jobs:
- ansible-collections-openstack-release
# experimental:
# jobs:
# - ansible-collections-openstack-functional-devstack-ansible-2.11
# tag:
# jobs:
# - ansible-collections-openstack-release

View File

@@ -5,75 +5,6 @@ Ansible OpenStack Collection Release Notes
.. contents:: Topics
v2.3.1
======
Release Summary
---------------
Client TLS certificate support
Minor Changes
-------------
- Add ability to pass client tls certificate
v2.3.0
======
Release Summary
---------------
Bugfixes and new modules
Major Changes
-------------
- Add Neutron trunk module
- Add application_credential module
- Add module to filter available volume services
Minor Changes
-------------
- Add inactive state for the images
- Add insecure_registry property to coe_cluster_templates
- Add support for creation of the default external networks
- Add target_all_project option
- Add vlan_tranparency for creation networks
- Allow munch results in server_info module
- Allow to specify multiple allocation pools when creating a subnet
- CI - Disable auto-discovery for setuptools
- CI - Don't create port with binding profile
- CI - Fix CI in collection
- CI - Fix linters-devel and devstack tests
- CI - Fix regression in quota module
- CI - Fix test for server shelve
- CI - Migrate Bifrost jobs to Ubuntu Jammy
- CI - Remove 2.9 jobs from Zuul config
- CI - Run functional testing regardless of pep8/linter results
- Enable glance-direct interop image import
- Ensure coe_cluster_template compare labels properly
- Wait for deleted server to disappear from results
- router - Allow specifying external network name in a different project
Bugfixes
--------
- Allow wait false when auto_ip is false
- Fix exception when creating object from file
- Fix exception when updating container with metadata
- Fix typo in openstack.cloud.lb_pool
- Fix typo in parameter description
- fix subnet module - allow cidr option with subnet_pool
New Modules
-----------
- openstack.cloud.application_credential - Manage OpenStack Identity (Keystone) application credentials
- openstack.cloud.trunk - Add or delete trunks from an OpenStack cloud
- openstack.cloud.volume_service_info - Fetch OpenStack Volume (Cinder) services
v2.2.0
======

View File

@@ -526,55 +526,3 @@ releases:
- Add volume_type modules
release_summary: New module for volume_type and bugfixes
release_date: '2023-12-01'
2.3.0:
changes:
bugfixes:
- Allow wait false when auto_ip is false
- Fix exception when creating object from file
- Fix exception when updating container with metadata
- Fix typo in openstack.cloud.lb_pool
- Fix typo in parameter description
- fix subnet module - allow cidr option with subnet_pool
major_changes:
- Add Neutron trunk module
- Add application_credential module
- Add module to filter available volume services
minor_changes:
- Add inactive state for the images
- Add insecure_registry property to coe_cluster_templates
- Add support for creation of the default external networks
- Add target_all_project option
- Add vlan_tranparency for creation networks
- Allow munch results in server_info module
- Allow to specify multiple allocation pools when creating a subnet
- CI - Disable auto-discovery for setuptools
- CI - Don't create port with binding profile
- CI - Fix CI in collection
- CI - Fix linters-devel and devstack tests
- CI - Fix regression in quota module
- CI - Fix test for server shelve
- CI - Migrate Bifrost jobs to Ubuntu Jammy
- CI - Remove 2.9 jobs from Zuul config
- CI - Run functional testing regardless of pep8/linter results
- Enable glance-direct interop image import
- Ensure coe_cluster_template compare labels properly
- Wait for deleted server to disappear from results
- router - Allow specifying external network name in a different project
release_summary: Bugfixes and new modules
modules:
- description: Manage OpenStack Identity (Keystone) application credentials
name: application_credential
namespace: ''
- description: Add or delete trunks from an OpenStack cloud
name: trunk
namespace: ''
- description: Fetch OpenStack Volume (Cinder) services
name: volume_service_info
namespace: ''
release_date: '2024-11-28'
2.3.1:
changes:
minor_changes:
- Add ability to pass client tls certificate
release_summary: Client TLS certificate support
release_date: '2024-12-18'

View File

View File

@@ -1,9 +0,0 @@
expected_fields:
- description
- expires_at
- id
- name
- project_id
- roles
- secret
- unrestricted

View File

@@ -1,61 +0,0 @@
---
- name: Create application credentials
openstack.cloud.application_credential:
cloud: "{{ cloud }}"
state: present
name: ansible_creds
description: dummy description
register: appcred
- name: Assert return values of application_credential module
assert:
that:
- appcred is changed
# allow new fields to be introduced but prevent fields from being removed
- expected_fields|difference(appcred.application_credential.keys())|length == 0
- name: Create the application credential again
openstack.cloud.application_credential:
cloud: "{{ cloud }}"
state: present
name: ansible_creds
description: dummy description
register: appcred
- name: Assert return values of ansible_credential module
assert:
that:
# credentials are immutable so creating twice will cause delete and create
- appcred is changed
# allow new fields to be introduced but prevent fields from being removed
- expected_fields|difference(appcred.application_credential.keys())|length == 0
- name: Update the application credential again
openstack.cloud.application_credential:
cloud: "{{ cloud }}"
state: present
name: ansible_creds
description: new description
register: appcred
- name: Assert application credential changed
assert:
that:
- appcred is changed
- appcred.application_credential.description == 'new description'
- name: Get list of all keypairs using application credential
openstack.cloud.keypair_info:
cloud: "{{ appcred.cloud }}"
- name: Delete application credential
openstack.cloud.application_credential:
cloud: "{{ cloud }}"
state: absent
name: ansible_creds
register: appcred
- name: Assert application credential changed
assert:
that: appcred is changed

View File

@@ -26,9 +26,6 @@
keypair_id: '{{ keypair.keypair.id }}'
name: k8s
state: present
labels:
docker_volume_size: 10
cloud_provider_tag: v1.23.1
register: coe_cluster_template
- name: Assert return values of coe_cluster_template module
@@ -46,9 +43,6 @@
keypair_id: '{{ keypair.keypair.id }}'
name: k8s
state: present
labels:
docker_volume_size: 10
cloud_provider_tag: v1.23.1
register: coe_cluster_template
- name: Assert return values of coe_cluster_template module

View File

@@ -176,34 +176,6 @@
- image is changed
- image.image.name == 'ansible_image-changed'
- name: Deactivate raw image
openstack.cloud.image:
cloud: "{{ cloud }}"
state: inactive
id: "{{ image.image.id }}"
name: 'ansible_image-changed'
register: image
- name: Assert changed
assert:
that:
- image is changed
- image.image.status == 'deactivated'
- name: Reactivate raw image
openstack.cloud.image:
cloud: "{{ cloud }}"
state: present
id: "{{ image.image.id }}"
name: 'ansible_image-changed'
register: image
- name: Assert changed
assert:
that:
- image is changed
- image.image.status == 'active'
- name: Rename back raw image (defaults)
openstack.cloud.image:
cloud: "{{ cloud }}"

View File

@@ -7,4 +7,3 @@ expected_fields:
- project_id
- target_project_id
- tenant_id
all_project_symbol: '*'

View File

@@ -69,29 +69,6 @@
id: "{{ rbac_policy.rbac_policy.id }}"
state: absent
- name: Create a new network RBAC policy by targeting all projects
openstack.cloud.neutron_rbac_policy:
cloud: "{{ cloud }}"
object_id: "{{ network.network.id }}"
object_type: 'network'
action: 'access_as_shared'
target_all_project: true
project_id: "{{ source_project.project.id }}"
register: rbac_policy
- name: Assert return values of neutron_rbac_policy module
assert:
that:
# allow new fields to be introduced but prevent fields from being removed
- expected_fields|difference(rbac_policy.rbac_policy.keys())|length == 0
- rbac_policy.rbac_policy.target_project_id == all_project_symbol
- name: Delete RBAC policy
openstack.cloud.neutron_rbac_policy:
cloud: "{{ cloud }}"
id: "{{ rbac_policy.rbac_policy.id }}"
state: absent
- name: Get all rbac policies for {{ source_project.project.name }} - after deletion
openstack.cloud.neutron_rbac_policies_info:
cloud: "{{ cloud }}"

View File

@@ -5,7 +5,7 @@
state: present
name: ansible_container
- name: Create object from data
- name: Create object
openstack.cloud.object:
cloud: "{{ cloud }}"
state: present
@@ -28,47 +28,6 @@
name: ansible_object
container: ansible_container
- name: Create object from file
block:
- name: Create temporary data file
ansible.builtin.tempfile:
register: tmp_file
- name: Populate data file
ansible.builtin.copy:
content: "this is a test"
dest: "{{ tmp_file.path }}"
- name: Create object from data file
openstack.cloud.object:
cloud: "{{ cloud }}"
state: present
name: ansible_object
filename: "{{ tmp_file.path }}"
container: ansible_container
register: object
always:
- name: Remove temporary data file
ansible.builtin.file:
path: "{{ tmp_file.path }}"
state: absent
when: tmp_file is defined and 'path' in tmp_file
- name: Assert return values of object module
assert:
that:
- object.object.id == "ansible_object"
# allow new fields to be introduced but prevent fields from being removed
- expected_fields|difference(object.object.keys())|length == 0
- name: Delete object
openstack.cloud.object:
cloud: "{{ cloud }}"
state: absent
name: ansible_object
container: ansible_container
- name: Delete container
openstack.cloud.object_container:
cloud: "{{ cloud }}"

View File

@@ -31,21 +31,6 @@
- ('cache-control' in container.container.metadata.keys()|map('lower'))
- container.container.metadata['foo'] == 'bar'
- name: Update container metadata
openstack.cloud.object_container:
cloud: "{{ cloud }}"
name: ansible_container
metadata:
'foo': 'baz'
register: container
- name: Verify container metadata was updated
assert:
that:
- container is changed
- ('cache-control' in container.container.metadata.keys()|map('lower'))
- container.container.metadata['foo'] == 'baz'
- name: Update a container
openstack.cloud.object_container:
cloud: "{{ cloud }}"
@@ -60,7 +45,7 @@
that:
- container is changed
- ('cache-control' not in container.container.metadata.keys()|map('lower'))
- "container.container.metadata == {'foo': 'baz'}"
- "container.container.metadata == {'foo': 'bar'}"
- container.container.read_ACL is none or container.container.read_ACL == ""
- name: Delete container

View File

@@ -1,3 +1,6 @@
binding_profile:
"pci_slot": "0000:03:11.1"
"physical_network": "provider"
expected_fields:
- allowed_address_pairs
- binding_host_id

View File

@@ -256,6 +256,27 @@
state: absent
name: ansible_security_group
- name: Create port (with binding profile)
openstack.cloud.port:
cloud: "{{ cloud }}"
state: present
name: "{{ port_name }}"
network: "{{ network_name }}"
binding_profile: "{{ binding_profile }}"
register: port
- name: Assert binding_profile exists in created port
assert:
that: "port.port['binding_profile']"
- debug: var=port
- name: Delete port (with binding profile)
openstack.cloud.port:
cloud: "{{ cloud }}"
state: absent
name: "{{ port_name }}"
- name: Delete subnet
openstack.cloud.subnet:
cloud: "{{ cloud }}"

View File

@@ -720,5 +720,3 @@
name: "{{ external_network_name }}"
- include_tasks: shared_network.yml
- include_tasks: shared_ext_network.yml

View File

@@ -1,99 +0,0 @@
---
# Test the case where we have a shared external network in one project used as
# the gateway on a router in a second project.
# See https://bugs.launchpad.net/ansible-collections-openstack/+bug/2049658
- name: Create the first project
openstack.cloud.project:
cloud: "{{ cloud }}"
state: present
name: "shared_ext_net_test_1"
description: "Project that contains the external network to be shared"
domain: default
is_enabled: True
register: project_1
- name: Create the external network to be shared
openstack.cloud.network:
cloud: "{{ cloud }}"
state: present
name: "{{ external_network_name }}"
project: "shared_ext_net_test_1"
external: true
shared: true
register: shared_ext_network
- name: Create subnet on external network
openstack.cloud.subnet:
cloud: "{{ cloud }}"
state: present
network_name: "{{ shared_ext_network.id }}"
name: "shared_ext_subnet"
project: "shared_ext_net_test_1"
cidr: "10.6.6.0/24"
register: shared_subnet
- name: Create the second project
openstack.cloud.project:
cloud: "{{ cloud }}"
state: present
name: "shared_ext_net_test_2"
description: "Project that contains the subnet to be shared"
domain: default
is_enabled: True
register: project_2
- name: Create router with gateway on shared external network
openstack.cloud.router:
cloud: "{{ cloud }}"
state: present
name: "shared_ext_net_test2_router"
project: "shared_ext_net_test_2"
network: "{{ external_network_name }}"
register: router
- name: Gather routers info
openstack.cloud.routers_info:
cloud: "{{ cloud }}"
name: "shared_ext_net_test2_router"
register: routers
- name: Verify routers info
assert:
that:
- routers.routers.0.id == router.router.id
- routers.routers.0.external_gateway_info.external_fixed_ips|length == 1
- name: Delete router
openstack.cloud.router:
cloud: "{{ cloud }}"
state: absent
name: "shared_ext_net_test2_router"
project: "shared_ext_net_test_2"
- name: Delete subnet
openstack.cloud.subnet:
cloud: "{{ cloud }}"
state: absent
network_name: "{{ shared_ext_network.id }}"
name: "shared_ext_subnet"
project: "shared_ext_net_test_1"
- name: Delete network
openstack.cloud.network:
cloud: "{{ cloud }}"
state: absent
name: "{{ external_network_name }}"
project: "shared_ext_net_test_1"
- name: Delete project 2
openstack.cloud.project:
cloud: "{{ cloud }}"
state: absent
name: "shared_ext_net_test_2"
- name: Delete project 1
openstack.cloud.project:
cloud: "{{ cloud }}"
state: absent
name: "shared_ext_net_test_1"

View File

@@ -460,14 +460,19 @@
register: server
ignore_errors: true
- name: Assert shelve offload server
assert:
that:
- ((server is success)
or (server is not success
and "Cannot 'shelveOffload' instance" in server.msg
and "while it is in vm_state shelved_offloaded" in server.msg))
- name: Get info about server
openstack.cloud.server_info:
cloud: "{{ cloud }}"
server: ansible_server
register: servers
until: servers.servers.0.task_state == none
retries: 30
delay: 10
- name: Ensure status for server is SHELVED_OFFLOADED
# no change if server has been offloaded automatically after first shelve command

View File

@@ -150,6 +150,3 @@
- name: Subnet Allocation
include_tasks: subnet-allocation.yml
- name: Subnet Allocations from Subnet Pool
include_tasks: subnet-pool.yaml

View File

@@ -68,80 +68,6 @@
name: "{{ subnet_name }}"
state: absent
- name: Create subnet {{ subnet_name }} with multiple allocation pools on network {{ network_name }}
openstack.cloud.subnet:
cloud: "{{ cloud }}"
network_name: "{{ network_name }}"
enable_dhcp: "{{ enable_subnet_dhcp }}"
name: "{{ subnet_name }}"
state: present
cidr: 192.168.0.0/24
gateway_ip: 192.168.0.1
allocation_pools:
- start: 192.168.0.2
end: 192.168.0.4
- start: 192.168.0.10
end: 192.168.0.12
- name: Create subnet {{ subnet_name }} on network {{ network_name }} again
openstack.cloud.subnet:
cloud: "{{ cloud }}"
network_name: "{{ network_name }}"
enable_dhcp: "{{ enable_subnet_dhcp }}"
name: "{{ subnet_name }}"
state: present
cidr: 192.168.0.0/24
gateway_ip: 192.168.0.1
allocation_pools:
- start: 192.168.0.2
end: 192.168.0.4
- start: 192.168.0.10
end: 192.168.0.12
register: idem2
- name: Update subnet {{ subnet_name }} allocation pools
openstack.cloud.subnet:
cloud: "{{ cloud }}"
network_name: "{{ network_name }}"
name: "{{ subnet_name }}"
state: present
cidr: 192.168.0.0/24
gateway_ip: 192.168.0.1
allocation_pools:
- start: 192.168.0.2
end: 192.168.0.8
- start: 192.168.0.10
end: 192.168.0.16
- name: Get Subnet Info
openstack.cloud.subnets_info:
cloud: "{{ cloud }}"
name: "{{ subnet_name }}"
register: subnet_result
- name: Verify Subnet Allocation Pools Exist
assert:
that:
- idem2 is not changed
- subnet_result.subnets is defined
- subnet_result.subnets | length == 1
- subnet_result.subnets[0].allocation_pools is defined
- subnet_result.subnets[0].allocation_pools | length == 2
- name: Verify Subnet Allocation Pools
assert:
that:
- (subnet_result.subnets[0].allocation_pools.0.start == '192.168.0.2' and subnet_result.subnets[0].allocation_pools.0.end == '192.168.0.8') or
(subnet_result.subnets[0].allocation_pools.0.start == '192.168.0.10' and subnet_result.subnets[0].allocation_pools.0.end == '192.168.0.16')
- (subnet_result.subnets[0].allocation_pools.1.start == '192.168.0.2' and subnet_result.subnets[0].allocation_pools.1.end == '192.168.0.8') or
(subnet_result.subnets[0].allocation_pools.1.start == '192.168.0.10' and subnet_result.subnets[0].allocation_pools.1.end == '192.168.0.16')
- name: Delete subnet {{ subnet_name }}
openstack.cloud.subnet:
cloud: "{{ cloud }}"
name: "{{ subnet_name }}"
state: absent
- name: Delete network {{ network_name }}
openstack.cloud.network:
cloud: "{{ cloud }}"

View File

@@ -1,167 +0,0 @@
---
# This test cover case when subnet is constructed
# with few prefixes and neutron API is required
# CIDR parameter to be used together with subnet pool.
- name: Create network {{ network_name }}
openstack.cloud.network:
cloud: "{{ cloud }}"
name: "{{ network_name }}"
state: present
- name: Create address_scope
openstack.cloud.address_scope:
cloud: "{{ cloud }}"
name: "{{ address_scope_name }}"
shared: false
ip_version: "4"
register: create_address_scope
- name: Create subnet pool
openstack.cloud.subnet_pool:
cloud: "{{ cloud }}"
name: "{{ subnet_pool_name }}"
is_shared: false
address_scope: "{{ address_scope_name }}"
prefixes:
- 192.168.0.0/24
- 192.168.42.0/24
register: subnet_pool
- name: Create subnet {{ subnet_name }} on network {{ network_name }} from subnet pool {{ subnet_pool_name }}
openstack.cloud.subnet:
cloud: "{{ cloud }}"
network_name: "{{ network_name }}"
enable_dhcp: "{{ enable_subnet_dhcp }}"
name: "{{ subnet_name }}"
state: present
cidr: 192.168.42.0/24 # we want specific cidr from subnet pool
ip_version: 4
subnet_pool: "{{ subnet_pool_name }}"
gateway_ip: 192.168.42.1
allocation_pool_start: 192.168.42.2
allocation_pool_end: 192.168.42.4
- name: Create subnet {{ subnet_name }} on network {{ network_name }} from subnet pool {{ subnet_pool_name }} again
openstack.cloud.subnet:
cloud: "{{ cloud }}"
network_name: "{{ network_name }}"
enable_dhcp: "{{ enable_subnet_dhcp }}"
name: "{{ subnet_name }}"
state: present
cidr: 192.168.42.0/24
ip_version: 4
subnet_pool: "{{ subnet_pool_name }}"
gateway_ip: 192.168.42.1
allocation_pool_start: 192.168.42.2
allocation_pool_end: 192.168.42.4
register: idem1
- name: Get Subnet Info
openstack.cloud.subnets_info:
cloud: "{{ cloud }}"
name: "{{ subnet_name }}"
register: subnet_result
- name: Verify Subnet Allocation Pools Exist
assert:
that:
- idem1 is not changed
- subnet_result.subnets is defined
- subnet_result.subnets | length == 1
- subnet_result.subnets[0].allocation_pools is defined
- subnet_result.subnets[0].allocation_pools | length == 1
- name: Verify Subnet Allocation Pools
assert:
that:
- subnet_result.subnets[0].allocation_pools.0.start == '192.168.42.2'
- subnet_result.subnets[0].allocation_pools.0.end == '192.168.42.4'
- name: Delete subnet {{ subnet_name }}
openstack.cloud.subnet:
cloud: "{{ cloud }}"
name: "{{ subnet_name }}"
state: absent
- name: Create subnet {{ subnet_name }} with multiple allocation pools on network {{ network_name }} from subnet pool {{ subnet_pool_name }}
openstack.cloud.subnet:
cloud: "{{ cloud }}"
network_name: "{{ network_name }}"
enable_dhcp: "{{ enable_subnet_dhcp }}"
name: "{{ subnet_name }}"
state: present
cidr: 192.168.42.0/24 # we want specific cidr from subnet pool
ip_version: 4
subnet_pool: "{{ subnet_pool_name }}"
gateway_ip: 192.168.42.1
allocation_pools:
- start: 192.168.42.2
end: 192.168.42.4
- start: 192.168.42.6
end: 192.168.42.8
- name: Create subnet {{ subnet_name }} on network {{ network_name }} from subnet pool {{ subnet_pool_name }} again
openstack.cloud.subnet:
cloud: "{{ cloud }}"
network_name: "{{ network_name }}"
enable_dhcp: "{{ enable_subnet_dhcp }}"
name: "{{ subnet_name }}"
state: present
cidr: 192.168.42.0/24
ip_version: 4
subnet_pool: "{{ subnet_pool_name }}"
gateway_ip: 192.168.42.1
allocation_pools:
- start: 192.168.42.2
end: 192.168.42.4
- start: 192.168.42.6
end: 192.168.42.8
register: idem2
- name: Get Subnet Info
openstack.cloud.subnets_info:
cloud: "{{ cloud }}"
name: "{{ subnet_name }}"
register: subnet_result
- name: Verify Subnet Allocation Pools Exist
assert:
that:
- idem2 is not changed
- subnet_result.subnets is defined
- subnet_result.subnets | length == 1
- subnet_result.subnets[0].allocation_pools is defined
- subnet_result.subnets[0].allocation_pools | length == 2
- name: Verify Subnet Allocation Pools
assert:
that:
- (subnet_result.subnets[0].allocation_pools.0.start == '192.168.42.2' and subnet_result.subnets[0].allocation_pools.0.end == '192.168.42.4') or
(subnet_result.subnets[0].allocation_pools.0.start == '192.168.42.6' and subnet_result.subnets[0].allocation_pools.0.end == '192.168.42.8')
- (subnet_result.subnets[0].allocation_pools.1.start == '192.168.42.2' and subnet_result.subnets[0].allocation_pools.1.end == '192.168.42.4') or
(subnet_result.subnets[0].allocation_pools.1.start == '192.168.42.6' and subnet_result.subnets[0].allocation_pools.1.end == '192.168.42.8')
- name: Delete subnet {{ subnet_name }}
openstack.cloud.subnet:
cloud: "{{ cloud }}"
name: "{{ subnet_name }}"
state: absent
- name: Delete created subnet pool
openstack.cloud.subnet_pool:
cloud: "{{ cloud }}"
name: "{{ subnet_pool_name }}"
state: absent
- name: Delete created address scope
openstack.cloud.address_scope:
cloud: "{{ cloud }}"
name: "{{ address_scope_name }}"
state: absent
- name: Delete network {{ network_name }}
openstack.cloud.network:
cloud: "{{ cloud }}"
name: "{{ network_name }}"
state: absent

View File

@@ -1,21 +0,0 @@
expected_fields:
- created_at
- description
- id
- is_admin_state_up
- name
- port_id
- project_id
- revision_number
- status
- sub_ports
- tags
- tenant_id
- updated_at
trunk_name: ansible_trunk
parent_network_name: ansible_parent_port_network
parent_subnet_name: ansible_parent_port_subnet
parent_port_name: ansible_parent_port
subport_network_name: ansible_subport_network
subport_subnet_name: ansible_subport_subnet
subport_name: ansible_subport

View File

@@ -1,131 +0,0 @@
---
- name: Create parent network
openstack.cloud.network:
cloud: "{{ cloud }}"
state: present
name: "{{ parent_network_name }}"
external: true
register: parent_network
- name: Create parent subnet
openstack.cloud.subnet:
cloud: "{{ cloud }}"
state: present
name: "{{ parent_subnet_name }}"
network_name: "{{ parent_network_name }}"
cidr: 10.5.5.0/24
register: parent_subnet
- name: Create parent port
openstack.cloud.port:
cloud: "{{ cloud }}"
state: present
name: "{{ parent_port_name }}"
network: "{{ parent_network_name }}"
fixed_ips:
- ip_address: 10.5.5.69
register: parent_port
- name: Create subport network
openstack.cloud.network:
cloud: "{{ cloud }}"
state: present
name: "{{ subport_network_name }}"
external: true
register: subport_network
- name: Create subport subnet
openstack.cloud.subnet:
cloud: "{{ cloud }}"
state: present
name: "{{ subport_subnet_name }}"
network_name: "{{ subport_network_name }}"
cidr: 10.5.6.0/24
register: subport_subnet
- name: Create subport
openstack.cloud.port:
cloud: "{{ cloud }}"
state: present
name: "{{ subport_name }}"
network: "{{ subport_network_name }}"
fixed_ips:
- ip_address: 10.5.6.55
register: subport
- name: Create trunk
openstack.cloud.trunk:
cloud: "{{ cloud }}"
state: present
name: "{{ trunk_name }}"
port: "{{ parent_port_name }}"
register: trunk
- debug: var=trunk
- name: assert return values of trunk module
assert:
that:
# allow new fields to be introduced but prevent fields from being removed
- expected_fields|difference(trunk.trunk.keys())|length == 0
- name: Add subport to trunk
openstack.cloud.trunk:
cloud: "{{ cloud }}"
state: present
name: "{{ trunk_name }}"
port: "{{ parent_port_name }}"
sub_ports:
- port: "{{ subport_name }}"
segmentation_type: vlan
segmentation_id: 123
- name: Update subport from trunk
openstack.cloud.trunk:
cloud: "{{ cloud }}"
state: present
name: "{{ trunk_name }}"
port: "{{ parent_port_name }}"
sub_ports: []
- name: Delete trunk
openstack.cloud.trunk:
cloud: "{{ cloud }}"
state: absent
name: "{{ trunk_name }}"
- name: Delete subport
openstack.cloud.port:
cloud: "{{ cloud }}"
state: absent
name: "{{ subport_name }}"
- name: Delete subport subnet
openstack.cloud.subnet:
cloud: "{{ cloud }}"
state: absent
name: "{{ subport_subnet_name }}"
- name: Delete subport network
openstack.cloud.network:
cloud: "{{ cloud }}"
state: absent
name: "{{ subport_network_name }}"
- name: Delete parent port
openstack.cloud.port:
cloud: "{{ cloud }}"
state: absent
name: "{{ parent_port_name }}"
- name: Delete parent subnet
openstack.cloud.subnet:
cloud: "{{ cloud }}"
state: absent
name: "{{ parent_subnet_name }}"
- name: Delete parent network
openstack.cloud.network:
cloud: "{{ cloud }}"
state: absent
name: "{{ parent_network_name }}"

View File

@@ -37,7 +37,7 @@
- name: Check info
assert:
that:
- info1.volumes | selectattr("id", "equalto", info.volumes.0.id) | list | length == 1
- info1.volumes | selectattr("id", "equalto", "{{ info.volumes.0.id }}") | list | length == 1
- info1.volumes.0.name == 'ansible_test'
- info1.volumes.0.status == None

View File

@@ -1,9 +0,0 @@
expected_fields:
- availability_zone
- binary
- disabled_reason
- host
- name
- state
- status
- updated_at

View File

@@ -1,23 +0,0 @@
---
- name: Fetch volume services
openstack.cloud.volume_service_info:
cloud: "{{ cloud }}"
register: volume_services
- name: Assert return values of volume_service_info module
assert:
that:
- volume_services.volume_services | length > 0
# allow new fields to be introduced but prevent fields from being removed
- expected_fields|difference(volume_services.volume_services[0].keys())|length == 0
- name: Fetch volume services with filters
openstack.cloud.volume_service_info:
cloud: "{{ cloud }}"
binary: "cinder-volume"
register: volume_services
- name: Assert return values of volume_service_info module
assert:
that:
- volume_services.volume_services | length > 0

View File

@@ -5,7 +5,6 @@
roles:
- { role: address_scope, tags: address_scope }
- { role: application_credential, tags: application_credential }
- { role: auth, tags: auth }
- { role: catalog_service, tags: catalog_service }
- { role: coe_cluster, tags: coe_cluster }
@@ -36,7 +35,6 @@
- { role: object, tags: object }
- { role: object_container, tags: object_container }
- { role: port, tags: port }
- { role: trunk, tags: trunk }
- { role: project, tags: project }
- { role: quota, tags: quota }
- { role: recordset, tags: recordset }
@@ -57,6 +55,5 @@
- { role: volume, tags: volume }
- { role: volume_type, tags: volume_type }
- { role: volume_backup, tags: volume_backup }
- { role: volume_service, tags: volume_service }
- { role: volume_snapshot, tags: volume_snapshot }
- { role: volume_type_access, tags: volume_type_access }

View File

@@ -32,4 +32,4 @@ build_ignore:
- .vscode
- ansible_collections_openstack.egg-info
- changelogs
version: 2.3.1
version: 2.2.0

View File

@@ -2,7 +2,6 @@ requires_ansible: ">=2.8"
action_groups:
openstack:
- address_scope
- application_credential
- auth
- baremetal_deploy_template
- baremetal_inspect
@@ -82,12 +81,10 @@ action_groups:
- subnet
- subnet_pool
- subnets_info
- trunk
- volume
- volume_backup
- volume_backup_info
- volume_info
- volume_service_info
- volume_snapshot
- volume_snapshot_info
- volume_type_access

View File

@@ -183,7 +183,7 @@ def openstack_cloud_from_module(module, min_version=None, max_version=None):
" excluded.")
for param in (
'auth', 'region_name', 'validate_certs',
'ca_cert', 'client_cert', 'client_key', 'api_timeout', 'auth_type'):
'ca_cert', 'client_key', 'api_timeout', 'auth_type'):
if module.params[param] is not None:
module.fail_json(msg=fail_message.format(param=param))
# For 'interface' parameter, fail if we receive a non-default value
@@ -199,7 +199,6 @@ def openstack_cloud_from_module(module, min_version=None, max_version=None):
verify=module.params['validate_certs'],
cacert=module.params['ca_cert'],
key=module.params['client_key'],
cert=module.params['client_cert'],
api_timeout=module.params['api_timeout'],
interface=module.params['interface'],
)
@@ -359,7 +358,7 @@ class OpenStackModule:
" excluded.")
for param in (
'auth', 'region_name', 'validate_certs',
'ca_cert', 'client_cert', 'client_key', 'api_timeout', 'auth_type'):
'ca_cert', 'client_key', 'api_timeout', 'auth_type'):
if self.params[param] is not None:
self.fail_json(msg=fail_message.format(param=param))
# For 'interface' parameter, fail if we receive a non-default value

View File

@@ -1,332 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2024 Red Hat, Inc.
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
DOCUMENTATION = r"""
---
module: application_credential
short_description: Manage OpenStack Identity (Keystone) application credentials
author: OpenStack Ansible SIG
description:
- Create or delete an OpenStack Identity (Keystone) application credential.
- When the secret parameter is not set a secret will be generated and returned
- in the response. Existing credentials cannot be modified so running this module
- against an existing credential will result in it being deleted and recreated.
- This needs to be taken into account when the secret is generated, as the secret
- will change on each run of the module.
options:
name:
description:
- Name of the application credential.
required: true
type: str
description:
description:
- Application credential description.
type: str
secret:
description:
- Secret to use for authentication
- (if not provided, one will be generated).
type: str
roles:
description:
- Roles to authorize (name or ID).
type: list
elements: dict
suboptions:
name:
description: Name of role
type: str
id:
description: ID of role
type: str
domain_id:
description: Domain ID
type: str
expires_at:
description:
- Sets an expiration date for the application credential,
- format of YYYY-mm-ddTHH:MM:SS
- (if not provided, the application credential will not expire).
type: str
unrestricted:
description:
- Enable application credential to create and delete other application
- credentials and trusts (this is potentially dangerous behavior and is
- disabled by default).
default: false
type: bool
access_rules:
description:
- List of access rules, each containing a request method, path, and service.
type: list
elements: dict
suboptions:
service:
description: Name of service endpoint
type: str
required: true
path:
description: Path portion of access URL
type: str
required: true
method:
description: HTTP method
type: str
required: true
state:
description:
- Should the resource be present or absent.
- Application credentials are immutable so running with an existing present
- credential will result in the credential being deleted and recreated.
choices: [present, absent]
default: present
type: str
extends_documentation_fragment:
- openstack.cloud.openstack
"""
EXAMPLES = r"""
- name: Create application credential
openstack.cloud.application_credential:
cloud: mycloud
description: demodescription
name: democreds
state: present
- name: Create application credential with expiration, access rules and roles
openstack.cloud.application_credential:
cloud: mycloud
description: demodescription
name: democreds
access_rules:
- service: "compute"
path: "/v2.1/servers"
method: "GET"
expires_at: "2024-02-29T09:29:59"
roles:
- name: Member
state: present
- name: Delete application credential
openstack.cloud.application_credential:
cloud: mycloud
name: democreds
state: absent
"""
# Fix: the top-level description said "Dictionary describing the project.",
# a copy-paste error — this module returns an application credential.
RETURN = r"""
application_credential:
  description: Dictionary describing the application credential.
  returned: On success when I(state) is C(present).
  type: dict
  contains:
    id:
      description: The ID of the application credential.
      type: str
      sample: "2e73d1b4f0cb473f920bd54dfce3c26d"
    name:
      description: The name of the application credential.
      type: str
      sample: "appcreds"
    secret:
      description: Secret to use for authentication
                   (if not provided, returns the generated value).
      type: str
      sample: "JxE7LajLY75NZgDH1hfu0N_6xS9hQ-Af40W3"
    description:
      description: A description of the application credential's purpose.
      type: str
      sample: "App credential"
    expires_at:
      description: The expiration time of the application credential in UTC,
                   if one was specified.
      type: str
      sample: "2024-02-29T09:29:59.000000"
    project_id:
      description: The ID of the project the application credential was created
                   for and that authentication requests using this application
                   credential will be scoped to.
      type: str
      sample: "4b633c451ac74233be3721a3635275e5"
    roles:
      description: A list of one or more roles that this application credential
                   has associated with its project. A token using this application
                   credential will have these same roles.
      type: list
      elements: dict
      sample: [{"name": "Member"}]
    access_rules:
      description: A list of access_rules objects
      type: list
      elements: dict
      sample:
        - id: "edecb6c791d541a3b458199858470d20"
          service: "compute"
          path: "/v2.1/servers"
          method: "GET"
    unrestricted:
      description: A flag indicating whether the application credential may be
                   used for creation or destruction of other application credentials
                   or trusts.
      type: bool
cloud:
  description: The current cloud config with the username and password replaced
               with the name and secret of the application credential. This
               can be passed to the cloud parameter of other tasks, or written
               to an openstack cloud config file.
  returned: On success when I(state) is C(present).
  type: dict
  sample:
    auth_type: "v3applicationcredential"
    auth:
      auth_url: "https://192.0.2.1/identity"
      application_credential_secret: "JxE7LajLY75NZgDH1hfu0N_6xS9hQ-Af40W3"
      application_credential_id: "3e73d1b4f0cb473f920bd54dfce3c26d"
"""
import copy
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import (
OpenStackModule,
)
try:
import openstack.config
except ImportError:
pass
class IdentityApplicationCredentialModule(OpenStackModule):
    """Create or delete a Keystone application credential.

    Application credentials are immutable server-side: when one with the
    requested name already exists and state is 'present', it is deleted
    and recreated, which regenerates the secret unless one was supplied
    explicitly in the module parameters.
    """

    argument_spec = dict(
        name=dict(required=True),
        description=dict(),
        secret=dict(no_log=True),
        roles=dict(
            type="list",
            elements="dict",
            options=dict(name=dict(), id=dict(), domain_id=dict()),
        ),
        expires_at=dict(),
        unrestricted=dict(type="bool", default=False),
        access_rules=dict(
            type="list",
            elements="dict",
            options=dict(
                service=dict(required=True),
                path=dict(required=True),
                method=dict(required=True),
            ),
        ),
        state=dict(default="present", choices=["absent", "present"]),
    )

    module_kwargs = dict()

    # Cloud name from module params, cached by openstack_cloud_from_module()
    # before the base class consumes it; used later to rebuild a cloud
    # config dict for the caller.
    cloud = None

    def openstack_cloud_from_module(self):
        # Fetch cloud param before it is popped
        self.cloud = self.params["cloud"]
        return OpenStackModule.openstack_cloud_from_module(self)

    def run(self):
        """Dispatch on desired state vs. whether the credential exists."""
        state = self.params["state"]

        creds = self._find()

        if state == "present" and not creds:
            # Create creds
            creds = self._create().to_dict(computed=False)
            cloud_config = self._get_cloud_config(creds)
            self.exit_json(
                changed=True, application_credential=creds, cloud=cloud_config
            )

        elif state == "present" and creds:
            # Recreate immutable creds
            self._delete(creds)
            creds = self._create().to_dict(computed=False)
            cloud_config = self._get_cloud_config(creds)
            self.exit_json(
                changed=True, application_credential=creds, cloud=cloud_config
            )

        elif state == "absent" and creds:
            # Delete creds
            self._delete(creds)
            self.exit_json(changed=True)

        elif state == "absent" and not creds:
            # Do nothing
            self.exit_json(changed=False)

    def _get_user_id(self):
        # Application credentials are always scoped to the requesting user.
        return self.conn.session.get_user_id()

    def _create(self):
        """Create the application credential from module parameters."""
        # Only forward parameters the user actually set.
        kwargs = dict(
            (k, self.params[k])
            for k in [
                "name",
                "description",
                "secret",
                "expires_at",
                "unrestricted",
                "access_rules",
            ]
            if self.params[k] is not None
        )

        roles = self.params["roles"]
        if roles:
            # Drop suboption keys the user left unset (None) from each role.
            kwroles = []
            for role in roles:
                kwroles.append(
                    dict(
                        (k, role[k])
                        for k in ["name", "id", "domain_id"]
                        if role[k] is not None
                    )
                )
            kwargs["roles"] = kwroles

        kwargs["user"] = self._get_user_id()

        creds = self.conn.identity.create_application_credential(**kwargs)
        return creds

    def _get_cloud_config(self, creds):
        """Return a copy of the current cloud config rewritten to
        authenticate with the new application credential."""
        cloud_region = openstack.config.OpenStackConfig().get_one(self.cloud)
        conf = cloud_region.config

        # Deep copy so the live cloud config is not mutated.
        cloud_config = copy.deepcopy(conf)
        cloud_config["auth_type"] = "v3applicationcredential"
        cloud_config["auth"] = {
            "application_credential_id": creds["id"],
            "application_credential_secret": creds["secret"],
            "auth_url": conf["auth"]["auth_url"],
        }
        return cloud_config

    def _delete(self, creds):
        user = self._get_user_id()
        self.conn.identity.delete_application_credential(user, creds.id)

    def _find(self):
        # Look the credential up by name within the current user's scope.
        name = self.params["name"]
        user = self._get_user_id()
        return self.conn.identity.find_application_credential(
            user=user, name_or_id=name
        )
def main():
    """Instantiate the module and run it."""
    IdentityApplicationCredentialModule()()


if __name__ == "__main__":
    main()

View File

@@ -80,10 +80,6 @@ options:
- Magnum's default value for I(is_registry_enabled) is C(false).
type: bool
aliases: ['registry_enabled']
insecure_registry:
description:
- The URL pointing to users own private insecure docker registry.
type: str
is_tls_disabled:
description:
- Indicates whether the TLS should be disabled.
@@ -346,7 +342,6 @@ class COEClusterTemplateModule(OpenStackModule):
keypair_id=dict(),
labels=dict(type='raw'),
master_flavor_id=dict(),
insecure_registry=dict(),
is_master_lb_enabled=dict(type='bool', default=False,
aliases=['master_lb_enabled']),
is_public=dict(type='bool', aliases=['public']),
@@ -417,7 +412,6 @@ class COEClusterTemplateModule(OpenStackModule):
'fixed_subnet', 'flavor_id',
'http_proxy', 'https_proxy',
'image_id',
'insecure_registry',
'is_floating_ip_enabled',
'is_master_lb_enabled',
'is_public', 'is_registry_enabled',
@@ -433,9 +427,6 @@ class COEClusterTemplateModule(OpenStackModule):
if isinstance(labels, str):
labels = dict([tuple(kv.split(":"))
for kv in labels.split(",")])
elif isinstance(labels, dict):
labels = dict({str(k): str(v)
for k, v in labels.items()})
if labels != cluster_template['labels']:
non_updateable_keys.append('labels')
@@ -467,7 +458,7 @@ class COEClusterTemplateModule(OpenStackModule):
'external_network_id', 'fixed_network',
'fixed_subnet', 'flavor_id', 'http_proxy',
'https_proxy', 'image_id',
'insecure_registry', 'is_floating_ip_enabled',
'is_floating_ip_enabled',
'is_master_lb_enabled', 'is_public',
'is_registry_enabled', 'is_tls_disabled',
'keypair_id', 'master_flavor_id', 'name',

View File

@@ -56,7 +56,7 @@ options:
description:
- When I(update_password) is C(always), then the password will always be
updated.
- When I(update_password) is C(on_create), then the password is only set
- When I(update_password) is C(on_create), then the password is only set
when creating a user.
type: str
extends_documentation_fragment:

View File

@@ -100,8 +100,8 @@ options:
type: str
state:
description:
- Should the resource be present, absent or inactive.
choices: [present, absent, inactive]
- Should the resource be present or absent.
choices: [present, absent]
default: present
type: str
tags:
@@ -122,12 +122,6 @@ options:
- I(volume) has been deprecated. Use module M(openstack.cloud.volume)
instead.
type: str
use_import:
description:
- Use the 'glance-direct' method of the interoperable image import mechanism.
- Should only be used when needed, such as when the user needs the cloud to
transform image format.
type: bool
extends_documentation_fragment:
- openstack.cloud.openstack
'''
@@ -153,7 +147,7 @@ EXAMPLES = r'''
RETURN = r'''
image:
description: Dictionary describing the Glance image.
returned: On success when I(state) is C(present) or C(inactive).
returned: On success when I(state) is C(present).
type: dict
contains:
id:
@@ -394,11 +388,10 @@ class ImageModule(OpenStackModule):
owner_domain=dict(aliases=['project_domain']),
properties=dict(type='dict', default={}),
ramdisk=dict(),
state=dict(default='present', choices=['absent', 'present', 'inactive']),
state=dict(default='present', choices=['absent', 'present']),
tags=dict(type='list', default=[], elements='str'),
visibility=dict(choices=['public', 'private', 'shared', 'community']),
volume=dict(),
use_import=dict(type='bool'),
)
module_kwargs = dict(
@@ -411,8 +404,7 @@ class ImageModule(OpenStackModule):
# resource attributes obtainable directly from params
attr_params = ('id', 'name', 'filename', 'disk_format',
'container_format', 'wait', 'timeout', 'is_public',
'is_protected', 'min_disk', 'min_ram', 'volume', 'tags',
'use_import')
'is_protected', 'min_disk', 'min_ram', 'volume', 'tags')
def _resolve_visibility(self):
"""resolve a visibility value to be compatible with older versions"""
@@ -510,10 +502,6 @@ class ImageModule(OpenStackModule):
self.exit_json(changed=changed,
image=self._return_value(image.id))
if image['status'] == 'deactivated':
self.conn.image.reactivate_image(image)
changed = True
update_payload = self._build_update(image)
if update_payload:
@@ -529,20 +517,6 @@ class ImageModule(OpenStackModule):
wait=self.params['wait'],
timeout=self.params['timeout'])
changed = True
elif self.params['state'] == 'inactive' and image is not None:
if image['status'] == 'active':
self.conn.image.deactivate_image(image)
changed = True
update_payload = self._build_update(image)
if update_payload:
self.conn.image.update_image(image.id, **update_payload)
changed = True
self.exit_json(changed=changed, image=self._return_value(image.id))
self.exit_json(changed=changed)

View File

@@ -142,7 +142,7 @@ pool:
'''
EXAMPLES = r'''
- name: Create a load-balancer pool
- name: Create a load-balancer pool
openstack.cloud.lb_pool:
cloud: mycloud
lb_algorithm: ROUND_ROBIN
@@ -151,7 +151,7 @@ EXAMPLES = r'''
protocol: HTTP
state: present
- name: Delete a load-balancer pool
- name: Delete a load-balancer pool
openstack.cloud.lb_pool:
cloud: mycloud
name: test-pool

View File

@@ -30,15 +30,6 @@ options:
description:
- Whether this network is externally accessible.
type: bool
is_default:
description:
- Whether this network is default network or not. This is only effective
with external networks.
type: bool
is_vlan_transparent:
description:
- Whether this network is vlan_transparent or not.
type: bool
state:
description:
- Indicate desired state of the resource.
@@ -199,8 +190,6 @@ class NetworkModule(OpenStackModule):
shared=dict(type='bool'),
admin_state_up=dict(type='bool'),
external=dict(type='bool'),
is_default=dict(type='bool'),
is_vlan_transparent=dict(type='bool'),
provider_physical_network=dict(),
provider_network_type=dict(),
provider_segmentation_id=dict(type='int'),
@@ -218,8 +207,6 @@ class NetworkModule(OpenStackModule):
shared = self.params['shared']
admin_state_up = self.params['admin_state_up']
external = self.params['external']
is_default = self.params['is_default']
is_vlan_transparent = self.params['is_vlan_transparent']
provider_physical_network = self.params['provider_physical_network']
provider_network_type = self.params['provider_network_type']
provider_segmentation_id = self.params['provider_segmentation_id']
@@ -257,10 +244,6 @@ class NetworkModule(OpenStackModule):
kwargs["admin_state_up"] = admin_state_up
if external is not None:
kwargs["is_router_external"] = external
if is_default is not None:
kwargs["is_default"] = is_default
if is_vlan_transparent is not None:
kwargs["is_vlan_transparent"] = is_vlan_transparent
if not net:
net = self.conn.network.create_network(name=name, **kwargs)

View File

@@ -65,12 +65,6 @@ options:
- Required when creating or updating a RBAC policy rule, ignored when
deleting a policy.
type: str
target_all_project:
description:
- Whether all projects are targeted for access.
- If this option set to true, C(target_project_id) is ignored.
type: bool
default: 'false'
state:
description:
- Whether the RBAC rule should be C(present) or C(absent).
@@ -151,8 +145,6 @@ from ansible_collections.openstack.cloud.plugins.module_utils.openstack import O
class NeutronRBACPolicy(OpenStackModule):
all_project_symbol = '*'
argument_spec = dict(
action=dict(choices=['access_as_external', 'access_as_shared']),
id=dict(aliases=['policy_id']),
@@ -161,22 +153,17 @@ class NeutronRBACPolicy(OpenStackModule):
project_id=dict(),
state=dict(default='present', choices=['absent', 'present']),
target_project_id=dict(),
target_all_project=dict(type='bool', default=False),
)
module_kwargs = dict(
required_if=[
('state', 'present', ('target_project_id', 'target_all_project',), True),
('state', 'present', ('target_project_id',)),
('state', 'absent', ('id',)),
],
supports_check_mode=True,
)
def run(self):
target_all_project = self.params.get('target_all_project')
if target_all_project:
self.params['target_project_id'] = self.all_project_symbol
state = self.params['state']
policy = self._find()
@@ -275,7 +262,7 @@ class NeutronRBACPolicy(OpenStackModule):
return [p for p in policies
if any(p[k] == self.params[k]
for k in ['object_id'])]
for k in ['object_id', 'target_project_id'])]
def _update(self, policy, update):
attributes = update.get('attributes')

View File

@@ -295,11 +295,8 @@ class ObjectModule(OpenStackModule):
for k in ['data', 'filename']
if self.params[k] is not None)
object = self.conn.object_store.create_object(container_name, name,
**kwargs)
if not object:
object = self._find()
return object
return self.conn.object_store.create_object(container_name, name,
**kwargs)
def _delete(self, object):
container_name = self.params['container']

View File

@@ -269,7 +269,7 @@ class ContainerModule(OpenStackModule):
if metadata is not None:
# Swift metadata keys must be treated as case-insensitive
old_metadata = dict((k.lower(), v)
for k, v in (container.metadata or {}).items())
for k, v in (container.metadata or {}))
new_metadata = dict((k, v) for k, v in metadata.items()
if k.lower() not in old_metadata
or v != old_metadata[k.lower()])

View File

@@ -404,7 +404,7 @@ class QuotaModule(OpenStackModule):
def _get_quotas(self, project):
quota = {}
if self.conn.has_service('block-storage'):
quota['volume'] = self.conn.block_storage.get_quota_set(project.id)
quota['volume'] = self.conn.block_storage.get_quota_set(project)
else:
self.warn('Block storage service aka volume service is not'
' supported by your cloud. Ignoring volume quotas.')
@@ -477,11 +477,11 @@ class QuotaModule(OpenStackModule):
if changes:
if 'volume' in changes:
quotas['volume'] = self.conn.block_storage.update_quota_set(
project.id, **changes['volume'])
self.conn.block_storage.update_quota_set(
quotas['volume'], **changes['volume'])
if 'compute' in changes:
quotas['compute'] = self.conn.compute.update_quota_set(
project.id, **changes['compute'])
self.conn.compute.update_quota_set(
quotas['compute'], **changes['compute'])
if 'network' in changes:
quotas['network'] = self.conn.network.update_quota(
project.id, **changes['network'])

View File

@@ -616,13 +616,9 @@ class RouterModule(OpenStackModule):
router = self.conn.network.find_router(name, **query_filters)
network = None
if network_name_or_id:
# First try to find a network in the specified project.
network = self.conn.network.find_network(network_name_or_id,
ignore_missing=False,
**query_filters)
if not network:
# Fall back to a global search for the network.
network = self.conn.network.find_network(network_name_or_id,
ignore_missing=False)
# Validate and cache the subnet IDs so we can avoid duplicate checks
# and expensive API calls.

View File

@@ -1032,7 +1032,7 @@ class ServerModule(OpenStackModule):
def _create(self):
for k in ['auto_ip', 'floating_ips', 'floating_ip_pools']:
if self.params[k] \
if self.params[k] is not None \
and self.params['wait'] is False:
# floating ip addresses will only be added if
# we wait until the server has been created
@@ -1091,15 +1091,6 @@ class ServerModule(OpenStackModule):
server.id,
**dict((k, self.params[k])
for k in ['wait', 'timeout', 'delete_ips']))
# Nova returns server for some time with the "DELETED" state. Our tests
# are not able to handle this, so wait for server to really disappear.
if self.params['wait']:
for count in self.sdk.utils.iterate_timeout(
timeout=self.params['timeout'],
message="Timeout waiting for server to be absent"
):
if self.conn.compute.find_server(server.id) is None:
break
def _update(self, server, update):
server = self._update_ips(server, update)

View File

@@ -377,9 +377,7 @@ class ServerInfoModule(OpenStackModule):
kwargs['name_or_id'] = self.params['name']
self.exit(changed=False,
servers=[server.to_dict(computed=False)
if hasattr(server, "to_dict") else server
for server in
servers=[server.to_dict(computed=False) for server in
self.conn.search_servers(**kwargs)])

View File

@@ -28,12 +28,6 @@ options:
- From the subnet pool the last IP that should be assigned to the
virtual machines.
type: str
allocation_pools:
description:
- List of allocation pools to assign to the subnet. Each element
consists of a 'start' and 'end' value.
type: list
elements: dict
cidr:
description:
- The CIDR representation of the subnet that should be assigned to
@@ -305,7 +299,6 @@ class SubnetModule(OpenStackModule):
dns_nameservers=dict(type='list', elements='str'),
allocation_pool_start=dict(),
allocation_pool_end=dict(),
allocation_pools=dict(type='list', elements='dict'),
host_routes=dict(type='list', elements='dict'),
ipv6_ra_mode=dict(choices=ipv6_mode_choices),
ipv6_address_mode=dict(choices=ipv6_mode_choices),
@@ -328,9 +321,7 @@ class SubnetModule(OpenStackModule):
('cidr', 'use_default_subnet_pool', 'subnet_pool'), True),
],
mutually_exclusive=[
('use_default_subnet_pool', 'subnet_pool'),
('allocation_pool_start', 'allocation_pools'),
('allocation_pool_end', 'allocation_pools')
('cidr', 'use_default_subnet_pool', 'subnet_pool')
]
)
@@ -376,10 +367,7 @@ class SubnetModule(OpenStackModule):
params['project_id'] = project.id
if subnet_pool:
params['subnet_pool_id'] = subnet_pool.id
if self.params['allocation_pool_start']:
params['allocation_pools'] = self._build_pool()
else:
params['allocation_pools'] = self.params['allocation_pools']
params['allocation_pools'] = self._build_pool()
params = self._add_extra_attrs(params)
params = {k: v for k, v in params.items() if v is not None}
return params
@@ -394,10 +382,6 @@ class SubnetModule(OpenStackModule):
params['host_routes'].sort(key=lambda r: sorted(r.items()))
subnet['host_routes'].sort(key=lambda r: sorted(r.items()))
if 'allocation_pools' in params:
params['allocation_pools'].sort(key=lambda r: sorted(r.items()))
subnet['allocation_pools'].sort(key=lambda r: sorted(r.items()))
updates = {k: params[k] for k in params if params[k] != subnet[k]}
if self.params['disable_gateway_ip'] and subnet.gateway_ip:
updates['gateway_ip'] = None

View File

@@ -1,306 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2024 Binero AB
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
DOCUMENTATION = '''
---
module: trunk
short_description: Add or delete trunks from an OpenStack cloud.
author: OpenStack Ansible SIG
description:
- Add or delete trunk from an OpenStack cloud.
options:
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
type: str
name:
description:
- Name that has to be given to the trunk.
- This port attribute cannot be updated.
type: str
required: true
port:
description:
- The name or ID of the port for the trunk.
type: str
required: false
sub_ports:
description:
- The sub ports on the trunk.
type: list
required: false
elements: dict
suboptions:
port:
description: The ID or name of the port.
type: str
segmentation_type:
description: The segmentation type to use.
type: str
segmentation_id:
description: The segmentation ID to use.
type: int
extends_documentation_fragment:
- openstack.cloud.openstack
'''
EXAMPLES = '''
# Create a trunk
- openstack.cloud.trunk:
state: present
auth:
auth_url: https://identity.example.com
username: admin
password: admin
project_name: admin
name: trunk1
port: port1
# Create a trunk with a subport
- openstack.cloud.trunk:
state: present
cloud: my-cloud
name: trunk1
port: port1
sub_ports:
- name: subport1
segmentation_type: vlan
segmentation_id: 123
# Remove a trunk
- openstack.cloud.trunk:
state: absent
auth:
auth_url: https://identity.example.com
username: admin
password: admin
project_name: admin
name: trunk1
'''
RETURN = '''
trunk:
description: Dictionary describing the trunk.
type: dict
returned: On success when I(state) is C(present).
contains:
created_at:
description: Timestamp when the trunk was created.
returned: success
type: str
sample: "2022-02-03T13:28:25Z"
description:
description: The trunk description.
returned: success
type: str
id:
description: The trunk ID.
returned: success
type: str
sample: "3ec25c97-7052-4ab8-a8ba-92faf84148de"
is_admin_state_up:
description: |
The administrative state of the trunk, which is up C(True) or
down C(False).
returned: success
type: bool
sample: true
name:
description: The trunk name.
returned: success
type: str
sample: "trunk_name"
port_id:
description: The ID of the port for the trunk
returned: success
type: str
sample: "5ec25c97-7052-4ab8-a8ba-92faf84148df"
project_id:
description: The ID of the project who owns the trunk.
returned: success
type: str
sample: "aa1ede4f-3952-4131-aab6-3b8902268c7d"
revision_number:
description: The revision number of the resource.
returned: success
type: int
sample: 0
status:
description: The trunk status. Value is C(ACTIVE) or C(DOWN).
returned: success
type: str
sample: "ACTIVE"
sub_ports:
description: List of sub ports on the trunk.
returned: success
type: list
sample: []
tags:
description: The list of tags on the resource.
returned: success
type: list
sample: []
tenant_id:
description: Same as I(project_id). Deprecated.
returned: success
type: str
sample: "51fce036d7984ba6af4f6c849f65ef00"
updated_at:
description: Timestamp when the trunk was last updated.
returned: success
type: str
sample: "2022-02-03T13:28:25Z"
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
class TrunkModule(OpenStackModule):
    """Create, update or delete a Neutron trunk and manage its subports."""

    argument_spec = dict(
        state=dict(default='present', choices=['absent', 'present']),
        name=dict(required=True),
        port=dict(),
        sub_ports=dict(type='list', elements='dict'),
    )

    module_kwargs = dict(
        required_if=[
            ('state', 'present', ('port',)),
        ],
        supports_check_mode=True
    )

    def run(self):
        port_name_or_id = self.params['port']
        name_or_id = self.params['name']
        state = self.params['state']

        port = None
        if port_name_or_id:
            # ignore_missing=False: the SDK raises if the parent port
            # does not exist.
            port = self.conn.network.find_port(
                port_name_or_id, ignore_missing=False)

        trunk = self.conn.network.find_trunk(name_or_id)

        # Resolve every requested subport name/ID to a port object up front
        # so missing subports fail before any change is made.
        sub_ports = []
        psp = self.params['sub_ports'] or []
        for sp in psp:
            subport = self.conn.network.find_port(
                sp['port'], ignore_missing=False)
            sub_ports.append(subport)

        if self.ansible.check_mode:
            self.exit_json(changed=self._will_change(state, trunk, sub_ports))

        if state == 'present' and not trunk:
            # create trunk
            trunk = self._create(name_or_id, port)
            self.exit_json(changed=True,
                           trunk=trunk.to_dict(computed=False))
        elif state == 'present' and trunk:
            # update trunk
            update = self._build_update(trunk, sub_ports)
            if update:
                trunk = self._update(trunk, update)
            self.exit_json(changed=bool(update),
                           trunk=trunk.to_dict(computed=False))
        elif state == 'absent' and trunk:
            # delete trunk
            self._delete(trunk)
            self.exit_json(changed=True)
        elif state == 'absent' and not trunk:
            # do nothing
            self.exit_json(changed=False)

    def _build_update(self, trunk, sub_ports):
        """Diff requested subports against the trunk's current subports.

        Returns a dict with optional 'add_sub_ports' and 'del_sub_ports'
        lists; an empty dict means the trunk already matches the request.
        """
        add_sub_ports = []
        del_sub_ports = []

        # Requested subports not yet attached to the trunk must be added.
        for sp in sub_ports:
            found = False
            for tsp in trunk['sub_ports']:
                if tsp['port_id'] == sp['id']:
                    found = True
                    break
            if found is False:
                # Recover segmentation details from the matching module
                # parameter entry.
                # NOTE(review): matching is by port *name*; a subport given
                # as an ID in sub_ports may not match here — verify.
                psp = self.params['sub_ports'] or []
                for k in psp:
                    if sp['name'] == k['port']:
                        spobj = {
                            'port_id': sp['id'],
                            'segmentation_type': k['segmentation_type'],
                            'segmentation_id': k['segmentation_id'],
                        }
                        add_sub_ports.append(spobj)
                        break

        # Attached subports absent from the request must be removed.
        for tsp in trunk['sub_ports']:
            found = False
            for sp in sub_ports:
                if sp['id'] == tsp['port_id']:
                    found = True
                    break
            if found is False:
                del_sub_ports.append({'port_id': tsp['port_id']})

        update = {}
        if len(add_sub_ports) > 0:
            update['add_sub_ports'] = add_sub_ports
        if len(del_sub_ports) > 0:
            update['del_sub_ports'] = del_sub_ports
        return update

    def _create(self, name, port):
        args = {}
        args['name'] = name
        args['port_id'] = port.id
        return self.conn.network.create_trunk(**args)

    def _delete(self, trunk):
        # Detach all subports first; the trunk itself is deleted afterwards.
        sub_ports = []
        for sp in trunk['sub_ports']:
            sub_ports.append({'port_id': sp['port_id']})
        self.conn.network.delete_trunk_subports(trunk.id, sub_ports)
        self.conn.network.delete_trunk(trunk.id)

    def _update(self, trunk, update):
        if update.get('add_sub_ports', None):
            self.conn.network.add_trunk_subports(
                trunk, update['add_sub_ports'])

        if update.get('del_sub_ports', None):
            self.conn.network.delete_trunk_subports(
                trunk, update['del_sub_ports'])

        # Re-fetch so the returned trunk reflects the subport changes.
        return self.conn.network.find_trunk(trunk.id)

    def _will_change(self, state, trunk, sub_ports):
        """Predict whether run() would modify anything (check mode)."""
        if state == 'present' and not trunk:
            return True
        elif state == 'present' and trunk:
            return bool(self._build_update(trunk, sub_ports))
        elif state == 'absent' and trunk:
            return True
        else:
            return False
def main():
    """Entry point: instantiate the trunk module and execute it."""
    trunk_module = TrunkModule()
    trunk_module()


if __name__ == '__main__':
    main()

View File

@@ -1,103 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2023 Bitswalk, inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# DOCUMENTATION is rendered by ansible-doc; it declares the module name,
# maintainer, and the two optional list filters (binary, host).
DOCUMENTATION = r'''
---
module: volume_service_info
short_description: Fetch OpenStack Volume (Cinder) services
author: OpenStack Ansible SIG
description:
- Fetch OpenStack Volume (Cinder) services.
options:
binary:
description:
- Filter the service list result by binary name of the service.
type: str
host:
description:
- Filter the service list result by the host name.
type: str
extends_documentation_fragment:
- openstack.cloud.openstack
'''
# EXAMPLES shows typical playbook usage; documentation only, never executed.
EXAMPLES = r'''
- name: Fetch all OpenStack Volume (Cinder) services
openstack.cloud.volume_service_info:
cloud: awesomecloud
- name: Fetch a subset of OpenStack Volume (Cinder) services
openstack.cloud.volume_service_info:
cloud: awesomecloud
binary: "cinder-volume"
host: "localhost"
'''
# RETURN documents the fact registered by this module. The timestamp field
# is named ``updated_at`` in the SDK's block-storage Service resource; the
# previous key ``update_at`` was a typo and documented a non-existent field.
RETURN = r'''
volume_services:
description: List of dictionaries describing Volume (Cinder) services.
returned: always
type: list
elements: dict
contains:
availability_zone:
description: The availability zone name.
type: str
binary:
description: The binary name of the service.
type: str
disabled_reason:
description: The reason why the service is disabled
type: str
host:
description: The name of the host.
type: str
name:
description: Service name
type: str
state:
description: The state of the service. One of up or down.
type: str
status:
description: The status of the service. One of enabled or disabled.
type: str
updated_at:
description: The date and time when the resource was updated
type: str
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
class VolumeServiceInfoModule(OpenStackModule):
    """Fetch OpenStack Volume (Cinder) services, optionally filtered.

    Read-only: always exits with ``changed=False``, so check mode is
    trivially supported.
    """

    argument_spec = dict(
        binary=dict(),
        host=dict(),
    )

    module_kwargs = dict(
        supports_check_mode=True
    )

    def run(self):
        # Forward only the filters the user actually supplied.
        filters = {}
        for key in ('binary', 'host'):
            value = self.params[key]
            if value is not None:
                filters[key] = value
        services = self.conn.block_storage.services(**filters)
        self.exit_json(
            changed=False,
            volume_services=[service.to_dict(computed=False)
                             for service in services])
def main():
    """Entry point: instantiate the info module and execute it."""
    info_module = VolumeServiceInfoModule()
    info_module()


if __name__ == '__main__':
    main()

View File

@@ -4,6 +4,5 @@
import setuptools
setuptools.setup(
setup_requires=['pbr', 'setuptools'],
pbr=True,
py_modules=[])
setup_requires=['pbr'],
pbr=True)

View File

@@ -1,12 +0,0 @@
ansible-core>=2.16.0,<2.17.0
flake8
galaxy-importer
openstacksdk
pycodestyle
pylint
rstcheck
ruamel.yaml
tox
voluptuous
yamllint
setuptools

View File

@@ -1,12 +0,0 @@
ansible-core>=2.18.0,<2.19.0
flake8
galaxy-importer
openstacksdk
pycodestyle
pylint
rstcheck
ruamel.yaml
tox
voluptuous
yamllint
setuptools

View File

@@ -220,45 +220,3 @@ class TestCreateServer(object):
os_server._create_server(self.module, self.cloud)
assert 'missing_network' in self.module.fail_json.call_args[1]['msg']
def test_create_server_auto_ip_wait(self):
    '''
    - openstack.cloud.server:
          image: cirros
          auto_ip: true
          wait: false
          nics:
              - net-name: network1
    '''
    # auto_ip together with wait=false must make _create_server fail.
    with pytest.raises(AnsibleFail):
        os_server._create_server(self.module, self.cloud)
    failure_msg = self.module.fail_json.call_args[1]['msg']
    assert 'auto_ip' in failure_msg
def test_create_server_floating_ips_wait(self):
    '''
    - openstack.cloud.server:
          image: cirros
          floating_ips: ['0.0.0.0']
          wait: false
          nics:
              - net-name: network1
    '''
    # floating_ips together with wait=false must make _create_server fail.
    with pytest.raises(AnsibleFail):
        os_server._create_server(self.module, self.cloud)
    failure_msg = self.module.fail_json.call_args[1]['msg']
    assert 'floating_ips' in failure_msg
def test_create_server_floating_ip_pools_wait(self):
    '''
    - openstack.cloud.server:
          image: cirros
          floating_ip_pools: ['name-of-pool']
          wait: false
          nics:
              - net-name: network1
    '''
    # floating_ip_pools with wait=false must make _create_server fail.
    with pytest.raises(AnsibleFail):
        os_server._create_server(self.module, self.cloud)
    failure_msg = self.module.fail_json.call_args[1]['msg']
    assert 'floating_ip_pools' in failure_msg

View File

@@ -28,19 +28,9 @@ cp -a ${TOXDIR}/{plugins,meta,tests,docs} ${ANSIBLE_COLLECTIONS_PATH}/ansible_co
cd ${ANSIBLE_COLLECTIONS_PATH}/ansible_collections/openstack/cloud/
echo "Running ansible-test with version:"
ansible --version
# Ansible-core 2.17 dropped support for the metaclass-boilerplate and future-import-boilerplate tests.
# TODO(mgoddard): Drop this workaround when ansible-core 2.16 is EOL.
ANSIBLE_VER=$(python3 -m pip show ansible-core | awk '$1 == "Version:" { print $2 }')
ANSIBLE_MAJOR_VER=$(echo "$ANSIBLE_VER" | sed 's/^\([0-9]\)\..*/\1/g')
SKIP_TESTS=""
if [[ $ANSIBLE_MAJOR_VER -eq 2 ]]; then
ANSIBLE_MINOR_VER=$(echo "$ANSIBLE_VER" | sed 's/^2\.\([^\.]*\)\..*/\1/g')
if [[ $ANSIBLE_MINOR_VER -le 16 ]]; then
SKIP_TESTS="--skip-test metaclass-boilerplate --skip-test future-import-boilerplate"
fi
fi
ansible-test sanity -v \
--venv \
--python ${PY_VER} \
$SKIP_TESTS \
--skip-test metaclass-boilerplate \
--skip-test future-import-boilerplate \
plugins/ docs/ meta/

View File

@@ -36,14 +36,13 @@ deps =
galaxy-importer
pbr
ruamel.yaml
setuptools
commands =
python {toxinidir}/tools/build.py
ansible --version
ansible-galaxy collection build --force {toxinidir} --output-path {toxinidir}/build_artifact
bash {toxinidir}/tools/check-import.sh {toxinidir}
[testenv:linters_{2_9,2_11,2_12,2_16,2_18,latest}]
[testenv:linters_{2_9,2_11,2_12,latest}]
allowlist_externals = bash
commands =
{[testenv:build]commands}
@@ -57,8 +56,6 @@ deps =
linters_2_9: -r{toxinidir}/tests/requirements-ansible-2.9.txt
linters_2_11: -r{toxinidir}/tests/requirements-ansible-2.11.txt
linters_2_12: -r{toxinidir}/tests/requirements-ansible-2.12.txt
linters_2_16: -r{toxinidir}/tests/requirements-ansible-2.16.txt
linters_2_18: -r{toxinidir}/tests/requirements-ansible-2.18.txt
passenv = *
[flake8]
@@ -72,7 +69,7 @@ ignore = W503,H4,E501,E402,H301
show-source = True
exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build,ansible_collections
[testenv:ansible_{2_9,2_11,2_12,2_16,2_18,latest}]
[testenv:ansible_{2_9,2_11,2_12,latest}]
allowlist_externals = bash
commands =
bash {toxinidir}/ci/run-ansible-tests-collection.sh -e {envdir} {posargs}
@@ -82,8 +79,6 @@ deps =
ansible_2_9: -r{toxinidir}/tests/requirements-ansible-2.9.txt
ansible_2_11: -r{toxinidir}/tests/requirements-ansible-2.11.txt
ansible_2_12: -r{toxinidir}/tests/requirements-ansible-2.12.txt
ansible_2_16: -r{toxinidir}/tests/requirements-ansible-2.16.txt
ansible_2_18: -r{toxinidir}/tests/requirements-ansible-2.18.txt
# Need to pass some env vars for the Ansible playbooks
passenv =
HOME