5 Commits

Author SHA1 Message Date
Sagi Shnaidman
8a395a04cf Remove semver check from release job
The Jinja version used by Zuul's Ansible doesn't support the semver key for versions.
Change-Id: I8bf9aa17e18f9b6d2c8e307f4368d1cea3362cb5
2021-07-29 10:20:32 +03:00
Zuul
441a61fd8c Merge "Change the pipeline of releasing to tag pipeline" 2021-07-28 23:14:40 +00:00
Sagi Shnaidman
c05b1fdbaf Change the pipeline of releasing to tag pipeline
Because the Zuul pre-release pipeline is pep440 compliant, but not
semver compliant, we can't push pre-release tags. Galaxy expects
tags to be pure semver (not pep440).
Change the pipeline to be 'tag' and be triggered by any tag.

Change-Id: Ia288452668179723e35452d6e9579fb1dd0c4c3a
2021-07-28 23:43:51 +03:00
Zuul
9cd92208d6 Merge "Wait for pool to be active and online" 2021-07-28 16:55:56 +00:00
Jesper Schmitz Mouridsen
aed60716ee Wait for pool to be active and online
In order to use a newly generated pool for a new
health monitor, the pool must be online and active,
to avoid a 409 conflict when trying to update a pool
that has, e.g., a pending update as its status.

Change-Id: I160a75c6fbbf1555f3adcb444c77057f59b4cdfb
2021-07-19 13:05:33 +02:00
2 changed files with 32 additions and 1 deletions

View File

@@ -453,6 +453,6 @@
- ansible-collections-openstack-functional-devstack-rocky-ansible-2.11
- ansible-collections-openstack-functional-devstack-queens-ansible-devel
pre-release:
tag:
jobs:
- ansible-collections-openstack-release

View File

@@ -296,6 +296,35 @@ from ansible_collections.openstack.cloud.plugins.module_utils.openstack import O
class LoadBalancerModule(OpenStackModule):
def _wait_for_pool(self, pool, provisioning_status, operating_status, failures, interval=5):
    """Wait for pool to be in a particular provisioning and operating status.

    :param pool: pool object to poll; it is re-fetched by id every cycle
        so a stale object passed in is fine.
    :param provisioning_status: target provisioning status, e.g. "ACTIVE"
        or "DELETED".
    :param operating_status: target operating status, e.g. "ONLINE".
    :param failures: list of provisioning statuses to treat as terminal
        failures (e.g. ["ERROR"]); ``None`` means no failure statuses.
    :param interval: seconds to sleep between polls.
    :returns: ``None`` once both statuses match (or, when waiting for
        "DELETED", once the pool disappears).
    :raises: fails the module via ``self.fail_json`` on a failure status,
        on unexpected deletion, or on timeout.
    """
    timeout = self.params['timeout']  # reuse loadbalancer timeout
    total_sleep = 0
    if failures is None:
        failures = []
    # Capture the id up front: once find_pool returns None (pool deleted),
    # the original code dereferenced pool.id on None and raised
    # AttributeError instead of producing the intended failure message.
    pool_id = pool.id
    while total_sleep < timeout:
        pool = self.conn.load_balancer.find_pool(name_or_id=pool_id)
        if pool:
            if (pool.provisioning_status == provisioning_status
                    and pool.operating_status == operating_status):
                return None
            if pool.provisioning_status in failures:
                self.fail_json(
                    msg="Pool %s transitioned to failure state %s" %
                    (pool_id, pool.provisioning_status)
                )
        else:
            # Pool no longer exists: success if deletion was requested,
            # otherwise it vanished unexpectedly.
            if provisioning_status == "DELETED":
                return None
            else:
                self.fail_json(
                    msg="Pool %s transitioned to DELETED" % pool_id
                )
        time.sleep(interval)
        total_sleep += interval
    # The original loop fell through silently on timeout, letting callers
    # proceed against a pool that never became ready (and then hit the
    # 409 conflict this wait was added to prevent). Fail explicitly.
    self.fail_json(
        msg="Timeout waiting for pool %s to transition to %s/%s" %
        (pool_id, provisioning_status, operating_status)
    )
def _wait_for_lb(self, lb, status, failures, interval=5):
"""Wait for load balancer to be in a particular provisioning status."""
timeout = self.params['timeout']
@@ -497,6 +526,7 @@ class LoadBalancerModule(OpenStackModule):
protocol=protocol,
lb_algorithm=lb_algorithm
)
self._wait_for_pool(pool, "ACTIVE", "ONLINE", ["ERROR"])
changed = True
# Ensure members in the pool
@@ -538,6 +568,7 @@ class LoadBalancerModule(OpenStackModule):
protocol_port=protocol_port,
subnet_id=subnet_id
)
self._wait_for_pool(pool, "ACTIVE", "ONLINE", ["ERROR"])
changed = True
# Associate public ip to the load balancer VIP. If