mirror of
https://github.com/ansible-collections/kubernetes.core.git
synced 2026-03-26 21:33:02 +00:00
waiter.py Add ClusterOperator Test (#879)
SUMMARY Fixes #869 During an OpenShift installation, one of the checks to see that the cluster is ready to proceed with configuration is to check to ensure that the Cluster Operators are in an Available: True Degraded: False Progressing: False state. While you can currently use the k8s_info module to get a json response, the resulting json needs to be iterated over several times to get the appropriate status. This PR adds functionality into waiter.py which loops over all resource instances of the cluster operators. If any of them is not ready, waiter returns False and the task fails. If the task returns successfully, you can assume that all the cluster operators are healthy. ISSUE TYPE Feature Pull Request COMPONENT NAME waiter.py ADDITIONAL INFORMATION A simple playbook will trigger the waiter.py to watch the ClusterOperator object --- - name: get operators hosts: localhost gather_facts: false tasks: - name: Get cluster operators kubernetes.core.k8s_info: api_version: v1 kind: ClusterOperator kubeconfig: "/home/ocp/one/auth/kubeconfig" wait: true wait_timeout: 30 register: cluster_operators This will produce the simple response if everything is functioning properly: PLAY [get operators] ************************************************************************************************* TASK [Get cluster operators] ***************************************************************************************** ok: [localhost] PLAY RECAP *********************************************************************************************************** localhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 If the timeout is reached: PLAY [get operators] ************************************************************************************************* TASK [Get cluster operators] ***************************************************************************************** An exception occurred during task execution. To see the full traceback, use -vvv. 
The error was: ansible_collections.kubernetes.core.plugins.module_utils.k8s.exceptions.CoreException: Failed to gather information about ClusterOperator(s) even after waiting for 30 seconds fatal: [localhost]: FAILED! => {"changed": false, "msg": "Failed to gather information about ClusterOperator(s) even after waiting for 30 seconds"} PLAY RECAP *********************************************************************************************************** localhost : ok=0 changed=0 unreachable=0 failed=1 skipped=0 rescued=0 ignored=0 UNSOLVED: How to know which Operators are failing Reviewed-by: Mandar Kulkarni <mandar242@gmail.com> Reviewed-by: Bikouo Aubin
This commit is contained in:
5
changelogs/fragments/879-clusteroperator-waiter.py.yaml
Normal file
5
changelogs/fragments/879-clusteroperator-waiter.py.yaml
Normal file
@@ -0,0 +1,5 @@
|
||||
minor_changes:
|
||||
- >-
|
||||
waiter.py - add ClusterOperator support. The module can now check OpenShift cluster health
|
||||
by verifying ClusterOperator status requiring 'Available: True', 'Degraded: False', and
|
||||
'Progressing: False' for success. (https://github.com/ansible-collections/kubernetes.core/issues/869)
|
||||
@@ -117,11 +117,34 @@ def exists(resource: Optional[ResourceInstance]) -> bool:
|
||||
return bool(resource) and not empty_list(resource)
|
||||
|
||||
|
||||
def cluster_operator_ready(resource: "ResourceInstance") -> bool:
    """Predicate to check if a single ClusterOperator is healthy.

    A ClusterOperator is considered ready when its status conditions report:
      - "Available" is "True"
      - "Degraded" is "False"
      - "Progressing" is "False"

    :param resource: the ClusterOperator resource instance to inspect.
    :return: True if all three conditions hold, False otherwise (including
        for a missing resource or one without status conditions).
    """
    if not resource:
        return False

    # Extract conditions from the resource's status. Use `or` fallbacks so a
    # resource whose "status" key (or "conditions" key) is present but null
    # does not raise AttributeError on the chained lookup.
    conditions = (resource.get("status") or {}).get("conditions") or []

    # Map condition type -> condition status, e.g. {"Available": "True"}.
    # Kubernetes condition statuses are the strings "True"/"False"/"Unknown".
    status = {x.get("type", ""): x.get("status") for x in conditions}
    return (
        (status.get("Degraded") == "False")
        and (status.get("Progressing") == "False")
        and (status.get("Available") == "True")
    )
|
||||
|
||||
|
||||
# Maps a Kubernetes resource kind to the predicate used to decide whether an
# instance of that kind is "ready" while waiting. Kinds not listed here are
# presumably handled by a generic/custom-condition path — confirm in the
# surrounding waiter code.
RESOURCE_PREDICATES = {
    "DaemonSet": daemonset_ready,
    "Deployment": deployment_ready,
    "Pod": pod_ready,
    "StatefulSet": statefulset_ready,
    "ClusterOperator": cluster_operator_ready,
}
|
||||
|
||||
|
||||
|
||||
@@ -10,6 +10,7 @@ plugins/module_utils/k8sdynamicclient.py import-3.11!skip
|
||||
plugins/modules/k8s.py validate-modules:parameter-type-not-in-doc
|
||||
plugins/modules/k8s_scale.py validate-modules:parameter-type-not-in-doc
|
||||
plugins/modules/k8s_service.py validate-modules:parameter-type-not-in-doc
|
||||
tests/unit/module_utils/fixtures/clusteroperator.yml yamllint!skip
|
||||
tests/unit/module_utils/fixtures/definitions.yml yamllint!skip
|
||||
tests/unit/module_utils/fixtures/deployments.yml yamllint!skip
|
||||
tests/unit/module_utils/fixtures/pods.yml yamllint!skip
|
||||
|
||||
@@ -11,6 +11,7 @@ plugins/module_utils/version.py pylint!skip
|
||||
plugins/modules/k8s.py validate-modules:parameter-type-not-in-doc
|
||||
plugins/modules/k8s_scale.py validate-modules:parameter-type-not-in-doc
|
||||
plugins/modules/k8s_service.py validate-modules:parameter-type-not-in-doc
|
||||
tests/unit/module_utils/fixtures/clusteroperator.yml yamllint!skip
|
||||
tests/unit/module_utils/fixtures/definitions.yml yamllint!skip
|
||||
tests/unit/module_utils/fixtures/deployments.yml yamllint!skip
|
||||
tests/integration/targets/k8s_delete/files/deployments.yaml yamllint!skip
|
||||
|
||||
@@ -14,6 +14,7 @@ plugins/module_utils/version.py pylint!skip
|
||||
plugins/modules/k8s.py validate-modules:parameter-type-not-in-doc
|
||||
plugins/modules/k8s_scale.py validate-modules:parameter-type-not-in-doc
|
||||
plugins/modules/k8s_service.py validate-modules:parameter-type-not-in-doc
|
||||
tests/unit/module_utils/fixtures/clusteroperator.yml yamllint!skip
|
||||
tests/unit/module_utils/fixtures/definitions.yml yamllint!skip
|
||||
tests/unit/module_utils/fixtures/deployments.yml yamllint!skip
|
||||
tests/integration/targets/k8s_delete/files/deployments.yaml yamllint!skip
|
||||
|
||||
@@ -14,6 +14,7 @@ plugins/module_utils/version.py pylint!skip
|
||||
plugins/modules/k8s.py validate-modules:parameter-type-not-in-doc
|
||||
plugins/modules/k8s_scale.py validate-modules:parameter-type-not-in-doc
|
||||
plugins/modules/k8s_service.py validate-modules:parameter-type-not-in-doc
|
||||
tests/unit/module_utils/fixtures/clusteroperator.yml yamllint!skip
|
||||
tests/unit/module_utils/fixtures/definitions.yml yamllint!skip
|
||||
tests/unit/module_utils/fixtures/deployments.yml yamllint!skip
|
||||
tests/integration/targets/k8s_delete/files/deployments.yaml yamllint!skip
|
||||
|
||||
@@ -11,6 +11,7 @@ plugins/module_utils/version.py pylint!skip
|
||||
plugins/modules/k8s.py validate-modules:parameter-type-not-in-doc
|
||||
plugins/modules/k8s_scale.py validate-modules:parameter-type-not-in-doc
|
||||
plugins/modules/k8s_service.py validate-modules:parameter-type-not-in-doc
|
||||
tests/unit/module_utils/fixtures/clusteroperator.yml yamllint!skip
|
||||
tests/unit/module_utils/fixtures/definitions.yml yamllint!skip
|
||||
tests/unit/module_utils/fixtures/deployments.yml yamllint!skip
|
||||
tests/integration/targets/k8s_delete/files/deployments.yaml yamllint!skip
|
||||
|
||||
@@ -11,6 +11,7 @@ plugins/module_utils/version.py pylint!skip
|
||||
plugins/modules/k8s.py validate-modules:parameter-type-not-in-doc
|
||||
plugins/modules/k8s_scale.py validate-modules:parameter-type-not-in-doc
|
||||
plugins/modules/k8s_service.py validate-modules:parameter-type-not-in-doc
|
||||
tests/unit/module_utils/fixtures/clusteroperator.yml yamllint!skip
|
||||
tests/unit/module_utils/fixtures/definitions.yml yamllint!skip
|
||||
tests/unit/module_utils/fixtures/deployments.yml yamllint!skip
|
||||
tests/integration/targets/k8s_delete/files/deployments.yaml yamllint!skip
|
||||
|
||||
99
tests/unit/module_utils/fixtures/clusteroperator.yml
Normal file
99
tests/unit/module_utils/fixtures/clusteroperator.yml
Normal file
@@ -0,0 +1,99 @@
|
||||
---
|
||||
apiVersion: config.openshift.io/v1
|
||||
kind: ClusterOperator
|
||||
metadata:
|
||||
name: authentication
|
||||
spec: {}
|
||||
status:
|
||||
conditions:
|
||||
- message: All is well
|
||||
reason: AsExpected
|
||||
status: 'False'
|
||||
type: Degraded
|
||||
- message: 'AuthenticatorCertKeyProgressing: All is well'
|
||||
reason: AsExpected
|
||||
status: 'False'
|
||||
type: Progressing
|
||||
- message: All is well
|
||||
reason: AsExpected
|
||||
status: 'True'
|
||||
type: Available
|
||||
- message: All is well
|
||||
reason: AsExpected
|
||||
status: 'True'
|
||||
type: Upgradeable
|
||||
- reason: NoData
|
||||
status: Unknown
|
||||
type: EvaluationConditionsDetected
|
||||
---
|
||||
apiVersion: config.openshift.io/v1
|
||||
kind: ClusterOperator
|
||||
metadata:
|
||||
name: dns
|
||||
spec: {}
|
||||
status:
|
||||
conditions:
|
||||
- message: DNS "default" is available.
|
||||
reason: AsExpected
|
||||
status: 'True'
|
||||
type: Available
|
||||
- message: 'DNS "default" reports Progressing=True: "Have 2 available node-resolver
|
||||
pods, want 3."'
|
||||
reason: DNSReportsProgressingIsTrue
|
||||
status: 'True'
|
||||
type: Progressing
|
||||
- reason: DNSNotDegraded
|
||||
status: 'False'
|
||||
type: Degraded
|
||||
- message: 'DNS default is upgradeable: DNS Operator can be upgraded'
|
||||
reason: DNSUpgradeable
|
||||
status: 'True'
|
||||
type: Upgradeable
|
||||
---
|
||||
apiVersion: config.openshift.io/v1
|
||||
kind: ClusterOperator
|
||||
metadata:
|
||||
name: dns
|
||||
spec: {}
|
||||
status:
|
||||
conditions:
|
||||
- message: DNS "default" is available.
|
||||
reason: AsExpected
|
||||
status: 'True'
|
||||
type: Available
|
||||
- message: 'DNS "default" reports Progressing=True: "Have 2 available node-resolver
|
||||
pods, want 3."'
|
||||
reason: DNSReportsProgressingIsTrue
|
||||
status: 'False'
|
||||
type: Progressing
|
||||
- reason: DNSNotDegraded
|
||||
status: 'True'
|
||||
type: Degraded
|
||||
- message: 'DNS default is upgradeable: DNS Operator can be upgraded'
|
||||
reason: DNSUpgradeable
|
||||
status: 'False'
|
||||
type: Upgradeable
|
||||
---
|
||||
apiVersion: config.openshift.io/v1
|
||||
kind: ClusterOperator
|
||||
metadata:
|
||||
name: dns
|
||||
spec: {}
|
||||
status:
|
||||
conditions:
|
||||
- message: DNS "default" is available.
|
||||
reason: AsExpected
|
||||
status: 'False'
|
||||
type: Available
|
||||
- message: 'DNS "default" reports Progressing=True: "Have 2 available node-resolver
|
||||
pods, want 3."'
|
||||
reason: DNSReportsProgressingIsTrue
|
||||
status: 'True'
|
||||
type: Progressing
|
||||
- reason: DNSNotDegraded
|
||||
status: 'True'
|
||||
type: Degraded
|
||||
- message: 'DNS default is upgradeable: DNS Operator can be upgraded'
|
||||
reason: DNSUpgradeable
|
||||
status: 'False'
|
||||
type: Upgradeable
|
||||
@@ -9,6 +9,7 @@ from ansible_collections.kubernetes.core.plugins.module_utils.k8s.waiter import
|
||||
DummyWaiter,
|
||||
Waiter,
|
||||
clock,
|
||||
cluster_operator_ready,
|
||||
custom_condition,
|
||||
deployment_ready,
|
||||
exists,
|
||||
@@ -29,6 +30,7 @@ def resources(filepath):
|
||||
RESOURCES = resources("fixtures/definitions.yml")
|
||||
PODS = resources("fixtures/pods.yml")
|
||||
DEPLOYMENTS = resources("fixtures/deployments.yml")
|
||||
CLUSTER_OPERATOR = resources("fixtures/clusteroperator.yml")
|
||||
|
||||
|
||||
def test_clock_times_out():
|
||||
@@ -119,3 +121,10 @@ def test_get_waiter_returns_correct_waiter():
|
||||
).predicate.func
|
||||
== custom_condition
|
||||
)
|
||||
|
||||
|
||||
# Pair each fixture document in clusteroperator.yml with the readiness value
# cluster_operator_ready should report. Only the first document satisfies all
# of Available=True, Degraded=False, Progressing=False; the other three each
# violate at least one condition.
@pytest.mark.parametrize(
    "clusteroperator,expected", zip(CLUSTER_OPERATOR, [True, False, False, False])
)
def test_cluster_operator(clusteroperator, expected):
    # `is` comparison is intentional: the predicate must return a real bool.
    assert cluster_operator_ready(clusteroperator) is expected
||||
|
||||
Reference in New Issue
Block a user