From 44ab1fc478b5d6defbe752b740510637b6d39f42 Mon Sep 17 00:00:00 2001 From: "patchback[bot]" <45432694+patchback[bot]@users.noreply.github.com> Date: Mon, 16 Feb 2026 20:35:22 +0000 Subject: [PATCH] Add `check_mode` support for `k8s_drain` module (#1086) (#1091) This is a backport of PR #1086 as merged into main (d239adb). SUMMARY Closes #1037 Added support for check_mode. Converted warnings into informational displays when the user has explicitly requested to delete DaemonSet-managed pods, unmanaged pods, or pods with local storage. ISSUE TYPE Feature Pull Request COMPONENT NAME k8s_drain Reviewed-by: Bianca Henderson --- .../20260203-k8s_drain-warning-fixes.yaml | 5 + plugins/modules/k8s_drain.py | 82 ++-- .../targets/k8s_drain/defaults/main.yml | 4 + .../targets/k8s_drain/tasks/cordon.yml | 65 +++ .../targets/k8s_drain/tasks/drain.yml | 389 ++++++++++++++++ .../targets/k8s_drain/tasks/main.yml | 414 +----------------- .../targets/k8s_drain/tasks/uncordon.yml | 54 +++ .../k8s_drain/tasks/validate_node_status.yml | 18 + .../k8s_drain/templates/daemonset.yml.j2 | 28 ++ .../k8s_drain/templates/deployment.yml.j2 | 40 ++ .../targets/k8s_drain/templates/pod1.yml.j2 | 24 + 11 files changed, 685 insertions(+), 438 deletions(-) create mode 100644 changelogs/fragments/20260203-k8s_drain-warning-fixes.yaml create mode 100644 tests/integration/targets/k8s_drain/tasks/cordon.yml create mode 100644 tests/integration/targets/k8s_drain/tasks/drain.yml create mode 100644 tests/integration/targets/k8s_drain/tasks/uncordon.yml create mode 100644 tests/integration/targets/k8s_drain/tasks/validate_node_status.yml create mode 100644 tests/integration/targets/k8s_drain/templates/daemonset.yml.j2 create mode 100644 tests/integration/targets/k8s_drain/templates/deployment.yml.j2 create mode 100644 tests/integration/targets/k8s_drain/templates/pod1.yml.j2 diff --git a/changelogs/fragments/20260203-k8s_drain-warning-fixes.yaml b/changelogs/fragments/20260203-k8s_drain-warning-fixes.yaml new file mode 100644 index 00000000..3447ea0a --- /dev/null +++ b/changelogs/fragments/20260203-k8s_drain-warning-fixes.yaml @@ -0,0 +1,5 @@ +--- +minor_changes: + - k8s_drain - Add support for `check_mode` (https://github.com/ansible-collections/kubernetes.core/pull/1086). + - k8s_drain - Convert module warnings into informational displays when users explicitly request the deletion of + unmanaged pods, pods with local storage, or those managed by a `DaemonSet` (https://github.com/ansible-collections/kubernetes.core/issues/1037).
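Illustrative aside (not part of this patch): with check_mode supported, a drain can be previewed before it is applied. A minimal playbook sketch, assuming a node named worker-1 and the collection installed as kubernetes.core; all module options shown are the ones exercised in the tests below:

- hosts: localhost
  gather_facts: false
  tasks:
    - name: Preview the drain; in check mode no node patch is applied and no Pods are evicted
      kubernetes.core.k8s_drain:
        state: drain
        name: worker-1          # assumed node name
        delete_options:
          force: true
          ignore_daemonsets: true
          delete_emptydir_data: true
      check_mode: true
      register: drain_preview

    - name: Apply the drain once the preview looks right
      kubernetes.core.k8s_drain:
        state: drain
        name: worker-1
        delete_options:
          force: true
          ignore_daemonsets: true
          delete_emptydir_data: true
          wait_timeout: 0
      register: drain_result

Per the module changes below, the check-mode run reports changed and returns a result along the lines of "node worker-1 marked unschedulable. Would have deleted N Pod(s) from node if not in check mode."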
diff --git a/plugins/modules/k8s_drain.py b/plugins/modules/k8s_drain.py index 22378168..a0671a25 100644 --- a/plugins/modules/k8s_drain.py +++ b/plugins/modules/k8s_drain.py @@ -230,7 +230,7 @@ def filter_pods(pods, force, ignore_daemonset, delete_emptydir_data): else: to_delete.append((pod.metadata.namespace, pod.metadata.name)) - warnings, errors = [], [] + warnings, errors, info = [], [], [] if unmanaged: pod_names = ",".join([pod[0] + "/" + pod[1] for pod in unmanaged]) if not force: @@ -242,7 +242,7 @@ def filter_pods(pods, force, ignore_daemonset, delete_emptydir_data): ) else: # Pod not managed will be deleted as 'force' is true - warnings.append( + info.append( "Deleting Pods not managed by ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet: {0}.".format( pod_names ) @@ -264,7 +264,7 @@ def filter_pods(pods, force, ignore_daemonset, delete_emptydir_data): "cannot delete Pods with local storage: {0}.".format(pod_names) ) else: - warnings.append("Deleting Pods with local storage: {0}.".format(pod_names)) + info.append("Deleting Pods with local storage: {0}.".format(pod_names)) for pod in localStorage: to_delete.append((pod[0], pod[1])) @@ -278,8 +278,8 @@ def filter_pods(pods, force, ignore_daemonset, delete_emptydir_data): ) ) else: - warnings.append("Ignoring DaemonSet-managed Pods: {0}.".format(pod_names)) - return to_delete, warnings, errors + info.append("Ignoring DaemonSet-managed Pods: {0}.".format(pod_names)) + return to_delete, warnings, errors, info class K8sDrainAnsible(object): @@ -334,18 +334,19 @@ class K8sDrainAnsible(object): def evict_pods(self, pods): for namespace, name in pods: try: - if self._drain_options.get("disable_eviction"): - self._api_instance.delete_namespaced_pod( - name=name, namespace=namespace, body=self._delete_options - ) - else: - body = v1_eviction( - delete_options=self._delete_options, - metadata=V1ObjectMeta(name=name, namespace=namespace), - ) - self._api_instance.create_namespaced_pod_eviction( - name=name, namespace=namespace, body=body - ) + if not self._module.check_mode: + if self._drain_options.get("disable_eviction"): + self._api_instance.delete_namespaced_pod( + name=name, namespace=namespace, body=self._delete_options + ) + else: + body = v1_eviction( + delete_options=self._delete_options, + metadata=V1ObjectMeta(name=name, namespace=namespace), + ) + self._api_instance.create_namespaced_pod_eviction( + name=name, namespace=namespace, body=body + ) self._changed = True except ApiException as exc: if exc.reason != "Not Found": @@ -362,11 +363,7 @@ class K8sDrainAnsible(object): ) def list_pods(self): - params = { - "field_selector": "spec.nodeName={name}".format( - name=self._module.params.get("name") - ) - } + params = {"field_selector": "spec.nodeName=" + self._module.params.get("name")} pod_selectors = self._module.params.get("pod_selectors") if pod_selectors: params["label_selector"] = ",".join(pod_selectors) @@ -376,7 +373,8 @@ class K8sDrainAnsible(object): # Mark node as unschedulable result = [] if not node_unschedulable: - self.patch_node(unschedulable=True) + if not self._module.check_mode: + self.patch_node(unschedulable=True) result.append( "node {0} marked unschedulable.".format(self._module.params.get("name")) ) @@ -391,7 +389,8 @@ class K8sDrainAnsible(object): def _revert_node_patch(): if self._changed: self._changed = False - self.patch_node(unschedulable=False) + if not self._module.check_mode: + self.patch_node(unschedulable=False) try: pod_list = self.list_pods() @@ -401,7 +400,7 @@ class 
K8sDrainAnsible(object): delete_emptydir_data = self._drain_options.get( "delete_emptydir_data", False ) - pods, warnings, errors = filter_pods( + pods, warnings, errors, info = filter_pods( pod_list.items, force, ignore_daemonset, delete_emptydir_data ) if errors: @@ -431,18 +430,25 @@ class K8sDrainAnsible(object): if pods: self.evict_pods(pods) number_pod = len(pods) - if self._drain_options.get("wait_timeout") is not None: - warn = self.wait_for_pod_deletion( - pods, - self._drain_options.get("wait_timeout"), - self._drain_options.get("wait_sleep"), + if self._module.check_mode: + result.append( + "Would have deleted {0} Pod(s) from node if not in check mode.".format( + number_pod + ) ) - if warn: - warnings.append(warn) - result.append("{0} Pod(s) deleted from node.".format(number_pod)) + else: + wait_timeout = self._drain_options.get("wait_timeout") + wait_sleep = self._drain_options.get("wait_sleep") + if wait_timeout is not None: + warn = self.wait_for_pod_deletion(pods, wait_timeout, wait_sleep) + if warn: + warnings.append(warn) + result.append("{0} Pod(s) deleted from node.".format(number_pod)) if warnings: for warning in warnings: self._module.warn(warning) + for line in info: + self._module.debug(line) return dict(result=" ".join(result)) def patch_node(self, unschedulable): @@ -483,7 +489,8 @@ class K8sDrainAnsible(object): self._module.exit_json( result="node {0} already marked unschedulable.".format(name) ) - self.patch_node(unschedulable=True) + if not self._module.check_mode: + self.patch_node(unschedulable=True) result["result"] = "node {0} marked unschedulable.".format(name) self._changed = True @@ -492,7 +499,8 @@ class K8sDrainAnsible(object): self._module.exit_json( result="node {0} already marked schedulable.".format(name) ) - self.patch_node(unschedulable=False) + if not self._module.check_mode: + self.patch_node(unschedulable=False) result["result"] = "node {0} marked schedulable.".format(name) self._changed = True @@ -535,7 +543,9 @@ def argspec(): def main(): - module = AnsibleK8SModule(module_class=AnsibleModule, argument_spec=argspec()) + module = AnsibleK8SModule( + module_class=AnsibleModule, argument_spec=argspec(), supports_check_mode=True + ) if not HAS_EVICTION_API: module.fail_json( diff --git a/tests/integration/targets/k8s_drain/defaults/main.yml b/tests/integration/targets/k8s_drain/defaults/main.yml index 918c67d2..43fb11db 100644 --- a/tests/integration/targets/k8s_drain/defaults/main.yml +++ b/tests/integration/targets/k8s_drain/defaults/main.yml @@ -1,3 +1,7 @@ --- test_namespace: "drain" k8s_wait_timeout: 400 +daemonset_name: promotheus +deployment_name: busybox-emptydir +pod1_name: "busybox-1" +pod2_name: "busybox-2" diff --git a/tests/integration/targets/k8s_drain/tasks/cordon.yml b/tests/integration/targets/k8s_drain/tasks/cordon.yml new file mode 100644 index 00000000..276f82f2 --- /dev/null +++ b/tests/integration/targets/k8s_drain/tasks/cordon.yml @@ -0,0 +1,65 @@ +--- +- name: Cordon node (check mode) + k8s_drain: + state: cordon + name: '{{ node_to_drain }}' + register: cordon_check_mode + check_mode: true + +- name: assert that module reported change while running in check_mode + assert: + that: + - cordon_check_mode is changed + +- name: Ensure the node remain schedulable (cordon run on check mode) + ansible.builtin.include_tasks: tasks/validate_node_status.yml + vars: + schedulable: true + +- name: Cordon node + k8s_drain: + state: cordon + name: '{{ node_to_drain }}' + register: cordon + +- name: assert that cordon is changed + 
assert: + that: + - cordon is changed + +- name: Ensure the node is unschedulable + ansible.builtin.include_tasks: tasks/validate_node_status.yml + +- name: Test cordon idempotency (check_mode=true) + k8s_drain: + state: cordon + name: '{{ node_to_drain }}' + register: cordon_checkmode_idempotency + check_mode: true + +- name: Assert that module is idempotent while running in check mode + assert: + that: + - cordon_checkmode_idempotency is not changed + +- name: Test cordon idempotency + k8s_drain: + state: cordon + name: '{{ node_to_drain }}' + register: cordon + +- name: assert that cordon is not changed + assert: + that: + - cordon is not changed + +- name: Get pods + k8s_info: + kind: Pod + namespace: '{{ test_namespace }}' + register: Pod + +- name: assert that pods are running on cordoned node + assert: + that: + - Pod.resources | selectattr('status.phase', 'equalto', 'Running') | selectattr('spec.nodeName', 'equalto', node_to_drain) | list | length > 0 diff --git a/tests/integration/targets/k8s_drain/tasks/drain.yml b/tests/integration/targets/k8s_drain/tasks/drain.yml new file mode 100644 index 00000000..8134a2f8 --- /dev/null +++ b/tests/integration/targets/k8s_drain/tasks/drain.yml @@ -0,0 +1,389 @@ +--- +# Drain the node (should fail) +- name: Drain node with expected failure (check_mode=true) + k8s_drain: + state: drain + name: '{{ node_to_drain }}' + ignore_errors: true + register: drain_failed_check_mode + check_mode: true + +- name: Assert that drain failed due to DaemonSet-managed, unmanaged, and local-storage Pods + assert: + that: + - drain_failed_check_mode is failed + - '"cannot delete DaemonSet-managed Pods" in drain_failed_check_mode.msg' + - '"cannot delete Pods not managed by ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet" in drain_failed_check_mode.msg' + - '"cannot delete Pods with local storage" in drain_failed_check_mode.msg' + +- name: Ensure that the node remains schedulable + ansible.builtin.include_tasks: tasks/validate_node_status.yml + vars: + schedulable: true + +- name: Drain node with expected failure + k8s_drain: + state: drain + name: '{{ node_to_drain }}' + ignore_errors: true + register: drain_failed + +- name: Assert that drain failed due to DaemonSet-managed, unmanaged, and local-storage Pods + assert: + that: + - drain_failed is failed + - '"cannot delete DaemonSet-managed Pods" in drain_failed.msg' + - '"cannot delete Pods not managed by ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet" in drain_failed.msg' + - '"cannot delete Pods with local storage" in drain_failed.msg' + +- name: Ensure that the node remains schedulable + ansible.builtin.include_tasks: tasks/validate_node_status.yml + vars: + schedulable: true + +# Drain the node ignoring non-candidate Pods +# check_mode +- name: Drain node using ignore_daemonsets, force, and delete_emptydir_data options (check_mode=true) + k8s_drain: + state: drain + name: '{{ node_to_drain }}' + delete_options: + force: true + ignore_daemonsets: true + delete_emptydir_data: true + wait_timeout: 0 + register: drain_force_check_mode + check_mode: true + +- name: Assert that module reported a change while the node was not drained + assert: + that: + - drain_force_check_mode is changed + - '"node "+node_to_drain+" marked unschedulable."
in drain_force_check_mode.result' + +- name: Ensure node remains schedulable + ansible.builtin.include_tasks: tasks/validate_node_status.yml + vars: + schedulable: true + +- name: Assert that running with check_mode did not delete any Pod + k8s_info: + namespace: '{{ test_namespace }}' + kind: Pod + label_selectors: + - "{{ item }}" + register: pods + failed_when: pods.resources | length == 0 + loop: + - drain=unmanaged-pod + - drain=daemonset-pod + - drain=emptyDir + +# Apply +- name: Drain node using ignore_daemonsets, force, and delete_emptydir_data options + k8s_drain: + state: drain + name: '{{ node_to_drain }}' + delete_options: + force: true + ignore_daemonsets: true + delete_emptydir_data: true + wait_timeout: 0 + register: drain_force + +- name: Assert that module reported a change + assert: + that: + - drain_force is changed + - '"node "+node_to_drain+" marked unschedulable." in drain_force.result' + +- name: Ensure node is now unschedulable + ansible.builtin.include_tasks: tasks/validate_node_status.yml + +- name: Assert that unmanaged Pods were deleted + k8s_info: + namespace: '{{ test_namespace }}' + kind: Pod + label_selectors: + - drain=unmanaged-pod + register: pods + failed_when: pods.resources | length > 0 + +- name: Assert that recreated Pods with local storage are Pending + k8s_info: + namespace: '{{ test_namespace }}' + kind: Pod + label_selectors: + - drain=emptyDir + register: pods + failed_when: pods.resources | map(attribute='status.phase') | unique != ['Pending'] + +- name: Assert that DaemonSet-managed Pods were not deleted + k8s_info: + namespace: '{{ test_namespace }}' + kind: Pod + label_selectors: + - drain=daemonset-pod + register: pods + failed_when: pods.resources | length == 0 + +# Idempotency +- name: Test drain idempotency (check_mode=true) + k8s_drain: + state: drain + name: '{{ node_to_drain }}' + delete_options: + force: true + ignore_daemonsets: true + delete_emptydir_data: true + register: drain_force_idempotency_check_mode + check_mode: true + +- name: Validate idempotency with check_mode + assert: + that: + - drain_force_idempotency_check_mode is not changed + +- name: Ensure node remains unschedulable + ansible.builtin.include_tasks: tasks/validate_node_status.yml + +- name: Assert that DaemonSet-managed Pods were not deleted + k8s_info: + namespace: '{{ test_namespace }}' + kind: Pod + label_selectors: + - drain=daemonset-pod + register: pods + failed_when: pods.resources | length == 0 + +# Drain with disable_eviction = true +# check_mode +- name: Uncordon node + k8s_drain: + state: uncordon + name: '{{ node_to_drain }}' + +- name: Re-create the Pod deleted earlier + k8s: + namespace: '{{ test_namespace }}' + wait: true + wait_timeout: "{{ k8s_wait_timeout | default(omit) }}" + template: pod1.yml.j2 + +- name: Drain node using disable_eviction (check_mode) + k8s_drain: + state: drain + name: '{{ node_to_drain }}' + delete_options: + force: true + disable_eviction: true + terminate_grace_period: 0 + ignore_daemonsets: true + wait_timeout: 0 + delete_emptydir_data: true + register: disable_evict_check_mode + check_mode: true + +- name: Assert that module reported a change while running in check_mode + assert: + that: + - disable_evict_check_mode is changed + - '"node "+node_to_drain+" marked unschedulable."
in disable_evict_check_mode.result' + +- name: Ensure node remains schedulable (check_mode) + ansible.builtin.include_tasks: tasks/validate_node_status.yml + vars: + schedulable: true + +- name: Assert that unmanaged Pods were not deleted + k8s_info: + namespace: '{{ test_namespace }}' + kind: Pod + label_selectors: + - drain=unmanaged-pod + register: pods + failed_when: pods.resources | length == 0 + +# apply +- name: Drain node using disable_eviction + k8s_drain: + state: drain + name: '{{ node_to_drain }}' + delete_options: + force: true + disable_eviction: true + terminate_grace_period: 0 + ignore_daemonsets: true + wait_timeout: 0 + delete_emptydir_data: true + register: disable_evict + +- name: Assert that node has been drained + assert: + that: + - disable_evict is changed + - '"node "+node_to_drain+" marked unschedulable." in disable_evict.result' + +- name: Ensure the node is unschedulable + ansible.builtin.include_tasks: tasks/validate_node_status.yml + +# Drain using pod_selectors +- name: Uncordon node + k8s_drain: + state: uncordon + name: '{{ node_to_drain }}' + +- name: Create Pod with label selector + k8s: + namespace: "{{ test_namespace }}" + wait: true + template: pod1.yml.j2 + +# check_mode +- name: Drain the node using pod_selectors matching no Pod + k8s_drain: + state: drain + name: '{{ node_to_drain }}' + pod_selectors: + - drain=no_match_selector + delete_options: + terminate_grace_period: 0 + delete_emptydir_data: true + force: true + ignore_daemonsets: true + register: drain_pod_selector_no_match_check_mode + check_mode: true + +- name: Assert that module reported a change while running in check_mode + assert: + that: + - drain_pod_selector_no_match_check_mode is changed + - '"node "+node_to_drain+" marked unschedulable." in drain_pod_selector_no_match_check_mode.result' + +- name: Ensure that the node remains schedulable + ansible.builtin.include_tasks: tasks/validate_node_status.yml + vars: + schedulable: true + +- name: Validate that Pods are still running + k8s_info: + namespace: '{{ test_namespace }}' + kind: Pod + label_selectors: + - drain=unmanaged-pod + field_selectors: + - status.phase=Running + register: pods + failed_when: pods.resources | length == 0 + +# apply +- name: Drain the node using pod_selectors matching no Pod + k8s_drain: + state: drain + name: '{{ node_to_drain }}' + pod_selectors: + - drain=no_match_selector + delete_options: + terminate_grace_period: 0 + delete_emptydir_data: true + force: true + ignore_daemonsets: true + register: drain_pod_selector_no_match + +- name: Assert that node has been drained + assert: + that: + - drain_pod_selector_no_match is changed + - '"node "+node_to_drain+" marked unschedulable."
in drain_pod_selector_no_match.result' + +- name: Ensure the node is unschedulable + ansible.builtin.include_tasks: tasks/validate_node_status.yml + +- name: Validate that Pod are still running + k8s_info: + namespace: '{{ test_namespace }}' + kind: Pod + label_selectors: + - drain=unmanaged-pod + field_selectors: + - status.phase=Running + register: pods + failed_when: pods.resources | length == 0 + +# Drain the node using matching pod_selector +- name: Uncordon node + k8s_drain: + state: uncordon + name: '{{ node_to_drain }}' + +# check_mode +- name: Drain the node using matching pod_selectors + k8s_drain: + state: drain + name: '{{ node_to_drain }}' + pod_selectors: + - drain=unmanaged-pod + delete_options: + terminate_grace_period: 0 + delete_emptydir_data: true + force: true + ignore_daemonsets: true + register: drain_pod_selector_match_check_mode + check_mode: true + +- name: Assert that module reported change while running in check_mode + assert: + that: + - drain_pod_selector_match_check_mode is changed + - '"node "+node_to_drain+" marked unschedulable." in drain_pod_selector_match_check_mode.result' + +- name: Ensure that the node remains schedulable + ansible.builtin.include_tasks: tasks/validate_node_status.yml + vars: + schedulable: true + +- name: Validate that Pod are still running + k8s_info: + namespace: '{{ test_namespace }}' + kind: Pod + label_selectors: + - drain=unmanaged-pod + field_selectors: + - status.phase=Running + register: pods + failed_when: pods.resources | length == 0 + +# apply +- name: Drain the node using matching pod_selectors + k8s_drain: + state: drain + name: '{{ node_to_drain }}' + pod_selectors: + - drain=unmanaged-pod + delete_options: + terminate_grace_period: 0 + delete_emptydir_data: true + force: true + ignore_daemonsets: true + wait_timeout: 0 + register: drain_pod_selector_match + +- name: Assert that node has been drained + assert: + that: + - drain_pod_selector_match is changed + - '"node "+node_to_drain+" marked unschedulable." in drain_pod_selector_match.result' + +- name: Ensure the node is unschedulable + ansible.builtin.include_tasks: tasks/validate_node_status.yml + +- name: Validate that Pod are not running + k8s_info: + namespace: '{{ test_namespace }}' + kind: Pod + label_selectors: + - drain=unmanaged-pod + field_selectors: + - status.phase=Running + register: pods + failed_when: pods.resources | length > 0 diff --git a/tests/integration/targets/k8s_drain/tasks/main.yml b/tests/integration/targets/k8s_drain/tasks/main.yml index 5841993e..104b1666 100644 --- a/tests/integration/targets/k8s_drain/tasks/main.yml +++ b/tests/integration/targets/k8s_drain/tasks/main.yml @@ -1,11 +1,5 @@ --- - block: - - name: Set common facts - set_fact: - drain_daemonset_name: "promotheus-dset" - drain_pod_name: "pod-drain" - drain_deployment_emptydir_name: "deployment-emptydir-drain" - # It seems that the default ServiceAccount can take a bit to be created # right after a cluster is brought up. 
This can lead to the ServiceAccount # admission controller rejecting a Pod creation request because the @@ -35,407 +29,23 @@ set_fact: node_to_drain: '{{ uncordoned_nodes[0] }}' - - name: Deploy daemonset on cluster + - name: Create resources k8s: - namespace: '{{ test_namespace }}' - definition: - apiVersion: apps/v1 - kind: DaemonSet - metadata: - name: '{{ drain_daemonset_name }}' - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchFields: - - key: metadata.name - operator: In - values: - - '{{ node_to_drain }}' - selector: - matchLabels: - name: prometheus-exporter - template: - metadata: - labels: - name: prometheus-exporter - spec: - containers: - - name: prometheus - image: prom/node-exporter - ports: - - containerPort: 80 - - - name: Create Pods not managed by ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet. - k8s: - namespace: '{{ test_namespace }}' - wait: yes - wait_timeout: "{{ k8s_wait_timeout | default(omit) }}" - definition: - apiVersion: v1 - kind: Pod - metadata: - name: '{{ drain_pod_name }}' - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchFields: - - key: metadata.name - operator: In - values: - - '{{ node_to_drain }}' - containers: - - name: c0 - image: busybox - command: - - /bin/sh - - -c - - while true;do date;sleep 5; done - - - name: Create Deployment with an emptyDir volume. - k8s: - namespace: '{{ test_namespace }}' - wait: yes - wait_timeout: "{{ k8s_wait_timeout | default(omit) }}" - definition: - apiVersion: apps/v1 - kind: Deployment - metadata: - name: '{{ drain_deployment_emptydir_name }}' - spec: - replicas: 1 - selector: - matchLabels: - drain: emptyDir - template: - metadata: - labels: - drain: emptyDir - spec: - metadata: - labels: - drain: emptyDir - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchFields: - - key: metadata.name - operator: In - values: - - '{{ node_to_drain }}' - containers: - - name: c0 - image: busybox - command: - - /bin/sh - - -c - - while true;do date;sleep 5; done - volumeMounts: - - mountPath: /emptydir - name: emptydir - volumes: - - name: emptydir - emptyDir: {} - - - name: Register emptyDir Pod name - k8s_info: - namespace: '{{ test_namespace }}' - kind: Pod - label_selectors: - - "drain = emptyDir" - register: emptydir_pod_result - failed_when: - - emptydir_pod_result.resources | length != 1 - - - name: Cordon node - k8s_drain: - state: cordon - name: '{{ node_to_drain }}' - register: cordon - - - name: assert that cordon is changed - assert: - that: - - cordon is changed - - - name: Test cordon idempotency - k8s_drain: - state: cordon - name: '{{ node_to_drain }}' - register: cordon - - - name: assert that cordon is not changed - assert: - that: - - cordon is not changed - - - name: Get pods - k8s_info: - kind: Pod - namespace: '{{ test_namespace }}' - register: Pod - - - name: assert that pods are running on cordoned node - assert: - that: - - Pod.resources | selectattr('status.phase', 'equalto', 'Running') | selectattr('spec.nodeName', 'equalto', node_to_drain) | list | length > 0 - - - name: Uncordon node - k8s_drain: - state: uncordon - name: '{{ node_to_drain }}' - register: uncordon - - - name: assert that uncordon is changed - assert: - that: - - uncordon is changed - - - name: Test uncordon idempotency - k8s_drain: - state: uncordon - name: '{{ node_to_drain }}' - register: uncordon - - - name: assert that 
uncordon is not changed - assert: - that: - - uncordon is not changed - - - name: Drain node - k8s_drain: - state: drain - name: '{{ node_to_drain }}' - ignore_errors: true - register: drain_result - - - name: assert that drain failed due to DaemonSet managed Pods - assert: - that: - - drain_result is failed - - '"cannot delete DaemonSet-managed Pods" in drain_result.msg' - - '"cannot delete Pods not managed by ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet" in drain_result.msg' - - '"cannot delete Pods with local storage" in drain_result.msg' - - - name: Drain node using ignore_daemonsets, force, and delete_emptydir_data options - k8s_drain: - state: drain - name: '{{ node_to_drain }}' - delete_options: - force: true - ignore_daemonsets: true - delete_emptydir_data: true - wait_timeout: 0 - register: drain_result - - - name: assert that node has been drained - assert: - that: - - drain_result is changed - - '"node "+node_to_drain+" marked unschedulable." in drain_result.result' - - - name: assert that unmanaged pod were deleted - k8s_info: - namespace: '{{ test_namespace }}' - kind: Pod - name: '{{ drain_pod_name }}' - register: _result - failed_when: _result.resources | length > 0 - - - name: assert that emptyDir pod was deleted - k8s_info: - namespace: '{{ test_namespace }}' - kind: Pod - name: "{{ emptydir_pod_result.resources[0].metadata.name }}" - register: _result - failed_when: _result.resources | length != 0 - - - name: Test drain idempotency - k8s_drain: - state: drain - name: '{{ node_to_drain }}' - delete_options: - force: true - ignore_daemonsets: true - delete_emptydir_data: true - register: drain_result - - - name: Check idempotency - assert: - that: - - drain_result is not changed - - - name: Get DaemonSet - k8s_info: - kind: DaemonSet - namespace: '{{ test_namespace }}' - name: '{{ drain_daemonset_name }}' - register: dset_result - - - name: assert that daemonset managed pods were not removed - assert: - that: - - dset_result.resources | list | length > 0 - - # test: drain using disable_eviction=true - - name: Uncordon node - k8s_drain: - state: uncordon - name: '{{ node_to_drain }}' - - - name: Create another Pod - k8s: - namespace: '{{ test_namespace }}' - wait: yes - wait_timeout: "{{ k8s_wait_timeout | default(omit) }}" - definition: - apiVersion: v1 - kind: Pod - metadata: - name: '{{ drain_pod_name }}-01' - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchFields: - - key: metadata.name - operator: In - values: - - '{{ node_to_drain }}' - containers: - - name: c0 - image: busybox - command: - - /bin/sh - - -c - - while true;do date;sleep 5; done - volumeMounts: - - mountPath: /emptydir - name: emptydir - volumes: - - name: emptydir - emptyDir: {} - - - name: Drain node using disable_eviction set to yes - k8s_drain: - state: drain - name: '{{ node_to_drain }}' - delete_options: - force: true - disable_eviction: yes - terminate_grace_period: 0 - ignore_daemonsets: yes - wait_timeout: 0 - delete_emptydir_data: true - register: disable_evict - - - name: assert that node has been drained - assert: - that: - - disable_evict is changed - - '"node "+node_to_drain+" marked unschedulable." 
in disable_evict.result' - - - name: assert that unmanaged pod were deleted - k8s_info: - namespace: '{{ test_namespace }}' - kind: Pod - name: '{{ drain_pod_name }}-01' - register: _result - failed_when: _result.resources | length > 0 - - # test: drain using pod_selectors - - name: Uncordon node - k8s_drain: - state: uncordon - name: '{{ node_to_drain }}' - - - name: create a Pod for test - k8s: - namespace: '{{ test_namespace }}' wait: true - wait_timeout: "{{ k8s_wait_timeout | default(omit) }}" - definition: - apiVersion: v1 - kind: Pod - metadata: - name: 'ansible-drain-pod' - labels: - app: ansible-drain - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchFields: - - key: metadata.name - operator: In - values: - - '{{ node_to_drain }}' - containers: - - name: ansible-container - image: busybox - command: - - '/bin/sh' - - '-c' - - 'while true; do echo $(date); sleep 10; done' + namespace: "{{ test_namespace }}" + template: + - daemonset.yml.j2 + - deployment.yml.j2 + - pod1.yml.j2 - - name: Drain node using pod_selectors 'app!=ansible-drain' - k8s_drain: - state: drain - name: '{{ node_to_drain }}' - pod_selectors: - - app!=ansible-drain - delete_options: - terminate_grace_period: 0 - delete_emptydir_data: true - force: true - ignore_daemonsets: true - register: drain_pod_selector + - name: Test Cordon node + ansible.builtin.include_tasks: tasks/cordon.yml - - name: assert that node has been drained - assert: - that: - - drain_pod_selector is changed - - '"node "+node_to_drain+" marked unschedulable." in drain_pod_selector.result' + - name: Test Uncordon node + ansible.builtin.include_tasks: tasks/uncordon.yml - - name: assert that pod created before is still running - k8s_info: - namespace: '{{ test_namespace }}' - kind: Pod - label_selectors: - - app=ansible-drain - field_selectors: - - status.phase=Running - register: pods - failed_when: pods.resources == [] - - - name: Drain node using pod_selectors 'app=ansible-drain' - k8s_drain: - state: drain - name: '{{ node_to_drain }}' - pod_selectors: - - app=ansible-drain - delete_options: - terminate_grace_period: 0 - force: true - register: drain_pod_selector_equal - - - name: assert that node was not drained - assert: - that: - - drain_pod_selector_equal is changed - - '"node "+node_to_drain+" already marked unschedulable." in drain_pod_selector_equal.result' - - '"Deleting Pods not managed by ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet: "+test_namespace+"/ansible-drain-pod." 
in drain_pod_selector_equal.warnings' - - - name: Uncordon node - k8s_drain: - state: uncordon - name: '{{ node_to_drain }}' + - name: Test drain node + ansible.builtin.include_tasks: tasks/drain.yml always: - name: Uncordon node diff --git a/tests/integration/targets/k8s_drain/tasks/uncordon.yml b/tests/integration/targets/k8s_drain/tasks/uncordon.yml new file mode 100644 index 00000000..4b4aa1cc --- /dev/null +++ b/tests/integration/targets/k8s_drain/tasks/uncordon.yml @@ -0,0 +1,54 @@ +--- +- name: Uncordon node (check_mode=true) + k8s_drain: + state: uncordon + name: '{{ node_to_drain }}' + register: uncordon_check_mode + check_mode: true + +- name: Assert that module reported change while running in check_mode + assert: + that: + - uncordon_check_mode is changed + +- name: Ensure the node is still unschedulable (uncordon run in check_mode) + ansible.builtin.include_tasks: tasks/validate_node_status.yml + +- name: Uncordon node + k8s_drain: + state: uncordon + name: '{{ node_to_drain }}' + register: uncordon + +- name: Assert that module reported change + assert: + that: + - uncordon is changed + +- name: Ensure the node is now schedulable + ansible.builtin.include_tasks: tasks/validate_node_status.yml + vars: + schedulable: true + +- name: Test uncordon idempotency (check_mode=true) + k8s_drain: + state: uncordon + name: '{{ node_to_drain }}' + register: uncordon_checkmode_idempotency + check_mode: true + +- name: assert that uncordon is not changed (idempotency with check mode) + assert: + that: + - uncordon_checkmode_idempotency is not changed + +- name: Test uncordon idempotency + k8s_drain: + state: uncordon + name: '{{ node_to_drain }}' + register: uncordon + +- name: assert that uncordon is not changed + assert: + that: + - uncordon is not changed diff --git a/tests/integration/targets/k8s_drain/tasks/validate_node_status.yml b/tests/integration/targets/k8s_drain/tasks/validate_node_status.yml new file mode 100644 index 00000000..9b3a099f --- /dev/null +++ b/tests/integration/targets/k8s_drain/tasks/validate_node_status.yml @@ -0,0 +1,18 @@ +--- +- name: Retrieve node information + k8s_info: + kind: Node + name: "{{ node_to_drain }}" + register: node_info + +- name: Validate that node is schedulable + ansible.builtin.assert: + that: + - node_info.resources.0.spec.unschedulable is undefined + when: schedulable | default('false') | bool + +- name: Validate that node is unschedulable + ansible.builtin.assert: + that: + - node_info.resources.0.spec.unschedulable | bool + when: not (schedulable | default('false') | bool) diff --git a/tests/integration/targets/k8s_drain/templates/daemonset.yml.j2 b/tests/integration/targets/k8s_drain/templates/daemonset.yml.j2 new file mode 100644 index 00000000..74898eb8 --- /dev/null +++ b/tests/integration/targets/k8s_drain/templates/daemonset.yml.j2 @@ -0,0 +1,28 @@ +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: '{{ daemonset_name }}' +spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchFields: + - key: metadata.name + operator: In + values: + - '{{ node_to_drain }}' + selector: + matchLabels: + drain: daemonset-pod + template: + metadata: + labels: + drain: daemonset-pod + spec: + containers: + - name: prometheus + image: prom/node-exporter + ports: + - containerPort: 80 \ No newline at end of file diff --git a/tests/integration/targets/k8s_drain/templates/deployment.yml.j2 b/tests/integration/targets/k8s_drain/templates/deployment.yml.j2 new file mode 100644 index 
00000000..5c0b6a16 --- /dev/null +++ b/tests/integration/targets/k8s_drain/templates/deployment.yml.j2 @@ -0,0 +1,40 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: '{{ deployment_name }}' +spec: + replicas: 1 + selector: + matchLabels: + drain: emptyDir + template: + metadata: + labels: + drain: emptyDir + spec: + metadata: + labels: + drain: emptyDir + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchFields: + - key: metadata.name + operator: In + values: + - '{{ node_to_drain }}' + containers: + - name: busybox-container + image: busybox + command: + - /bin/sh + - -c + - while true;do date;sleep 5; done + volumeMounts: + - mountPath: /emptydir + name: emptydir + volumes: + - name: emptydir + emptyDir: {} diff --git a/tests/integration/targets/k8s_drain/templates/pod1.yml.j2 b/tests/integration/targets/k8s_drain/templates/pod1.yml.j2 new file mode 100644 index 00000000..d74b0526 --- /dev/null +++ b/tests/integration/targets/k8s_drain/templates/pod1.yml.j2 @@ -0,0 +1,24 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: '{{ pod1_name }}' + labels: + drain: unmanaged-pod +spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchFields: + - key: metadata.name + operator: In + values: + - '{{ node_to_drain }}' + containers: + - name: busybox-container + image: busybox + command: + - /bin/sh + - -c + - while true;do date;sleep 5; done \ No newline at end of file