diff --git a/changelogs/fragments/606-k8s_drain-add-pod_selectors-parameter.yaml b/changelogs/fragments/606-k8s_drain-add-pod_selectors-parameter.yaml
new file mode 100644
index 00000000..7faf433a
--- /dev/null
+++ b/changelogs/fragments/606-k8s_drain-add-pod_selectors-parameter.yaml
@@ -0,0 +1,3 @@
+---
+minor_changes:
+  - k8s_drain - add ability to filter the list of pods to be drained by a pod label selector (https://github.com/ansible-collections/kubernetes.core/issues/474).
diff --git a/plugins/modules/k8s_drain.py b/plugins/modules/k8s_drain.py
index 3165dad4..a8c688e1 100644
--- a/plugins/modules/k8s_drain.py
+++ b/plugins/modules/k8s_drain.py
@@ -41,6 +41,15 @@ options:
       - The name of the node.
     required: true
     type: str
+  pod_selectors:
+    description:
+      - Label selector to filter pods on the node.
+      - This option has effect only when C(state) is set to I(drain).
+    type: list
+    elements: str
+    version_added: 2.5.0
+    aliases:
+      - label_selectors
   delete_options:
     type: dict
     default: {}
@@ -116,6 +125,14 @@ EXAMPLES = r"""
     state: cordon
     name: foo
 
+- name: Drain node "foo" using label selector to filter the list of pods to be drained.
+  kubernetes.core.k8s_drain:
+    state: drain
+    name: foo
+    pod_selectors:
+      - 'app!=csi-attacher'
+      - 'app!=csi-provisioner'
+
 """
 
 RETURN = r"""
@@ -329,6 +346,17 @@ class K8sDrainAnsible(object):
             )
         )
 
+    def list_pods(self):
+        params = {
+            "field_selector": "spec.nodeName={name}".format(
+                name=self._module.params.get("name")
+            )
+        }
+        pod_selectors = self._module.params.get("pod_selectors")
+        if pod_selectors:
+            params["label_selector"] = ",".join(pod_selectors)
+        return self._api_instance.list_pod_for_all_namespaces(**params)
+
     def delete_or_evict_pods(self, node_unschedulable):
         # Mark node as unschedulable
         result = []
@@ -351,12 +379,7 @@ class K8sDrainAnsible(object):
             self.patch_node(unschedulable=False)
 
         try:
-            field_selector = "spec.nodeName={name}".format(
-                name=self._module.params.get("name")
-            )
-            pod_list = self._api_instance.list_pod_for_all_namespaces(
-                field_selector=field_selector
-            )
+            pod_list = self.list_pods()
             # Filter pods
             force = self._drain_options.get("force", False)
             ignore_daemonset = self._drain_options.get("ignore_daemonsets", False)
@@ -487,6 +510,11 @@ def argspec():
                 wait_sleep=dict(type="int", default=5),
             ),
         ),
+        pod_selectors=dict(
+            type="list",
+            elements="str",
+            aliases=["label_selectors"],
+        ),
     )
     return argument_spec
diff --git a/tests/integration/targets/k8s_drain/tasks/main.yml b/tests/integration/targets/k8s_drain/tasks/main.yml
index f16f8aff..5891011f 100644
--- a/tests/integration/targets/k8s_drain/tasks/main.yml
+++ b/tests/integration/targets/k8s_drain/tasks/main.yml
@@ -281,6 +281,7 @@
         that:
           - dset_result.resources | list | length > 0
 
+    # test: drain using disable_eviction=true
     - name: Uncordon node
       k8s_drain:
         state: uncordon
@@ -347,6 +348,90 @@
       register: _result
       failed_when: _result.resources
 
+    # test: drain using pod_selectors
+    - name: Uncordon node
+      k8s_drain:
+        state: uncordon
+        name: '{{ node_to_drain }}'
+
+    - name: create a Pod for test
+      k8s:
+        namespace: '{{ test_namespace }}'
+        wait: true
+        wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
+        definition:
+          apiVersion: v1
+          kind: Pod
+          metadata:
+            name: 'ansible-drain-pod'
+            labels:
+              app: ansible-drain
+          spec:
+            affinity:
+              nodeAffinity:
+                requiredDuringSchedulingIgnoredDuringExecution:
+                  nodeSelectorTerms:
+                    - matchFields:
+                        - key: metadata.name
+                          operator: In
+                          values:
+                            - '{{ node_to_drain }}'
+            containers:
+              - name: ansible-container
+                image: busybox
+                command:
+                  - '/bin/sh'
+                  - '-c'
+                  - 'while true; do echo $(date); sleep 10; done'
+
+    - name: Drain node using pod_selectors 'app!=ansible-drain'
+      k8s_drain:
+        state: drain
+        name: '{{ node_to_drain }}'
+        pod_selectors:
+          - app!=ansible-drain
+        delete_options:
+          terminate_grace_period: 0
+          delete_emptydir_data: true
+          force: true
+          ignore_daemonsets: true
+      register: drain_pod_selector
+
+    - name: assert that node has been drained
+      assert:
+        that:
+          - drain_pod_selector is changed
+          - '"node {{ node_to_drain }} marked unschedulable." in drain_pod_selector.result'
+
+    - name: assert that pod created before is still running
+      k8s_info:
+        namespace: '{{ test_namespace }}'
+        kind: Pod
+        label_selectors:
+          - app=ansible-drain
+        field_selectors:
+          - status.phase=Running
+      register: pods
+      failed_when: pods.resources == []
+
+    - name: Drain node using pod_selectors 'app=ansible-drain'
+      k8s_drain:
+        state: drain
+        name: '{{ node_to_drain }}'
+        pod_selectors:
+          - app=ansible-drain
+        delete_options:
+          terminate_grace_period: 0
+          force: true
+      register: drain_pod_selector_equal
+
+    - name: assert that node was not drained
+      assert:
+        that:
+          - drain_pod_selector_equal is changed
+          - '"node {{ node_to_drain }} already marked unschedulable." in drain_pod_selector_equal.result'
+          - '"Deleting Pods not managed by ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet: {{ test_namespace }}/ansible-drain-pod." in drain_pod_selector_equal.warnings'
+
     - name: Uncordon node
       k8s_drain:
         state: uncordon