Replace passing `warnings` to `exit_json` with `AnsibleModule.warn` for the few modules (#1033) (#1074)

This is a backport of PR #1033 as merged into main (3e32c12).
SUMMARY
Using exit_json or fail_json for warnings is deprecated in ansible-core>=2.19.0 and will be removed in ansible-core>=2.23.0
Tested with ansible-core 2.19.3 as the latest released version at the time of the start of this PR and with 2.16.0 as the lowest version supported by kubernetes.core 6.x
Resolves: #1031
ISSUE TYPE

Bugfix Pull Request

COMPONENT NAME
k8s_drain
k8s_rollback
k8s_scale
ADDITIONAL INFORMATION
The initial version of this PR covers only the module k8s_drain; the following commits extend the change to k8s_rollback and k8s_scale.

Reviewed-by: Bianca Henderson <beeankha@gmail.com>
Reviewed-by: Mike Graves <mgraves@redhat.com>
This commit is contained in:
Bianca Henderson
2026-01-26 16:43:02 -06:00
committed by GitHub
parent 8ddb19aade
commit 12847a1555
8 changed files with 319 additions and 9 deletions

View File

@@ -0,0 +1,2 @@
bugfixes:
- Replace passing ``warnings`` to ``exit_json`` with ``AnsibleModule.warn`` in the ``k8s_drain``, ``k8s_rollback`` and ``k8s_scale`` modules, as it is deprecated in ``ansible-core>=2.19.0`` and will be removed in ``ansible-core>=2.23.0`` (https://github.com/ansible-collections/kubernetes.core/pull/1033).

View File

@@ -278,11 +278,15 @@ class K8SCopyFromPod(K8SCopy):
def run(self):
self.files_to_copy = self.list_remote_files()
if self.files_to_copy == []:
# Using warn method instead of passing warnings to exit_json as it is
# deprecated in ansible-core>=2.19.0
self._module.warn(
"No file found from directory '{0}' into remote Pod.".format(
self.remote_path
)
)
self.module.exit_json(
changed=False,
warning="No file found from directory '{0}' into remote Pod.".format(
self.remote_path
),
)
self.copy()

View File

@@ -441,7 +441,8 @@ class K8sDrainAnsible(object):
warnings.append(warn)
result.append("{0} Pod(s) deleted from node.".format(number_pod))
if warnings:
return dict(result=" ".join(result), warnings=warnings)
for warning in warnings:
self._module.warn(warning)
return dict(result=" ".join(result))
def patch_node(self, unschedulable):

View File

@@ -168,7 +168,9 @@ def perform_action(svc, resource):
module.params["kind"],
resource["metadata"]["name"],
)
result = {"changed": False, "warnings": [warn]}
if warn:
module.warn(warn)
result = {"changed": False}
return result
if module.params["kind"] == "Deployment":

View File

@@ -243,10 +243,12 @@ def execute_module(client, module):
module.fail_json(msg=error, **return_attributes)
def _continue_or_exit(warn):
if warn:
module.warn(warn)
if multiple_scale:
return_attributes["results"].append({"warning": warn, "changed": False})
return_attributes["results"].append({"changed": False})
else:
module.exit_json(warning=warn, **return_attributes)
module.exit_json(**return_attributes)
for existing in existing_items:
if kind.lower() == "job":

View File

@@ -81,6 +81,7 @@
replicas: 1
current_replicas: 3
wait: true
wait_timeout: 60
register: scale
- name: Read deployment

View File

@@ -291,6 +291,301 @@
that:
- failed_version | int + 1 == result.resources[0].metadata.annotations['deprecated.daemonset.template.generation'] | int
- name: Create deployment with specific labels for selector testing
k8s:
state: present
wait: yes
wait_timeout: "{{ k8s_wait_timeout }}"
definition:
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx-labeled
namespace: "{{ namespace }}"
labels:
app: nginx
test-group: label-selector-test
spec:
replicas: 2
selector:
matchLabels:
app: nginx-labeled
template:
metadata:
labels:
app: nginx-labeled
spec:
containers:
- name: nginx
image: nginx:1.17
ports:
- containerPort: 80
- name: Update deployment to create second revision
k8s:
state: present
wait: yes
wait_timeout: 30
definition:
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx-labeled
namespace: "{{ namespace }}"
labels:
app: nginx
test-group: label-selector-test
spec:
replicas: 2
selector:
matchLabels:
app: nginx-labeled
template:
metadata:
labels:
app: nginx-labeled
spec:
containers:
- name: nginx
image: nginx:1.18
ports:
- containerPort: 80
- name: Test rollback with label selectors
k8s_rollback:
api_version: apps/v1
kind: Deployment
name: nginx-labeled
namespace: "{{ namespace }}"
label_selectors:
- "test-group=label-selector-test"
register: result
- name: Assert label selector rollback worked
assert:
that:
- result is changed
- result.rollback_info | length == 1
- result.rollback_info[0].method == "patch"
- name: Create deployment with single revision
k8s:
state: present
wait: yes
wait_timeout: 30
definition:
apiVersion: apps/v1
kind: Deployment
metadata:
name: single-revision
namespace: "{{ namespace }}"
spec:
replicas: 1
selector:
matchLabels:
app: single-revision
template:
metadata:
labels:
app: single-revision
spec:
containers:
- name: nginx
image: nginx:1.17
ports:
- containerPort: 80
- name: Try to rollback deployment with no previous revisions
k8s_rollback:
api_version: apps/v1
kind: Deployment
name: single-revision
namespace: "{{ namespace }}"
register: result
- name: Debug result
debug:
var: result
- name: Assert warning is returned for no rollout history
assert:
that:
- not result.changed
- result.warnings is defined
- "'No rollout history found' in result.warnings[0]"
- name: Create a service for unsupported resource test
k8s:
state: present
definition:
apiVersion: v1
kind: Service
metadata:
name: test-service
namespace: "{{ namespace }}"
spec:
selector:
app: nginx
ports:
- port: 80
targetPort: 80
- name: Test rollback on unsupported resource type
k8s_rollback:
api_version: v1
kind: Service
name: test-service
namespace: "{{ namespace }}"
register: result
ignore_errors: yes
- name: Assert error message for unsupported resource
assert:
that:
- not result.changed
- "'Cannot perform rollback on resource of kind Service' in result.msg"
- name: Test rollback on non-existent deployment
k8s_rollback:
api_version: apps/v1
kind: Deployment
name: non-existent
namespace: "{{ namespace }}"
register: result
- name: Assert no resources found
assert:
that:
- not result.changed
- result.rollback_info | length == 0
- name: Create multiple deployments with same label
k8s:
state: present
wait: yes
wait_timeout: 30
definition:
apiVersion: apps/v1
kind: Deployment
metadata:
name: "multi-{{ item }}"
namespace: "{{ namespace }}"
labels:
group: multi-test
app: multi
spec:
replicas: 1
selector:
matchLabels:
app: "multi-{{ item }}"
template:
metadata:
labels:
app: "multi-{{ item }}"
spec:
containers:
- name: nginx
image: nginx:1.17
ports:
- containerPort: 80
loop: [1, 2, 3]
- name: Update multiple deployments to create second revisions
k8s:
state: present
wait: yes
wait_timeout: 30
definition:
apiVersion: apps/v1
kind: Deployment
metadata:
name: "multi-{{ item }}"
namespace: "{{ namespace }}"
labels:
group: multi-test
app: multi
spec:
replicas: 1
selector:
matchLabels:
app: "multi-{{ item }}"
template:
metadata:
labels:
app: "multi-{{ item }}"
spec:
containers:
- name: nginx
image: nginx:1.18
ports:
- containerPort: 80
loop: [1, 2, 3]
- name: Rollback multiple deployments using label selector
k8s_rollback:
api_version: apps/v1
kind: Deployment
name: "multi-{{ item }}"
namespace: "{{ namespace }}"
label_selectors:
- "group=multi-test"
register: result
loop: [1, 2, 3]
- name: Assert multiple resources were rolled back
assert:
that:
- result.results | length == 3
- result.results | selectattr('changed', 'equalto', true) | list | length == 3
- result.results | selectattr('rollback_info', 'defined') | list | length == 3
- result.results | map(attribute='rollback_info') | map('first') | map(attribute='method') | select('equalto', 'patch') | list | length == 3
- name: Validate rollback_info structure for deployment
assert:
that:
- result.results is defined
- result.results[0].rollback_info is defined
- result.results[0].rollback_info | length > 0
- result.results[0].rollback_info[0].method == "patch"
- result.results[0].rollback_info[0].body is defined
- result.results[0].rollback_info[0].resources is defined
- result.results[0].rollback_info[0].resources.metadata is defined
- result.results[0].rollback_info[0].resources.spec is defined
- name: Test rollback with field selectors
k8s_rollback:
api_version: apps/v1
kind: Deployment
name: multi-1
namespace: "{{ namespace }}"
field_selectors:
- "metadata.name=multi-1"
register: result
- name: Assert field selector rollback worked
assert:
that:
- result is changed
- result.rollback_info | length == 1
- result.rollback_info[0].resources.metadata.name == "multi-1"
- name: Test check mode return values
k8s_rollback:
api_version: apps/v1
kind: Deployment
name: multi-2
namespace: "{{ namespace }}"
register: result
check_mode: yes
- name: Validate check mode returns expected structure
assert:
that:
- result is changed
- result.rollback_info is defined
- result.rollback_info[0].method == "patch"
- result.rollback_info[0].body is defined
always:
- name: Delete {{ namespace }} namespace
k8s:

View File

@@ -49,6 +49,7 @@
namespace: "{{ scale_namespace }}"
replicas: 0
wait: true
wait_timeout: 60
register: scale_down
check_mode: true
@@ -80,6 +81,7 @@
namespace: "{{ scale_namespace }}"
replicas: 0
wait: true
wait_timeout: 60
register: scale_down
check_mode: true
@@ -143,6 +145,7 @@
namespace: "{{ scale_namespace }}"
replicas: 0
wait: true
wait_timeout: 60
register: scale_down_idempotency
diff: true
@@ -233,6 +236,7 @@
namespace: "{{ scale_namespace }}"
replicas: 2
wait: yes
wait_timeout: 60
register: scale_up_noop
diff: false
@@ -308,12 +312,12 @@
resource_version: 0
label_selectors:
- app=nginx
wait_timeout: 60
register: scale_out
- assert:
that:
- not scale_out.changed
- scale_out.results | selectattr('warning', 'defined') | list | length == 2
- name: scale deployment using current replicas (wrong value)
kubernetes.core.k8s_scale:
@@ -328,7 +332,6 @@
- assert:
that:
- not scale_out.changed
- scale_out.results | selectattr('warning', 'defined') | list | length == 2
- name: scale deployment using current replicas (right value)
kubernetes.core.k8s_scale: