diff --git a/changelogs/fragments/255-k8s_scale-k8s_rollback-add-support-for-check_mode.yml b/changelogs/fragments/255-k8s_scale-k8s_rollback-add-support-for-check_mode.yml new file mode 100644 index 00000000..17d2ad34 --- /dev/null +++ b/changelogs/fragments/255-k8s_scale-k8s_rollback-add-support-for-check_mode.yml @@ -0,0 +1,3 @@ +minor_changes: + - k8s_scale - add support for check_mode. (https://github.com/ansible-collections/kubernetes.core/issues/244). + - k8s_rollback - add support for check_mode. (https://github.com/ansible-collections/kubernetes.core/issues/243). diff --git a/molecule/default/tasks/rollback.yml b/molecule/default/tasks/rollback.yml index 743ff53c..e562a4d5 100644 --- a/molecule/default/tasks/rollback.yml +++ b/molecule/default/tasks/rollback.yml @@ -8,18 +8,12 @@ k8s: name: "{{ namespace }}" kind: Namespace - api_version: v1 - apply: no - register: output - - - name: show output - debug: - var: output - name: Create a deployment k8s: state: present wait: yes + wait_timeout: 30 inline: &deploy apiVersion: apps/v1 kind: Deployment @@ -43,11 +37,7 @@ image: nginx:1.17 ports: - containerPort: 80 - register: output - - name: Show output - debug: - var: output - name: Crash the existing deployment k8s: @@ -77,7 +67,47 @@ ports: - containerPort: 80 ignore_errors: yes - register: output + register: crash + + - name: Assert that the Deployment failed + assert: + that: + - crash is failed + + - name: Read the deployment + k8s_info: + kind: Deployment + name: nginx-deploy + namespace: "{{ namespace }}" + register: deployment + + - set_fact: + failed_version: "{{ deployment.resources[0].metadata.annotations['deployment.kubernetes.io/revision'] }}" + + - name: Rolling Back the crashed deployment (check mode) + k8s_rollback: + api_version: apps/v1 + kind: Deployment + name: nginx-deploy + namespace: "{{ namespace }}" + register: result + check_mode: yes + + - assert: + that: + - result is changed + + - name: Read the deployment + k8s_info: + kind: 
Deployment + name: nginx-deploy + namespace: "{{ namespace }}" + register: deployment + + - name: Validate that Rollback using check_mode did not change the Deployment + assert: + that: + - failed_version == deployment.resources[0].metadata.annotations['deployment.kubernetes.io/revision'] - name: Rolling Back the crashed deployment k8s_rollback: @@ -85,12 +115,24 @@ kind: Deployment name: nginx-deploy namespace: "{{ namespace }}" - when: output.failed - register: output + register: result - - name: Show output - debug: - var: output + - name: assert rollback is changed + assert: + that: + - result is changed + + - name: Read the deployment once again + k8s_info: + kind: Deployment + name: nginx-deploy + namespace: "{{ namespace }}" + register: deployment + + - name: Validate that Rollback changed the Deployment + assert: + that: + - failed_version | int + 1 == deployment.resources[0].metadata.annotations['deployment.kubernetes.io/revision'] | int - name: Create a DaemonSet k8s: @@ -139,16 +181,12 @@ - name: varlibdockercontainers hostPath: path: /var/lib/docker/containers - register: output - - - name: Show output - debug: - var: output - name: Crash the existing DaemonSet k8s: state: present wait: yes + wait_timeout: 30 definition: apiVersion: apps/v1 kind: DaemonSet @@ -192,8 +230,44 @@ - name: varlibdockercontainers hostPath: path: /var/lib/docker/containers - ignore_errors: yes - register: output + register: crash + ignore_errors: true + + - name: Assert that the Daemonset failed + assert: + that: + - crash is failed + + - name: Read the crashed DaemonSet + k8s_info: + kind: DaemonSet + name: fluentd-elasticsearch + namespace: "{{ namespace }}" + register: result + + - set_fact: + failed_version: "{{ result.resources[0].metadata.annotations['deprecated.daemonset.template.generation'] }}" + + - name: Rolling Back the crashed DaemonSet (check_mode) + k8s_rollback: + api_version: apps/v1 + kind: DaemonSet + name: fluentd-elasticsearch + namespace: "{{ namespace 
}}" + register: result + check_mode: yes + + - name: Read the DaemonSet + k8s_info: + kind: DaemonSet + name: fluentd-elasticsearch + namespace: "{{ namespace }}" + register: result + + - name: Validate that Rollback using check_mode did not changed the DaemonSet version + assert: + that: + - failed_version == result.resources[0].metadata.annotations['deprecated.daemonset.template.generation'] - name: Rolling Back the crashed DaemonSet k8s_rollback: @@ -201,12 +275,24 @@ kind: DaemonSet name: fluentd-elasticsearch namespace: "{{ namespace }}" - when: output.failed - register: output + register: result - - name: Show output - debug: - var: output + - name: assert rollback is changed + assert: + that: + - result is changed + + - name: Read the DaemonSet + k8s_info: + kind: DaemonSet + name: fluentd-elasticsearch + namespace: "{{ namespace }}" + register: result + + - name: Validate that Rollback changed the Daemonset version + assert: + that: + - failed_version | int + 1 == result.resources[0].metadata.annotations['deprecated.daemonset.template.generation'] | int always: - name: Delete {{ namespace }} namespace diff --git a/molecule/default/tasks/scale.yml b/molecule/default/tasks/scale.yml index 8fd16834..eaa8f5cf 100644 --- a/molecule/default/tasks/scale.yml +++ b/molecule/default/tasks/scale.yml @@ -45,6 +45,68 @@ field_selectors: - status.phase=Running + - name: Scale the deployment (check_mode) + k8s_scale: + api_version: apps/v1 + kind: Deployment + name: scale-deploy + namespace: "{{ scale_namespace }}" + replicas: 0 + wait: yes + register: scale_down + check_mode: true + + - name: Get pods in scale-deploy + k8s_info: + kind: Pod + label_selectors: + - app=scale-deploy + namespace: "{{ scale_namespace }}" + field_selectors: + - status.phase=Running + register: scale_down_deploy_pods + ignore_errors: true + until: scale_down_deploy_pods.resources | length == 0 + retries: 6 + delay: 5 + + - name: Ensure the deployment did not changed and pods are still running + 
assert: + that: + - scale_down is changed + - scale_down_deploy_pods.resources | length > 0 + + - name: Scale the deployment (check_mode) once again - validate idempotency + k8s_scale: + api_version: apps/v1 + kind: Deployment + name: scale-deploy + namespace: "{{ scale_namespace }}" + replicas: 0 + wait: yes + register: scale_down + check_mode: true + + - name: Get pods in scale-deploy + k8s_info: + kind: Pod + label_selectors: + - app=scale-deploy + namespace: "{{ scale_namespace }}" + field_selectors: + - status.phase=Running + register: scale_down_deploy_pods + ignore_errors: true + until: scale_down_deploy_pods.resources | length == 0 + retries: 6 + delay: 5 + + - name: Ensure the deployment did not change and pods are still running + assert: + that: + - scale_down is changed + - scale_down_deploy_pods.resources | length > 0 + - name: Scale the deployment k8s_scale: api_version: apps/v1 @@ -76,6 +138,22 @@ - '"duration" in scale_down' - scale_down.diff + - name: Scale the deployment once again (idempotency) + k8s_scale: + api_version: apps/v1 + kind: Deployment + name: scale-deploy + namespace: "{{ scale_namespace }}" + replicas: 0 + wait: yes + register: scale_down_idempotency + diff: true + + - name: Ensure that scale down did not take effect + assert: + that: + - scale_down_idempotency is not changed + - name: Reapply the earlier deployment k8s: definition: diff --git a/plugins/modules/k8s_rollback.py b/plugins/modules/k8s_rollback.py index 236ec997..df4f419d 100644 --- a/plugins/modules/k8s_rollback.py +++ b/plugins/modules/k8s_rollback.py @@ -115,11 +115,13 @@ def execute_module(module, k8s_ansible_mixin): module.params["field_selectors"], ) + changed = False for resource in resources["resources"]: result = perform_action(module, k8s_ansible_mixin, resource) + changed = result["changed"] or changed results.append(result) - module.exit_json(**{"changed": True, "rollback_info": results}) + module.exit_json(**{"changed": changed, "rollback_info": results}) 
def perform_action(module, k8s_ansible_mixin, resource): @@ -143,6 +145,13 @@ def perform_action(module, k8s_ansible_mixin, resource): prev_managed_resource = get_previous_revision( managed_resources["resources"], current_revision ) + if not prev_managed_resource: + warn = "No rollout history found for resource %s/%s" % ( + module.params["kind"], + resource["metadata"]["name"], + ) + result = {"changed": False, "warnings": [warn]} + return result if module.params["kind"] == "Deployment": del prev_managed_resource["spec"]["template"]["metadata"]["labels"][ @@ -174,22 +183,24 @@ def perform_action(module, k8s_ansible_mixin, resource): api_target = "daemonsets" content_type = "application/strategic-merge-patch+json" - rollback = k8s_ansible_mixin.client.request( - "PATCH", - "/apis/{0}/namespaces/{1}/{2}/{3}".format( - module.params["api_version"], - module.params["namespace"], - api_target, - module.params["name"], - ), - body=resource_patch, - content_type=content_type, - ) + rollback = resource + if not module.check_mode: + rollback = k8s_ansible_mixin.client.request( + "PATCH", + "/apis/{0}/namespaces/{1}/{2}/{3}".format( + module.params["api_version"], + module.params["namespace"], + api_target, + module.params["name"], + ), + body=resource_patch, + content_type=content_type, + ).to_dict() result = {"changed": True} result["method"] = "patch" result["body"] = resource_patch - result["resources"] = rollback.to_dict() + result["resources"] = rollback return result diff --git a/plugins/modules/k8s_scale.py b/plugins/modules/k8s_scale.py index 49f13937..66239cf1 100644 --- a/plugins/modules/k8s_scale.py +++ b/plugins/modules/k8s_scale.py @@ -236,7 +236,7 @@ def execute_module( module.exit_json(warning=warn, **return_attributes) for existing in existing_items: - if module.params["kind"] == "job": + if module.params["kind"].lower() == "job": existing_count = existing.spec.parallelism elif hasattr(existing.spec, "replicas"): existing_count = existing.spec.replicas @@ 
-271,22 +271,25 @@ def execute_module( continue if existing_count != replicas: - if not module.check_mode: - if module.params["kind"] == "job": - existing.spec.parallelism = replicas - result = resource.patch(existing.to_dict()).to_dict() + if module.params["kind"].lower() == "job": + existing.spec.parallelism = replicas + result = {"changed": True} + if module.check_mode: + result["result"] = existing.to_dict() else: - result = scale( - module, - k8s_ansible_mixin, - resource, - existing, - replicas, - wait, - wait_time, - wait_sleep, - ) - changed = changed or result["changed"] + result["result"] = resource.patch(existing.to_dict()).to_dict() + else: + result = scale( + module, + k8s_ansible_mixin, + resource, + existing, + replicas, + wait, + wait_time, + wait_sleep, + ) + changed = changed or result["changed"] else: name = existing.metadata.name namespace = existing.metadata.namespace @@ -342,25 +345,34 @@ def scale( existing = resource.get(name=name, namespace=namespace) - try: - resource.scale.patch(body=scale_obj) - except Exception as exc: - module.fail_json(msg="Scale request failed: {0}".format(exc)) - - k8s_obj = resource.get(name=name, namespace=namespace).to_dict() - match, diffs = k8s_ansible_mixin.diff_objects(existing.to_dict(), k8s_obj) result = dict() - result["result"] = k8s_obj + if module.check_mode: + k8s_obj = copy.deepcopy(existing.to_dict()) + k8s_obj["spec"]["replicas"] = replicas + match, diffs = k8s_ansible_mixin.diff_objects(existing.to_dict(), k8s_obj) + if wait: + result["duration"] = 0 + result["result"] = k8s_obj + else: + try: + resource.scale.patch(body=scale_obj) + except Exception as exc: + module.fail_json(msg="Scale request failed: {0}".format(exc)) + + k8s_obj = resource.get(name=name, namespace=namespace).to_dict() + result["result"] = k8s_obj + if wait and not module.check_mode: + success, result["result"], result["duration"] = k8s_ansible_mixin.wait( + resource, scale_obj, wait_sleep, wait_time + ) + if not success: + 
module.fail_json(msg="Resource scaling timed out", **result) + + match, diffs = k8s_ansible_mixin.diff_objects(existing.to_dict(), k8s_obj) result["changed"] = not match if module._diff: result["diff"] = diffs - if wait: - success, result["result"], result["duration"] = k8s_ansible_mixin.wait( - resource, scale_obj, wait_sleep, wait_time - ) - if not success: - module.fail_json(msg="Resource scaling timed out", **result) return result