Fix scale wait and add tests

Move the wait logic out of raw and into common, and use that
shared logic in scale.

Fix a few broken wait-condition cases highlighted by scaling
up and down.

Move the scale-related tests into a dedicated test task file.

Add further service-related tests.
Will Thames
2020-05-22 12:39:43 +10:00
parent 3bdfb4745a
commit beebe98ce6
7 changed files with 486 additions and 170 deletions
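
The "broken wait condition cases" mentioned above are easiest to see around scale-down: once a Deployment reaches zero replicas, Kubernetes omits counters such as status.readyReplicas from the object entirely, so a naive equality check against the field never succeeds. As a rough illustration of the shape of predicate such shared wait logic needs (a hedged sketch only, not the collection's actual code; deployment_ready and the dict layout are assumptions here):

def deployment_ready(deployment):
    # Sketch only -- not the collection's implementation.
    spec = deployment.get('spec') or {}
    status = deployment.get('status') or {}
    metadata = deployment.get('metadata') or {}
    # Kubernetes defaults spec.replicas to 1 when it is omitted.
    desired = spec.get('replicas', 1)
    # On scale-down to zero the status omits readyReplicas and
    # updatedReplicas entirely, so default every counter to 0
    # instead of comparing against a missing key.
    return (
        status.get('observedGeneration', 0) >= metadata.get('generation', 0)
        and status.get('updatedReplicas', 0) == desired
        and status.get('readyReplicas', 0) == desired
        and status.get('unavailableReplicas', 0) == 0
    )

Keeping one predicate of this shape in common lets both k8s (wait: yes) and k8s_scale poll the same condition instead of carrying diverging copies, which is the refactor the message describes.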


@@ -21,6 +21,7 @@
        that: (pod_list.resources | count) > 5
- include_tasks: tasks/delete.yml
- include_tasks: tasks/scale.yml
- include_tasks: tasks/apply.yml
- include_tasks: tasks/waiter.yml
- include_tasks: tasks/full.yml


@@ -403,6 +403,162 @@
        that:
          - deploy_after_serviceaccount_removal is failed

    - name: Insert new service port
      k8s:
        definition:
          apiVersion: v1
          kind: Service
          metadata:
            name: apply-svc
            namespace: "{{ apply_namespace }}"
          spec:
            selector:
              app: whatever
            ports:
              - name: mesh
                port: 8080
                targetPort: 8080
              - name: http
                port: 8081
                targetPort: 8081
        apply: yes
      register: k8s_service_4

    - name: Check ports are correct
      assert:
        that:
          - k8s_service_4 is changed
          - k8s_service_4.result.spec.ports | length == 2
          - k8s_service_4.result.spec.ports[0].port == 8080
          - k8s_service_4.result.spec.ports[1].port == 8081

    - name: Remove new service port (check mode)
      k8s:
        definition:
          apiVersion: v1
          kind: Service
          metadata:
            name: apply-svc
            namespace: "{{ apply_namespace }}"
          spec:
            selector:
              app: whatever
            ports:
              - name: http
                port: 8081
                targetPort: 8081
        apply: yes
      check_mode: yes
      register: k8s_service_check

    - name: Check ports are correct
      assert:
        that:
          - k8s_service_check is changed
          - k8s_service_check.result.spec.ports | length == 1
          - k8s_service_check.result.spec.ports[0].port == 8081

    - name: Remove new service port
      k8s:
        definition:
          apiVersion: v1
          kind: Service
          metadata:
            name: apply-svc
            namespace: "{{ apply_namespace }}"
          spec:
            selector:
              app: whatever
            ports:
              - name: http
                port: 8081
                targetPort: 8081
        apply: yes
      register: k8s_service_5

    - name: Check ports are correct
      assert:
        that:
          - k8s_service_5 is changed
          - k8s_service_5.result.spec.ports | length == 1
          - k8s_service_5.result.spec.ports[0].port == 8081

    - name: Add a serviceaccount
      k8s:
        definition:
          apiVersion: v1
          kind: ServiceAccount
          metadata:
            name: apply-deploy
            namespace: "{{ apply_namespace }}"

    - name: Add a deployment
      k8s:
        definition:
          apiVersion: apps/v1
          kind: Deployment
          metadata:
            name: apply-deploy
            namespace: "{{ apply_namespace }}"
          spec:
            replicas: 1
            selector:
              matchLabels:
                app: "{{ k8s_pod_name }}"
            template: "{{ k8s_pod_template }}"
        wait: yes
        apply: yes
      vars:
        k8s_pod_name: apply-deploy
        k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-green
        k8s_pod_service_account: apply-deploy
        k8s_pod_ports:
          - containerPort: 8080
            name: http
            protocol: TCP

    - name: Remove the serviceaccount
      k8s:
        state: absent
        definition:
          apiVersion: v1
          kind: ServiceAccount
          metadata:
            name: apply-deploy
            namespace: "{{ apply_namespace }}"

    - name: Update the earlier deployment
      k8s:
        definition:
          apiVersion: apps/v1
          kind: Deployment
          metadata:
            name: apply-deploy
            namespace: "{{ apply_namespace }}"
          spec:
            replicas: 2
            selector:
              matchLabels:
                app: "{{ k8s_pod_name }}"
            template: "{{ k8s_pod_template }}"
        wait: yes
        apply: yes
      vars:
        k8s_pod_name: apply-deploy
        k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-purple
        k8s_pod_service_account: apply-deploy
        k8s_pod_ports:
          - containerPort: 8080
            name: http
            protocol: TCP
      register: deploy_after_serviceaccount_removal
      ignore_errors: yes

    - name: Ensure that updating deployment after service account removal failed
      assert:
        that:
          - deploy_after_serviceaccount_removal is failed

  always:
    - name: Remove namespace
      k8s:


@@ -0,0 +1,214 @@
---
- block:
    - set_fact:
        scale_namespace: scale

    - name: Ensure namespace exists
      k8s:
        definition:
          apiVersion: v1
          kind: Namespace
          metadata:
            name: "{{ scale_namespace }}"

    - name: Add a deployment
      k8s:
        definition:
          apiVersion: apps/v1
          kind: Deployment
          metadata:
            name: scale-deploy
            namespace: "{{ scale_namespace }}"
          spec:
            replicas: 1
            selector:
              matchLabels:
                app: "{{ k8s_pod_name }}"
            template: "{{ k8s_pod_template }}"
        wait: yes
        wait_timeout: 60
        apply: yes
      vars:
        k8s_pod_name: scale-deploy
        k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-green
        k8s_pod_ports:
          - containerPort: 8080
            name: http
            protocol: TCP

    - name: Get pods in scale-deploy
      k8s_info:
        kind: Pod
        label_selectors:
          - app=scale-deploy
        namespace: "{{ scale_namespace }}"
        field_selectors:
          - status.phase=Running

    - name: Scale the deployment
      k8s_scale:
        api_version: apps/v1
        kind: Deployment
        name: scale-deploy
        namespace: "{{ scale_namespace }}"
        replicas: 0
        wait: yes
      register: scale_down

    # It looks like the Deployment is updated to have the desired state *before* the pods are terminated
    # Wait a couple of seconds to allow the pods to at least get to Terminating state
    - name: Avoid race condition
      pause:
        seconds: 2

    - name: Get pods in scale-deploy
      k8s_info:
        kind: Pod
        label_selectors:
          - app=scale-deploy
        namespace: "{{ scale_namespace }}"
        field_selectors:
          - status.phase=Running
      register: scale_down_deploy_pods

    - name: Ensure that scale down took effect
      assert:
        that:
          - scale_down is changed
          - '"duration" in scale_down'
          - scale_down.diff
          - scale_down_deploy_pods.resources | length == 0

    - name: Reapply the earlier deployment
      k8s:
        definition:
          api_version: apps/v1
          kind: Deployment
          metadata:
            name: scale-deploy
            namespace: "{{ scale_namespace }}"
          spec:
            replicas: 1
            selector:
              matchLabels:
                app: "{{ k8s_pod_name }}"
            template: "{{ k8s_pod_template }}"
        wait: yes
        wait_timeout: 60
        apply: yes
      vars:
        k8s_pod_name: scale-deploy
        k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-green
        k8s_pod_ports:
          - containerPort: 8080
            name: http
            protocol: TCP
      register: reapply_after_scale

    - name: Get pods in scale-deploy
      k8s_info:
        kind: Pod
        label_selectors:
          - app=scale-deploy
        namespace: "{{ scale_namespace }}"
        field_selectors:
          - status.phase=Running
      register: scale_up_deploy_pods

    - name: Ensure that reapply after scale worked
      assert:
        that:
          - reapply_after_scale is changed
          - scale_up_deploy_pods.resources | length == 1

    - name: Scale the deployment up
      k8s_scale:
        api_version: apps/v1
        kind: Deployment
        name: scale-deploy
        namespace: "{{ scale_namespace }}"
        replicas: 2
        wait: yes
        wait_timeout: 60
      register: scale_up

    - name: Get pods in scale-deploy
      k8s_info:
        kind: Pod
        label_selectors:
          - app=scale-deploy
        field_selectors:
          - status.phase=Running
        namespace: "{{ scale_namespace }}"
      register: scale_up_further_deploy_pods

    - name: Ensure that scale up worked
      assert:
        that:
          - scale_up is changed
          - '"duration" in scale_up'
          - scale_up.diff
          - scale_up_further_deploy_pods.resources | length == 2

    - name: Don't scale the deployment up
      k8s_scale:
        api_version: apps/v1
        kind: Deployment
        name: scale-deploy
        namespace: "{{ scale_namespace }}"
        replicas: 2
        wait: yes
      register: scale_up_noop

    - name: Get pods in scale-deploy
      k8s_info:
        kind: Pod
        label_selectors:
          - app=scale-deploy
        field_selectors:
          - status.phase=Running
        namespace: "{{ scale_namespace }}"
      register: scale_up_noop_pods

    - name: Ensure that no-op scale up worked
      assert:
        that:
          - scale_up_noop is not changed
          - not scale_up_noop.diff
          - scale_up_noop_pods.resources | length == 2
          - '"duration" in scale_up_noop'

    - name: Scale deployment down without wait
      k8s_scale:
        api_version: apps/v1
        kind: Deployment
        name: scale-deploy
        namespace: "{{ scale_namespace }}"
        replicas: 1
        wait: no
      register: scale_down_no_wait

    - name: Ensure that scale down succeeds
      k8s_info:
        kind: Pod
        label_selectors:
          - app=scale-deploy
        namespace: "{{ scale_namespace }}"
      register: scale_down_no_wait_pods
      retries: 6
      delay: 5
      until: scale_down_no_wait_pods.resources | length == 1

    - name: Ensure that scale down without wait worked
      assert:
        that:
          - scale_down_no_wait is changed
          - scale_down_no_wait.diff
          - scale_down_no_wait_pods.resources | length == 1

  always:
    - name: Remove namespace
      k8s:
        kind: Namespace
        name: "{{ scale_namespace }}"
        state: absent