[backport/2.2] Move integration test suite from molecule to ansible-test (#392) (#457)

[backport/2.2] Move integration test suite from molecule to ansible-test (#392)

Move integration test suite from molecule to ansible-test
SUMMARY
molecule has been replaced with ansible-test
some test cases have been updated
k8s_apply: remove duplicated tasks that increased the running time of the test
helm: use different namespaces for different test cases in order to wait for the namespace deletion before moving to the next test.
all: remove `wait: yes` at the end of each test when deleting the namespace; the role used to create the namespace already ensures that any pre-existing namespace is deleted first.
ISSUE TYPE
Feature Pull Request
COMPONENT NAME
integration testing
Reviewed-by: Mike Graves mgraves@redhat.com
Reviewed-by: Gonéri Le Bouder goneri@lebouder.net
Reviewed-by: None 
(cherry picked from commit fd61f8b)
SUMMARY


ISSUE TYPE


Bugfix Pull Request
Docs Pull Request
Feature Pull Request
New Module Pull Request

COMPONENT NAME

ADDITIONAL INFORMATION
This commit is contained in:
Mike Graves
2022-05-11 14:56:23 -04:00
committed by GitHub
parent 0d9c4d3459
commit 11c800d6ed
190 changed files with 1261 additions and 1768 deletions

View File

@@ -0,0 +1,4 @@
k8s_scale
k8s
k8s_info
time=210

View File

@@ -0,0 +1,42 @@
---
# Default variables for the k8s_scale integration test target.
# k8s_pod_template is assembled from the metadata/spec fragments below and
# is meant to be dropped into a Deployment's spec.template; per-test values
# (k8s_pod_name, k8s_pod_image, ...) are supplied via task-level vars.

k8s_pod_metadata:
  labels:
    app: "{{ k8s_pod_name }}"

k8s_pod_spec:
  serviceAccount: "{{ k8s_pod_service_account }}"
  containers:
    - image: "{{ k8s_pod_image }}"
      imagePullPolicy: Always
      name: "{{ k8s_pod_name }}"
      command: "{{ k8s_pod_command }}"
      # Trivial probe: the pod is "ready" 15s after the container starts.
      readinessProbe:
        initialDelaySeconds: 15
        exec:
          command:
            - /bin/true
      resources: "{{ k8s_pod_resources }}"
      ports: "{{ k8s_pod_ports }}"
      env: "{{ k8s_pod_env }}"

k8s_pod_service_account: default

k8s_pod_resources:
  limits:
    cpu: "100m"
    memory: "100Mi"

# Overridable per task; empty by default so the template always renders.
k8s_pod_command: []
k8s_pod_ports: []
k8s_pod_env: []

k8s_pod_template:
  metadata: "{{ k8s_pod_metadata }}"
  spec: "{{ k8s_pod_spec }}"

# Namespace used by this test target (created by setup_namespace).
test_namespace: "scale"
# Upper bound (seconds) for wait_timeout on slow CI clusters.
k8s_wait_timeout: 400

View File

@@ -0,0 +1,50 @@
---
# Two identical 3-replica Deployments sharing the label app=nginx.
# The tasks use this fixture to exercise k8s_scale with label_selectors,
# verifying that a single scale operation matches multiple resources.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: test0
  labels:
    app: nginx
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
        - name: nginx
          image: nginx:1.14.2
          ports:
            - containerPort: 80
        - name: hello
          image: busybox
          command: ['sh', '-c', 'echo "Hello, from test0" && sleep 3600']
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: test1
  labels:
    app: nginx
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
        - name: nginx
          image: nginx:1.14.2
          ports:
            - containerPort: 80
        - name: hello
          image: busybox
          command: ['sh', '-c', 'echo "Hello, from test1" && sleep 3600']

View File

@@ -0,0 +1,2 @@
---
# setup_namespace creates the test namespace (and removes any leftover
# namespace of the same name) before this target's tasks run.
dependencies:
  - setup_namespace

View File

@@ -0,0 +1,272 @@
---
# Integration tests for kubernetes.core.k8s_scale:
#   1. scale a single Deployment down/up, with and without wait,
#      asserting changed-status, diff presence and resulting pod counts;
#   2. scale multiple Deployments at once via label_selectors, covering
#      resource_version and current_replicas precondition handling.
# The test namespace is always removed at the end (always: section).
- block:
    - set_fact:
        scale_namespace: "{{ test_namespace }}"

    - name: Add a deployment
      k8s:
        definition:
          apiVersion: apps/v1
          kind: Deployment
          metadata:
            name: scale-deploy
            namespace: "{{ scale_namespace }}"
          spec:
            replicas: 1
            selector:
              matchLabels:
                app: "{{ k8s_pod_name }}"
            template: "{{ k8s_pod_template }}"
        wait: true
        wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
        apply: true
      vars:
        k8s_pod_name: scale-deploy
        k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-green
        k8s_pod_ports:
          - containerPort: 8080
            name: http
            protocol: TCP

    - name: Get pods in scale-deploy
      k8s_info:
        kind: Pod
        label_selectors:
          - app=scale-deploy
        namespace: "{{ scale_namespace }}"
        field_selectors:
          - status.phase=Running

    - name: Scale the deployment
      k8s_scale:
        api_version: apps/v1
        kind: Deployment
        name: scale-deploy
        namespace: "{{ scale_namespace }}"
        replicas: 0
        wait: true
        wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
      register: scale_down
      diff: true

    # Pod termination is asynchronous: retry until no Running pod is left.
    - name: Get pods in scale-deploy
      k8s_info:
        kind: Pod
        label_selectors:
          - app=scale-deploy
        namespace: "{{ scale_namespace }}"
        field_selectors:
          - status.phase=Running
      register: scale_down_deploy_pods
      until: scale_down_deploy_pods.resources | length == 0
      retries: 6
      delay: 5

    - name: Ensure that scale down took effect
      assert:
        that:
          - scale_down is changed
          - '"duration" in scale_down'
          - scale_down.diff

    - name: Reapply the earlier deployment
      k8s:
        definition:
          # FIX(review): was `api_version` — inside a Kubernetes manifest
          # the key must be `apiVersion` (as in the first task above).
          apiVersion: apps/v1
          kind: Deployment
          metadata:
            name: scale-deploy
            namespace: "{{ scale_namespace }}"
          spec:
            replicas: 1
            selector:
              matchLabels:
                app: "{{ k8s_pod_name }}"
            template: "{{ k8s_pod_template }}"
        wait: true
        wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
        apply: true
      vars:
        k8s_pod_name: scale-deploy
        k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-green
        k8s_pod_ports:
          - containerPort: 8080
            name: http
            protocol: TCP
      register: reapply_after_scale

    - name: Get pods in scale-deploy
      k8s_info:
        kind: Pod
        label_selectors:
          - app=scale-deploy
        namespace: "{{ scale_namespace }}"
        field_selectors:
          - status.phase=Running
      register: scale_up_deploy_pods

    - name: Ensure that reapply after scale worked
      assert:
        that:
          - reapply_after_scale is changed
          - scale_up_deploy_pods.resources | length == 1

    - name: Scale the deployment up
      k8s_scale:
        api_version: apps/v1
        kind: Deployment
        name: scale-deploy
        namespace: "{{ scale_namespace }}"
        replicas: 2
        wait: true
        wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
      register: scale_up
      diff: false

    - name: Get pods in scale-deploy
      k8s_info:
        kind: Pod
        label_selectors:
          - app=scale-deploy
        field_selectors:
          - status.phase=Running
        namespace: "{{ scale_namespace }}"
      register: scale_up_further_deploy_pods

    - name: Ensure that scale up worked
      assert:
        that:
          - scale_up is changed
          - '"duration" in scale_up'
          # diff: false on the task, so no diff key should be returned
          - scale_up.diff is not defined
          - scale_up_further_deploy_pods.resources | length == 2

    - name: Don't scale the deployment up
      k8s_scale:
        api_version: apps/v1
        kind: Deployment
        name: scale-deploy
        namespace: "{{ scale_namespace }}"
        replicas: 2
        wait: true
      register: scale_up_noop
      diff: false

    - name: Get pods in scale-deploy
      k8s_info:
        kind: Pod
        label_selectors:
          - app=scale-deploy
        field_selectors:
          - status.phase=Running
        namespace: "{{ scale_namespace }}"
      register: scale_up_noop_pods

    - name: Ensure that no-op scale up worked
      assert:
        that:
          - scale_up_noop is not changed
          - scale_up_noop.diff is not defined
          - scale_up_noop_pods.resources | length == 2
          - '"duration" in scale_up_noop'

    - name: Scale deployment down without wait
      k8s_scale:
        api_version: apps/v1
        kind: Deployment
        name: scale-deploy
        namespace: "{{ scale_namespace }}"
        replicas: 1
        wait: false
      register: scale_down_no_wait
      diff: true

    # No wait on the module call, so poll until the pod count settles.
    - name: Ensure that scale down succeeds
      k8s_info:
        kind: Pod
        label_selectors:
          - app=scale-deploy
        namespace: "{{ scale_namespace }}"
      register: scale_down_no_wait_pods
      retries: 6
      delay: 5
      until: scale_down_no_wait_pods.resources | length == 1

    - name: Ensure that scale down without wait worked
      assert:
        that:
          - scale_down_no_wait is changed
          - scale_down_no_wait.diff
          - scale_down_no_wait_pods.resources | length == 1

    # --- scale multiple resources using label selectors ---
    - name: create deployment
      kubernetes.core.k8s:
        namespace: "{{ scale_namespace }}"
        src: files/deployment.yaml

    - name: list deployment
      kubernetes.core.k8s_info:
        kind: Deployment
        namespace: "{{ scale_namespace }}"
        label_selectors:
          - app=nginx
      register: resource

    - assert:
        that:
          - resource.resources | list | length == 2

    # resource_version: 0 never matches the live objects, so the module
    # must skip both Deployments and emit a warning for each.
    - name: scale deployment using resource version
      kubernetes.core.k8s_scale:
        replicas: 2
        kind: Deployment
        namespace: "{{ scale_namespace }}"
        resource_version: 0
        label_selectors:
          - app=nginx
      register: scale_out

    - assert:
        that:
          - not scale_out.changed
          - scale_out.results | selectattr('warning', 'defined') | list | length == 2

    # current_replicas precondition does not match (actual is 3), so both
    # Deployments are skipped with a warning.
    - name: scale deployment using current replicas (wrong value)
      kubernetes.core.k8s_scale:
        replicas: 2
        current_replicas: 4
        kind: Deployment
        namespace: "{{ scale_namespace }}"
        label_selectors:
          - app=nginx
      register: scale_out

    - assert:
        that:
          - not scale_out.changed
          - scale_out.results | selectattr('warning', 'defined') | list | length == 2

    - name: scale deployment using current replicas (right value)
      kubernetes.core.k8s_scale:
        replicas: 2
        current_replicas: 3
        kind: Deployment
        namespace: "{{ scale_namespace }}"
        label_selectors:
          - app=nginx
        wait: true
        wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
      register: scale_out

    - assert:
        that:
          - scale_out.changed
          - scale_out.results | map(attribute='result.status.replicas') | list | unique == [2]

  always:
    - name: Remove namespace
      k8s:
        kind: Namespace
        name: "{{ scale_namespace }}"
        state: absent
      ignore_errors: true