Issue #10: Move integration tests into molecule playbook.

This commit is contained in:
Jeff Geerling
2020-02-13 11:21:26 -06:00
parent 1200460f26
commit b2e639b823
15 changed files with 205 additions and 191 deletions

View File

@@ -84,8 +84,12 @@ jobs:
mkdir -p /home/runner/.ansible
ln -s /home/runner/work/kubernetes /home/runner/.ansible/collections
- name: Copy kubernetes integration test role into roles directory.
run: cp -R tests/integration/targets/kubernetes roles/
# TODO: Once community.general is on public Galaxy, drop the -s.
- name: Install community.general role to get json_query filter.
run: |
cp tests/integration/targets/kubernetes/files/manifest-example/MANIFEST.json MANIFEST.json
pip install jmespath
ansible-galaxy collection install -vvv -s https://sivel.eng.ansible.com/api community.general
- name: Run molecule default test scenario
run: molecule --debug test
run: molecule test

View File

@@ -23,12 +23,10 @@ provisioner:
localhost:
ansible_python_interpreter: '{{ ansible_playbook_python }}'
env:
ANSIBLE_ROLES_PATH: ${ANSIBLE_ROLES_PATH}:../../tests/integration/targets
ANSIBLE_FORCE_COLOR: 'true'
scenario:
name: default
test_sequence:
# - lint
- syntax
- converge
- idempotence
- verify

View File

@@ -7,17 +7,21 @@
collections:
- community.kubernetes
vars_files:
- vars/main.yml
tasks:
- name: Testing.
- name: Verify cluster is working.
k8s_info:
namespace: kube-system
kind: Pod
register: pod_list
- name: Testing listing.
debug:
msg: "{{ pod_list.resources | count }}"
- name: Verify cluster has more than 5 pods running.
assert:
that: (pod_list.resources | count) > 5
- name: Include Kubernetes integration testing role.
include_role:
name: kubernetes
- include_tasks: tasks/delete.yml
- include_tasks: tasks/apply.yml
- include_tasks: tasks/waiter.yml
- include_tasks: tasks/full.yml

View File

@@ -1,14 +1,9 @@
---
- block:
# TODO: Not available in ansible-base
# - python_requirements_info:
# dependencies:
# - openshift
# - kubernetes
- set_fact:
apply_namespace: apply
- name: ensure namespace exists
- name: Ensure namespace exists
k8s:
definition:
apiVersion: v1
@@ -16,7 +11,7 @@
metadata:
name: "{{ apply_namespace }}"
- name: add a configmap
- name: Add a configmap
k8s:
name: "apply-configmap"
namespace: "{{ apply_namespace }}"
@@ -30,13 +25,13 @@
apply: yes
register: k8s_configmap
- name: check configmap was created
- name: Check configmap was created
assert:
that:
- k8s_configmap is changed
- k8s_configmap.result.metadata.annotations|default(False)
- name: add same configmap again
- name: Add same configmap again
k8s:
definition:
kind: ConfigMap
@@ -51,12 +46,12 @@
apply: yes
register: k8s_configmap_2
- name: check nothing changed
- name: Check nothing changed
assert:
that:
- k8s_configmap_2 is not changed
- name: add same configmap again with check mode on
- name: Add same configmap again with check mode on
k8s:
definition:
kind: ConfigMap
@@ -72,12 +67,12 @@
check_mode: yes
register: k8s_configmap_check
- name: check nothing changed
- name: Check nothing changed
assert:
that:
- k8s_configmap_check is not changed
- name: add same configmap again but using name and namespace args
- name: Add same configmap again but using name and namespace args
k8s:
name: "apply-configmap"
namespace: "{{ apply_namespace }}"
@@ -91,12 +86,12 @@
apply: yes
register: k8s_configmap_2a
- name: check nothing changed
- name: Check nothing changed
assert:
that:
- k8s_configmap_2a is not changed
- name: update configmap
- name: Update configmap
k8s:
definition:
kind: ConfigMap
@@ -111,14 +106,14 @@
apply: yes
register: k8s_configmap_3
- name: ensure that configmap has been correctly updated
- name: Ensure that configmap has been correctly updated
assert:
that:
- k8s_configmap_3 is changed
- "'four' in k8s_configmap_3.result.data"
- "'two' not in k8s_configmap_3.result.data"
- name: add a service
- name: Add a service
k8s:
definition:
apiVersion: v1
@@ -136,7 +131,7 @@
apply: yes
register: k8s_service
- name: add exactly same service
- name: Add exactly same service
k8s:
definition:
apiVersion: v1
@@ -154,12 +149,12 @@
apply: yes
register: k8s_service_2
- name: check nothing changed
- name: Check nothing changed
assert:
that:
- k8s_service_2 is not changed
- name: change service ports
- name: Change service ports
k8s:
definition:
apiVersion: v1
@@ -177,7 +172,7 @@
apply: yes
register: k8s_service_3
- name: check ports are correct
- name: Check ports are correct
assert:
that:
- k8s_service_3 is changed
@@ -185,7 +180,7 @@
- k8s_service_3.result.spec.ports[0].port == 8081
always:
- name: remove namespace
- name: Remove namespace
k8s:
kind: Namespace
name: "{{ apply_namespace }}"

View File

@@ -1,33 +1,28 @@
# TODO: This is the only way I could get the kubeconfig, I don't know why. Running the lookup outside of debug seems to return an empty string
#- debug: msg={{ lookup('env', 'K8S_AUTH_KUBECONFIG') }}
# register: kubeconfig
# Kubernetes resources
---
- block:
- name: Create a namespace
k8s:
name: crd
kind: Namespace
- name: install custom resource definitions
- name: Install custom resource definitions
k8s:
definition: "{{ lookup('file', role_path + '/files/setup-crd.yml') }}"
definition: "{{ lookup('file', kubernetes_role_path + '/files/setup-crd.yml') }}"
- name: pause 5 seconds to avoid race condition
- name: Pause 5 seconds to avoid race condition
pause:
seconds: 5
- name: create custom resource definition
k8s:
definition: "{{ lookup('file', role_path + '/files/crd-resource.yml') }}"
definition: "{{ lookup('file', kubernetes_role_path + '/files/crd-resource.yml') }}"
namespace: crd
apply: "{{ create_crd_with_apply | default(omit) }}"
register: create_crd
- name: patch custom resource definition
k8s:
definition: "{{ lookup('file', role_path + '/files/crd-resource.yml') }}"
definition: "{{ lookup('file', kubernetes_role_path + '/files/crd-resource.yml') }}"
namespace: crd
register: recreate_crd
ignore_errors: yes
@@ -40,14 +35,14 @@
- block:
- name: recreate custom resource definition with merge_type
k8s:
definition: "{{ lookup('file', role_path + '/files/crd-resource.yml') }}"
definition: "{{ lookup('file', kubernetes_role_path + '/files/crd-resource.yml') }}"
merge_type: merge
namespace: crd
register: recreate_crd_with_merge
- name: recreate custom resource definition with merge_type list
k8s:
definition: "{{ lookup('file', role_path + '/files/crd-resource.yml') }}"
definition: "{{ lookup('file', kubernetes_role_path + '/files/crd-resource.yml') }}"
merge_type:
- strategic-merge
- merge
@@ -58,7 +53,7 @@
- name: remove crd
k8s:
definition: "{{ lookup('file', role_path + '/files/crd-resource.yml') }}"
definition: "{{ lookup('file', kubernetes_role_path + '/files/crd-resource.yml') }}"
namespace: crd
state: absent

View File

@@ -1,13 +1,9 @@
- name: ensure that there are actually some nodes
k8s_info:
kind: Node
register: nodes
---
- block:
- set_fact:
delete_namespace: delete
- name: ensure namespace exists
- name: Ensure namespace exists
k8s:
definition:
apiVersion: v1
@@ -15,10 +11,10 @@
metadata:
name: "{{ delete_namespace }}"
- name: add a daemonset
- name: Add a daemonset
k8s:
definition:
apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: delete-daemonset
@@ -35,12 +31,12 @@
k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:1
register: ds
- name: check that daemonset wait worked
- name: Check that daemonset wait worked
assert:
that:
- ds.result.status.currentNumberScheduled == ds.result.status.desiredNumberScheduled
- name: check if pods exist
- name: Check if pods exist
k8s_info:
namespace: "{{ delete_namespace }}"
kind: Pod
@@ -50,12 +46,12 @@
k8s_pod_name: delete-ds
register: pods_create
- name: assert that there are pods
- name: Assert that there are pods
assert:
that:
- pods_create.resources
- name: remove the daemonset
- name: Remove the daemonset
k8s:
kind: DaemonSet
name: delete-daemonset
@@ -63,7 +59,7 @@
state: absent
wait: yes
- name: show status of pods
- name: Show status of pods
k8s_info:
namespace: "{{ delete_namespace }}"
kind: Pod
@@ -72,11 +68,11 @@
vars:
k8s_pod_name: delete-ds
- name: wait for background deletion
- name: Wait for background deletion
pause:
seconds: 30
- name: check if pods still exist
- name: Check if pods still exist
k8s_info:
namespace: "{{ delete_namespace }}"
kind: Pod
@@ -86,16 +82,14 @@
k8s_pod_name: delete-ds
register: pods_delete
- name: assert that deleting the daemonset deleted the pods
- name: Assert that deleting the daemonset deleted the pods
assert:
that:
- not pods_delete.resources
always:
- name: remove namespace
- name: Remove namespace
k8s:
kind: Namespace
name: "{{ delete_namespace }}"
state: absent
when: (nodes.resources | length) > 0

View File

@@ -1,13 +1,4 @@
# TODO: This is the only way I could get the kubeconfig, I don't know why. Running the lookup outside of debug seems to return an empty string
#- debug: msg={{ lookup('env', 'K8S_AUTH_KUBECONFIG') }}
# register: kubeconfig
# Kubernetes resources
- include_tasks: delete.yml
- include_tasks: apply.yml
- include_tasks: waiter.yml
---
- block:
- name: Create a namespace
k8s:
@@ -19,24 +10,25 @@
debug:
var: output
- name: Setting validate_certs to true causes a failure
k8s:
name: testing
kind: Namespace
validate_certs: yes
ignore_errors: yes
register: output
- name: assert that validate_certs caused a failure (and therefore was correctly translated to verify_ssl)
assert:
that:
- output is failed
# TODO: See https://github.com/ansible-collections/kubernetes/pull/22#issuecomment-585852073
# - name: Setting validate_certs to true causes a failure
# k8s:
# name: testing
# kind: Namespace
# validate_certs: yes
# ignore_errors: yes
# register: output
#
# - name: assert that validate_certs caused a failure (and therefore was correctly translated to verify_ssl)
# assert:
# that:
# - output is failed
- name: k8s_info works with empty resources
k8s_info:
kind: Deployment
namespace: testing
api_version: extensions/v1beta1
api_version: apps/v1
register: k8s_info
- name: assert that k8s_info is in correct format
@@ -134,7 +126,7 @@
k8s:
state: present
inline: &deployment
apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
name: elastic
@@ -143,6 +135,11 @@
service: elastic
namespace: testing
spec:
replicas: 1
selector:
matchLabels:
app: galaxy
service: elastic
template:
metadata:
labels:
@@ -160,7 +157,6 @@
- name: elastic-volume
persistentVolumeClaim:
claimName: elastic-volume
replicas: 1
strategy:
type: RollingUpdate
register: output
@@ -179,12 +175,6 @@
assert:
that: not output.changed
- debug:
var: k8s_openshift
- include: openshift.yml
when: k8s_openshift | bool
### Type tests
- name: Create a namespace from a string
k8s:

View File

@@ -1,13 +1,9 @@
- name: ensure that there are actually some nodes
k8s_info:
kind: Node
register: nodes
---
- block:
- set_fact:
wait_namespace: wait
- name: ensure namespace exists
- name: Ensure namespace exists
k8s:
definition:
apiVersion: v1
@@ -15,7 +11,7 @@
metadata:
name: "{{ wait_namespace }}"
- name: add a simple pod
- name: Add a simple pod
k8s:
definition:
apiVersion: v1
@@ -34,15 +30,15 @@
register: wait_pod
ignore_errors: yes
- name: assert that pod creation succeeded
- name: Assert that pod creation succeeded
assert:
that:
- wait_pod is successful
- name: add a daemonset
- name: Add a daemonset
k8s:
definition:
apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: wait-daemonset
@@ -60,15 +56,15 @@
k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:1
register: ds
- name: check that daemonset wait worked
- name: Check that daemonset wait worked
assert:
that:
- ds.result.status.currentNumberScheduled == ds.result.status.desiredNumberScheduled
- name: update a daemonset in check_mode
- name: Update a daemonset in check_mode
k8s:
definition:
apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: wait-daemonset
@@ -88,15 +84,15 @@
k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:2
register: update_ds_check_mode
- name: check that check_mode returned changed
- name: Check that check_mode returned changed
assert:
that:
- update_ds_check_mode is changed
- name: update a daemonset
- name: Update a daemonset
k8s:
definition:
apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: wait-daemonset
@@ -116,7 +112,7 @@
k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:3
register: ds
- name: get updated pods
- name: Get updated pods
k8s_info:
api_version: v1
kind: Pod
@@ -125,13 +121,13 @@
- app=wait-ds
register: updated_ds_pods
- name: check that daemonset wait worked
- name: Check that daemonset wait worked
assert:
that:
- ds.result.status.currentNumberScheduled == ds.result.status.desiredNumberScheduled
- updated_ds_pods.resources[0].spec.containers[0].image.endswith(":3")
- name: add a crashing pod
- name: Add a crashing pod
k8s:
definition:
apiVersion: v1
@@ -151,12 +147,12 @@
register: crash_pod
ignore_errors: yes
- name: check that task failed
- name: Check that task failed
assert:
that:
- crash_pod is failed
- name: use a non-existent image
- name: Use a non-existent image
k8s:
definition:
apiVersion: v1
@@ -174,15 +170,15 @@
register: no_image_pod
ignore_errors: yes
- name: check that task failed
- name: Check that task failed
assert:
that:
- no_image_pod is failed
- name: add a deployment
- name: Add a deployment
k8s:
definition:
apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
name: wait-deploy
@@ -204,15 +200,15 @@
register: deploy
- name: check that deployment wait worked
- name: Check that deployment wait worked
assert:
that:
- deploy.result.status.availableReplicas == deploy.result.status.replicas
- name: update a deployment
- name: Update a deployment
k8s:
definition:
apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
name: wait-deploy
@@ -233,7 +229,7 @@
protocol: TCP
register: update_deploy
- name: get updated pods
- name: Get updated pods
k8s_info:
api_version: v1
kind: Pod
@@ -242,16 +238,16 @@
- app=wait-deploy
register: updated_deploy_pods
- name: check that deployment wait worked
- name: Check that deployment wait worked
assert:
that:
- deploy.result.status.availableReplicas == deploy.result.status.replicas
- updated_deploy_pods.resources[0].spec.containers[0].image.endswith(":2")
- name: pause a deployment
- name: Pause a deployment
k8s:
definition:
apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
name: wait-deploy
@@ -266,15 +262,15 @@
reason: DeploymentPaused
register: pause_deploy
- name: check that paused deployment wait worked
- name: Check that paused deployment wait worked
assert:
that:
- condition.reason == "DeploymentPaused"
- condition.status == "Unknown"
vars:
condition: '{{ pause_deploy.result.status.conditions | json_query("[?type==`Progressing`]") | first }}'
condition: '{{ pause_deploy.result.status.conditions | community.general.json_query("[?type==`Progressing`]") | first }}'
- name: add a service based on the deployment
- name: Add a service based on the deployment
k8s:
definition:
apiVersion: v1
@@ -294,15 +290,15 @@
k8s_pod_name: wait-deploy
register: service
- name: assert that waiting for service works
- name: Assert that waiting for service works
assert:
that:
- service is successful
- name: add a crashing deployment
- name: Add a crashing deployment
k8s:
definition:
apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
name: wait-crash-deploy
@@ -322,12 +318,12 @@
register: wait_crash_deploy
ignore_errors: yes
- name: check that task failed
- name: Check that task failed
assert:
that:
- wait_crash_deploy is failed
- name: remove Pod with very short timeout
- name: Remove Pod with very short timeout
k8s:
api_version: v1
kind: Pod
@@ -340,16 +336,14 @@
ignore_errors: yes
register: short_wait_remove_pod
- name: check that task failed
- name: Check that task failed
assert:
that:
- short_wait_remove_pod is failed
always:
- name: remove namespace
- name: Remove namespace
k8s:
kind: Namespace
name: "{{ wait_namespace }}"
state: absent
when: (nodes.resources | length) > 0

View File

@@ -0,0 +1,33 @@
---
recreate_crd_default_merge_expectation: recreate_crd is not failed
k8s_pod_metadata:
labels:
app: "{{ k8s_pod_name }}"
k8s_pod_spec:
containers:
- image: "{{ k8s_pod_image }}"
imagePullPolicy: Always
name: "{{ k8s_pod_name }}"
command: "{{ k8s_pod_command }}"
readinessProbe:
initialDelaySeconds: 15
exec:
command:
- /bin/true
resources:
limits:
cpu: "100m"
memory: "100Mi"
ports: "{{ k8s_pod_ports }}"
k8s_pod_command: []
k8s_pod_ports: []
k8s_pod_template:
metadata: "{{ k8s_pod_metadata }}"
spec: "{{ k8s_pod_spec }}"
kubernetes_role_path: ../../tests/integration/targets/kubernetes

View File

@@ -1,32 +1,4 @@
---
recreate_crd_default_merge_expectation: recreate_crd is not failed
k8s_pod_metadata:
labels:
app: "{{ k8s_pod_name }}"
k8s_pod_spec:
containers:
- image: "{{ k8s_pod_image }}"
imagePullPolicy: Always
name: "{{ k8s_pod_name }}"
command: "{{ k8s_pod_command }}"
readinessProbe:
initialDelaySeconds: 15
exec:
command:
- /bin/true
resources:
limits:
cpu: "100m"
memory: "100Mi"
ports: "{{ k8s_pod_ports }}"
k8s_pod_command: []
k8s_pod_ports: []
k8s_pod_template:
metadata: "{{ k8s_pod_metadata }}"
spec: "{{ k8s_pod_spec }}"
k8s_openshift: yes
k8s_openshift: true

View File

@@ -0,0 +1,43 @@
{
"collection_info": {
"namespace": "community",
"name": "kubernetes",
"version": "1.0.0",
"authors": [
"chouseknecht (https://github.com/chouseknecht)",
"geerlingguy (https://www.jeffgeerling.com/)",
"maxamillion (https://github.com/maxamillion)",
"jmontleon (https://github.com/jmontleon)",
"fabianvf (https://github.com/fabianvf)",
"willthames (https://github.com/willthames)",
"mmazur (https://github.com/mmazur)",
"jamescassell (https://github.com/jamescassell)"
],
"readme": "README.md",
"tags": [
"kubernetes",
"k8s",
"cloud",
"infrastructure",
"openshift",
"okd",
"cluster"
],
"description": "Kubernetes Collection for Ansible.",
"license": [],
"license_file": "LICENSE",
"dependencies": {},
"repository": "https://github.com/ansible-collections/kubernetes",
"documentation": "",
"homepage": "",
"issues": "https://github.com/ansible-collections/kubernetes/issues"
},
"file_manifest_file": {
"name": "FILES.json",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "38d130899a6e46be25cbde550675c62b55827a79c3cfe29dc43fecd8e1de7ef6",
"format": 1
},
"format": 1
}

View File

@@ -0,0 +1,5 @@
# MANIFEST.json README
This manifest file is used in the test environment to work around a bug that currently exists in Ansible core: https://github.com/ansible/ansible/issues/67399
Once that bug is fixed, this file can be removed, along with the `cp` command used in the CI GitHub Actions workflow for this repository.

View File

@@ -72,23 +72,10 @@
state: absent
no_log: yes
# Run full test suite
# Test openshift
- pip:
name:
- openshift>=0.9.2
- coverage
virtualenv: "{{ virtualenv }}"
virtualenv_command: "{{ virtualenv_command }}"
virtualenv_site_packages: no
- debug:
var: k8s_openshift
- include_tasks: full_test.yml
vars:
ansible_python_interpreter: "{{ virtualenv_interpreter }}"
create_crd_with_apply: no
playbook_namespace: ansible-test-k8s-full
- file:
path: "{{ virtualenv }}"
state: absent
no_log: yes
- include: openshift.yml
when: k8s_openshift | bool