Issue #2: Get tests working with GitHub Actions for now.

Jeff Geerling
2020-01-30 11:30:34 -06:00
parent 4b6fdb1615
commit 2e807f946a
21 changed files with 26 additions and 0 deletions


@@ -0,0 +1,23 @@
Wait tests
----------

Wait tests require at least one node, and don't work on the normal k8s
openshift-origin container as provided by `ansible-test --docker -v k8s`.
minikube, Kubernetes from Docker, or any other Kubernetes service will
suffice.

If kubectl is already using the right config file and context, you can
just run:

```
cd test/integration/targets/k8s
./runme.sh -vv
```

Otherwise, set one or both of `K8S_AUTH_KUBECONFIG` and `K8S_AUTH_CONTEXT`
and use the same command.
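
For example, to point the tests at a local minikube cluster (the kubeconfig
path and context name below are illustrative, not part of this commit):

```
K8S_AUTH_KUBECONFIG=~/.kube/config K8S_AUTH_CONTEXT=minikube ./runme.sh -vv
```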


@@ -0,0 +1,2 @@
cloud/openshift
shippable/cloud/group1


@@ -0,0 +1,32 @@
recreate_crd_default_merge_expectation: recreate_crd is not failed

k8s_pod_metadata:
  labels:
    app: "{{ k8s_pod_name }}"

k8s_pod_spec:
  containers:
    - image: "{{ k8s_pod_image }}"
      imagePullPolicy: Always
      name: "{{ k8s_pod_name }}"
      command: "{{ k8s_pod_command }}"
      readinessProbe:
        initialDelaySeconds: 15
        exec:
          command:
            - /bin/true
      resources:
        limits:
          cpu: "100m"
          memory: "100Mi"
      ports: "{{ k8s_pod_ports }}"

k8s_pod_command: []

k8s_pod_ports: []

k8s_pod_template:
  metadata: "{{ k8s_pod_metadata }}"
  spec: "{{ k8s_pod_spec }}"

k8s_openshift: yes


@@ -0,0 +1,20 @@
apiVersion: certmanager.k8s.io/v1alpha1
kind: Certificate
metadata:
  name: acme-crt
spec:
  secretName: acme-crt-secret
  dnsNames:
    - foo.example.com
    - bar.example.com
  acme:
    config:
      - ingressClass: nginx
        domains:
          - foo.example.com
          - bar.example.com
  issuerRef:
    name: letsencrypt-prod
    # We can reference ClusterIssuers by changing the kind here.
    # The default value is Issuer (i.e. a locally namespaced Issuer)
    kind: Issuer


@@ -0,0 +1,21 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  labels:
    app: kuard
  name: kuard
  namespace: default
spec:
  replicas: 3
  selector:
    matchLabels:
      app: kuard
  unwanted: value
  template:
    metadata:
      labels:
        app: kuard
    spec:
      containers:
        - image: gcr.io/kuar-demo/kuard-amd64:1
          name: kuard


@@ -0,0 +1,20 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  labels:
    app: kuard
  name: kuard
  namespace: default
spec:
  replicas: hello
  selector:
    matchLabels:
      app: kuard
  template:
    metadata:
      labels:
        app: kuard
    spec:
      containers:
        - image: gcr.io/kuar-demo/kuard-amd64:1
          name: kuard


@@ -0,0 +1,14 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: certificates.certmanager.k8s.io
spec:
  group: certmanager.k8s.io
  version: v1alpha1
  scope: Namespaced
  names:
    kind: Certificate
    plural: certificates
    shortNames:
      - cert
      - certs


@@ -0,0 +1,2 @@
dependencies:
  - setup_remote_tmp_dir


@@ -0,0 +1,68 @@
- block:
    - name: Ensure that append_hash namespace exists
      k8s:
        kind: Namespace
        name: append-hash

    - name: create k8s_resource variable
      set_fact:
        k8s_resource:
          metadata:
            name: config-map-test
            namespace: append-hash
          apiVersion: v1
          kind: ConfigMap
          data:
            hello: world

    - name: Create config map
      k8s:
        definition: "{{ k8s_resource }}"
        append_hash: yes
      register: k8s_configmap1

    - name: check configmap is created with a hash
      assert:
        that:
          - k8s_configmap1 is changed
          - k8s_configmap1.result.metadata.name != 'config-map-test'
          - k8s_configmap1.result.metadata.name[:-10] == 'config-map-test-'

    - name: recreate same config map
      k8s:
        definition: "{{ k8s_resource }}"
        append_hash: yes
      register: k8s_configmap2

    - name: check configmaps are the same
      assert:
        that:
          - k8s_configmap2 is not changed
          - k8s_configmap1.result.metadata.name == k8s_configmap2.result.metadata.name

    - name: add key to config map
      k8s:
        definition:
          metadata:
            name: config-map-test
            namespace: append-hash
          apiVersion: v1
          kind: ConfigMap
          data:
            hello: world
            another: value
        append_hash: yes
      register: k8s_configmap3

    - name: check configmaps are different
      assert:
        that:
          - k8s_configmap3 is changed
          - k8s_configmap1.result.metadata.name != k8s_configmap3.result.metadata.name

  always:
    - name: ensure that namespace is removed
      k8s:
        kind: Namespace
        name: append-hash
        state: absent


@@ -0,0 +1,191 @@
- block:
    - python_requirements_info:
        dependencies:
          - openshift
          - kubernetes

    - set_fact:
        apply_namespace: apply

    - name: ensure namespace exists
      k8s:
        definition:
          apiVersion: v1
          kind: Namespace
          metadata:
            name: "{{ apply_namespace }}"

    - name: add a configmap
      k8s:
        name: "apply-configmap"
        namespace: "{{ apply_namespace }}"
        definition:
          kind: ConfigMap
          apiVersion: v1
          data:
            one: "1"
            two: "2"
            three: "3"
        apply: yes
      register: k8s_configmap

    - name: check configmap was created
      assert:
        that:
          - k8s_configmap is changed
          - k8s_configmap.result.metadata.annotations|default(False)

    - name: add same configmap again
      k8s:
        definition:
          kind: ConfigMap
          apiVersion: v1
          metadata:
            name: "apply-configmap"
            namespace: "{{ apply_namespace }}"
          data:
            one: "1"
            two: "2"
            three: "3"
        apply: yes
      register: k8s_configmap_2

    - name: check nothing changed
      assert:
        that:
          - k8s_configmap_2 is not changed

    - name: add same configmap again with check mode on
      k8s:
        definition:
          kind: ConfigMap
          apiVersion: v1
          metadata:
            name: "apply-configmap"
            namespace: "{{ apply_namespace }}"
          data:
            one: "1"
            two: "2"
            three: "3"
        apply: yes
      check_mode: yes
      register: k8s_configmap_check

    - name: check nothing changed
      assert:
        that:
          - k8s_configmap_check is not changed

    - name: add same configmap again but using name and namespace args
      k8s:
        name: "apply-configmap"
        namespace: "{{ apply_namespace }}"
        definition:
          kind: ConfigMap
          apiVersion: v1
          data:
            one: "1"
            two: "2"
            three: "3"
        apply: yes
      register: k8s_configmap_2a

    - name: check nothing changed
      assert:
        that:
          - k8s_configmap_2a is not changed

    - name: update configmap
      k8s:
        definition:
          kind: ConfigMap
          apiVersion: v1
          metadata:
            name: "apply-configmap"
            namespace: "{{ apply_namespace }}"
          data:
            one: "1"
            three: "3"
            four: "4"
        apply: yes
      register: k8s_configmap_3

    - name: ensure that configmap has been correctly updated
      assert:
        that:
          - k8s_configmap_3 is changed
          - "'four' in k8s_configmap_3.result.data"
          - "'two' not in k8s_configmap_3.result.data"

    - name: add a service
      k8s:
        definition:
          apiVersion: v1
          kind: Service
          metadata:
            name: apply-svc
            namespace: "{{ apply_namespace }}"
          spec:
            selector:
              app: whatever
            ports:
              - name: http
                port: 8080
                targetPort: 8080
        apply: yes
      register: k8s_service

    - name: add exactly same service
      k8s:
        definition:
          apiVersion: v1
          kind: Service
          metadata:
            name: apply-svc
            namespace: "{{ apply_namespace }}"
          spec:
            selector:
              app: whatever
            ports:
              - name: http
                port: 8080
                targetPort: 8080
        apply: yes
      register: k8s_service_2

    - name: check nothing changed
      assert:
        that:
          - k8s_service_2 is not changed

    - name: change service ports
      k8s:
        definition:
          apiVersion: v1
          kind: Service
          metadata:
            name: apply-svc
            namespace: "{{ apply_namespace }}"
          spec:
            selector:
              app: whatever
            ports:
              - name: http
                port: 8081
                targetPort: 8081
        apply: yes
      register: k8s_service_3

    - name: check ports are correct
      assert:
        that:
          - k8s_service_3 is changed
          - k8s_service_3.result.spec.ports | length == 1
          - k8s_service_3.result.spec.ports[0].port == 8081

  always:
    - name: remove namespace
      k8s:
        kind: Namespace
        name: "{{ apply_namespace }}"
        state: absent


@@ -0,0 +1,71 @@
# TODO: This is the only way I could get the kubeconfig, I don't know why. Running the lookup outside of debug seems to return an empty string
#- debug: msg={{ lookup('env', 'K8S_AUTH_KUBECONFIG') }}
#  register: kubeconfig

# Kubernetes resources
- block:
    - name: Create a namespace
      k8s:
        name: crd
        kind: Namespace

    - name: install custom resource definitions
      k8s:
        definition: "{{ lookup('file', role_path + '/files/setup-crd.yml') }}"

    - name: pause 5 seconds to avoid race condition
      pause:
        seconds: 5

    - name: create custom resource definition
      k8s:
        definition: "{{ lookup('file', role_path + '/files/crd-resource.yml') }}"
        namespace: crd
        apply: "{{ create_crd_with_apply | default(omit) }}"
      register: create_crd

    - name: patch custom resource definition
      k8s:
        definition: "{{ lookup('file', role_path + '/files/crd-resource.yml') }}"
        namespace: crd
      register: recreate_crd
      ignore_errors: yes

    - name: assert that recreating crd is as expected
      assert:
        that:
          - recreate_crd_default_merge_expectation

    - block:
        - name: recreate custom resource definition with merge_type
          k8s:
            definition: "{{ lookup('file', role_path + '/files/crd-resource.yml') }}"
            merge_type: merge
            namespace: crd
          register: recreate_crd_with_merge

        - name: recreate custom resource definition with merge_type list
          k8s:
            definition: "{{ lookup('file', role_path + '/files/crd-resource.yml') }}"
            merge_type:
              - strategic-merge
              - merge
            namespace: crd
          register: recreate_crd_with_merge_list
      when: recreate_crd is successful

    - name: remove crd
      k8s:
        definition: "{{ lookup('file', role_path + '/files/crd-resource.yml') }}"
        namespace: crd
        state: absent

  always:
    - name: remove crd namespace
      k8s:
        kind: Namespace
        name: crd
        state: absent
      ignore_errors: yes


@@ -0,0 +1,101 @@
- name: ensure that there are actually some nodes
  k8s_info:
    kind: Node
  register: nodes

- block:
    - set_fact:
        delete_namespace: delete

    - name: ensure namespace exists
      k8s:
        definition:
          apiVersion: v1
          kind: Namespace
          metadata:
            name: "{{ delete_namespace }}"

    - name: add a daemonset
      k8s:
        definition:
          apiVersion: extensions/v1beta1
          kind: DaemonSet
          metadata:
            name: delete-daemonset
            namespace: "{{ delete_namespace }}"
          spec:
            selector:
              matchLabels:
                app: "{{ k8s_pod_name }}"
            template: "{{ k8s_pod_template }}"
        wait: yes
        wait_timeout: 180
      vars:
        k8s_pod_name: delete-ds
        k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:1
      register: ds

    - name: check that daemonset wait worked
      assert:
        that:
          - ds.result.status.currentNumberScheduled == ds.result.status.desiredNumberScheduled

    - name: check if pods exist
      k8s_info:
        namespace: "{{ delete_namespace }}"
        kind: Pod
        label_selectors:
          - "app={{ k8s_pod_name }}"
      vars:
        k8s_pod_name: delete-ds
      register: pods_create

    - name: assert that there are pods
      assert:
        that:
          - pods_create.resources

    - name: remove the daemonset
      k8s:
        kind: DaemonSet
        name: delete-daemonset
        namespace: "{{ delete_namespace }}"
        state: absent
        wait: yes

    - name: show status of pods
      k8s_info:
        namespace: "{{ delete_namespace }}"
        kind: Pod
        label_selectors:
          - "app={{ k8s_pod_name }}"
      vars:
        k8s_pod_name: delete-ds

    - name: wait for background deletion
      pause:
        seconds: 30

    - name: check if pods still exist
      k8s_info:
        namespace: "{{ delete_namespace }}"
        kind: Pod
        label_selectors:
          - "app={{ k8s_pod_name }}"
      vars:
        k8s_pod_name: delete-ds
      register: pods_delete

    - name: assert that deleting the daemonset deleted the pods
      assert:
        that:
          - not pods_delete.resources

  always:
    - name: remove namespace
      k8s:
        kind: Namespace
        name: "{{ delete_namespace }}"
        state: absent

  when: (nodes.resources | length) > 0


@@ -0,0 +1,375 @@
# TODO: This is the only way I could get the kubeconfig, I don't know why. Running the lookup outside of debug seems to return an empty string
#- debug: msg={{ lookup('env', 'K8S_AUTH_KUBECONFIG') }}
#  register: kubeconfig

# Kubernetes resources
- include_tasks: delete.yml
- include_tasks: apply.yml
- include_tasks: waiter.yml

- block:
    - name: Create a namespace
      k8s:
        name: testing
        kind: Namespace
      register: output

    - name: show output
      debug:
        var: output

    - name: Setting validate_certs to true causes a failure
      k8s:
        name: testing
        kind: Namespace
        validate_certs: yes
      ignore_errors: yes
      register: output

    - name: assert that validate_certs caused a failure (and therefore was correctly translated to verify_ssl)
      assert:
        that:
          - output is failed

    - name: k8s_info works with empty resources
      k8s_info:
        kind: Deployment
        namespace: testing
        api_version: extensions/v1beta1
      register: k8s_info

    - name: assert that k8s_info is in correct format
      assert:
        that:
          - "'resources' in k8s_info"
          - not k8s_info.resources

    - name: Create a service
      k8s:
        state: present
        resource_definition: &svc
          apiVersion: v1
          kind: Service
          metadata:
            name: web
            namespace: testing
            labels:
              app: galaxy
              service: web
          spec:
            selector:
              app: galaxy
              service: web
            ports:
              - protocol: TCP
                targetPort: 8000
                name: port-8000-tcp
                port: 8000
      register: output

    - name: show output
      debug:
        var: output

    - name: Create the service again
      k8s:
        state: present
        resource_definition: *svc
      register: output

    - name: Service creation should be idempotent
      assert:
        that: not output.changed

    - name: Create a ConfigMap
      k8s:
        kind: ConfigMap
        name: test-force-update
        namespace: testing
        definition:
          data:
            key: value

    - name: Force update ConfigMap
      k8s:
        kind: ConfigMap
        name: test-force-update
        namespace: testing
        definition:
          data:
            key: newvalue
        force: yes

    - name: Create PVC
      k8s:
        state: present
        inline: &pvc
          apiVersion: v1
          kind: PersistentVolumeClaim
          metadata:
            name: elastic-volume
            namespace: testing
          spec:
            resources:
              requests:
                storage: 5Gi
            accessModes:
              - ReadWriteOnce
      register: output

    - name: Show output
      debug:
        var: output

    - name: Create the PVC again
      k8s:
        state: present
        inline: *pvc
      register: output

    - name: PVC creation should be idempotent
      assert:
        that: not output.changed

    - name: Create deployment
      k8s:
        state: present
        inline: &deployment
          apiVersion: extensions/v1beta1
          kind: Deployment
          metadata:
            name: elastic
            labels:
              app: galaxy
              service: elastic
            namespace: testing
          spec:
            template:
              metadata:
                labels:
                  app: galaxy
                  service: elastic
              spec:
                containers:
                  - name: elastic
                    volumeMounts:
                      - mountPath: /usr/share/elasticsearch/data
                        name: elastic-volume
                    command: ['elasticsearch']
                    image: 'ansible/galaxy-elasticsearch:2.4.6'
                volumes:
                  - name: elastic-volume
                    persistentVolumeClaim:
                      claimName: elastic-volume
            replicas: 1
            strategy:
              type: RollingUpdate
      register: output

    - name: Show output
      debug:
        var: output

    - name: Create deployment again
      k8s:
        state: present
        inline: *deployment
      register: output

    - name: Deployment creation should be idempotent
      assert:
        that: not output.changed

    - debug:
        var: k8s_openshift

    - include: openshift.yml
      when: k8s_openshift | bool

    ### Type tests
    - name: Create a namespace from a string
      k8s:
        definition: |+
          ---
          kind: Namespace
          apiVersion: v1
          metadata:
            name: testing1

    - name: Namespace should exist
      k8s_info:
        kind: Namespace
        api_version: v1
        name: testing1
      register: k8s_info_testing1
      failed_when: not k8s_info_testing1.resources or k8s_info_testing1.resources[0].status.phase != "Active"

    - name: Create resources from a multidocument yaml string
      k8s:
        definition: |+
          ---
          kind: Namespace
          apiVersion: v1
          metadata:
            name: testing2
          ---
          kind: Namespace
          apiVersion: v1
          metadata:
            name: testing3

    - name: Lookup namespaces
      k8s_info:
        api_version: v1
        kind: Namespace
        name: "{{ item }}"
      loop:
        - testing2
        - testing3
      register: k8s_namespaces

    - name: Resources should exist
      assert:
        that: item.resources[0].status.phase == 'Active'
      loop: "{{ k8s_namespaces.results }}"

    - name: Delete resources from a multidocument yaml string
      k8s:
        state: absent
        definition: |+
          ---
          kind: Namespace
          apiVersion: v1
          metadata:
            name: testing2
          ---
          kind: Namespace
          apiVersion: v1
          metadata:
            name: testing3

    - name: Lookup namespaces
      k8s_info:
        api_version: v1
        kind: Namespace
        name: "{{ item }}"
      loop:
        - testing2
        - testing3
      register: k8s_namespaces

    - name: Resources should not exist
      assert:
        that:
          - not item.resources or item.resources[0].status.phase == "Terminating"
      loop: "{{ k8s_namespaces.results }}"

    - name: Create resources from a list
      k8s:
        definition:
          - kind: Namespace
            apiVersion: v1
            metadata:
              name: testing4
          - kind: Namespace
            apiVersion: v1
            metadata:
              name: testing5

    - name: Lookup namespaces
      k8s_info:
        api_version: v1
        kind: Namespace
        name: "{{ item }}"
      loop:
        - testing4
        - testing5
      register: k8s_namespaces

    - name: Resources should exist
      assert:
        that: item.resources[0].status.phase == 'Active'
      loop: "{{ k8s_namespaces.results }}"

    - name: Delete resources from a list
      k8s:
        state: absent
        definition:
          - kind: Namespace
            apiVersion: v1
            metadata:
              name: testing4
          - kind: Namespace
            apiVersion: v1
            metadata:
              name: testing5

    - k8s_info:
        api_version: v1
        kind: Namespace
        name: "{{ item }}"
      loop:
        - testing4
        - testing5
      register: k8s_info

    - name: Resources are terminating if still in results
      assert:
        that: not item.resources or item.resources[0].status.phase == "Terminating"
      loop: "{{ k8s_info.results }}"

    - name: Create resources from a yaml string ending with ---
      k8s:
        definition: |+
          ---
          kind: Namespace
          apiVersion: v1
          metadata:
            name: testing6
          ---

    - name: Namespace should exist
      k8s_info:
        kind: Namespace
        api_version: v1
        name: testing6
      register: k8s_info_testing6
      failed_when: not k8s_info_testing6.resources or k8s_info_testing6.resources[0].status.phase != "Active"

    - include_tasks: crd.yml
    - include_tasks: lists.yml
    - include_tasks: append_hash.yml

  always:
    - name: Delete all namespaces
      k8s:
        state: absent
        definition:
          - kind: Namespace
            apiVersion: v1
            metadata:
              name: testing
          - kind: Namespace
            apiVersion: v1
            metadata:
              name: testing1
          - kind: Namespace
            apiVersion: v1
            metadata:
              name: testing2
          - kind: Namespace
            apiVersion: v1
            metadata:
              name: testing3
          - kind: Namespace
            apiVersion: v1
            metadata:
              name: testing4
          - kind: Namespace
            apiVersion: v1
            metadata:
              name: testing5
          - kind: Namespace
            apiVersion: v1
            metadata:
              name: testing6
      ignore_errors: yes


@@ -0,0 +1,140 @@
---
- name: Ensure testing1 namespace exists
  k8s:
    api_version: v1
    kind: Namespace
    name: testing1

- block:
    - name: Create configmaps
      k8s:
        namespace: testing1
        definition:
          apiVersion: v1
          kind: ConfigMapList
          items: '{{ configmaps }}'

    - name: Get ConfigMaps
      k8s_info:
        api_version: v1
        kind: ConfigMap
        namespace: testing1
        label_selectors:
          - app=test
      register: cms

    - name: All three configmaps should exist
      assert:
        that: item.data.a is defined
      with_items: '{{ cms.resources }}'

    - name: Delete configmaps
      k8s:
        state: absent
        namespace: testing1
        definition:
          apiVersion: v1
          kind: ConfigMapList
          items: '{{ configmaps }}'

    - name: Get ConfigMaps
      k8s_info:
        api_version: v1
        kind: ConfigMap
        namespace: testing1
        label_selectors:
          - app=test
      register: cms

    - name: All three configmaps should not exist
      assert:
        that: not cms.resources

  vars:
    configmaps:
      - metadata:
          name: list-example-1
          labels:
            app: test
        data:
          a: first
      - metadata:
          name: list-example-2
          labels:
            app: test
        data:
          a: second
      - metadata:
          name: list-example-3
          labels:
            app: test
        data:
          a: third

- block:
    - name: Create list of arbitrary resources
      k8s:
        namespace: testing1
        definition:
          apiVersion: v1
          kind: List
          namespace: testing1
          items: '{{ resources }}'

    - name: Get the created resources
      k8s_info:
        api_version: '{{ item.apiVersion }}'
        kind: '{{ item.kind }}'
        namespace: testing1
        name: '{{ item.metadata.name }}'
      register: list_resources
      with_items: '{{ resources }}'

    - name: All resources should exist
      assert:
        that: ((list_resources.results | sum(attribute="resources", start=[])) | length) == (resources | length)

    - name: Delete list of arbitrary resources
      k8s:
        state: absent
        namespace: testing1
        definition:
          apiVersion: v1
          kind: List
          namespace: testing1
          items: '{{ resources }}'

    - name: Get the resources
      k8s_info:
        api_version: '{{ item.apiVersion }}'
        kind: '{{ item.kind }}'
        namespace: testing1
        name: '{{ item.metadata.name }}'
      register: list_resources
      with_items: '{{ resources }}'

    - name: The resources should not exist
      assert:
        that: not ((list_resources.results | sum(attribute="resources", start=[])) | length)

  vars:
    resources:
      - apiVersion: v1
        kind: ConfigMap
        metadata:
          name: list-example-4
        data:
          key: value
      - apiVersion: v1
        kind: Service
        metadata:
          name: list-example-svc
          labels:
            app: test
        spec:
          selector:
            app: test
          ports:
            - protocol: TCP
              targetPort: 8000
              name: port-8000-tcp
              port: 8000


@@ -0,0 +1,92 @@
- set_fact:
    virtualenv: "{{ remote_tmp_dir }}/virtualenv"
    virtualenv_command: "{{ ansible_python_interpreter }} -m virtualenv"

- set_fact:
    virtualenv_interpreter: "{{ virtualenv }}/bin/python"

- pip:
    name: virtualenv

# Test graceful failure for missing kubernetes-validate
- pip:
    name:
      - openshift>=0.9.2
      - coverage
    virtualenv: "{{ virtualenv }}"
    virtualenv_command: "{{ virtualenv_command }}"
    virtualenv_site_packages: no

- include_tasks: validate_not_installed.yml
  vars:
    ansible_python_interpreter: "{{ virtualenv_interpreter }}"

- file:
    path: "{{ virtualenv }}"
    state: absent
  no_log: yes

# Test validate with kubernetes-validate
- pip:
    name:
      - kubernetes-validate==1.12.0
      - openshift>=0.9.2
      - coverage
    virtualenv: "{{ virtualenv }}"
    virtualenv_command: "{{ virtualenv_command }}"
    virtualenv_site_packages: no

- include_tasks: validate_installed.yml
  vars:
    ansible_python_interpreter: "{{ virtualenv_interpreter }}"
    playbook_namespace: ansible-test-k8s-validate

- file:
    path: "{{ virtualenv }}"
    state: absent
  no_log: yes

# Test graceful failure for older versions of openshift
- pip:
    name:
      - openshift==0.6.0
      - kubernetes==6.0.0
      - coverage
    virtualenv: "{{ virtualenv }}"
    virtualenv_command: "{{ virtualenv_command }}"
    virtualenv_site_packages: no

- include_tasks: older_openshift_fail.yml
  vars:
    ansible_python_interpreter: "{{ virtualenv_interpreter }}"
    recreate_crd_default_merge_expectation: recreate_crd is failed
    playbook_namespace: ansible-test-k8s-older-openshift

- file:
    path: "{{ virtualenv }}"
    state: absent
  no_log: yes

# Run full test suite
- pip:
    name:
      - openshift>=0.9.2
      - coverage
    virtualenv: "{{ virtualenv }}"
    virtualenv_command: "{{ virtualenv_command }}"
    virtualenv_site_packages: no

- include_tasks: full_test.yml
  vars:
    ansible_python_interpreter: "{{ virtualenv_interpreter }}"
    create_crd_with_apply: no
    playbook_namespace: ansible-test-k8s-full

- file:
    path: "{{ virtualenv }}"
    state: absent
  no_log: yes


@@ -0,0 +1,69 @@
- python_requirements_info:
    dependencies:
      - openshift==0.6.0
      - kubernetes==6.0.0

# append_hash
- name: use append_hash with ConfigMap
  k8s:
    definition:
      metadata:
        name: config-map-test
        namespace: "{{ playbook_namespace }}"
      apiVersion: v1
      kind: ConfigMap
      data:
        hello: world
    append_hash: yes
  ignore_errors: yes
  register: k8s_append_hash

- name: assert that append_hash fails gracefully
  assert:
    that:
      - k8s_append_hash is failed
      - "'Failed to import the required Python library (openshift >= 0.7.2)' in k8s_append_hash.msg"
      - "'. This is required for append_hash.' in k8s_append_hash.msg"

# validate
- name: attempt to use validate with older openshift
  k8s:
    definition:
      metadata:
        name: config-map-test
        namespace: "{{ playbook_namespace }}"
      apiVersion: v1
      kind: ConfigMap
      data:
        hello: world
    validate:
      fail_on_error: yes
  ignore_errors: yes
  register: k8s_validate

- name: assert that validate fails gracefully
  assert:
    that:
      - k8s_validate is failed
      - "k8s_validate.msg == 'openshift >= 0.8.0 is required for validate'"

# apply
- name: attempt to use apply with older openshift
  k8s:
    definition:
      metadata:
        name: config-map-test
        namespace: "{{ playbook_namespace }}"
      apiVersion: v1
      kind: ConfigMap
      data:
        hello: world
    apply: yes
  ignore_errors: yes
  register: k8s_apply

- name: assert that apply fails gracefully
  assert:
    that:
      - k8s_apply is failed
      - "k8s_apply.msg.startswith('Failed to import the required Python library (openshift >= 0.9.2)')"


@@ -0,0 +1,61 @@
# OpenShift Resources
- name: Create a project
  k8s:
    name: testing
    kind: Project
    api_version: v1
    apply: no
  register: output

- name: show output
  debug:
    var: output

- name: Create deployment config
  k8s:
    state: present
    inline: &dc
      apiVersion: v1
      kind: DeploymentConfig
      metadata:
        name: elastic
        labels:
          app: galaxy
          service: elastic
        namespace: testing
      spec:
        template:
          metadata:
            labels:
              app: galaxy
              service: elastic
          spec:
            containers:
              - name: elastic
                volumeMounts:
                  - mountPath: /usr/share/elasticsearch/data
                    name: elastic-volume
                command: ['elasticsearch']
                image: 'ansible/galaxy-elasticsearch:2.4.6'
            volumes:
              - name: elastic-volume
                persistentVolumeClaim:
                  claimName: elastic-volume
        replicas: 1
        strategy:
          type: Rolling
  register: output

- name: Show output
  debug:
    var: output

- name: Create deployment config again
  k8s:
    state: present
    inline: *dc
  register: output

- name: DC creation should be idempotent
  assert:
    that: not output.changed


@@ -0,0 +1,125 @@
- block:
    - name: Create a namespace
      k8s:
        name: "{{ playbook_namespace }}"
        kind: Namespace

    - copy:
        src: files
        dest: "{{ remote_tmp_dir }}"

    - name: incredibly simple ConfigMap
      k8s:
        definition:
          apiVersion: v1
          kind: ConfigMap
          metadata:
            name: hello
            namespace: "{{ playbook_namespace }}"
        validate:
          fail_on_error: yes
      register: k8s_with_validate

    - name: assert that k8s_with_validate succeeds
      assert:
        that:
          - k8s_with_validate is successful

    - name: extra property does not fail without strict
      k8s:
        src: "{{ remote_tmp_dir }}/files/kuard-extra-property.yml"
        namespace: "{{ playbook_namespace }}"
        validate:
          fail_on_error: yes
          strict: no

    - name: extra property fails with strict
      k8s:
        src: "{{ remote_tmp_dir }}/files/kuard-extra-property.yml"
        namespace: "{{ playbook_namespace }}"
        validate:
          fail_on_error: yes
          strict: yes
      ignore_errors: yes
      register: extra_property

    - name: check that extra property fails with strict
      assert:
        that:
          - extra_property is failed

    - name: invalid type fails at validation stage
      k8s:
        src: "{{ remote_tmp_dir }}/files/kuard-invalid-type.yml"
        namespace: "{{ playbook_namespace }}"
        validate:
          fail_on_error: yes
          strict: no
      ignore_errors: yes
      register: invalid_type

    - name: check that invalid type fails
      assert:
        that:
          - invalid_type is failed

    - name: invalid type fails with warnings when fail_on_error is False
      k8s:
        src: "{{ remote_tmp_dir }}/files/kuard-invalid-type.yml"
        namespace: "{{ playbook_namespace }}"
        validate:
          fail_on_error: no
          strict: no
      ignore_errors: yes
      register: invalid_type_no_fail

    - name: check that invalid type fails
      assert:
        that:
          - invalid_type_no_fail is failed

    - name: setup custom resource definition
      k8s:
        src: "{{ remote_tmp_dir }}/files/setup-crd.yml"

    - name: wait a few seconds
      pause:
        seconds: 5

    - name: add custom resource definition
      k8s:
        src: "{{ remote_tmp_dir }}/files/crd-resource.yml"
        namespace: "{{ playbook_namespace }}"
        validate:
          fail_on_error: yes
          strict: yes
      register: unknown_kind

    - name: check that unknown kind warns
      assert:
        that:
          - unknown_kind is successful
          - "'warnings' in unknown_kind"

  always:
    - name: remove custom resource
      k8s:
        definition: "{{ lookup('file', role_path + '/files/crd-resource.yml') }}"
        namespace: "{{ playbook_namespace }}"
        state: absent
      ignore_errors: yes

    - name: remove custom resource definitions
      k8s:
        definition: "{{ lookup('file', role_path + '/files/setup-crd.yml') }}"
        state: absent

    - name: Delete namespace
      k8s:
        state: absent
        definition:
          - kind: Namespace
            apiVersion: v1
            metadata:
              name: "{{ playbook_namespace }}"
      ignore_errors: yes


@@ -0,0 +1,23 @@
- python_requirements_info:
    dependencies:
      - openshift
      - kubernetes
      - kubernetes-validate

- k8s:
    definition:
      apiVersion: v1
      kind: ConfigMap
      metadata:
        name: hello
        namespace: default
    validate:
      fail_on_error: yes
  ignore_errors: yes
  register: k8s_no_validate

- name: assert that k8s_no_validate fails gracefully
  assert:
    that:
      - k8s_no_validate is failed
      - "k8s_no_validate.msg == 'kubernetes-validate python library is required to validate resources'"


@@ -0,0 +1,355 @@
- name: ensure that there are actually some nodes
  k8s_info:
    kind: Node
  register: nodes

- block:
    - set_fact:
        wait_namespace: wait

    - name: ensure namespace exists
      k8s:
        definition:
          apiVersion: v1
          kind: Namespace
          metadata:
            name: "{{ wait_namespace }}"

    - name: add a simple pod
      k8s:
        definition:
          apiVersion: v1
          kind: Pod
          metadata:
            name: "{{ k8s_pod_name }}"
            namespace: "{{ wait_namespace }}"
          spec: "{{ k8s_pod_spec }}"
        wait: yes
      vars:
        k8s_pod_name: wait-pod
        k8s_pod_image: alpine:3.8
        k8s_pod_command:
          - sleep
          - "10000"
      register: wait_pod
      ignore_errors: yes

    - name: assert that pod creation succeeded
      assert:
        that:
          - wait_pod is successful

    - name: add a daemonset
      k8s:
        definition:
          apiVersion: extensions/v1beta1
          kind: DaemonSet
          metadata:
            name: wait-daemonset
            namespace: "{{ wait_namespace }}"
          spec:
            selector:
              matchLabels:
                app: "{{ k8s_pod_name }}"
            template: "{{ k8s_pod_template }}"
        wait: yes
        wait_sleep: 3
        wait_timeout: 180
      vars:
        k8s_pod_name: wait-ds
        k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:1
      register: ds

    - name: check that daemonset wait worked
      assert:
        that:
          - ds.result.status.currentNumberScheduled == ds.result.status.desiredNumberScheduled

    - name: update a daemonset in check_mode
      k8s:
        definition:
          apiVersion: extensions/v1beta1
          kind: DaemonSet
          metadata:
            name: wait-daemonset
            namespace: "{{ wait_namespace }}"
          spec:
            selector:
              matchLabels:
                app: "{{ k8s_pod_name }}"
            updateStrategy:
              type: RollingUpdate
            template: "{{ k8s_pod_template }}"
        wait: yes
        wait_sleep: 3
        wait_timeout: 180
      check_mode: yes
      vars:
        k8s_pod_name: wait-ds
        k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:2
      register: update_ds_check_mode

    - name: check that check_mode returned changed
      assert:
        that:
          - update_ds_check_mode is changed

    - name: update a daemonset
      k8s:
        definition:
          apiVersion: extensions/v1beta1
          kind: DaemonSet
          metadata:
            name: wait-daemonset
            namespace: "{{ wait_namespace }}"
          spec:
            selector:
              matchLabels:
                app: "{{ k8s_pod_name }}"
            updateStrategy:
              type: RollingUpdate
            template: "{{ k8s_pod_template }}"
        wait: yes
        wait_sleep: 3
        wait_timeout: 180
      vars:
        k8s_pod_name: wait-ds
        k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:3
      register: ds

    - name: get updated pods
      k8s_info:
        api_version: v1
        kind: Pod
        namespace: "{{ wait_namespace }}"
        label_selectors:
          - app=wait-ds
      register: updated_ds_pods

    - name: check that daemonset wait worked
      assert:
        that:
          - ds.result.status.currentNumberScheduled == ds.result.status.desiredNumberScheduled
          - updated_ds_pods.resources[0].spec.containers[0].image.endswith(":3")

    - name: add a crashing pod
      k8s:
        definition:
          apiVersion: v1
          kind: Pod
          metadata:
            name: "{{ k8s_pod_name }}"
            namespace: "{{ wait_namespace }}"
          spec: "{{ k8s_pod_spec }}"
        wait: yes
        wait_sleep: 1
        wait_timeout: 30
      vars:
        k8s_pod_name: wait-crash-pod
        k8s_pod_image: alpine:3.8
        k8s_pod_command:
          - /bin/false
      register: crash_pod
      ignore_errors: yes

    - name: check that task failed
      assert:
        that:
          - crash_pod is failed

    - name: use a non-existent image
      k8s:
        definition:
          apiVersion: v1
          kind: Pod
          metadata:
            name: "{{ k8s_pod_name }}"
            namespace: "{{ wait_namespace }}"
          spec: "{{ k8s_pod_spec }}"
        wait: yes
        wait_sleep: 1
        wait_timeout: 30
      vars:
        k8s_pod_name: wait-no-image-pod
        k8s_pod_image: i_made_this_up:and_this_too
      register: no_image_pod
      ignore_errors: yes

    - name: check that task failed
      assert:
        that:
          - no_image_pod is failed

    - name: add a deployment
      k8s:
        definition:
          apiVersion: extensions/v1beta1
          kind: Deployment
          metadata:
            name: wait-deploy
            namespace: "{{ wait_namespace }}"
          spec:
            replicas: 3
            selector:
              matchLabels:
                app: "{{ k8s_pod_name }}"
            template: "{{ k8s_pod_template }}"
        wait: yes
      vars:
        k8s_pod_name: wait-deploy
        k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:1
        k8s_pod_ports:
          - containerPort: 8080
            name: http
            protocol: TCP
      register: deploy

    - name: check that deployment wait worked
      assert:
        that:
          - deploy.result.status.availableReplicas == deploy.result.status.replicas

    - name: update a deployment
      k8s:
        definition:
          apiVersion: extensions/v1beta1
          kind: Deployment
          metadata:
            name: wait-deploy
            namespace: "{{ wait_namespace }}"
          spec:
            replicas: 3
            selector:
              matchLabels:
                app: "{{ k8s_pod_name }}"
            template: "{{ k8s_pod_template }}"
        wait: yes
      vars:
        k8s_pod_name: wait-deploy
        k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:2
        k8s_pod_ports:
          - containerPort: 8080
            name: http
            protocol: TCP
      register: update_deploy

    - name: get updated pods
      k8s_info:
        api_version: v1
        kind: Pod
        namespace: "{{ wait_namespace }}"
        label_selectors:
          - app=wait-deploy
      register: updated_deploy_pods

    - name: check that deployment wait worked
      assert:
        that:
          - deploy.result.status.availableReplicas == deploy.result.status.replicas
          - updated_deploy_pods.resources[0].spec.containers[0].image.endswith(":2")

    - name: pause a deployment
      k8s:
        definition:
          apiVersion: extensions/v1beta1
          kind: Deployment
          metadata:
            name: wait-deploy
            namespace: "{{ wait_namespace }}"
          spec:
            paused: True
        apply: no
        wait: yes
        wait_condition:
          type: Progressing
          status: Unknown
          reason: DeploymentPaused
      register: pause_deploy

    - name: check that paused deployment wait worked
      assert:
        that:
          - condition.reason == "DeploymentPaused"
          - condition.status == "Unknown"
      vars:
        condition: '{{ pause_deploy.result.status.conditions | json_query("[?type==`Progressing`]") | first }}'

    - name: add a service based on the deployment
      k8s:
        definition:
          apiVersion: v1
          kind: Service
          metadata:
            name: wait-svc
            namespace: "{{ wait_namespace }}"
          spec:
            selector:
              app: "{{ k8s_pod_name }}"
            ports:
              - port: 8080
                targetPort: 8080
                protocol: TCP
        wait: yes
      vars:
        k8s_pod_name: wait-deploy
      register: service

    - name: assert that waiting for service works
      assert:
        that:
          - service is successful

    - name: add a crashing deployment
      k8s:
        definition:
          apiVersion: extensions/v1beta1
          kind: Deployment
          metadata:
            name: wait-crash-deploy
            namespace: "{{ wait_namespace }}"
          spec:
            replicas: 3
            selector:
              matchLabels:
                app: "{{ k8s_pod_name }}"
            template: "{{ k8s_pod_template }}"
        wait: yes
      vars:
        k8s_pod_name: wait-crash-deploy
        k8s_pod_image: alpine:3.8
        k8s_pod_command:
          - /bin/false
      register: wait_crash_deploy
      ignore_errors: yes

    - name: check that task failed
      assert:
        that:
          - wait_crash_deploy is failed

    - name: remove Pod with very short timeout
      k8s:
        api_version: v1
        kind: Pod
        name: wait-pod
        namespace: "{{ wait_namespace }}"
        state: absent
        wait: yes
        wait_sleep: 2
        wait_timeout: 5
      ignore_errors: yes
      register: short_wait_remove_pod

    - name: check that task failed
      assert:
        that:
          - short_wait_remove_pod is failed

  always:
    - name: remove namespace
      k8s:
        kind: Namespace
        name: "{{ wait_namespace }}"
        state: absent

  when: (nodes.resources | length) > 0