[backport/2.1] molecule to ansible-test CI migration (#398)

Integration testing migration from Molecule to ansible-test.
This commit is contained in:
abikouo
2022-03-08 17:25:50 +01:00
committed by GitHub
parent abcc3e884c
commit 4b8b3fa1ee
113 changed files with 665 additions and 1258 deletions

View File

@@ -0,0 +1,7 @@
slow
time=609
helm_info
helm_repository
helm_template
helm_plugin
helm_plugin_info

View File

@@ -0,0 +1,16 @@
---
# Archive name of the Helm release to download for the current platform.
helm_archive_name: "helm-{{ helm_version }}-{{ ansible_system | lower }}-amd64.tar.gz"
# Location of the helm binary after the archive is extracted.
helm_binary: "/tmp/helm/{{ ansible_system | lower }}-amd64/helm"
# Namespace used by the helm integration tests.
helm_namespace: helm
chart_test: "ingress-nginx"
chart_test_local_path: "nginx-ingress"
# Versions are quoted so YAML can never coerce a future value into a float.
chart_test_version: "3.8.0"
chart_test_version_local_path: "1.32.0"
chart_test_version_upgrade: "3.9.0"
chart_test_version_upgrade_local_path: "1.33.0"
chart_test_repo: "https://kubernetes.github.io/ingress-nginx"
# Clone over HTTPS; plain HTTP is insecure and only works via a redirect.
chart_test_git_repo: "https://github.com/helm/charts.git"
chart_test_values:
  revisionHistoryLimit: 0
  myValue: "changed"

View File

@@ -0,0 +1,5 @@
apiVersion: v2
name: appversionless-chart
# Fixture chart (v2, deliberately without appVersion) for the integration tests.
description: A chart used in integration tests
type: application
version: "0.2.0"

View File

@@ -0,0 +1,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: test-chart-configmap
data:
myValue: {{ default "test" .Values.myValue }}
myOtherValue: {{ default "foo" .Values.myOtherValue }}

View File

@@ -0,0 +1,5 @@
apiVersion: v2
name: appversionless-chart
# Fixture chart (v1, deliberately without appVersion) for the integration tests.
description: A chart used in integration tests
type: application
version: "0.1.0"

View File

@@ -0,0 +1,6 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: test-chart-configmap
data:
myValue: {{ default "test" .Values.myValue }}

View File

@@ -0,0 +1,11 @@
# Helm plugin manifest used as a fixture by the helm_plugin/helm_plugin_info tests.
name: "sample_plugin"
version: "0.0.1"
usage: "Sample Helm Plugin"
description: |-
  This plugin provides sample plugin to Helm.
# NOTE(review): duplicate 'usage' key — YAML takes the last value, so this
# multiline scalar overrides the quoted one above. Presumably intentional to
# exercise multiline-manifest parsing (see the "multiline comment" assertion in
# the plugin tests) — confirm before "fixing" the duplicate.
usage:
  This is new line
  This is another line
ignoreFlags: false
useTunnel: false
command: "$HELM_PLUGIN_DIR/main.sh"

View File

@@ -0,0 +1,6 @@
apiVersion: v2
name: test-chart
# Fixture chart (v2, with a default appVersion) for the integration tests.
description: A chart used in integration tests
type: application
version: "0.2.0"
appVersion: "default"

View File

@@ -0,0 +1,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: test-chart-configmap
data:
myValue: {{ default "test" .Values.myValue }}
myOtherValue: {{ default "foo" .Values.myOtherValue }}

View File

@@ -0,0 +1,6 @@
apiVersion: v2
name: test-chart
# Fixture chart (v1, with a default appVersion) for the integration tests.
description: A chart used in integration tests
type: application
version: "0.1.0"
appVersion: "default"

View File

@@ -0,0 +1,6 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: test-chart-configmap
data:
myValue: {{ default "test" .Values.myValue }}

View File

@@ -0,0 +1,5 @@
apiVersion: v2
name: test-crds
description: A chart with CRDs
type: application
version: 0.1.0

View File

@@ -0,0 +1,21 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: foos.example.com
spec:
group: example.com
versions:
- name: v1
served: true
storage: true
schema:
openAPIV3Schema:
type: object
properties:
foobar:
type: string
scope: Namespaced
names:
plural: foos
singular: foo
kind: Foo

View File

@@ -0,0 +1,2 @@
---
# Values file used by the values_files scenarios in tests_chart.yml; the
# asserts expect revisionHistoryLimit to round-trip as 0.
revisionHistoryLimit: 0

View File

@@ -0,0 +1,15 @@
---
# Download and extract the requested Helm release ({{ helm_version }}) under /tmp/helm/.
- name: Init Helm folders
  file:
    path: /tmp/helm/
    state: directory

- name: Unarchive Helm binary
  unarchive:
    src: 'https://get.helm.sh/{{ helm_archive_name }}'
    dest: /tmp/helm/
    # Archive is fetched on the target host, not copied from the controller.
    remote_src: true
  # The download can be flaky in CI; retry before giving up.
  retries: 10
  delay: 5
  register: result
  until: result is not failed

View File

@@ -0,0 +1,7 @@
---
# Run the full helm test suite once per Helm version under test.
- name: Run tests
  include_tasks: run_test.yml
  loop:
    - "v3.2.4"
  loop_control:
    loop_var: helm_version

View File

@@ -0,0 +1,45 @@
---
# End-to-end scenario for a single Helm version (helm_version is set by the caller).
- name: Ensure helm is not installed
  file:
    path: "{{ item }}"
    state: absent
  loop:
    - "/tmp/helm"

- name: Check failed if helm is not installed
  include_tasks: test_helm_not_installed.yml

- name: "Install {{ helm_version }}"
  include_tasks: install.yml

- name: "Ensure we honor the environment variables"
  include_tasks: test_read_envvars.yml

- name: tests_repository
  include_tasks: tests_repository.yml

# Exercise chart installation from every supported source type.
- name: Deploy charts
  include_tasks: "tests_chart/{{ test_chart_type }}.yml"
  loop:
    - from_local_path
    - from_repository
    - from_url
  loop_control:
    loop_var: test_chart_type

- name: Test helm plugin
  include_tasks: tests_helm_plugin.yml

- name: Test helm diff
  include_tasks: tests_helm_diff.yml

# https://github.com/ansible-collections/community.kubernetes/issues/296
- name: Test Skip CRDS feature in helm chart install
  include_tasks: test_crds.yml

- name: Clean helm install
  file:
    path: "{{ item }}"
    state: absent
  loop:
    - "/tmp/helm/"

View File

@@ -0,0 +1,100 @@
---
- name: Test CRDs
vars:
test_chart: "test-crds"
block:
- name: Create namespace
k8s:
kind: Namespace
name: "{{ helm_namespace }}"
- name: Copy test chart
copy:
src: "{{ test_chart }}"
dest: "/tmp/helm_test_crds/"
- name: Install chart while skipping CRDs
helm:
binary_path: "{{ helm_binary }}"
chart_ref: "/tmp/helm_test_crds/{{ test_chart }}"
namespace: "{{ helm_namespace }}"
name: test-crds
skip_crds: true
register: install
- assert:
that:
- install is changed
- install.status.name == "test-crds"
- name: Fail to create custom resource
k8s:
definition:
apiVersion: example.com/v1
kind: Foo
metadata:
namespace: "{{ helm_namespace }}"
name: test-foo
foobar: footest
ignore_errors: true
register: result
- assert:
that:
- result is failed
- "result.msg.startswith('Failed to find exact match for example.com/v1.Foo')"
# Helm won't install CRDs into an existing release, so we need to delete this, first
- name: Uninstall chart
helm:
binary_path: "{{ helm_binary }}"
namespace: "{{ helm_namespace }}"
name: test-crds
state: absent
- name: Install chart with CRDs
helm:
binary_path: "{{ helm_binary }}"
chart_ref: "/tmp/helm_test_crds/{{ test_chart }}"
namespace: "{{ helm_namespace }}"
name: test-crds
- name: Create custom resource
k8s:
definition:
apiVersion: example.com/v1
kind: Foo
metadata:
namespace: "{{ helm_namespace }}"
name: test-foo
foobar: footest
register: result
- assert:
that:
- result is changed
- result.result.foobar == "footest"
always:
- name: Remove chart
file:
path: "/tmp/helm_test_crds"
state: absent
ignore_errors: true
- name: Remove namespace
k8s:
kind: Namespace
name: "{{ helm_namespace }}"
state: absent
wait: true
wait_timeout: 180
ignore_errors: true
# CRDs aren't deleted with a namespace, so we need to manually delete it
- name: Remove CRD
k8s:
kind: CustomResourceDefinition
name: foos.example.com
state: absent
ignore_errors: true

View File

@@ -0,0 +1,15 @@
---
# Calling the helm module with a non-existent binary must fail with a clear error.
- name: Failed test when helm is not installed
  helm:
    # Append a suffix so the path is guaranteed not to exist.
    binary_path: "{{ helm_binary }}_fake"
    name: test
    chart_ref: "{{ chart_test }}"
    namespace: "{{ helm_namespace }}"
  ignore_errors: true
  register: helm_missing_binary

- name: Assert that helm is not installed
  assert:
    that:
      - helm_missing_binary is failed
      - "'No such file or directory' in helm_missing_binary.msg"

View File

@@ -0,0 +1,10 @@
---
# The helm module must forward K8S_AUTH_HOST to the helm binary: pointing it at
# a bogus server should surface that server's URL in helm's stderr.
- name: Pass a bogus server through the K8S_AUTH_HOST environment variable and ensure helm fails as expected
  helm:
    binary_path: "{{ helm_binary }}"
    state: absent
    name: does-not-exist
    namespace: "{{ helm_namespace }}"
  environment:
    K8S_AUTH_HOST: somewhere
  register: _helm_result
  failed_when: '"http://somewhere/version" not in _helm_result.stderr'

View File

@@ -0,0 +1,383 @@
---
- name: Chart tests
block:
- name: Create temp directory
tempfile:
state: directory
register: tmpdir
- name: Set temp directory fact
set_fact:
temp_dir: "{{ tmpdir.path }}"
- name: Check helm_info empty
helm_info:
binary_path: "{{ helm_binary }}"
name: test
namespace: "{{ helm_namespace }}"
register: empty_info
- name: "Assert that no charts are installed with helm_info"
assert:
that:
- empty_info.status is undefined
- name: "Install fail {{ chart_test }} from {{ source }}"
helm:
binary_path: "{{ helm_binary }}"
name: test
chart_ref: "{{ chart_source }}"
chart_version: "{{ chart_source_version | default(omit) }}"
namespace: "{{ helm_namespace }}"
ignore_errors: yes
register: install_fail
- name: "Assert that Install fail {{ chart_test }} from {{ source }}"
assert:
that:
- install_fail is failed
- "'Error: create: failed to create: namespaces \"' + helm_namespace + '\" not found' in install_fail.stderr"
- name: "Install {{ chart_test }} from {{ source }} in check mode"
helm:
binary_path: "{{ helm_binary }}"
name: test
chart_ref: "{{ chart_source }}"
chart_version: "{{ chart_source_version | default(omit) }}"
namespace: "{{ helm_namespace }}"
create_namespace: true
register: install_check_mode
check_mode: true
- name: "Assert that {{ chart_test }} chart is installed from {{ source }} in check mode"
assert:
that:
- install_check_mode is changed
- install_check_mode.status is defined
- install_check_mode.status.values is defined
- name: "Install {{ chart_test }} from {{ source }}"
helm:
binary_path: "{{ helm_binary }}"
name: test
chart_ref: "{{ chart_source }}"
chart_version: "{{ chart_source_version | default(omit) }}"
namespace: "{{ helm_namespace }}"
create_namespace: true
register: install
- name: "Assert that {{ chart_test }} chart is installed from {{ source }}"
assert:
that:
- install is changed
- install.status.chart == "{{ chart_test }}-{{ chart_test_version }}"
- install.status.status | lower == 'deployed'
- name: Check helm_info content
helm_info:
binary_path: "{{ helm_binary }}"
name: test
namespace: "{{ helm_namespace }}"
register: content_info
- name: "Assert that {{ chart_test }} is installed from {{ source }} with helm_info"
assert:
that:
- content_info.status.chart == "{{ chart_test }}-{{ chart_test_version }}"
- content_info.status.status | lower == 'deployed'
- name: Check idempotency
helm:
binary_path: "{{ helm_binary }}"
name: test
chart_ref: "{{ chart_source }}"
chart_version: "{{ chart_source_version | default(omit) }}"
namespace: "{{ helm_namespace }}"
register: install
- name: Assert idempotency
assert:
that:
- install is not changed
- install.status.chart == "{{ chart_test }}-{{ chart_test_version }}"
- install.status.status | lower == 'deployed'
- name: "Add vars to {{ chart_test }} from {{ source }}"
helm:
binary_path: "{{ helm_binary }}"
name: test
chart_ref: "{{ chart_source }}"
chart_version: "{{ chart_source_version | default(omit) }}"
namespace: "{{ helm_namespace }}"
values: "{{ chart_test_values }}"
register: install
- name: "Assert that {{ chart_test }} chart is upgraded with new var from {{ source }}"
assert:
that:
- install is changed
- install.status.status | lower == 'deployed'
- install.status.chart == "{{ chart_test }}-{{ chart_test_version }}"
- "install.status['values'].revisionHistoryLimit == 0"
- name: Check idempotency after adding vars
helm:
binary_path: "{{ helm_binary }}"
name: test
chart_ref: "{{ chart_source }}"
chart_version: "{{ chart_source_version | default(omit) }}"
namespace: "{{ helm_namespace }}"
values: "{{ chart_test_values }}"
register: install
- name: Assert idempotency after add vars
assert:
that:
- install is not changed
- install.status.status | lower == 'deployed'
- install.status.chart == "{{ chart_test }}-{{ chart_test_version }}"
- "install.status['values'].revisionHistoryLimit == 0"
- name: "Remove Vars to {{ chart_test }} from {{ source }}"
helm:
binary_path: "{{ helm_binary }}"
name: test
chart_ref: "{{ chart_source }}"
chart_version: "{{ chart_source_version | default(omit) }}"
namespace: "{{ helm_namespace }}"
register: install
- name: "Assert that {{ chart_test }} chart is upgraded with new var from {{ source }}"
assert:
that:
- install is changed
- install.status.status | lower == 'deployed'
- install.status.chart == "{{ chart_test }}-{{ chart_test_version }}"
- install.status['values'] == {}
- name: Check idempotency after removing vars
helm:
binary_path: "{{ helm_binary }}"
name: test
chart_ref: "{{ chart_source }}"
chart_version: "{{ chart_source_version | default(omit) }}"
namespace: "{{ helm_namespace }}"
register: install
- name: Assert idempotency after removing vars
assert:
that:
- install is not changed
- install.status.status | lower == 'deployed'
- install.status.chart == "{{ chart_test }}-{{ chart_test_version }}"
- install.status['values'] == {}
- name: "Upgrade {{ chart_test }} from {{ source }}"
helm:
binary_path: "{{ helm_binary }}"
name: test
chart_ref: "{{ chart_source_upgrade | default(chart_source) }}"
chart_version: "{{ chart_source_version_upgrade | default(omit) }}"
namespace: "{{ helm_namespace }}"
register: install
- name: "Assert that {{ chart_test }} chart is upgraded with new version from {{ source }}"
assert:
that:
- install is changed
- install.status.status | lower == 'deployed'
- install.status.chart == "{{ chart_test }}-{{ chart_test_version_upgrade }}"
- name: Check idempotency after upgrade
helm:
binary_path: "{{ helm_binary }}"
name: test
chart_ref: "{{ chart_source_upgrade | default(chart_source) }}"
chart_version: "{{ chart_source_version_upgrade | default(omit) }}"
namespace: "{{ helm_namespace }}"
register: install
- name: Assert idempotency after upgrade
assert:
that:
- install is not changed
- install.status.status | lower == 'deployed'
- install.status.chart == "{{ chart_test }}-{{ chart_test_version_upgrade }}"
- name: "Remove {{ chart_test }} from {{ source }}"
helm:
binary_path: "{{ helm_binary }}"
state: absent
name: test
namespace: "{{ helm_namespace }}"
register: install
- name: "Assert that {{ chart_test }} chart is removed from {{ source }}"
assert:
that:
- install is changed
- name: Check idempotency after remove
helm:
binary_path: "{{ helm_binary }}"
state: absent
name: test
namespace: "{{ helm_namespace }}"
register: install
- name: Assert idempotency
assert:
that:
- install is not changed
# Test --replace
- name: Install chart for replace option
helm:
binary_path: "{{ helm_binary }}"
name: test-0001
chart_ref: "{{ chart_source }}"
chart_version: "{{ chart_source_version | default(omit) }}"
namespace: "{{ helm_namespace }}"
register: install
- name: "Assert that {{ chart_test }} chart is installed from {{ source }}"
assert:
that:
- install is changed
- name: Remove {{ chart_test }} with --purge
helm:
binary_path: "{{ helm_binary }}"
state: absent
name: test-0001
purge: False
namespace: "{{ helm_namespace }}"
register: install
- name: Check if chart is removed
assert:
that:
- install is changed
- name: Install chart again with same name test-0001
helm:
binary_path: "{{ helm_binary }}"
name: test-0001
chart_ref: "{{ chart_source }}"
chart_version: "{{ chart_source_version | default(omit) }}"
namespace: "{{ helm_namespace }}"
replace: True
register: install
- name: "Assert that {{ chart_test }} chart is installed from {{ source }}"
assert:
that:
- install is changed
- name: Remove {{ chart_test }} (cleanup)
helm:
binary_path: "{{ helm_binary }}"
state: absent
name: test-0001
namespace: "{{ helm_namespace }}"
register: install
- name: Check if chart is removed
assert:
that:
- install is changed
- name: "Install {{ chart_test }} from {{ source }} with values_files"
helm:
binary_path: "{{ helm_binary }}"
name: test
chart_ref: "{{ chart_source }}"
chart_version: "{{ chart_source_version | default(omit) }}"
namespace: "{{ helm_namespace }}"
values_files:
- "{{ role_path }}/files/values.yaml"
register: install
- name: "Assert that {{ chart_test }} chart has var from {{ source }}"
assert:
that:
- install is changed
- install.status.status | lower == 'deployed'
- install.status.chart == "{{ chart_test }}-{{ chart_test_version }}"
- "install.status['values'].revisionHistoryLimit == 0"
- name: "Install {{ chart_test }} from {{ source }} with values_files (again)"
helm:
binary_path: "{{ helm_binary }}"
name: test
chart_ref: "{{ chart_source }}"
chart_version: "{{ chart_source_version | default(omit) }}"
namespace: "{{ helm_namespace }}"
values_files:
- "{{ role_path }}/files/values.yaml"
register: install
- name: "Assert the result is consistent"
assert:
that:
- not (install is changed)
- name: Render templates
helm_template:
binary_path: "{{ helm_binary }}"
chart_ref: "{{ chart_source }}"
chart_version: "{{ chart_source_version | default(omit) }}"
output_dir: "{{ temp_dir }}"
values_files:
- "{{ role_path }}/files/values.yaml"
register: result
- assert:
that:
- result is changed
- result is not failed
- result.rc == 0
- result.command is match("{{ helm_binary }} template {{ chart_source }}")
- name: Check templates created
stat:
path: "{{ temp_dir }}/{{ chart_test }}/templates"
register: result
- assert:
that:
result.stat.exists
- name: Release using non-existent context
helm:
binary_path: "{{ helm_binary }}"
name: test
chart_ref: "{{ chart_source }}"
chart_version: "{{ chart_source_version | default(omit) }}"
namespace: "{{ helm_namespace }}"
create_namespace: true
context: does-not-exist
ignore_errors: yes
register: result
- name: Assert that release fails with non-existent context
assert:
that:
- result is failed
- "'context \"does-not-exist\" does not exist' in result.stderr"
always:
- name: Clean up temp dir
file:
state: absent
path: "{{ temp_dir }}"
ignore_errors: true
- name: Remove helm namespace
k8s:
api_version: v1
kind: Namespace
name: "{{ helm_namespace }}"
state: absent
wait: true
wait_timeout: 180

View File

@@ -0,0 +1,105 @@
---
- name: Git clone stable repo
git:
repo: "{{ chart_test_git_repo }}"
dest: /tmp/helm_test_repo
version: 631eb8413f6728962439488f48d7d6fbb954a6db
depth: 1
- name: Git clone stable repo upgrade
git:
repo: "{{ chart_test_git_repo }}"
dest: /tmp/helm_test_repo_upgrade
version: d37b5025ffc8be49699898369fbb59661e2a8ffb
depth: 1
- name: Install Chart from local path
include_tasks: "../tests_chart.yml"
vars:
source: local_path
chart_test: "{{ chart_test_local_path }}"
chart_source: "/tmp/helm_test_repo/stable/{{ chart_test_local_path }}/"
chart_source_upgrade: "/tmp/helm_test_repo_upgrade/stable/{{ chart_test_local_path }}/"
chart_test_version: "{{ chart_test_version_local_path }}"
chart_test_version_upgrade: "{{ chart_test_version_upgrade_local_path }}"
- name: Test appVersion idempotence
vars:
chart_test: "test-chart"
chart_test_upgrade: "test-chart-v2"
chart_test_version: "0.1.0"
chart_test_version_upgrade: "0.2.0"
chart_test_app_version: "v1"
chart_test_upgrade_app_version: "v2"
block:
- name: Copy test chart
copy:
src: "{{ chart_test }}"
dest: "/tmp/helm_test_appversion/test-chart/"
- name: Copy test chart v2
copy:
src: "{{ chart_test_upgrade }}"
dest: "/tmp/helm_test_appversion/test-chart/"
# create package with appVersion v1
- name: "Package chart into archive with appVersion {{ chart_test_app_version }}"
command: "{{ helm_binary }} package --app-version {{ chart_test_app_version }} /tmp/helm_test_appversion/test-chart/{{ chart_test }}"
- name: "Move appVersion {{ chart_test_app_version }} chart archive"
copy:
remote_src: true
src: "test-chart-{{ chart_test_version }}.tgz"
dest: "/tmp/helm_test_appversion/test-chart/{{ chart_test }}-{{ chart_test_app_version }}-{{ chart_test_version }}.tgz"
# create package with appVersion v2
- name: "Package chart into archive with appVersion {{ chart_test_upgrade_app_version }}"
command: "{{ helm_binary }} package --app-version {{ chart_test_upgrade_app_version }} /tmp/helm_test_appversion/test-chart/{{ chart_test_upgrade }}"
- name: "Move appVersion {{ chart_test_upgrade_app_version }} chart archive"
copy:
remote_src: true
src: "test-chart-{{ chart_test_version_upgrade }}.tgz"
dest: "/tmp/helm_test_appversion/test-chart/{{ chart_test }}-{{ chart_test_upgrade_app_version }}-{{ chart_test_version_upgrade }}.tgz"
- name: Install Chart from local path
include_tasks: "../tests_chart.yml"
vars:
source: local_path
chart_source: "/tmp/helm_test_appversion/test-chart/{{ chart_test }}-{{ chart_test_app_version }}-{{ chart_test_version }}.tgz"
chart_source_upgrade: "/tmp/helm_test_appversion/test-chart/{{ chart_test }}-{{ chart_test_upgrade_app_version }}-{{ chart_test_version_upgrade }}.tgz"
- name: Test appVersion handling when null
vars:
chart_test: "appversionless-chart"
chart_test_upgrade: "appversionless-chart-v2"
chart_test_version: "0.1.0"
chart_test_version_upgrade: "0.2.0"
block:
- name: Copy test chart
copy:
src: "{{ chart_test }}"
dest: "/tmp/helm_test_appversion/test-null/"
- name: Copy test chart v2
copy:
src: "{{ chart_test_upgrade }}"
dest: "/tmp/helm_test_appversion/test-null/"
# create package with appVersion v1
- name: "Package chart into archive with appVersion v1"
command: "{{ helm_binary }} package --app-version v1 /tmp/helm_test_appversion/test-null/{{ chart_test_upgrade }}"
- name: Install Chart from local path
include_tasks: "../tests_chart.yml"
vars:
source: local_path
chart_source: "/tmp/helm_test_appversion/test-null/{{ chart_test }}/"
chart_source_upgrade: "{{ chart_test }}-{{ chart_test_version_upgrade }}.tgz"
- name: Remove clone repos
file:
path: "{{ item }}"
state: absent
with_items:
- /tmp/helm_test_repo
- /tmp/helm_test_repo_upgrade
- /tmp/helm_test_appversion

View File

@@ -0,0 +1,21 @@
---
- name: Add chart repo
  helm_repository:
    binary_path: "{{ helm_binary }}"
    name: test_helm
    repo_url: "{{ chart_test_repo }}"

- name: Install Chart from repository
  include_tasks: "../tests_chart.yml"
  vars:
    source: repository
    chart_source: "test_helm/{{ chart_test }}"
    chart_source_version: "{{ chart_test_version }}"
    chart_source_version_upgrade: "{{ chart_test_version_upgrade }}"

# Cleanup — this task was previously (and misleadingly) also named "Add chart repo".
- name: Remove chart repo
  helm_repository:
    binary_path: "{{ helm_binary }}"
    name: test_helm
    repo_url: "{{ chart_test_repo }}"
    state: absent

View File

@@ -0,0 +1,7 @@
---
- name: Install Chart from URL
include_tasks: "../tests_chart.yml"
vars:
source: url
chart_source: "https://github.com/kubernetes/ingress-nginx/releases/download/{{ chart_test }}-{{ chart_test_version }}/{{ chart_test }}-{{ chart_test_version }}.tgz"
chart_source_upgrade: "https://github.com/kubernetes/ingress-nginx/releases/download/{{ chart_test }}-{{ chart_test_version_upgrade }}/{{ chart_test }}-{{ chart_test_version_upgrade }}.tgz"

View File

@@ -0,0 +1,153 @@
---
- name: Test helm diff functionality
vars:
test_chart_ref: "/tmp/test-chart"
block:
- name: Install helm diff
helm_plugin:
binary_path: "{{ helm_binary }}"
state: present
plugin_path: https://github.com/databus23/helm-diff
- name: Copy test chart
copy:
src: "test-chart/"
dest: "{{ test_chart_ref }}"
- name: Install local chart
helm:
binary_path: "{{ helm_binary }}"
name: test-chart
namespace: "{{ helm_namespace }}"
chart_ref: "{{ test_chart_ref }}"
create_namespace: yes
register: install
- assert:
that:
- install is changed
- name: Modify local chart
blockinfile:
create: yes
path: "{{ test_chart_ref }}/templates/anothermap.yaml"
block: !unsafe |
apiVersion: v1
kind: ConfigMap
metadata:
name: test-chart-another-configmap
data:
foo: {{ .Values.foo | default "bar" }}
- name: Upgrade local chart with modifications
helm:
binary_path: "{{ helm_binary }}"
name: test-chart
namespace: "{{ helm_namespace }}"
chart_ref: "{{ test_chart_ref }}"
register: install
- assert:
that:
- install is changed
- name: Upgrade modified local chart idempotency check
helm:
binary_path: "{{ helm_binary }}"
name: test-chart
namespace: "{{ helm_namespace }}"
chart_ref: "{{ test_chart_ref }}"
register: install
- assert:
that:
- install is not changed
- name: Modify values
blockinfile:
create: yes
path: "{{ test_chart_ref }}/values.yml"
block: |
---
foo: baz
- name: Upgrade with values file
helm:
binary_path: "{{ helm_binary }}"
name: test-chart
namespace: "{{ helm_namespace }}"
chart_ref: "{{ test_chart_ref }}"
values_files:
- "{{ test_chart_ref }}/values.yml"
register: install
- assert:
that:
- install is changed
- name: Upgrade with values file idempotency check
helm:
binary_path: "{{ helm_binary }}"
name: test-chart
namespace: "{{ helm_namespace }}"
chart_ref: "{{ test_chart_ref }}"
values_files:
- "{{ test_chart_ref }}/values.yml"
register: install
- assert:
that:
- install is not changed
- name: Upgrade with values
helm:
binary_path: "{{ helm_binary }}"
name: test-chart
namespace: "{{ helm_namespace }}"
chart_ref: "{{ test_chart_ref }}"
values:
foo: gaz
register: install
- assert:
that:
- install is changed
- name: Upgrade with values idempotency check
helm:
binary_path: "{{ helm_binary }}"
name: test-chart
namespace: "{{ helm_namespace }}"
chart_ref: "{{ test_chart_ref }}"
values:
foo: gaz
register: install
- assert:
that:
- install is not changed
always:
- name: Remove chart directory
file:
path: "{{ test_chart_ref }}"
state: absent
ignore_errors: yes
- name: Uninstall helm diff
helm_plugin:
binary_path: "{{ helm_binary }}"
state: absent
plugin_name: diff
ignore_errors: yes
- name: Remove helm namespace
k8s:
api_version: v1
kind: Namespace
name: "{{ helm_namespace }}"
state: absent
wait: yes
wait_timeout: 180
ignore_errors: yes

View File

@@ -0,0 +1,119 @@
---
- name: Install env plugin in check mode
helm_plugin:
binary_path: "{{ helm_binary }}"
state: present
plugin_path: https://github.com/adamreese/helm-env
register: check_install_env
check_mode: true
- assert:
that:
- check_install_env.changed
- name: Install env plugin
helm_plugin:
binary_path: "{{ helm_binary }}"
state: present
plugin_path: https://github.com/adamreese/helm-env
register: install_env
- assert:
that:
- install_env.changed
- name: Gather info about all plugin
helm_plugin_info:
binary_path: "{{ helm_binary }}"
register: plugin_info
- assert:
that:
- plugin_info.plugin_list is defined
- name: Install env plugin again
helm_plugin:
binary_path: "{{ helm_binary }}"
state: present
plugin_path: https://github.com/adamreese/helm-env
register: install_env
- assert:
that:
- not install_env.changed
- name: Uninstall env plugin in check mode
helm_plugin:
binary_path: "{{ helm_binary }}"
state: absent
plugin_name: env
register: check_uninstall_env
check_mode: true
- assert:
that:
- check_uninstall_env.changed
- name: Uninstall env plugin
helm_plugin:
binary_path: "{{ helm_binary }}"
state: absent
plugin_name: env
register: uninstall_env
- assert:
that:
- uninstall_env.changed
- name: Uninstall env plugin again
helm_plugin:
binary_path: "{{ helm_binary }}"
state: absent
plugin_name: env
register: uninstall_env
- assert:
that:
- not uninstall_env.changed
# https://github.com/ansible-collections/community.kubernetes/issues/399
- block:
- name: Copy required plugin files
copy:
src: "files/sample_plugin"
dest: "/tmp/helm_plugin_test/"
- name: Install sample_plugin from the directory
helm_plugin:
binary_path: "{{ helm_binary }}"
state: present
plugin_path: "/tmp/helm_plugin_test/sample_plugin"
register: sample_plugin_output
- name: Assert that sample_plugin is installed or not
assert:
that:
- sample_plugin_output.changed
- name: Gather Helm plugin info
helm_plugin_info:
binary_path: "{{ helm_binary }}"
register: r
- name: Set sample_plugin version
set_fact:
plugin_version: "{{ ( r.plugin_list | selectattr('name', 'equalto', plugin_name) | list )[0].version }}"
vars:
plugin_name: "sample_plugin"
- name: Assert if sample_plugin with multiline comment is installed
assert:
that:
- plugin_version == "0.0.1"
always:
- name: Uninstall sample_plugin
helm_plugin:
binary_path: "{{ helm_binary }}"
state: absent
plugin_name: sample_plugin
ignore_errors: yes

View File

@@ -0,0 +1,67 @@
---
- name: "Ensure test_helm_repo doesn't exist"
helm_repository:
binary_path: "{{ helm_binary }}"
name: test_helm_repo
state: absent
- name: Add test_helm_repo chart repository
helm_repository:
binary_path: "{{ helm_binary }}"
name: test_helm_repo
repo_url: "{{ chart_test_repo }}"
register: repository
- name: Assert that test_helm_repo repository is added
assert:
that:
- repository is changed
- name: Check idempotency
helm_repository:
binary_path: "{{ helm_binary }}"
name: test_helm_repo
repo_url: "{{ chart_test_repo }}"
register: repository
- name: Assert idempotency
assert:
that:
- repository is not changed
- name: Failed to add repository with the same name
helm_repository:
binary_path: "{{ helm_binary }}"
name: test_helm_repo
repo_url: "https://other-charts.url"
register: repository_errors
ignore_errors: yes
- name: Assert that adding repository with the same name failed
assert:
that:
- repository_errors is failed
- name: Remove test_helm_repo chart repository
helm_repository:
binary_path: "{{ helm_binary }}"
name: test_helm_repo
state: absent
register: repository
- name: Assert that test_helm_repo repository is removed
assert:
that:
- repository is changed
- name: Check idempotency after remove
helm_repository:
binary_path: "{{ helm_binary }}"
name: test_helm_repo
state: absent
register: repository
- name: Assert idempotency
assert:
that:
- repository is not changed

View File

@@ -0,0 +1,2 @@
context/target
k8s

View File

@@ -0,0 +1,94 @@
---
- name: Converge
hosts: localhost
connection: local
collections:
- kubernetes.core
vars_files:
- vars/main.yml
tasks:
- name: Ensure namespace exists
k8s:
api_version: v1
kind: Namespace
name: inventory
- name: Add a deployment
k8s:
definition:
apiVersion: apps/v1
kind: Deployment
metadata:
name: inventory
namespace: inventory
spec:
replicas: 1
selector:
matchLabels:
app: "{{ k8s_pod_name }}"
template: "{{ k8s_pod_template }}"
wait: yes
wait_timeout: 240
vars:
k8s_pod_name: inventory
k8s_pod_image: python
k8s_pod_command:
- python
- '-m'
- http.server
k8s_pod_env:
- name: TEST
value: test
- meta: refresh_inventory
- name: Verify inventory and connection plugins
hosts: namespace_inventory_pods
gather_facts: no
vars:
file_content: |
Hello world
tasks:
- name: End play if host not running (TODO should we not add these to the inventory?)
meta: end_host
when: pod_phase != "Running"
- debug: var=hostvars
- setup:
- debug: var=ansible_facts
- name: Assert the TEST environment variable was retrieved
assert:
that: ansible_facts.env.TEST == 'test'
- name: Copy a file into the host
copy:
content: '{{ file_content }}'
dest: /tmp/test_file
- name: Retrieve the file from the host
slurp:
src: /tmp/test_file
register: slurped_file
- name: Assert the file content matches expectations
assert:
that: (slurped_file.content|b64decode) == file_content
- name: Delete inventory namespace
hosts: localhost
connection: local
gather_facts: no
tasks:
- name: Remove inventory namespace
k8s:
api_version: v1
kind: Namespace
name: inventory
state: absent

View File

@@ -0,0 +1,8 @@
#!/usr/bin/env bash
# Run the k8s inventory-plugin integration test play.
set -eux
# Enable only the k8s and yaml inventory plugins for this run.
export ANSIBLE_INVENTORY_ENABLED=kubernetes.core.k8s,yaml
export ANSIBLE_PYTHON_INTERPRETER=auto_silent
ansible-playbook playbooks/play.yml -i playbooks/test.inventory_k8s.yml "$@"

View File

@@ -0,0 +1,2 @@
---
plugin: kubernetes.core.k8s

View File

@@ -0,0 +1,40 @@
---
k8s_pod_metadata:
labels:
app: "{{ k8s_pod_name }}"
k8s_pod_spec:
serviceAccount: "{{ k8s_pod_service_account }}"
containers:
- image: "{{ k8s_pod_image }}"
imagePullPolicy: Always
name: "{{ k8s_pod_name }}"
command: "{{ k8s_pod_command }}"
readinessProbe:
initialDelaySeconds: 15
exec:
command:
- /bin/true
resources: "{{ k8s_pod_resources }}"
ports: "{{ k8s_pod_ports }}"
env: "{{ k8s_pod_env }}"
k8s_pod_service_account: default
k8s_pod_resources:
limits:
cpu: "100m"
memory: "100Mi"
k8s_pod_command: []
k8s_pod_ports: []
k8s_pod_env: []
k8s_pod_template:
metadata: "{{ k8s_pod_metadata }}"
spec: "{{ k8s_pod_spec }}"
kubernetes_role_path: ../../tests/integration/targets/kubernetes

View File

@@ -0,0 +1,2 @@
time=5
k8s

View File

@@ -0,0 +1,22 @@
---
# Verify that creating a SelfSubjectAccessReview works and reports 'allowed'.
- name: Create a SelfSubjectAccessReview resource
  register: can_i_create_namespaces
  ignore_errors: true
  k8s:
    state: present
    definition:
      apiVersion: authorization.k8s.io/v1
      kind: SelfSubjectAccessReview
      spec:
        resourceAttributes:
          # NOTE(review): for the core API group, 'group' should normally be ""
          # and 'resource' the lowercase plural ("namespaces"); kept as-is since
          # the assertions only check that the review succeeds — confirm intent.
          group: v1
          resource: Namespace
          verb: create

- name: Assert that the SelfSubjectAccessReview request succeeded
  assert:
    that:
      - can_i_create_namespaces is successful
      - can_i_create_namespaces.result.status is defined
      - can_i_create_namespaces.result.status.allowed is defined
      - can_i_create_namespaces.result.status.allowed

View File

@@ -0,0 +1,2 @@
time=6
k8s

View File

@@ -0,0 +1 @@
---

View File

@@ -0,0 +1,69 @@
---
- block:
- name: Ensure that append_hash namespace exists
k8s:
kind: Namespace
name: append-hash
- name: Create k8s_resource variable
set_fact:
k8s_resource:
metadata:
name: config-map-test
namespace: append-hash
apiVersion: v1
kind: ConfigMap
data:
hello: world
- name: Create config map
k8s:
definition: "{{ k8s_resource }}"
append_hash: yes
register: k8s_configmap1
- name: Check configmap is created with a hash
assert:
that:
- k8s_configmap1 is changed
- k8s_configmap1.result.metadata.name != 'config-map-test'
- k8s_configmap1.result.metadata.name[:-10] == 'config-map-test-'
- name: Recreate same config map
k8s:
definition: "{{ k8s_resource }}"
append_hash: yes
register: k8s_configmap2
- name: Check configmaps are different
assert:
that:
- k8s_configmap2 is not changed
- k8s_configmap1.result.metadata.name == k8s_configmap2.result.metadata.name
- name: Add key to config map
k8s:
definition:
metadata:
name: config-map-test
namespace: append-hash
apiVersion: v1
kind: ConfigMap
data:
hello: world
another: value
append_hash: yes
register: k8s_configmap3
- name: Check configmaps are different
assert:
that:
- k8s_configmap3 is changed
- k8s_configmap1.result.metadata.name != k8s_configmap3.result.metadata.name
always:
- name: Ensure that namespace is removed
k8s:
kind: Namespace
name: append-hash
state: absent

View File

@@ -0,0 +1,4 @@
slow
k8s_service
k8s
time=192

View File

@@ -0,0 +1,40 @@
---
# Shared defaults for the k8s integration tests: they assemble a generic Pod
# template (k8s_pod_template) from overridable pieces, so a test only has to
# set k8s_pod_name / k8s_pod_image and whatever extras it needs.

# Single "app" label derived from the pod name; matched by the tests'
# label_selectors.
k8s_pod_metadata:
  labels:
    app: "{{ k8s_pod_name }}"

# One-container pod spec; image, command, resources, ports and env are all
# injected from the variables below so callers can override them.
k8s_pod_spec:
  serviceAccount: "{{ k8s_pod_service_account }}"
  containers:
    - image: "{{ k8s_pod_image }}"
      imagePullPolicy: Always
      name: "{{ k8s_pod_name }}"
      command: "{{ k8s_pod_command }}"
      readinessProbe:
        initialDelaySeconds: 15
        exec:
          command:
            - /bin/true
      resources: "{{ k8s_pod_resources }}"
      ports: "{{ k8s_pod_ports }}"
      env: "{{ k8s_pod_env }}"

# Overridable pieces referenced above.
k8s_pod_service_account: default
k8s_pod_resources:
  limits:
    cpu: "100m"
    memory: "100Mi"
k8s_pod_command: []
k8s_pod_ports: []
k8s_pod_env: []

# The assembled template consumed by Deployment/DaemonSet definitions.
k8s_pod_template:
  metadata: "{{ k8s_pod_metadata }}"
  spec: "{{ k8s_pod_spec }}"

# Upper bound (seconds) for wait_timeout in the apply tests.
k8s_wait_timeout: 240

View File

@@ -0,0 +1,595 @@
---
- block:
- set_fact:
apply_namespace: apply
- name: Ensure namespace exists
k8s:
definition:
apiVersion: v1
kind: Namespace
metadata:
name: "{{ apply_namespace }}"
- name: Add a configmap
k8s:
name: "apply-configmap"
namespace: "{{ apply_namespace }}"
definition:
kind: ConfigMap
apiVersion: v1
data:
one: "1"
two: "2"
three: "3"
apply: yes
register: k8s_configmap
- name: Check configmap was created
assert:
that:
- k8s_configmap is changed
- k8s_configmap.result.metadata.annotations|default(False)
- name: Add same configmap again
k8s:
definition:
kind: ConfigMap
apiVersion: v1
metadata:
name: "apply-configmap"
namespace: "{{ apply_namespace }}"
data:
one: "1"
two: "2"
three: "3"
apply: yes
register: k8s_configmap_2
- name: Check nothing changed
assert:
that:
- k8s_configmap_2 is not changed
- name: Add same configmap again with check mode on
k8s:
definition:
kind: ConfigMap
apiVersion: v1
metadata:
name: "apply-configmap"
namespace: "{{ apply_namespace }}"
data:
one: "1"
two: "2"
three: "3"
apply: yes
check_mode: yes
register: k8s_configmap_check
- name: Check nothing changed
assert:
that:
- k8s_configmap_check is not changed
- name: Add same configmap again but using name and namespace args
k8s:
name: "apply-configmap"
namespace: "{{ apply_namespace }}"
definition:
kind: ConfigMap
apiVersion: v1
data:
one: "1"
two: "2"
three: "3"
apply: yes
register: k8s_configmap_2a
- name: Check nothing changed
assert:
that:
- k8s_configmap_2a is not changed
- name: Update configmap
k8s:
definition:
kind: ConfigMap
apiVersion: v1
metadata:
name: "apply-configmap"
namespace: "{{ apply_namespace }}"
data:
one: "1"
three: "3"
four: "4"
apply: yes
register: k8s_configmap_3
- name: Ensure that configmap has been correctly updated
assert:
that:
- k8s_configmap_3 is changed
- "'four' in k8s_configmap_3.result.data"
- "'two' not in k8s_configmap_3.result.data"
- name: Add a service
k8s:
definition:
apiVersion: v1
kind: Service
metadata:
name: apply-svc
namespace: "{{ apply_namespace }}"
spec:
selector:
app: whatever
ports:
- name: http
port: 8080
targetPort: 8080
apply: yes
register: k8s_service
- name: Add exactly same service
k8s:
definition:
apiVersion: v1
kind: Service
metadata:
name: apply-svc
namespace: "{{ apply_namespace }}"
spec:
selector:
app: whatever
ports:
- name: http
port: 8080
targetPort: 8080
apply: yes
register: k8s_service_2
- name: Check nothing changed
assert:
that:
- k8s_service_2 is not changed
- name: Add exactly same service in check mode
k8s:
definition:
apiVersion: v1
kind: Service
metadata:
name: apply-svc
namespace: "{{ apply_namespace }}"
spec:
selector:
app: whatever
ports:
- name: http
port: 8080
targetPort: 8080
apply: yes
register: k8s_service_3
check_mode: yes
- name: Check nothing changed
assert:
that:
- k8s_service_3 is not changed
- name: Change service ports
k8s:
definition:
apiVersion: v1
kind: Service
metadata:
name: apply-svc
namespace: "{{ apply_namespace }}"
spec:
selector:
app: whatever
ports:
- name: http
port: 8081
targetPort: 8081
apply: yes
register: k8s_service_4
- name: Check ports are correct
assert:
that:
- k8s_service_4 is changed
- k8s_service_4.result.spec.ports | length == 1
- k8s_service_4.result.spec.ports[0].port == 8081
- name: Insert new service port
k8s:
definition:
apiVersion: v1
kind: Service
metadata:
name: apply-svc
namespace: "{{ apply_namespace }}"
spec:
selector:
app: whatever
ports:
- name: mesh
port: 8080
targetPort: 8080
- name: http
port: 8081
targetPort: 8081
apply: yes
register: k8s_service_4
- name: Check ports are correct
assert:
that:
- k8s_service_4 is changed
- k8s_service_4.result.spec.ports | length == 2
- k8s_service_4.result.spec.ports[0].port == 8080
- k8s_service_4.result.spec.ports[1].port == 8081
- name: Remove new service port (check mode)
k8s:
definition:
apiVersion: v1
kind: Service
metadata:
name: apply-svc
namespace: "{{ apply_namespace }}"
spec:
selector:
app: whatever
ports:
- name: http
port: 8081
targetPort: 8081
apply: yes
check_mode: yes
register: k8s_service_check
- name: Check ports are correct
assert:
that:
- k8s_service_check is changed
- k8s_service_check.result.spec.ports | length == 1
- k8s_service_check.result.spec.ports[0].port == 8081
- name: Remove new service port
k8s:
definition:
apiVersion: v1
kind: Service
metadata:
name: apply-svc
namespace: "{{ apply_namespace }}"
spec:
selector:
app: whatever
ports:
- name: http
port: 8081
targetPort: 8081
apply: yes
register: k8s_service_5
- name: Check ports are correct
assert:
that:
- k8s_service_5 is changed
- k8s_service_5.result.spec.ports | length == 1
- k8s_service_5.result.spec.ports[0].port == 8081
- name: Add a serviceaccount
k8s:
definition:
apiVersion: v1
kind: ServiceAccount
metadata:
name: apply-deploy
namespace: "{{ apply_namespace }}"
- name: Add a deployment
k8s:
definition:
apiVersion: apps/v1
kind: Deployment
metadata:
name: apply-deploy
namespace: "{{ apply_namespace }}"
spec:
replicas: 1
selector:
matchLabels:
app: "{{ k8s_pod_name }}"
template: "{{ k8s_pod_template }}"
wait: yes
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
apply: yes
vars:
k8s_pod_name: apply-deploy
k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-green
k8s_pod_service_account: apply-deploy
k8s_pod_ports:
- containerPort: 8080
name: http
protocol: TCP
k8s_pod_resources:
requests:
cpu: 100m
memory: 100Mi
limits:
cpu: 100m
memory: 100Mi
- name: Update the earlier deployment in check mode
k8s:
definition:
apiVersion: apps/v1
kind: Deployment
metadata:
name: apply-deploy
namespace: "{{ apply_namespace }}"
spec:
replicas: 1
selector:
matchLabels:
app: "{{ k8s_pod_name }}"
template: "{{ k8s_pod_template }}"
wait: yes
apply: yes
check_mode: yes
vars:
k8s_pod_name: apply-deploy
k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-purple
k8s_pod_service_account: apply-deploy
k8s_pod_ports:
- containerPort: 8080
name: http
protocol: TCP
k8s_pod_resources:
requests:
cpu: 50m
limits:
cpu: 50m
memory: 50Mi
register: update_deploy_check_mode
- name: Ensure check mode change took
assert:
that:
- update_deploy_check_mode is changed
- "update_deploy_check_mode.result.spec.template.spec.containers[0].image == 'gcr.io/kuar-demo/kuard-amd64:v0.10.0-purple'"
- name: Update the earlier deployment
k8s:
definition:
apiVersion: apps/v1
kind: Deployment
metadata:
name: apply-deploy
namespace: "{{ apply_namespace }}"
spec:
replicas: 1
selector:
matchLabels:
app: "{{ k8s_pod_name }}"
template: "{{ k8s_pod_template }}"
wait: yes
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
apply: yes
vars:
k8s_pod_name: apply-deploy
k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-purple
k8s_pod_service_account: apply-deploy
k8s_pod_ports:
- containerPort: 8080
name: http
protocol: TCP
k8s_pod_resources:
requests:
cpu: 50m
limits:
cpu: 50m
memory: 50Mi
register: update_deploy_for_real
- name: Ensure change took
assert:
that:
- update_deploy_for_real is changed
- "update_deploy_for_real.result.spec.template.spec.containers[0].image == 'gcr.io/kuar-demo/kuard-amd64:v0.10.0-purple'"
- name: Remove the serviceaccount
k8s:
state: absent
definition:
apiVersion: v1
kind: ServiceAccount
metadata:
name: apply-deploy
namespace: "{{ apply_namespace }}"
- name: Apply deployment after service account removed
k8s:
definition:
apiVersion: apps/v1
kind: Deployment
metadata:
name: apply-deploy
namespace: "{{ apply_namespace }}"
spec:
replicas: 1
selector:
matchLabels:
app: "{{ k8s_pod_name }}"
template: "{{ k8s_pod_template }}"
wait: yes
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
apply: yes
vars:
k8s_pod_name: apply-deploy
k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-green
k8s_pod_service_account: apply-deploy
k8s_pod_ports:
- containerPort: 8080
name: http
protocol: TCP
k8s_pod_resources:
requests:
cpu: 50m
limits:
cpu: 50m
memory: 50Mi
register: deploy_after_serviceaccount_removal
ignore_errors: yes
- name: Ensure that updating deployment after service account removal failed
assert:
that:
- deploy_after_serviceaccount_removal is failed
- name: Add a secret
k8s:
definition:
apiVersion: v1
kind: Secret
metadata:
name: apply-secret
namespace: "{{ apply_namespace }}"
type: Opaque
stringData:
foo: bar
register: k8s_secret
- name: Check secret was created
assert:
that:
- k8s_secret is changed
- k8s_secret.result.data.foo
- name: Add same secret
k8s:
definition:
apiVersion: v1
kind: Secret
metadata:
name: apply-secret
namespace: "{{ apply_namespace }}"
type: Opaque
stringData:
foo: bar
register: k8s_secret
- name: Check nothing changed
assert:
that:
- k8s_secret is not changed
- name: Add same secret with check mode on
k8s:
definition:
apiVersion: v1
kind: Secret
metadata:
name: apply-secret
namespace: "{{ apply_namespace }}"
type: Opaque
stringData:
foo: bar
check_mode: yes
register: k8s_secret
- name: Check nothing changed
assert:
that:
- k8s_secret is not changed
- name: Add same secret with check mode on using data
k8s:
definition:
apiVersion: v1
kind: Secret
metadata:
name: apply-secret
namespace: "{{ apply_namespace }}"
type: Opaque
data:
foo: YmFy
check_mode: yes
register: k8s_secret
- name: Check nothing changed
assert:
that:
- k8s_secret is not changed
- name: Create network policy (egress array with empty dict)
k8s:
namespace: "{{ apply_namespace }}"
apply: true
definition:
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
name: apply-netpolicy
labels:
app: apply-netpolicy
annotations:
{}
spec:
podSelector:
matchLabels:
app: apply-netpolicy
policyTypes:
- Ingress
- Egress
ingress:
- ports:
- port: 9093
protocol: TCP
egress:
- {}
- name: Apply network policy
k8s:
namespace: "{{ apply_namespace }}"
definition:
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
name: apply-netpolicy
labels:
app: apply-netpolicy
annotations:
{}
spec:
podSelector:
matchLabels:
app: apply-netpolicy
policyTypes:
- Ingress
- Egress
ingress:
- ports:
- port: 9093
protocol: TCP
egress:
- {}
apply: true
register: k8s_networkpolicy
- name: Check that nothing changed
assert:
that:
- k8s_networkpolicy is not changed
always:
- name: Remove namespace
k8s:
kind: Namespace
name: "{{ apply_namespace }}"
state: absent

View File

@@ -0,0 +1,2 @@
k8s_cluster_info
time=6

View File

@@ -0,0 +1,24 @@
---
- name: Get Information about All APIs
k8s_cluster_info:
register: api_details
- name: Print all APIs for debugging
debug:
msg: "{{ api_details.apis }}"
- name: Get core API version
set_fact:
crd: "{{ api_details.apis['apiextensions.k8s.io/v1'] }}"
host: "{{ api_details.connection['host'] }}"
client_version: "{{ api_details.version['client'] }}"
- name: Check if all APIs are present
assert:
that:
- api_details.apis is defined
- api_details.apis.v1.Secret is defined
- api_details.apis.v1.Service is defined
- crd is defined
- host is defined
- client_version is defined

View File

@@ -0,0 +1,2 @@
k8s
time=15

View File

@@ -0,0 +1 @@
---

View File

@@ -0,0 +1,67 @@
---
- block:
- name: Create a namespace
k8s:
name: crd
kind: Namespace
- name: Install custom resource definitions
k8s:
definition: "{{ lookup('file', 'setup-crd.yml') }}"
- name: Pause 5 seconds to avoid race condition
pause:
seconds: 5
- name: Create custom resource definition
k8s:
definition: "{{ lookup('file', 'crd-resource.yml') }}"
namespace: crd
apply: "{{ create_crd_with_apply | default(omit) }}"
register: create_crd
- name: Patch custom resource definition
k8s:
definition: "{{ lookup('file', 'crd-resource.yml') }}"
namespace: crd
register: recreate_crd
ignore_errors: yes
- name: Assert that recreating crd is as expected
assert:
that:
- recreate_crd is not failed
- block:
- name: Recreate custom resource definition with merge_type
k8s:
definition: "{{ lookup('file', 'crd-resource.yml') }}"
merge_type:
- merge
namespace: crd
register: recreate_crd_with_merge
- name: Recreate custom resource definition with merge_type list
k8s:
definition: "{{ lookup('file', 'crd-resource.yml') }}"
merge_type:
- strategic-merge
- merge
namespace: crd
register: recreate_crd_with_merge_list
when: recreate_crd is successful
- name: Remove crd
k8s:
definition: "{{ lookup('file', 'crd-resource.yml') }}"
namespace: crd
state: absent
always:
- name: Remove crd namespace
k8s:
kind: Namespace
name: crd
state: absent
ignore_errors: yes

View File

@@ -0,0 +1,3 @@
k8s_info
k8s
time=64

View File

@@ -0,0 +1,40 @@
---
k8s_pod_metadata:
labels:
app: "{{ k8s_pod_name }}"
k8s_pod_spec:
serviceAccount: "{{ k8s_pod_service_account }}"
containers:
- image: "{{ k8s_pod_image }}"
imagePullPolicy: Always
name: "{{ k8s_pod_name }}"
command: "{{ k8s_pod_command }}"
readinessProbe:
initialDelaySeconds: 15
exec:
command:
- /bin/true
resources: "{{ k8s_pod_resources }}"
ports: "{{ k8s_pod_ports }}"
env: "{{ k8s_pod_env }}"
k8s_pod_service_account: default
k8s_pod_resources:
limits:
cpu: "100m"
memory: "100Mi"
k8s_pod_command: []
k8s_pod_ports: []
k8s_pod_env: []
k8s_pod_template:
metadata: "{{ k8s_pod_metadata }}"
spec: "{{ k8s_pod_spec }}"
kubernetes_role_path: ../../tests/integration/targets/kubernetes

View File

@@ -0,0 +1,95 @@
---
- block:
- set_fact:
delete_namespace: delete
- name: Ensure namespace exists
k8s:
definition:
apiVersion: v1
kind: Namespace
metadata:
name: "{{ delete_namespace }}"
- name: Add a daemonset
k8s:
definition:
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: delete-daemonset
namespace: "{{ delete_namespace }}"
spec:
selector:
matchLabels:
app: "{{ k8s_pod_name }}"
template: "{{ k8s_pod_template }}"
wait: yes
wait_timeout: 180
vars:
k8s_pod_name: delete-ds
k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:1
register: ds
- name: Check that daemonset wait worked
assert:
that:
- ds.result.status.currentNumberScheduled == ds.result.status.desiredNumberScheduled
- name: Check if pods exist
k8s_info:
namespace: "{{ delete_namespace }}"
kind: Pod
label_selectors:
- "app={{ k8s_pod_name }}"
vars:
k8s_pod_name: delete-ds
register: pods_create
- name: Assert that there are pods
assert:
that:
- pods_create.resources
- name: Remove the daemonset
k8s:
kind: DaemonSet
name: delete-daemonset
namespace: "{{ delete_namespace }}"
state: absent
wait: yes
- name: Show status of pods
k8s_info:
namespace: "{{ delete_namespace }}"
kind: Pod
label_selectors:
- "app={{ k8s_pod_name }}"
vars:
k8s_pod_name: delete-ds
- name: Wait for background deletion
pause:
seconds: 30
- name: Check if pods still exist
k8s_info:
namespace: "{{ delete_namespace }}"
kind: Pod
label_selectors:
- "app={{ k8s_pod_name }}"
vars:
k8s_pod_name: delete-ds
register: pods_delete
- name: Assert that deleting the daemonset deleted the pods
assert:
that:
- not pods_delete.resources
always:
- name: Remove namespace
k8s:
kind: Namespace
name: "{{ delete_namespace }}"
state: absent

View File

@@ -0,0 +1,3 @@
k8s_exec
k8s
time=11

View File

@@ -0,0 +1 @@
---

View File

@@ -0,0 +1,64 @@
---
- vars:
exec_namespace: k8s-exec
pod: sleep-pod
exec_pod_definition:
apiVersion: v1
kind: Pod
metadata:
name: "{{ pod }}"
namespace: "{{ exec_namespace }}"
spec:
containers:
- name: sleeper
image: busybox
command: ["sleep", "infinity"]
block:
- name: "Ensure that {{ exec_namespace }} namespace exists"
k8s:
kind: Namespace
name: "{{ exec_namespace }}"
- name: "Create a pod"
k8s:
definition: "{{ exec_pod_definition }}"
wait: yes
wait_sleep: 1
wait_timeout: 30
- name: "Execute a command"
k8s_exec:
pod: "{{ pod }}"
namespace: "{{ exec_namespace }}"
command: cat /etc/resolv.conf
register: output
- name: "Show k8s_exec output"
debug:
var: output
- name: "Assert k8s_exec output is correct"
assert:
that:
- "'nameserver' in output.stdout"
- name: Check if rc is returned for the given command
k8s_exec:
namespace: "{{ exec_namespace }}"
pod: "{{ pod }}"
command: 'false'
register: command_status
ignore_errors: True
- name: Check last command status
assert:
that:
- command_status.return_code != 0
always:
- name: "Cleanup namespace"
k8s:
kind: Namespace
name: "{{ exec_namespace }}"
state: absent

View File

@@ -0,0 +1,3 @@
k8s
k8s_info
time=37

View File

@@ -0,0 +1 @@
---

View File

@@ -0,0 +1,401 @@
---
- block:
- name: Create a namespace
k8s:
name: testing
kind: Namespace
register: output
- name: Show output
debug:
var: output
- name: Setting validate_certs to true causes a failure
k8s:
name: testing
kind: Namespace
validate_certs: yes
ca_cert: /dev/null # invalid CA certificate
ignore_errors: yes
register: output
- name: assert that validate_certs caused a failure (and therefore was correctly translated to verify_ssl)
assert:
that:
- output is failed
- block:
- name: Copy default kubeconfig
copy:
remote_src: yes
src: ~/.kube/config
dest: ~/.kube/customconfig
- name: Delete default kubeconfig
file:
path: ~/.kube/config
state: absent
- name: Using custom config location should succeed
kubernetes.core.k8s:
name: testing
kind: Namespace
kubeconfig: ~/.kube/customconfig
always:
- name: Return kubeconfig
copy:
remote_src: yes
src: ~/.kube/customconfig
dest: ~/.kube/config
ignore_errors: yes
- name: Delete custom config
file:
path: ~/.kube/customconfig
state: absent
ignore_errors: yes
- name: Ensure k8s_info works with empty resources
k8s_info:
kind: Deployment
namespace: testing
api_version: apps/v1
register: k8s_info
- name: Assert that k8s_info is in correct format
assert:
that:
- "'resources' in k8s_info"
- not k8s_info.resources
- name: Create a service
k8s:
state: present
resource_definition: &svc
apiVersion: v1
kind: Service
metadata:
name: web
namespace: testing
labels:
app: galaxy
service: web
spec:
selector:
app: galaxy
service: web
ports:
- protocol: TCP
targetPort: 8000
name: port-8000-tcp
port: 8000
register: output
- name: Show output
debug:
var: output
- name: Create the service again
k8s:
state: present
resource_definition: *svc
register: output
- name: Service creation should be idempotent
assert:
that: not output.changed
- name: Create a ConfigMap
k8s:
kind: ConfigMap
name: test-force-update
namespace: testing
definition:
data:
key: value
- name: Force update ConfigMap
k8s:
kind: ConfigMap
name: test-force-update
namespace: testing
definition:
data:
key: newvalue
force: yes
- name: Create PVC
k8s:
state: present
inline: &pvc
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: elastic-volume
namespace: testing
spec:
resources:
requests:
storage: 5Gi
accessModes:
- ReadWriteOnce
- name: Show output
debug:
var: output
- name: Create the PVC again
k8s:
state: present
inline: *pvc
- name: Ensure PVC creation is idempotent
assert:
that: not output.changed
- name: Create deployment
k8s:
state: present
inline: &deployment
apiVersion: apps/v1
kind: Deployment
metadata:
name: elastic
labels:
app: galaxy
service: elastic
namespace: testing
spec:
replicas: 1
selector:
matchLabels:
app: galaxy
service: elastic
template:
metadata:
labels:
app: galaxy
service: elastic
spec:
containers:
- name: elastic
volumeMounts:
- mountPath: /usr/share/elasticsearch/data
name: elastic-volume
command: ['elasticsearch']
image: 'ansible/galaxy-elasticsearch:2.4.6'
volumes:
- name: elastic-volume
persistentVolumeClaim:
claimName: elastic-volume
strategy:
type: RollingUpdate
register: output
- name: Show output
debug:
var: output
- name: Create deployment again
k8s:
state: present
inline: *deployment
register: output
- name: Ensure Deployment creation is idempotent
assert:
that: not output.changed
### Type tests
- name: Create a namespace from a string
k8s:
definition: |+
---
kind: Namespace
apiVersion: v1
metadata:
name: testing1
### https://github.com/ansible-collections/community.kubernetes/issues/111
- set_fact:
api_groups: "{{ lookup('kubernetes.core.k8s', cluster_info='api_groups') }}"
- debug:
var: api_groups
- name: Namespace should exist
k8s_info:
kind: Namespace
api_version: v1
name: testing1
register: k8s_info_testing1
failed_when: not k8s_info_testing1.resources or k8s_info_testing1.resources[0].status.phase != "Active"
- name: Create resources from a multidocument yaml string
k8s:
definition: |+
---
kind: Namespace
apiVersion: v1
metadata:
name: testing2
---
kind: Namespace
apiVersion: v1
metadata:
name: testing3
- name: Lookup namespaces
k8s_info:
api_version: v1
kind: Namespace
name: "{{ item }}"
loop:
- testing2
- testing3
register: k8s_namespaces
- name: Resources should exist
assert:
that: item.resources[0].status.phase == 'Active'
loop: "{{ k8s_namespaces.results }}"
- name: Delete resources from a multidocument yaml string
k8s:
state: absent
definition: |+
---
kind: Namespace
apiVersion: v1
metadata:
name: testing2
---
kind: Namespace
apiVersion: v1
metadata:
name: testing3
- name: Lookup namespaces
k8s_info:
api_version: v1
kind: Namespace
name: "{{ item }}"
loop:
- testing2
- testing3
register: k8s_namespaces
- name: Resources should not exist
assert:
that:
- not item.resources or item.resources[0].status.phase == "Terminating"
loop: "{{ k8s_namespaces.results }}"
- name: Create resources from a list
k8s:
definition:
- kind: Namespace
apiVersion: v1
metadata:
name: testing4
- kind: Namespace
apiVersion: v1
metadata:
name: testing5
- name: Lookup namespaces
k8s_info:
api_version: v1
kind: Namespace
name: "{{ item }}"
loop:
- testing4
- testing5
register: k8s_namespaces
- name: Resources should exist
assert:
that: item.resources[0].status.phase == 'Active'
loop: "{{ k8s_namespaces.results }}"
- name: Delete resources from a list
k8s:
state: absent
definition:
- kind: Namespace
apiVersion: v1
metadata:
name: testing4
- kind: Namespace
apiVersion: v1
metadata:
name: testing5
- name: Get info about terminating resources
k8s_info:
api_version: v1
kind: Namespace
name: "{{ item }}"
loop:
- testing4
- testing5
register: k8s_info
- name: Ensure resources are terminating if still in results
assert:
that: not item.resources or item.resources[0].status.phase == "Terminating"
loop: "{{ k8s_info.results }}"
- name: Create resources from a yaml string ending with ---
k8s:
definition: |+
---
kind: Namespace
apiVersion: v1
metadata:
name: testing6
---
- name: Namespace should exist
k8s_info:
kind: Namespace
api_version: v1
name: testing6
register: k8s_info_testing6
failed_when: not k8s_info_testing6.resources or k8s_info_testing6.resources[0].status.phase != "Active"
always:
- name: Delete all namespaces
k8s:
state: absent
definition:
- kind: Namespace
apiVersion: v1
metadata:
name: testing
- kind: Namespace
apiVersion: v1
metadata:
name: testing1
- kind: Namespace
apiVersion: v1
metadata:
name: testing2
- kind: Namespace
apiVersion: v1
metadata:
name: testing3
- kind: Namespace
apiVersion: v1
metadata:
name: testing4
- kind: Namespace
apiVersion: v1
metadata:
name: testing5
- kind: Namespace
apiVersion: v1
metadata:
name: testing6
ignore_errors: yes

View File

@@ -0,0 +1,2 @@
k8s
time=142

View File

@@ -0,0 +1 @@
---

View File

@@ -0,0 +1,240 @@
---
- vars:
    gc_namespace: garbage
    gc_name: garbage-job
    # This is a job definition that runs for 10 minutes and won't gracefully
    # shutdown. It allows us to test foreground vs background deletion.
    job_definition:
      # Job belongs to the batch API group; a bare "v1" is the core group
      # and only resolved here via the module's kind-only fallback lookup.
      apiVersion: batch/v1
      kind: Job
      metadata:
        name: "{{ gc_name }}"
        namespace: "{{ gc_namespace }}"
      spec:
        template:
          metadata:
            labels:
              job: gc
          spec:
            containers:
              - name: "{{ gc_name }}"
                image: busybox
                command:
                  - sleep
                  # Quoted so YAML keeps it a string argument, not an int.
                  - "600"
            restartPolicy: Never
block:
- name: Ensure namespace exists
k8s:
definition:
apiVersion: v1
kind: Namespace
metadata:
name: "{{ gc_namespace }}"
- name: Add a job
k8s:
definition: "{{ job_definition }}"
- name: Wait Job's pod
k8s_info:
kind: Pod
namespace: "{{ gc_namespace }}"
label_selectors:
- "job=gc"
register: wait_job
until: wait_job.resources
retries: 5
delay: 10
- name: Wait job's pod running
k8s_info:
kind: Pod
namespace: "{{ gc_namespace }}"
name: "{{ wait_job.resources[0].metadata.name }}"
wait: yes
register: job
- name: Assert job's pod is running
assert:
that: job.resources[0].status.phase == "Running"
- name: Delete job in foreground
k8s:
kind: Job
name: "{{ gc_name }}"
namespace: "{{ gc_namespace }}"
state: absent
wait: yes
wait_timeout: 100
delete_options:
propagationPolicy: Foreground
- name: Test job's pod does not exist
k8s_info:
kind: Pod
namespace: "{{ gc_namespace }}"
label_selectors:
- "job=gc"
register: job
- name: Assert job's pod does not exist
assert:
that: not job.resources
- name: Add a job
k8s:
definition: "{{ job_definition }}"
- name: Wait Job's pod
k8s_info:
kind: Pod
namespace: "{{ gc_namespace }}"
label_selectors:
- "job=gc"
register: wait_job
until: wait_job.resources
retries: 5
delay: 10
- name: Wait job's pod running
k8s_info:
kind: Pod
namespace: "{{ gc_namespace }}"
name: "{{ wait_job.resources[0].metadata.name }}"
wait: yes
register: job
- name: Assert job's pod is running
assert:
that: job.resources[0].status.phase == "Running"
- name: Delete job in background
k8s:
kind: Job
name: "{{ gc_name }}"
namespace: "{{ gc_namespace }}"
state: absent
wait: yes
wait_timeout: 100
delete_options:
propagationPolicy: "Background"
# The default grace period is 30s so this pod should still be running.
- name: Test job's pod exists
k8s_info:
kind: Pod
namespace: "{{ gc_namespace }}"
label_selectors:
- "job=gc"
register: job
- name: Assert job's pod still running
assert:
that: job.resources[0].status.phase == "Running"
- name: Add a job
k8s:
definition: "{{ job_definition }}"
- name: Wait Job's pod
k8s_info:
kind: Pod
namespace: "{{ gc_namespace }}"
label_selectors:
- "job=gc"
register: wait_job
until: wait_job.resources
retries: 5
delay: 10
- name: Wait job's pod running
k8s_info:
kind: Pod
namespace: "{{ gc_namespace }}"
name: "{{ wait_job.resources[0].metadata.name }}"
wait: yes
register: job
- name: Assert job's pod is running
assert:
that: job.resources[0].status.phase == "Running"
- name: Orphan the job's pod
k8s:
kind: Job
name: "{{ gc_name }}"
namespace: "{{ gc_namespace }}"
state: absent
wait: yes
wait_timeout: 100
delete_options:
propagationPolicy: "Orphan"
- name: Ensure grace period has expired
pause:
seconds: 60
- name: Test that job's pod is still running
k8s_info:
kind: Pod
namespace: "{{ gc_namespace }}"
label_selectors:
- "job=gc"
register: job
- name: Assert job's pod is still running
assert:
that: job.resources[0].status.phase == "Running"
- name: Add a job
k8s:
definition: "{{ job_definition }}"
register: job
- name: Delete a job with failing precondition
k8s:
kind: Job
name: "{{ gc_name }}"
namespace: "{{ gc_namespace }}"
state: absent
delete_options:
preconditions:
uid: not-a-valid-uid
ignore_errors: yes
register: result
- name: Assert that deletion failed
assert:
that: result is failed
- name: Delete a job using a valid precondition
k8s:
kind: Job
name: "{{ gc_name }}"
namespace: "{{ gc_namespace }}"
state: absent
delete_options:
preconditions:
uid: "{{ job.result.metadata.uid }}"
wait: yes
wait_timeout: 100
- name: Check that job is deleted
k8s_info:
kind: Job
namespace: "{{ gc_namespace }}"
name: "{{ gc_name }}"
register: job
- name: Assert job is deleted
assert:
that: not job.resources
always:
- name: Delete namespace
k8s:
kind: Namespace
name: "{{ gc_namespace }}"
state: absent

View File

@@ -0,0 +1,3 @@
k8s
k8s_info
time=13

View File

@@ -0,0 +1,210 @@
---
- block:
- set_fact:
wait_namespace: wait
multi_pod_one: multi-pod-1
multi_pod_two: multi-pod-2
- name: Ensure namespace exists
k8s:
definition:
apiVersion: v1
kind: Namespace
metadata:
name: "{{ wait_namespace }}"
- name: Add a simple pod with initContainer
k8s:
definition:
apiVersion: v1
kind: Pod
metadata:
name: "{{ k8s_pod_name }}"
namespace: "{{ wait_namespace }}"
spec:
initContainers:
- name: init-01
image: python:3.7-alpine
command: ['sh', '-c', 'sleep 20']
containers:
- name: utilitypod-01
image: python:3.7-alpine
command: ['sh', '-c', 'sleep 360']
- name: Wait and gather information about new pod
k8s_info:
name: "{{ k8s_pod_name }}"
kind: Pod
namespace: "{{ wait_namespace }}"
wait: yes
wait_sleep: 5
wait_timeout: 400
register: wait_info
- name: Assert that pod creation succeeded
assert:
that:
- wait_info is successful
- not wait_info.changed
- wait_info.resources[0].status.phase == "Running"
- name: Remove Pod
k8s:
api_version: v1
kind: Pod
name: "{{ k8s_pod_name }}"
namespace: "{{ wait_namespace }}"
state: absent
wait: yes
ignore_errors: yes
register: short_wait_remove_pod
- name: Check if pod is removed
assert:
that:
- short_wait_remove_pod is successful
- short_wait_remove_pod.changed
- name: Create multiple pod with initContainer
k8s:
definition:
apiVersion: v1
kind: Pod
metadata:
labels:
run: multi-box
name: "{{ multi_pod_one }}"
namespace: "{{ wait_namespace }}"
spec:
initContainers:
- name: init-01
image: python:3.7-alpine
command: ['sh', '-c', 'sleep 25']
containers:
- name: multi-pod-01
image: python:3.7-alpine
command: ['sh', '-c', 'sleep 360']
- name: Create another pod with same label as previous pod
k8s:
definition:
apiVersion: v1
kind: Pod
metadata:
labels:
run: multi-box
name: "{{ multi_pod_two }}"
namespace: "{{ wait_namespace }}"
spec:
initContainers:
- name: init-02
image: python:3.7-alpine
command: ['sh', '-c', 'sleep 25']
containers:
- name: multi-pod-02
image: python:3.7-alpine
command: ['sh', '-c', 'sleep 360']
- name: Wait and gather information about new pods
k8s_info:
kind: Pod
namespace: "{{ wait_namespace }}"
wait: yes
wait_sleep: 5
wait_timeout: 400
label_selectors:
- run == multi-box
register: wait_info
- name: Assert that pod creation succeeded
assert:
that:
- wait_info is successful
- not wait_info.changed
- wait_info.resources[0].status.phase == "Running"
- wait_info.resources[1].status.phase == "Running"
- name: "Remove Pod {{ multi_pod_one }}"
k8s:
api_version: v1
kind: Pod
name: "{{ multi_pod_one }}"
namespace: "{{ wait_namespace }}"
state: absent
wait: yes
ignore_errors: yes
register: multi_pod_one_remove
- name: "Check if {{ multi_pod_one }} pod is removed"
assert:
that:
- multi_pod_one_remove is successful
- multi_pod_one_remove.changed
- name: "Remove Pod {{ multi_pod_two }}"
k8s:
api_version: v1
kind: Pod
name: "{{ multi_pod_two }}"
namespace: "{{ wait_namespace }}"
state: absent
wait: yes
ignore_errors: yes
register: multi_pod_two_remove
- name: "Check if {{ multi_pod_two }} pod is removed"
assert:
that:
- multi_pod_two_remove is successful
- multi_pod_two_remove.changed
- name: "Look for existing API"
k8s_info:
api_version: apps/v1
kind: Deployment
register: existing_api
- name: Check if we informed the user the api does exist
assert:
that:
- existing_api.api_found
- name: "Look for non-existent API"
k8s_info:
api_version: pleasedonotcreatethisresource.example.com/v7
kind: DoesNotExist
register: dne_api
- name: Check if we informed the user the api does not exist
assert:
that:
- not dne_api.resources
- not dne_api.api_found
- name: Start timer
set_fact:
start: "{{ lookup('pipe', 'date +%s') }}"
- name: Wait for non-existent pod to be created
k8s_info:
kind: Pod
name: does-not-exist
namespace: "{{ wait_namespace }}"
wait: yes
wait_timeout: 45
register: result
- name: Check that module waited
assert:
that:
- "{{ lookup('pipe', 'date +%s') }} - {{ start }} > 30"
vars:
k8s_pod_name: pod-info-1
always:
- name: Remove namespace
k8s:
kind: Namespace
name: "{{ wait_namespace }}"
state: absent

View File

@@ -0,0 +1,3 @@
k8s_json_patch
k8s
time=33

View File

@@ -0,0 +1,174 @@
- vars:
namespace: json-patch
pod: json-patch
deployment: json-patch
k8s_wait_timeout: 240
block:
- name: Ensure namespace exists
kubernetes.core.k8s:
kind: namespace
name: "{{ namespace }}"
- name: Create a simple pod
kubernetes.core.k8s:
definition:
apiVersion: v1
kind: Pod
metadata:
namespace: "{{ namespace }}"
name: "{{ pod }}"
labels:
label1: foo
spec:
containers:
- image: busybox:musl
name: busybox
command:
- sh
- -c
- while true; do echo $(date); sleep 10; done
wait: yes
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
- name: Add a label and replace the image in checkmode
kubernetes.core.k8s_json_patch:
kind: Pod
namespace: "{{ namespace }}"
name: "{{ pod }}"
patch:
- op: add
path: /metadata/labels/label2
value: bar
- op: replace
path: /spec/containers/0/image
value: busybox:glibc
check_mode: yes
register: result
- name: Assert patch was made
assert:
that:
- result.changed
- result.result.metadata.labels.label2 == "bar"
- result.result.spec.containers[0].image == "busybox:glibc"
- name: Describe pod
kubernetes.core.k8s_info:
kind: Pod
name: "{{ pod }}"
namespace: "{{ namespace }}"
register: result
- name: Assert pod has not changed
assert:
that:
- result.resources[0].metadata.labels.label2 is not defined
- result.resources[0].spec.containers[0].image == "busybox:musl"
- name: Add a label and replace the image
kubernetes.core.k8s_json_patch:
kind: Pod
namespace: "{{ namespace }}"
name: "{{ pod }}"
patch:
- op: add
path: /metadata/labels/label2
value: bar
- op: replace
path: /spec/containers/0/image
value: busybox:glibc
register: result
- name: Assert patch was made
assert:
that:
- result.changed
- name: Describe pod
kubernetes.core.k8s_info:
kind: Pod
name: "{{ pod }}"
namespace: "{{ namespace }}"
register: result
- name: Assert that both patch operations have been applied
assert:
that:
- result.resources[0].metadata.labels.label2 == "bar"
- result.resources[0].spec.containers[0].image == "busybox:glibc"
- name: Apply the same patch to the pod
kubernetes.core.k8s_json_patch:
kind: Pod
namespace: "{{ namespace }}"
name: "{{ pod }}"
patch:
- op: add
path: /metadata/labels/label2
value: bar
- op: replace
path: /spec/containers/0/image
value: busybox:glibc
register: result
- name: Assert that no changes were made
assert:
that:
- not result.changed
- name: Create a simple deployment
kubernetes.core.k8s:
wait: yes
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
definition:
apiVersion: apps/v1
kind: Deployment
metadata:
namespace: "{{ namespace }}"
name: "{{ deployment }}"
labels:
name: "{{ deployment }}"
spec:
replicas: 2
selector:
matchLabels:
app: busybox
template:
metadata:
labels:
app: busybox
spec:
containers:
- name: busybox
image: busybox
command:
- sh
- -c
- while true; do echo $(date); sleep 10; done
- name: Apply patch and wait for deployment to be ready
kubernetes.core.k8s_json_patch:
kind: Deployment
namespace: "{{ namespace }}"
name: "{{ deployment }}"
patch:
- op: replace
path: /spec/replicas
value: 3
wait: yes
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
register: result
- name: Assert all replicas are available
assert:
that:
- result.result.status.availableReplicas == 3
always:
- name: Ensure namespace has been deleted
kubernetes.core.k8s:
kind: Namespace
name: "{{ namespace }}"
state: absent
ignore_errors: yes

View File

@@ -0,0 +1,3 @@
k8s_info
k8s
time=22

View File

@@ -0,0 +1,147 @@
---
- block:
- name: Ensure testing1 namespace exists
k8s:
api_version: v1
kind: Namespace
name: testing1
- block:
- name: Create configmaps
k8s:
namespace: testing1
definition:
apiVersion: v1
kind: ConfigMapList
items: '{{ configmaps }}'
- name: Get ConfigMaps
k8s_info:
api_version: v1
kind: ConfigMap
namespace: testing1
label_selectors:
- app=test
register: cms
- name: All three configmaps should exist
assert:
that: item.data.a is defined
with_items: '{{ cms.resources }}'
- name: Delete configmaps
k8s:
state: absent
namespace: testing1
definition:
apiVersion: v1
kind: ConfigMapList
items: '{{ configmaps }}'
- name: Get ConfigMaps
k8s_info:
api_version: v1
kind: ConfigMap
namespace: testing1
label_selectors:
- app=test
register: cms
- name: All three configmaps should not exist
assert:
that: not cms.resources
vars:
configmaps:
- metadata:
name: list-example-1
labels:
app: test
data:
a: first
- metadata:
name: list-example-2
labels:
app: test
data:
a: second
- metadata:
name: list-example-3
labels:
app: test
data:
a: third
- block:
- name: Create list of arbitrary resources
k8s:
namespace: testing1
definition:
apiVersion: v1
kind: List
namespace: testing1
items: '{{ resources }}'
- name: Get the created resources
k8s_info:
api_version: '{{ item.apiVersion }}'
kind: '{{ item.kind }}'
namespace: testing1
name: '{{ item.metadata.name }}'
register: list_resources
with_items: '{{ resources }}'
- name: All resources should exist
assert:
that: ((list_resources.results | sum(attribute="resources", start=[])) | length) == (resources | length)
- name: Delete list of arbitrary resources
k8s:
state: absent
namespace: testing1
definition:
apiVersion: v1
kind: List
namespace: testing1
items: '{{ resources }}'
- name: Get the resources
k8s_info:
api_version: '{{ item.apiVersion }}'
kind: '{{ item.kind }}'
namespace: testing1
name: '{{ item.metadata.name }}'
register: list_resources
with_items: '{{ resources }}'
- name: The resources should not exist
assert:
that: not ((list_resources.results | sum(attribute="resources", start=[])) | length)
vars:
resources:
- apiVersion: v1
kind: ConfigMap
metadata:
name: list-example-4
data:
key: value
- apiVersion: v1
kind: Service
metadata:
name: list-example-svc
labels:
app: test
spec:
selector:
app: test
ports:
- protocol: TCP
targetPort: 8000
name: port-8000-tcp
port: 8000
always:
- name: Remove testing1 namespace
k8s:
kind: Namespace
name: testing1
state: absent
ignore_errors: yes

View File

@@ -0,0 +1,2 @@
k8s_log
time=27

View File

@@ -0,0 +1,128 @@
---
- block:
- set_fact:
k8s_wait_timeout: 240
- name: ensure that k8s-log namespace exists
k8s:
kind: Namespace
name: k8s-log
- name: create hello-world deployment
k8s:
wait: yes
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
definition:
apiVersion: apps/v1
kind: Deployment
metadata:
name: hello-world
namespace: k8s-log
spec:
selector:
matchLabels:
app: hello-world
template:
metadata:
labels:
app: hello-world
spec:
containers:
- image: busybox
name: hello-world
command: ['sh']
args: ['-c', 'while true ; do echo "hello world" && sleep 10 ; done']
restartPolicy: Always
- name: retrieve the log by providing the deployment
k8s_log:
api_version: apps/v1
kind: Deployment
namespace: k8s-log
name: hello-world
register: deployment_log
- name: verify that the log can be retrieved via the deployment
assert:
that:
- "'hello world' in deployment_log.log"
- item == 'hello world' or item == ''
with_items: '{{ deployment_log.log_lines }}'
- name: retrieve the log with a label selector
k8s_log:
namespace: k8s-log
label_selectors:
- 'app=hello-world'
register: label_selector_log
- name: verify that the log can be retrieved via the label
assert:
that:
- "'hello world' in label_selector_log.log"
- item == 'hello world' or item == ''
with_items: '{{ label_selector_log.log_lines }}'
- name: get the hello-world pod
k8s_info:
kind: Pod
namespace: k8s-log
label_selectors:
- 'app=hello-world'
register: k8s_log_pods
- name: retrieve the log directly with the pod name
k8s_log:
namespace: k8s-log
name: '{{ k8s_log_pods.resources.0.metadata.name }}'
register: pod_log
- name: verify that the log can be retrieved via the pod name
assert:
that:
- "'hello world' in pod_log.log"
- item == 'hello world' or item == ''
with_items: '{{ pod_log.log_lines }}'
- name: Create a job that calculates 7
k8s:
state: present
wait: yes
wait_timeout: 120
wait_condition:
type: Complete
status: 'True'
definition:
apiVersion: batch/v1
kind: Job
metadata:
name: int-log
namespace: k8s-log
spec:
template:
spec:
containers:
- name: busybox
image: busybox
command: ["echo", "7"]
restartPolicy: Never
backoffLimit: 4
- name: retrieve logs from the job
k8s_log:
api_version: batch/v1
kind: Job
namespace: k8s-log
name: int-log
register: job_logs
- name: verify the log was successfully retrieved
assert:
that: job_logs.log_lines[0] == "7"
always:
- name: ensure that namespace is removed
k8s:
kind: Namespace
name: k8s-log
state: absent

View File

@@ -0,0 +1,3 @@
time=19
k8s
k8s_info

View File

@@ -0,0 +1,143 @@
- block:
- name: Define common facts
set_fact:
k8s_patch_namespace: "patch"
k8s_strategic_merge: "strategic-merge"
k8s_merge: "json-merge"
k8s_json: "json-patch"
- name: Ensure the namespace exist
kubernetes.core.k8s:
kind: namespace
name: "{{ k8s_patch_namespace }}"
# Strategic merge
- name: create a simple nginx deployment
kubernetes.core.k8s:
namespace: "{{ k8s_patch_namespace }}"
definition:
apiVersion: apps/v1
kind: Deployment
metadata:
name: "{{ k8s_strategic_merge }}"
spec:
replicas: 2
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: "{{ k8s_strategic_merge }}-ctr"
image: nginx
tolerations:
- effect: NoSchedule
key: dedicated
value: "test-strategic-merge"
- name: patch service using strategic merge
kubernetes.core.k8s:
kind: Deployment
namespace: "{{ k8s_patch_namespace }}"
name: "{{ k8s_strategic_merge }}"
definition:
spec:
template:
spec:
containers:
- name: "{{ k8s_strategic_merge }}-ctr-2"
image: redis
register: depl_patch
- name: validate that resource was patched
assert:
that:
- depl_patch.changed
- name: describe "{{ k8s_strategic_merge }}" deployment
kubernetes.core.k8s_info:
kind: Deployment
name: "{{ k8s_strategic_merge }}"
namespace: "{{ k8s_patch_namespace }}"
register: deployment_out
- name: assert that deployment contains expected images
assert:
that:
- deployment_out.resources[0].spec.template.spec.containers | selectattr('image','equalto','nginx') | list | length == 1
- deployment_out.resources[0].spec.template.spec.containers | selectattr('image','equalto','redis') | list | length == 1
# Json merge
- name: create a simple nginx deployment (testing merge patch)
kubernetes.core.k8s:
namespace: "{{ k8s_patch_namespace }}"
definition:
apiVersion: apps/v1
kind: Deployment
metadata:
name: "{{ k8s_merge }}"
spec:
replicas: 2
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: "{{ k8s_merge }}-ctr"
image: nginx
tolerations:
- effect: NoSchedule
key: dedicated
value: "test-strategic-merge"
- name: patch service using json merge patch
kubernetes.core.k8s:
kind: Deployment
namespace: "{{ k8s_patch_namespace }}"
name: "{{ k8s_merge }}"
merge_type:
- merge
definition:
spec:
template:
spec:
containers:
- name: "{{ k8s_merge }}-ctr-2"
image: python
register: merge_patch
- name: validate that resource was patched
assert:
that:
- merge_patch.changed
- name: describe "{{ k8s_merge }}" deployment
kubernetes.core.k8s_info:
kind: Deployment
name: "{{ k8s_merge }}"
namespace: "{{ k8s_patch_namespace }}"
register: merge_out
- name: assert that deployment contains expected images
assert:
that:
- merge_out.resources[0].spec.template.spec.containers | list | length == 1
- merge_out.resources[0].spec.template.spec.containers[0].image == 'python'
always:
- name: Ensure namespace has been deleted
kubernetes.core.k8s:
kind: namespace
name: "{{ k8s_patch_namespace }}"
state: absent
ignore_errors: yes

View File

@@ -0,0 +1,3 @@
time=20
k8s
k8s_info

View File

@@ -0,0 +1,123 @@
---
- block:
- set_fact:
patch_only_namespace:
first: patched-namespace-1
second: patched-namespace-2
- name: Ensure namespace {{ patch_only_namespace.first }} exist
kubernetes.core.k8s:
definition:
apiVersion: v1
kind: Namespace
metadata:
name: "{{ patch_only_namespace.first }}"
labels:
existingLabel: "labelValue"
annotations:
existingAnnotation: "annotationValue"
wait: yes
- name: Ensure namespace {{ patch_only_namespace.second }} does not exist
kubernetes.core.k8s_info:
kind: namespace
name: "{{ patch_only_namespace.second }}"
register: second_namespace
- name: assert that second namespace does not exist
assert:
that:
- second_namespace.resources | length == 0
- name: apply patch on existing resource
kubernetes.core.k8s:
state: patched
wait: yes
definition: |
---
apiVersion: v1
kind: Namespace
metadata:
name: "{{ patch_only_namespace.first }}"
labels:
ansible: patched
---
apiVersion: v1
kind: Namespace
metadata:
name: "{{ patch_only_namespace.second }}"
labels:
ansible: patched
register: patch_resource
- name: assert that patch succeed
assert:
that:
- patch_resource.changed
- patch_resource.result.results | selectattr('warning', 'defined') | list | length == 1
- name: Ensure namespace {{ patch_only_namespace.first }} was patched correctly
kubernetes.core.k8s_info:
kind: namespace
name: "{{ patch_only_namespace.first }}"
register: first_namespace
- name: assert labels are as expected
assert:
that:
- first_namespace.resources[0].metadata.labels.ansible == "patched"
- first_namespace.resources[0].metadata.labels.existingLabel == "labelValue"
- first_namespace.resources[0].metadata.annotations.existingAnnotation == "annotationValue"
- name: Ensure namespace {{ patch_only_namespace.second }} was not created
kubernetes.core.k8s_info:
kind: namespace
name: "{{ patch_only_namespace.second }}"
register: second_namespace
- name: assert that second namespace does not exist
assert:
that:
- second_namespace.resources | length == 0
- name: patch all resources (create if does not exist)
kubernetes.core.k8s:
state: present
definition: |
---
apiVersion: v1
kind: Namespace
metadata:
name: "{{ patch_only_namespace.first }}"
labels:
patch: ansible
---
apiVersion: v1
kind: Namespace
metadata:
name: "{{ patch_only_namespace.second }}"
labels:
patch: ansible
wait: yes
register: patch_resource
- name: Ensure namespace {{ patch_only_namespace.second }} was created
kubernetes.core.k8s_info:
kind: namespace
name: "{{ patch_only_namespace.second }}"
register: second_namespace
- name: assert that second namespace exist
assert:
that:
- second_namespace.resources | length == 1
always:
- name: Remove namespace
kubernetes.core.k8s:
kind: Namespace
name: "{{ item }}"
state: absent
with_items:
- "{{ patch_only_namespace.first }}"
- "{{ patch_only_namespace.second }}"
ignore_errors: true

View File

@@ -0,0 +1,4 @@
k8s_rollback
k8s
k8s_info
time=187

View File

@@ -0,0 +1,222 @@
---
- block:
- name: Set variables
set_fact:
namespace: "testingrollback"
k8s_wait_timeout: 240
- name: Create a namespace
k8s:
name: "{{ namespace }}"
kind: Namespace
api_version: v1
apply: no
register: output
- name: show output
debug:
var: output
- name: Create a deployment
k8s:
state: present
wait: yes
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
inline: &deploy
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx-deploy
labels:
app: nginx
namespace: "{{ namespace }}"
spec:
replicas: 3
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx:1.17
ports:
- containerPort: 80
register: output
- name: Show output
debug:
var: output
- name: Crash the existing deployment
k8s:
state: present
wait: yes
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
definition:
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx-deploy
labels:
app: nginx
namespace: "{{ namespace }}"
spec:
replicas: 3
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx:1.0.23449928384992872784
ports:
- containerPort: 80
ignore_errors: yes
register: output
- name: Rolling Back the crashed deployment
k8s_rollback:
api_version: apps/v1
kind: Deployment
name: nginx-deploy
namespace: "{{ namespace }}"
when: output.failed
register: output
- name: Show output
debug:
var: output
- name: Create a DaemonSet
k8s:
state: present
wait: yes
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
definition:
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: fluentd-elasticsearch
namespace: "{{ namespace }}"
labels:
k8s-app: fluentd-logging
spec:
selector:
matchLabels:
name: fluentd-elasticsearch
template:
metadata:
labels:
name: fluentd-elasticsearch
spec:
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
containers:
- name: fluentd-elasticsearch
image: quay.io/fluentd_elasticsearch/fluentd:v2.5.2
resources:
limits:
memory: 200Mi
requests:
cpu: 100m
memory: 200Mi
volumeMounts:
- name: varlog
mountPath: /var/log
- name: varlibdockercontainers
mountPath: /var/lib/docker/containers
readOnly: true
terminationGracePeriodSeconds: 30
volumes:
- name: varlog
hostPath:
path: /var/log
- name: varlibdockercontainers
hostPath:
path: /var/lib/docker/containers
register: output
- name: Show output
debug:
var: output
- name: Crash the existing DaemonSet
k8s:
state: present
wait: yes
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
definition:
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: fluentd-elasticsearch
namespace: "{{ namespace }}"
labels:
k8s-app: fluentd-logging
spec:
selector:
matchLabels:
name: fluentd-elasticsearch
template:
metadata:
labels:
name: fluentd-elasticsearch
spec:
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
containers:
- name: fluentd-elasticsearch
image: quay.io/fluentd_elasticsearch/fluentd:v2734894949
resources:
limits:
memory: 200Mi
requests:
cpu: 100m
memory: 200Mi
volumeMounts:
- name: varlog
mountPath: /var/log
- name: varlibdockercontainers
mountPath: /var/lib/docker/containers
readOnly: true
terminationGracePeriodSeconds: 30
volumes:
- name: varlog
hostPath:
path: /var/log
- name: varlibdockercontainers
hostPath:
path: /var/lib/docker/containers
ignore_errors: yes
register: output
- name: Rolling Back the crashed DaemonSet
k8s_rollback:
api_version: apps/v1
kind: DaemonSet
name: fluentd-elasticsearch
namespace: "{{ namespace }}"
when: output.failed
register: output
- name: Show output
debug:
var: output
always:
- name: Delete {{ namespace }} namespace
k8s:
name: "{{ namespace }}"
kind: Namespace
api_version: v1
state: absent

View File

@@ -0,0 +1,4 @@
k8s_scale
k8s
k8s_info
time=150

View File

@@ -0,0 +1,40 @@
---
# Defaults for the k8s_scale integration tasks. They assemble a reusable
# pod template (k8s_pod_template) from small overridable pieces; callers
# set k8s_pod_name / k8s_pod_image (and optionally ports, env, command)
# via task-level vars.
k8s_pod_metadata:
  labels:
    app: "{{ k8s_pod_name }}"
# Single-container pod spec; readinessProbe always succeeds (/bin/true)
# after a 15s delay so 'wait' conditions have something to observe.
k8s_pod_spec:
  serviceAccount: "{{ k8s_pod_service_account }}"
  containers:
    - image: "{{ k8s_pod_image }}"
      imagePullPolicy: Always
      name: "{{ k8s_pod_name }}"
      command: "{{ k8s_pod_command }}"
      readinessProbe:
        initialDelaySeconds: 15
        exec:
          command:
            - /bin/true
      resources: "{{ k8s_pod_resources }}"
      ports: "{{ k8s_pod_ports }}"
      env: "{{ k8s_pod_env }}"
k8s_pod_service_account: default
k8s_pod_resources:
  limits:
    cpu: "100m"
    memory: "100Mi"
# Empty by default; tasks override these per test case.
k8s_pod_command: []
k8s_pod_ports: []
k8s_pod_env: []
# Composed template consumed as 'template:' inside Deployment specs.
k8s_pod_template:
  metadata: "{{ k8s_pod_metadata }}"
  spec: "{{ k8s_pod_spec }}"
# Upper bound (seconds) for wait_timeout on create/scale operations.
k8s_wait_timeout: 400

View File

@@ -0,0 +1,50 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: test0
labels:
app: nginx
spec:
replicas: 3
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx:1.14.2
ports:
- containerPort: 80
- name: hello
image: busybox
command: ['sh', '-c', 'echo "Hello, from test0" && sleep 3600']
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: test1
labels:
app: nginx
spec:
replicas: 3
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx:1.14.2
ports:
- containerPort: 80
- name: hello
image: busybox
command: ['sh', '-c', 'echo "Hello, from test1" && sleep 3600']

View File

@@ -0,0 +1,278 @@
---
- block:
- set_fact:
scale_namespace: scale
- name: Ensure namespace exists
k8s:
definition:
apiVersion: v1
kind: Namespace
metadata:
name: "{{ scale_namespace }}"
- name: Add a deployment
k8s:
definition:
apiVersion: apps/v1
kind: Deployment
metadata:
name: scale-deploy
namespace: "{{ scale_namespace }}"
spec:
replicas: 1
selector:
matchLabels:
app: "{{ k8s_pod_name }}"
template: "{{ k8s_pod_template }}"
wait: yes
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
apply: yes
vars:
k8s_pod_name: scale-deploy
k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-green
k8s_pod_ports:
- containerPort: 8080
name: http
protocol: TCP
- name: Get pods in scale-deploy
k8s_info:
kind: Pod
label_selectors:
- app=scale-deploy
namespace: "{{ scale_namespace }}"
field_selectors:
- status.phase=Running
- name: Scale the deployment
k8s_scale:
api_version: apps/v1
kind: Deployment
name: scale-deploy
namespace: "{{ scale_namespace }}"
replicas: 0
wait: yes
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
register: scale_down
- name: Get pods in scale-deploy
k8s_info:
kind: Pod
label_selectors:
- app=scale-deploy
namespace: "{{ scale_namespace }}"
field_selectors:
- status.phase=Running
register: scale_down_deploy_pods
until: scale_down_deploy_pods.resources | length == 0
retries: 6
delay: 5
- name: Ensure that scale down took effect
assert:
that:
- scale_down is changed
- '"duration" in scale_down'
- scale_down.diff
- name: Reapply the earlier deployment
k8s:
definition:
api_version: apps/v1
kind: Deployment
metadata:
name: scale-deploy
namespace: "{{ scale_namespace }}"
spec:
replicas: 1
selector:
matchLabels:
app: "{{ k8s_pod_name }}"
template: "{{ k8s_pod_template }}"
wait: yes
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
apply: yes
vars:
k8s_pod_name: scale-deploy
k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-green
k8s_pod_ports:
- containerPort: 8080
name: http
protocol: TCP
register: reapply_after_scale
- name: Get pods in scale-deploy
k8s_info:
kind: Pod
label_selectors:
- app=scale-deploy
namespace: "{{ scale_namespace }}"
field_selectors:
- status.phase=Running
register: scale_up_deploy_pods
- name: Ensure that reapply after scale worked
assert:
that:
- reapply_after_scale is changed
- scale_up_deploy_pods.resources | length == 1
- name: Scale the deployment up
k8s_scale:
api_version: apps/v1
kind: Deployment
name: scale-deploy
namespace: "{{ scale_namespace }}"
replicas: 2
wait: yes
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
register: scale_up
- name: Get pods in scale-deploy
k8s_info:
kind: Pod
label_selectors:
- app=scale-deploy
field_selectors:
- status.phase=Running
namespace: "{{ scale_namespace }}"
register: scale_up_further_deploy_pods
- name: Ensure that scale up worked
assert:
that:
- scale_up is changed
- '"duration" in scale_up'
- scale_up.diff
- scale_up_further_deploy_pods.resources | length == 2
- name: Don't scale the deployment up
k8s_scale:
api_version: apps/v1
kind: Deployment
name: scale-deploy
namespace: "{{ scale_namespace }}"
replicas: 2
wait: yes
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
register: scale_up_noop
- name: Get pods in scale-deploy
k8s_info:
kind: Pod
label_selectors:
- app=scale-deploy
field_selectors:
- status.phase=Running
namespace: "{{ scale_namespace }}"
register: scale_up_noop_pods
- name: Ensure that no-op scale up worked
assert:
that:
- scale_up_noop is not changed
- not scale_up_noop.diff
- scale_up_noop_pods.resources | length == 2
- '"duration" in scale_up_noop'
- name: Scale deployment down without wait
k8s_scale:
api_version: apps/v1
kind: Deployment
name: scale-deploy
namespace: "{{ scale_namespace }}"
replicas: 1
wait: no
register: scale_down_no_wait
- name: Ensure that scale down succeeds
k8s_info:
kind: Pod
label_selectors:
- app=scale-deploy
namespace: "{{ scale_namespace }}"
register: scale_down_no_wait_pods
retries: 6
delay: 5
until: scale_down_no_wait_pods.resources | length == 1
- name: Ensure that scale down without wait worked
assert:
that:
- scale_down_no_wait is changed
- scale_down_no_wait.diff
- scale_down_no_wait_pods.resources | length == 1
# scale multiple resource using label selectors
- name: create deployment
kubernetes.core.k8s:
namespace: "{{ scale_namespace }}"
src: files/deployment.yaml
- name: list deployment
kubernetes.core.k8s_info:
kind: Deployment
namespace: "{{ scale_namespace }}"
label_selectors:
- app=nginx
register: resource
- assert:
that:
- resource.resources | list | length == 2
- name: scale deployment using resource version
kubernetes.core.k8s_scale:
replicas: 2
kind: Deployment
namespace: "{{ scale_namespace }}"
resource_version: 0
label_selectors:
- app=nginx
wait: yes
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
register: scale_out
- assert:
that:
- not scale_out.changed
- scale_out.results | selectattr('warning', 'defined') | list | length == 2
- name: scale deployment using current replicas (wrong value)
kubernetes.core.k8s_scale:
replicas: 2
current_replicas: 4
kind: Deployment
namespace: "{{ scale_namespace }}"
label_selectors:
- app=nginx
register: scale_out
- assert:
that:
- not scale_out.changed
- scale_out.results | selectattr('warning', 'defined') | list | length == 2
- name: scale deployment using current replicas (right value)
kubernetes.core.k8s_scale:
replicas: 2
current_replicas: 3
kind: Deployment
namespace: "{{ scale_namespace }}"
label_selectors:
- app=nginx
wait: yes
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
register: scale_out
- assert:
that:
- scale_out.changed
- scale_out.results | map(attribute='result.status.replicas') | list | unique == [2]
always:
- name: Remove namespace
k8s:
kind: Namespace
name: "{{ scale_namespace }}"
state: absent

View File

@@ -0,0 +1,4 @@
k8s_service
k8s
k8s_info
time=75

View File

@@ -0,0 +1,254 @@
---
- block:
- set_fact:
template_namespace: template-test
k8s_wait_timeout: 240
- name: Ensure namespace exists
k8s:
definition:
apiVersion: v1
kind: Namespace
metadata:
name: "{{ template_namespace }}"
- name: Check if k8s_service does not inherit parameter
kubernetes.core.k8s_service:
template: "pod_template_one.j2"
state: present
ignore_errors: yes
register: r
- name: Check for expected failures in last tasks
assert:
that:
- r.failed
- "'is only supported parameter for' in r.msg"
- name: Specify both definition and template
kubernetes.core.k8s:
state: present
template: "pod_template_one.j2"
definition:
apiVersion: apps/v1
kind: Deployment
metadata:
name: apply-deploy
namespace: "{{ template_namespace }}"
spec:
replicas: 1
selector:
matchLabels:
app: "{{ k8s_pod_name_one }}"
vars:
k8s_pod_name_one: pod
k8s_pod_namespace: "{{ template_namespace }}"
register: r
ignore_errors: yes
- name: Check if definition and template are mutually exclusive
assert:
that:
- r.failed
- "'parameters are mutually exclusive' in r.msg"
- name: Specify both src and template
kubernetes.core.k8s:
state: present
src: "../templates/pod_template_one.j2"
template: "pod_template_one.j2"
vars:
k8s_pod_name_one: pod
k8s_pod_namespace: "{{ template_namespace }}"
register: r
ignore_errors: yes
- name: Check if src and template are mutually exclusive
assert:
that:
- r.failed
- "'parameters are mutually exclusive' in r.msg"
- name: Create pod using template (direct specification)
kubernetes.core.k8s:
template: "pod_template_one.j2"
wait: yes
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
vars:
k8s_pod_name_one: pod-1
k8s_pod_namespace: "{{ template_namespace }}"
register: r
- name: Assert that pod creation succeeded using template
assert:
that:
- r is successful
- name: Create pod using template with wrong parameter
kubernetes.core.k8s:
template:
- default
wait: yes
vars:
k8s_pod_name_one: pod-2
k8s_pod_namespace: "{{ template_namespace }}"
register: r
ignore_errors: True
- name: Assert that pod creation failed using template due to wrong parameter
assert:
that:
- r is failed
- name: Create pod using template (path parameter)
kubernetes.core.k8s:
template:
path: "pod_template_one.j2"
wait: yes
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
vars:
k8s_pod_name_one: pod-3
k8s_pod_namespace: "{{ template_namespace }}"
register: r
- name: Assert that pod creation succeeded using template
assert:
that:
- r is successful
- name: Create pod using template (different variable string)
kubernetes.core.k8s:
template:
path: "pod_template_two.j2"
variable_start_string: '[['
variable_end_string: ']]'
wait: yes
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
vars:
k8s_pod_name_two: pod-4
k8s_pod_namespace: "[[ template_namespace ]]"
ansible_python_interpreter: "[[ ansible_playbook_python ]]"
register: r
- name: Assert that pod creation succeeded using template
assert:
that:
- r is successful
- name: Create pods using multi-resource template
kubernetes.core.k8s:
template:
path: "pod_template_three.j2"
wait: yes
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
vars:
k8s_pod_name_three_one: pod-5
k8s_pod_name_three_two: pod-6
k8s_pod_namespace: "{{ template_namespace }}"
register: r
- name: Assert that pod creation succeeded using template
assert:
that:
- r is successful
- name: Create pods using list of template
kubernetes.core.k8s:
template:
- pod_template_one.j2
- path: "pod_template_two.j2"
variable_start_string: '[['
variable_end_string: ']]'
- path: "pod_template_three.j2"
wait: yes
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
vars:
k8s_pod_name_one: pod-7
k8s_pod_name_two: pod-8
k8s_pod_name_three_one: pod-9
k8s_pod_name_three_two: pod-10
k8s_pod_namespace: "template-test"
register: r
- name: Assert that pod creation succeeded using template
assert:
that:
- r is successful
# continue_on_error
- name: define variable for test
set_fact:
k8s_pod_name_one: pod-11
k8s_pod_bad_name: pod-12
k8s_pod_namespace: "{{ template_namespace }}"
k8s_pod_bad_namespace: "dummy-namespace-012345"
- name: delete pod if it exists
kubernetes.core.k8s:
template: pod_template_one.j2
wait: true
state: absent
- name: create pod on bad namespace ( continue_on_error set to default(false) )
kubernetes.core.k8s:
template:
- pod_with_bad_namespace.j2
- pod_template_one.j2
register: resource
ignore_errors: true
- name: validate that creation failed
assert:
that:
- resource is failed
- '"Failed to create object" in resource.msg'
- name: assert pod has not been created
kubernetes.core.k8s_info:
kind: "{{ item.kind }}"
namespace: "{{ item.namespace }}"
name: "{{ item.name }}"
with_items:
- kind: pod
namespace: "{{ k8s_pod_bad_namespace }}"
name: "{{ k8s_pod_bad_name }}"
- kind: pod
namespace: "{{ k8s_pod_name_one }}"
name: "{{ k8s_pod_namespace }}"
register: resource
- name: check that resources creation failed
assert:
that:
- '{{ resource.results[0].resources | length == 0 }}'
- '{{ resource.results[1].resources | length == 0 }}'
- name: create pod without namespace (continue_on_error = true)
kubernetes.core.k8s:
template:
- pod_with_bad_namespace.j2
- pod_template_one.j2
continue_on_error: true
wait: true
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
register: resource
ignore_errors: true
- name: validate that creation succeeded
assert:
that:
- resource is successful
- name: validate resource creation succeeded for some and failed for others
assert:
that:
- resource is successful
- resource.result.results | selectattr('changed') | list | length == 1
- resource.result.results | selectattr('error', 'defined') | list | length == 1
always:
- name: Remove namespace (Cleanup)
kubernetes.core.k8s:
kind: Namespace
name: "{{ template_namespace }}"
state: absent

View File

@@ -0,0 +1,16 @@
apiVersion: v1
kind: Pod
metadata:
labels:
app: "{{ k8s_pod_name_one }}"
name: '{{ k8s_pod_name_one }}'
namespace: '{{ k8s_pod_namespace }}'
spec:
containers:
- args:
- /bin/sh
- -c
- while true; do echo $(date); sleep 10; done
image: python:3.7-alpine
imagePullPolicy: Always
name: '{{ k8s_pod_name_one }}'

View File

@@ -0,0 +1,35 @@
---
apiVersion: v1
kind: Pod
metadata:
labels:
app: "{{ k8s_pod_name_three_one }}"
name: '{{ k8s_pod_name_three_one }}'
namespace: '{{ k8s_pod_namespace }}'
spec:
containers:
- args:
- /bin/sh
- -c
- while true; do echo $(date); sleep 10; done
image: python:3.7-alpine
imagePullPolicy: Always
name: '{{ k8s_pod_name_three_one }}'
---
apiVersion: v1
kind: Pod
metadata:
labels:
app: "{{ k8s_pod_name_three_two }}"
name: '{{ k8s_pod_name_three_two }}'
namespace: '{{ k8s_pod_namespace }}'
spec:
containers:
- args:
- /bin/sh
- -c
- while true; do echo $(date); sleep 10; done
image: python:3.7-alpine
imagePullPolicy: Always
name: '{{ k8s_pod_name_three_two }}'

View File

@@ -0,0 +1,16 @@
# Jinja2-templated Pod manifest using non-default delimiters ('[[ ... ]]'
# rather than '{{ ... }}').
# NOTE(review): presumably rendered with custom
# variable_start_string/variable_end_string settings so literal '{{ }}' can
# pass through untouched — confirm against the task that consumes this file.
apiVersion: v1
kind: Pod
metadata:
  labels:
    app: '[[ k8s_pod_name_two ]]'
  name: '[[ k8s_pod_name_two ]]'
  namespace: '[[ k8s_pod_namespace ]]'
spec:
  containers:
    - args:
        - /bin/sh
        - -c
        - while true; do echo $(date); sleep 10; done
      image: python:3.7-alpine
      imagePullPolicy: Always
      name: '[[ k8s_pod_name_two ]]'

View File

@@ -0,0 +1,16 @@
# Jinja2-templated Pod manifest parameterized by "bad" name/namespace
# variables. Rendered by the continue_on_error test task together with a
# valid template, so that one resource fails to create while the other
# succeeds.
apiVersion: v1
kind: Pod
metadata:
  labels:
    app: "{{ k8s_pod_bad_name }}"
  name: '{{ k8s_pod_bad_name }}'
  namespace: '{{ k8s_pod_bad_namespace }}'
spec:
  containers:
    - args:
        - /bin/sh
        - -c
        - while true; do echo $(date); sleep 10; done
      image: python:3.7-alpine
      imagePullPolicy: Always
      name: '{{ k8s_pod_bad_name }}'

View File

@@ -0,0 +1,5 @@
# duration 10min
slow
time=504
k8s
k8s_info

View File

@@ -0,0 +1,40 @@
---
k8s_pod_metadata:
labels:
app: "{{ k8s_pod_name }}"
k8s_pod_spec:
serviceAccount: "{{ k8s_pod_service_account }}"
containers:
- image: "{{ k8s_pod_image }}"
imagePullPolicy: Always
name: "{{ k8s_pod_name }}"
command: "{{ k8s_pod_command }}"
readinessProbe:
initialDelaySeconds: 15
exec:
command:
- /bin/true
resources: "{{ k8s_pod_resources }}"
ports: "{{ k8s_pod_ports }}"
env: "{{ k8s_pod_env }}"
k8s_pod_service_account: default
k8s_pod_resources:
limits:
cpu: "100m"
memory: "100Mi"
k8s_pod_command: []
k8s_pod_ports: []
k8s_pod_env: []
k8s_pod_template:
metadata: "{{ k8s_pod_metadata }}"
spec: "{{ k8s_pod_spec }}"
k8s_wait_timeout: 240

View File

@@ -0,0 +1,375 @@
---
- block:
- set_fact:
wait_namespace: wait
- name: Ensure namespace exists
k8s:
definition:
apiVersion: v1
kind: Namespace
metadata:
name: "{{ wait_namespace }}"
- name: Add a simple pod
k8s:
definition:
apiVersion: v1
kind: Pod
metadata:
name: "{{ k8s_pod_name }}"
namespace: "{{ wait_namespace }}"
spec: "{{ k8s_pod_spec }}"
wait: yes
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
vars:
k8s_pod_name: wait-pod
k8s_pod_image: alpine:3.8
k8s_pod_command:
- sleep
- "10000"
register: wait_pod
ignore_errors: yes
- name: Assert that pod creation succeeded
assert:
that:
- wait_pod is successful
- name: Add a daemonset
k8s:
definition:
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: wait-daemonset
namespace: "{{ wait_namespace }}"
spec:
selector:
matchLabels:
app: "{{ k8s_pod_name }}"
template: "{{ k8s_pod_template }}"
wait: yes
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
vars:
k8s_pod_name: wait-ds
k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:1
k8s_pod_command:
- sleep
- "600"
register: ds
- name: Check that daemonset wait worked
assert:
that:
- ds.result.status.currentNumberScheduled == ds.result.status.desiredNumberScheduled
- name: Update a daemonset in check_mode
k8s:
definition:
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: wait-daemonset
namespace: "{{ wait_namespace }}"
spec:
selector:
matchLabels:
app: "{{ k8s_pod_name }}"
updateStrategy:
type: RollingUpdate
template: "{{ k8s_pod_template }}"
wait: yes
wait_sleep: 3
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
vars:
k8s_pod_name: wait-ds
k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:2
k8s_pod_command:
- sleep
- "600"
register: update_ds_check_mode
check_mode: yes
- name: Check that check_mode result contains the changes
assert:
that:
- update_ds_check_mode is changed
- "update_ds_check_mode.result.spec.template.spec.containers[0].image == 'gcr.io/kuar-demo/kuard-amd64:2'"
- name: Update a daemonset
k8s:
definition:
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: wait-daemonset
namespace: "{{ wait_namespace }}"
spec:
selector:
matchLabels:
app: "{{ k8s_pod_name }}"
updateStrategy:
type: RollingUpdate
template: "{{ k8s_pod_template }}"
wait: yes
wait_sleep: 3
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
vars:
k8s_pod_name: wait-ds
k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:3
k8s_pod_command:
- sleep
- "600"
register: ds
- name: Get updated pods
k8s_info:
api_version: v1
kind: Pod
namespace: "{{ wait_namespace }}"
label_selectors:
- app=wait-ds
field_selectors:
- status.phase=Running
register: updated_ds_pods
- name: Check that daemonset wait worked
assert:
that:
- ds.result.status.currentNumberScheduled == ds.result.status.desiredNumberScheduled
- updated_ds_pods.resources[0].spec.containers[0].image.endswith(":3")
- name: Add a crashing pod
k8s:
definition:
apiVersion: v1
kind: Pod
metadata:
name: "{{ k8s_pod_name }}"
namespace: "{{ wait_namespace }}"
spec: "{{ k8s_pod_spec }}"
wait: yes
wait_sleep: 1
wait_timeout: 30
vars:
k8s_pod_name: wait-crash-pod
k8s_pod_image: alpine:3.8
k8s_pod_command:
- /bin/false
register: crash_pod
ignore_errors: yes
- name: Check that task failed
assert:
that:
- crash_pod is failed
- name: Use a non-existent image
k8s:
definition:
apiVersion: v1
kind: Pod
metadata:
name: "{{ k8s_pod_name }}"
namespace: "{{ wait_namespace }}"
spec: "{{ k8s_pod_spec }}"
wait: yes
wait_sleep: 1
wait_timeout: 30
vars:
k8s_pod_name: wait-no-image-pod
k8s_pod_image: i_made_this_up:and_this_too
register: no_image_pod
ignore_errors: yes
- name: Check that task failed
assert:
that:
- no_image_pod is failed
- name: Add a deployment
k8s:
definition:
apiVersion: apps/v1
kind: Deployment
metadata:
name: wait-deploy
namespace: "{{ wait_namespace }}"
spec:
replicas: 3
selector:
matchLabels:
app: "{{ k8s_pod_name }}"
template: "{{ k8s_pod_template }}"
wait: yes
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
vars:
k8s_pod_name: wait-deploy
k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:1
k8s_pod_ports:
- containerPort: 8080
name: http
protocol: TCP
register: deploy
- name: Check that deployment wait worked
assert:
that:
- deploy.result.status.availableReplicas == deploy.result.status.replicas
- name: Update a deployment
k8s:
definition:
apiVersion: apps/v1
kind: Deployment
metadata:
name: wait-deploy
namespace: "{{ wait_namespace }}"
spec:
replicas: 3
selector:
matchLabels:
app: "{{ k8s_pod_name }}"
template: "{{ k8s_pod_template }}"
wait: yes
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
vars:
k8s_pod_name: wait-deploy
k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:2
k8s_pod_ports:
- containerPort: 8080
name: http
protocol: TCP
register: update_deploy
# It looks like the Deployment is updated to have the desired state *before* the pods are terminated
# Wait a couple of seconds to allow the old pods to at least get to Terminating state
- name: Avoid race condition
pause:
seconds: 2
- name: Get updated pods
k8s_info:
api_version: v1
kind: Pod
namespace: "{{ wait_namespace }}"
label_selectors:
- app=wait-deploy
field_selectors:
- status.phase=Running
register: updated_deploy_pods
until: updated_deploy_pods.resources[0].spec.containers[0].image.endswith(':2')
retries: 6
delay: 5
- name: Check that deployment wait worked
assert:
that:
- deploy.result.status.availableReplicas == deploy.result.status.replicas
- name: Pause a deployment
k8s:
definition:
apiVersion: apps/v1
kind: Deployment
metadata:
name: wait-deploy
namespace: "{{ wait_namespace }}"
spec:
paused: True
apply: no
wait: yes
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
wait_condition:
type: Progressing
status: Unknown
reason: DeploymentPaused
register: pause_deploy
- name: Check that paused deployment wait worked
assert:
that:
- condition.reason == "DeploymentPaused"
- condition.status == "Unknown"
vars:
condition: '{{ pause_deploy.result.status.conditions[1] }}'
- name: Add a service based on the deployment
k8s:
definition:
apiVersion: v1
kind: Service
metadata:
name: wait-svc
namespace: "{{ wait_namespace }}"
spec:
selector:
app: "{{ k8s_pod_name }}"
ports:
- port: 8080
targetPort: 8080
protocol: TCP
wait: yes
vars:
k8s_pod_name: wait-deploy
register: service
- name: Assert that waiting for service works
assert:
that:
- service is successful
- name: Add a crashing deployment
k8s:
definition:
apiVersion: apps/v1
kind: Deployment
metadata:
name: wait-crash-deploy
namespace: "{{ wait_namespace }}"
spec:
replicas: 3
selector:
matchLabels:
app: "{{ k8s_pod_name }}"
template: "{{ k8s_pod_template }}"
wait: yes
vars:
k8s_pod_name: wait-crash-deploy
k8s_pod_image: alpine:3.8
k8s_pod_command:
- /bin/false
register: wait_crash_deploy
ignore_errors: yes
- name: Check that task failed
assert:
that:
- wait_crash_deploy is failed
- name: Remove Pod with very short timeout
k8s:
api_version: v1
kind: Pod
name: wait-pod
namespace: "{{ wait_namespace }}"
state: absent
wait: yes
wait_sleep: 2
wait_timeout: 5
ignore_errors: yes
register: short_wait_remove_pod
- name: Check that task failed
assert:
that:
- short_wait_remove_pod is failed
always:
- name: Remove namespace
k8s:
kind: Namespace
name: "{{ wait_namespace }}"
state: absent

View File

@@ -1,19 +0,0 @@
Wait tests
----------
Wait tests require at least one node, and don't work on the normal k8s
openshift-origin container as provided by `ansible-test --docker -v k8s`.
minikube, Kubernetes from Docker, or any other Kubernetes service will
suffice.
If kubectl is already using the right config file and context, you can
just do
```
cd tests/integration/targets/k8s
./runme.sh -vv
```
otherwise set one or both of `K8S_AUTH_KUBECONFIG` and `K8S_AUTH_CONTEXT`
and use the same command

View File

@@ -1,2 +0,0 @@
cloud/openshift
shippable/cloud/group1

View File

@@ -1,22 +0,0 @@
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
labels:
app: kuard
name: kuard
namespace: default
spec:
replicas: 3
selector:
matchLabels:
app: kuard
unwanted: value
template:
metadata:
labels:
app: kuard
spec:
containers:
- image: gcr.io/kuar-demo/kuard-amd64:1
name: kuard

View File

@@ -1,21 +0,0 @@
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
labels:
app: kuard
name: kuard
namespace: default
spec:
replicas: hello
selector:
matchLabels:
app: kuard
template:
metadata:
labels:
app: kuard
spec:
containers:
- image: gcr.io/kuar-demo/kuard-amd64:1
name: kuard

View File

@@ -1,6 +0,0 @@
---
- name: delete temporary directory
file:
path: "{{ remote_tmp_dir }}"
state: absent
no_log: yes

View File

@@ -1,3 +0,0 @@
# README
The `test_tempfile.py` module added here is only used for the `setup_remote_tmp_dir.yml` temporary directory setup task. It is a clone of the `tempfile.py` community-supported Ansible module, and has to be included with the tests here because it is not available in the `ansible-base` distribution against which this collection is tested.

View File

@@ -1,121 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Krzysztof Magosa <krzysztof@magosa.pl>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: test_tempfile
short_description: Creates temporary files and directories
description:
- The C(test_tempfile) module creates temporary files and directories. C(mktemp) command takes different parameters on various systems, this module helps
to avoid troubles related to that. Files/directories created by module are accessible only by creator. In case you need to make them world-accessible
you need to use M(ansible.builtin.file) module.
- For Windows targets, use the M(ansible.builtin.win_tempfile) module instead.
options:
state:
description:
- Whether to create file or directory.
type: str
choices: [ directory, file ]
default: file
path:
description:
- Location where temporary file or directory should be created.
- If path is not specified, the default system temporary directory will be used.
type: path
prefix:
description:
- Prefix of file/directory name created by module.
type: str
default: ansible.
suffix:
description:
- Suffix of file/directory name created by module.
type: str
default: ""
seealso:
- module: file
- module: win_tempfile
author:
- Krzysztof Magosa (@krzysztof-magosa)
'''
EXAMPLES = """
- name: create temporary build directory
test_tempfile:
state: directory
suffix: build
- name: create temporary file
test_tempfile:
state: file
suffix: temp
register: tempfile_1
- name: use the registered var and the file module to remove the temporary file
file:
path: "{{ tempfile_1.path }}"
state: absent
when: tempfile_1.path is defined
"""
RETURN = '''
path:
description: Path to created file or directory
returned: success
type: str
sample: "/tmp/ansible.bMlvdk"
'''
from os import close
from tempfile import mkstemp, mkdtemp
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
def main():
    """Entry point for the test_tempfile module.

    Reads the module parameters (state, path, prefix, suffix), creates the
    requested temporary file or directory via the stdlib ``tempfile``
    helpers, and reports the resulting path through ``exit_json``. Any
    failure is reported through ``fail_json`` with the formatted traceback.
    """
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(type='str', default='file', choices=['file', 'directory']),
            path=dict(type='path'),
            prefix=dict(type='str', default='ansible.'),
            suffix=dict(type='str', default=''),
        ),
    )

    params = module.params
    try:
        if params['state'] == 'file':
            # mkstemp opens the file as well as creating it; close the
            # descriptor so only the path is handed back to the caller.
            descriptor, created_path = mkstemp(
                prefix=params['prefix'],
                suffix=params['suffix'],
                dir=params['path'],
            )
            close(descriptor)
        elif params['state'] == 'directory':
            created_path = mkdtemp(
                prefix=params['prefix'],
                suffix=params['suffix'],
                dir=params['path'],
            )
        # Creating a fresh temp entry always counts as a change.
        module.exit_json(changed=True, path=created_path)
    except Exception as exc:
        module.fail_json(msg=to_native(exc), exception=format_exc())


if __name__ == '__main__':
    main()

View File

@@ -1,2 +0,0 @@
---
dependencies: []

View File

@@ -1,76 +0,0 @@
---
- include_tasks: setup_remote_tmp_dir.yml
- set_fact:
virtualenv: "{{ remote_tmp_dir }}/virtualenv"
virtualenv_command: "{{ ansible_python_interpreter }} -m venv"
- set_fact:
virtualenv_interpreter: "{{ virtualenv }}/bin/python"
# Test graceful failure for missing kubernetes-validate
- pip:
name:
- kubernetes>=12.0.0
- coverage>=5.3
virtualenv: "{{ virtualenv }}"
virtualenv_command: "{{ virtualenv_command }}"
virtualenv_site_packages: no
- include_tasks: validate_not_installed.yml
vars:
ansible_python_interpreter: "{{ virtualenv_interpreter }}"
- file:
path: "{{ virtualenv }}"
state: absent
no_log: yes
# Test validate with kubernetes-validate
- pip:
name:
- kubernetes-validate==1.12.0
- kubernetes>=12.0.0
- coverage>=5.3
virtualenv: "{{ virtualenv }}"
virtualenv_command: "{{ virtualenv_command }}"
virtualenv_site_packages: no
- include_tasks: validate_installed.yml
vars:
ansible_python_interpreter: "{{ virtualenv_interpreter }}"
playbook_namespace: ansible-test-k8s-validate
- file:
path: "{{ virtualenv }}"
state: absent
no_log: yes
# Test new config getter (kubernetes==12.0.0)
- pip:
name:
- kubernetes==12.0.0
- coverage>=5.3
virtualenv: "{{ virtualenv }}"
virtualenv_command: "{{ virtualenv_command }}"
virtualenv_site_packages: no
- pip:
name:
- kubernetes>=12.0.0
virtualenv: "{{ virtualenv }}"
virtualenv_command: "{{ virtualenv_command }}"
virtualenv_site_packages: no
- include_tasks: new_config_getter.yml
vars:
ansible_python_interpreter: "{{ virtualenv_interpreter }}"
playbook_namespace: ansible-test-k8s-config-getter
- file:
path: "{{ virtualenv }}"
state: absent
no_log: yes

View File

@@ -1,16 +0,0 @@
---
- block:
- name: Create a namespace
k8s:
name: "{{ playbook_namespace }}"
kind: Namespace
- name: Delete namespace
k8s:
state: absent
definition:
- kind: Namespace
apiVersion: v1
metadata:
name: "{{ playbook_namespace }}"
ignore_errors: yes

View File

@@ -1,12 +0,0 @@
---
- name: create temporary directory
test_tempfile:
state: directory
suffix: .test
register: remote_tmp_dir
notify:
- delete temporary directory
- name: record temporary directory
set_fact:
remote_tmp_dir: "{{ remote_tmp_dir.path }}"

View File

@@ -1,126 +0,0 @@
---
- block:
- name: Create a namespace
k8s:
name: "{{ playbook_namespace }}"
kind: Namespace
- copy:
src: files
dest: "{{ remote_tmp_dir }}"
- name: incredibly simple ConfigMap
k8s:
definition:
apiVersion: v1
kind: ConfigMap
metadata:
name: hello
namespace: "{{ playbook_namespace }}"
validate:
fail_on_error: yes
register: k8s_with_validate
- name: assert that k8s_with_validate succeeds
assert:
that:
- k8s_with_validate is successful
- name: extra property does not fail without strict
k8s:
src: "{{ remote_tmp_dir }}/files/kuard-extra-property.yml"
namespace: "{{ playbook_namespace }}"
validate:
fail_on_error: yes
strict: no
- name: extra property fails with strict
k8s:
src: "{{ remote_tmp_dir }}/files/kuard-extra-property.yml"
namespace: "{{ playbook_namespace }}"
validate:
fail_on_error: yes
strict: yes
ignore_errors: yes
register: extra_property
- name: check that extra property fails with strict
assert:
that:
- extra_property is failed
- name: invalid type fails at validation stage
k8s:
src: "{{ remote_tmp_dir }}/files/kuard-invalid-type.yml"
namespace: "{{ playbook_namespace }}"
validate:
fail_on_error: yes
strict: no
ignore_errors: yes
register: invalid_type
- name: check that invalid type fails
assert:
that:
- invalid_type is failed
- name: invalid type fails with warnings when fail_on_error is False
k8s:
src: "{{ remote_tmp_dir }}/files/kuard-invalid-type.yml"
namespace: "{{ playbook_namespace }}"
validate:
fail_on_error: no
strict: no
ignore_errors: yes
register: invalid_type_no_fail
- name: check that invalid type fails
assert:
that:
- invalid_type_no_fail is failed
- name: setup custom resource definition
k8s:
src: "{{ remote_tmp_dir }}/files/setup-crd.yml"
- name: wait a few seconds
pause:
seconds: 5
- name: add custom resource definition
k8s:
src: "{{ remote_tmp_dir }}/files/crd-resource.yml"
namespace: "{{ playbook_namespace }}"
validate:
fail_on_error: yes
strict: yes
register: unknown_kind
- name: check that unknown kind warns
assert:
that:
- unknown_kind is successful
- "'warnings' in unknown_kind"
always:
- name: remove custom resource
k8s:
definition: "{{ lookup('file', role_path + '/files/crd-resource.yml') }}"
namespace: "{{ playbook_namespace }}"
state: absent
ignore_errors: yes
- name: remove custom resource definitions
k8s:
definition: "{{ lookup('file', role_path + '/files/setup-crd.yml') }}"
state: absent
- name: Delete namespace
k8s:
state: absent
definition:
- kind: Namespace
apiVersion: v1
metadata:
name: "{{ playbook_namespace }}"
ignore_errors: yes

View File

@@ -1,25 +0,0 @@
---
# TODO: Not available in ansible-base
# - python_requirements_info:
# dependencies:
# - openshift
# - kubernetes
# - kubernetes-validate
- k8s:
definition:
apiVersion: v1
kind: ConfigMap
metadata:
name: hello
namespace: default
validate:
fail_on_error: yes
ignore_errors: yes
register: k8s_no_validate
- name: assert that k8s_no_validate fails gracefully
assert:
that:
- k8s_no_validate is failed
- "k8s_no_validate.msg == 'kubernetes-validate python library is required to validate resources'"

View File

@@ -0,0 +1,3 @@
context/target
time=16
k8s

Some files were not shown because too many files have changed in this diff Show More