[backport/2.2] Move integration test suite from molecule to ansible-test (#392) (#457)

[backport/2.2] Move integration test suite from molecule to ansible-test (#392)

Move integration test suite from molecule to ansible-test
SUMMARY
molecule has been replaced with ansible-test
some test cases have been updated
k8s_apply: remove duplicated tasks that were increasing the running time of the test
helm: use different namespaces for different test cases in order to wait for the namespace deletion before moving to the next test.
all: remove `wait: yes` at the end of each test when deleting a namespace; the role used to create the namespace will ensure that any pre-existing namespace is deleted first.
ISSUE TYPE
Feature Pull Request
COMPONENT NAME
integration testing
Reviewed-by: Mike Graves mgraves@redhat.com
Reviewed-by: Gonéri Le Bouder goneri@lebouder.net
Reviewed-by: None 
(cherry picked from commit fd61f8b)
SUMMARY


ISSUE TYPE


Bugfix Pull Request
Docs Pull Request
Feature Pull Request
New Module Pull Request

COMPONENT NAME

ADDITIONAL INFORMATION
This commit is contained in:
Mike Graves
2022-05-11 14:56:23 -04:00
committed by GitHub
parent 0d9c4d3459
commit 11c800d6ed
190 changed files with 1261 additions and 1768 deletions

View File

@@ -0,0 +1,8 @@
# slow - 11min
# NOTE(review): time=313 below (~5 min) disagrees with the "11min" note
# above — confirm which figure is current.
slow
time=313
# Entries below list the sibling helm_* module targets covered by this
# target — presumably alias/grouping entries; verify against the
# ansible-test integration aliases documentation.
helm_info
helm_plugin
helm_plugin_info
helm_repository
helm_template

View File

@@ -0,0 +1,27 @@
---
# Default variables for the helm integration test target.

# Archive name and binary location for the helm release under test
# (downloaded/extracted by install.yml).
helm_archive_name: "helm-{{ helm_version }}-{{ ansible_system | lower }}-amd64.tar.gz"
helm_binary: "/tmp/helm/{{ ansible_system | lower }}-amd64/helm"

# Chart used when installing from a repository or URL.
chart_test: "ingress-nginx"
# Chart used when installing from a local checkout of the charts repo.
chart_test_local_path: "nginx-ingress"
# Versions quoted so YAML never parses them as numbers.
chart_test_version: "3.8.0"
chart_test_version_local_path: "1.32.0"
chart_test_version_upgrade: "3.9.0"
chart_test_version_upgrade_local_path: "1.33.0"
chart_test_repo: "https://kubernetes.github.io/ingress-nginx"
chart_test_git_repo: "http://github.com/helm/charts.git"

# Values passed to the chart to exercise upgrades with user-supplied values.
# NOTE(review): nesting reconstructed from a flattened source — myValue is
# assumed to belong under chart_test_values; confirm against consumers.
chart_test_values:
  revisionHistoryLimit: 0
  myValue: "changed"

# One namespace per test case so each test can wait for its own namespace
# deletion without blocking the others (referenced by index 0-9).
test_namespace:
  - "helm-diff"
  - "helm-envvars"
  - "helm-uninstall"
  - "helm-not-installed"
  - "helm-crd"
  - "helm-url"
  - "helm-repository"
  - "helm-local-path-001"
  - "helm-local-path-002"
  - "helm-local-path-003"

View File

@@ -0,0 +1,5 @@
# Chart metadata fixture (v2 variant of "appversionless-chart").
# Note: no appVersion field is present — the chart name suggests this is
# deliberate, to exercise handling of charts without an app version.
apiVersion: v2
name: appversionless-chart
description: A chart used in molecule tests
type: application
version: 0.2.0

View File

@@ -0,0 +1,7 @@
# ConfigMap template for the appversionless test chart (v2).
# Both values fall back to defaults when not overridden:
# myValue -> "test", myOtherValue -> "foo".
apiVersion: v1
kind: ConfigMap
metadata:
  name: test-chart-configmap
data:
  myValue: {{ default "test" .Values.myValue }}
  myOtherValue: {{ default "foo" .Values.myOtherValue }}

View File

@@ -0,0 +1,5 @@
# Chart metadata fixture (v1 variant of "appversionless-chart").
# Note: no appVersion field is present — the chart name suggests this is
# deliberate, to exercise handling of charts without an app version.
apiVersion: v2
name: appversionless-chart
description: A chart used in molecule tests
type: application
version: 0.1.0

View File

@@ -0,0 +1,6 @@
# ConfigMap template for the appversionless test chart (v1).
# myValue falls back to "test" when not overridden.
apiVersion: v1
kind: ConfigMap
metadata:
  name: test-chart-configmap
data:
  myValue: {{ default "test" .Values.myValue }}

View File

@@ -0,0 +1,11 @@
name: "sample_plugin"
version: "0.0.1"
usage: "Sample Helm Plugin"
description: |-
This plugin provides sample plugin to Helm.
usage:
This is new line
This is another line
ignoreFlags: false
useTunnel: false
command: "$HELM_PLUGIN_DIR/main.sh"

View File

@@ -0,0 +1,6 @@
# Chart metadata fixture for "test-chart", version 0.2.0 — presumably the
# upgrade variant (tests use 0.1.0 -> 0.2.0); confirm against the task files.
apiVersion: v2
name: test-chart
description: A chart used in molecule tests
type: application
version: 0.2.0
appVersion: "default"

View File

@@ -0,0 +1,7 @@
# ConfigMap template for the test chart (v2).
# Both values fall back to defaults when not overridden:
# myValue -> "test", myOtherValue -> "foo".
apiVersion: v1
kind: ConfigMap
metadata:
  name: test-chart-configmap
data:
  myValue: {{ default "test" .Values.myValue }}
  myOtherValue: {{ default "foo" .Values.myOtherValue }}

View File

@@ -0,0 +1,6 @@
# Chart metadata fixture for "test-chart", version 0.1.0 — presumably the
# initial-install variant (tests use 0.1.0 -> 0.2.0); confirm against tasks.
apiVersion: v2
name: test-chart
description: A chart used in molecule tests
type: application
version: 0.1.0
appVersion: "default"

View File

@@ -0,0 +1,6 @@
# ConfigMap template for the test chart (v1).
# myValue falls back to "test" when not overridden.
apiVersion: v1
kind: ConfigMap
metadata:
  name: test-chart-configmap
data:
  myValue: {{ default "test" .Values.myValue }}

View File

@@ -0,0 +1,5 @@
# Chart metadata fixture for the CRD test chart (used by test_crds.yml to
# exercise the skip_crds option of the helm module).
apiVersion: v2
name: test-crds
description: A chart with CRDs
type: application
version: 0.1.0

View File

@@ -0,0 +1,21 @@
# CustomResourceDefinition shipped in the test chart's crds/ content.
# Defines the namespaced kind Foo (group ansible.com, version v1) with a
# single optional string property "foobar".
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: foos.ansible.com
spec:
  group: ansible.com
  versions:
    - name: v1
      served: true
      storage: true
      schema:
        openAPIV3Schema:
          type: object
          properties:
            foobar:
              type: string
  scope: Namespaced
  names:
    plural: foos
    singular: foo
    kind: Foo

View File

@@ -0,0 +1,2 @@
---
# Values file passed via values_files in the chart tests (tests_chart.yml
# asserts install.status['values'].revisionHistoryLimit == 0).
revisionHistoryLimit: 0

View File

@@ -0,0 +1,95 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2021, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r"""
---
module: helm_test_version
short_description: check helm executable version
author:
- Aubin Bikouo (@abikouo)
requirements:
- "helm (https://github.com/helm/helm/releases)"
description:
- validate version of helm binary is lower than the specified version.
options:
binary_path:
description:
- The path of a helm binary to use.
required: false
type: path
version:
description:
- version to test against helm binary.
type: str
default: 3.7.0
"""
EXAMPLES = r"""
- name: validate helm binary version is lower than 3.5.0
helm_test_version:
binary_path: path/to/helm
version: "3.5.0"
"""
RETURN = r"""
message:
type: str
description: Text message describing the test result.
returned: always
sample: 'version installed: 3.4.5 is lower than version 3.5.0'
result:
type: bool
description: Test result.
returned: always
sample: 1
"""
import re
from ansible_collections.kubernetes.core.plugins.module_utils.version import (
LooseVersion,
)
from ansible.module_utils.basic import AnsibleModule
def main():
module = AnsibleModule(
argument_spec=dict(
binary_path=dict(type="path"), version=dict(type="str", default="3.7.0"),
),
)
bin_path = module.params.get("binary_path")
version = module.params.get("version")
if bin_path is not None:
helm_cmd_common = bin_path
else:
helm_cmd_common = "helm"
helm_cmd_common = module.get_bin_path(helm_cmd_common, required=True)
rc, out, err = module.run_command([helm_cmd_common, "version"])
if rc != 0:
module.fail_json(msg="helm version failed.", err=err, out=out, rc=rc)
m = re.match(r'version.BuildInfo{Version:"v([0-9\.]*)",', out)
installed_version = m.group(1)
message = "version installed: %s" % installed_version
if LooseVersion(installed_version) < LooseVersion(version):
message += " is lower than version %s" % version
module.exit_json(changed=False, result=True, message=message)
else:
message += " is greater than version %s" % version
module.exit_json(changed=False, result=False, message=message)
if __name__ == "__main__":
main()

View File

@@ -0,0 +1,5 @@
---
# Role metadata: collections needed by the tasks, and the remove_namespace
# role run as a dependency before this target.
collections:
- kubernetes.core
dependencies:
- remove_namespace

View File

@@ -0,0 +1,11 @@
---
- name: Init Helm folders
file:
path: /tmp/helm/
state: directory
- name: Unarchive Helm binary
unarchive:
src: 'https://get.helm.sh/{{ helm_archive_name }}'
dest: /tmp/helm/
remote_src: yes

View File

@@ -0,0 +1,7 @@
---
- name: Run tests
include_tasks: run_test.yml
loop_control:
loop_var: helm_version
with_items:
- "v3.2.4"

View File

@@ -0,0 +1,45 @@
---
- name: Ensure helm is not installed
file:
path: "{{ item }}"
state: absent
with_items:
- "/tmp/helm"
- name: Check failed if helm is not installed
include_tasks: test_helm_not_installed.yml
- name: "Install {{ helm_version }}"
include_tasks: install.yml
- name: "Ensure we honor the environment variables"
include_tasks: test_read_envvars.yml
- name: tests_repository
include_tasks: tests_repository.yml
- name: Deploy charts
include_tasks: "tests_chart/{{ test_chart_type }}.yml"
loop_control:
loop_var: test_chart_type
with_items:
- from_local_path
- from_repository
- from_url
- name: Test helm plugin
include_tasks: tests_helm_plugin.yml
- name: Test helm diff
include_tasks: tests_helm_diff.yml
# https://github.com/ansible-collections/community.kubernetes/issues/296
- name: Test Skip CRDS feature in helm chart install
include_tasks: test_crds.yml
- name: Clean helm install
file:
path: "{{ item }}"
state: absent
with_items:
- "/tmp/helm/"

View File

@@ -0,0 +1,98 @@
---
- name: Test CRDs
vars:
test_chart: "test-crds"
block:
- name: Create namespace
k8s:
kind: Namespace
name: "{{ test_namespace[4] }}"
- name: Copy test chart
copy:
src: "{{ test_chart }}"
dest: "/tmp/helm_test_crds/"
- name: Install chart while skipping CRDs
helm:
binary_path: "{{ helm_binary }}"
chart_ref: "/tmp/helm_test_crds/{{ test_chart }}"
namespace: "{{ test_namespace[4] }}"
name: test-crds
skip_crds: true
register: install
- assert:
that:
- install is changed
- install.status.name == "test-crds"
- name: Fail to create custom resource
k8s:
definition:
apiVersion: ansible.com/v1
kind: Foo
metadata:
namespace: "{{ test_namespace[4] }}"
name: test-foo
foobar: footest
ignore_errors: true
register: result
- assert:
that:
- result is failed
- "result.msg.startswith('Failed to find exact match for ansible.com/v1.Foo')"
# Helm won't install CRDs into an existing release, so we need to delete this, first
- name: Uninstall chart
helm:
binary_path: "{{ helm_binary }}"
namespace: "{{ test_namespace[4] }}"
name: test-crds
state: absent
- name: Install chart with CRDs
helm:
binary_path: "{{ helm_binary }}"
chart_ref: "/tmp/helm_test_crds/{{ test_chart }}"
namespace: "{{ test_namespace[4] }}"
name: test-crds
- name: Create custom resource
k8s:
definition:
apiVersion: ansible.com/v1
kind: Foo
metadata:
namespace: "{{ test_namespace[4] }}"
name: test-foo
foobar: footest
register: result
- assert:
that:
- result is changed
- result.result.foobar == "footest"
always:
- name: Remove chart
file:
path: "/tmp/helm_test_crds"
state: absent
ignore_errors: true
- name: Remove namespace
k8s:
kind: Namespace
name: "{{ test_namespace[4] }}"
state: absent
ignore_errors: true
# CRDs aren't deleted with a namespace, so we need to manually delete it
- name: Remove CRD
k8s:
kind: CustomResourceDefinition
name: foos.ansible.com
state: absent
ignore_errors: true

View File

@@ -0,0 +1,15 @@
---
- name: Failed test when helm is not installed
helm:
binary_path: "{{ helm_binary}}_fake"
name: test
chart_ref: "{{ chart_test }}"
namespace: "{{ test_namespace[3] }}"
ignore_errors: yes
register: helm_missing_binary
- name: Assert that helm is not installed
assert:
that:
- helm_missing_binary is failed
- "'No such file or directory' in helm_missing_binary.msg"

View File

@@ -0,0 +1,10 @@
---
# Verify the helm module honors K8S_AUTH_HOST: a bogus server must surface
# in the module's stderr.
- name: Pass a bogus server through the K8S_AUTH_HOST environment variable and ensure helm fails as expected
  helm:
    binary_path: "{{ helm_binary }}"
    state: absent
    name: does-not-exist
    namespace: "{{ test_namespace[1] }}"
  environment:
    K8S_AUTH_HOST: somewhere
  register: _helm_result
  failed_when: '"http://somewhere/version" not in _helm_result.stderr'

View File

@@ -0,0 +1,384 @@
---
# Shared chart test sequence, included once per chart source (local path,
# repository, URL). Callers provide: source, chart_source, helm_namespace and
# optionally chart_source_version[_upgrade], chart_source_upgrade, chart_name.
- name: Chart tests
  vars:
    chart_release_name: "test-{{ chart_name | default(source) }}"
    chart_release_replaced_name: "test-{{ chart_name | default(source) }}-001"
  block:
    - name: Create temp directory
      tempfile:
        state: directory
      register: tmpdir

    - name: Set temp directory fact
      set_fact:
        temp_dir: "{{ tmpdir.path }}"

    - name: Check helm_info empty
      helm_info:
        binary_path: "{{ helm_binary }}"
        name: "{{ chart_release_name }}"
        namespace: "{{ helm_namespace }}"
      register: empty_info

    - name: "Assert that no charts are installed with helm_info"
      assert:
        that:
          - empty_info.status is undefined

    # Installing without create_namespace into a missing namespace must fail.
    - name: "Install fail {{ chart_test }} from {{ source }}"
      helm:
        binary_path: "{{ helm_binary }}"
        name: "{{ chart_release_name }}"
        chart_ref: "{{ chart_source }}"
        chart_version: "{{ chart_source_version | default(omit) }}"
        namespace: "{{ helm_namespace }}"
      ignore_errors: true
      register: install_fail

    - name: "Assert that Install fail {{ chart_test }} from {{ source }}"
      assert:
        that:
          - install_fail is failed
          - "'Error: create: failed to create: namespaces \"' + helm_namespace + '\" not found' in install_fail.stderr"

    - name: "Install {{ chart_test }} from {{ source }} in check mode"
      helm:
        binary_path: "{{ helm_binary }}"
        name: "{{ chart_release_name }}"
        chart_ref: "{{ chart_source }}"
        chart_version: "{{ chart_source_version | default(omit) }}"
        namespace: "{{ helm_namespace }}"
        create_namespace: true
      register: install_check_mode
      check_mode: true

    - name: "Assert that {{ chart_test }} chart is installed from {{ source }} in check mode"
      assert:
        that:
          - install_check_mode is changed
          - install_check_mode.status is defined
          - install_check_mode.status.values is defined

    - name: "Install {{ chart_test }} from {{ source }}"
      helm:
        binary_path: "{{ helm_binary }}"
        name: "{{ chart_release_name }}"
        chart_ref: "{{ chart_source }}"
        chart_version: "{{ chart_source_version | default(omit) }}"
        namespace: "{{ helm_namespace }}"
        create_namespace: true
      register: install

    - name: "Assert that {{ chart_test }} chart is installed from {{ source }}"
      assert:
        that:
          - install is changed
          - install.status.chart == "{{ chart_test }}-{{ chart_test_version }}"
          - install.status.status | lower == 'deployed'

    - name: Check helm_info content
      helm_info:
        binary_path: "{{ helm_binary }}"
        name: "{{ chart_release_name }}"
        namespace: "{{ helm_namespace }}"
      register: content_info

    - name: "Assert that {{ chart_test }} is installed from {{ source }} with helm_info"
      assert:
        that:
          - content_info.status.chart == "{{ chart_test }}-{{ chart_test_version }}"
          - content_info.status.status | lower == 'deployed'

    - name: Check idempotency
      helm:
        binary_path: "{{ helm_binary }}"
        name: "{{ chart_release_name }}"
        chart_ref: "{{ chart_source }}"
        chart_version: "{{ chart_source_version | default(omit) }}"
        namespace: "{{ helm_namespace }}"
      register: install

    - name: Assert idempotency
      assert:
        that:
          - install is not changed
          - install.status.chart == "{{ chart_test }}-{{ chart_test_version }}"
          - install.status.status | lower == 'deployed'

    - name: "Add vars to {{ chart_test }} from {{ source }}"
      helm:
        binary_path: "{{ helm_binary }}"
        name: "{{ chart_release_name }}"
        chart_ref: "{{ chart_source }}"
        chart_version: "{{ chart_source_version | default(omit) }}"
        namespace: "{{ helm_namespace }}"
        values: "{{ chart_test_values }}"
      register: install

    - name: "Assert that {{ chart_test }} chart is upgraded with new var from {{ source }}"
      assert:
        that:
          - install is changed
          - install.status.status | lower == 'deployed'
          - install.status.chart == "{{ chart_test }}-{{ chart_test_version }}"
          - "install.status['values'].revisionHistoryLimit == 0"

    - name: Check idempotency after adding vars
      helm:
        binary_path: "{{ helm_binary }}"
        name: "{{ chart_release_name }}"
        chart_ref: "{{ chart_source }}"
        chart_version: "{{ chart_source_version | default(omit) }}"
        namespace: "{{ helm_namespace }}"
        values: "{{ chart_test_values }}"
      register: install

    - name: Assert idempotency after add vars
      assert:
        that:
          - install is not changed
          - install.status.status | lower == 'deployed'
          - install.status.chart == "{{ chart_test }}-{{ chart_test_version }}"
          - "install.status['values'].revisionHistoryLimit == 0"

    - name: "Remove Vars to {{ chart_test }} from {{ source }}"
      helm:
        binary_path: "{{ helm_binary }}"
        name: "{{ chart_release_name }}"
        chart_ref: "{{ chart_source }}"
        chart_version: "{{ chart_source_version | default(omit) }}"
        namespace: "{{ helm_namespace }}"
      register: install

    - name: "Assert that {{ chart_test }} chart is upgraded with new var from {{ source }}"
      assert:
        that:
          - install is changed
          - install.status.status | lower == 'deployed'
          - install.status.chart == "{{ chart_test }}-{{ chart_test_version }}"
          - install.status['values'] == {}

    - name: Check idempotency after removing vars
      helm:
        binary_path: "{{ helm_binary }}"
        name: "{{ chart_release_name }}"
        chart_ref: "{{ chart_source }}"
        chart_version: "{{ chart_source_version | default(omit) }}"
        namespace: "{{ helm_namespace }}"
      register: install

    - name: Assert idempotency after removing vars
      assert:
        that:
          - install is not changed
          - install.status.status | lower == 'deployed'
          - install.status.chart == "{{ chart_test }}-{{ chart_test_version }}"
          - install.status['values'] == {}

    - name: "Upgrade {{ chart_test }} from {{ source }}"
      helm:
        binary_path: "{{ helm_binary }}"
        name: "{{ chart_release_name }}"
        chart_ref: "{{ chart_source_upgrade | default(chart_source) }}"
        chart_version: "{{ chart_source_version_upgrade | default(omit) }}"
        namespace: "{{ helm_namespace }}"
      register: install

    - name: "Assert that {{ chart_test }} chart is upgraded with new version from {{ source }}"
      assert:
        that:
          - install is changed
          - install.status.status | lower == 'deployed'
          - install.status.chart == "{{ chart_test }}-{{ chart_test_version_upgrade }}"

    - name: Check idempotency after upgrade
      helm:
        binary_path: "{{ helm_binary }}"
        name: "{{ chart_release_name }}"
        chart_ref: "{{ chart_source_upgrade | default(chart_source) }}"
        chart_version: "{{ chart_source_version_upgrade | default(omit) }}"
        namespace: "{{ helm_namespace }}"
      register: install

    - name: Assert idempotency after upgrade
      assert:
        that:
          - install is not changed
          - install.status.status | lower == 'deployed'
          - install.status.chart == "{{ chart_test }}-{{ chart_test_version_upgrade }}"

    - name: "Remove {{ chart_test }} from {{ source }}"
      helm:
        binary_path: "{{ helm_binary }}"
        state: absent
        name: "{{ chart_release_name }}"
        namespace: "{{ helm_namespace }}"
      register: install

    - name: "Assert that {{ chart_test }} chart is removed from {{ source }}"
      assert:
        that:
          - install is changed

    - name: Check idempotency after remove
      helm:
        binary_path: "{{ helm_binary }}"
        state: absent
        name: "{{ chart_release_name }}"
        namespace: "{{ helm_namespace }}"
      register: install

    - name: Assert idempotency
      assert:
        that:
          - install is not changed

    # Test --replace
    - name: Install chart for replace option
      helm:
        binary_path: "{{ helm_binary }}"
        name: "{{ chart_release_replaced_name }}"
        chart_ref: "{{ chart_source }}"
        chart_version: "{{ chart_source_version | default(omit) }}"
        namespace: "{{ helm_namespace }}"
      register: install

    - name: "Assert that {{ chart_test }} chart is installed from {{ source }}"
      assert:
        that:
          - install is changed

    - name: "Remove {{ chart_release_replaced_name }} with --purge"
      helm:
        binary_path: "{{ helm_binary }}"
        state: absent
        name: "{{ chart_release_replaced_name }}"
        purge: false
        namespace: "{{ helm_namespace }}"
      register: install

    - name: Check if chart is removed
      assert:
        that:
          - install is changed

    - name: "Install chart again with same name {{ chart_release_replaced_name }}"
      helm:
        binary_path: "{{ helm_binary }}"
        name: "{{ chart_release_replaced_name }}"
        chart_ref: "{{ chart_source }}"
        chart_version: "{{ chart_source_version | default(omit) }}"
        namespace: "{{ helm_namespace }}"
        replace: true
      register: install

    - name: "Assert that {{ chart_test }} chart is installed from {{ source }}"
      assert:
        that:
          - install is changed

    - name: Remove {{ chart_test }} (cleanup)
      helm:
        binary_path: "{{ helm_binary }}"
        state: absent
        name: "{{ chart_release_replaced_name }}"
        namespace: "{{ helm_namespace }}"
      register: install

    - name: Check if chart is removed
      assert:
        that:
          - install is changed

    - name: "Install {{ chart_test }} from {{ source }} with values_files"
      helm:
        binary_path: "{{ helm_binary }}"
        name: "{{ chart_release_name }}"
        chart_ref: "{{ chart_source }}"
        chart_version: "{{ chart_source_version | default(omit) }}"
        namespace: "{{ helm_namespace }}"
        values_files:
          - "{{ role_path }}/files/values.yaml"
      register: install

    - name: "Assert that {{ chart_test }} chart has var from {{ source }}"
      assert:
        that:
          - install is changed
          - install.status.status | lower == 'deployed'
          - install.status.chart == "{{ chart_test }}-{{ chart_test_version }}"
          - "install.status['values'].revisionHistoryLimit == 0"

    - name: "Install {{ chart_test }} from {{ source }} with values_files (again)"
      helm:
        binary_path: "{{ helm_binary }}"
        name: "{{ chart_release_name }}"
        chart_ref: "{{ chart_source }}"
        chart_version: "{{ chart_source_version | default(omit) }}"
        namespace: "{{ helm_namespace }}"
        values_files:
          - "{{ role_path }}/files/values.yaml"
      register: install

    - name: "Assert the result is consistent"
      assert:
        that:
          - not (install is changed)

    - name: Render templates
      helm_template:
        binary_path: "{{ helm_binary }}"
        chart_ref: "{{ chart_source }}"
        chart_version: "{{ chart_source_version | default(omit) }}"
        output_dir: "{{ temp_dir }}"
        values_files:
          - "{{ role_path }}/files/values.yaml"
      register: result

    - assert:
        that:
          - result is changed
          - result is not failed
          - result.rc == 0
          - result.command is match("{{ helm_binary }} template {{ chart_source }}")

    - name: Check templates created
      stat:
        path: "{{ temp_dir }}/{{ chart_test }}/templates"
      register: result

    - assert:
        that:
          - result.stat.exists

    - name: Release using non-existent context
      helm:
        binary_path: "{{ helm_binary }}"
        name: "{{ chart_release_name }}"
        chart_ref: "{{ chart_source }}"
        chart_version: "{{ chart_source_version | default(omit) }}"
        namespace: "{{ helm_namespace }}"
        create_namespace: true
        context: does-not-exist
      ignore_errors: true
      register: result

    - name: Assert that release fails with non-existent context
      assert:
        that:
          - result is failed
          - "'context \"does-not-exist\" does not exist' in result.stderr"
  always:
    - name: Clean up temp dir
      file:
        state: absent
        path: "{{ temp_dir }}"
      ignore_errors: true

    - name: Remove helm namespace
      k8s:
        api_version: v1
        kind: Namespace
        name: "{{ helm_namespace }}"
        state: absent

View File

@@ -0,0 +1,111 @@
---
---
# Install charts from local paths: a pinned git checkout of the stable charts
# repo, plus locally packaged archives used for appVersion handling tests.

- name: Git clone stable repo
  git:
    repo: "{{ chart_test_git_repo }}"
    dest: /tmp/helm_test_repo
    version: 631eb8413f6728962439488f48d7d6fbb954a6db
    depth: 1

- name: Git clone stable repo upgrade
  git:
    repo: "{{ chart_test_git_repo }}"
    dest: /tmp/helm_test_repo_upgrade
    version: d37b5025ffc8be49699898369fbb59661e2a8ffb
    depth: 1

- name: Install Chart from local path
  include_tasks: "../tests_chart.yml"
  vars:
    source: local_path
    chart_test: "{{ chart_test_local_path }}"
    chart_source: "/tmp/helm_test_repo/stable/{{ chart_test_local_path }}/"
    chart_source_upgrade: "/tmp/helm_test_repo_upgrade/stable/{{ chart_test_local_path }}/"
    chart_test_version: "{{ chart_test_version_local_path }}"
    chart_test_version_upgrade: "{{ chart_test_version_upgrade_local_path }}"
    chart_name: "local-path-001"
    helm_namespace: "{{ test_namespace[7] }}"

- name: Test appVersion idempotence
  vars:
    chart_test: "test-chart"
    chart_test_upgrade: "test-chart-v2"
    chart_test_version: "0.1.0"
    chart_test_version_upgrade: "0.2.0"
    chart_test_app_version: "v1"
    chart_test_upgrade_app_version: "v2"
  block:
    - name: Copy test chart
      copy:
        src: "{{ chart_test }}"
        dest: "/tmp/helm_test_appversion/test-chart/"

    - name: Copy test chart v2
      copy:
        src: "{{ chart_test_upgrade }}"
        dest: "/tmp/helm_test_appversion/test-chart/"

    # create package with appVersion v1
    - name: "Package chart into archive with appVersion {{ chart_test_app_version }}"
      command: "{{ helm_binary }} package --app-version {{ chart_test_app_version }} /tmp/helm_test_appversion/test-chart/{{ chart_test }}"

    - name: "Move appVersion {{ chart_test_app_version }} chart archive"
      copy:
        remote_src: true
        src: "test-chart-{{ chart_test_version }}.tgz"
        dest: "/tmp/helm_test_appversion/test-chart/{{ chart_test }}-{{ chart_test_app_version }}-{{ chart_test_version }}.tgz"

    # create package with appVersion v2
    - name: "Package chart into archive with appVersion {{ chart_test_upgrade_app_version }}"
      command: "{{ helm_binary }} package --app-version {{ chart_test_upgrade_app_version }} /tmp/helm_test_appversion/test-chart/{{ chart_test_upgrade }}"

    - name: "Move appVersion {{ chart_test_upgrade_app_version }} chart archive"
      copy:
        remote_src: true
        src: "test-chart-{{ chart_test_version_upgrade }}.tgz"
        dest: "/tmp/helm_test_appversion/test-chart/{{ chart_test }}-{{ chart_test_upgrade_app_version }}-{{ chart_test_version_upgrade }}.tgz"

    - name: Install Chart from local path
      include_tasks: "../tests_chart.yml"
      vars:
        source: local_path
        chart_source: "/tmp/helm_test_appversion/test-chart/{{ chart_test }}-{{ chart_test_app_version }}-{{ chart_test_version }}.tgz"
        chart_source_upgrade: "/tmp/helm_test_appversion/test-chart/{{ chart_test }}-{{ chart_test_upgrade_app_version }}-{{ chart_test_version_upgrade }}.tgz"
        chart_name: "local-path-002"
        helm_namespace: "{{ test_namespace[8] }}"

- name: Test appVersion handling when null
  vars:
    chart_test: "appversionless-chart"
    chart_test_upgrade: "appversionless-chart-v2"
    chart_test_version: "0.1.0"
    chart_test_version_upgrade: "0.2.0"
  block:
    - name: Copy test chart
      copy:
        src: "{{ chart_test }}"
        dest: "/tmp/helm_test_appversion/test-null/"

    - name: Copy test chart v2
      copy:
        src: "{{ chart_test_upgrade }}"
        dest: "/tmp/helm_test_appversion/test-null/"

    # create package with appVersion v1
    - name: "Package chart into archive with appVersion v1"
      command: "{{ helm_binary }} package --app-version v1 /tmp/helm_test_appversion/test-null/{{ chart_test_upgrade }}"

    - name: Install Chart from local path
      include_tasks: "../tests_chart.yml"
      vars:
        source: local_path
        chart_source: "/tmp/helm_test_appversion/test-null/{{ chart_test }}/"
        chart_source_upgrade: "{{ chart_test }}-{{ chart_test_version_upgrade }}.tgz"
        chart_name: "local-path-003"
        helm_namespace: "{{ test_namespace[9] }}"

- name: Remove clone repos
  file:
    path: "{{ item }}"
    state: absent
  with_items:
    - /tmp/helm_test_repo
    - /tmp/helm_test_repo_upgrade
    - /tmp/helm_test_appversion

View File

@@ -0,0 +1,22 @@
---
- name: Add chart repo
helm_repository:
binary_path: "{{ helm_binary }}"
name: test_helm
repo_url: "{{ chart_test_repo }}"
- name: Install Chart from repository
include_tasks: "../tests_chart.yml"
vars:
source: repository
chart_source: "test_helm/{{ chart_test }}"
chart_source_version: "{{ chart_test_version }}"
chart_source_version_upgrade: "{{ chart_test_version_upgrade }}"
helm_namespace: "{{ test_namespace[6] }}"
- name: Add chart repo
helm_repository:
binary_path: "{{ helm_binary }}"
name: test_helm
repo_url: "{{ chart_test_repo }}"
state: absent

View File

@@ -0,0 +1,8 @@
---
- name: Install Chart from URL
include_tasks: "../tests_chart.yml"
vars:
source: url
chart_source: "https://github.com/kubernetes/ingress-nginx/releases/download/{{ chart_test }}-{{ chart_test_version }}/{{ chart_test }}-{{ chart_test_version }}.tgz"
chart_source_upgrade: "https://github.com/kubernetes/ingress-nginx/releases/download/{{ chart_test }}-{{ chart_test_version_upgrade }}/{{ chart_test }}-{{ chart_test_version_upgrade }}.tgz"
helm_namespace: "{{ test_namespace[5] }}"

View File

@@ -0,0 +1,154 @@
---
- name: Test helm diff functionality
vars:
test_chart_ref: "/tmp/test-chart"
block:
- set_fact:
helm_namespace: "{{ test_namespace[0] }}"
- name: Install helm diff
helm_plugin:
binary_path: "{{ helm_binary }}"
state: present
plugin_path: https://github.com/databus23/helm-diff
- name: Copy test chart
copy:
src: "test-chart/"
dest: "{{ test_chart_ref }}"
- name: Install local chart
helm:
binary_path: "{{ helm_binary }}"
name: test-chart
namespace: "{{ helm_namespace }}"
chart_ref: "{{ test_chart_ref }}"
create_namespace: yes
register: install
- assert:
that:
- install is changed
- name: Modify local chart
blockinfile:
create: yes
path: "{{ test_chart_ref }}/templates/anothermap.yaml"
block: !unsafe |
apiVersion: v1
kind: ConfigMap
metadata:
name: test-chart-another-configmap
data:
foo: {{ .Values.foo | default "bar" }}
- name: Upgrade local chart with modifications
helm:
binary_path: "{{ helm_binary }}"
name: test-chart
namespace: "{{ helm_namespace }}"
chart_ref: "{{ test_chart_ref }}"
register: install
- assert:
that:
- install is changed
- name: Upgrade modified local chart idempotency check
helm:
binary_path: "{{ helm_binary }}"
name: test-chart
namespace: "{{ helm_namespace }}"
chart_ref: "{{ test_chart_ref }}"
register: install
- assert:
that:
- install is not changed
- name: Modify values
blockinfile:
create: yes
path: "{{ test_chart_ref }}/values.yml"
block: |
---
foo: baz
- name: Upgrade with values file
helm:
binary_path: "{{ helm_binary }}"
name: test-chart
namespace: "{{ helm_namespace }}"
chart_ref: "{{ test_chart_ref }}"
values_files:
- "{{ test_chart_ref }}/values.yml"
register: install
- assert:
that:
- install is changed
- name: Upgrade with values file idempotency check
helm:
binary_path: "{{ helm_binary }}"
name: test-chart
namespace: "{{ helm_namespace }}"
chart_ref: "{{ test_chart_ref }}"
values_files:
- "{{ test_chart_ref }}/values.yml"
register: install
- assert:
that:
- install is not changed
- name: Upgrade with values
helm:
binary_path: "{{ helm_binary }}"
name: test-chart
namespace: "{{ helm_namespace }}"
chart_ref: "{{ test_chart_ref }}"
values:
foo: gaz
register: install
- assert:
that:
- install is changed
- name: Upgrade with values idempotency check
helm:
binary_path: "{{ helm_binary }}"
name: test-chart
namespace: "{{ helm_namespace }}"
chart_ref: "{{ test_chart_ref }}"
values:
foo: gaz
register: install
- assert:
that:
- install is not changed
always:
- name: Remove chart directory
file:
path: "{{ test_chart_ref }}"
state: absent
ignore_errors: yes
- name: Uninstall helm diff
helm_plugin:
binary_path: "{{ helm_binary }}"
state: absent
plugin_name: diff
ignore_errors: yes
- name: Remove helm namespace
k8s:
api_version: v1
kind: Namespace
name: "{{ helm_namespace }}"
state: absent
ignore_errors: yes

View File

@@ -0,0 +1,119 @@
---
- name: Install env plugin in check mode
helm_plugin:
binary_path: "{{ helm_binary }}"
state: present
plugin_path: https://github.com/adamreese/helm-env
register: check_install_env
check_mode: true
- assert:
that:
- check_install_env.changed
- name: Install env plugin
helm_plugin:
binary_path: "{{ helm_binary }}"
state: present
plugin_path: https://github.com/adamreese/helm-env
register: install_env
- assert:
that:
- install_env.changed
- name: Gather info about all plugin
helm_plugin_info:
binary_path: "{{ helm_binary }}"
register: plugin_info
- assert:
that:
- plugin_info.plugin_list is defined
- name: Install env plugin again
helm_plugin:
binary_path: "{{ helm_binary }}"
state: present
plugin_path: https://github.com/adamreese/helm-env
register: install_env
- assert:
that:
- not install_env.changed
- name: Uninstall env plugin in check mode
helm_plugin:
binary_path: "{{ helm_binary }}"
state: absent
plugin_name: env
register: check_uninstall_env
check_mode: true
- assert:
that:
- check_uninstall_env.changed
- name: Uninstall env plugin
helm_plugin:
binary_path: "{{ helm_binary }}"
state: absent
plugin_name: env
register: uninstall_env
- assert:
that:
- uninstall_env.changed
- name: Uninstall env plugin again
helm_plugin:
binary_path: "{{ helm_binary }}"
state: absent
plugin_name: env
register: uninstall_env
- assert:
that:
- not uninstall_env.changed
# https://github.com/ansible-collections/community.kubernetes/issues/399
- block:
- name: Copy required plugin files
copy:
src: "files/sample_plugin"
dest: "/tmp/helm_plugin_test/"
- name: Install sample_plugin from the directory
helm_plugin:
binary_path: "{{ helm_binary }}"
state: present
plugin_path: "/tmp/helm_plugin_test/sample_plugin"
register: sample_plugin_output
- name: Assert that sample_plugin is installed or not
assert:
that:
- sample_plugin_output.changed
- name: Gather Helm plugin info
helm_plugin_info:
binary_path: "{{ helm_binary }}"
register: r
- name: Set sample_plugin version
set_fact:
plugin_version: "{{ ( r.plugin_list | selectattr('name', 'equalto', plugin_name) | list )[0].version }}"
vars:
plugin_name: "sample_plugin"
- name: Assert if sample_plugin with multiline comment is installed
assert:
that:
- plugin_version == "0.0.1"
always:
- name: Uninstall sample_plugin
helm_plugin:
binary_path: "{{ helm_binary }}"
state: absent
plugin_name: sample_plugin
ignore_errors: yes

View File

@@ -0,0 +1,67 @@
---
- name: "Ensure test_helm_repo doesn't exist"
helm_repository:
binary_path: "{{ helm_binary }}"
name: test_helm_repo
state: absent
- name: Add test_helm_repo chart repository
helm_repository:
binary_path: "{{ helm_binary }}"
name: test_helm_repo
repo_url: "{{ chart_test_repo }}"
register: repository
- name: Assert that test_helm_repo repository is added
assert:
that:
- repository is changed
- name: Check idempotency
helm_repository:
binary_path: "{{ helm_binary }}"
name: test_helm_repo
repo_url: "{{ chart_test_repo }}"
register: repository
- name: Assert idempotency
assert:
that:
- repository is not changed
- name: Failed to add repository with the same name
helm_repository:
binary_path: "{{ helm_binary }}"
name: test_helm_repo
repo_url: "https://other-charts.url"
register: repository_errors
ignore_errors: yes
- name: Assert that adding repository with the same name failed
assert:
that:
- repository_errors is failed
- name: Remove test_helm_repo chart repository
helm_repository:
binary_path: "{{ helm_binary }}"
name: test_helm_repo
state: absent
register: repository
- name: Assert that test_helm_repo repository is removed
assert:
that:
- repository is changed
- name: Check idempotency after remove
helm_repository:
binary_path: "{{ helm_binary }}"
name: test_helm_repo
state: absent
register: repository
- name: Assert idempotency
assert:
that:
- repository is not changed

View File

@@ -0,0 +1,3 @@
context/target
time=42
k8s

View File

@@ -0,0 +1,102 @@
---
- name: Converge
hosts: localhost
connection: local
collections:
- kubernetes.core
vars_files:
- vars/main.yml
tasks:
- name: Delete existing namespace
k8s:
api_version: v1
kind: Namespace
name: inventory
wait: yes
state: absent
- name: Ensure namespace exists
k8s:
api_version: v1
kind: Namespace
name: inventory
- name: Add a deployment
k8s:
definition:
apiVersion: apps/v1
kind: Deployment
metadata:
name: inventory
namespace: inventory
spec:
replicas: 1
selector:
matchLabels:
app: "{{ k8s_pod_name }}"
template: "{{ k8s_pod_template }}"
wait: yes
wait_timeout: 400
vars:
k8s_pod_name: inventory
k8s_pod_image: python
k8s_pod_command:
- python
- '-m'
- http.server
k8s_pod_env:
- name: TEST
value: test
- meta: refresh_inventory
- name: Verify inventory and connection plugins
hosts: namespace_inventory_pods
gather_facts: no
vars:
file_content: |
Hello world
tasks:
- name: End play if host not running (TODO should we not add these to the inventory?)
meta: end_host
when: pod_phase != "Running"
- debug: var=hostvars
- setup:
- debug: var=ansible_facts
- name: Assert the TEST environment variable was retrieved
assert:
that: ansible_facts.env.TEST == 'test'
- name: Copy a file into the host
copy:
content: '{{ file_content }}'
dest: /tmp/test_file
- name: Retrieve the file from the host
slurp:
src: /tmp/test_file
register: slurped_file
- name: Assert the file content matches expectations
assert:
that: (slurped_file.content|b64decode) == file_content
- name: Delete inventory namespace
hosts: localhost
connection: local
gather_facts: no
tasks:
- name: Remove inventory namespace
k8s:
api_version: v1
kind: Namespace
name: inventory
state: absent

View File

@@ -0,0 +1,2 @@
---
plugin: kubernetes.core.k8s

View File

@@ -0,0 +1,38 @@
---
k8s_pod_metadata:
labels:
app: "{{ k8s_pod_name }}"
k8s_pod_spec:
serviceAccount: "{{ k8s_pod_service_account }}"
containers:
- image: "{{ k8s_pod_image }}"
imagePullPolicy: Always
name: "{{ k8s_pod_name }}"
command: "{{ k8s_pod_command }}"
readinessProbe:
initialDelaySeconds: 15
exec:
command:
- /bin/true
resources: "{{ k8s_pod_resources }}"
ports: "{{ k8s_pod_ports }}"
env: "{{ k8s_pod_env }}"
k8s_pod_service_account: default
k8s_pod_resources:
limits:
cpu: "100m"
memory: "100Mi"
k8s_pod_command: []
k8s_pod_ports: []
k8s_pod_env: []
k8s_pod_template:
metadata: "{{ k8s_pod_metadata }}"
spec: "{{ k8s_pod_spec }}"

View File

@@ -0,0 +1,8 @@
#!/usr/bin/env bash
set -eux
export ANSIBLE_INVENTORY_ENABLED=kubernetes.core.k8s,yaml
export ANSIBLE_PYTHON_INTERPRETER=auto_silent
ansible-playbook playbooks/play.yml -i playbooks/test.inventory_k8s.yml "$@"

View File

@@ -0,0 +1,2 @@
time=7
k8s

View File

@@ -0,0 +1,22 @@
---
- name: Create a SelfSubjectAccessReview resource
register: can_i_create_namespaces
ignore_errors: yes
k8s:
state: present
definition:
apiVersion: authorization.k8s.io/v1
kind: SelfSubjectAccessReview
spec:
resourceAttributes:
group: v1
resource: Namespace
verb: create
- name: Assert that the SelfSubjectAccessReview request succeeded
assert:
that:
- can_i_create_namespaces is successful
- can_i_create_namespaces.result.status is defined
- can_i_create_namespaces.result.status.allowed is defined
- can_i_create_namespaces.result.status.allowed

View File

@@ -0,0 +1,2 @@
time=14
k8s

View File

@@ -0,0 +1,2 @@
---
test_namespace: "append-hash"

View File

@@ -0,0 +1,2 @@
dependencies:
- setup_namespace

View File

@@ -0,0 +1,69 @@
---
- block:
- name: Ensure that append_hash namespace exists
k8s:
kind: Namespace
name: "{{ test_namespace }}"
- name: Create k8s_resource variable
set_fact:
k8s_resource:
metadata:
name: config-map-test
namespace: "{{ test_namespace }}"
apiVersion: v1
kind: ConfigMap
data:
hello: world
- name: Create config map
k8s:
definition: "{{ k8s_resource }}"
append_hash: yes
register: k8s_configmap1
- name: Check configmap is created with a hash
assert:
that:
- k8s_configmap1 is changed
- k8s_configmap1.result.metadata.name != 'config-map-test'
- k8s_configmap1.result.metadata.name[:-10] == 'config-map-test-'
- name: Recreate same config map
k8s:
definition: "{{ k8s_resource }}"
append_hash: yes
register: k8s_configmap2
- name: Check configmaps are different
assert:
that:
- k8s_configmap2 is not changed
- k8s_configmap1.result.metadata.name == k8s_configmap2.result.metadata.name
- name: Add key to config map
k8s:
definition:
metadata:
name: config-map-test
namespace: "{{ test_namespace }}"
apiVersion: v1
kind: ConfigMap
data:
hello: world
another: value
append_hash: yes
register: k8s_configmap3
- name: Check configmaps are different
assert:
that:
- k8s_configmap3 is changed
- k8s_configmap1.result.metadata.name != k8s_configmap3.result.metadata.name
always:
- name: Ensure that namespace is removed
k8s:
kind: Namespace
name: "{{ test_namespace }}"
state: absent

View File

@@ -0,0 +1,5 @@
# duration 9min
slow
k8s_service
k8s
time=192

View File

@@ -0,0 +1,42 @@
---
k8s_pod_metadata:
labels:
app: "{{ k8s_pod_name }}"
k8s_pod_spec:
serviceAccount: "{{ k8s_pod_service_account }}"
containers:
- image: "{{ k8s_pod_image }}"
imagePullPolicy: Always
name: "{{ k8s_pod_name }}"
command: "{{ k8s_pod_command }}"
readinessProbe:
initialDelaySeconds: 15
exec:
command:
- /bin/true
resources: "{{ k8s_pod_resources }}"
ports: "{{ k8s_pod_ports }}"
env: "{{ k8s_pod_env }}"
k8s_pod_service_account: default
k8s_pod_resources:
limits:
cpu: "100m"
memory: "100Mi"
k8s_pod_command: []
k8s_pod_ports: []
k8s_pod_env: []
k8s_pod_template:
metadata: "{{ k8s_pod_metadata }}"
spec: "{{ k8s_pod_spec }}"
test_namespace: "apply"
k8s_wait_timeout: 240

View File

@@ -0,0 +1,2 @@
dependencies:
- setup_namespace

View File

@@ -0,0 +1,593 @@
---
- block:
- name: Ensure namespace exists
k8s:
definition:
apiVersion: v1
kind: Namespace
metadata:
name: "{{ test_namespace }}"
- name: Add a configmap
k8s:
name: "apply-configmap"
namespace: "{{ test_namespace }}"
definition:
kind: ConfigMap
apiVersion: v1
data:
one: "1"
two: "2"
three: "3"
apply: yes
register: k8s_configmap
- name: Check configmap was created
assert:
that:
- k8s_configmap is changed
- k8s_configmap.result.metadata.annotations|default(False)
- name: Add same configmap again
k8s:
definition:
kind: ConfigMap
apiVersion: v1
metadata:
name: "apply-configmap"
namespace: "{{ test_namespace }}"
data:
one: "1"
two: "2"
three: "3"
apply: yes
register: k8s_configmap_2
- name: Check nothing changed
assert:
that:
- k8s_configmap_2 is not changed
- name: Add same configmap again with check mode on
k8s:
definition:
kind: ConfigMap
apiVersion: v1
metadata:
name: "apply-configmap"
namespace: "{{ test_namespace }}"
data:
one: "1"
two: "2"
three: "3"
apply: yes
check_mode: yes
register: k8s_configmap_check
- name: Check nothing changed
assert:
that:
- k8s_configmap_check is not changed
- name: Add same configmap again but using name and namespace args
k8s:
name: "apply-configmap"
namespace: "{{ test_namespace }}"
definition:
kind: ConfigMap
apiVersion: v1
data:
one: "1"
two: "2"
three: "3"
apply: yes
register: k8s_configmap_2a
- name: Check nothing changed
assert:
that:
- k8s_configmap_2a is not changed
- name: Update configmap
k8s:
definition:
kind: ConfigMap
apiVersion: v1
metadata:
name: "apply-configmap"
namespace: "{{ test_namespace }}"
data:
one: "1"
three: "3"
four: "4"
apply: yes
register: k8s_configmap_3
- name: Ensure that configmap has been correctly updated
assert:
that:
- k8s_configmap_3 is changed
- "'four' in k8s_configmap_3.result.data"
- "'two' not in k8s_configmap_3.result.data"
- name: Add a service
k8s:
definition:
apiVersion: v1
kind: Service
metadata:
name: apply-svc
namespace: "{{ test_namespace }}"
spec:
selector:
app: whatever
ports:
- name: http
port: 8080
targetPort: 8080
apply: yes
register: k8s_service
- name: Add exactly same service
k8s:
definition:
apiVersion: v1
kind: Service
metadata:
name: apply-svc
namespace: "{{ test_namespace }}"
spec:
selector:
app: whatever
ports:
- name: http
port: 8080
targetPort: 8080
apply: yes
register: k8s_service_2
- name: Check nothing changed
assert:
that:
- k8s_service_2 is not changed
- name: Add exactly same service in check mode
k8s:
definition:
apiVersion: v1
kind: Service
metadata:
name: apply-svc
namespace: "{{ test_namespace }}"
spec:
selector:
app: whatever
ports:
- name: http
port: 8080
targetPort: 8080
apply: yes
register: k8s_service_3
check_mode: yes
- name: Check nothing changed
assert:
that:
- k8s_service_3 is not changed
- name: Change service ports
k8s:
definition:
apiVersion: v1
kind: Service
metadata:
name: apply-svc
namespace: "{{ test_namespace }}"
spec:
selector:
app: whatever
ports:
- name: http
port: 8081
targetPort: 8081
apply: yes
register: k8s_service_4
- name: Check ports are correct
assert:
that:
- k8s_service_4 is changed
- k8s_service_4.result.spec.ports | length == 1
- k8s_service_4.result.spec.ports[0].port == 8081
- name: Insert new service port
k8s:
definition:
apiVersion: v1
kind: Service
metadata:
name: apply-svc
namespace: "{{ test_namespace }}"
spec:
selector:
app: whatever
ports:
- name: mesh
port: 8080
targetPort: 8080
- name: http
port: 8081
targetPort: 8081
apply: yes
register: k8s_service_4
- name: Check ports are correct
assert:
that:
- k8s_service_4 is changed
- k8s_service_4.result.spec.ports | length == 2
- k8s_service_4.result.spec.ports[0].port == 8080
- k8s_service_4.result.spec.ports[1].port == 8081
- name: Remove new service port (check mode)
k8s:
definition:
apiVersion: v1
kind: Service
metadata:
name: apply-svc
namespace: "{{ test_namespace }}"
spec:
selector:
app: whatever
ports:
- name: http
port: 8081
targetPort: 8081
apply: yes
check_mode: yes
register: k8s_service_check
- name: Check ports are correct
assert:
that:
- k8s_service_check is changed
- k8s_service_check.result.spec.ports | length == 1
- k8s_service_check.result.spec.ports[0].port == 8081
- name: Remove new service port
k8s:
definition:
apiVersion: v1
kind: Service
metadata:
name: apply-svc
namespace: "{{ test_namespace }}"
spec:
selector:
app: whatever
ports:
- name: http
port: 8081
targetPort: 8081
apply: yes
register: k8s_service_5
- name: Check ports are correct
assert:
that:
- k8s_service_5 is changed
- k8s_service_5.result.spec.ports | length == 1
- k8s_service_5.result.spec.ports[0].port == 8081
- name: Add a serviceaccount
k8s:
definition:
apiVersion: v1
kind: ServiceAccount
metadata:
name: apply-deploy
namespace: "{{ test_namespace }}"
- name: Add a deployment
k8s:
definition:
apiVersion: apps/v1
kind: Deployment
metadata:
name: apply-deploy
namespace: "{{ test_namespace }}"
spec:
replicas: 1
selector:
matchLabels:
app: "{{ k8s_pod_name }}"
template: "{{ k8s_pod_template }}"
wait: yes
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
apply: yes
vars:
k8s_pod_name: apply-deploy
k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-green
k8s_pod_service_account: apply-deploy
k8s_pod_ports:
- containerPort: 8080
name: http
protocol: TCP
k8s_pod_resources:
requests:
cpu: 100m
memory: 100Mi
limits:
cpu: 100m
memory: 100Mi
- name: Update the earlier deployment in check mode
k8s:
definition:
apiVersion: apps/v1
kind: Deployment
metadata:
name: apply-deploy
namespace: "{{ test_namespace }}"
spec:
replicas: 1
selector:
matchLabels:
app: "{{ k8s_pod_name }}"
template: "{{ k8s_pod_template }}"
wait: yes
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
apply: yes
check_mode: yes
vars:
k8s_pod_name: apply-deploy
k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-purple
k8s_pod_service_account: apply-deploy
k8s_pod_ports:
- containerPort: 8080
name: http
protocol: TCP
k8s_pod_resources:
requests:
cpu: 50m
limits:
cpu: 50m
memory: 50Mi
register: update_deploy_check_mode
- name: Ensure check mode change took
assert:
that:
- update_deploy_check_mode is changed
- "update_deploy_check_mode.result.spec.template.spec.containers[0].image == 'gcr.io/kuar-demo/kuard-amd64:v0.10.0-purple'"
- name: Update the earlier deployment
k8s:
definition:
apiVersion: apps/v1
kind: Deployment
metadata:
name: apply-deploy
namespace: "{{ test_namespace }}"
spec:
replicas: 1
selector:
matchLabels:
app: "{{ k8s_pod_name }}"
template: "{{ k8s_pod_template }}"
wait: yes
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
apply: yes
vars:
k8s_pod_name: apply-deploy
k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-purple
k8s_pod_service_account: apply-deploy
k8s_pod_ports:
- containerPort: 8080
name: http
protocol: TCP
k8s_pod_resources:
requests:
cpu: 50m
limits:
cpu: 50m
memory: 50Mi
register: update_deploy_for_real
- name: Ensure change took
assert:
that:
- update_deploy_for_real is changed
- "update_deploy_for_real.result.spec.template.spec.containers[0].image == 'gcr.io/kuar-demo/kuard-amd64:v0.10.0-purple'"
- name: Remove the serviceaccount
k8s:
state: absent
definition:
apiVersion: v1
kind: ServiceAccount
metadata:
name: apply-deploy
namespace: "{{ test_namespace }}"
- name: Apply deployment after service account removed
k8s:
definition:
apiVersion: apps/v1
kind: Deployment
metadata:
name: apply-deploy
namespace: "{{ test_namespace }}"
spec:
replicas: 1
selector:
matchLabels:
app: "{{ k8s_pod_name }}"
template: "{{ k8s_pod_template }}"
wait: yes
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
apply: yes
vars:
k8s_pod_name: apply-deploy
k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-green
k8s_pod_service_account: apply-deploy
k8s_pod_ports:
- containerPort: 8080
name: http
protocol: TCP
k8s_pod_resources:
requests:
cpu: 50m
limits:
cpu: 50m
memory: 50Mi
register: deploy_after_serviceaccount_removal
ignore_errors: yes
- name: Ensure that updating deployment after service account removal failed
assert:
that:
- deploy_after_serviceaccount_removal is failed
- name: Add a secret
k8s:
definition:
apiVersion: v1
kind: Secret
metadata:
name: apply-secret
namespace: "{{ test_namespace }}"
type: Opaque
stringData:
foo: bar
register: k8s_secret
- name: Check secret was created
assert:
that:
- k8s_secret is changed
- k8s_secret.result.data.foo
- name: Add same secret
k8s:
definition:
apiVersion: v1
kind: Secret
metadata:
name: apply-secret
namespace: "{{ test_namespace }}"
type: Opaque
stringData:
foo: bar
register: k8s_secret
- name: Check nothing changed
assert:
that:
- k8s_secret is not changed
- name: Add same secret with check mode on
k8s:
definition:
apiVersion: v1
kind: Secret
metadata:
name: apply-secret
namespace: "{{ test_namespace }}"
type: Opaque
stringData:
foo: bar
check_mode: yes
register: k8s_secret
- name: Check nothing changed
assert:
that:
- k8s_secret is not changed
- name: Add same secret with check mode on using data
k8s:
definition:
apiVersion: v1
kind: Secret
metadata:
name: apply-secret
namespace: "{{ test_namespace }}"
type: Opaque
data:
foo: YmFy
check_mode: yes
register: k8s_secret
- name: Check nothing changed
assert:
that:
- k8s_secret is not changed
- name: Create network policy (egress array with empty dict)
k8s:
namespace: "{{ test_namespace }}"
apply: true
definition:
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
name: apply-netpolicy
labels:
app: apply-netpolicy
annotations:
{}
spec:
podSelector:
matchLabels:
app: apply-netpolicy
policyTypes:
- Ingress
- Egress
ingress:
- ports:
- port: 9093
protocol: TCP
egress:
- {}
- name: Apply network policy
k8s:
namespace: "{{ test_namespace }}"
definition:
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
name: apply-netpolicy
labels:
app: apply-netpolicy
annotations:
{}
spec:
podSelector:
matchLabels:
app: apply-netpolicy
policyTypes:
- Ingress
- Egress
ingress:
- ports:
- port: 9093
protocol: TCP
egress:
- {}
apply: true
register: k8s_networkpolicy
- name: Check that nothing changed
assert:
that:
- k8s_networkpolicy is not changed
always:
- name: Remove namespace
k8s:
kind: Namespace
name: "{{ test_namespace }}"
state: absent

View File

@@ -0,0 +1,2 @@
k8s_cluster_info
time=9

View File

@@ -0,0 +1,24 @@
---
- name: Get Information about All APIs
k8s_cluster_info:
register: api_details
- name: Print all APIs for debugging
debug:
msg: "{{ api_details.apis }}"
- name: Get core API version
set_fact:
crd: "{{ api_details.apis['apiextensions.k8s.io/v1'] }}"
host: "{{ api_details.connection['host'] }}"
client_version: "{{ api_details.version['client'] }}"
- name: Check if all APIs are present
assert:
that:
- api_details.apis is defined
- api_details.apis.v1.Secret is defined
- api_details.apis.v1.Service is defined
- crd is defined
- host is defined
- client_version is defined

View File

@@ -0,0 +1,4 @@
k8s_exec
k8s_cp
k8s
time=101

View File

@@ -0,0 +1,15 @@
---
# defaults file for k8copy
test_namespace: copy
pod_with_one_container:
name: pod-copy-0
container: container-00
pod_with_two_container:
name: pod-copy-1
container:
- container-10
- container-11
kubectl_path: /tmp/kubectl

Binary file not shown.

View File

@@ -0,0 +1 @@
kubernetes.core

View File

@@ -0,0 +1 @@
k8s_cp

View File

@@ -0,0 +1 @@
This is a simple file used to test k8s_cp module on ansible.

View File

@@ -0,0 +1,2 @@
cloud team
content team

View File

@@ -0,0 +1 @@
This content will be copied into remote Pod.

View File

@@ -0,0 +1,93 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2021, Aubin Bikouo <@abikouo>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r"""
module: k8s_create_file
short_description: Create large file with a defined size.
author:
- Aubin Bikouo (@abikouo)
description:
- This module is used to validate k8s_cp module.
options:
path:
description:
- The destination path for the file to create.
type: path
required: yes
size:
description:
- The size of the output file in MB.
type: int
default: 400
binary:
description:
- If this flag is set to yes, the generated file content binary data.
type: bool
default: False
"""
EXAMPLES = r"""
- name: create 150MB file
k8s_create_file:
path: large_file.txt
size: 150
"""
RETURN = r"""
"""
import os
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
def execute_module(module):
    """Create a file of roughly ``size`` MB at ``path``.

    With ``binary: true`` the file holds exactly ``size`` MB of random bytes;
    otherwise it holds numbered text lines and may slightly exceed ``size``
    (the last line is not truncated), matching the original behavior.
    """
    try:
        size = module.params.get("size") * 1024 * 1024
        path = module.params.get("path")
        if module.params.get("binary"):
            content = os.urandom(size)
            write_mode = "wb"
        else:
            # Accumulate lines in a list and join once: the previous
            # ``content += line`` loop was O(n^2) and prohibitively slow for
            # multi-hundred-MB files. The generated bytes are identical.
            parts = []
            total = 0
            count = 0
            while total < size:
                line = "This file has been generated using ansible: {0}\n".format(
                    count
                )
                parts.append(line)
                total += len(line)
                count += 1
            content = "".join(parts)
            write_mode = "w"
        with open(path, write_mode) as f:
            f.write(content)
        module.exit_json(changed=True, size=len(content))
    except Exception as e:
        # Report any parameter or I/O error as a module failure.
        module.fail_json(msg="failed to create file due to: {0}".format(to_native(e)))
def main():
    """Module entry point: declare arguments and run the file generator."""
    argument_spec = {
        "size": {"type": "int", "default": 400},
        "path": {"type": "path", "required": True},
        "binary": {"type": "bool", "default": False},
    }
    execute_module(AnsibleModule(argument_spec=argument_spec))


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,247 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2021, Aubin Bikouo <@abikouo>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r"""
module: kubectl_file_compare
short_description: Compare file and directory using kubectl
author:
- Aubin Bikouo (@abikouo)
description:
- This module is used to validate k8s_cp module.
- Compare the local file/directory with the remote pod version
notes:
- This module authenticates on kubernetes cluster using default kubeconfig only.
options:
namespace:
description:
- The pod namespace name
type: str
required: yes
pod:
description:
- The pod name
type: str
required: yes
container:
description:
- The container to retrieve files from.
type: str
remote_path:
description:
- Path of the file or directory on Pod.
type: path
required: yes
local_path:
description:
- Path of the local file or directory.
type: path
content:
description:
- Local content to compare with the remote file from the pod.
- Mutually exclusive with option I(local_path); exactly one of the two is required.
type: str
args:
description:
- The file is considered to be an executable.
- The tool will be run locally and on the pod, and the return codes and standard output are compared.
type: list
kubectl_path:
description:
- Path to the kubectl executable, if not specified it will be download.
type: path
"""
EXAMPLES = r"""
- name: compare local /tmp/foo with /tmp/bar in a remote pod
kubectl_file_compare:
namespace: some-namespace
pod: some-pod
remote_path: /tmp/bar
local_path: /tmp/foo
kubectl_path: /tmp/test/kubectl
- name: Compare executable running help command
kubectl_file_compare:
namespace: some-namespace
pod: some-pod
remote_path: /tmp/test/kubectl
local_path: kubectl
kubectl_path: /tmp/test/kubectl
args:
- "--help"
"""
RETURN = r"""
"""
import os
import filecmp
from tempfile import NamedTemporaryFile, TemporaryDirectory
from ansible.module_utils.basic import AnsibleModule
def kubectl_get_content(module, dest_dir):
    """Copy the module's ``remote_path`` out of the pod via ``kubectl cp``.

    The file lands in ``dest_dir`` under its original basename. Returns a
    tuple ``(local_file, stderr, rc, stdout)`` from the kubectl invocation.
    """
    binary = module.params.get("kubectl_path")
    if binary is None:
        # Fall back to whatever kubectl is on PATH.
        binary = module.get_bin_path("kubectl", required=True)
    remote_path = module.params.get("remote_path")
    source = "{0}/{1}:{2}".format(
        module.params.get("namespace"), module.params.get("pod"), remote_path
    )
    command = [binary, "cp", source]
    container = module.params.get("container")
    if container:
        command.extend(["-c", container])
    destination = os.path.join(dest_dir, os.path.basename(remote_path))
    command.append(destination)
    rc, out, err = module.run_command(command)
    return destination, err, rc, out
def kubectl_run_from_pod(module):
    """Run the module's ``remote_path`` executable inside the pod.

    Builds a ``kubectl exec`` command line (optionally scoped to a container)
    and returns ``module.run_command``'s ``(rc, stdout, stderr)`` tuple.
    """
    binary = module.params.get("kubectl_path")
    if binary is None:
        # Fall back to whatever kubectl is on PATH.
        binary = module.get_bin_path("kubectl", required=True)
    command = [
        binary,
        "exec",
        module.params.get("pod"),
        "-n",
        module.params.get("namespace"),
    ]
    container = module.params.get("container")
    if container:
        command.extend(["-c", container])
    command.append("--")
    command.append(module.params.get("remote_path"))
    command.extend(module.params.get("args"))
    return module.run_command(command)
def compare_directories(dir1, dir2):
    """Recursively compare two directory trees.

    Returns True only when both trees contain exactly the same entries and
    every common file's content matches (deep, content-level comparison);
    returns False on any extra entry, uncomparable entry, or mismatch.
    """
    diff = filecmp.dircmp(dir1, dir2)
    # Any one-sided or uncomparable entry means the trees differ.
    if diff.left_only or diff.right_only or diff.funny_files:
        return False
    _, mismatch, errors = filecmp.cmpfiles(
        dir1, dir2, diff.common_files, shallow=False
    )
    if mismatch or errors:
        return False
    # Descend into every subdirectory present on both sides.
    return all(
        compare_directories(os.path.join(dir1, sub), os.path.join(dir2, sub))
        for sub in diff.common_dirs
    )
def execute_module(module):
    """Compare a local file/directory/content with its counterpart in a pod.

    Three modes, selected by the supplied parameters:
    - ``args`` set: run the executable both locally and in the pod and
      compare return codes and standard output.
    - ``content`` set: compare the supplied text with the remote file.
    - otherwise: compare the local file or directory tree with the remote
      path (directories are compared recursively).
    Exits successfully when they match; fails the module otherwise.
    """
    args = module.params.get("args")
    local_path = module.params.get("local_path")
    namespace = module.params.get("namespace")
    pod = module.params.get("pod")
    file = module.params.get("remote_path")
    content = module.params.get("content")
    if args:
        pod_rc, pod_out, pod_err = kubectl_run_from_pod(module)
        rc, out, err = module.run_command([module.params.get("local_path")] + args)
        if rc == pod_rc and out == pod_out:
            module.exit_json(
                msg="{0} and {1}/{2}:{3} are same.".format(
                    local_path, namespace, pod, file
                ),
                rc=rc,
                stderr=err,
                stdout=out,
            )
        result = dict(
            local=dict(rc=rc, out=out, err=err),
            remote=dict(rc=pod_rc, out=pod_out, err=pod_err),
        )
        # Bug fix: the failure message previously claimed the files "are same".
        module.fail_json(
            msg=f"{local_path} and {namespace}/{pod}:{file} are different.", **result
        )
    else:
        with TemporaryDirectory() as tmpdirname:
            file_from_pod, err, rc, out = kubectl_get_content(
                module=module, dest_dir=tmpdirname
            )
            if not os.path.exists(file_from_pod):
                module.fail_json(
                    msg="failed to copy content from pod", error=err, output=out
                )
            if content is not None:
                with NamedTemporaryFile(mode="w") as tmp_file:
                    tmp_file.write(content)
                    tmp_file.flush()
                    if filecmp.cmp(file_from_pod, tmp_file.name):
                        module.exit_json(
                            msg=f"defined content and {namespace}/{pod}:{file} are same."
                        )
                    # Bug fix: failure message previously said "are same".
                    module.fail_json(
                        msg=f"defined content and {namespace}/{pod}:{file} are different."
                    )
            if os.path.isfile(local_path):
                if filecmp.cmp(file_from_pod, local_path):
                    module.exit_json(
                        msg=f"{local_path} and {namespace}/{pod}:{file} are same."
                    )
                # Bug fix: failure message previously said "are same".
                module.fail_json(
                    msg=f"{local_path} and {namespace}/{pod}:{file} are different."
                )
            if os.path.isdir(local_path):
                if compare_directories(file_from_pod, local_path):
                    module.exit_json(
                        msg=f"{local_path} and {namespace}/{pod}:{file} are same."
                    )
                # Bug fix: failure message previously said "are same".
                module.fail_json(
                    msg=f"{local_path} and {namespace}/{pod}:{file} are different."
                )
def main():
    """Module entry point: declare arguments and run the comparison."""
    argument_spec = {
        "namespace": {"type": "str", "required": True},
        "pod": {"type": "str", "required": True},
        "container": {},
        "remote_path": {"type": "path", "required": True},
        "local_path": {"type": "path"},
        "content": {"type": "str"},
        "kubectl_path": {"type": "path"},
        "args": {"type": "list"},
    }
    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=[("local_path", "content")],
        required_one_of=[["local_path", "content"]],
    )
    execute_module(module)


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,5 @@
---
collections:
- kubernetes.core
dependencies:
- setup_namespace

View File

@@ -0,0 +1,49 @@
---
- set_fact:
copy_namespace: "{{ test_namespace }}"
- block:
- name: Download kubectl executable used to compare results
get_url:
url: https://dl.k8s.io/release/v1.21.3/bin/linux/amd64/kubectl
dest: "{{ kubectl_path }}"
- name: make kubectl executable
ansible.builtin.file:
path: "{{ kubectl_path }}"
mode: "+x"
# Ensure namespace and create pod to perform tests on
- name: Ensure namespace exists
k8s:
definition:
apiVersion: v1
kind: Namespace
metadata:
name: "{{ copy_namespace }}"
- name: Create Pods
k8s:
namespace: '{{ copy_namespace }}'
wait: yes
template: pods_definition.j2
- include_tasks: test_copy_errors.yml
- include_tasks: test_copy_file.yml
- include_tasks: test_multi_container_pod.yml
- include_tasks: test_copy_directory.yml
- include_tasks: test_copy_large_file.yml
always:
- name: Remove kubectl executable
ansible.builtin.file:
path: "{{ kubectl_path }}"
state: absent
ignore_errors: true
- name: Remove namespace
k8s:
kind: Namespace
name: "{{ copy_namespace }}"
state: absent
ignore_errors: true

View File

@@ -0,0 +1,85 @@
---
- block:
- name: copy directory into remote Pod (create new directory)
k8s_cp:
namespace: '{{ copy_namespace }}'
pod: '{{ pod_with_one_container.name }}'
remote_path: /dest_data
local_path: files/data
state: to_pod
- name: compare directories
kubectl_file_compare:
namespace: '{{ copy_namespace }}'
pod: '{{ pod_with_one_container.name }}'
remote_path: /dest_data
local_path: '{{ role_path }}/files/data'
kubectl_path: "{{ kubectl_path }}"
- name: copy directory into remote Pod (existing directory)
k8s_cp:
namespace: '{{ copy_namespace }}'
pod: '{{ pod_with_one_container.name }}'
remote_path: /tmp
local_path: files/data
state: to_pod
- name: compare directories
kubectl_file_compare:
namespace: '{{ copy_namespace }}'
pod: '{{ pod_with_one_container.name }}'
remote_path: /tmp/data
local_path: '{{ role_path }}/files/data'
kubectl_path: "{{ kubectl_path }}"
- name: copy directory from Pod into local filesystem (new directory to create)
k8s_cp:
namespace: '{{ copy_namespace }}'
pod: '{{ pod_with_one_container.name }}'
remote_path: /tmp/data
local_path: /tmp/test
state: from_pod
- name: compare directories
kubectl_file_compare:
namespace: '{{ copy_namespace }}'
pod: '{{ pod_with_one_container.name }}'
remote_path: /tmp/data
local_path: /tmp/test
kubectl_path: "{{ kubectl_path }}"
- name: copy directory from Pod into local filesystem (existing directory)
k8s_cp:
namespace: '{{ copy_namespace }}'
pod: '{{ pod_with_one_container.name }}'
remote_path: /tmp/data
local_path: /tmp
state: from_pod
- name: compare directories
kubectl_file_compare:
namespace: '{{ copy_namespace }}'
pod: '{{ pod_with_one_container.name }}'
remote_path: /tmp/data
local_path: /tmp/data
kubectl_path: "{{ kubectl_path }}"
always:
- name: Remove directories created into remote Pod
k8s_exec:
namespace: '{{ copy_namespace }}'
pod: '{{ pod_with_one_container.name }}'
command: 'rm -rf {{ item }}'
ignore_errors: true
with_items:
- /dest_data
- /tmp/data
- name: Remove local directories
file:
path: '{{ item }}'
state: absent
ignore_errors: true
with_items:
- /tmp/data
- /tmp/test

View File

@@ -0,0 +1,69 @@
---
# copy non-existent local file should fail
- name: copy non-existent file into remote Pod
k8s_cp:
namespace: '{{ copy_namespace }}'
pod: '{{ pod_with_one_container.name }}'
remote_path: /tmp
local_path: this_file_does_not_exist
state: to_pod
ignore_errors: true
register: copy_non_existent
- name: check that error message is as expected
assert:
that:
- copy_non_existent is failed
- copy_non_existent.msg == "this_file_does_not_exist does not exist in local filesystem"
# copy non-existent pod file should fail
- name: copy of non-existent file from remote pod should fail
k8s_cp:
namespace: '{{ copy_namespace }}'
pod: '{{ pod_with_one_container.name }}'
remote_path: /this_file_does_not_exist
local_path: /tmp
state: from_pod
ignore_errors: true
register: copy_non_existent
- name: check that error message is as expected
assert:
that:
- copy_non_existent is failed
- copy_non_existent.msg == "/this_file_does_not_exist does not exist in remote pod filesystem"
# copy file into multiple container pod without specifying the container should fail
- name: copy file into multiple container pod
k8s_cp:
namespace: '{{ copy_namespace }}'
pod: '{{ pod_with_two_container.name }}'
remote_path: /tmp
local_path: files/simple_file.txt
state: to_pod
ignore_errors: true
register: copy_multi_container
- name: check that error message is as expected
assert:
that:
- copy_multi_container is failed
- copy_multi_container.msg == "Pod contains more than 1 container, option 'container' should be set"
# copy using non-existent container from pod should failed
- name: copy file into multiple container pod
k8s_cp:
namespace: '{{ copy_namespace }}'
pod: '{{ pod_with_two_container.name }}'
remote_path: /tmp
local_path: files/simple_file.txt
state: to_pod
container: this_is_a_fake_container
ignore_errors: true
register: copy_fake_container
- name: check that error message is as expected
assert:
that:
- copy_fake_container is failed
- copy_fake_container.msg == "Pod has no container this_is_a_fake_container"

View File

@@ -0,0 +1,206 @@
---
- block:
# Text file
- name: copy text file into remote pod
k8s_cp:
namespace: '{{ copy_namespace }}'
pod: '{{ pod_with_one_container.name }}'
remote_path: /tmp
local_path: files/simple_file.txt
state: to_pod
- name: Compare files
kubectl_file_compare:
namespace: '{{ copy_namespace }}'
pod: '{{ pod_with_one_container.name }}'
remote_path: /tmp/simple_file.txt
content: "{{ lookup('file', 'simple_file.txt')}}"
kubectl_path: "{{ kubectl_path }}"
- name: Copy simple text file from Pod
k8s_cp:
namespace: '{{ copy_namespace }}'
pod: '{{ pod_with_one_container.name }}'
remote_path: /tmp/simple_file.txt
local_path: /tmp/copy_from_pod.txt
state: from_pod
- name: Compare files
kubectl_file_compare:
namespace: '{{ copy_namespace }}'
pod: '{{ pod_with_one_container.name }}'
remote_path: /tmp/simple_file.txt
local_path: /tmp/copy_from_pod.txt
kubectl_path: "{{ kubectl_path }}"
# Binary file
- name: Create temp binary file
tempfile:
state: file
register: binfile
- name: Generate random binary content
command: dd if=/dev/urandom of={{ binfile.path }} bs=1M count=1
- name: Copy executable into Pod
k8s_cp:
namespace: '{{ copy_namespace }}'
pod: '{{ pod_with_one_container.name }}'
remote_path: /tmp/hello.exe
local_path: "{{ binfile.path }}"
state: to_pod
- name: Get remote hash
kubernetes.core.k8s_exec:
namespace: "{{ copy_namespace }}"
pod: "{{ pod_with_one_container.name }}"
command: sha256sum -b /tmp/hello.exe
register: remote_hash
- name: Get local hash
command: sha256sum -b {{ binfile.path }}
register: local_hash
- assert:
that:
- remote_hash.stdout.split()[0] == local_hash.stdout.split()[0]
- name: Generate tempfile
tempfile:
state: file
register: binfile
- name: Copy executable from Pod
k8s_cp:
namespace: '{{ copy_namespace }}'
pod: '{{ pod_with_one_container.name }}'
remote_path: /tmp/hello.exe
local_path: "{{ binfile.path }}"
state: from_pod
- name: Get remote hash
kubernetes.core.k8s_exec:
namespace: "{{ copy_namespace }}"
pod: "{{ pod_with_one_container.name }}"
command: sha256sum -b /tmp/hello.exe
register: remote_hash
- name: Get local hash
command: sha256sum -b {{ binfile.path }}
register: local_hash
- assert:
that:
- remote_hash.stdout.split()[0] == local_hash.stdout.split()[0]
# zip files
- name: copy zip file into remote pod
k8s_cp:
namespace: '{{ copy_namespace }}'
pod: '{{ pod_with_one_container.name }}'
remote_path: /tmp
local_path: files/simple_zip_file.txt.gz
state: to_pod
- name: compare zip files
kubectl_file_compare:
namespace: '{{ copy_namespace }}'
pod: '{{ pod_with_one_container.name }}'
remote_path: /tmp/simple_zip_file.txt.gz
local_path: '{{ role_path }}/files/simple_zip_file.txt.gz'
kubectl_path: "{{ kubectl_path }}"
- name: copy zip file from pod into local filesystem
k8s_cp:
namespace: '{{ copy_namespace }}'
pod: '{{ pod_with_one_container.name }}'
remote_path: /tmp/simple_zip_file.txt.gz
local_path: /tmp/copied_from_pod.txt.gz
state: from_pod
- name: compare zip files
kubectl_file_compare:
namespace: '{{ copy_namespace }}'
pod: '{{ pod_with_one_container.name }}'
remote_path: /tmp/simple_zip_file.txt.gz
local_path: /tmp/copied_from_pod.txt.gz
kubectl_path: "{{ kubectl_path }}"
# tar files
- name: copy archive into remote pod
k8s_cp:
namespace: '{{ copy_namespace }}'
pod: '{{ pod_with_one_container.name }}'
remote_path: /tmp
local_path: files/archive.tar
state: to_pod
- name: compare archive
kubectl_file_compare:
namespace: '{{ copy_namespace }}'
pod: '{{ pod_with_one_container.name }}'
remote_path: /tmp/archive.tar
local_path: '{{ role_path }}/files/archive.tar'
kubectl_path: "{{ kubectl_path }}"
- name: copy archive from remote pod into local filesystem
k8s_cp:
namespace: '{{ copy_namespace }}'
pod: '{{ pod_with_one_container.name }}'
remote_path: /tmp/archive.tar
local_path: /tmp/local_archive.tar
state: from_pod
- name: compare archive
kubectl_file_compare:
namespace: '{{ copy_namespace }}'
pod: '{{ pod_with_one_container.name }}'
remote_path: /tmp/archive.tar
local_path: /tmp/local_archive.tar
kubectl_path: "{{ kubectl_path }}"
# Copy into Pod using content option
- name: set content to be copied into Pod
set_fact:
pod_content: "{{ lookup('password', '/dev/null chars=ascii_lowercase,digits,punctuation length=128') }}"
- name: copy archive into remote pod
k8s_cp:
namespace: '{{ copy_namespace }}'
pod: '{{ pod_with_one_container.name }}'
remote_path: /this_content.txt
content: '{{ pod_content }}'
state: to_pod
- name: Assert that content is as expected into Pod
kubectl_file_compare:
namespace: '{{ copy_namespace }}'
pod: '{{ pod_with_one_container.name }}'
remote_path: /this_content.txt
content: '{{ pod_content }}'
kubectl_path: "{{ kubectl_path }}"
always:
- name: Delete file created on Pod
k8s_exec:
namespace: '{{ copy_namespace }}'
pod: '{{ pod_with_one_container.name }}'
command: 'rm {{ item }}'
ignore_errors: true
with_items:
- /tmp/simple_file.txt
- /tmp/hello.exe
- /tmp/simple_zip_file.txt.gz
- /tmp/archive.tar
- /this_content.txt
- name: Delete file created locally
file:
path: '{{ item }}'
state: absent
with_items:
- /tmp/copy_from_pod.txt
- /tmp/hello
- /tmp/copied_from_pod.txt.gz
- /tmp/local_archive.tar

View File

@@ -0,0 +1,103 @@
---
- name: test copy of large binary and text files
block:
    # Directory on the controller used for the generated large test files.
    # NOTE(review): no_log on a non-sensitive path only hides useful debug
    # output — presumably unintentional; confirm before removing.
    - set_fact:
        test_directory: "/tmp/test_k8scp_large_files"
      no_log: true
- name: create temporary directory for local files
ansible.builtin.file:
path: "{{ test_directory }}"
state: directory
- name: create large text file
k8s_create_file:
path: "{{ test_directory }}/large_text_file.txt"
size: 150
- name: create large binary file
k8s_create_file:
path: "{{ test_directory }}/large_bin_file.bin"
size: 200
binary: true
# Copy large text file from/to local filesystem to Pod
- name: copy large file into remote Pod
k8s_cp:
namespace: '{{ copy_namespace }}'
pod: '{{ pod_with_one_container.name }}'
remote_path: /large_text_file.txt
local_path: "{{ test_directory }}/large_text_file.txt"
state: to_pod
- name: Compare files
kubectl_file_compare:
namespace: '{{ copy_namespace }}'
pod: '{{ pod_with_one_container.name }}'
remote_path: /large_text_file.txt
local_path: "{{ test_directory }}/large_text_file.txt"
kubectl_path: "{{ kubectl_path }}"
- name: copy large file from Pod into local filesystem
k8s_cp:
namespace: '{{ copy_namespace }}'
pod: '{{ pod_with_one_container.name }}'
remote_path: /large_text_file.txt
local_path: "{{ test_directory }}/large_text_file_from_pod.txt"
state: from_pod
- name: Compare files
kubectl_file_compare:
namespace: '{{ copy_namespace }}'
pod: '{{ pod_with_one_container.name }}'
remote_path: /large_text_file.txt
local_path: "{{ test_directory }}/large_text_file_from_pod.txt"
kubectl_path: "{{ kubectl_path }}"
# Copy large binary file from/to local filesystem to Pod
- name: copy large file into remote Pod
k8s_cp:
namespace: '{{ copy_namespace }}'
pod: '{{ pod_with_one_container.name }}'
remote_path: /large_bin_file.bin
local_path: "{{ test_directory }}/large_bin_file.bin"
state: to_pod
- name: Compare executable, local vs remote
kubectl_file_compare:
namespace: '{{ copy_namespace }}'
pod: '{{ pod_with_one_container.name }}'
remote_path: /large_bin_file.bin
local_path: "{{ test_directory }}/large_bin_file.bin"
- name: copy executable from pod into local filesystem
k8s_cp:
namespace: '{{ copy_namespace }}'
pod: '{{ pod_with_one_container.name }}'
remote_path: /large_bin_file.bin
local_path: "{{ test_directory }}/large_bin_file_from_pod.bin"
state: from_pod
- name: Compare executable, local vs remote
kubectl_file_compare:
namespace: '{{ copy_namespace }}'
pod: '{{ pod_with_one_container.name }}'
remote_path: /large_bin_file.bin
local_path: "{{ test_directory }}/large_bin_file_from_pod.bin"
always:
- name: Delete temporary directory created for the test
ansible.builtin.file:
path: "{{ test_directory }}"
state: absent
ignore_errors: true
- name: Delete file created on Pod
k8s_exec:
namespace: '{{ copy_namespace }}'
pod: '{{ pod_with_one_container.name }}'
command: 'rm {{ item }}'
ignore_errors: true
with_items:
- /large_text_file.txt
- /large_bin_file.bin

View File

@@ -0,0 +1,71 @@
---
- set_fact:
random_content: "{{ lookup('password', '/dev/null chars=ascii_lowercase,digits,punctuation length=128') }}"
- name: Copy content into first pod's container
k8s_cp:
namespace: '{{ copy_namespace }}'
pod: '{{ pod_with_two_container.name }}'
remote_path: /file_from_localhost.txt
content: '{{ random_content }}'
container: '{{ pod_with_two_container.container[0] }}'
state: to_pod
- name: Assert that content has been copied into first container
kubectl_file_compare:
namespace: '{{ copy_namespace }}'
pod: '{{ pod_with_two_container.name }}'
remote_path: /file_from_localhost.txt
container: '{{ pod_with_two_container.container[0] }}'
content: '{{ random_content }}'
kubectl_path: "{{ kubectl_path }}"
- name: Assert that content has not been copied into second container
kubectl_file_compare:
namespace: '{{ copy_namespace }}'
pod: '{{ pod_with_two_container.name }}'
remote_path: /file_from_localhost.txt
container: '{{ pod_with_two_container.container[1] }}'
content: '{{ random_content }}'
kubectl_path: "{{ kubectl_path }}"
register: diff
ignore_errors: true
- name: check that diff failed
assert:
that:
- diff is failed
- name: Copy content into second's pod container
k8s_cp:
namespace: '{{ copy_namespace }}'
pod: '{{ pod_with_two_container.name }}'
remote_path: /file_from_localhost_01.txt
content: '{{ random_content }}-secondpod'
container: '{{ pod_with_two_container.container[1] }}'
state: to_pod
- name: Assert that content has not been copied into first container
kubectl_file_compare:
namespace: '{{ copy_namespace }}'
pod: '{{ pod_with_two_container.name }}'
remote_path: /file_from_localhost_01.txt
container: '{{ pod_with_two_container.container[0] }}'
content: '{{ random_content }}-secondpod'
kubectl_path: "{{ kubectl_path }}"
ignore_errors: true
register: diff_1
- name: check that diff failed
assert:
that:
- diff_1 is failed
- name: Assert that content has been copied into second container
kubectl_file_compare:
namespace: '{{ copy_namespace }}'
pod: '{{ pod_with_two_container.name }}'
remote_path: /file_from_localhost_01.txt
container: '{{ pod_with_two_container.container[1] }}'
content: '{{ random_content }}-secondpod'
kubectl_path: "{{ kubectl_path }}"

View File

@@ -0,0 +1,33 @@
---
apiVersion: v1
kind: Pod
metadata:
name: '{{ pod_with_one_container.name }}'
spec:
containers:
- name: '{{ pod_with_one_container.container }}'
image: busybox
command:
- /bin/sh
- -c
- while true;do date;sleep 5; done
---
apiVersion: v1
kind: Pod
metadata:
name: '{{ pod_with_two_container.name }}'
spec:
containers:
- name: '{{ pod_with_two_container.container[0] }}'
image: busybox:1.32.0
command:
- /bin/sh
- -c
- while true;do date;sleep 5; done
- name: '{{ pod_with_two_container.container[1] }}'
image: busybox:1.33.0
command:
- /bin/sh
- -c
- while true;do date;sleep 5; done

View File

@@ -0,0 +1,2 @@
time=22
k8s

View File

@@ -0,0 +1,2 @@
---
test_namespace: "crd"

View File

@@ -0,0 +1,3 @@
---
dependencies:
- setup_namespace

View File

@@ -0,0 +1,61 @@
---
- block:
- name: Install custom resource definitions
k8s:
definition: "{{ lookup('file', 'setup-crd.yml') }}"
- name: Pause 5 seconds to avoid race condition
pause:
seconds: 5
- name: Create custom resource definition
k8s:
definition: "{{ lookup('file', 'crd-resource.yml') }}"
namespace: "{{ test_namespace }}"
apply: "{{ create_crd_with_apply | default(omit) }}"
register: create_crd
- name: Patch custom resource definition
k8s:
definition: "{{ lookup('file', 'crd-resource.yml') }}"
namespace: "{{ test_namespace }}"
register: recreate_crd
ignore_errors: yes
- name: Assert that recreating crd is as expected
assert:
that:
- recreate_crd is not failed
- block:
- name: Recreate custom resource definition with merge_type
k8s:
definition: "{{ lookup('file', 'crd-resource.yml') }}"
merge_type:
- merge
namespace: "{{ test_namespace }}"
register: recreate_crd_with_merge
- name: Recreate custom resource definition with merge_type list
k8s:
definition: "{{ lookup('file', 'crd-resource.yml') }}"
merge_type:
- strategic-merge
- merge
namespace: "{{ test_namespace }}"
register: recreate_crd_with_merge_list
when: recreate_crd is successful
- name: Remove crd
k8s:
definition: "{{ lookup('file', 'crd-resource.yml') }}"
namespace: "{{ test_namespace }}"
state: absent
always:
- name: Remove crd namespace
k8s:
kind: Namespace
name: "{{ test_namespace }}"
state: absent

View File

@@ -0,0 +1,3 @@
time=70
k8s_info
k8s

View File

@@ -0,0 +1,25 @@
---
k8s_pod_template:
metadata:
labels:
app: "{{ k8s_pod_name }}"
spec:
serviceAccount: "default"
containers:
- image: "{{ k8s_pod_image }}"
imagePullPolicy: Always
name: "{{ k8s_pod_name }}"
command: []
readinessProbe:
initialDelaySeconds: 15
exec:
command:
- /bin/true
resources:
limits:
cpu: "100m"
memory: "100Mi"
ports: []
env: []
test_namespace: "delete"

View File

@@ -0,0 +1,3 @@
---
dependencies:
- setup_namespace

View File

@@ -0,0 +1,84 @@
---
- block:
- name: Add a daemonset
k8s:
definition:
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: delete-daemonset
namespace: "{{ test_namespace }}"
spec:
selector:
matchLabels:
app: "{{ k8s_pod_name }}"
template: "{{ k8s_pod_template }}"
wait: yes
wait_timeout: 400
vars:
k8s_pod_name: delete-ds
k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:1
register: ds
- name: Check that daemonset wait worked
assert:
that:
- ds.result.status.currentNumberScheduled == ds.result.status.desiredNumberScheduled
- name: Check if pods exist
k8s_info:
namespace: "{{ test_namespace }}"
kind: Pod
label_selectors:
- "app={{ k8s_pod_name }}"
vars:
k8s_pod_name: delete-ds
register: pods_create
- name: Assert that there are pods
assert:
that:
- pods_create.resources
- name: Remove the daemonset
k8s:
kind: DaemonSet
name: delete-daemonset
namespace: "{{ test_namespace }}"
state: absent
wait: yes
- name: Show status of pods
k8s_info:
namespace: "{{ test_namespace }}"
kind: Pod
label_selectors:
- "app={{ k8s_pod_name }}"
vars:
k8s_pod_name: delete-ds
- name: Wait for background deletion
pause:
seconds: 30
- name: Check if pods still exist
k8s_info:
namespace: "{{ test_namespace }}"
kind: Pod
label_selectors:
- "app={{ k8s_pod_name }}"
vars:
k8s_pod_name: delete-ds
register: pods_delete
- name: Assert that deleting the daemonset deleted the pods
assert:
that:
- not pods_delete.resources
always:
- name: Remove namespace
k8s:
kind: Namespace
name: "{{ test_namespace }}"
state: absent

View File

@@ -0,0 +1,2 @@
time=20
k8s

View File

@@ -0,0 +1,3 @@
---
test_namespace: "diff"
diff_configmap: "diff-configmap"

View File

@@ -0,0 +1,2 @@
dependencies:
- setup_namespace

View File

@@ -0,0 +1,148 @@
---
- block:
- set_fact:
diff_namespace: "{{ test_namespace }}"
# Using option 'apply' set to 'yes'
- name: Create Pod using apply and diff set to yes
k8s:
namespace: '{{ diff_namespace }}'
apply: yes
template: "pod.j2"
diff: yes
vars:
pod_name: "pod-apply"
pod_image: "busybox:1.32.0"
register: result
- name: check that result has diff attribute
assert:
that:
- result is changed
- result.diff is defined
- name: Update pod definition using apply and diff set to no
k8s:
namespace: '{{ diff_namespace }}'
apply: yes
template: "pod.j2"
diff: no
vars:
pod_name: "pod-apply"
pod_image: "busybox:1.33.0"
register: result
- name: check that output has no diff attribute
assert:
that:
- result is changed
- result.diff is not defined
# Using option 'state=patched'
- name: Create Pod using state=present and diff set to yes
k8s:
namespace: '{{ diff_namespace }}'
state: present
template: "pod.j2"
vars:
pod_name: "pod-patch"
pod_image: "busybox:1.32.0"
register: result
- name: Update pod definition using state=patched
k8s:
namespace: '{{ diff_namespace }}'
state: patched
template: "pod.j2"
diff: no
vars:
pod_name: "pod-patch"
pod_image: "busybox:1.33.0"
pod_label: "patching"
register: result
- name: check that output has no diff attribute
assert:
that:
- result is changed
- result.diff is not defined
- name: Update pod definition using state=patched and diff=yes
k8s:
namespace: '{{ diff_namespace }}'
state: patched
template: "pod.j2"
diff: yes
vars:
pod_name: "pod-patch"
pod_image: "busybox:1.33.0"
pod_label: "running"
register: result
- name: check that output has no diff attribute
assert:
that:
- result is changed
- result.diff is defined
# check diff mode using force=yes
- name: Create a ConfigMap
k8s:
kind: ConfigMap
name: '{{ diff_configmap }}'
namespace: '{{ diff_namespace }}'
definition:
data:
key: "initial value"
diff: yes
register: result
- name: check that output has no diff attribute
assert:
that:
- result is changed
- result.diff is not defined
- name: Update ConfigMap using force and diff=no
k8s:
kind: ConfigMap
name: '{{ diff_configmap }}'
namespace: '{{ diff_namespace }}'
force: yes
definition:
data:
key: "update value with diff=no"
diff: no
register: result
- name: check that output has no diff attribute
assert:
that:
- result is changed
- result.diff is not defined
- name: Update ConfigMap using force and diff=yes
k8s:
kind: ConfigMap
name: '{{ diff_configmap }}'
namespace: '{{ diff_namespace }}'
force: yes
definition:
data:
key: "update value with diff=yes"
diff: yes
register: result
- name: check that output has diff attribute
assert:
that:
- result is changed
- result.diff is defined
always:
- name: Ensure namespace is deleted
k8s:
state: absent
kind: Namespace
name: '{{ diff_namespace }}'
ignore_errors: true

View File

@@ -0,0 +1,14 @@
apiVersion: v1
kind: Pod
metadata:
  # Quote templated scalars so the rendered YAML stays valid even if a
  # variable ever expands to something YAML-special (':', '#', leading '{').
  name: "{{ pod_name }}"
  labels:
    ansible: "{{ pod_label | default('demo') }}"
spec:
  containers:
    - name: c0
      image: "{{ pod_image }}"
      command:
        - /bin/sh
        - -c
        - while true;do date;sleep 5; done

View File

@@ -0,0 +1,4 @@
k8s_drain
k8s
k8s_info
time=78

View File

@@ -0,0 +1,3 @@
---
test_namespace: "drain"
k8s_wait_timeout: 400

View File

@@ -0,0 +1,3 @@
---
dependencies:
- setup_namespace

View File

@@ -0,0 +1,233 @@
---
- block:
- name: Set common facts
set_fact:
drain_daemonset_name: "promotheus-dset"
drain_pod_name: "pod-drain"
# It seems that the default ServiceAccount can take a bit to be created
# right after a cluster is brought up. This can lead to the ServiceAccount
# admission controller rejecting a Pod creation request because the
# ServiceAccount does not yet exist.
- name: Wait for default serviceaccount to be created
k8s_info:
kind: ServiceAccount
name: default
namespace: "{{ test_namespace }}"
wait: yes
- name: list cluster nodes
k8s_info:
kind: node
register: nodes
- name: Select uncordoned nodes
set_fact:
uncordoned_nodes: "{{ nodes.resources | selectattr('spec.unschedulable', 'undefined') | map(attribute='metadata.name') | list}}"
- name: Assert that at least one node is schedulable
assert:
that:
- uncordoned_nodes | length > 0
- name: select node to drain
set_fact:
node_to_drain: '{{ uncordoned_nodes[0] }}'
- name: Deploy daemonset on cluster
k8s:
namespace: '{{ test_namespace }}'
definition:
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: '{{ drain_daemonset_name }}'
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchFields:
- key: metadata.name
operator: In
values:
- '{{ node_to_drain }}'
selector:
matchLabels:
name: prometheus-exporter
template:
metadata:
labels:
name: prometheus-exporter
spec:
containers:
- name: prometheus
image: prom/node-exporter
ports:
- containerPort: 80
- name: Create Pods not managed by ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet.
k8s:
namespace: '{{ test_namespace }}'
wait: yes
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
definition:
apiVersion: v1
kind: Pod
metadata:
name: '{{ drain_pod_name }}'
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchFields:
- key: metadata.name
operator: In
values:
- '{{ node_to_drain }}'
containers:
- name: c0
image: busybox
command:
- /bin/sh
- -c
- while true;do date;sleep 5; done
- name: Cordon node
k8s_drain:
state: cordon
name: '{{ node_to_drain }}'
register: cordon
- name: assert that cordon is changed
assert:
that:
- cordon is changed
- name: Test cordon idempotency
k8s_drain:
state: cordon
name: '{{ node_to_drain }}'
register: cordon
- name: assert that cordon is not changed
assert:
that:
- cordon is not changed
- name: Get pods
k8s_info:
kind: Pod
namespace: '{{ test_namespace }}'
register: Pod
- name: assert that pods are running on cordoned node
assert:
that:
- "{{ Pod.resources | selectattr('status.phase', 'equalto', 'Running') | selectattr('spec.nodeName', 'equalto', node_to_drain) | list | length > 0 }}"
- name: Uncordon node
k8s_drain:
state: uncordon
name: '{{ node_to_drain }}'
register: uncordon
- name: assert that uncordon is changed
assert:
that:
- uncordon is changed
- name: Test uncordon idempotency
k8s_drain:
state: uncordon
name: '{{ node_to_drain }}'
register: uncordon
- name: assert that uncordon is not changed
assert:
that:
- uncordon is not changed
- name: Drain node
k8s_drain:
state: drain
name: '{{ node_to_drain }}'
ignore_errors: true
register: drain_result
- name: assert that drain failed due to DaemonSet managed Pods
assert:
that:
- drain_result is failed
- '"cannot delete DaemonSet-managed Pods" in drain_result.msg'
- '"cannot delete Pods not managed by ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet" in drain_result.msg'
- name: Drain node using ignore_daemonsets and force options
k8s_drain:
state: drain
name: '{{ node_to_drain }}'
delete_options:
force: true
ignore_daemonsets: true
wait_timeout: 0
register: drain_result
- name: assert that node has been drained
assert:
that:
- drain_result is changed
- '"node {{ node_to_drain }} marked unschedulable." in drain_result.result'
- name: assert that unmanaged pod were deleted
k8s_info:
namespace: '{{ test_namespace }}'
kind: Pod
name: '{{ drain_pod_name }}'
register: _result
failed_when: _result.resources
- name: Test drain idempotency
k8s_drain:
state: drain
name: '{{ node_to_drain }}'
delete_options:
force: true
ignore_daemonsets: true
register: drain_result
- name: Check idempotency
assert:
that:
- drain_result is not changed
- name: Get DaemonSet
k8s_info:
kind: DaemonSet
namespace: '{{ test_namespace }}'
name: '{{ drain_daemonset_name }}'
register: dset_result
- name: assert that daemonset managed pods were not removed
assert:
that:
- dset_result.resources | list | length > 0
- name: Uncordon node
k8s_drain:
state: uncordon
name: '{{ node_to_drain }}'
always:
- name: Uncordon node
k8s_drain:
state: uncordon
name: '{{ node_to_drain }}'
when: node_to_drain is defined
ignore_errors: true
- name: delete namespace
k8s:
state: absent
kind: namespace
name: '{{ test_namespace }}'

View File

@@ -0,0 +1,3 @@
k8s_exec
k8s
time=23

View File

@@ -0,0 +1,2 @@
---
test_namespace: "k8s-exec"

View File

@@ -0,0 +1,3 @@
---
dependencies:
- setup_namespace

View File

@@ -0,0 +1,60 @@
---
- vars:
k8s_wait_timeout: 400
pod: sleep-pod
exec_pod_definition:
apiVersion: v1
kind: Pod
metadata:
name: "{{ pod }}"
namespace: "{{ test_namespace }}"
spec:
containers:
- name: sleeper
image: busybox
command: ["sleep", "infinity"]
block:
- name: "Create a pod"
k8s:
definition: "{{ exec_pod_definition }}"
wait: yes
wait_sleep: 1
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
- name: "Execute a command"
k8s_exec:
pod: "{{ pod }}"
namespace: "{{ test_namespace }}"
command: cat /etc/resolv.conf
register: output
- name: "Show k8s_exec output"
debug:
var: output
- name: "Assert k8s_exec output is correct"
assert:
that:
- "'nameserver' in output.stdout"
- name: Check if rc is returned for the given command
k8s_exec:
namespace: "{{ test_namespace }}"
pod: "{{ pod }}"
command: 'false'
register: command_status
ignore_errors: True
- name: Check last command status
assert:
that:
- command_status.rc != 0
- command_status.return_code != 0
always:
- name: "Cleanup namespace"
k8s:
kind: Namespace
name: "{{ test_namespace }}"
state: absent

View File

@@ -0,0 +1,3 @@
time=57
k8s
k8s_info

View File

@@ -0,0 +1,10 @@
---
test_namespace:
- testing
- testing1
- testing2
- testing3
- testing4
- testing5
- testing6
- test-namespace-module-defaults

View File

@@ -0,0 +1,3 @@
---
dependencies:
- remove_namespace

View File

@@ -0,0 +1,504 @@
---
- block:
- name: Create a namespace
k8s:
name: testing
kind: Namespace
register: output
- name: Show output
debug:
var: output
- name: Setting validate_certs to true causes a failure
k8s:
name: testing
kind: Namespace
validate_certs: yes
ca_cert: /dev/null # invalid CA certificate
ignore_errors: yes
register: output
- name: assert that validate_certs caused a failure (and therefore was correctly translated to verify_ssl)
assert:
that:
- output is failed
- block:
- name: Copy default kubeconfig
copy:
remote_src: yes
src: ~/.kube/config
dest: ~/.kube/customconfig
- name: Delete default kubeconfig
file:
path: ~/.kube/config
state: absent
- name: Try to create namespace without default kube config
kubernetes.core.k8s:
name: testing
kind: Namespace
ignore_errors: true
register: result
- name: No default kube config should fail
assert:
that: result is not successful
- name: Using custom config location should succeed
kubernetes.core.k8s:
name: testing
kind: Namespace
kubeconfig: ~/.kube/customconfig
- name: Using an env var to set config location should succeed
kubernetes.core.k8s:
name: testing
kind: Namespace
environment:
K8S_AUTH_KUBECONFIG: ~/.kube/customconfig
- name: Using in-memory kubeconfig should succeed
kubernetes.core.k8s:
name: testing
kind: Namespace
kubeconfig: "{{ lookup('file', '~/.kube/customconfig') | from_yaml }}"
always:
- name: Return kubeconfig
copy:
remote_src: yes
src: ~/.kube/customconfig
dest: ~/.kube/config
ignore_errors: yes
- name: Delete custom config
file:
path: ~/.kube/customconfig
state: absent
ignore_errors: yes
- name: Ensure k8s_info works with empty resources
k8s_info:
kind: Deployment
namespace: testing
api_version: apps/v1
register: k8s_info
- name: Assert that k8s_info is in correct format
assert:
that:
- "'resources' in k8s_info"
- not k8s_info.resources
- name: Create a service
k8s:
state: present
resource_definition: &svc
apiVersion: v1
kind: Service
metadata:
name: web
namespace: testing
labels:
app: galaxy
service: web
spec:
selector:
app: galaxy
service: web
ports:
- protocol: TCP
targetPort: 8000
name: port-8000-tcp
port: 8000
register: output
- name: Show output
debug:
var: output
- name: Create the service again
k8s:
state: present
resource_definition: *svc
register: output
- name: Service creation should be idempotent
assert:
that: not output.changed
- name: Create a ConfigMap
k8s:
kind: ConfigMap
name: test-force-update
namespace: testing
definition:
data:
key: value
- name: Force update ConfigMap
k8s:
kind: ConfigMap
name: test-force-update
namespace: testing
definition:
data:
key: newvalue
force: yes
- name: Create PVC
k8s:
state: present
inline: &pvc
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: elastic-volume
namespace: testing
spec:
resources:
requests:
storage: 5Gi
accessModes:
- ReadWriteOnce
- name: Show output
debug:
var: output
- name: Create the PVC again
k8s:
state: present
inline: *pvc
- name: Ensure PVC creation is idempotent
assert:
that: not output.changed
- name: Create deployment
k8s:
state: present
inline: &deployment
apiVersion: apps/v1
kind: Deployment
metadata:
name: elastic
labels:
app: galaxy
service: elastic
namespace: testing
spec:
replicas: 1
selector:
matchLabels:
app: galaxy
service: elastic
template:
metadata:
labels:
app: galaxy
service: elastic
spec:
containers:
- name: elastic
volumeMounts:
- mountPath: /usr/share/elasticsearch/data
name: elastic-volume
command: ['elasticsearch']
image: 'ansible/galaxy-elasticsearch:2.4.6'
volumes:
- name: elastic-volume
persistentVolumeClaim:
claimName: elastic-volume
strategy:
type: RollingUpdate
register: output
- name: Show output
debug:
var: output
- name: Create deployment again
k8s:
state: present
inline: *deployment
register: output
- name: Ensure Deployment creation is idempotent
assert:
that: not output.changed
### Type tests
- name: Create a namespace from a string
k8s:
definition: |+
---
kind: Namespace
apiVersion: v1
metadata:
name: testing1
### https://github.com/ansible-collections/community.kubernetes/issues/111
- set_fact:
api_groups: "{{ lookup('kubernetes.core.k8s', cluster_info='api_groups') }}"
- debug:
var: api_groups
- name: Namespace should exist
k8s_info:
kind: Namespace
api_version: v1
name: testing1
register: k8s_info_testing1
failed_when: not k8s_info_testing1.resources or k8s_info_testing1.resources[0].status.phase != "Active"
- name: Create resources from a multidocument yaml string
k8s:
definition: |+
---
kind: Namespace
apiVersion: v1
metadata:
name: testing2
---
kind: Namespace
apiVersion: v1
metadata:
name: testing3
- name: Lookup namespaces
k8s_info:
api_version: v1
kind: Namespace
name: "{{ item }}"
loop:
- testing2
- testing3
register: k8s_namespaces
- name: Resources should exist
assert:
that: item.resources[0].status.phase == 'Active'
loop: "{{ k8s_namespaces.results }}"
- name: Delete resources from a multidocument yaml string
k8s:
state: absent
definition: |+
---
kind: Namespace
apiVersion: v1
metadata:
name: testing2
---
kind: Namespace
apiVersion: v1
metadata:
name: testing3
- name: Lookup namespaces
k8s_info:
api_version: v1
kind: Namespace
name: "{{ item }}"
loop:
- testing2
- testing3
register: k8s_namespaces
- name: Resources should not exist
assert:
that:
- not item.resources or item.resources[0].status.phase == "Terminating"
loop: "{{ k8s_namespaces.results }}"
- name: Create resources from a list
k8s:
definition:
- kind: Namespace
apiVersion: v1
metadata:
name: testing4
- kind: Namespace
apiVersion: v1
metadata:
name: testing5
- name: Lookup namespaces
k8s_info:
api_version: v1
kind: Namespace
name: "{{ item }}"
loop:
- testing4
- testing5
register: k8s_namespaces
- name: Resources should exist
assert:
that: item.resources[0].status.phase == 'Active'
loop: "{{ k8s_namespaces.results }}"
- name: Delete resources from a list
k8s:
state: absent
definition:
- kind: Namespace
apiVersion: v1
metadata:
name: testing4
- kind: Namespace
apiVersion: v1
metadata:
name: testing5
- name: Get info about terminating resources
k8s_info:
api_version: v1
kind: Namespace
name: "{{ item }}"
loop:
- testing4
- testing5
register: k8s_info
- name: Ensure resources are terminating if still in results
assert:
that: not item.resources or item.resources[0].status.phase == "Terminating"
loop: "{{ k8s_info.results }}"
- name: Create resources from a yaml string ending with ---
k8s:
definition: |+
---
kind: Namespace
apiVersion: v1
metadata:
name: testing6
---
- name: Namespace should exist
k8s_info:
kind: Namespace
api_version: v1
name: testing6
register: k8s_info_testing6
failed_when: not k8s_info_testing6.resources or k8s_info_testing6.resources[0].status.phase != "Active"
- name: Create large configmap data
command: dd if=/dev/urandom bs=500K count=1
register: cmap_data
- name: Create configmap with large value
k8s:
definition:
apiVersion: v1
kind: ConfigMap
metadata:
name: testmap
namespace: testing
data:
testkey: "{{ cmap_data.stdout | b64encode }}"
wait: true
register: result
- assert:
that:
- result is changed
- name: Retrieve configmap
k8s_info:
kind: ConfigMap
namespace: testing
name: testmap
register: result
- assert:
that:
- result.resources[0].data.testkey == "{{ cmap_data.stdout | b64encode }}"
# test setting module defaults for kubernetes.core.k8s_info
- block:
- name: Create a namespace
kubernetes.core.k8s:
name: test-namespace-module-defaults
kind: Namespace
register: output
- name: Create a ConfigMap
kubernetes.core.k8s:
kind: ConfigMap
name: test-configmap-1
definition:
data:
key1: value1
- name: Create another ConfigMap
kubernetes.core.k8s:
kind: ConfigMap
name: test-configmap-2
definition:
data:
key2: value2
- name: Get list of all ConfigMaps in namespace specified in module_defaults
kubernetes.core.k8s_info:
kind: ConfigMap
register: configmap_info
- name: assert that the ConfigMaps are created in and info is retrieved for namespace specified in module_defaults
assert:
that:
- configmap_info.resources[1].metadata.name == "test-configmap-1"
- configmap_info.resources[1].metadata.namespace == "test-namespace-module-defaults"
- configmap_info.resources[2].metadata.name == "test-configmap-2"
- configmap_info.resources[2].metadata.namespace == "test-namespace-module-defaults"
module_defaults:
group/kubernetes.core.k8s:
namespace: test-namespace-module-defaults
when: ansible_version.full is version("2.12", ">=")
always:
- name: Delete all namespaces
k8s:
state: absent
definition:
- kind: Namespace
apiVersion: v1
metadata:
name: testing
- kind: Namespace
apiVersion: v1
metadata:
name: testing1
- kind: Namespace
apiVersion: v1
metadata:
name: testing2
- kind: Namespace
apiVersion: v1
metadata:
name: testing3
- kind: Namespace
apiVersion: v1
metadata:
name: testing4
- kind: Namespace
apiVersion: v1
metadata:
name: testing5
- kind: Namespace
apiVersion: v1
metadata:
name: testing6
- kind: Namespace
apiVersion: v1
metadata:
name: test-namespace-module-defaults
ignore_errors: yes

View File

@@ -0,0 +1 @@
time=142

View File

@@ -0,0 +1,3 @@
---
# Namespace the delete_options test tasks operate in; created (and pre-cleaned)
# by the setup_namespace dependency role.
test_namespace: "garbage"
# Upper bound in seconds passed as wait_timeout to k8s/k8s_info calls.
k8s_wait_timeout: 400

View File

@@ -0,0 +1,3 @@
---
# setup_namespace ensures test_namespace exists (and that any namespace left
# over from a previous run is removed) before the tasks in this target run.
dependencies:
- setup_namespace

View File

@@ -0,0 +1,236 @@
---
- vars:
    gc_namespace: "{{ test_namespace }}"
    gc_name: garbage-job
    # This is a job definition that runs for 10 minutes and won't gracefully
    # shutdown. It allows us to test foreground vs background deletion.
    job_definition:
      # Job is served by the batch API group; the core "v1" group has no
      # Job kind, so batch/v1 is the correct apiVersion here.
      apiVersion: batch/v1
      kind: Job
      metadata:
        name: "{{ gc_name }}"
        namespace: "{{ gc_namespace }}"
      spec:
        template:
          metadata:
            labels:
              job: gc
          spec:
            containers:
              - name: "{{ gc_name }}"
                image: busybox
                command:
                  - sleep
                  - "600"
            restartPolicy: Never
  block:
    # --- Foreground deletion: dependents are removed before the owner ---
    - name: Add a job
      k8s:
        definition: "{{ job_definition }}"

    - name: Wait Job's pod
      k8s_info:
        kind: Pod
        namespace: "{{ gc_namespace }}"
        label_selectors:
          - "job=gc"
      register: wait_job
      until: wait_job.resources
      retries: 5
      delay: 10

    - name: Wait job's pod running
      k8s_info:
        kind: Pod
        namespace: "{{ gc_namespace }}"
        name: "{{ wait_job.resources[0].metadata.name }}"
        wait: true
        wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
      register: job

    - name: Assert job's pod is running
      assert:
        that: job.resources[0].status.phase == "Running"

    - name: Delete job in foreground
      k8s:
        kind: Job
        name: "{{ gc_name }}"
        namespace: "{{ gc_namespace }}"
        state: absent
        wait: true
        wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
        delete_options:
          propagationPolicy: Foreground

    - name: Test job's pod does not exist
      k8s_info:
        kind: Pod
        namespace: "{{ gc_namespace }}"
        label_selectors:
          - "job=gc"
      register: job

    - name: Assert job's pod does not exist
      assert:
        that: not job.resources

    # --- Background deletion: the owner is removed first, pods linger ---
    - name: Add a job
      k8s:
        definition: "{{ job_definition }}"

    - name: Wait Job's pod
      k8s_info:
        kind: Pod
        namespace: "{{ gc_namespace }}"
        label_selectors:
          - "job=gc"
      register: wait_job
      until: wait_job.resources
      retries: 5
      delay: 10

    - name: Wait job's pod running
      k8s_info:
        kind: Pod
        namespace: "{{ gc_namespace }}"
        name: "{{ wait_job.resources[0].metadata.name }}"
        wait: true
        wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
      register: job

    - name: Assert job's pod is running
      assert:
        that: job.resources[0].status.phase == "Running"

    - name: Delete job in background
      k8s:
        kind: Job
        name: "{{ gc_name }}"
        namespace: "{{ gc_namespace }}"
        state: absent
        wait: true
        wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
        delete_options:
          propagationPolicy: Background

    # The default grace period is 30s so this pod should still be running.
    - name: Test job's pod exists
      k8s_info:
        kind: Pod
        namespace: "{{ gc_namespace }}"
        label_selectors:
          - "job=gc"
      register: job

    - name: Assert job's pod still running
      assert:
        that: job.resources[0].status.phase == "Running"

    # --- Orphan deletion: dependents are left behind permanently ---
    - name: Add a job
      k8s:
        definition: "{{ job_definition }}"

    - name: Wait Job's pod
      k8s_info:
        kind: Pod
        namespace: "{{ gc_namespace }}"
        label_selectors:
          - "job=gc"
      register: wait_job
      until: wait_job.resources
      retries: 5
      delay: 10

    - name: Wait job's pod running
      k8s_info:
        kind: Pod
        namespace: "{{ gc_namespace }}"
        name: "{{ wait_job.resources[0].metadata.name }}"
        wait: true
        wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
      register: job

    - name: Assert job's pod is running
      assert:
        that: job.resources[0].status.phase == "Running"

    - name: Orphan the job's pod
      k8s:
        kind: Job
        name: "{{ gc_name }}"
        namespace: "{{ gc_namespace }}"
        state: absent
        wait: true
        wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
        delete_options:
          propagationPolicy: Orphan

    # Outlast the default 30s grace period to prove the pod was truly
    # orphaned rather than merely still terminating.
    - name: Ensure grace period has expired
      pause:
        seconds: 60

    - name: Test that job's pod is still running
      k8s_info:
        kind: Pod
        namespace: "{{ gc_namespace }}"
        label_selectors:
          - "job=gc"
      register: job

    - name: Assert job's pod is still running
      assert:
        that: job.resources[0].status.phase == "Running"

    # --- Delete preconditions (uid must match for deletion to proceed) ---
    - name: Add a job
      k8s:
        definition: "{{ job_definition }}"
      register: job

    - name: Delete a job with failing precondition
      k8s:
        kind: Job
        name: "{{ gc_name }}"
        namespace: "{{ gc_namespace }}"
        state: absent
        delete_options:
          preconditions:
            uid: not-a-valid-uid
      ignore_errors: true
      register: result

    - name: Assert that deletion failed
      assert:
        that: result is failed

    - name: Delete a job using a valid precondition
      k8s:
        kind: Job
        name: "{{ gc_name }}"
        namespace: "{{ gc_namespace }}"
        state: absent
        delete_options:
          preconditions:
            uid: "{{ job.result.metadata.uid }}"
        wait: true
        wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"

    - name: Check that job is deleted
      k8s_info:
        kind: Job
        namespace: "{{ gc_namespace }}"
        name: "{{ gc_name }}"
      register: job

    - name: Assert job is deleted
      assert:
        that: not job.resources
  always:
    # Best-effort cleanup; the namespace delete also removes orphaned pods.
    - name: Delete namespace
      k8s:
        kind: Namespace
        name: "{{ gc_namespace }}"
        state: absent
      ignore_errors: true

View File

@@ -0,0 +1,3 @@
time=13
k8s
k8s_info

View File

@@ -0,0 +1,3 @@
---
# Namespace the waiter test tasks operate in; created (and pre-cleaned)
# by the setup_namespace dependency role.
test_namespace: "wait"
# Upper bound in seconds passed as wait_timeout to k8s/k8s_info calls.
k8s_wait_timeout: 400

View File

@@ -0,0 +1,3 @@
---
# setup_namespace ensures test_namespace exists (and that any namespace left
# over from a previous run is removed) before the tasks in this target run.
dependencies:
- setup_namespace

View File

@@ -0,0 +1,238 @@
---
- block:
    - set_fact:
        wait_namespace: "{{ test_namespace }}"
        multi_pod_one: multi-pod-1
        multi_pod_two: multi-pod-2

    # --- wait on a single pod: the initContainer delays readiness ---
    - name: Add a simple pod with initContainer
      k8s:
        definition:
          apiVersion: v1
          kind: Pod
          metadata:
            name: "{{ k8s_pod_name }}"
            namespace: "{{ wait_namespace }}"
          spec:
            initContainers:
              - name: init-01
                image: python:3.7-alpine
                command: ['sh', '-c', 'sleep 20']
            containers:
              - name: utilitypod-01
                image: python:3.7-alpine
                command: ['sh', '-c', 'sleep 360']

    - name: Wait and gather information about new pod
      k8s_info:
        name: "{{ k8s_pod_name }}"
        kind: Pod
        namespace: "{{ wait_namespace }}"
        wait: true
        wait_sleep: 5
        wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
      register: wait_info

    - name: Assert that pod creation succeeded
      assert:
        that:
          - wait_info is successful
          - not wait_info.changed
          - wait_info.resources[0].status.phase == "Running"

    - name: Remove Pod
      k8s:
        api_version: v1
        kind: Pod
        name: "{{ k8s_pod_name }}"
        namespace: "{{ wait_namespace }}"
        state: absent
        wait: true
        wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
      ignore_errors: true
      register: short_wait_remove_pod

    - name: Check if pod is removed
      assert:
        that:
          - short_wait_remove_pod is successful
          - short_wait_remove_pod.changed

    # --- wait on several pods selected by a shared label ---
    - name: Create multiple pod with initContainer
      k8s:
        definition:
          apiVersion: v1
          kind: Pod
          metadata:
            labels:
              run: multi-box
            name: "{{ multi_pod_one }}"
            namespace: "{{ wait_namespace }}"
          spec:
            initContainers:
              - name: init-01
                image: python:3.7-alpine
                command: ['sh', '-c', 'sleep 25']
            containers:
              - name: multi-pod-01
                image: python:3.7-alpine
                command: ['sh', '-c', 'sleep 360']

    - name: Create another pod with same label as previous pod
      k8s:
        definition:
          apiVersion: v1
          kind: Pod
          metadata:
            labels:
              run: multi-box
            name: "{{ multi_pod_two }}"
            namespace: "{{ wait_namespace }}"
          spec:
            initContainers:
              - name: init-02
                image: python:3.7-alpine
                command: ['sh', '-c', 'sleep 25']
            containers:
              - name: multi-pod-02
                image: python:3.7-alpine
                command: ['sh', '-c', 'sleep 360']

    - name: Wait and gather information about new pods
      k8s_info:
        kind: Pod
        namespace: "{{ wait_namespace }}"
        wait: true
        wait_sleep: 5
        wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
        label_selectors:
          - run == multi-box
      register: wait_info

    - name: Assert that pod creation succeeded
      assert:
        that:
          - wait_info is successful
          - not wait_info.changed
          - wait_info.resources[0].status.phase == "Running"
          - wait_info.resources[1].status.phase == "Running"

    - name: "Remove Pod {{ multi_pod_one }}"
      k8s:
        api_version: v1
        kind: Pod
        name: "{{ multi_pod_one }}"
        namespace: "{{ wait_namespace }}"
        state: absent
        wait: true
        wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
      ignore_errors: true
      register: multi_pod_one_remove

    - name: "Check if {{ multi_pod_one }} pod is removed"
      assert:
        that:
          - multi_pod_one_remove is successful
          - multi_pod_one_remove.changed

    - name: "Remove Pod {{ multi_pod_two }}"
      k8s:
        api_version: v1
        kind: Pod
        name: "{{ multi_pod_two }}"
        namespace: "{{ wait_namespace }}"
        state: absent
        wait: true
        wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
      ignore_errors: true
      register: multi_pod_two_remove

    - name: "Check if {{ multi_pod_two }} pod is removed"
      assert:
        that:
          - multi_pod_two_remove is successful
          - multi_pod_two_remove.changed

    # --- api_found reporting for known and unknown API groups ---
    - name: "Look for existing API"
      k8s_info:
        api_version: apps/v1
        kind: Deployment
      register: existing_api

    - name: Check if we informed the user the api does exist
      assert:
        that:
          - existing_api.api_found

    - name: "Look for non-existent API"
      k8s_info:
        api_version: pleasedonotcreatethisresource.example.com/v7
        kind: DoesNotExist
      register: dne_api

    - name: Check if we informed the user the api does not exist
      assert:
        that:
          - not dne_api.resources
          - not dne_api.api_found

    # --- wait must block for the full timeout on resources that never appear ---
    - name: Start timer
      set_fact:
        start: "{{ lookup('pipe', 'date +%s') }}"

    - name: Wait for non-existent pod to be created
      k8s_info:
        kind: Pod
        name: does-not-exist
        namespace: "{{ wait_namespace }}"
        wait: true
        wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
      register: result

    - name: Check that module waited
      vars:
        stop: "{{ lookup('pipe', 'date +%s') }}"
      assert:
        that:
          # Raw Jinja expression (no moustaches inside the conditional) with
          # explicit int casts so the epoch values are compared numerically.
          - (stop | int) - (start | int) > 30

    - name: Create simple pod
      k8s:
        definition:
          apiVersion: v1
          kind: Pod
          metadata:
            name: wait-pod-1
            namespace: "{{ wait_namespace }}"
          spec:
            containers:
              - image: busybox
                name: busybox
                command:
                  - /bin/sh
                  - -c
                  - while true; do sleep 5; done

    - name: Wait for multiple non-existent pods to be created
      k8s_info:
        kind: Pod
        namespace: "{{ wait_namespace }}"
        label_selectors:
          - thislabel=doesnotexist
        wait: true
        wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
      register: result

    - name: Assert no pods were found
      assert:
        that:
          - not result.resources
  vars:
    k8s_pod_name: pod-info-1
  always:
    # Best-effort cleanup; removing the namespace also removes any leftover pods.
    - name: Remove namespace
      k8s:
        kind: Namespace
        name: "{{ wait_namespace }}"
        state: absent
      ignore_errors: true

Some files were not shown because too many files have changed in this diff Show More