Compare commits

...

8 Commits

Author SHA1 Message Date
abikouo
4b8b3fa1ee [backport/2.1] molecule to ansible-test CI migration (#398)
integration testing migration from molecule to ansible-test
2022-03-08 17:25:50 +01:00
Mike Graves
abcc3e884c Release 2.1.1 (#152) 2021-06-24 12:41:43 -04:00
Mike Graves
15799b2dd5 Check that auth value is not None (#151)
* Check that auth value is not None

The previous check for truth prevented the verify_ssl param from being
set to false, thus forcing ssl verification in every case.

* Add changelog fragment

* Fix linting
2021-06-24 12:15:27 -04:00
abikouo
35af8a48ad molecule gc.yml - fix sporadic fail (#144)
* update

* Update gc.yml
2021-06-24 14:33:14 +02:00
Mike Graves
8280bb78c0 Release 2.1.0 (#150) 2021-06-24 07:49:00 -04:00
Mike Graves
2eca446f09 Remove turbo mode functionality (#149)
* Remove turbo mode functionality

* Add changelog fragment

* Fix linting issue

* Update docs
2021-06-23 18:50:32 -04:00
Mike Graves
cd72b6d7df Remove cloud.common dependency (#148)
* Remove cloud.common dependency

* Add changelog fragment
2021-06-23 16:06:26 -04:00
abikouo
b50f1f2fc9 Fix molecule test `gc.yml` (#132)
* add until loop

* Update gc.yml
2021-06-22 08:55:40 +02:00
121 changed files with 736 additions and 1284 deletions

View File

@@ -1,141 +0,0 @@
---
name: CI
'on':
push:
branches:
- main
pull_request:
schedule:
- cron: '0 6 * * *'
jobs:
sanity:
runs-on: ubuntu-latest
strategy:
matrix:
python_version: ['3.7']
ansible_version: ['stable-2.11', 'stable-2.10', 'stable-2.9', 'devel']
steps:
- name: Check out code
uses: actions/checkout@v2
with:
path: ansible_collections/kubernetes/core
- name: Set up Python ${{ matrix.python_version }}
uses: actions/setup-python@v1
with:
python-version: ${{ matrix.python_version }}
- name: Check ansible version
uses: actions/checkout@v2
with:
repository: ansible/ansible
ref: ${{ matrix.ansible_version }}
path: ansible_collections/kubernetes/core/ansible
- name: Run sanity tests on Python ${{ matrix.python_version }}
run: source ./ansible/hacking/env-setup && make test-sanity PYTHON_VERSION=${{ matrix.python_version }}
working-directory: ./ansible_collections/kubernetes/core
integration:
runs-on: ubuntu-latest
strategy:
matrix:
# Our old integration tests fail under newer Python versions.
python_version: ['3.6']
steps:
- name: Check out code
uses: actions/checkout@v2
with:
path: ansible_collections/kubernetes/core
- name: Set up Python ${{ matrix.python_version }}
uses: actions/setup-python@v1
with:
python-version: ${{ matrix.python_version }}
- name: Install ansible base (devel branch)
run: pip install https://github.com/ansible/ansible/archive/devel.tar.gz --disable-pip-version-check
- name: Run integration tests on Python ${{ matrix.python_version }}
run: make test-integration PYTHON_VERSION=${{ matrix.python_version }}
working-directory: ./ansible_collections/kubernetes/core
- name: Generate coverage report.
run: ansible-test coverage xml -v --requirements --group-by command --group-by version
working-directory: ./ansible_collections/kubernetes/core
- uses: codecov/codecov-action@v1
with:
fail_ci_if_error: false
molecule:
runs-on: ubuntu-latest
strategy:
matrix:
python_version: ['3.7']
ansible_version: ['==2.9.*', '==2.10.*', '']
steps:
- name: Check out code
uses: actions/checkout@v2
with:
path: ansible_collections/kubernetes/core
- name: Set up KinD cluster
uses: engineerd/setup-kind@v0.5.0
- name: Set up Python ${{ matrix.python_version }}
uses: actions/setup-python@v1
with:
python-version: ${{ matrix.python_version }}
# The 3.3.0 release of molecule introduced a breaking change. See
# https://github.com/ansible-community/molecule/issues/3083
- name: Install molecule and kubernetes dependencies
run: pip install ansible${{ matrix.ansible_version }} "molecule<3.3.0" yamllint kubernetes flake8 jsonpatch
# The latest release doesn't work with Molecule currently.
# See: https://github.com/ansible-community/molecule/issues/2757
# - name: Install ansible base, latest release.
# run: |
# pip uninstall -y ansible
# pip install --pre ansible-base
# The devel branch doesn't work with Molecule currently.
# See: https://github.com/ansible-community/molecule/issues/2757
# - name: Install ansible base (devel branch)
# run: |
# pip uninstall -y ansible
# pip install https://github.com/ansible/ansible/archive/devel.tar.gz --disable-pip-version-check
- name: Create default collection path symlink
run: |
mkdir -p /home/runner/.ansible
ln -s /home/runner/work/kubernetes/kubernetes /home/runner/.ansible/collections
- name: Run molecule default test scenario
run: make test-molecule
working-directory: ./ansible_collections/kubernetes/core
unit:
runs-on: ubuntu-latest
strategy:
matrix:
python_version: ['3.7']
steps:
- name: Check out code
uses: actions/checkout@v2
with:
path: ansible_collections/kubernetes/core
- name: Set up Python ${{ matrix.python_version }}
uses: actions/setup-python@v1
with:
python-version: ${{ matrix.python_version }}
- name: Install ansible base (devel branch)
run: pip install https://github.com/ansible/ansible/archive/devel.tar.gz --disable-pip-version-check
- name: Run unit tests on Python ${{ matrix.python_version }}
run: make test-unit PYTHON_VERSION=${{ matrix.python_version }}
working-directory: ./ansible_collections/kubernetes/core

6
.gitignore vendored
View File

@@ -13,3 +13,9 @@ changelogs/.plugin-cache.yaml
tests/output
tests/integration/cloud-config-*
.cache
tests/integration/*-chart-*.tgz
# ansible-test generated file
tests/integration/inventory
tests/integration/*-*.yml

View File

@@ -5,6 +5,23 @@ Kubernetes Collection Release Notes
.. contents:: Topics
v2.1.1
======
Bugfixes
--------
- check auth params for existence, not whether they are true (https://github.com/ansible-collections/kubernetes.core/pull/151).
v2.1.0
======
Minor Changes
-------------
- remove cloud.common as default dependency (https://github.com/ansible-collections/kubernetes.core/pull/148).
- temporarily disable turbo mode (https://github.com/ansible-collections/kubernetes.core/pull/149).
v2.0.2
======

View File

@@ -1,5 +1,5 @@
# Also needs to be updated in galaxy.yml
VERSION = 2.0.2
VERSION = 2.1.1
TEST_ARGS ?= ""
PYTHON_VERSION ?= `python -c 'import platform; print("{0}.{1}".format(platform.python_version_tuple()[0], platform.python_version_tuple()[1]))'`

View File

@@ -82,7 +82,7 @@ You can also include it in a `requirements.yml` file and install it via `ansible
---
collections:
- name: kubernetes.core
version: 2.0.2
version: 2.1.1
```
### Installing the Kubernetes Python Library
@@ -159,11 +159,6 @@ If upgrading older playbooks which were built prior to Ansible 2.10 and this col
For documentation on how to use individual modules and other content included in this collection, please see the links in the 'Included content' section earlier in this README.
## Ansible Turbo mode
The ``kubernetes.core`` collection supports Ansible Turbo mode via ``cloud.common`` collection. Please read more about Ansible Turbo mode - [here](https://github.com/ansible-collections/kubernetes.core/blob/main/docs/ansible_turbo_mode.rst).
## Testing and Development
If you want to develop new content for this collection or improve what's already here, the easiest way to work on the collection is to clone it into one of the configured [`COLLECTIONS_PATHS`](https://docs.ansible.com/ansible/latest/reference_appendices/config.html#collections-paths), and work on it there.

View File

@@ -413,3 +413,19 @@ releases:
- 129-k8s-fix-apply-array-with-empty-dict.yml
- 135-rename-apply-function.yml
release_date: '2021-06-16'
2.1.0:
changes:
minor_changes:
- remove cloud.common as default dependency (https://github.com/ansible-collections/kubernetes.core/pull/148).
- temporarily disable turbo mode (https://github.com/ansible-collections/kubernetes.core/pull/149).
fragments:
- 148-remove-cloud-common-dependency.yaml
- 149-disable-turbo-mode.yaml
release_date: '2021-06-23'
2.1.1:
changes:
bugfixes:
- check auth params for existence, not whether they are true (https://github.com/ansible-collections/kubernetes.core/pull/151).
fragments:
- 151-check-auth-params-for-existence.yaml
release_date: '2021-06-24'

View File

@@ -8,8 +8,6 @@ authors:
- willthames (https://github.com/willthames)
- mmazur (https://github.com/mmazur)
- jamescassell (https://github.com/jamescassell)
dependencies:
cloud.common: '>=2.0.1'
description: Kubernetes Collection for Ansible.
documentation: ''
homepage: ''
@@ -27,7 +25,7 @@ tags:
- openshift
- okd
- cluster
version: 2.0.2
version: 2.1.1
build_ignore:
- .DS_Store
- '*.tar.gz'

View File

@@ -1,254 +0,0 @@
---
- name: Converge
hosts: localhost
connection: local
collections:
- kubernetes.core
vars_files:
- vars/main.yml
tasks:
- name: Verify cluster is working.
k8s_info:
namespace: kube-system
kind: Pod
register: pod_list
- name: Verify cluster has more than 5 pods running.
assert:
that: (pod_list.resources | count) > 5
- name: Include access_review.yml
include_tasks:
file: tasks/access_review.yml
apply:
tags: [ access_review, k8s ]
tags:
- always
- name: Include append_hash.yml
include_tasks:
file: tasks/append_hash.yml
apply:
tags: [ append_hash, k8s ]
tags:
- always
- name: Include apply.yml
include_tasks:
file: tasks/apply.yml
apply:
tags: [ apply, k8s ]
tags:
- always
- name: Include cluster_info.yml
include_tasks:
file: tasks/cluster_info.yml
apply:
tags: [ cluster_info, k8s ]
tags:
- always
- name: Include crd.yml
include_tasks:
file: tasks/crd.yml
apply:
tags: [ crd, k8s ]
tags:
- always
- name: Include delete.yml
include_tasks:
file: tasks/delete.yml
apply:
tags: [ delete, k8s ]
tags:
- always
- name: Include exec.yml
include_tasks:
file: tasks/exec.yml
apply:
tags: [ exec, k8s ]
tags:
- always
- name: Include full.yml
include_tasks:
file: tasks/full.yml
apply:
tags: [ full, k8s ]
tags:
- always
- name: Include gc.yml
include_tasks:
file: tasks/gc.yml
apply:
tags: [ gc, k8s ]
tags:
- always
- name: Include info.yml
include_tasks:
file: tasks/info.yml
apply:
tags: [ info, k8s ]
tags:
- always
- name: Include json_patch.yml
include_tasks:
file: tasks/json_patch.yml
apply:
tags: [ json_patch, k8s ]
tags:
- always
- name: Include lists.yml
include_tasks:
file: tasks/lists.yml
apply:
tags: [ lists, k8s ]
tags:
- always
- name: Include log.yml
include_tasks:
file: tasks/log.yml
apply:
tags: [ log, k8s ]
tags:
- always
- name: Include rollback.yml
include_tasks:
file: tasks/rollback.yml
apply:
tags: [ rollback, k8s ]
tags:
- always
- name: Include scale.yml
include_tasks:
file: tasks/scale.yml
apply:
tags: [ scale, k8s ]
tags:
- always
- name: Include template.yml
include_tasks:
file: tasks/template.yml
apply:
tags: [ template, k8s ]
tags:
- always
- name: Include waiter.yml
include_tasks:
file: tasks/waiter.yml
apply:
tags: [ waiter, k8s ]
tags:
- always
- name: Include merge_type.yml
include_tasks:
file: tasks/merge_type.yml
apply:
tags: [ merge_type, k8s ]
tags:
- always
- name: Include patched.yml
include_tasks:
file: tasks/patched.yml
apply:
tags: [ patched, k8s ]
tags:
- always
- name: Include lookup_k8s.yml
include_tasks:
file: tasks/lookup_k8s.yml
apply:
tags: [ lookup, k8s ]
tags:
- always
roles:
- role: helm
tags:
- helm
post_tasks:
- name: Ensure namespace exists
k8s:
api_version: v1
kind: Namespace
name: inventory
- name: Add a deployment
k8s:
definition:
apiVersion: apps/v1
kind: Deployment
metadata:
name: inventory
namespace: inventory
spec:
replicas: 1
selector:
matchLabels:
app: "{{ k8s_pod_name }}"
template: "{{ k8s_pod_template }}"
wait: yes
wait_timeout: 120
vars:
k8s_pod_name: inventory
k8s_pod_image: python
k8s_pod_command:
- python
- '-m'
- http.server
k8s_pod_env:
- name: TEST
value: test
- meta: refresh_inventory
- name: Verify inventory and connection plugins
hosts: namespace_inventory_pods
gather_facts: no
vars:
file_content: |
Hello world
tasks:
- name: End play if host not running (TODO should we not add these to the inventory?)
meta: end_host
when: pod_phase != "Running"
- debug: var=hostvars
- setup:
- debug: var=ansible_facts
- name: Assert the TEST environment variable was retrieved
assert:
that: ansible_facts.env.TEST == 'test'
- name: Copy a file into the host
copy:
content: '{{ file_content }}'
dest: /tmp/test_file
- name: Retrieve the file from the host
slurp:
src: /tmp/test_file
register: slurped_file
- name: Assert the file content matches expectations
assert:
that: (slurped_file.content|b64decode) == file_content
- name: Delete inventory namespace
hosts: localhost
connection: local
gather_facts: no
tasks:
- name: Remove inventory namespace
k8s:
api_version: v1
kind: Namespace
name: inventory
state: absent

View File

@@ -1,43 +0,0 @@
---
driver:
name: delegated
options:
managed: false
login_cmd_template: 'docker exec -ti {instance} bash'
ansible_connection_options:
ansible_connection: docker
lint: |
set -e
yamllint .
flake8
platforms:
- name: instance-kind
provisioner:
name: ansible
log: true
config_options:
inventory:
enable_plugins: kubernetes.core.k8s
lint: {}
inventory:
hosts:
plugin: kubernetes.core.k8s
host_vars:
localhost:
ansible_python_interpreter: '{{ ansible_playbook_python }}'
env:
ANSIBLE_FORCE_COLOR: 'true'
options:
vvv: True
scenario:
name: default
test_sequence:
- dependency
- lint
- syntax
- converge
- verify
dependency:
name: galaxy
options:
requirements-file: requirements.yml

View File

@@ -3,10 +3,4 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
try:
from ansible_collections.cloud.common.plugins.module_utils.turbo.module import (
AnsibleTurboModule as AnsibleModule,
) # noqa: F401
AnsibleModule.collection_name = "kubernetes.core"
except ImportError:
from ansible.module_utils.basic import AnsibleModule # noqa: F401
from ansible.module_utils.basic import AnsibleModule # noqa: F401

View File

@@ -124,7 +124,7 @@ def get_api_client(module=None, **kwargs):
# If authorization variables aren't defined, look for them in environment variables
for true_name, arg_name in AUTH_ARG_MAP.items():
if module and module.params.get(arg_name):
if module and module.params.get(arg_name) is not None:
auth[true_name] = module.params.get(arg_name)
elif arg_name in kwargs and kwargs.get(arg_name) is not None:
auth[true_name] = kwargs.get(arg_name)

View File

@@ -1,2 +0,0 @@
collections:
- cloud.common

View File

@@ -1 +1,4 @@
kubernetes-validate
coverage==4.5.4
pytest
pytest-xdist

View File

@@ -0,0 +1,7 @@
slow
time=609
helm_info
helm_repository
helm_template
helm_plugin
helm_plugin_info

View File

@@ -3,9 +3,6 @@ helm_archive_name: "helm-{{ helm_version }}-{{ ansible_system | lower }}-amd64.t
helm_binary: "/tmp/helm/{{ ansible_system | lower }}-amd64/helm"
helm_namespace: helm
tiller_namespace: tiller
tiller_cluster_role: cluster-admin
chart_test: "ingress-nginx"
chart_test_local_path: "nginx-ingress"
chart_test_version: 3.8.0

View File

@@ -9,3 +9,7 @@
src: 'https://get.helm.sh/{{ helm_archive_name }}'
dest: /tmp/helm/
remote_src: yes
retries: 10
delay: 5
register: result
until: result is not failed

View File

@@ -15,6 +15,7 @@
- name: Install chart while skipping CRDs
helm:
binary_path: "{{ helm_binary }}"
chart_ref: "/tmp/helm_test_crds/{{ test_chart }}"
namespace: "{{ helm_namespace }}"
name: test-crds
@@ -46,12 +47,14 @@
# Helm won't install CRDs into an existing release, so we need to delete this, first
- name: Uninstall chart
helm:
binary_path: "{{ helm_binary }}"
namespace: "{{ helm_namespace }}"
name: test-crds
state: absent
- name: Install chart with CRDs
helm:
binary_path: "{{ helm_binary }}"
chart_ref: "/tmp/helm_test_crds/{{ test_chart }}"
namespace: "{{ helm_namespace }}"
name: test-crds

View File

@@ -1,6 +1,7 @@
---
- name: Add chart repo
helm_repository:
binary_path: "{{ helm_binary }}"
name: test_helm
repo_url: "{{ chart_test_repo }}"
@@ -14,6 +15,7 @@
- name: Add chart repo
helm_repository:
binary_path: "{{ helm_binary }}"
name: test_helm
repo_url: "{{ chart_test_repo }}"
state: absent

View File

@@ -6,6 +6,7 @@
block:
- name: Install helm diff
helm_plugin:
binary_path: "{{ helm_binary }}"
state: present
plugin_path: https://github.com/databus23/helm-diff
@@ -136,6 +137,7 @@
- name: Uninstall helm diff
helm_plugin:
binary_path: "{{ helm_binary }}"
state: absent
plugin_name: diff
ignore_errors: yes

View File

@@ -97,6 +97,7 @@
- name: Gather Helm plugin info
helm_plugin_info:
binary_path: "{{ helm_binary }}"
register: r
- name: Set sample_plugin version

View File

@@ -1,11 +1,13 @@
---
- name: "Ensure test_helm_repo doesn't exist"
helm_repository:
binary_path: "{{ helm_binary }}"
name: test_helm_repo
state: absent
- name: Add test_helm_repo chart repository
helm_repository:
binary_path: "{{ helm_binary }}"
name: test_helm_repo
repo_url: "{{ chart_test_repo }}"
register: repository
@@ -17,6 +19,7 @@
- name: Check idempotency
helm_repository:
binary_path: "{{ helm_binary }}"
name: test_helm_repo
repo_url: "{{ chart_test_repo }}"
register: repository
@@ -28,6 +31,7 @@
- name: Failed to add repository with the same name
helm_repository:
binary_path: "{{ helm_binary }}"
name: test_helm_repo
repo_url: "https://other-charts.url"
register: repository_errors
@@ -40,6 +44,7 @@
- name: Remove test_helm_repo chart repository
helm_repository:
binary_path: "{{ helm_binary }}"
name: test_helm_repo
state: absent
register: repository
@@ -51,6 +56,7 @@
- name: Check idempotency after remove
helm_repository:
binary_path: "{{ helm_binary }}"
name: test_helm_repo
state: absent
register: repository

View File

@@ -0,0 +1,2 @@
context/target
k8s

View File

@@ -0,0 +1,94 @@
---
- name: Converge
hosts: localhost
connection: local
collections:
- kubernetes.core
vars_files:
- vars/main.yml
tasks:
- name: Ensure namespace exists
k8s:
api_version: v1
kind: Namespace
name: inventory
- name: Add a deployment
k8s:
definition:
apiVersion: apps/v1
kind: Deployment
metadata:
name: inventory
namespace: inventory
spec:
replicas: 1
selector:
matchLabels:
app: "{{ k8s_pod_name }}"
template: "{{ k8s_pod_template }}"
wait: yes
wait_timeout: 240
vars:
k8s_pod_name: inventory
k8s_pod_image: python
k8s_pod_command:
- python
- '-m'
- http.server
k8s_pod_env:
- name: TEST
value: test
- meta: refresh_inventory
- name: Verify inventory and connection plugins
hosts: namespace_inventory_pods
gather_facts: no
vars:
file_content: |
Hello world
tasks:
- name: End play if host not running (TODO should we not add these to the inventory?)
meta: end_host
when: pod_phase != "Running"
- debug: var=hostvars
- setup:
- debug: var=ansible_facts
- name: Assert the TEST environment variable was retrieved
assert:
that: ansible_facts.env.TEST == 'test'
- name: Copy a file into the host
copy:
content: '{{ file_content }}'
dest: /tmp/test_file
- name: Retrieve the file from the host
slurp:
src: /tmp/test_file
register: slurped_file
- name: Assert the file content matches expectations
assert:
that: (slurped_file.content|b64decode) == file_content
- name: Delete inventory namespace
hosts: localhost
connection: local
gather_facts: no
tasks:
- name: Remove inventory namespace
k8s:
api_version: v1
kind: Namespace
name: inventory
state: absent

View File

@@ -0,0 +1,8 @@
#!/usr/bin/env bash
set -eux
export ANSIBLE_INVENTORY_ENABLED=kubernetes.core.k8s,yaml
export ANSIBLE_PYTHON_INTERPRETER=auto_silent
ansible-playbook playbooks/play.yml -i playbooks/test.inventory_k8s.yml "$@"

View File

@@ -0,0 +1,2 @@
---
plugin: kubernetes.core.k8s

View File

@@ -0,0 +1,2 @@
time=5
k8s

View File

@@ -0,0 +1,2 @@
time=6
k8s

View File

@@ -0,0 +1 @@
---

View File

@@ -0,0 +1,4 @@
slow
k8s_service
k8s
time=192

View File

@@ -0,0 +1,40 @@
---
k8s_pod_metadata:
labels:
app: "{{ k8s_pod_name }}"
k8s_pod_spec:
serviceAccount: "{{ k8s_pod_service_account }}"
containers:
- image: "{{ k8s_pod_image }}"
imagePullPolicy: Always
name: "{{ k8s_pod_name }}"
command: "{{ k8s_pod_command }}"
readinessProbe:
initialDelaySeconds: 15
exec:
command:
- /bin/true
resources: "{{ k8s_pod_resources }}"
ports: "{{ k8s_pod_ports }}"
env: "{{ k8s_pod_env }}"
k8s_pod_service_account: default
k8s_pod_resources:
limits:
cpu: "100m"
memory: "100Mi"
k8s_pod_command: []
k8s_pod_ports: []
k8s_pod_env: []
k8s_pod_template:
metadata: "{{ k8s_pod_metadata }}"
spec: "{{ k8s_pod_spec }}"
k8s_wait_timeout: 240

View File

@@ -307,6 +307,7 @@
app: "{{ k8s_pod_name }}"
template: "{{ k8s_pod_template }}"
wait: yes
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
apply: yes
vars:
k8s_pod_name: apply-deploy
@@ -378,6 +379,7 @@
app: "{{ k8s_pod_name }}"
template: "{{ k8s_pod_template }}"
wait: yes
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
apply: yes
vars:
k8s_pod_name: apply-deploy
@@ -426,6 +428,7 @@
app: "{{ k8s_pod_name }}"
template: "{{ k8s_pod_template }}"
wait: yes
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
apply: yes
vars:
k8s_pod_name: apply-deploy
@@ -449,318 +452,6 @@
that:
- deploy_after_serviceaccount_removal is failed
- name: Insert new service port
k8s:
definition:
apiVersion: v1
kind: Service
metadata:
name: apply-svc
namespace: "{{ apply_namespace }}"
spec:
selector:
app: whatever
ports:
- name: mesh
port: 8080
targetPort: 8080
- name: http
port: 8081
targetPort: 8081
apply: yes
register: k8s_service_4
- name: Check ports are correct
assert:
that:
- k8s_service_4 is changed
- k8s_service_4.result.spec.ports | length == 2
- k8s_service_4.result.spec.ports[0].port == 8080
- k8s_service_4.result.spec.ports[1].port == 8081
- name: Remove new service port (check mode)
k8s:
definition:
apiVersion: v1
kind: Service
metadata:
name: apply-svc
namespace: "{{ apply_namespace }}"
spec:
selector:
app: whatever
ports:
- name: http
port: 8081
targetPort: 8081
apply: yes
check_mode: yes
register: k8s_service_check
- name: Check ports are correct
assert:
that:
- k8s_service_check is changed
- k8s_service_check.result.spec.ports | length == 1
- k8s_service_check.result.spec.ports[0].port == 8081
- name: Remove new service port
k8s:
definition:
apiVersion: v1
kind: Service
metadata:
name: apply-svc
namespace: "{{ apply_namespace }}"
spec:
selector:
app: whatever
ports:
- name: http
port: 8081
targetPort: 8081
apply: yes
register: k8s_service_5
- name: Check ports are correct
assert:
that:
- k8s_service_5 is changed
- k8s_service_5.result.spec.ports | length == 1
- k8s_service_5.result.spec.ports[0].port == 8081
- name: Add a serviceaccount
k8s:
definition:
apiVersion: v1
kind: ServiceAccount
metadata:
name: apply-deploy
namespace: "{{ apply_namespace }}"
- name: Add a deployment
k8s:
definition:
apiVersion: apps/v1
kind: Deployment
metadata:
name: apply-deploy
namespace: "{{ apply_namespace }}"
spec:
replicas: 1
selector:
matchLabels:
app: "{{ k8s_pod_name }}"
template: "{{ k8s_pod_template }}"
wait: yes
apply: yes
vars:
k8s_pod_name: apply-deploy
k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-green
k8s_pod_service_account: apply-deploy
k8s_pod_ports:
- containerPort: 8080
name: http
protocol: TCP
- name: Remove the serviceaccount
k8s:
state: absent
definition:
apiVersion: v1
kind: ServiceAccount
metadata:
name: apply-deploy
namespace: "{{ apply_namespace }}"
- name: Update the earlier deployment
k8s:
definition:
apiVersion: apps/v1
kind: Deployment
metadata:
name: apply-deploy
namespace: "{{ apply_namespace }}"
spec:
replicas: 2
selector:
matchLabels:
app: "{{ k8s_pod_name }}"
template: "{{ k8s_pod_template }}"
wait: yes
apply: yes
vars:
k8s_pod_name: apply-deploy
k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-purple
k8s_pod_service_account: apply-deploy
k8s_pod_ports:
- containerPort: 8080
name: http
protocol: TCP
register: deploy_after_serviceaccount_removal
ignore_errors: yes
- name: Ensure that updating deployment after service account removal failed
assert:
that:
- deploy_after_serviceaccount_removal is failed
- name: Insert new service port
k8s:
definition:
apiVersion: v1
kind: Service
metadata:
name: apply-svc
namespace: "{{ apply_namespace }}"
spec:
selector:
app: whatever
ports:
- name: mesh
port: 8080
targetPort: 8080
- name: http
port: 8081
targetPort: 8081
apply: yes
register: k8s_service_4
- name: Check ports are correct
assert:
that:
- k8s_service_4 is changed
- k8s_service_4.result.spec.ports | length == 2
- k8s_service_4.result.spec.ports[0].port == 8080
- k8s_service_4.result.spec.ports[1].port == 8081
- name: Remove new service port (check mode)
k8s:
definition:
apiVersion: v1
kind: Service
metadata:
name: apply-svc
namespace: "{{ apply_namespace }}"
spec:
selector:
app: whatever
ports:
- name: http
port: 8081
targetPort: 8081
apply: yes
check_mode: yes
register: k8s_service_check
- name: Check ports are correct
assert:
that:
- k8s_service_check is changed
- k8s_service_check.result.spec.ports | length == 1
- k8s_service_check.result.spec.ports[0].port == 8081
- name: Remove new service port
k8s:
definition:
apiVersion: v1
kind: Service
metadata:
name: apply-svc
namespace: "{{ apply_namespace }}"
spec:
selector:
app: whatever
ports:
- name: http
port: 8081
targetPort: 8081
apply: yes
register: k8s_service_5
- name: Check ports are correct
assert:
that:
- k8s_service_5 is changed
- k8s_service_5.result.spec.ports | length == 1
- k8s_service_5.result.spec.ports[0].port == 8081
- name: Add a serviceaccount
k8s:
definition:
apiVersion: v1
kind: ServiceAccount
metadata:
name: apply-deploy
namespace: "{{ apply_namespace }}"
- name: Add a deployment
k8s:
definition:
apiVersion: apps/v1
kind: Deployment
metadata:
name: apply-deploy
namespace: "{{ apply_namespace }}"
spec:
replicas: 1
selector:
matchLabels:
app: "{{ k8s_pod_name }}"
template: "{{ k8s_pod_template }}"
wait: yes
apply: yes
vars:
k8s_pod_name: apply-deploy
k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-green
k8s_pod_service_account: apply-deploy
k8s_pod_ports:
- containerPort: 8080
name: http
protocol: TCP
- name: Remove the serviceaccount
k8s:
state: absent
definition:
apiVersion: v1
kind: ServiceAccount
metadata:
name: apply-deploy
namespace: "{{ apply_namespace }}"
- name: Update the earlier deployment
k8s:
definition:
apiVersion: apps/v1
kind: Deployment
metadata:
name: apply-deploy
namespace: "{{ apply_namespace }}"
spec:
replicas: 2
selector:
matchLabels:
app: "{{ k8s_pod_name }}"
template: "{{ k8s_pod_template }}"
wait: yes
apply: yes
vars:
k8s_pod_name: apply-deploy
k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-purple
k8s_pod_service_account: apply-deploy
k8s_pod_ports:
- containerPort: 8080
name: http
protocol: TCP
register: deploy_after_serviceaccount_removal
ignore_errors: yes
- name: Ensure that updating deployment after service account removal failed
assert:
that:
- deploy_after_serviceaccount_removal is failed
- name: Add a secret
k8s:
definition:

View File

@@ -0,0 +1,2 @@
k8s_cluster_info
time=6

View File

@@ -0,0 +1,2 @@
k8s
time=15

View File

@@ -0,0 +1 @@
---

View File

@@ -7,7 +7,7 @@
- name: Install custom resource definitions
k8s:
definition: "{{ lookup('file', kubernetes_role_path + '/files/setup-crd.yml') }}"
definition: "{{ lookup('file', 'setup-crd.yml') }}"
- name: Pause 5 seconds to avoid race condition
pause:
@@ -15,14 +15,14 @@
- name: Create custom resource definition
k8s:
definition: "{{ lookup('file', kubernetes_role_path + '/files/crd-resource.yml') }}"
definition: "{{ lookup('file', 'crd-resource.yml') }}"
namespace: crd
apply: "{{ create_crd_with_apply | default(omit) }}"
register: create_crd
- name: Patch custom resource definition
k8s:
definition: "{{ lookup('file', kubernetes_role_path + '/files/crd-resource.yml') }}"
definition: "{{ lookup('file', 'crd-resource.yml') }}"
namespace: crd
register: recreate_crd
ignore_errors: yes
@@ -35,7 +35,7 @@
- block:
- name: Recreate custom resource definition with merge_type
k8s:
definition: "{{ lookup('file', kubernetes_role_path + '/files/crd-resource.yml') }}"
definition: "{{ lookup('file', 'crd-resource.yml') }}"
merge_type:
- merge
namespace: crd
@@ -43,7 +43,7 @@
- name: Recreate custom resource definition with merge_type list
k8s:
definition: "{{ lookup('file', kubernetes_role_path + '/files/crd-resource.yml') }}"
definition: "{{ lookup('file', 'crd-resource.yml') }}"
merge_type:
- strategic-merge
- merge
@@ -54,7 +54,7 @@
- name: Remove crd
k8s:
definition: "{{ lookup('file', kubernetes_role_path + '/files/crd-resource.yml') }}"
definition: "{{ lookup('file', 'crd-resource.yml') }}"
namespace: crd
state: absent

View File

@@ -0,0 +1,3 @@
k8s_info
k8s
time=64

View File

@@ -0,0 +1,40 @@
---
k8s_pod_metadata:
labels:
app: "{{ k8s_pod_name }}"
k8s_pod_spec:
serviceAccount: "{{ k8s_pod_service_account }}"
containers:
- image: "{{ k8s_pod_image }}"
imagePullPolicy: Always
name: "{{ k8s_pod_name }}"
command: "{{ k8s_pod_command }}"
readinessProbe:
initialDelaySeconds: 15
exec:
command:
- /bin/true
resources: "{{ k8s_pod_resources }}"
ports: "{{ k8s_pod_ports }}"
env: "{{ k8s_pod_env }}"
k8s_pod_service_account: default
k8s_pod_resources:
limits:
cpu: "100m"
memory: "100Mi"
k8s_pod_command: []
k8s_pod_ports: []
k8s_pod_env: []
k8s_pod_template:
metadata: "{{ k8s_pod_metadata }}"
spec: "{{ k8s_pod_spec }}"
kubernetes_role_path: ../../tests/integration/targets/kubernetes

View File

@@ -0,0 +1,3 @@
k8s_exec
k8s
time=11

View File

@@ -0,0 +1 @@
---

View File

@@ -0,0 +1,3 @@
k8s
k8s_info
time=37

View File

@@ -0,0 +1 @@
---

View File

@@ -0,0 +1,2 @@
k8s
time=142

View File

@@ -0,0 +1 @@
---

View File

@@ -37,14 +37,23 @@
k8s:
definition: "{{ job_definition }}"
- name: Test that job's pod is running
- name: Wait Job's pod
k8s_info:
kind: Pod
namespace: "{{ gc_namespace }}"
label_selectors:
- "job=gc"
register: wait_job
until: wait_job.resources
retries: 5
delay: 10
- name: Wait job's pod running
k8s_info:
kind: Pod
namespace: "{{ gc_namespace }}"
name: "{{ wait_job.resources[0].metadata.name }}"
wait: yes
wait_timeout: 100
register: job
- name: Assert job's pod is running
@@ -78,14 +87,23 @@
k8s:
definition: "{{ job_definition }}"
- name: Test that job's pod is running
- name: Wait Job's pod
k8s_info:
kind: Pod
namespace: "{{ gc_namespace }}"
label_selectors:
- "job=gc"
register: wait_job
until: wait_job.resources
retries: 5
delay: 10
- name: Wait job's pod running
k8s_info:
kind: Pod
namespace: "{{ gc_namespace }}"
name: "{{ wait_job.resources[0].metadata.name }}"
wait: yes
wait_timeout: 100
register: job
- name: Assert job's pod is running
@@ -120,14 +138,23 @@
k8s:
definition: "{{ job_definition }}"
- name: Test that job's pod is running
- name: Wait Job's pod
k8s_info:
kind: Pod
namespace: "{{ gc_namespace }}"
label_selectors:
- "job=gc"
register: wait_job
until: wait_job.resources
retries: 5
delay: 10
- name: Wait job's pod running
k8s_info:
kind: Pod
namespace: "{{ gc_namespace }}"
name: "{{ wait_job.resources[0].metadata.name }}"
wait: yes
wait_timeout: 100
register: job
- name: Assert job's pod is running

View File

@@ -0,0 +1,3 @@
k8s
k8s_info
time=13

View File

@@ -0,0 +1,3 @@
k8s_json_patch
k8s
time=33

View File

@@ -2,6 +2,7 @@
namespace: json-patch
pod: json-patch
deployment: json-patch
k8s_wait_timeout: 240
block:
- name: Ensure namespace exists
@@ -28,6 +29,7 @@
- -c
- while true; do echo $(date); sleep 10; done
wait: yes
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
- name: Add a label and replace the image in checkmode
kubernetes.core.k8s_json_patch:
@@ -118,6 +120,7 @@
- name: Create a simple deployment
kubernetes.core.k8s:
wait: yes
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
definition:
apiVersion: apps/v1
kind: Deployment
@@ -154,6 +157,7 @@
path: /spec/replicas
value: 3
wait: yes
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
register: result
- name: Assert all replicas are available

View File

@@ -0,0 +1,3 @@
k8s_info
k8s
time=22

View File

@@ -0,0 +1,2 @@
k8s_log
time=27

View File

@@ -1,5 +1,8 @@
---
- block:
- set_fact:
k8s_wait_timeout: 240
- name: ensure that k8s-log namespace exists
k8s:
kind: Namespace
@@ -8,6 +11,7 @@
- name: create hello-world deployment
k8s:
wait: yes
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
definition:
apiVersion: apps/v1
kind: Deployment

View File

@@ -0,0 +1,3 @@
time=19
k8s
k8s_info

View File

@@ -0,0 +1,3 @@
time=20
k8s
k8s_info

View File

@@ -0,0 +1,4 @@
k8s_rollback
k8s
k8s_info
time=187

View File

@@ -3,6 +3,7 @@
- name: Set variables
set_fact:
namespace: "testingrollback"
k8s_wait_timeout: 240
- name: Create a namespace
k8s:
@@ -20,6 +21,7 @@
k8s:
state: present
wait: yes
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
inline: &deploy
apiVersion: apps/v1
kind: Deployment
@@ -53,6 +55,7 @@
k8s:
state: present
wait: yes
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
definition:
apiVersion: apps/v1
kind: Deployment
@@ -96,6 +99,7 @@
k8s:
state: present
wait: yes
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
definition:
apiVersion: apps/v1
kind: DaemonSet
@@ -149,6 +153,7 @@
k8s:
state: present
wait: yes
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
definition:
apiVersion: apps/v1
kind: DaemonSet

View File

@@ -0,0 +1,4 @@
k8s_scale
k8s
k8s_info
time=150

View File

@@ -0,0 +1,40 @@
---
k8s_pod_metadata:
labels:
app: "{{ k8s_pod_name }}"
k8s_pod_spec:
serviceAccount: "{{ k8s_pod_service_account }}"
containers:
- image: "{{ k8s_pod_image }}"
imagePullPolicy: Always
name: "{{ k8s_pod_name }}"
command: "{{ k8s_pod_command }}"
readinessProbe:
initialDelaySeconds: 15
exec:
command:
- /bin/true
resources: "{{ k8s_pod_resources }}"
ports: "{{ k8s_pod_ports }}"
env: "{{ k8s_pod_env }}"
k8s_pod_service_account: default
k8s_pod_resources:
limits:
cpu: "100m"
memory: "100Mi"
k8s_pod_command: []
k8s_pod_ports: []
k8s_pod_env: []
k8s_pod_template:
metadata: "{{ k8s_pod_metadata }}"
spec: "{{ k8s_pod_spec }}"
k8s_wait_timeout: 400

View File

@@ -26,7 +26,7 @@
app: "{{ k8s_pod_name }}"
template: "{{ k8s_pod_template }}"
wait: yes
wait_timeout: 60
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
apply: yes
vars:
k8s_pod_name: scale-deploy
@@ -53,6 +53,7 @@
namespace: "{{ scale_namespace }}"
replicas: 0
wait: yes
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
register: scale_down
- name: Get pods in scale-deploy
@@ -90,7 +91,7 @@
app: "{{ k8s_pod_name }}"
template: "{{ k8s_pod_template }}"
wait: yes
wait_timeout: 60
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
apply: yes
vars:
k8s_pod_name: scale-deploy
@@ -125,7 +126,7 @@
namespace: "{{ scale_namespace }}"
replicas: 2
wait: yes
wait_timeout: 60
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
register: scale_up
- name: Get pods in scale-deploy
@@ -154,6 +155,7 @@
namespace: "{{ scale_namespace }}"
replicas: 2
wait: yes
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
register: scale_up_noop
- name: Get pods in scale-deploy
@@ -227,6 +229,8 @@
resource_version: 0
label_selectors:
- app=nginx
wait: yes
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
register: scale_out
- assert:
@@ -257,6 +261,8 @@
namespace: "{{ scale_namespace }}"
label_selectors:
- app=nginx
wait: yes
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
register: scale_out
- assert:

View File

@@ -0,0 +1,4 @@
k8s_service
k8s
k8s_info
time=75

View File

@@ -2,6 +2,7 @@
- block:
- set_fact:
template_namespace: template-test
k8s_wait_timeout: 240
- name: Ensure namespace exists
k8s:
@@ -72,6 +73,7 @@
kubernetes.core.k8s:
template: "pod_template_one.j2"
wait: yes
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
vars:
k8s_pod_name_one: pod-1
k8s_pod_namespace: "{{ template_namespace }}"
@@ -103,6 +105,7 @@
template:
path: "pod_template_one.j2"
wait: yes
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
vars:
k8s_pod_name_one: pod-3
k8s_pod_namespace: "{{ template_namespace }}"
@@ -120,6 +123,7 @@
variable_start_string: '[['
variable_end_string: ']]'
wait: yes
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
vars:
k8s_pod_name_two: pod-4
k8s_pod_namespace: "[[ template_namespace ]]"
@@ -136,6 +140,7 @@
template:
path: "pod_template_three.j2"
wait: yes
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
vars:
k8s_pod_name_three_one: pod-5
k8s_pod_name_three_two: pod-6
@@ -156,6 +161,7 @@
variable_end_string: ']]'
- path: "pod_template_three.j2"
wait: yes
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
vars:
k8s_pod_name_one: pod-7
k8s_pod_name_two: pod-8
@@ -224,6 +230,7 @@
- pod_template_one.j2
continue_on_error: true
wait: true
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
register: resource
ignore_errors: true
@@ -239,17 +246,6 @@
- resource.result.results | selectattr('changed') | list | length == 1
- resource.result.results | selectattr('error', 'defined') | list | length == 1
- name: Remove Pod (Cleanup)
kubernetes.core.k8s:
api_version: v1
kind: Pod
name: "pod-{{ item }}"
namespace: "{{ template_namespace }}"
state: absent
wait: yes
ignore_errors: yes
loop: "{{ range(1, 12) | list }}"
always:
- name: Remove namespace (Cleanup)
kubernetes.core.k8s:

View File

@@ -0,0 +1,5 @@
# duration 10min
slow
time=504
k8s
k8s_info

View File

@@ -0,0 +1,40 @@
---
k8s_pod_metadata:
labels:
app: "{{ k8s_pod_name }}"
k8s_pod_spec:
serviceAccount: "{{ k8s_pod_service_account }}"
containers:
- image: "{{ k8s_pod_image }}"
imagePullPolicy: Always
name: "{{ k8s_pod_name }}"
command: "{{ k8s_pod_command }}"
readinessProbe:
initialDelaySeconds: 15
exec:
command:
- /bin/true
resources: "{{ k8s_pod_resources }}"
ports: "{{ k8s_pod_ports }}"
env: "{{ k8s_pod_env }}"
k8s_pod_service_account: default
k8s_pod_resources:
limits:
cpu: "100m"
memory: "100Mi"
k8s_pod_command: []
k8s_pod_ports: []
k8s_pod_env: []
k8s_pod_template:
metadata: "{{ k8s_pod_metadata }}"
spec: "{{ k8s_pod_spec }}"
k8s_wait_timeout: 240

View File

@@ -21,6 +21,7 @@
namespace: "{{ wait_namespace }}"
spec: "{{ k8s_pod_spec }}"
wait: yes
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
vars:
k8s_pod_name: wait-pod
k8s_pod_image: alpine:3.8
@@ -49,8 +50,7 @@
app: "{{ k8s_pod_name }}"
template: "{{ k8s_pod_template }}"
wait: yes
wait_sleep: 5
wait_timeout: 180
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
vars:
k8s_pod_name: wait-ds
k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:1
@@ -81,7 +81,7 @@
template: "{{ k8s_pod_template }}"
wait: yes
wait_sleep: 3
wait_timeout: 180
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
vars:
k8s_pod_name: wait-ds
k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:2
@@ -114,7 +114,7 @@
template: "{{ k8s_pod_template }}"
wait: yes
wait_sleep: 3
wait_timeout: 180
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
vars:
k8s_pod_name: wait-ds
k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:3
@@ -203,6 +203,7 @@
app: "{{ k8s_pod_name }}"
template: "{{ k8s_pod_template }}"
wait: yes
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
vars:
k8s_pod_name: wait-deploy
k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:1
@@ -233,6 +234,7 @@
app: "{{ k8s_pod_name }}"
template: "{{ k8s_pod_template }}"
wait: yes
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
vars:
k8s_pod_name: wait-deploy
k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:2
@@ -279,6 +281,7 @@
paused: True
apply: no
wait: yes
wait_timeout: "{{ k8s_wait_timeout | default(omit) }}"
wait_condition:
type: Progressing
status: Unknown

View File

@@ -1,19 +0,0 @@
Wait tests
----------
wait tests require at least one node, and don't work on the normal k8s
openshift-origin container as provided by ansible-test --docker -v k8s
minikube, Kubernetes from Docker or any other Kubernetes service will
suffice.
If kubectl is already using the right config file and context, you can
just do
```
cd tests/integration/targets/k8s
./runme.sh -vv
```
otherwise set one or both of `K8S_AUTH_KUBECONFIG` and `K8S_AUTH_CONTEXT`
and use the same command

Some files were not shown because too many files have changed in this diff Show More