Update CI - Continue work from #195 (#202)

* Upgrade Ansible and OKD versions for CI

* Use ubi9 and fix sanity

* Use correct pip install

* Try using quotes

* Ensure python3.9

* Upgrade ansible and molecule versions

* Remove DeploymentConfig

DeploymentConfigs are deprecated and seem to now be causing idempotence
problems. Replacing them with Deployments fixes it.

* Attempt to fix ldap integration tests

Signed-off-by: Alina Buzachis <abuzachis@redhat.com>

* Move sanity and unit tests to GH actions

Signed-off-by: Alina Buzachis <abuzachis@redhat.com>

* First round of sanity fixes

Signed-off-by: Alina Buzachis <abuzachis@redhat.com>

* Add kubernetes.core collection as sanity requirement

Signed-off-by: Alina Buzachis <abuzachis@redhat.com>

* Add ignore-2.16.txt

Signed-off-by: Alina Buzachis <abuzachis@redhat.com>

* Attempt to fix units

Signed-off-by: Alina Buzachis <abuzachis@redhat.com>

* Add ignore-2.17

Signed-off-by: Alina Buzachis <abuzachis@redhat.com>

* Attempt to fix unit tests

Signed-off-by: Alina Buzachis <abuzachis@redhat.com>

* Add pytest-ansible to test-requirements.txt

Signed-off-by: Alina Buzachis <abuzachis@redhat.com>

* Add changelog fragment

Signed-off-by: Alina Buzachis <abuzachis@redhat.com>

* Add workflow for ansible-lint

Signed-off-by: Alina Buzachis <abuzachis@redhat.com>

* Apply black

Signed-off-by: Alina Buzachis <abuzachis@redhat.com>

* Fix linters

Signed-off-by: Alina Buzachis <abuzachis@redhat.com>

* Add # fmt: skip

Signed-off-by: Alina Buzachis <abuzachis@redhat.com>

* Yet another round of linting

Signed-off-by: Alina Buzachis <abuzachis@redhat.com>

* Yet another round of linting

Signed-off-by: Alina Buzachis <abuzachis@redhat.com>

* Remove setup.cfg

Signed-off-by: Alina Buzachis <abuzachis@redhat.com>

* Revert #fmt

Signed-off-by: Alina Buzachis <abuzachis@redhat.com>

* Use ansible-core 2.14

Signed-off-by: Alina Buzachis <abuzachis@redhat.com>

* Cleanup ansible-lint ignores

Signed-off-by: Alina Buzachis <abuzachis@redhat.com>

* Try using service instead of pod IP

* Fix typo

* Actually use the correct port

* See if NetworkPolicy is preventing connection

* using Pod internal IP

* fix adm prune auth roles syntax

* adding some retry steps

* fix: openshift_builds target

* add flag --force-with-deps when building downstream collection

* Remove yamllint from tox linters, bump minimum python supported version to 3.9, Remove support for ansible-core < 2.14

---------

Signed-off-by: Alina Buzachis <abuzachis@redhat.com>
Co-authored-by: Mike Graves <mgraves@redhat.com>
Co-authored-by: Alina Buzachis <abuzachis@redhat.com>
This commit is contained in:
Bikouo Aubin
2023-11-15 18:00:38 +01:00
committed by GitHub
parent cb796e1298
commit a63e5b7b36
76 changed files with 4364 additions and 3510 deletions

View File

@@ -21,16 +21,13 @@
debug:
var: output
- name: Create deployment config
- name: Create deployment
community.okd.k8s:
state: present
name: hello-world
namespace: testing
definition: '{{ okd_dc_template }}'
wait: yes
wait_condition:
type: Available
status: True
vars:
k8s_pod_name: hello-world
k8s_pod_image: python
@@ -71,19 +68,12 @@
namespace: '{{ namespace }}'
definition: '{{ okd_imagestream_template }}'
- name: Create DeploymentConfig to reference ImageStream
community.okd.k8s:
name: '{{ k8s_pod_name }}'
namespace: '{{ namespace }}'
definition: '{{ okd_dc_template }}'
vars:
k8s_pod_name: is-idempotent-dc
- name: Create Deployment to reference ImageStream
community.okd.k8s:
name: '{{ k8s_pod_name }}'
namespace: '{{ namespace }}'
definition: '{{ k8s_deployment_template | combine(metadata) }}'
wait: true
vars:
k8s_pod_annotations:
"alpha.image.policy.openshift.io/resolve-names": "*"

View File

@@ -10,14 +10,14 @@ objects:
name: "Pod-${{ NAME }}"
spec:
containers:
- args:
- /bin/sh
- -c
- while true; do echo $(date); sleep 15; done
image: python:3.7-alpine
imagePullPolicy: Always
name: python
- args:
- /bin/sh
- -c
- while true; do echo $(date); sleep 15; done
image: python:3.7-alpine
imagePullPolicy: Always
name: python
parameters:
- name: NAME
- name: NAME
description: trailing name of the pod
required: true

View File

@@ -13,22 +13,22 @@ metadata:
tags: quickstart,examples
name: simple-example
objects:
- apiVersion: v1
kind: ConfigMap
metadata:
annotations:
description: Big example
name: ${NAME}
data:
content: "${CONTENT}"
- apiVersion: v1
kind: ConfigMap
metadata:
annotations:
description: Big example
name: ${NAME}
data:
content: "${CONTENT}"
parameters:
- description: The name assigned to the ConfigMap
displayName: Name
name: NAME
required: true
value: example
- description: The value for the content key of the configmap
displayName: Content
name: CONTENT
required: true
value: ''
- description: The name assigned to the ConfigMap
displayName: Name
name: NAME
required: true
value: example
- description: The value for the content key of the configmap
displayName: Content
name: CONTENT
required: true
value: ''

View File

@@ -4,7 +4,7 @@ dependency:
options:
requirements-file: requirements.yml
driver:
name: delegated
name: default
platforms:
- name: cluster
groups:
@@ -17,9 +17,6 @@ provisioner:
config_options:
inventory:
enable_plugins: community.okd.openshift
lint: |
set -e
ansible-lint
inventory:
hosts:
plugin: community.okd.openshift
@@ -34,14 +31,10 @@ provisioner:
ANSIBLE_COLLECTIONS_PATHS: ${OVERRIDE_COLLECTION_PATH:-$MOLECULE_PROJECT_DIRECTORY}
verifier:
name: ansible
lint: |
set -e
ansible-lint
scenario:
name: default
test_sequence:
- dependency
- lint
- syntax
- prepare
- converge

View File

@@ -37,12 +37,12 @@
name: cluster
spec:
identityProviders:
- name: htpasswd_provider
mappingMethod: claim
type: HTPasswd
htpasswd:
fileData:
name: htpass-secret
- name: htpasswd_provider
mappingMethod: claim
type: HTPasswd
htpasswd:
fileData:
name: htpass-secret
- name: Create ClusterRoleBinding for test user
community.okd.k8s:

View File

@@ -89,6 +89,7 @@ def execute():
ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
connection = ldap.initialize(module.params['server_uri'])
connection.set_option(ldap.OPT_REFERRALS, 0)
try:
connection.simple_bind_s(module.params['bind_dn'], module.params['bind_pw'])
except ldap.LDAPError as e:

View File

@@ -1,227 +1,227 @@
---
- block:
- name: Get LDAP definition
set_fact:
ldap_entries: "{{ lookup('template', 'ad/definition.j2') | from_yaml }}"
- name: Get LDAP definition
set_fact:
ldap_entries: "{{ lookup('template', 'ad/definition.j2') | from_yaml }}"
- name: Delete openshift groups if existing
community.okd.k8s:
state: absent
kind: Group
version: "user.openshift.io/v1"
name: "{{ item }}"
with_items:
- admins
- developers
- name: Delete existing LDAP Entries
openshift_ldap_entry:
bind_dn: "{{ ldap_bind_dn }}"
bind_pw: "{{ ldap_bind_pw }}"
server_uri: "{{ ldap_server_uri }}"
dn: "{{ item.dn }}"
state: absent
with_items: "{{ ldap_entries.users + ldap_entries.units | reverse | list }}"
- name: Create LDAP Entries
openshift_ldap_entry:
bind_dn: "{{ ldap_bind_dn }}"
bind_pw: "{{ ldap_bind_pw }}"
server_uri: "{{ ldap_server_uri }}"
dn: "{{ item.dn }}"
attributes: "{{ item.attr }}"
objectClass: "{{ item.class }}"
with_items: "{{ ldap_entries.units + ldap_entries.users }}"
- name: Load test configurations
set_fact:
sync_config: "{{ lookup('template', 'ad/sync-config.j2') | from_yaml }}"
- name: Synchronize Groups
community.okd.openshift_adm_groups_sync:
config: "{{ sync_config }}"
check_mode: yes
register: result
- name: Validate Group going to be created
assert:
that:
- result is changed
- admins_group
- devs_group
- '"jane.smith@ansible.org" in {{ admins_group.users }}'
- '"jim.adams@ansible.org" in {{ admins_group.users }}'
- '"jordanbulls@ansible.org" in {{ devs_group.users }}'
- admins_group.users | length == 2
- devs_group.users | length == 1
vars:
admins_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'admins') | first }}"
devs_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'developers') | first }}"
- name: Synchronize Groups (Remove check_mode)
community.okd.openshift_adm_groups_sync:
config: "{{ sync_config }}"
register: result
- name: Validate Group going to be created
assert:
that:
- result is changed
- name: Read admins group
kubernetes.core.k8s_info:
kind: Group
version: "user.openshift.io/v1"
name: admins
register: result
- name: Validate group was created
assert:
that:
- result.resources | length == 1
- '"jane.smith@ansible.org" in {{ result.resources.0.users }}'
- '"jim.adams@ansible.org" in {{ result.resources.0.users }}'
- name: Read developers group
kubernetes.core.k8s_info:
kind: Group
version: "user.openshift.io/v1"
name: developers
register: result
- name: Validate group was created
assert:
that:
- result.resources | length == 1
- '"jordanbulls@ansible.org" in {{ result.resources.0.users }}'
- name: Define user dn to delete
set_fact:
user_to_delete: "cn=Jane,ou=engineers,ou=activeD,{{ ldap_root }}"
- name: Delete 1 admin user
openshift_ldap_entry:
bind_dn: "{{ ldap_bind_dn }}"
bind_pw: "{{ ldap_bind_pw }}"
server_uri: "{{ ldap_server_uri }}"
dn: "{{ user_to_delete }}"
state: absent
- name: Synchronize Openshift groups using allow_groups
community.okd.openshift_adm_groups_sync:
config: "{{ sync_config }}"
allow_groups:
- name: Delete openshift groups if existing
community.okd.k8s:
state: absent
kind: Group
version: "user.openshift.io/v1"
name: "{{ item }}"
with_items:
- admins
- developers
type: openshift
register: openshift_sync
- name: Validate that only developers group was sync
assert:
that:
- openshift_sync is changed
- openshift_sync.groups | length == 1
- openshift_sync.groups.0.metadata.name == "developers"
- name: Delete existing LDAP Entries
openshift_ldap_entry:
bind_dn: "{{ ldap_bind_dn }}"
bind_pw: "{{ ldap_bind_pw }}"
server_uri: "{{ ldap_server_uri }}"
dn: "{{ item.dn }}"
state: absent
with_items: "{{ ldap_entries.users + ldap_entries.units | reverse | list }}"
- name: Read admins group
kubernetes.core.k8s_info:
kind: Group
version: "user.openshift.io/v1"
name: admins
register: result
- name: Create LDAP Entries
openshift_ldap_entry:
bind_dn: "{{ ldap_bind_dn }}"
bind_pw: "{{ ldap_bind_pw }}"
server_uri: "{{ ldap_server_uri }}"
dn: "{{ item.dn }}"
attributes: "{{ item.attr }}"
objectClass: "{{ item.class }}"
with_items: "{{ ldap_entries.units + ldap_entries.users }}"
- name: Validate admins group content has not changed
assert:
that:
- result.resources | length == 1
- '"jane.smith@ansible.org" in {{ result.resources.0.users }}'
- '"jim.adams@ansible.org" in {{ result.resources.0.users }}'
- name: Load test configurations
set_fact:
sync_config: "{{ lookup('template', 'ad/sync-config.j2') | from_yaml }}"
- name: Synchronize Openshift groups using deny_groups
community.okd.openshift_adm_groups_sync:
config: "{{ sync_config }}"
deny_groups:
- developers
type: openshift
register: openshift_sync
- name: Synchronize Groups
community.okd.openshift_adm_groups_sync:
config: "{{ sync_config }}"
check_mode: yes
register: result
- name: Validate that only admins group was sync
assert:
that:
- openshift_sync is changed
- openshift_sync.groups | length == 1
- openshift_sync.groups.0.metadata.name == "admins"
- name: Validate Group going to be created
assert:
that:
- result is changed
- admins_group
- devs_group
- '"jane.smith@ansible.org" in {{ admins_group.users }}'
- '"jim.adams@ansible.org" in {{ admins_group.users }}'
- '"jordanbulls@ansible.org" in {{ devs_group.users }}'
- admins_group.users | length == 2
- devs_group.users | length == 1
vars:
admins_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'admins') | first }}"
devs_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'developers') | first }}"
- name: Read admins group
kubernetes.core.k8s_info:
kind: Group
version: "user.openshift.io/v1"
name: admins
register: result
- name: Synchronize Groups (Remove check_mode)
community.okd.openshift_adm_groups_sync:
config: "{{ sync_config }}"
register: result
- name: Validate admins group contains only 1 user now
assert:
that:
- result.resources | length == 1
- result.resources.0.users == ["jim.adams@ansible.org"]
- name: Validate Group going to be created
assert:
that:
- result is changed
- name: Set users to delete (delete all developers users)
set_fact:
user_to_delete: "cn=Jordan,ou=engineers,ou=activeD,{{ ldap_root }}"
- name: Read admins group
kubernetes.core.k8s_info:
kind: Group
version: "user.openshift.io/v1"
name: admins
register: result
- name: Delete 1 admin user
openshift_ldap_entry:
bind_dn: "{{ ldap_bind_dn }}"
bind_pw: "{{ ldap_bind_pw }}"
server_uri: "{{ ldap_server_uri }}"
dn: "{{ user_to_delete }}"
state: absent
- name: Validate group was created
assert:
that:
- result.resources | length == 1
- '"jane.smith@ansible.org" in {{ result.resources.0.users }}'
- '"jim.adams@ansible.org" in {{ result.resources.0.users }}'
- name: Prune groups
community.okd.openshift_adm_groups_sync:
config: "{{ sync_config }}"
state: absent
register: result
- name: Read developers group
kubernetes.core.k8s_info:
kind: Group
version: "user.openshift.io/v1"
name: developers
register: result
- name: Validate result is changed (only developers group be deleted)
assert:
that:
- result is changed
- result.groups | length == 1
- name: Validate group was created
assert:
that:
- result.resources | length == 1
- '"jordanbulls@ansible.org" in {{ result.resources.0.users }}'
- name: Get developers group info
kubernetes.core.k8s_info:
kind: Group
version: "user.openshift.io/v1"
name: developers
register: result
- name: Define user dn to delete
set_fact:
user_to_delete: "cn=Jane,ou=engineers,ou=activeD,{{ ldap_root }}"
- name: assert group was deleted
assert:
that:
- result.resources | length == 0
- name: Delete 1 admin user
openshift_ldap_entry:
bind_dn: "{{ ldap_bind_dn }}"
bind_pw: "{{ ldap_bind_pw }}"
server_uri: "{{ ldap_server_uri }}"
dn: "{{ user_to_delete }}"
state: absent
- name: Get admins group info
kubernetes.core.k8s_info:
kind: Group
version: "user.openshift.io/v1"
name: admins
register: result
- name: Synchronize Openshift groups using allow_groups
community.okd.openshift_adm_groups_sync:
config: "{{ sync_config }}"
allow_groups:
- developers
type: openshift
register: openshift_sync
- name: assert group was not deleted
assert:
that:
- result.resources | length == 1
- name: Validate that only developers group was sync
assert:
that:
- openshift_sync is changed
- openshift_sync.groups | length == 1
- openshift_sync.groups.0.metadata.name == "developers"
- name: Prune groups once again (idempotency)
community.okd.openshift_adm_groups_sync:
config: "{{ sync_config }}"
state: absent
register: result
- name: Read admins group
kubernetes.core.k8s_info:
kind: Group
version: "user.openshift.io/v1"
name: admins
register: result
- name: Assert nothing was changed
assert:
that:
- result is not changed
- name: Validate admins group content has not changed
assert:
that:
- result.resources | length == 1
- '"jane.smith@ansible.org" in {{ result.resources.0.users }}'
- '"jim.adams@ansible.org" in {{ result.resources.0.users }}'
- name: Synchronize Openshift groups using deny_groups
community.okd.openshift_adm_groups_sync:
config: "{{ sync_config }}"
deny_groups:
- developers
type: openshift
register: openshift_sync
- name: Validate that only admins group was sync
assert:
that:
- openshift_sync is changed
- openshift_sync.groups | length == 1
- openshift_sync.groups.0.metadata.name == "admins"
- name: Read admins group
kubernetes.core.k8s_info:
kind: Group
version: "user.openshift.io/v1"
name: admins
register: result
- name: Validate admins group contains only 1 user now
assert:
that:
- result.resources | length == 1
- result.resources.0.users == ["jim.adams@ansible.org"]
- name: Set users to delete (delete all developers users)
set_fact:
user_to_delete: "cn=Jordan,ou=engineers,ou=activeD,{{ ldap_root }}"
- name: Delete 1 admin user
openshift_ldap_entry:
bind_dn: "{{ ldap_bind_dn }}"
bind_pw: "{{ ldap_bind_pw }}"
server_uri: "{{ ldap_server_uri }}"
dn: "{{ user_to_delete }}"
state: absent
- name: Prune groups
community.okd.openshift_adm_groups_sync:
config: "{{ sync_config }}"
state: absent
register: result
- name: Validate result is changed (only developers group be deleted)
assert:
that:
- result is changed
- result.groups | length == 1
- name: Get developers group info
kubernetes.core.k8s_info:
kind: Group
version: "user.openshift.io/v1"
name: developers
register: result
- name: assert group was deleted
assert:
that:
- result.resources | length == 0
- name: Get admins group info
kubernetes.core.k8s_info:
kind: Group
version: "user.openshift.io/v1"
name: admins
register: result
- name: assert group was not deleted
assert:
that:
- result.resources | length == 1
- name: Prune groups once again (idempotency)
community.okd.openshift_adm_groups_sync:
config: "{{ sync_config }}"
state: absent
register: result
- name: Assert nothing was changed
assert:
that:
- result is not changed
always:
- name: Delete openshift groups if existing

View File

@@ -1,166 +1,165 @@
---
- block:
- name: Get LDAP definition
set_fact:
ldap_entries: "{{ lookup('template', 'augmented-ad/definition.j2') | from_yaml }}"
- name: Get LDAP definition
set_fact:
ldap_entries: "{{ lookup('template', 'augmented-ad/definition.j2') | from_yaml }}"
- name: Delete openshift groups if existing
community.okd.k8s:
state: absent
kind: Group
version: "user.openshift.io/v1"
name: "{{ item }}"
with_items:
- banking
- insurance
- name: Delete openshift groups if existing
community.okd.k8s:
state: absent
kind: Group
version: "user.openshift.io/v1"
name: "{{ item }}"
with_items:
- banking
- insurance
- name: Delete existing LDAP entries
openshift_ldap_entry:
bind_dn: "{{ ldap_bind_dn }}"
bind_pw: "{{ ldap_bind_pw }}"
server_uri: "{{ ldap_server_uri }}"
dn: "{{ item.dn }}"
state: absent
with_items: "{{ ldap_entries.users + ldap_entries.groups + ldap_entries.units | reverse | list }}"
- name: Delete existing LDAP entries
openshift_ldap_entry:
bind_dn: "{{ ldap_bind_dn }}"
bind_pw: "{{ ldap_bind_pw }}"
server_uri: "{{ ldap_server_uri }}"
dn: "{{ item.dn }}"
state: absent
with_items: "{{ ldap_entries.users + ldap_entries.groups + ldap_entries.units | reverse | list }}"
- name: Create LDAP Entries
openshift_ldap_entry:
bind_dn: "{{ ldap_bind_dn }}"
bind_pw: "{{ ldap_bind_pw }}"
server_uri: "{{ ldap_server_uri }}"
dn: "{{ item.dn }}"
attributes: "{{ item.attr }}"
objectClass: "{{ item.class }}"
with_items: "{{ ldap_entries.units + ldap_entries.groups + ldap_entries.users }}"
- name: Create LDAP Entries
openshift_ldap_entry:
bind_dn: "{{ ldap_bind_dn }}"
bind_pw: "{{ ldap_bind_pw }}"
server_uri: "{{ ldap_server_uri }}"
dn: "{{ item.dn }}"
attributes: "{{ item.attr }}"
objectClass: "{{ item.class }}"
with_items: "{{ ldap_entries.units + ldap_entries.groups + ldap_entries.users }}"
- name: Load test configurations
set_fact:
sync_config: "{{ lookup('template', 'augmented-ad/sync-config.j2') | from_yaml }}"
- name: Load test configurations
set_fact:
sync_config: "{{ lookup('template', 'augmented-ad/sync-config.j2') | from_yaml }}"
- name: Synchronize Groups
community.okd.openshift_adm_groups_sync:
config: "{{ sync_config }}"
check_mode: yes
register: result
- name: Synchronize Groups
community.okd.openshift_adm_groups_sync:
config: "{{ sync_config }}"
check_mode: yes
register: result
- name: Validate that 'banking' and 'insurance' groups were created
assert:
that:
- result is changed
- banking_group
- insurance_group
- '"james-allan@ansible.org" in {{ banking_group.users }}'
- '"gordon-kane@ansible.org" in {{ banking_group.users }}'
- '"alice-courtney@ansible.org" in {{ insurance_group.users }}'
- banking_group.users | length == 2
- insurance_group.users | length == 1
vars:
banking_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'banking') | first }}"
insurance_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'insurance') | first }}"
- name: Validate that 'banking' and 'insurance' groups were created
assert:
that:
- result is changed
- banking_group
- insurance_group
- '"james-allan@ansible.org" in {{ banking_group.users }}'
- '"gordon-kane@ansible.org" in {{ banking_group.users }}'
- '"alice-courtney@ansible.org" in {{ insurance_group.users }}'
- banking_group.users | length == 2
- insurance_group.users | length == 1
vars:
banking_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'banking') | first }}"
insurance_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'insurance') | first }}"
- name: Synchronize Groups (Remove check_mode)
community.okd.openshift_adm_groups_sync:
config: "{{ sync_config }}"
register: result
- name: Synchronize Groups (Remove check_mode)
community.okd.openshift_adm_groups_sync:
config: "{{ sync_config }}"
register: result
- name: Validate Group going to be created
assert:
that:
- result is changed
- name: Validate Group going to be created
assert:
that:
- result is changed
- name: Define facts for group to create
set_fact:
ldap_groups:
- name: banking
users:
- "james-allan@ansible.org"
- "gordon-kane@ansible.org"
- name: insurance
users:
- "alice-courtney@ansible.org"
- name: Define facts for group to create
set_fact:
ldap_groups:
- name: banking
users:
- "james-allan@ansible.org"
- "gordon-kane@ansible.org"
- name: insurance
users:
- "alice-courtney@ansible.org"
- name: Read 'banking' openshift group
kubernetes.core.k8s_info:
kind: Group
version: "user.openshift.io/v1"
name: banking
register: result
- name: Validate group info
assert:
that:
- result.resources | length == 1
- '"james-allan@ansible.org" in {{ result.resources.0.users }}'
- '"gordon-kane@ansible.org" in {{ result.resources.0.users }}'
- name: Read 'banking' openshift group
kubernetes.core.k8s_info:
kind: Group
version: "user.openshift.io/v1"
name: banking
register: result
- name: Read 'insurance' openshift group
kubernetes.core.k8s_info:
kind: Group
version: "user.openshift.io/v1"
name: insurance
register: result
- name: Validate group info
assert:
that:
- result.resources | length == 1
- '"james-allan@ansible.org" in {{ result.resources.0.users }}'
- '"gordon-kane@ansible.org" in {{ result.resources.0.users }}'
- name: Validate group info
assert:
that:
- result.resources | length == 1
- 'result.resources.0.users == ["alice-courtney@ansible.org"]'
- name: Read 'insurance' openshift group
kubernetes.core.k8s_info:
kind: Group
version: "user.openshift.io/v1"
name: insurance
register: result
- name: Delete employee from 'insurance' group
openshift_ldap_entry:
bind_dn: "{{ ldap_bind_dn }}"
bind_pw: "{{ ldap_bind_pw }}"
server_uri: "{{ ldap_server_uri }}"
dn: "cn=Alice,ou=employee,ou=augmentedAD,{{ ldap_root }}"
state: absent
- name: Validate group info
assert:
that:
- result.resources | length == 1
- 'result.resources.0.users == ["alice-courtney@ansible.org"]'
- name: Prune groups
community.okd.openshift_adm_groups_sync:
config: "{{ sync_config }}"
state: absent
register: result
- name: Delete employee from 'insurance' group
openshift_ldap_entry:
bind_dn: "{{ ldap_bind_dn }}"
bind_pw: "{{ ldap_bind_pw }}"
server_uri: "{{ ldap_server_uri }}"
dn: "cn=Alice,ou=employee,ou=augmentedAD,{{ ldap_root }}"
state: absent
- name: Validate result is changed (only insurance group be deleted)
assert:
that:
- result is changed
- result.groups | length == 1
- name: Prune groups
community.okd.openshift_adm_groups_sync:
config: "{{ sync_config }}"
state: absent
register: result
- name: Get 'insurance' openshift group info
kubernetes.core.k8s_info:
kind: Group
version: "user.openshift.io/v1"
name: insurance
register: result
- name: Validate result is changed (only insurance group be deleted)
assert:
that:
- result is changed
- result.groups | length == 1
- name: assert group was deleted
assert:
that:
- result.resources | length == 0
- name: Get 'insurance' openshift group info
kubernetes.core.k8s_info:
kind: Group
version: "user.openshift.io/v1"
name: insurance
register: result
- name: Get 'banking' openshift group info
kubernetes.core.k8s_info:
kind: Group
version: "user.openshift.io/v1"
name: banking
register: result
- name: assert group was deleted
assert:
that:
- result.resources | length == 0
- name: assert group was not deleted
assert:
that:
- result.resources | length == 1
- name: Get 'banking' openshift group info
kubernetes.core.k8s_info:
kind: Group
version: "user.openshift.io/v1"
name: banking
register: result
- name: Prune groups once again (idempotency)
community.okd.openshift_adm_groups_sync:
config: "{{ sync_config }}"
state: absent
register: result
- name: assert group was not deleted
assert:
that:
- result.resources | length == 1
- name: Prune groups once again (idempotency)
community.okd.openshift_adm_groups_sync:
config: "{{ sync_config }}"
state: absent
register: result
- name: Assert no change was made
assert:
that:
- result is not changed
- name: Assert no change was made
assert:
that:
- result is not changed
always:
- name: Delete openshift groups if existing

View File

@@ -1,5 +1,5 @@
---
- name: Get cluster information
- name: Get cluster information
kubernetes.core.k8s_cluster_info:
register: info
@@ -16,30 +16,29 @@
app: ldap
spec:
containers:
- name: ldap
image: bitnami/openldap
env:
- name: LDAP_ADMIN_USERNAME
value: "{{ ldap_admin_user }}"
- name: LDAP_ADMIN_PASSWORD
value: "{{ ldap_admin_password }}"
- name: LDAP_USERS
value: "ansible"
- name: LDAP_PASSWORDS
value: "ansible123"
- name: LDAP_ROOT
value: "{{ ldap_root }}"
ports:
- containerPort: 1389
- name: ldap
image: bitnami/openldap
env:
- name: LDAP_ADMIN_USERNAME
value: "{{ ldap_admin_user }}"
- name: LDAP_ADMIN_PASSWORD
value: "{{ ldap_admin_password }}"
- name: LDAP_USERS
value: "ansible"
- name: LDAP_PASSWORDS
value: "ansible123"
- name: LDAP_ROOT
value: "{{ ldap_root }}"
ports:
- containerPort: 1389
name: ldap-server
register: pod_info
- name: Set Pod Internal IP
set_fact:
podIp: "{{ pod_info.result.status.podIP }}"
- name: Set LDAP Common facts
set_fact:
ldap_server_uri: "ldap://{{ podIp }}:1389"
# we can use the Pod IP directly because the integration are running inside a Pod in the
# same openshift cluster
ldap_server_uri: "ldap://{{ pod_info.result.status.podIP }}:1389"
ldap_bind_dn: "cn={{ ldap_admin_user }},{{ ldap_root }}"
ldap_bind_pw: "{{ ldap_admin_password }}"
@@ -53,8 +52,10 @@
bind_pw: "{{ ldap_bind_pw }}"
dn: "ou=users,{{ ldap_root }}"
server_uri: "{{ ldap_server_uri }}"
# ignore_errors: true
# register: ping_ldap
register: test_ldap
retries: 10
delay: 5
until: test_ldap is not failed
- include_tasks: "tasks/python-ldap-not-installed.yml"
- include_tasks: "tasks/rfc2307.yml"

View File

@@ -1,3 +1,4 @@
---
- block:
- name: Create temp directory
tempfile:

View File

@@ -1,459 +1,460 @@
---
- block:
- name: Get LDAP definition
set_fact:
ldap_resources: "{{ lookup('template', 'rfc2307/definition.j2') | from_yaml }}"
- name: Get LDAP definition
set_fact:
ldap_resources: "{{ lookup('template', 'rfc2307/definition.j2') | from_yaml }}"
- name: Delete openshift groups if existing
community.okd.k8s:
state: absent
kind: Group
version: "user.openshift.io/v1"
name: "{{ item }}"
with_items:
- admins
- engineers
- developers
- name: Delete existing LDAP entries
openshift_ldap_entry:
bind_dn: "{{ ldap_bind_dn }}"
bind_pw: "{{ ldap_bind_pw }}"
server_uri: "{{ ldap_server_uri }}"
dn: "{{ item.dn }}"
state: absent
with_items: "{{ ldap_resources.users + ldap_resources.groups + ldap_resources.units | reverse | list }}"
- name: Create LDAP units
openshift_ldap_entry:
bind_dn: "{{ ldap_bind_dn }}"
bind_pw: "{{ ldap_bind_pw }}"
server_uri: "{{ ldap_server_uri }}"
dn: "{{ item.dn }}"
attributes: "{{ item.attr }}"
objectClass: "{{ item.class }}"
with_items: "{{ ldap_resources.units }}"
- name: Create LDAP Groups
openshift_ldap_entry:
bind_dn: "{{ ldap_bind_dn }}"
bind_pw: "{{ ldap_bind_pw }}"
server_uri: "{{ ldap_server_uri }}"
dn: "{{ item.dn }}"
attributes: "{{ item.attr }}"
objectClass: "{{ item.class }}"
with_items: "{{ ldap_resources.groups }}"
- name: Create LDAP users
openshift_ldap_entry:
bind_dn: "{{ ldap_bind_dn }}"
bind_pw: "{{ ldap_bind_pw }}"
server_uri: "{{ ldap_server_uri }}"
dn: "{{ item.dn }}"
attributes: "{{ item.attr }}"
objectClass: "{{ item.class }}"
with_items: "{{ ldap_resources.users }}"
- name: Load test configurations
set_fact:
configs: "{{ lookup('template', 'rfc2307/sync-config.j2') | from_yaml }}"
- name: Synchronize Groups
community.okd.openshift_adm_groups_sync:
config: "{{ configs.simple }}"
check_mode: yes
register: result
- name: Validate Group going to be created
assert:
that:
- result is changed
- admins_group
- devs_group
- '"jane.smith@ansible.org" in {{ admins_group.users }}'
- '"jim.adams@ansible.org" in {{ devs_group.users }}'
- '"jordanbulls@ansible.org" in {{ devs_group.users }}'
- admins_group.users | length == 1
- devs_group.users | length == 2
vars:
admins_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'admins') | first }}"
devs_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'developers') | first }}"
- name: Synchronize Groups - User defined mapping
community.okd.openshift_adm_groups_sync:
config: "{{ configs.user_defined }}"
check_mode: yes
register: result
- name: Validate Group going to be created
assert:
that:
- result is changed
- admins_group
- devs_group
- '"jane.smith@ansible.org" in {{ admins_group.users }}'
- '"jim.adams@ansible.org" in {{ devs_group.users }}'
- '"jordanbulls@ansible.org" in {{ devs_group.users }}'
- admins_group.users | length == 1
- devs_group.users | length == 2
vars:
admins_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'ansible-admins') | first }}"
devs_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'ansible-devs') | first }}"
- name: Synchronize Groups - Using dn for every query
community.okd.openshift_adm_groups_sync:
config: "{{ configs.dn_everywhere }}"
check_mode: yes
register: result
- name: Validate Group going to be created
assert:
that:
- result is changed
- admins_group
- devs_group
- '"cn=Jane,ou=people,ou=rfc2307,{{ ldap_root }}" in {{ admins_group.users }}'
- '"cn=Jim,ou=people,ou=rfc2307,{{ ldap_root }}" in {{ devs_group.users }}'
- '"cn=Jordan,ou=people,ou=rfc2307,{{ ldap_root }}" in {{ devs_group.users }}'
- admins_group.users | length == 1
- devs_group.users | length == 2
vars:
admins_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'cn=admins,ou=groups,ou=rfc2307,' + ldap_root ) | first }}"
devs_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'cn=developers,ou=groups,ou=rfc2307,' + ldap_root ) | first }}"
- name: Synchronize Groups - Partially user defined mapping
community.okd.openshift_adm_groups_sync:
config: "{{ configs.partially_user_defined }}"
check_mode: yes
register: result
- name: Validate Group going to be created
assert:
that:
- result is changed
- admins_group
- devs_group
- '"jane.smith@ansible.org" in {{ admins_group.users }}'
- '"jim.adams@ansible.org" in {{ devs_group.users }}'
- '"jordanbulls@ansible.org" in {{ devs_group.users }}'
- admins_group.users | length == 1
- devs_group.users | length == 2
vars:
admins_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'ansible-admins') | first }}"
devs_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'developers') | first }}"
- name: Delete Group 'engineers' if created before
community.okd.k8s:
state: absent
kind: Group
version: "user.openshift.io/v1"
name: 'engineers'
wait: yes
ignore_errors: yes
- name: Synchronize Groups - Partially user defined mapping
community.okd.openshift_adm_groups_sync:
config: "{{ configs.out_scope }}"
check_mode: yes
register: result
ignore_errors: yes
- name: Assert group sync failed due to non-existent member
assert:
that:
- result is failed
- result.msg.startswith("Entry not found for base='cn=Matthew,ou=people,ou=outrfc2307,{{ ldap_root }}'")
- name: Define sync configuration with tolerateMemberNotFoundErrors
set_fact:
config_out_of_scope_tolerate_not_found: "{{ configs.out_scope | combine({'rfc2307': merge_rfc2307 })}}"
vars:
merge_rfc2307: "{{ configs.out_scope.rfc2307 | combine({'tolerateMemberNotFoundErrors': 'true'}) }}"
- name: Synchronize Groups - Partially user defined mapping (tolerateMemberNotFoundErrors=true)
community.okd.openshift_adm_groups_sync:
config: "{{ config_out_of_scope_tolerate_not_found }}"
check_mode: yes
register: result
- name: Assert group sync did not fail (tolerateMemberNotFoundErrors=true)
assert:
that:
- result is changed
- result.groups | length == 1
- result.groups.0.metadata.name == 'engineers'
- result.groups.0.users == ['Abraham']
- name: Create Group 'engineers'
community.okd.k8s:
state: present
wait: yes
definition:
- name: Delete openshift groups if existing
community.okd.k8s:
state: absent
kind: Group
apiVersion: "user.openshift.io/v1"
metadata:
name: engineers
users: []
- name: Try to sync LDAP group with Openshift existing group not created using sync should failed
community.okd.openshift_adm_groups_sync:
config: "{{ config_out_of_scope_tolerate_not_found }}"
check_mode: yes
register: result
ignore_errors: yes
- name: Validate group sync failed
assert:
that:
- result is failed
- '"openshift.io/ldap.host label did not match sync host" in result.msg'
- name: Define allow_groups and deny_groups groups
set_fact:
allow_groups:
- "cn=developers,ou=groups,ou=rfc2307,{{ ldap_root }}"
deny_groups:
- "cn=admins,ou=groups,ou=rfc2307,{{ ldap_root }}"
- name: Synchronize Groups using allow_groups
community.okd.openshift_adm_groups_sync:
config: "{{ configs.simple }}"
allow_groups: "{{ allow_groups }}"
register: result
check_mode: yes
- name: Validate Group going to be created
assert:
that:
- result is changed
- result.groups | length == 1
- result.groups.0.metadata.name == "developers"
- name: Synchronize Groups using deny_groups
community.okd.openshift_adm_groups_sync:
config: "{{ configs.simple }}"
deny_groups: "{{ deny_groups }}"
register: result
check_mode: yes
- name: Validate Group going to be created
assert:
that:
- result is changed
- result.groups | length == 1
- result.groups.0.metadata.name == "developers"
- name: Synchronize groups, remove check_mode
community.okd.openshift_adm_groups_sync:
config: "{{ configs.simple }}"
register: result
- name: Validate result is changed
assert:
that:
- result is changed
- name: Read Groups
kubernetes.core.k8s_info:
kind: Group
version: "user.openshift.io/v1"
name: admins
register: result
- name: Validate group was created
assert:
that:
- result.resources | length == 1
- '"jane.smith@ansible.org" in {{ result.resources.0.users }}'
- name: Read Groups
kubernetes.core.k8s_info:
kind: Group
version: "user.openshift.io/v1"
name: developers
register: result
- name: Validate group was created
assert:
that:
- result.resources | length == 1
- '"jim.adams@ansible.org" in {{ result.resources.0.users }}'
- '"jordanbulls@ansible.org" in {{ result.resources.0.users }}'
- name: Set users to delete (no admins users anymore and only 1 developer kept)
set_fact:
users_to_delete:
- "cn=Jane,ou=people,ou=rfc2307,{{ ldap_root }}"
- "cn=Jim,ou=people,ou=rfc2307,{{ ldap_root }}"
- name: Delete users from LDAP servers
openshift_ldap_entry:
bind_dn: "{{ ldap_bind_dn }}"
bind_pw: "{{ ldap_bind_pw }}"
server_uri: "{{ ldap_server_uri }}"
dn: "{{ item }}"
state: absent
with_items: "{{ users_to_delete }}"
- name: Define sync configuration with tolerateMemberNotFoundErrors
set_fact:
config_simple_tolerate_not_found: "{{ configs.simple | combine({'rfc2307': merge_rfc2307 })}}"
vars:
merge_rfc2307: "{{ configs.simple.rfc2307 | combine({'tolerateMemberNotFoundErrors': 'true'}) }}"
- name: Synchronize groups once again after users deletion
community.okd.openshift_adm_groups_sync:
config: "{{ config_simple_tolerate_not_found }}"
register: result
- name: Validate result is changed
assert:
that:
- result is changed
- name: Read Groups
kubernetes.core.k8s_info:
kind: Group
version: "user.openshift.io/v1"
name: admins
register: result
- name: Validate admins group does not contains users anymore
assert:
that:
- result.resources | length == 1
- result.resources.0.users == []
- name: Read Groups
kubernetes.core.k8s_info:
kind: Group
version: "user.openshift.io/v1"
name: developers
register: result
- name: Validate group was created
assert:
that:
- result.resources | length == 1
- '"jordanbulls@ansible.org" in {{ result.resources.0.users }}'
- name: Set group to delete
set_fact:
groups_to_delete:
- "cn=developers,ou=groups,ou=rfc2307,{{ ldap_root }}"
- name: Delete Group from LDAP servers
openshift_ldap_entry:
bind_dn: "{{ ldap_bind_dn }}"
bind_pw: "{{ ldap_bind_pw }}"
server_uri: "{{ ldap_server_uri }}"
dn: "{{ item }}"
state: absent
with_items: "{{ groups_to_delete }}"
- name: Prune groups
community.okd.openshift_adm_groups_sync:
config: "{{ config_simple_tolerate_not_found }}"
state: absent
register: result
check_mode: yes
- name: Validate that only developers group is candidate for Prune
assert:
that:
- result is changed
- result.groups | length == 1
- result.groups.0.metadata.name == "developers"
- name: Read Group (validate that check_mode did not performed update in the cluster)
kubernetes.core.k8s_info:
kind: Group
version: "user.openshift.io/v1"
name: developers
register: result
- name: Assert group was found
assert:
that:
- result.resources | length == 1
- name: Prune using allow_groups
community.okd.openshift_adm_groups_sync:
config: "{{ config_simple_tolerate_not_found }}"
allow_groups:
version: "user.openshift.io/v1"
name: "{{ item }}"
with_items:
- admins
- engineers
- developers
state: absent
register: result
check_mode: yes
- name: assert developers group was candidate for prune
assert:
that:
- result is changed
- result.groups | length == 1
- result.groups.0.metadata.name == "developers"
- name: Delete existing LDAP entries
openshift_ldap_entry:
bind_dn: "{{ ldap_bind_dn }}"
bind_pw: "{{ ldap_bind_pw }}"
server_uri: "{{ ldap_server_uri }}"
dn: "{{ item.dn }}"
state: absent
with_items: "{{ ldap_resources.users + ldap_resources.groups + ldap_resources.units | reverse | list }}"
- name: Prune using deny_groups
community.okd.openshift_adm_groups_sync:
config: "{{ config_simple_tolerate_not_found }}"
deny_groups:
- developers
state: absent
register: result
check_mode: yes
- name: Create LDAP units
openshift_ldap_entry:
bind_dn: "{{ ldap_bind_dn }}"
bind_pw: "{{ ldap_bind_pw }}"
server_uri: "{{ ldap_server_uri }}"
dn: "{{ item.dn }}"
attributes: "{{ item.attr }}"
objectClass: "{{ item.class }}"
with_items: "{{ ldap_resources.units }}"
- name: assert nothing found candidate for prune
assert:
that:
- result is not changed
- result.groups | length == 0
- name: Create LDAP Groups
openshift_ldap_entry:
bind_dn: "{{ ldap_bind_dn }}"
bind_pw: "{{ ldap_bind_pw }}"
server_uri: "{{ ldap_server_uri }}"
dn: "{{ item.dn }}"
attributes: "{{ item.attr }}"
objectClass: "{{ item.class }}"
with_items: "{{ ldap_resources.groups }}"
- name: Prune groups
community.okd.openshift_adm_groups_sync:
config: "{{ config_simple_tolerate_not_found }}"
state: absent
register: result
- name: Create LDAP users
openshift_ldap_entry:
bind_dn: "{{ ldap_bind_dn }}"
bind_pw: "{{ ldap_bind_pw }}"
server_uri: "{{ ldap_server_uri }}"
dn: "{{ item.dn }}"
attributes: "{{ item.attr }}"
objectClass: "{{ item.class }}"
with_items: "{{ ldap_resources.users }}"
- name: Validate result is changed
assert:
that:
- result is changed
- result.groups | length == 1
- name: Load test configurations
set_fact:
configs: "{{ lookup('template', 'rfc2307/sync-config.j2') | from_yaml }}"
- name: Get developers group info
kubernetes.core.k8s_info:
kind: Group
version: "user.openshift.io/v1"
name: developers
register: result
- name: Synchronize Groups
community.okd.openshift_adm_groups_sync:
config: "{{ configs.simple }}"
check_mode: yes
register: result
- name: assert group was deleted
assert:
that:
- result.resources | length == 0
- name: Validate Group going to be created
assert:
that:
- result is changed
- admins_group
- devs_group
- '"jane.smith@ansible.org" in {{ admins_group.users }}'
- '"jim.adams@ansible.org" in {{ devs_group.users }}'
- '"jordanbulls@ansible.org" in {{ devs_group.users }}'
- admins_group.users | length == 1
- devs_group.users | length == 2
vars:
admins_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'admins') | first }}"
devs_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'developers') | first }}"
- name: Get admins group info
kubernetes.core.k8s_info:
kind: Group
version: "user.openshift.io/v1"
name: admins
register: result
- name: Synchronize Groups - User defined mapping
community.okd.openshift_adm_groups_sync:
config: "{{ configs.user_defined }}"
check_mode: yes
register: result
- name: assert group was not deleted
assert:
that:
- result.resources | length == 1
- name: Validate Group going to be created
assert:
that:
- result is changed
- admins_group
- devs_group
- '"jane.smith@ansible.org" in {{ admins_group.users }}'
- '"jim.adams@ansible.org" in {{ devs_group.users }}'
- '"jordanbulls@ansible.org" in {{ devs_group.users }}'
- admins_group.users | length == 1
- devs_group.users | length == 2
vars:
admins_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'ansible-admins') | first }}"
devs_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'ansible-devs') | first }}"
- name: Prune groups once again (idempotency)
community.okd.openshift_adm_groups_sync:
config: "{{ config_simple_tolerate_not_found }}"
state: absent
register: result
- name: Synchronize Groups - Using dn for every query
community.okd.openshift_adm_groups_sync:
config: "{{ configs.dn_everywhere }}"
check_mode: yes
register: result
- name: Assert nothing changed
assert:
that:
- result is not changed
- result.groups | length == 0
- name: Validate Group going to be created
assert:
that:
- result is changed
- admins_group
- devs_group
- '"cn=Jane,ou=people,ou=rfc2307,{{ ldap_root }}" in {{ admins_group.users }}'
- '"cn=Jim,ou=people,ou=rfc2307,{{ ldap_root }}" in {{ devs_group.users }}'
- '"cn=Jordan,ou=people,ou=rfc2307,{{ ldap_root }}" in {{ devs_group.users }}'
- admins_group.users | length == 1
- devs_group.users | length == 2
vars:
admins_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'cn=admins,ou=groups,ou=rfc2307,' + ldap_root ) | first }}"
devs_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'cn=developers,ou=groups,ou=rfc2307,' + ldap_root ) | first }}"
- name: Synchronize Groups - Partially user defined mapping
community.okd.openshift_adm_groups_sync:
config: "{{ configs.partially_user_defined }}"
check_mode: yes
register: result
- name: Validate Group going to be created
assert:
that:
- result is changed
- admins_group
- devs_group
- '"jane.smith@ansible.org" in {{ admins_group.users }}'
- '"jim.adams@ansible.org" in {{ devs_group.users }}'
- '"jordanbulls@ansible.org" in {{ devs_group.users }}'
- admins_group.users | length == 1
- devs_group.users | length == 2
vars:
admins_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'ansible-admins') | first }}"
devs_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'developers') | first }}"
- name: Delete Group 'engineers' if created before
community.okd.k8s:
state: absent
kind: Group
version: "user.openshift.io/v1"
name: 'engineers'
wait: yes
ignore_errors: yes
- name: Synchronize Groups - Partially user defined mapping
community.okd.openshift_adm_groups_sync:
config: "{{ configs.out_scope }}"
check_mode: yes
register: result
ignore_errors: yes
- name: Assert group sync failed due to non-existent member
assert:
that:
- result is failed
- result.msg.startswith("Entry not found for base='cn=Matthew,ou=people,ou=outrfc2307,{{ ldap_root }}'")
- name: Define sync configuration with tolerateMemberNotFoundErrors
set_fact:
config_out_of_scope_tolerate_not_found: "{{ configs.out_scope | combine({'rfc2307': merge_rfc2307 })}}"
vars:
merge_rfc2307: "{{ configs.out_scope.rfc2307 | combine({'tolerateMemberNotFoundErrors': 'true'}) }}"
- name: Synchronize Groups - Partially user defined mapping (tolerateMemberNotFoundErrors=true)
community.okd.openshift_adm_groups_sync:
config: "{{ config_out_of_scope_tolerate_not_found }}"
check_mode: yes
register: result
- name: Assert group sync did not fail (tolerateMemberNotFoundErrors=true)
assert:
that:
- result is changed
- result.groups | length == 1
- result.groups.0.metadata.name == 'engineers'
- result.groups.0.users == ['Abraham']
- name: Create Group 'engineers'
community.okd.k8s:
state: present
wait: yes
definition:
kind: Group
apiVersion: "user.openshift.io/v1"
metadata:
name: engineers
users: []
- name: Try to sync LDAP group with Openshift existing group not created using sync should failed
community.okd.openshift_adm_groups_sync:
config: "{{ config_out_of_scope_tolerate_not_found }}"
check_mode: yes
register: result
ignore_errors: yes
- name: Validate group sync failed
assert:
that:
- result is failed
- '"openshift.io/ldap.host label did not match sync host" in result.msg'
- name: Define allow_groups and deny_groups groups
set_fact:
allow_groups:
- "cn=developers,ou=groups,ou=rfc2307,{{ ldap_root }}"
deny_groups:
- "cn=admins,ou=groups,ou=rfc2307,{{ ldap_root }}"
- name: Synchronize Groups using allow_groups
community.okd.openshift_adm_groups_sync:
config: "{{ configs.simple }}"
allow_groups: "{{ allow_groups }}"
register: result
check_mode: yes
- name: Validate Group going to be created
assert:
that:
- result is changed
- result.groups | length == 1
- result.groups.0.metadata.name == "developers"
- name: Synchronize Groups using deny_groups
community.okd.openshift_adm_groups_sync:
config: "{{ configs.simple }}"
deny_groups: "{{ deny_groups }}"
register: result
check_mode: yes
- name: Validate Group going to be created
assert:
that:
- result is changed
- result.groups | length == 1
- result.groups.0.metadata.name == "developers"
- name: Synchronize groups, remove check_mode
community.okd.openshift_adm_groups_sync:
config: "{{ configs.simple }}"
register: result
- name: Validate result is changed
assert:
that:
- result is changed
- name: Read Groups
kubernetes.core.k8s_info:
kind: Group
version: "user.openshift.io/v1"
name: admins
register: result
- name: Validate group was created
assert:
that:
- result.resources | length == 1
- '"jane.smith@ansible.org" in {{ result.resources.0.users }}'
- name: Read Groups
kubernetes.core.k8s_info:
kind: Group
version: "user.openshift.io/v1"
name: developers
register: result
- name: Validate group was created
assert:
that:
- result.resources | length == 1
- '"jim.adams@ansible.org" in {{ result.resources.0.users }}'
- '"jordanbulls@ansible.org" in {{ result.resources.0.users }}'
- name: Set users to delete (no admins users anymore and only 1 developer kept)
set_fact:
users_to_delete:
- "cn=Jane,ou=people,ou=rfc2307,{{ ldap_root }}"
- "cn=Jim,ou=people,ou=rfc2307,{{ ldap_root }}"
- name: Delete users from LDAP servers
openshift_ldap_entry:
bind_dn: "{{ ldap_bind_dn }}"
bind_pw: "{{ ldap_bind_pw }}"
server_uri: "{{ ldap_server_uri }}"
dn: "{{ item }}"
state: absent
with_items: "{{ users_to_delete }}"
- name: Define sync configuration with tolerateMemberNotFoundErrors
set_fact:
config_simple_tolerate_not_found: "{{ configs.simple | combine({'rfc2307': merge_rfc2307 })}}"
vars:
merge_rfc2307: "{{ configs.simple.rfc2307 | combine({'tolerateMemberNotFoundErrors': 'true'}) }}"
- name: Synchronize groups once again after users deletion
community.okd.openshift_adm_groups_sync:
config: "{{ config_simple_tolerate_not_found }}"
register: result
- name: Validate result is changed
assert:
that:
- result is changed
- name: Read Groups
kubernetes.core.k8s_info:
kind: Group
version: "user.openshift.io/v1"
name: admins
register: result
- name: Validate admins group does not contains users anymore
assert:
that:
- result.resources | length == 1
- result.resources.0.users == []
- name: Read Groups
kubernetes.core.k8s_info:
kind: Group
version: "user.openshift.io/v1"
name: developers
register: result
- name: Validate group was created
assert:
that:
- result.resources | length == 1
- '"jordanbulls@ansible.org" in {{ result.resources.0.users }}'
- name: Set group to delete
set_fact:
groups_to_delete:
- "cn=developers,ou=groups,ou=rfc2307,{{ ldap_root }}"
- name: Delete Group from LDAP servers
openshift_ldap_entry:
bind_dn: "{{ ldap_bind_dn }}"
bind_pw: "{{ ldap_bind_pw }}"
server_uri: "{{ ldap_server_uri }}"
dn: "{{ item }}"
state: absent
with_items: "{{ groups_to_delete }}"
- name: Prune groups
community.okd.openshift_adm_groups_sync:
config: "{{ config_simple_tolerate_not_found }}"
state: absent
register: result
check_mode: yes
- name: Validate that only developers group is candidate for Prune
assert:
that:
- result is changed
- result.groups | length == 1
- result.groups.0.metadata.name == "developers"
- name: Read Group (validate that check_mode did not performed update in the cluster)
kubernetes.core.k8s_info:
kind: Group
version: "user.openshift.io/v1"
name: developers
register: result
- name: Assert group was found
assert:
that:
- result.resources | length == 1
- name: Prune using allow_groups
community.okd.openshift_adm_groups_sync:
config: "{{ config_simple_tolerate_not_found }}"
allow_groups:
- developers
state: absent
register: result
check_mode: yes
- name: assert developers group was candidate for prune
assert:
that:
- result is changed
- result.groups | length == 1
- result.groups.0.metadata.name == "developers"
- name: Prune using deny_groups
community.okd.openshift_adm_groups_sync:
config: "{{ config_simple_tolerate_not_found }}"
deny_groups:
- developers
state: absent
register: result
check_mode: yes
- name: assert nothing found candidate for prune
assert:
that:
- result is not changed
- result.groups | length == 0
- name: Prune groups
community.okd.openshift_adm_groups_sync:
config: "{{ config_simple_tolerate_not_found }}"
state: absent
register: result
- name: Validate result is changed
assert:
that:
- result is changed
- result.groups | length == 1
- name: Get developers group info
kubernetes.core.k8s_info:
kind: Group
version: "user.openshift.io/v1"
name: developers
register: result
- name: assert group was deleted
assert:
that:
- result.resources | length == 0
- name: Get admins group info
kubernetes.core.k8s_info:
kind: Group
version: "user.openshift.io/v1"
name: admins
register: result
- name: assert group was not deleted
assert:
that:
- result.resources | length == 1
- name: Prune groups once again (idempotency)
community.okd.openshift_adm_groups_sync:
config: "{{ config_simple_tolerate_not_found }}"
state: absent
register: result
- name: Assert nothing changed
assert:
that:
- result is not changed
- result.groups | length == 0
always:
- name: Delete openshift groups if existing

View File

@@ -1,293 +1,294 @@
---
- block:
- set_fact:
test_sa: "clusterrole-sa"
test_ns: "clusterrole-ns"
- set_fact:
test_sa: "clusterrole-sa"
test_ns: "clusterrole-ns"
- name: Ensure namespace
kubernetes.core.k8s:
kind: Namespace
name: "{{ test_ns }}"
- name: Ensure namespace
kubernetes.core.k8s:
kind: Namespace
name: "{{ test_ns }}"
- name: Get cluster information
kubernetes.core.k8s_cluster_info:
register: cluster_info
no_log: true
- name: Get cluster information
kubernetes.core.k8s_cluster_info:
register: cluster_info
no_log: true
- set_fact:
cluster_host: "{{ cluster_info['connection']['host'] }}"
- set_fact:
cluster_host: "{{ cluster_info['connection']['host'] }}"
- name: Create Service account
kubernetes.core.k8s:
definition:
apiVersion: v1
- name: Create Service account
kubernetes.core.k8s:
definition:
apiVersion: v1
kind: ServiceAccount
metadata:
name: "{{ test_sa }}"
namespace: "{{ test_ns }}"
- name: Read Service Account
kubernetes.core.k8s_info:
kind: ServiceAccount
metadata:
name: "{{ test_sa }}"
namespace: "{{ test_ns }}"
namespace: "{{ test_ns }}"
name: "{{ test_sa }}"
register: result
- name: Read Service Account
kubernetes.core.k8s_info:
kind: ServiceAccount
namespace: "{{ test_ns }}"
name: "{{ test_sa }}"
register: result
- set_fact:
secret_token: "{{ result.resources[0]['secrets'][0]['name'] }}"
- set_fact:
secret_token: "{{ result.resources[0]['secrets'][0]['name'] }}"
- name: Get secret details
kubernetes.core.k8s_info:
kind: Secret
namespace: '{{ test_ns }}'
name: '{{ secret_token }}'
register: _secret
retries: 10
delay: 10
until:
- ("'openshift.io/token-secret.value' in _secret.resources[0]['metadata']['annotations']") or ("'token' in _secret.resources[0]['data']")
- name: Get secret details
kubernetes.core.k8s_info:
kind: Secret
namespace: '{{ test_ns }}'
name: '{{ secret_token }}'
register: _secret
retries: 10
delay: 10
until:
- ("'openshift.io/token-secret.value' in _secret.resources[0]['metadata']['annotations']") or ("'token' in _secret.resources[0]['data']")
- set_fact:
api_token: "{{ _secret.resources[0]['metadata']['annotations']['openshift.io/token-secret.value'] }}"
when: "'openshift.io/token-secret.value' in _secret.resources[0]['metadata']['annotations']"
- set_fact:
api_token: "{{ _secret.resources[0]['metadata']['annotations']['openshift.io/token-secret.value'] }}"
when: "'openshift.io/token-secret.value' in _secret.resources[0]['metadata']['annotations']"
- set_fact:
api_token: "{{ _secret.resources[0]['data']['token'] | b64decode }}"
when: "'token' in _secret.resources[0]['data']"
- set_fact:
api_token: "{{ _secret.resources[0]['data']['token'] | b64decode }}"
when: "'token' in _secret.resources[0]['data']"
- name: list Node should failed (forbidden user)
kubernetes.core.k8s_info:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
kind: Node
register: error
ignore_errors: true
- name: list Node should failed (forbidden user)
kubernetes.core.k8s_info:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
kind: Node
register: error
ignore_errors: true
- assert:
that:
- '"nodes is forbidden: User" in error.msg'
- assert:
that:
- '"nodes is forbidden: User" in error.msg'
- name: list Pod for all namespace should failed
kubernetes.core.k8s_info:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
kind: Pod
register: error
ignore_errors: true
- name: list Pod for all namespace should failed
kubernetes.core.k8s_info:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
kind: Pod
register: error
ignore_errors: true
- assert:
that:
- '"pods is forbidden: User" in error.msg'
- assert:
that:
- '"pods is forbidden: User" in error.msg'
- name: list Pod for test namespace should failed
kubernetes.core.k8s_info:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
kind: Pod
namespace: "{{ test_ns }}"
register: error
ignore_errors: true
- name: list Pod for test namespace should failed
kubernetes.core.k8s_info:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
kind: Pod
namespace: "{{ test_ns }}"
register: error
ignore_errors: true
- assert:
that:
- '"pods is forbidden: User" in error.msg'
- assert:
that:
- '"pods is forbidden: User" in error.msg'
- set_fact:
test_labels:
phase: dev
cluster_roles:
- name: pod-manager
resources:
- pods
verbs:
- list
api_version_binding: "authorization.openshift.io/v1"
- name: node-manager
resources:
- nodes
verbs:
- list
api_version_binding: "rbac.authorization.k8s.io/v1"
- set_fact:
test_labels:
phase: dev
cluster_roles:
- name: pod-manager
resources:
- pods
verbs:
- list
api_version_binding: "authorization.openshift.io/v1"
- name: node-manager
resources:
- nodes
verbs:
- list
api_version_binding: "rbac.authorization.k8s.io/v1"
- name: Create cluster roles
kubernetes.core.k8s:
definition:
kind: ClusterRole
apiVersion: "rbac.authorization.k8s.io/v1"
metadata:
name: "{{ item.name }}"
labels: "{{ test_labels }}"
rules:
- apiGroups: [""]
resources: "{{ item.resources }}"
verbs: "{{ item.verbs }}"
with_items: '{{ cluster_roles }}'
- name: Create Role Binding (namespaced)
kubernetes.core.k8s:
definition:
kind: RoleBinding
apiVersion: "rbac.authorization.k8s.io/v1"
metadata:
name: "{{ cluster_roles[0].name }}-binding"
namespace: "{{ test_ns }}"
labels: "{{ test_labels }}"
subjects:
- kind: ServiceAccount
name: "{{ test_sa }}"
namespace: "{{ test_ns }}"
apiGroup: ""
roleRef:
- name: Create cluster roles
kubernetes.core.k8s:
definition:
kind: ClusterRole
name: "{{ cluster_roles[0].name }}"
apiGroup: ""
apiVersion: "rbac.authorization.k8s.io/v1"
metadata:
name: "{{ item.name }}"
labels: "{{ test_labels }}"
rules:
- apiGroups: [""]
resources: "{{ item.resources }}"
verbs: "{{ item.verbs }}"
with_items: '{{ cluster_roles }}'
- name: list Pod for all namespace should failed
kubernetes.core.k8s_info:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
kind: Pod
register: error
ignore_errors: true
- name: Create Role Binding (namespaced)
kubernetes.core.k8s:
definition:
kind: RoleBinding
apiVersion: "rbac.authorization.k8s.io/v1"
metadata:
name: "{{ cluster_roles[0].name }}-binding"
namespace: "{{ test_ns }}"
labels: "{{ test_labels }}"
subjects:
- kind: ServiceAccount
name: "{{ test_sa }}"
namespace: "{{ test_ns }}"
apiGroup: ""
roleRef:
kind: ClusterRole
name: "{{ cluster_roles[0].name }}"
apiGroup: ""
- assert:
that:
- '"pods is forbidden: User" in error.msg'
- name: list Pod for all namespace should failed
kubernetes.core.k8s_info:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
kind: Pod
register: error
ignore_errors: true
- name: list Pod for test namespace should succeed
kubernetes.core.k8s_info:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
kind: Pod
namespace: "{{ test_ns }}"
no_log: true
- assert:
that:
- '"pods is forbidden: User" in error.msg'
- name: Create Cluster role Binding
kubernetes.core.k8s:
definition:
kind: ClusterRoleBinding
apiVersion: "{{ item.api_version_binding }}"
metadata:
name: "{{ item.name }}-binding"
labels: "{{ test_labels }}"
subjects:
- kind: ServiceAccount
name: "{{ test_sa }}"
namespace: "{{ test_ns }}"
apiGroup: ""
roleRef:
kind: ClusterRole
name: "{{ item.name }}"
apiGroup: ""
with_items: "{{ cluster_roles }}"
- name: list Pod for test namespace should succeed
kubernetes.core.k8s_info:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
kind: Pod
namespace: "{{ test_ns }}"
no_log: true
- name: list Pod for all namespace should succeed
kubernetes.core.k8s_info:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
kind: Pod
no_log: true
- name: Create Cluster role Binding
kubernetes.core.k8s:
definition:
kind: ClusterRoleBinding
apiVersion: "{{ item.api_version_binding }}"
metadata:
name: "{{ item.name }}-binding"
labels: "{{ test_labels }}"
subjects:
- kind: ServiceAccount
name: "{{ test_sa }}"
namespace: "{{ test_ns }}"
apiGroup: ""
roleRef:
kind: ClusterRole
name: "{{ item.name }}"
apiGroup: ""
with_items: "{{ cluster_roles }}"
- name: list Pod for test namespace should succeed
kubernetes.core.k8s_info:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
kind: Pod
namespace: "{{ test_ns }}"
no_log: true
- name: list Pod for all namespace should succeed
kubernetes.core.k8s_info:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
kind: Pod
no_log: true
- name: list Node using ServiceAccount
kubernetes.core.k8s_info:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
kind: Node
namespace: "{{ test_ns }}"
no_log: true
- name: list Pod for test namespace should succeed
kubernetes.core.k8s_info:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
kind: Pod
namespace: "{{ test_ns }}"
no_log: true
- name: Prune clusterroles (check mode)
community.okd.openshift_adm_prune_auth:
resource: clusterroles
label_selectors:
- phase=dev
register: check
check_mode: true
- name: list Node using ServiceAccount
kubernetes.core.k8s_info:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
kind: Node
namespace: "{{ test_ns }}"
no_log: true
- name: validate clusterrole binding candidates for prune
assert:
that:
- '"{{ item.name }}-binding" in check.cluster_role_binding'
- '"{{ test_ns }}/{{ cluster_roles[0].name }}-binding" in check.role_binding'
with_items: "{{ cluster_roles }}"
- name: Prune clusterroles (check mode)
community.okd.openshift_adm_prune_auth:
resource: clusterroles
label_selectors:
- phase=dev
register: check
check_mode: true
- name: Prune Cluster Role for managing Pod
community.okd.openshift_adm_prune_auth:
resource: clusterroles
name: "{{ cluster_roles[0].name }}"
- name: validate clusterrole binding candidates for prune
assert:
that:
- '"{{ item.name }}-binding" in check.cluster_role_binding'
- '"{{ test_ns }}/{{ cluster_roles[0].name }}-binding" in check.role_binding'
with_items: "{{ cluster_roles }}"
- name: list Pod for all namespace should failed
kubernetes.core.k8s_info:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
kind: Pod
register: error
no_log: true
ignore_errors: true
- name: Prune Cluster Role for managing Pod
community.okd.openshift_adm_prune_auth:
resource: clusterroles
name: "{{ cluster_roles[0].name }}"
- assert:
that:
- '"pods is forbidden: User" in error.msg'
- name: list Pod for all namespace should failed
kubernetes.core.k8s_info:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
kind: Pod
register: error
no_log: true
ignore_errors: true
- name: list Pod for test namespace should failed
kubernetes.core.k8s_info:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
kind: Pod
namespace: "{{ test_ns }}"
register: error
no_log: true
ignore_errors: true
- assert:
that:
- '"pods is forbidden: User" in error.msg'
- assert:
that:
- '"pods is forbidden: User" in error.msg'
- name: list Pod for test namespace should failed
kubernetes.core.k8s_info:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
kind: Pod
namespace: "{{ test_ns }}"
register: error
no_log: true
ignore_errors: true
- name: list Node using ServiceAccount
kubernetes.core.k8s_info:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
kind: Node
namespace: "{{ test_ns }}"
no_log: true
- assert:
that:
- '"pods is forbidden: User" in error.msg'
- name: Prune clusterroles (remaining)
community.okd.openshift_adm_prune_auth:
resource: clusterroles
label_selectors:
- phase=dev
- name: list Node using ServiceAccount
kubernetes.core.k8s_info:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
kind: Node
namespace: "{{ test_ns }}"
no_log: true
- name: list Node using ServiceAccount should fail
kubernetes.core.k8s_info:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
kind: Node
namespace: "{{ test_ns }}"
register: error
ignore_errors: true
- name: Prune clusterroles (remaining)
community.okd.openshift_adm_prune_auth:
resource: clusterroles
label_selectors:
- phase=dev
- assert:
that:
- '"nodes is forbidden: User" in error.msg'
- name: list Node using ServiceAccount should fail
kubernetes.core.k8s_info:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
kind: Node
namespace: "{{ test_ns }}"
register: error
ignore_errors: true
- assert:
that:
- '"nodes is forbidden: User" in error.msg'
always:
- name: Ensure namespace is deleted

View File

@@ -1,335 +1,336 @@
---
- block:
- set_fact:
test_ns: "prune-roles"
sa_name: "roles-sa"
pod_name: "pod-prune"
role_definition:
- name: pod-list
labels:
action: list
verbs:
- list
role_binding:
api_version: rbac.authorization.k8s.io/v1
- name: pod-create
labels:
action: create
verbs:
- create
- get
role_binding:
api_version: authorization.openshift.io/v1
- name: pod-delete
labels:
action: delete
verbs:
- delete
role_binding:
api_version: rbac.authorization.k8s.io/v1
- set_fact:
test_ns: "prune-roles"
sa_name: "roles-sa"
pod_name: "pod-prune"
role_definition:
- name: pod-list
labels:
action: list
verbs:
- list
role_binding:
api_version: rbac.authorization.k8s.io/v1
- name: pod-create
labels:
action: create
verbs:
- create
- get
role_binding:
api_version: authorization.openshift.io/v1
- name: pod-delete
labels:
action: delete
verbs:
- delete
role_binding:
api_version: rbac.authorization.k8s.io/v1
- name: Ensure namespace
kubernetes.core.k8s:
kind: Namespace
name: '{{ test_ns }}'
- name: Ensure namespace
kubernetes.core.k8s:
kind: Namespace
name: '{{ test_ns }}'
- name: Get cluster information
kubernetes.core.k8s_cluster_info:
register: cluster_info
no_log: true
- name: Get cluster information
kubernetes.core.k8s_cluster_info:
register: cluster_info
no_log: true
- set_fact:
cluster_host: "{{ cluster_info['connection']['host'] }}"
- set_fact:
cluster_host: "{{ cluster_info['connection']['host'] }}"
- name: Create Service account
kubernetes.core.k8s:
definition:
apiVersion: v1
- name: Create Service account
kubernetes.core.k8s:
definition:
apiVersion: v1
kind: ServiceAccount
metadata:
name: '{{ sa_name }}'
namespace: '{{ test_ns }}'
- name: Read Service Account
kubernetes.core.k8s_info:
kind: ServiceAccount
metadata:
name: '{{ sa_name }}'
namespace: '{{ test_ns }}'
namespace: '{{ test_ns }}'
name: '{{ sa_name }}'
register: sa_out
- name: Read Service Account
kubernetes.core.k8s_info:
kind: ServiceAccount
namespace: '{{ test_ns }}'
name: '{{ sa_name }}'
register: sa_out
- set_fact:
secret_token: "{{ sa_out.resources[0]['secrets'][0]['name'] }}"
- set_fact:
secret_token: "{{ sa_out.resources[0]['secrets'][0]['name'] }}"
- name: Get secret details
kubernetes.core.k8s_info:
kind: Secret
namespace: '{{ test_ns }}'
name: '{{ secret_token }}'
register: r_secret
retries: 10
delay: 10
until:
- ("'openshift.io/token-secret.value' in r_secret.resources[0]['metadata']['annotations']") or ("'token' in r_secret.resources[0]['data']")
- name: Get secret details
kubernetes.core.k8s_info:
kind: Secret
namespace: '{{ test_ns }}'
name: '{{ secret_token }}'
register: r_secret
retries: 10
delay: 10
until:
- ("'openshift.io/token-secret.value' in r_secret.resources[0]['metadata']['annotations']") or ("'token' in r_secret.resources[0]['data']")
- set_fact:
api_token: "{{ r_secret.resources[0]['metadata']['annotations']['openshift.io/token-secret.value'] }}"
when: "'openshift.io/token-secret.value' in r_secret.resources[0]['metadata']['annotations']"
- set_fact:
api_token: "{{ r_secret.resources[0]['metadata']['annotations']['openshift.io/token-secret.value'] }}"
when: "'openshift.io/token-secret.value' in r_secret.resources[0]['metadata']['annotations']"
- set_fact:
api_token: "{{ r_secret.resources[0]['data']['token'] | b64decode }}"
when: "'token' in r_secret.resources[0]['data']"
- set_fact:
api_token: "{{ r_secret.resources[0]['data']['token'] | b64decode }}"
when: "'token' in r_secret.resources[0]['data']"
- name: list resources using service account
kubernetes.core.k8s_info:
api_key: '{{ api_token }}'
host: '{{ cluster_host }}'
validate_certs: no
kind: Pod
namespace: '{{ test_ns }}'
register: error
ignore_errors: true
- assert:
that:
- '"pods is forbidden: User" in error.msg'
- name: list resources using service account
kubernetes.core.k8s_info:
api_key: '{{ api_token }}'
host: '{{ cluster_host }}'
validate_certs: no
kind: Pod
namespace: '{{ test_ns }}'
register: error
ignore_errors: true
- name: Create a role to manage Pod from namespace "{{ test_ns }}"
kubernetes.core.k8s:
definition:
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
namespace: "{{ test_ns }}"
name: "{{ item.name }}"
labels: "{{ item.labels }}"
rules:
- apiGroups: [""]
resources: ["pods"]
verbs: "{{ item.verbs }}"
with_items: "{{ role_definition }}"
- assert:
that:
- '"pods is forbidden: User" in error.msg'
- name: Create Role Binding
kubernetes.core.k8s:
definition:
kind: RoleBinding
apiVersion: "{{ item.role_binding.api_version }}"
metadata:
name: "{{ item.name }}-bind"
namespace: "{{ test_ns }}"
subjects:
- kind: ServiceAccount
name: "{{ sa_name }}"
namespace: "{{ test_ns }}"
apiGroup: ""
roleRef:
- name: Create a role to manage Pod from namespace "{{ test_ns }}"
kubernetes.core.k8s:
definition:
kind: Role
name: "{{ item.name }}"
namespace: "{{ test_ns }}"
apiGroup: ""
with_items: "{{ role_definition }}"
apiVersion: rbac.authorization.k8s.io/v1
metadata:
namespace: "{{ test_ns }}"
name: "{{ item.name }}"
labels: "{{ item.labels }}"
rules:
- apiGroups: [""]
resources: ["pods"]
verbs: "{{ item.verbs }}"
with_items: "{{ role_definition }}"
- name: Create Pod should succeed
kubernetes.core.k8s:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
namespace: "{{ test_ns }}"
definition:
- name: Create Role Binding
kubernetes.core.k8s:
definition:
kind: RoleBinding
apiVersion: "{{ item.role_binding.api_version }}"
metadata:
name: "{{ item.name }}-bind"
namespace: "{{ test_ns }}"
subjects:
- kind: ServiceAccount
name: "{{ sa_name }}"
namespace: "{{ test_ns }}"
apiGroup: ""
roleRef:
kind: Role
name: "{{ item.name }}"
namespace: "{{ test_ns }}"
apiGroup: ""
with_items: "{{ role_definition }}"
- name: Create Pod should succeed
kubernetes.core.k8s:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
namespace: "{{ test_ns }}"
definition:
kind: Pod
metadata:
name: "{{ pod_name }}"
spec:
containers:
- name: python
image: python:3.7-alpine
command:
- /bin/sh
- -c
- while true; do echo $(date); sleep 15; done
imagePullPolicy: IfNotPresent
register: result
- name: assert pod creation succeed
assert:
that:
- result is successful
- name: List Pod
kubernetes.core.k8s_info:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
namespace: "{{ test_ns }}"
kind: Pod
metadata:
name: "{{ pod_name }}"
spec:
containers:
- name: python
image: python:3.7-alpine
command:
- /bin/sh
- -c
- while true; do echo $(date); sleep 15; done
imagePullPolicy: IfNotPresent
register: result
register: result
- name: assert pod creation succeed
assert:
that:
- result is successful
- name: assert user is still authorize to list pods
assert:
that:
- result is successful
- name: List Pod
kubernetes.core.k8s_info:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
namespace: "{{ test_ns }}"
kind: Pod
register: result
- name: Prune auth roles (check mode)
community.okd.openshift_adm_prune_auth:
resource: roles
namespace: "{{ test_ns }}"
register: check
check_mode: true
- name: assert user is still authorize to list pods
assert:
that:
- result is successful
- name: validate that list role binding are candidates for prune
assert:
that: '"{{ test_ns }}/{{ item.name }}-bind" in check.role_binding'
with_items: "{{ role_definition }}"
- name: Prune auth roles (check mode)
community.okd.openshift_adm_prune_auth:
resource: roles
namespace: "{{ test_ns }}"
register: check
check_mode: true
- name: Prune resource using label_selectors option
community.okd.openshift_adm_prune_auth:
resource: roles
namespace: "{{ test_ns }}"
label_selectors:
- action=delete
register: prune
- name: validate that list role binding are candidates for prune
assert:
that: '"{{ test_ns }}/{{ item.name }}-bind" in check.role_binding'
with_items: "{{ role_definition }}"
- name: assert that role binding 'delete' was pruned
assert:
that:
- prune is changed
- '"{{ test_ns }}/{{ role_definition[2].name }}-bind" in check.role_binding'
- name: Prune resource using label_selectors option
community.okd.openshift_adm_prune_auth:
resource: roles
namespace: "{{ test_ns }}"
label_selectors:
- action=delete
register: prune
- name: assert that role binding 'delete' was pruned
assert:
that:
- prune is changed
- '"{{ test_ns }}/{{ role_definition[2].name }}-bind" in check.role_binding'
- name: assert that user could not delete pod anymore
kubernetes.core.k8s:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
state: absent
namespace: "{{ test_ns }}"
kind: Pod
name: "{{ pod_name }}"
register: result
ignore_errors: true
- name: assert pod deletion failed due to forbidden user
assert:
that:
- '"forbidden: User" in error.msg'
- name: List Pod
kubernetes.core.k8s_info:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
namespace: "{{ test_ns }}"
kind: Pod
register: result
- name: assert user is still able to list pods
assert:
that:
- result is successful
- name: Create Pod should succeed
kubernetes.core.k8s:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
namespace: "{{ test_ns }}"
definition:
- name: assert that user could not delete pod anymore
kubernetes.core.k8s:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
state: absent
namespace: "{{ test_ns }}"
kind: Pod
metadata:
name: "{{ pod_name }}-1"
spec:
containers:
- name: python
image: python:3.7-alpine
command:
- /bin/sh
- -c
- while true; do echo $(date); sleep 15; done
imagePullPolicy: IfNotPresent
register: result
name: "{{ pod_name }}"
register: result
ignore_errors: true
- name: assert user is still authorize to create pod
assert:
that:
- result is successful
- name: assert pod deletion failed due to forbidden user
assert:
that:
- '"forbidden: User" in error.msg'
- name: Prune role using name
community.okd.openshift_adm_prune_auth:
resource: roles
namespace: "{{ test_ns }}"
name: "{{ role_definition[1].name }}"
register: prune
- name: assert that role binding 'create' was pruned
assert:
that:
- prune is changed
- '"{{ test_ns }}/{{ role_definition[1].name }}-bind" in check.role_binding'
- name: Create Pod (should failed)
kubernetes.core.k8s:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
namespace: "{{ test_ns }}"
definition:
- name: List Pod
kubernetes.core.k8s_info:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
namespace: "{{ test_ns }}"
kind: Pod
metadata:
name: "{{ pod_name }}-2"
spec:
containers:
- name: python
image: python:3.7-alpine
command:
- /bin/sh
- -c
- while true; do echo $(date); sleep 15; done
imagePullPolicy: IfNotPresent
register: result
ignore_errors: true
register: result
- name: assert user is not authorize to create pod anymore
assert:
that:
- '"forbidden: User" in error.msg'
- name: assert user is still able to list pods
assert:
that:
- result is successful
- name: List Pod
kubernetes.core.k8s_info:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
namespace: "{{ test_ns }}"
kind: Pod
register: result
- name: Create Pod should succeed
kubernetes.core.k8s:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
namespace: "{{ test_ns }}"
definition:
kind: Pod
metadata:
name: "{{ pod_name }}-1"
spec:
containers:
- name: python
image: python:3.7-alpine
command:
- /bin/sh
- -c
- while true; do echo $(date); sleep 15; done
imagePullPolicy: IfNotPresent
register: result
- name: assert user is still able to list pods
assert:
that:
- result is successful
- name: assert user is still authorize to create pod
assert:
that:
- result is successful
- name: Prune all role for namespace (neither name nor label_selectors are specified)
community.okd.openshift_adm_prune_auth:
resource: roles
namespace: "{{ test_ns }}"
register: prune
- name: Prune role using name
community.okd.openshift_adm_prune_auth:
resource: roles
namespace: "{{ test_ns }}"
name: "{{ role_definition[1].name }}"
register: prune
- name: assert that role binding 'list' was pruned
assert:
that:
- prune is changed
- '"{{ test_ns }}/{{ role_definition[0].name }}-bind" in check.role_binding'
- name: assert that role binding 'create' was pruned
assert:
that:
- prune is changed
- '"{{ test_ns }}/{{ role_definition[1].name }}-bind" in check.role_binding'
- name: List Pod
kubernetes.core.k8s_info:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
namespace: "{{ test_ns }}"
kind: Pod
register: result
ignore_errors: true
- name: Create Pod (should failed)
kubernetes.core.k8s:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
namespace: "{{ test_ns }}"
definition:
kind: Pod
metadata:
name: "{{ pod_name }}-2"
spec:
containers:
- name: python
image: python:3.7-alpine
command:
- /bin/sh
- -c
- while true; do echo $(date); sleep 15; done
imagePullPolicy: IfNotPresent
register: result
ignore_errors: true
- name: assert user is not authorize to list pod anymore
assert:
that:
- '"forbidden: User" in error.msg'
- name: assert user is not authorize to create pod anymore
assert:
that:
- '"forbidden: User" in error.msg'
- name: List Pod
kubernetes.core.k8s_info:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
namespace: "{{ test_ns }}"
kind: Pod
register: result
- name: assert user is still able to list pods
assert:
that:
- result is successful
- name: Prune all role for namespace (neither name nor label_selectors are specified)
community.okd.openshift_adm_prune_auth:
resource: roles
namespace: "{{ test_ns }}"
register: prune
- name: assert that role binding 'list' was pruned
assert:
that:
- prune is changed
- '"{{ test_ns }}/{{ role_definition[0].name }}-bind" in check.role_binding'
- name: List Pod
kubernetes.core.k8s_info:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
namespace: "{{ test_ns }}"
kind: Pod
register: result
ignore_errors: true
- name: assert user is not authorize to list pod anymore
assert:
that:
- '"forbidden: User" in error.msg'
always:
- name: Ensure namespace is deleted

View File

@@ -1,255 +1,255 @@
---
- name: Prune deployments
block:
- set_fact:
dc_name: "hello"
deployment_ns: "prune-deployments"
deployment_ns_2: "prune-deployments-2"
- set_fact:
dc_name: "hello"
deployment_ns: "prune-deployments"
deployment_ns_2: "prune-deployments-2"
- name: Ensure namespace
community.okd.k8s:
kind: Namespace
name: '{{ deployment_ns }}'
- name: Ensure namespace
community.okd.k8s:
kind: Namespace
name: '{{ deployment_ns }}'
- name: Create deployment config
community.okd.k8s:
namespace: '{{ deployment_ns }}'
definition:
kind: DeploymentConfig
apiVersion: apps.openshift.io/v1
metadata:
name: '{{ dc_name }}'
spec:
replicas: 1
selector:
- name: Create deployment config
community.okd.k8s:
namespace: '{{ deployment_ns }}'
definition:
kind: DeploymentConfig
apiVersion: apps.openshift.io/v1
metadata:
name: '{{ dc_name }}'
template:
metadata:
labels:
name: '{{ dc_name }}'
spec:
containers:
- name: hello-openshift
imagePullPolicy: IfNotPresent
image: python:3.7-alpine
command: [ "/bin/sh", "-c", "while true;do date;sleep 2s; done"]
wait: yes
spec:
replicas: 1
selector:
name: '{{ dc_name }}'
template:
metadata:
labels:
name: '{{ dc_name }}'
spec:
containers:
- name: hello-openshift
imagePullPolicy: IfNotPresent
image: python:3.7-alpine
command: [ "/bin/sh", "-c", "while true;do date;sleep 2s; done"]
wait: yes
- name: prune deployments (no candidate DeploymentConfig)
community.okd.openshift_adm_prune_deployments:
namespace: "{{ deployment_ns }}"
register: test_prune
- name: prune deployments (no candidate DeploymentConfig)
community.okd.openshift_adm_prune_deployments:
namespace: "{{ deployment_ns }}"
register: test_prune
- assert:
that:
- test_prune is not changed
- test_prune.replication_controllers | length == 0
- assert:
that:
- test_prune is not changed
- test_prune.replication_controllers | length == 0
- name: Update DeploymentConfig - set replicas to 0
community.okd.k8s:
namespace: "{{ deployment_ns }}"
definition:
kind: DeploymentConfig
apiVersion: "apps.openshift.io/v1"
metadata:
name: "{{ dc_name }}"
spec:
replicas: 0
selector:
- name: Update DeploymentConfig - set replicas to 0
community.okd.k8s:
namespace: "{{ deployment_ns }}"
definition:
kind: DeploymentConfig
apiVersion: "apps.openshift.io/v1"
metadata:
name: "{{ dc_name }}"
template:
metadata:
labels:
name: "{{ dc_name }}"
spec:
containers:
- name: hello-openshift
imagePullPolicy: IfNotPresent
image: python:3.7-alpine
command: [ "/bin/sh", "-c", "while true;do date;sleep 2s; done"]
wait: yes
spec:
replicas: 0
selector:
name: "{{ dc_name }}"
template:
metadata:
labels:
name: "{{ dc_name }}"
spec:
containers:
- name: hello-openshift
imagePullPolicy: IfNotPresent
image: python:3.7-alpine
command: [ "/bin/sh", "-c", "while true;do date;sleep 2s; done"]
wait: yes
- name: Wait for ReplicationController candidate for pruning
kubernetes.core.k8s_info:
kind: ReplicationController
namespace: "{{ deployment_ns }}"
register: result
retries: 10
delay: 30
until:
- result.resources.0.metadata.annotations["openshift.io/deployment.phase"] in ("Failed", "Complete")
- name: Wait for ReplicationController candidate for pruning
kubernetes.core.k8s_info:
kind: ReplicationController
namespace: "{{ deployment_ns }}"
register: result
retries: 10
delay: 30
until:
- result.resources.0.metadata.annotations["openshift.io/deployment.phase"] in ("Failed", "Complete")
- name: Prune deployments - should delete 1 ReplicationController
community.okd.openshift_adm_prune_deployments:
namespace: "{{ deployment_ns }}"
check_mode: yes
register: test_prune
- name: Prune deployments - should delete 1 ReplicationController
community.okd.openshift_adm_prune_deployments:
namespace: "{{ deployment_ns }}"
check_mode: yes
register: test_prune
- name: Read ReplicationController
kubernetes.core.k8s_info:
kind: ReplicationController
namespace: "{{ deployment_ns }}"
register: replications
- name: Read ReplicationController
kubernetes.core.k8s_info:
kind: ReplicationController
namespace: "{{ deployment_ns }}"
register: replications
- name: Assert that Replication controller was not deleted
assert:
that:
- replications.resources | length == 1
- 'replications.resources.0.metadata.name is match("{{ dc_name }}-*")'
- name: Assert that Replication controller was not deleted
assert:
that:
- replications.resources | length == 1
- 'replications.resources.0.metadata.name is match("{{ dc_name }}-*")'
- name: Assure that candidate ReplicationController was found for pruning
assert:
that:
- test_prune is changed
- test_prune.replication_controllers | length == 1
- test_prune.replication_controllers.0.metadata.name == replications.resources.0.metadata.name
- test_prune.replication_controllers.0.metadata.namespace == replications.resources.0.metadata.namespace
- name: Assure that candidate ReplicationController was found for pruning
assert:
that:
- test_prune is changed
- test_prune.replication_controllers | length == 1
- test_prune.replication_controllers.0.metadata.name == replications.resources.0.metadata.name
- test_prune.replication_controllers.0.metadata.namespace == replications.resources.0.metadata.namespace
- name: Prune deployments - keep younger than 45min (check_mode)
community.okd.openshift_adm_prune_deployments:
keep_younger_than: 45
namespace: "{{ deployment_ns }}"
check_mode: true
register: keep_younger
- name: Prune deployments - keep younger than 45min (check_mode)
community.okd.openshift_adm_prune_deployments:
keep_younger_than: 45
namespace: "{{ deployment_ns }}"
check_mode: true
register: keep_younger
- name: assert no candidate was found
assert:
that:
- keep_younger is not changed
- keep_younger.replication_controllers == []
- name: assert no candidate was found
assert:
that:
- keep_younger is not changed
- keep_younger.replication_controllers == []
- name: Ensure second namespace is created
community.okd.k8s:
kind: Namespace
name: '{{ deployment_ns_2 }}'
- name: Ensure second namespace is created
community.okd.k8s:
kind: Namespace
name: '{{ deployment_ns_2 }}'
- name: Create deployment config from 2nd namespace
community.okd.k8s:
namespace: '{{ deployment_ns_2 }}'
definition:
kind: DeploymentConfig
apiVersion: apps.openshift.io/v1
metadata:
name: '{{ dc_name }}2'
spec:
replicas: 1
selector:
- name: Create deployment config from 2nd namespace
community.okd.k8s:
namespace: '{{ deployment_ns_2 }}'
definition:
kind: DeploymentConfig
apiVersion: apps.openshift.io/v1
metadata:
name: '{{ dc_name }}2'
template:
metadata:
labels:
name: '{{ dc_name }}2'
spec:
containers:
- name: hello-openshift
imagePullPolicy: IfNotPresent
image: python:3.7-alpine
command: [ "/bin/sh", "-c", "while true;do date;sleep 2s; done"]
wait: yes
spec:
replicas: 1
selector:
name: '{{ dc_name }}2'
template:
metadata:
labels:
name: '{{ dc_name }}2'
spec:
containers:
- name: hello-openshift
imagePullPolicy: IfNotPresent
image: python:3.7-alpine
command: [ "/bin/sh", "-c", "while true;do date;sleep 2s; done"]
wait: yes
- name: Stop deployment config - replicas = 0
community.okd.k8s:
namespace: '{{ deployment_ns_2 }}'
definition:
kind: DeploymentConfig
apiVersion: apps.openshift.io/v1
metadata:
name: '{{ dc_name }}2'
spec:
replicas: 0
selector:
- name: Stop deployment config - replicas = 0
community.okd.k8s:
namespace: '{{ deployment_ns_2 }}'
definition:
kind: DeploymentConfig
apiVersion: apps.openshift.io/v1
metadata:
name: '{{ dc_name }}2'
template:
metadata:
labels:
name: '{{ dc_name }}2'
spec:
containers:
- name: hello-openshift
imagePullPolicy: IfNotPresent
image: python:3.7-alpine
command: [ "/bin/sh", "-c", "while true;do date;sleep 2s; done"]
wait: yes
spec:
replicas: 0
selector:
name: '{{ dc_name }}2'
template:
metadata:
labels:
name: '{{ dc_name }}2'
spec:
containers:
- name: hello-openshift
imagePullPolicy: IfNotPresent
image: python:3.7-alpine
command: [ "/bin/sh", "-c", "while true;do date;sleep 2s; done"]
wait: yes
- name: Wait for ReplicationController candidate for pruning
kubernetes.core.k8s_info:
kind: ReplicationController
namespace: "{{ deployment_ns_2 }}"
register: result
retries: 10
delay: 30
until:
- result.resources.0.metadata.annotations["openshift.io/deployment.phase"] in ("Failed", "Complete")
- name: Wait for ReplicationController candidate for pruning
kubernetes.core.k8s_info:
kind: ReplicationController
namespace: "{{ deployment_ns_2 }}"
register: result
retries: 10
delay: 30
until:
- result.resources.0.metadata.annotations["openshift.io/deployment.phase"] in ("Failed", "Complete")
# Prune from one namespace should not have any effect on others namespaces
- name: Prune deployments from 2nd namespace
community.okd.openshift_adm_prune_deployments:
namespace: "{{ deployment_ns_2 }}"
check_mode: yes
register: test_prune
# Prune from one namespace should not have any effect on others namespaces
- name: Prune deployments from 2nd namespace
community.okd.openshift_adm_prune_deployments:
namespace: "{{ deployment_ns_2 }}"
check_mode: yes
register: test_prune
- name: Assure that candidate ReplicationController was found for pruning
assert:
that:
- test_prune is changed
- test_prune.replication_controllers | length == 1
- "test_prune.replication_controllers.0.metadata.namespace == deployment_ns_2"
- name: Assure that candidate ReplicationController was found for pruning
assert:
that:
- test_prune is changed
- test_prune.replication_controllers | length == 1
- "test_prune.replication_controllers.0.metadata.namespace == deployment_ns_2"
# Prune without namespace option
- name: Prune from all namespace should update more deployments
community.okd.openshift_adm_prune_deployments:
check_mode: yes
register: no_namespace_prune
# Prune without namespace option
- name: Prune from all namespace should update more deployments
community.okd.openshift_adm_prune_deployments:
check_mode: yes
register: no_namespace_prune
- name: Assure multiple ReplicationController were found for pruning
assert:
that:
- no_namespace_prune is changed
- no_namespace_prune.replication_controllers | length == 2
# Execute Prune from 2nd namespace
- name: Read ReplicationController before Prune operation
kubernetes.core.k8s_info:
kind: ReplicationController
namespace: "{{ deployment_ns_2 }}"
register: replications
- name: Assure multiple ReplicationController were found for pruning
assert:
that:
- no_namespace_prune is changed
- no_namespace_prune.replication_controllers | length == 2
- assert:
that:
- replications.resources | length == 1
# Execute Prune from 2nd namespace
- name: Read ReplicationController before Prune operation
kubernetes.core.k8s_info:
kind: ReplicationController
namespace: "{{ deployment_ns_2 }}"
register: replications
- name: Prune DeploymentConfig from 2nd namespace
community.okd.openshift_adm_prune_deployments:
namespace: "{{ deployment_ns_2 }}"
register: _prune
- assert:
that:
- replications.resources | length == 1
- name: Assert DeploymentConfig was deleted
assert:
that:
- _prune is changed
- _prune.replication_controllers | length == 1
- _prune.replication_controllers.0.details.name == replications.resources.0.metadata.name
- name: Prune DeploymentConfig from 2nd namespace
community.okd.openshift_adm_prune_deployments:
namespace: "{{ deployment_ns_2 }}"
register: _prune
# Execute Prune without namespace option
- name: Read ReplicationController before Prune operation
kubernetes.core.k8s_info:
kind: ReplicationController
namespace: "{{ deployment_ns }}"
register: replications
- name: Assert DeploymentConfig was deleted
assert:
that:
- _prune is changed
- _prune.replication_controllers | length == 1
- _prune.replication_controllers.0.details.name == replications.resources.0.metadata.name
- assert:
that:
- replications.resources | length == 1
# Execute Prune without namespace option
- name: Read ReplicationController before Prune operation
kubernetes.core.k8s_info:
kind: ReplicationController
namespace: "{{ deployment_ns }}"
register: replications
- name: Prune from all namespace should update more deployments
community.okd.openshift_adm_prune_deployments:
register: _prune
- assert:
that:
- replications.resources | length == 1
- name: Assure multiple ReplicationController were found for pruning
assert:
that:
- _prune is changed
- _prune.replication_controllers | length > 0
- name: Prune from all namespace should update more deployments
community.okd.openshift_adm_prune_deployments:
register: _prune
- name: Assure multiple ReplicationController were found for pruning
assert:
that:
- _prune is changed
- _prune.replication_controllers | length > 0
always:
- name: Delete 1st namespace

View File

@@ -1,240 +1,245 @@
---
- block:
- set_fact:
build_ns: "builds"
build_config: "start-build"
is_name: "ruby"
prune_build: "prune-build"
- set_fact:
build_ns: "builds"
build_config: "start-build"
is_name: "ruby"
prune_build: "prune-build"
- name: Ensure namespace
kubernetes.core.k8s:
kind: Namespace
name: "{{ build_ns }}"
- name: Ensure namespace
kubernetes.core.k8s:
kind: Namespace
name: "{{ build_ns }}"
- name: Create ImageStream
community.okd.k8s:
namespace: "{{ build_ns }}"
definition:
apiVersion: image.openshift.io/v1
kind: ImageStream
metadata:
name: "{{ is_name }}"
spec:
lookupPolicy:
local: false
tags: []
- name: Create ImageStream
community.okd.k8s:
namespace: "{{ build_ns }}"
definition:
apiVersion: image.openshift.io/v1
kind: ImageStream
metadata:
name: "{{ is_name }}"
spec:
lookupPolicy:
local: false
tags: []
- name: Create build configuration
community.okd.k8s:
namespace: "{{ build_ns }}"
definition:
kind: BuildConfig
apiVersion: build.openshift.io/v1
metadata:
name: "{{ build_config }}"
spec:
source:
dockerfile: |
FROM openshift/ruby-22-centos7
RUN sleep 60s
USER ansible
strategy:
type: Docker
output:
to:
kind: "ImageStreamTag"
name: "{{ is_name }}:latest"
- name: Create build configuration
community.okd.k8s:
namespace: "{{ build_ns }}"
definition:
kind: BuildConfig
apiVersion: build.openshift.io/v1
metadata:
name: "{{ build_config }}"
spec:
source:
dockerfile: |
FROM openshift/ruby-22-centos7
RUN sleep 60s
USER ansible
strategy:
type: Docker
output:
to:
kind: "ImageStreamTag"
name: "{{ is_name }}:latest"
- name: Start Build from Build configuration
community.okd.openshift_build:
namespace: "{{ build_ns }}"
build_config_name: "{{ build_config }}"
register: new_build
- name: Start Build from Build configuration
community.okd.openshift_build:
namespace: "{{ build_ns }}"
build_config_name: "{{ build_config }}"
register: new_build
- name: Assert that a build has been created
assert:
that:
- new_build is changed
- new_build.builds.0.metadata.name == "{{ build_config }}-1"
- name: Assert that a build has been created
assert:
that:
- new_build is changed
- new_build.builds.0.metadata.name == "{{ build_config }}-1"
- name: Start a new Build from previous Build
community.okd.openshift_build:
namespace: "{{ build_ns }}"
build_name: "{{ new_build.builds.0.metadata.name }}"
register: rerun_build
- name: Start a new Build from previous Build
community.okd.openshift_build:
namespace: "{{ build_ns }}"
build_name: "{{ new_build.builds.0.metadata.name }}"
register: rerun_build
- name: Assert that another build has been created
assert:
that:
- rerun_build is changed
- rerun_build.builds.0.metadata.name == "{{ build_config }}-2"
- name: Assert that another build has been created
assert:
that:
- rerun_build is changed
- rerun_build.builds.0.metadata.name == "{{ build_config }}-2"
- name: Cancel first build created
community.okd.openshift_build:
namespace: "{{ build_ns }}"
build_name: "{{ build_config }}-1"
state: cancelled
wait: yes
register: cancel
- name: Cancel first build created
community.okd.openshift_build:
namespace: "{{ build_ns }}"
build_name: "{{ build_config }}-1"
state: cancelled
wait: yes
register: cancel
- name: Assert that the Build was cancelled
assert:
that:
- cancel is changed
- cancel.builds | length == 1
- cancel.builds.0.metadata.name == "{{ build_config }}-1"
- cancel.builds.0.metadata.namespace == "{{ build_ns }}"
- cancel.builds.0.status.cancelled
- name: Assert that the Build was cancelled
assert:
that:
- cancel is changed
- cancel.builds | length == 1
- cancel.builds.0.metadata.name == "{{ build_config }}-1"
- cancel.builds.0.metadata.namespace == "{{ build_ns }}"
- '"cancelled" in cancel.builds.0.status'
- cancel.builds.0.status.cancelled
- name: Get Build info
kubernetes.core.k8s_info:
version: build.openshift.io/v1
kind: Build
namespace: "{{ build_ns }}"
name: "{{ cancel.builds.0.metadata.name }}"
register: build
- name: Get info for 1st Build
kubernetes.core.k8s_info:
version: build.openshift.io/v1
kind: Build
namespace: "{{ build_ns }}"
name: "{{ cancel.builds.0.metadata.name }}"
register: build
- name: Assert that build phase is cancelled
assert:
that:
- build.resources | length == 1
- build.resources.0.status.cancelled
- build.resources.0.status.phase == 'Cancelled'
- name: Assert that build phase is cancelled
assert:
that:
- build.resources | length == 1
- '"cancelled" in build.resources.0.status'
- build.resources.0.status.cancelled
- build.resources.0.status.phase == 'Cancelled'
- name: Cancel and restart Build using build config name
community.okd.openshift_build:
namespace: "{{ build_ns }}"
build_config_name: "{{ build_config }}"
state: restarted
build_phases:
- Running
- New
register: restart
- name: Cancel and restart Build using build config name
community.okd.openshift_build:
namespace: "{{ build_ns }}"
build_config_name: "{{ build_config }}"
state: restarted
build_phases:
- Pending
- Running
- New
register: restart
- name: assert that new build was created
assert:
that:
- restart is changed
- restart.builds | length == 1
- 'restart.builds.0.metadata.name == "{{ build_config }}-3"'
- name: Get Build 2 info
kubernetes.core.k8s_info:
version: build.openshift.io/v1
kind: Build
namespace: "{{ build_ns }}"
name: "{{ build_config }}-2"
register: build
- name: assert that new build was created
assert:
that:
- restart is changed
- restart.builds | length == 1
- 'restart.builds.0.metadata.name == "{{ build_config }}-3"'
- name: Assert that build phase is cancelled
assert:
that:
- build.resources | length == 1
- build.resources.0.status.cancelled
- build.resources.0.status.phase == 'Cancelled'
- name: Get info for 2nd Build
kubernetes.core.k8s_info:
version: build.openshift.io/v1
kind: Build
namespace: "{{ build_ns }}"
name: "{{ build_config }}-2"
register: build
- name: Get Build info
kubernetes.core.k8s_info:
version: build.openshift.io/v1
kind: Build
namespace: "{{ build_ns }}"
name: "{{ build_config }}-3"
register: build
- name: Assert that build phase is cancelled
assert:
that:
- build.resources | length == 1
- '"cancelled" in build.resources.0.status'
- build.resources.0.status.cancelled
- build.resources.0.status.phase == 'Cancelled'
- name: Assert that Build is not cancelled
assert:
that:
- build.resources | length == 1
- '"cancelled" not in build.resources.0.status'
- "build.resources.0.status.phase in ('New', 'Pending', 'Running')"
- name: Get info for 3rd build
kubernetes.core.k8s_info:
version: build.openshift.io/v1
kind: Build
namespace: "{{ build_ns }}"
name: "{{ build_config }}-3"
register: build
- name: Prune Builds keep younger than 30min
community.okd.openshift_adm_prune_builds:
keep_younger_than: 30
namespace: "{{ build_ns }}"
register: prune
check_mode: yes
- name: Assert that Build is not cancelled
assert:
that:
- build.resources | length == 1
- '"cancelled" not in build.resources.0.status'
- "build.resources.0.status.phase in ('New', 'Pending', 'Running')"
- name: Assert that no Builds were found
assert:
that:
- not prune.changed
- prune.builds | length == 0
- name: Prune Builds keep younger than 30min
community.okd.openshift_adm_prune_builds:
keep_younger_than: 30
namespace: "{{ build_ns }}"
register: prune
check_mode: yes
- name: Prune Builds without namespace
community.okd.openshift_adm_prune_builds:
register: prune_without_ns
check_mode: yes
- name: Assert that no Builds were found
assert:
that:
- not prune.changed
- prune.builds | length == 0
- name: Assert that completed build are candidate for prune
assert:
that:
- prune_without_ns is changed
- prune_without_ns.builds | length > 0
- '"{{ build_config }}-1" in build_names'
- '"{{ build_config }}-2" in build_names'
vars:
build_names: '{{ prune_without_ns.builds | map(attribute="metadata") | flatten | map(attribute="name") | list }}'
- name: Prune Builds without namespace
community.okd.openshift_adm_prune_builds:
register: prune_without_ns
check_mode: yes
- name: Prune Builds using namespace
community.okd.openshift_adm_prune_builds:
namespace: "{{ build_ns }}"
register: prune_with_ns
check_mode: yes
- name: Assert that completed build are candidate for prune
assert:
that:
- prune_without_ns is changed
- prune_without_ns.builds | length > 0
- '"{{ build_config }}-1" in build_names'
- '"{{ build_config }}-2" in build_names'
vars:
build_names: '{{ prune_without_ns.builds | map(attribute="metadata") | flatten | map(attribute="name") | list }}'
- name: Assert that prune operation found the completed build
assert:
that:
- prune_with_ns is changed
- prune_with_ns.builds | length == 2
- name: Prune Builds using namespace
community.okd.openshift_adm_prune_builds:
namespace: "{{ build_ns }}"
register: prune_with_ns
check_mode: yes
- name: Check Build before prune
kubernetes.core.k8s_info:
kind: Build
api_version: build.openshift.io/v1
name: "{{ build_config }}-1"
namespace: "{{ build_ns }}"
register: resource
- name: Assert that prune operation found the completed build
assert:
that:
- prune_with_ns is changed
- prune_with_ns.builds | length == 2
- name: Validate that any previous build operation executed with check_mode did not deleted the build
assert:
that:
- resource.resources | length == 1
- name: Check Build before prune
kubernetes.core.k8s_info:
kind: Build
api_version: build.openshift.io/v1
name: "{{ build_config }}-1"
namespace: "{{ build_ns }}"
register: resource
- name: Execute prune operation
community.okd.openshift_adm_prune_builds:
namespace: "{{ build_ns }}"
register: prune
- name: Validate that any previous build operation executed with check_mode did not deleted the build
assert:
that:
- resource.resources | length == 1
- name: assert prune is changed
assert:
that:
- prune is changed
- name: Execute prune operation
community.okd.openshift_adm_prune_builds:
namespace: "{{ build_ns }}"
register: prune
- name: Check Build
kubernetes.core.k8s_info:
kind: Build
api_version: build.openshift.io/v1
name: "{{ build_config }}-1"
namespace: "{{ build_ns }}"
register: resource
- name: assert prune is changed
assert:
that:
- prune is changed
- name: Assert that the Build does not exist anymore
assert:
that:
- resource.resources | length == 0
- name: Check Build
kubernetes.core.k8s_info:
kind: Build
api_version: build.openshift.io/v1
name: "{{ build_config }}-1"
namespace: "{{ build_ns }}"
register: resource
- name: Check Build
kubernetes.core.k8s_info:
kind: Build
api_version: build.openshift.io/v1
name: "{{ build_config }}-2"
namespace: "{{ build_ns }}"
register: resource
- name: Assert that the Build does not exist anymore
assert:
that:
- resource.resources | length == 0
- name: Assert that the Build does not exist anymore
assert:
that:
- resource.resources | length == 0
- name: Check Build
kubernetes.core.k8s_info:
kind: Build
api_version: build.openshift.io/v1
name: "{{ build_config }}-2"
namespace: "{{ build_ns }}"
register: resource
- name: Assert that the Build does not exist anymore
assert:
that:
- resource.resources | length == 0
always:
- name: Ensure namespace is deleted

View File

@@ -1,174 +1,175 @@
---
- name: Openshift import image testing
block:
- set_fact:
test_ns: "import-images"
- set_fact:
test_ns: "import-images"
- name: Ensure namespace
community.okd.k8s:
kind: Namespace
name: '{{ test_ns }}'
- name: Ensure namespace
community.okd.k8s:
kind: Namespace
name: '{{ test_ns }}'
- name: Import image using tag (should import latest tag only)
community.okd.openshift_import_image:
namespace: "{{ test_ns }}"
name: "ansible/awx"
check_mode: yes
register: import_tag
- name: Import image using tag (should import latest tag only)
community.okd.openshift_import_image:
namespace: "{{ test_ns }}"
name: "ansible/awx"
check_mode: yes
register: import_tag
- name: Assert only latest was imported
assert:
that:
- import_tag is changed
- import_tag.result | length == 1
- import_tag.result.0.spec.import
- import_tag.result.0.spec.images.0.from.kind == "DockerImage"
- import_tag.result.0.spec.images.0.from.name == "ansible/awx"
- name: Assert only latest was imported
assert:
that:
- import_tag is changed
- import_tag.result | length == 1
- import_tag.result.0.spec.import
- import_tag.result.0.spec.images.0.from.kind == "DockerImage"
- import_tag.result.0.spec.images.0.from.name == "ansible/awx"
- name: check image stream
kubernetes.core.k8s_info:
kind: ImageStream
namespace: "{{ test_ns }}"
name: awx
register: resource
- name: assert that image stream is not created when using check_mode=yes
assert:
that:
- resource.resources == []
- name: Import image using tag (should import latest tag only)
community.okd.openshift_import_image:
namespace: "{{ test_ns }}"
name: "ansible/awx"
register: import_tag
- name: Assert only latest was imported
assert:
that:
- import_tag is changed
- name: check image stream
kubernetes.core.k8s_info:
kind: ImageStream
namespace: "{{ test_ns }}"
name: awx
register: resource
- name: assert that image stream contains only tag latest
assert:
that:
- resource.resources | length == 1
- resource.resources.0.status.tags.0.tag == 'latest'
- name: Import once again the latest tag
community.okd.openshift_import_image:
namespace: "{{ test_ns }}"
name: "ansible/awx"
register: import_tag
- name: assert change was performed
assert:
that:
- import_tag is changed
- name: check image stream
kubernetes.core.k8s_info:
kind: ImageStream
version: image.openshift.io/v1
namespace: "{{ test_ns }}"
name: awx
register: resource
- name: assert that image stream still contains unique tag
assert:
that:
- resource.resources | length == 1
- resource.resources.0.status.tags.0.tag == 'latest'
- name: Import another tags
community.okd.openshift_import_image:
namespace: "{{ test_ns }}"
name: "ansible/awx:17.1.0"
register: import_another_tag
ignore_errors: yes
- name: assert that another tag was imported
assert:
that:
- import_another_tag is failed
- '"the tag 17.1.0 does not exist on the image stream" in import_another_tag.msg'
- name: Create simple ImageStream (without docker external container)
community.okd.k8s:
namespace: "{{ test_ns }}"
name: "local-is"
definition:
apiVersion: image.openshift.io/v1
- name: check image stream
kubernetes.core.k8s_info:
kind: ImageStream
spec:
lookupPolicy:
local: false
tags: []
namespace: "{{ test_ns }}"
name: awx
register: resource
- name: Import all tag for image stream not pointing on external container image should failed
community.okd.openshift_import_image:
namespace: "{{ test_ns }}"
name: "local-is"
all: true
register: error_tag
ignore_errors: true
check_mode: yes
- name: assert that image stream is not created when using check_mode=yes
assert:
that:
- resource.resources == []
- name: Assert module cannot import from non-existing tag from ImageStream
assert:
that:
- error_tag is failed
- 'error_tag.msg == "image stream {{ test_ns }}/local-is does not have tags pointing to external container images"'
- name: Import image using tag (should import latest tag only)
community.okd.openshift_import_image:
namespace: "{{ test_ns }}"
name: "ansible/awx"
register: import_tag
- name: import all tags for container image ibmcom/pause and specific tag for redhat/ubi8-micro
community.okd.openshift_import_image:
namespace: "{{ test_ns }}"
name:
- "ibmcom/pause"
- "redhat/ubi8-micro:8.5-437"
all: true
register: multiple_import
- name: Assert only latest was imported
assert:
that:
- import_tag is changed
- name: Assert that import succeed
assert:
that:
- multiple_import is changed
- multiple_import.result | length == 2
- name: check image stream
kubernetes.core.k8s_info:
kind: ImageStream
namespace: "{{ test_ns }}"
name: awx
register: resource
- name: Read ibmcom/pause ImageStream
kubernetes.core.k8s_info:
version: image.openshift.io/v1
kind: ImageStream
namespace: "{{ test_ns }}"
name: pause
register: pause
- name: assert that image stream contains only tag latest
assert:
that:
- resource.resources | length == 1
- resource.resources.0.status.tags.0.tag == 'latest'
- name: assert that ibmcom/pause has multiple tags
assert:
that:
- pause.resources | length == 1
- pause.resources.0.status.tags | length > 1
- name: Import once again the latest tag
community.okd.openshift_import_image:
namespace: "{{ test_ns }}"
name: "ansible/awx"
register: import_tag
- name: Read redhat/ubi8-micro ImageStream
kubernetes.core.k8s_info:
version: image.openshift.io/v1
kind: ImageStream
namespace: "{{ test_ns }}"
name: ubi8-micro
register: resource
- name: assert change was performed
assert:
that:
- import_tag is changed
- name: assert that redhat/ubi8-micro has only one tag
assert:
that:
- resource.resources | length == 1
- resource.resources.0.status.tags | length == 1
- 'resource.resources.0.status.tags.0.tag == "8.5-437"'
- name: check image stream
kubernetes.core.k8s_info:
kind: ImageStream
version: image.openshift.io/v1
namespace: "{{ test_ns }}"
name: awx
register: resource
- name: assert that image stream still contains unique tag
assert:
that:
- resource.resources | length == 1
- resource.resources.0.status.tags.0.tag == 'latest'
- name: Import another tags
community.okd.openshift_import_image:
namespace: "{{ test_ns }}"
name: "ansible/awx:17.1.0"
register: import_another_tag
ignore_errors: yes
- name: assert that another tag was imported
assert:
that:
- import_another_tag is failed
- '"the tag 17.1.0 does not exist on the image stream" in import_another_tag.msg'
- name: Create simple ImageStream (without docker external container)
community.okd.k8s:
namespace: "{{ test_ns }}"
name: "local-is"
definition:
apiVersion: image.openshift.io/v1
kind: ImageStream
spec:
lookupPolicy:
local: false
tags: []
- name: Import all tag for image stream not pointing on external container image should failed
community.okd.openshift_import_image:
namespace: "{{ test_ns }}"
name: "local-is"
all: true
register: error_tag
ignore_errors: true
check_mode: yes
- name: Assert module cannot import from non-existing tag from ImageStream
assert:
that:
- error_tag is failed
- 'error_tag.msg == "image stream {{ test_ns }}/local-is does not have tags pointing to external container images"'
- name: import all tags for container image ibmcom/pause and specific tag for redhat/ubi8-micro
community.okd.openshift_import_image:
namespace: "{{ test_ns }}"
name:
- "ibmcom/pause"
- "redhat/ubi8-micro:8.5-437"
all: true
register: multiple_import
- name: Assert that import succeed
assert:
that:
- multiple_import is changed
- multiple_import.result | length == 2
- name: Read ibmcom/pause ImageStream
kubernetes.core.k8s_info:
version: image.openshift.io/v1
kind: ImageStream
namespace: "{{ test_ns }}"
name: pause
register: pause
- name: assert that ibmcom/pause has multiple tags
assert:
that:
- pause.resources | length == 1
- pause.resources.0.status.tags | length > 1
- name: Read redhat/ubi8-micro ImageStream
kubernetes.core.k8s_info:
version: image.openshift.io/v1
kind: ImageStream
namespace: "{{ test_ns }}"
name: ubi8-micro
register: resource
- name: assert that redhat/ubi8-micro has only one tag
assert:
that:
- resource.resources | length == 1
- resource.resources.0.status.tags | length == 1
- 'resource.resources.0.status.tags.0.tag == "8.5-437"'
always:
- name: Delete testing namespace

View File

@@ -38,12 +38,12 @@
name: "{{ pod_name }}"
spec:
containers:
- name: test-container
image: "{{ prune_registry }}/{{ prune_ns }}/{{ container.name }}:latest"
command:
- /bin/sh
- -c
- while true;do date;sleep 5; done
- name: test-container
image: "{{ prune_registry }}/{{ prune_ns }}/{{ container.name }}:latest"
command:
- /bin/sh
- -c
- while true;do date;sleep 5; done
- name: Create limit range for images size
community.okd.k8s:
@@ -57,7 +57,7 @@
- type: openshift.io/Image
max:
storage: 1Gi
- name: Prune images from namespace
community.okd.openshift_adm_prune_images:
registry_url: "{{ prune_registry }}"

View File

@@ -19,10 +19,10 @@
app: hello-kubernetes
spec:
containers:
- name: hello-kubernetes
image: docker.io/openshift/hello-openshift
ports:
- containerPort: 8080
- name: hello-kubernetes
image: docker.io/openshift/hello-openshift
ports:
- containerPort: 8080
- name: Create Service
community.okd.k8s:
@@ -35,8 +35,8 @@
namespace: default
spec:
ports:
- port: 80
targetPort: 8080
- port: 80
targetPort: 8080
selector:
app: hello-kubernetes

View File

@@ -64,14 +64,16 @@ okd_dc_triggers:
okd_dc_spec:
template: '{{ k8s_pod_template }}'
triggers: '{{ okd_dc_triggers }}'
selector:
matchLabels:
app: "{{ k8s_pod_name }}"
replicas: 1
strategy:
type: Recreate
okd_dc_template:
apiVersion: v1
kind: DeploymentConfig
apiVersion: apps/v1
kind: Deployment
spec: '{{ okd_dc_spec }}'
okd_imagestream_template:
@@ -83,12 +85,12 @@ okd_imagestream_template:
lookupPolicy:
local: true
tags:
- annotations: null
from:
kind: DockerImage
name: '{{ image }}'
name: '{{ image_tag }}'
referencePolicy:
type: Source
- annotations: null
from:
kind: DockerImage
name: '{{ image }}'
name: '{{ image_tag }}'
referencePolicy:
type: Source
image_tag: latest