openshift admin prune auth (#130)

* openshift admin prune auth

* update change scope
This commit is contained in:
abikouo
2021-11-30 14:16:22 +01:00
committed by GitHub
parent 6b2efa180f
commit 496cdba4e8
5 changed files with 1141 additions and 0 deletions

View File

@@ -0,0 +1,326 @@
---
# Integration tests for community.okd.openshift_adm_prune_auth with
# resource=clusterroles: a ServiceAccount is granted access through
# ClusterRoles and (cluster) role bindings, the clusterroles are pruned,
# and the ServiceAccount's access is verified to be revoked again.
- block:
    - set_fact:
        test_sa: "clusterrole-sa"
        test_ns: "clusterrole-ns"

    - name: Ensure namespace
      kubernetes.core.k8s:
        kind: Namespace
        name: "{{ test_ns }}"

    - name: Get cluster information
      kubernetes.core.k8s_cluster_info:
      register: cluster_info
      no_log: true

    - set_fact:
        cluster_host: "{{ cluster_info['connection']['host'] }}"

    - name: Create Service account
      kubernetes.core.k8s:
        definition:
          apiVersion: v1
          kind: ServiceAccount
          metadata:
            name: "{{ test_sa }}"
            namespace: "{{ test_ns }}"

    - name: Read Service Account
      kubernetes.core.k8s_info:
        kind: ServiceAccount
        namespace: "{{ test_ns }}"
        name: "{{ test_sa }}"
      register: result

    - set_fact:
        secret_token: "{{ result.resources[0]['secrets'][0]['name'] }}"

    - name: Get secret details
      kubernetes.core.k8s_info:
        kind: Secret
        namespace: "{{ test_ns }}"
        name: "{{ secret_token }}"
      register: _secret
      retries: 10
      delay: 10
      # The token is exposed either as an annotation or as a data field
      # depending on the cluster version; wait until one of them shows up.
      until:
        - ("'openshift.io/token-secret.value' in _secret.resources[0]['metadata']['annotations']") or ("'token' in _secret.resources[0]['data']")

    - set_fact:
        api_token: "{{ _secret.resources[0]['metadata']['annotations']['openshift.io/token-secret.value'] }}"
      when: "'openshift.io/token-secret.value' in _secret.resources[0]['metadata']['annotations']"

    - set_fact:
        api_token: "{{ _secret.resources[0]['data']['token'] | b64decode }}"
      when: "'token' in _secret.resources[0]['data']"

    # No role has been bound to the ServiceAccount yet: every request below
    # must be rejected by the API server.
    - name: list Node should failed (forbidden user)
      kubernetes.core.k8s_info:
        api_key: "{{ api_token }}"
        host: "{{ cluster_host }}"
        validate_certs: false
        kind: Node
      register: error
      ignore_errors: true

    - assert:
        that:
          - error is failed
          # - '"nodes is forbidden: User" in error.msg'

    - name: list Pod for all namespace should failed
      kubernetes.core.k8s_info:
        api_key: "{{ api_token }}"
        host: "{{ cluster_host }}"
        validate_certs: false
        kind: Pod
      register: error
      ignore_errors: true

    - assert:
        that:
          - error is failed
          # - '"pods is forbidden: User" in error.msg'

    - name: list Pod for test namespace should failed
      kubernetes.core.k8s_info:
        api_key: "{{ api_token }}"
        host: "{{ cluster_host }}"
        validate_certs: false
        kind: Pod
        namespace: "{{ test_ns }}"
      register: error
      ignore_errors: true

    - assert:
        that:
          - error is failed
          # - '"pods is forbidden: User" in error.msg'

    - set_fact:
        test_labels:
          phase: dev
        cluster_roles:
          - name: pod-manager
            resources:
              - pods
            verbs:
              - list
            api_version_binding: "authorization.openshift.io/v1"
          - name: node-manager
            resources:
              - nodes
            verbs:
              - list
            api_version_binding: "rbac.authorization.k8s.io/v1"

    - name: Create cluster roles
      kubernetes.core.k8s:
        definition:
          kind: ClusterRole
          apiVersion: "rbac.authorization.k8s.io/v1"
          metadata:
            name: "{{ item.name }}"
            labels: "{{ test_labels }}"
          rules:
            - apiGroups: [""]
              resources: "{{ item.resources }}"
              verbs: "{{ item.verbs }}"
      with_items: "{{ cluster_roles }}"

    - name: Create Role Binding (namespaced)
      kubernetes.core.k8s:
        definition:
          kind: RoleBinding
          apiVersion: "rbac.authorization.k8s.io/v1"
          metadata:
            name: "{{ cluster_roles[0].name }}-binding"
            namespace: "{{ test_ns }}"
            labels: "{{ test_labels }}"
          subjects:
            - kind: ServiceAccount
              name: "{{ test_sa }}"
              namespace: "{{ test_ns }}"
              apiGroup: ""
          roleRef:
            kind: ClusterRole
            name: "{{ cluster_roles[0].name }}"
            apiGroup: ""

    # The namespaced binding only grants pod listing inside the namespace:
    # cluster-wide listing must still fail.
    - name: list Pod for all namespace should failed
      kubernetes.core.k8s_info:
        api_key: "{{ api_token }}"
        host: "{{ cluster_host }}"
        validate_certs: false
        kind: Pod
      register: error
      ignore_errors: true

    - assert:
        that:
          - error is failed
          # - '"pods is forbidden: User" in error.msg'

    - name: list Pod for test namespace should succeed
      kubernetes.core.k8s_info:
        api_key: "{{ api_token }}"
        host: "{{ cluster_host }}"
        validate_certs: false
        kind: Pod
        namespace: "{{ test_ns }}"
      no_log: true

    - name: Create Cluster role Binding
      kubernetes.core.k8s:
        definition:
          kind: ClusterRoleBinding
          apiVersion: "{{ item.api_version_binding }}"
          metadata:
            name: "{{ item.name }}-binding"
            labels: "{{ test_labels }}"
          subjects:
            - kind: ServiceAccount
              name: "{{ test_sa }}"
              namespace: "{{ test_ns }}"
              apiGroup: ""
          roleRef:
            kind: ClusterRole
            name: "{{ item.name }}"
            apiGroup: ""
      with_items: "{{ cluster_roles }}"

    # With the cluster-scoped bindings in place everything is allowed.
    - name: list Pod for all namespace should succeed
      kubernetes.core.k8s_info:
        api_key: "{{ api_token }}"
        host: "{{ cluster_host }}"
        validate_certs: false
        kind: Pod
      no_log: true

    - name: list Pod for test namespace should succeed
      kubernetes.core.k8s_info:
        api_key: "{{ api_token }}"
        host: "{{ cluster_host }}"
        validate_certs: false
        kind: Pod
        namespace: "{{ test_ns }}"
      no_log: true

    - name: list Node using ServiceAccount
      kubernetes.core.k8s_info:
        api_key: "{{ api_token }}"
        host: "{{ cluster_host }}"
        validate_certs: false
        kind: Node
        namespace: "{{ test_ns }}"
      no_log: true

    - name: Prune clusterroles (check mode)
      community.okd.openshift_adm_prune_auth:
        resource: clusterroles
        label_selectors:
          - phase=dev
      register: check
      check_mode: true

    - name: validate clusterrole binding candidates for prune
      assert:
        that:
          - '"{{ item.name }}-binding" in check.cluster_role_binding'
          - '"{{ test_ns }}/{{ cluster_roles[0].name }}-binding" in check.role_binding'
      with_items: "{{ cluster_roles }}"

    - name: Prune Cluster Role for managing Pod
      community.okd.openshift_adm_prune_auth:
        resource: clusterroles
        name: "{{ cluster_roles[0].name }}"

    # Pod access came from the pruned clusterrole: it must be gone...
    - name: list Pod for all namespace should failed
      kubernetes.core.k8s_info:
        api_key: "{{ api_token }}"
        host: "{{ cluster_host }}"
        validate_certs: false
        kind: Pod
      register: error
      no_log: true
      ignore_errors: true

    - assert:
        that:
          - error is failed
          # - '"pods is forbidden: User" in error.msg'

    - name: list Pod for test namespace should failed
      kubernetes.core.k8s_info:
        api_key: "{{ api_token }}"
        host: "{{ cluster_host }}"
        validate_certs: false
        kind: Pod
        namespace: "{{ test_ns }}"
      register: error
      no_log: true
      ignore_errors: true

    - assert:
        that:
          - error is failed
          # - '"pods is forbidden: User" in error.msg'

    # ... while node access (node-manager clusterrole) is still intact.
    - name: list Node using ServiceAccount
      kubernetes.core.k8s_info:
        api_key: "{{ api_token }}"
        host: "{{ cluster_host }}"
        validate_certs: false
        kind: Node
        namespace: "{{ test_ns }}"
      no_log: true

    - name: Prune clusterroles (remaining)
      community.okd.openshift_adm_prune_auth:
        resource: clusterroles
        label_selectors:
          - phase=dev

    - name: list Node using ServiceAccount
      kubernetes.core.k8s_info:
        api_key: "{{ api_token }}"
        host: "{{ cluster_host }}"
        validate_certs: false
        kind: Node
        namespace: "{{ test_ns }}"
      register: error
      ignore_errors: true

    - assert:
        that:
          - error is failed
          # - '"nodes is forbidden: User" in error.msg'

  always:
    - name: Ensure namespace is deleted
      kubernetes.core.k8s:
        state: absent
        kind: Namespace
        name: "{{ test_ns }}"
        wait: true
      ignore_errors: true

    - name: Delete ClusterRoleBinding
      kubernetes.core.k8s:
        kind: ClusterRoleBinding
        api_version: "rbac.authorization.k8s.io/v1"
        name: "{{ item.name }}-binding"
        state: absent
      ignore_errors: true
      with_items: "{{ cluster_roles }}"
      when: cluster_roles is defined

    - name: Delete ClusterRole
      kubernetes.core.k8s:
        kind: ClusterRole
        api_version: "rbac.authorization.k8s.io/v1"
        name: "{{ item.name }}"
        state: absent
      ignore_errors: true
      with_items: "{{ cluster_roles }}"
      when: cluster_roles is defined

View File

@@ -0,0 +1,344 @@
---
# Integration tests for community.okd.openshift_adm_prune_auth with
# resource=roles: a ServiceAccount is granted per-action Roles
# (list/create/delete pods), the roles are pruned one way at a time
# (label selector, name, whole namespace), and the ServiceAccount's
# remaining permissions are checked after each prune.
- block:
    - set_fact:
        test_ns: "prune-roles"
        sa_name: "roles-sa"
        pod_name: "pod-prune"
        role_definition:
          - name: pod-list
            labels:
              action: list
            verbs:
              - list
            role_binding:
              api_version: rbac.authorization.k8s.io/v1
          - name: pod-create
            labels:
              action: create
            verbs:
              - create
              - get
            role_binding:
              api_version: authorization.openshift.io/v1
          - name: pod-delete
            labels:
              action: delete
            verbs:
              - delete
            role_binding:
              api_version: rbac.authorization.k8s.io/v1

    - name: Ensure namespace
      kubernetes.core.k8s:
        kind: Namespace
        name: "{{ test_ns }}"

    - name: Get cluster information
      kubernetes.core.k8s_cluster_info:
      register: cluster_info
      no_log: true

    - set_fact:
        cluster_host: "{{ cluster_info['connection']['host'] }}"

    - name: Create Service account
      kubernetes.core.k8s:
        definition:
          apiVersion: v1
          kind: ServiceAccount
          metadata:
            name: "{{ sa_name }}"
            namespace: "{{ test_ns }}"

    - name: Read Service Account
      kubernetes.core.k8s_info:
        kind: ServiceAccount
        namespace: "{{ test_ns }}"
        name: "{{ sa_name }}"
      register: sa_out

    - set_fact:
        secret_token: "{{ sa_out.resources[0]['secrets'][0]['name'] }}"

    - name: Get secret details
      kubernetes.core.k8s_info:
        kind: Secret
        namespace: "{{ test_ns }}"
        name: "{{ secret_token }}"
      register: r_secret
      retries: 10
      delay: 10
      # The token is exposed either as an annotation or as a data field
      # depending on the cluster version; wait until one of them shows up.
      until:
        - ("'openshift.io/token-secret.value' in r_secret.resources[0]['metadata']['annotations']") or ("'token' in r_secret.resources[0]['data']")

    - set_fact:
        api_token: "{{ r_secret.resources[0]['metadata']['annotations']['openshift.io/token-secret.value'] }}"
      when: "'openshift.io/token-secret.value' in r_secret.resources[0]['metadata']['annotations']"

    - set_fact:
        api_token: "{{ r_secret.resources[0]['data']['token'] | b64decode }}"
      when: "'token' in r_secret.resources[0]['data']"

    # No role bound yet: listing pods must be forbidden.
    - name: list resources using service account
      kubernetes.core.k8s_info:
        api_key: "{{ api_token }}"
        host: "{{ cluster_host }}"
        validate_certs: false
        kind: Pod
        namespace: "{{ test_ns }}"
      register: error
      ignore_errors: true

    - assert:
        that:
          - error is failed
          # - '"pods is forbidden: User" in error.module_stderr'

    - name: Create a role to manage Pod from namespace "{{ test_ns }}"
      kubernetes.core.k8s:
        definition:
          kind: Role
          # rbac.authorization.k8s.io/v1beta1 was removed in Kubernetes 1.22;
          # use the stable v1 API instead.
          apiVersion: rbac.authorization.k8s.io/v1
          metadata:
            namespace: "{{ test_ns }}"
            name: "{{ item.name }}"
            labels: "{{ item.labels }}"
          rules:
            - apiGroups: [""]
              resources: ["pods"]
              verbs: "{{ item.verbs }}"
      with_items: "{{ role_definition }}"

    - name: Create Role Binding
      kubernetes.core.k8s:
        definition:
          kind: RoleBinding
          apiVersion: "{{ item.role_binding.api_version }}"
          metadata:
            name: "{{ item.name }}-bind"
            namespace: "{{ test_ns }}"
          subjects:
            - kind: ServiceAccount
              name: "{{ sa_name }}"
              namespace: "{{ test_ns }}"
              apiGroup: ""
          roleRef:
            kind: Role
            name: "{{ item.name }}"
            namespace: "{{ test_ns }}"
            apiGroup: ""
      with_items: "{{ role_definition }}"

    - name: Create Pod should succeed
      kubernetes.core.k8s:
        api_key: "{{ api_token }}"
        host: "{{ cluster_host }}"
        validate_certs: false
        namespace: "{{ test_ns }}"
        definition:
          kind: Pod
          metadata:
            name: "{{ pod_name }}"
          spec:
            containers:
              - name: python
                image: python:3.7-alpine
                command:
                  - /bin/sh
                  - -c
                  - while true; do echo $(date); sleep 15; done
                imagePullPolicy: IfNotPresent
      register: result

    - name: assert pod creation succeed
      assert:
        that:
          - result is successful

    - name: List Pod
      kubernetes.core.k8s_info:
        api_key: "{{ api_token }}"
        host: "{{ cluster_host }}"
        validate_certs: false
        namespace: "{{ test_ns }}"
        kind: Pod
      register: result

    - name: assert user is still authorize to list pods
      assert:
        that:
          - result is successful

    - name: Prune auth roles (check mode)
      community.okd.openshift_adm_prune_auth:
        resource: roles
        namespace: "{{ test_ns }}"
      register: check
      check_mode: true

    - name: validate that list role binding are candidates for prune
      assert:
        that: '"{{ test_ns }}/{{ item.name }}-bind" in check.role_binding'
      with_items: "{{ role_definition }}"

    - name: Prune resource using label_selectors option
      community.okd.openshift_adm_prune_auth:
        resource: roles
        namespace: "{{ test_ns }}"
        label_selectors:
          - action=delete
      register: prune

    - name: assert that role binding 'delete' was pruned
      assert:
        that:
          - prune is changed
          # Validate against the actual prune result, not the check-mode
          # candidate list (which contains every binding).
          - '"{{ test_ns }}/{{ role_definition[2].name }}-bind" in prune.role_binding'

    - name: assert that user could not delete pod anymore
      kubernetes.core.k8s:
        api_key: "{{ api_token }}"
        host: "{{ cluster_host }}"
        validate_certs: false
        state: absent
        namespace: "{{ test_ns }}"
        kind: Pod
        name: "{{ pod_name }}"
      register: result
      ignore_errors: true

    - name: assert pod deletion failed due to forbidden user
      assert:
        that:
          - result is failed
          # - '"forbidden: User" in error.module_stderr'

    - name: List Pod
      kubernetes.core.k8s_info:
        api_key: "{{ api_token }}"
        host: "{{ cluster_host }}"
        validate_certs: false
        namespace: "{{ test_ns }}"
        kind: Pod
      register: result

    - name: assert user is still able to list pods
      assert:
        that:
          - result is successful

    - name: Create Pod should succeed
      kubernetes.core.k8s:
        api_key: "{{ api_token }}"
        host: "{{ cluster_host }}"
        validate_certs: false
        namespace: "{{ test_ns }}"
        definition:
          kind: Pod
          metadata:
            name: "{{ pod_name }}-1"
          spec:
            containers:
              - name: python
                image: python:3.7-alpine
                command:
                  - /bin/sh
                  - -c
                  - while true; do echo $(date); sleep 15; done
                imagePullPolicy: IfNotPresent
      register: result

    - name: assert user is still authorize to create pod
      assert:
        that:
          - result is successful

    - name: Prune role using name
      community.okd.openshift_adm_prune_auth:
        resource: roles
        namespace: "{{ test_ns }}"
        name: "{{ role_definition[1].name }}"
      register: prune

    - name: assert that role binding 'create' was pruned
      assert:
        that:
          - prune is changed
          # Validate against the actual prune result, not the check-mode
          # candidate list (which contains every binding).
          - '"{{ test_ns }}/{{ role_definition[1].name }}-bind" in prune.role_binding'

    - name: Create Pod (should failed)
      kubernetes.core.k8s:
        api_key: "{{ api_token }}"
        host: "{{ cluster_host }}"
        validate_certs: false
        namespace: "{{ test_ns }}"
        definition:
          kind: Pod
          metadata:
            name: "{{ pod_name }}-2"
          spec:
            containers:
              - name: python
                image: python:3.7-alpine
                command:
                  - /bin/sh
                  - -c
                  - while true; do echo $(date); sleep 15; done
                imagePullPolicy: IfNotPresent
      register: result
      ignore_errors: true

    - name: assert user is not authorize to create pod anymore
      assert:
        that:
          - result is failed
          # - '"forbidden: User" in error.module_stderr'

    - name: List Pod
      kubernetes.core.k8s_info:
        api_key: "{{ api_token }}"
        host: "{{ cluster_host }}"
        validate_certs: false
        namespace: "{{ test_ns }}"
        kind: Pod
      register: result

    - name: assert user is still able to list pods
      assert:
        that:
          - result is successful

    - name: Prune all role for namespace (neither name nor label_selectors are specified)
      community.okd.openshift_adm_prune_auth:
        resource: roles
        namespace: "{{ test_ns }}"
      register: prune

    - name: assert that role binding 'list' was pruned
      assert:
        that:
          - prune is changed
          # Validate against the actual prune result, not the check-mode
          # candidate list (which contains every binding).
          - '"{{ test_ns }}/{{ role_definition[0].name }}-bind" in prune.role_binding'

    - name: List Pod
      kubernetes.core.k8s_info:
        api_key: "{{ api_token }}"
        host: "{{ cluster_host }}"
        validate_certs: false
        namespace: "{{ test_ns }}"
        kind: Pod
      register: result
      ignore_errors: true

    - name: assert user is not authorize to list pod anymore
      assert:
        that:
          - result is failed
          # - '"forbidden: User" in error.module_stderr'

  always:
    - name: Ensure namespace is deleted
      kubernetes.core.k8s:
        state: absent
        kind: Namespace
        name: "{{ test_ns }}"
      ignore_errors: true

View File

@@ -61,6 +61,8 @@
- import_tasks: tasks/validate_not_installed.yml
- import_tasks: tasks/openshift_auth.yml
- import_tasks: tasks/openshift_adm_prune_auth_clusterroles.yml
- import_tasks: tasks/openshift_adm_prune_auth_roles.yml
- import_tasks: tasks/openshift_route.yml
- block:
    - name: Create namespace

View File

@@ -0,0 +1,335 @@
#!/usr/bin/env python
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import traceback
from ansible.module_utils._text import to_native
try:
from ansible_collections.kubernetes.core.plugins.module_utils.common import (
K8sAnsibleMixin,
get_api_client,
)
HAS_KUBERNETES_COLLECTION = True
except ImportError as e:
HAS_KUBERNETES_COLLECTION = False
k8s_collection_import_exception = e
K8S_COLLECTION_ERROR = traceback.format_exc()
try:
from kubernetes import client
from kubernetes.dynamic.exceptions import DynamicApiError, NotFoundError
except ImportError:
pass
class OpenShiftAdmPruneAuth(K8sAnsibleMixin):
    """Implementation of ``oc adm prune auth``.

    Depending on the ``resource`` module parameter, removes references to
    roles, clusterroles, users or groups from role bindings, cluster role
    bindings, security context constraints, groups and
    OAuthClientAuthorizations.
    """

    def __init__(self, module):
        self.module = module
        self.fail_json = self.module.fail_json
        self.exit_json = self.module.exit_json
        if not HAS_KUBERNETES_COLLECTION:
            self.module.fail_json(
                msg="The kubernetes.core collection must be installed",
                exception=K8S_COLLECTION_ERROR,
                error=to_native(k8s_collection_import_exception),
            )
        super(OpenShiftAdmPruneAuth, self).__init__(self.module)
        self.params = self.module.params
        self.check_mode = self.module.check_mode
        self.client = get_api_client(self.module)

    def prune_resource_binding(self, kind, api_version, ref_kind, ref_namespace_names, propagation_policy=None):
        """Delete bindings whose roleRef points at one of the given references.

        :param kind: binding kind to scan ('RoleBinding' or 'ClusterRoleBinding').
        :param api_version: API version of the binding kind.
        :param ref_kind: roleRef.kind to match, or None to match any kind.
        :param ref_namespace_names: iterable of (namespace, name) tuples of the
            roles/clusterroles being pruned (namespace is None for cluster scope).
        :param propagation_policy: optional deletion propagation policy.
        :return: list of pruned binding names, as 'namespace/name' for
            namespaced bindings or 'name' for cluster-scoped ones. In check
            mode the candidates are returned without deleting anything.
        """
        resource = self.find_resource(kind=kind, api_version=api_version, fail=True)
        candidates = []
        for ref_namespace, ref_name in ref_namespace_names:
            try:
                result = resource.get(name=None, namespace=ref_namespace)
                result = result.to_dict()
                # A list response wraps the bindings in 'items'.
                result = result.get('items') if 'items' in result else [result]
                for obj in result:
                    namespace = obj['metadata'].get('namespace', None)
                    name = obj['metadata'].get('name')
                    if ref_kind and obj['roleRef']['kind'] != ref_kind:
                        # skip this binding as the roleRef.kind does not match
                        continue
                    if obj['roleRef']['name'] == ref_name:
                        # select this binding as the roleRef.name matches
                        candidates.append((namespace, name))
            except NotFoundError:
                continue
            except DynamicApiError as exc:
                msg = "Failed to get {kind} resource due to: {msg}".format(kind=kind, msg=exc.body)
                self.fail_json(msg=msg)
            except Exception as e:
                msg = "Failed to get {kind} due to: {msg}".format(kind=kind, msg=to_native(e))
                self.fail_json(msg=msg)

        if len(candidates) == 0 or self.check_mode:
            # Nothing to delete, or check mode: report candidates untouched.
            return [y if x is None else x + "/" + y for x, y in candidates]

        delete_options = client.V1DeleteOptions()
        if propagation_policy:
            delete_options.propagation_policy = propagation_policy
        for namespace, name in candidates:
            try:
                resource.delete(name=name, namespace=namespace, body=delete_options)
            except DynamicApiError as exc:
                msg = "Failed to delete {kind} {namespace}/{name} due to: {msg}".format(kind=kind, namespace=namespace, name=name, msg=exc.body)
                self.fail_json(msg=msg)
            except Exception as e:
                msg = "Failed to delete {kind} {namespace}/{name} due to: {msg}".format(kind=kind, namespace=namespace, name=name, msg=to_native(e))
                self.fail_json(msg=msg)
        return [y if x is None else x + "/" + y for x, y in candidates]

    def update_resource_binding(self, ref_kind, ref_names, namespaced=False):
        """Remove subjects matching (ref_kind, ref_names) from bindings.

        :param ref_kind: subject kind to drop ('User' or 'Group').
        :param ref_names: names of the subjects to drop.
        :param namespaced: scan RoleBindings when True, else ClusterRoleBindings.
        :return: tuple (updated binding names, changed flag).
        """
        kind = 'ClusterRoleBinding'
        # NOTE: previously a trailing comma here turned api_version into a
        # tuple, which broke find_resource for cluster-scoped bindings.
        api_version = "rbac.authorization.k8s.io/v1"
        if namespaced:
            kind = "RoleBinding"
        resource = self.find_resource(kind=kind, api_version=api_version, fail=True)
        result = resource.get(name=None, namespace=None).to_dict()
        result = result.get('items') if 'items' in result else [result]
        if len(result) == 0:
            return [], False

        def _update_user_group(binding_namespace, subjects):
            # Rebuild the legacy OpenShift userNames/groupNames fields from
            # the retained subjects; ServiceAccounts are kept as users.
            users, groups = [], []
            for x in subjects:
                if x['kind'] == 'User':
                    users.append(x['name'])
                elif x['kind'] == 'Group':
                    groups.append(x['name'])
                elif x['kind'] == 'ServiceAccount':
                    namespace = binding_namespace
                    if x.get('namespace') is not None:
                        namespace = x.get('namespace')
                    if namespace is not None:
                        users.append("system:serviceaccount:%s:%s" % (namespace, x['name']))
            return users, groups

        candidates = []
        changed = False
        for item in result:
            subjects = item.get('subjects', [])
            retainedSubjects = [x for x in subjects if x['kind'] == ref_kind and x['name'] in ref_names]
            if len(subjects) != len(retainedSubjects):
                updated_binding = item
                updated_binding['subjects'] = retainedSubjects
                binding_namespace = item['metadata'].get('namespace', None)
                updated_binding['userNames'], updated_binding['groupNames'] = _update_user_group(binding_namespace, retainedSubjects)
                candidates.append(binding_namespace + "/" + item['metadata']['name'] if binding_namespace else item['metadata']['name'])
                changed = True
                if not self.check_mode:
                    try:
                        resource.apply(updated_binding, namespace=binding_namespace)
                    except DynamicApiError as exc:
                        msg = "Failed to apply object due to: {0}".format(exc.body)
                        self.fail_json(msg=msg)
        return candidates, changed

    def update_security_context(self, ref_names, key):
        """Remove ref_names from the given subject list ('users' or 'groups')
        of every SecurityContextConstraints object.

        :return: tuple (updated SCC names, changed flag).
        """
        params = {'kind': 'SecurityContextConstraints', 'api_version': 'security.openshift.io/v1'}
        sccs = self.kubernetes_facts(**params)
        if not sccs['api_found']:
            self.fail_json(msg=sccs['msg'])
        sccs = sccs.get('resources')
        candidates = []
        changed = False
        resource = self.find_resource(kind="SecurityContextConstraints", api_version="security.openshift.io/v1")
        for item in sccs:
            subjects = item.get(key, [])
            retainedSubjects = [x for x in subjects if x not in ref_names]
            if len(subjects) != len(retainedSubjects):
                candidates.append(item['metadata']['name'])
                changed = True
                if not self.check_mode:
                    upd_sec_ctx = item
                    upd_sec_ctx.update({key: retainedSubjects})
                    try:
                        resource.apply(upd_sec_ctx, namespace=None)
                    except DynamicApiError as exc:
                        msg = "Failed to apply object due to: {0}".format(exc.body)
                        self.fail_json(msg=msg)
        return candidates, changed

    def auth_prune_roles(self):
        """Prune RoleBindings referencing the selected Roles in a namespace."""
        params = {'kind': 'Role', 'api_version': 'rbac.authorization.k8s.io/v1', 'namespace': self.params.get('namespace')}
        for attr in ('name', 'label_selectors'):
            if self.params.get(attr):
                params[attr] = self.params.get(attr)

        result = self.kubernetes_facts(**params)
        if not result['api_found']:
            self.fail_json(msg=result['msg'])

        roles = result.get('resources')
        if len(roles) == 0:
            self.exit_json(changed=False, msg="No candidate rolebinding to prune from namespace %s." % self.params.get('namespace'))

        ref_roles = [(x['metadata']['namespace'], x['metadata']['name']) for x in roles]
        candidates = self.prune_resource_binding(kind="RoleBinding",
                                                 api_version="rbac.authorization.k8s.io/v1",
                                                 ref_kind="Role",
                                                 ref_namespace_names=ref_roles,
                                                 propagation_policy='Foreground')
        if len(candidates) == 0:
            self.exit_json(changed=False, role_binding=candidates)
        self.exit_json(changed=True, role_binding=candidates)

    def auth_prune_clusterroles(self):
        """Prune ClusterRoleBindings and RoleBindings referencing the
        selected ClusterRoles."""
        params = {'kind': 'ClusterRole', 'api_version': 'rbac.authorization.k8s.io/v1'}
        for attr in ('name', 'label_selectors'):
            if self.params.get(attr):
                params[attr] = self.params.get(attr)

        result = self.kubernetes_facts(**params)
        if not result['api_found']:
            self.fail_json(msg=result['msg'])

        clusterroles = result.get('resources')
        if len(clusterroles) == 0:
            self.exit_json(changed=False, msg="No clusterroles found matching input criteria.")

        ref_clusterroles = [(None, x['metadata']['name']) for x in clusterroles]

        # Prune ClusterRoleBinding
        candidates_cluster_binding = self.prune_resource_binding(kind="ClusterRoleBinding",
                                                                 api_version="rbac.authorization.k8s.io/v1",
                                                                 ref_kind=None,
                                                                 ref_namespace_names=ref_clusterroles)
        # Prune Role Binding
        candidates_namespaced_binding = self.prune_resource_binding(kind="RoleBinding",
                                                                    api_version="rbac.authorization.k8s.io/v1",
                                                                    ref_kind='ClusterRole',
                                                                    ref_namespace_names=ref_clusterroles)

        self.exit_json(changed=True,
                       cluster_role_binding=candidates_cluster_binding,
                       role_binding=candidates_namespaced_binding)

    def list_groups(self, params=None):
        """Return the kubernetes_facts result dict for Group objects,
        optionally filtered by 'name'/'label_selectors' from params."""
        options = {'kind': 'Group', 'api_version': 'user.openshift.io/v1'}
        if params:
            for attr in ('name', 'label_selectors'):
                if params.get(attr):
                    options[attr] = params.get(attr)
        return self.kubernetes_facts(**options)

    def auth_prune_users(self):
        """Remove all references to the selected Users: bindings, SCCs,
        group membership and OAuthClientAuthorizations."""
        params = {'kind': 'User', 'api_version': 'user.openshift.io/v1'}
        for attr in ('name', 'label_selectors'):
            if self.params.get(attr):
                params[attr] = self.params.get(attr)

        # kubernetes_facts returns a dict; the objects live under 'resources'
        # (previously the dict itself was iterated, yielding its keys).
        users = self.kubernetes_facts(**params).get('resources', [])
        if len(users) == 0:
            self.exit_json(changed=False, msg="No resource type 'User' found matching input criteria.")

        names = [x['metadata']['name'] for x in users]
        changed = False
        # Remove the user role binding
        rolebinding, changed_role = self.update_resource_binding(ref_kind="User",
                                                                 ref_names=names,
                                                                 namespaced=True)
        changed = changed or changed_role
        # Remove the user cluster role binding
        clusterrolesbinding, changed_cr = self.update_resource_binding(ref_kind="User",
                                                                       ref_names=names)
        changed = changed or changed_cr
        # Remove the user from security context constraints
        sccs, changed_sccs = self.update_security_context(names, 'users')
        changed = changed or changed_sccs
        # Remove the user from groups
        groups = self.list_groups().get('resources', [])
        deleted_groups = []
        resource = self.find_resource(kind="Group", api_version="user.openshift.io/v1")
        for grp in groups:
            subjects = grp.get('users', [])
            retainedSubjects = [x for x in subjects if x not in names]
            if len(subjects) != len(retainedSubjects):
                deleted_groups.append(grp['metadata']['name'])
                changed = True
                if not self.check_mode:
                    upd_group = grp
                    upd_group.update({'users': retainedSubjects})
                    try:
                        resource.apply(upd_group, namespace=None)
                    except DynamicApiError as exc:
                        msg = "Failed to apply object due to: {0}".format(exc.body)
                        self.fail_json(msg=msg)
        # Remove the user's OAuthClientAuthorizations
        oauth = self.kubernetes_facts(kind='OAuthClientAuthorization',
                                      api_version='oauth.openshift.io/v1').get('resources', [])
        deleted_auths = []
        resource = self.find_resource(kind="OAuthClientAuthorization", api_version="oauth.openshift.io/v1")
        for authorization in oauth:
            if authorization.get('userName', None) in names:
                auth_name = authorization['metadata']['name']
                deleted_auths.append(auth_name)
                changed = True
                if not self.check_mode:
                    try:
                        resource.delete(name=auth_name, namespace=None, body=client.V1DeleteOptions())
                    except DynamicApiError as exc:
                        msg = "Failed to delete OAuthClientAuthorization {name} due to: {msg}".format(name=auth_name, msg=exc.body)
                        self.fail_json(msg=msg)
                    except Exception as e:
                        msg = "Failed to delete OAuthClientAuthorization {name} due to: {msg}".format(name=auth_name, msg=to_native(e))
                        self.fail_json(msg=msg)
        self.exit_json(changed=changed,
                       cluster_role_binding=clusterrolesbinding,
                       role_binding=rolebinding,
                       security_context_constraints=sccs,
                       authorization=deleted_auths,
                       group=deleted_groups)

    def auth_prune_groups(self):
        """Remove all references to the selected Groups: bindings and SCCs."""
        # list_groups returns a facts dict; the objects live under 'resources'
        # (previously the dict itself was iterated, yielding its keys).
        groups = self.list_groups(params=self.params).get('resources', [])
        if len(groups) == 0:
            self.exit_json(changed=False, result="No resource type 'Group' found matching input criteria.")

        names = [x['metadata']['name'] for x in groups]
        changed = False
        # Remove the groups role binding
        rolebinding, changed_role = self.update_resource_binding(ref_kind="Group",
                                                                 ref_names=names,
                                                                 namespaced=True)
        changed = changed or changed_role
        # Remove the groups cluster role binding
        clusterrolesbinding, changed_cr = self.update_resource_binding(ref_kind="Group",
                                                                       ref_names=names)
        changed = changed or changed_cr
        # Remove the groups security context constraints
        sccs, changed_sccs = self.update_security_context(names, 'groups')
        changed = changed or changed_sccs

        self.exit_json(changed=changed,
                       cluster_role_binding=clusterrolesbinding,
                       role_binding=rolebinding,
                       security_context_constraints=sccs)

    def execute_module(self):
        """Dispatch to the prune implementation for the requested resource."""
        auth_prune = {
            'roles': self.auth_prune_roles,
            'clusterroles': self.auth_prune_clusterroles,
            'users': self.auth_prune_users,
            'groups': self.auth_prune_groups,
        }
        auth_prune[self.params.get('resource')]()

View File

@@ -0,0 +1,134 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2021, Red Hat
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
module: openshift_adm_prune_auth
short_description: Removes references to the specified roles, clusterroles, users, and groups
version_added: "2.2.0"
author:
- Aubin Bikouo (@abikouo)
description:
- This module allow administrators to remove references to the specified roles, clusterroles, users, and groups.
- Analogous to C(oc adm prune auth).
extends_documentation_fragment:
- kubernetes.core.k8s_auth_options
options:
resource:
description:
- The specified resource to remove.
choices:
- roles
- clusterroles
- users
- groups
type: str
required: True
name:
description:
- Use to specify an object name to remove.
- Mutually exclusive with option I(label_selectors).
- If neither I(name) nor I(label_selectors) are specified, prune all resources in the namespace.
type: str
namespace:
description:
- Use to specify an object namespace.
- Ignored when I(resource) is set to C(clusterroles).
type: str
label_selectors:
description:
- Selector (label query) to filter on.
- Mutually exclusive with option I(name).
type: list
elements: str
requirements:
- python >= 3.6
- kubernetes >= 12.0.0
'''
EXAMPLES = r'''
- name: Prune all roles from default namespace
openshift_adm_prune_auth:
resource: roles
namespace: testing
- name: Prune clusterroles using label selectors
  openshift_adm_prune_auth:
    resource: clusterroles
    label_selectors:
      - phase=production
'''
RETURN = r'''
cluster_role_binding:
type: list
description: list of cluster role binding deleted.
returned: always
role_binding:
type: list
description: list of role binding deleted.
returned: I(resource=users) or I(resource=groups) or I(resource=clusterroles)
security_context_constraints:
type: list
description: list of Security Context Constraints deleted.
returned: I(resource=users) or I(resource=groups)
authorization:
type: list
description: list of OAuthClientAuthorization deleted.
returned: I(resource=users)
group:
  type: list
  description: list of groups the pruned users were removed from.
  returned: I(resource=users)
'''
import copy
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.kubernetes.core.plugins.module_utils.args_common import AUTH_ARG_SPEC
def argument_spec():
    """Build the module argument spec: the common kubernetes.core auth
    options extended with the prune-specific parameters."""
    spec = copy.deepcopy(AUTH_ARG_SPEC)
    spec.update(
        resource=dict(type='str', required=True, choices=['roles', 'clusterroles', 'users', 'groups']),
        namespace=dict(type='str'),
        name=dict(type='str'),
        label_selectors=dict(type='list', elements='str'),
    )
    return spec
def main():
    """Module entry point: build the AnsibleModule and run the prune."""
    module = AnsibleModule(
        argument_spec=argument_spec(),
        mutually_exclusive=[("name", "label_selectors")],
        supports_check_mode=True,
    )

    # Imported lazily so that a missing collection dependency is reported
    # through the module's own failure handling.
    from ansible_collections.community.okd.plugins.module_utils.openshift_adm_prune_auth import (
        OpenShiftAdmPruneAuth,
    )

    prune_auth = OpenShiftAdmPruneAuth(module)
    prune_auth.argspec = argument_spec
    prune_auth.execute_module()


if __name__ == '__main__':
    main()