k8s_scale - add option label_selectors and continue_on_error (#114)

k8s_scale support for label selectors
This commit is contained in:
abikouo
2021-06-07 09:57:50 +02:00
committed by GitHub
parent ef82b78a2f
commit 481521a09d
9 changed files with 232 additions and 26 deletions

View File

@@ -0,0 +1,4 @@
---
minor_changes:
- k8s_scale - ability to scale multiple resource using ``label_selectors`` (https://github.com/ansible-collections/community.kubernetes/pull/114).
- k8s_scale - new parameter to determine whether to continue or not on error when scaling multiple resources (https://github.com/ansible-collections/community.kubernetes/pull/114).

View File

@@ -0,0 +1,50 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: test0
labels:
app: nginx
spec:
replicas: 3
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx:1.14.2
ports:
- containerPort: 80
- name: hello
image: busybox
command: ['sh', '-c', 'echo "Hello, from test0" && sleep 3600']
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: test1
labels:
app: nginx
spec:
replicas: 3
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx:1.14.2
ports:
- containerPort: 80
- name: hello
image: busybox
command: ['sh', '-c', 'echo "Hello, from test1" && sleep 3600']

View File

@@ -202,6 +202,68 @@
- scale_down_no_wait.diff
- scale_down_no_wait_pods.resources | length == 1
# scale multiple resources using label selectors
- name: create deployment
kubernetes.core.k8s:
namespace: "{{ scale_namespace }}"
src: files/deployment.yaml
- name: list deployment
kubernetes.core.k8s_info:
kind: Deployment
namespace: "{{ scale_namespace }}"
label_selectors:
- app=nginx
register: resource
- assert:
that:
- resource.resources | list | length == 2
- name: scale deployment using resource version
kubernetes.core.k8s_scale:
replicas: 2
kind: Deployment
namespace: "{{ scale_namespace }}"
resource_version: 0
label_selectors:
- app=nginx
register: scale_out
- assert:
that:
- not scale_out.changed
- scale_out.results | selectattr('warning', 'defined') | list | length == 2
- name: scale deployment using current replicas (wrong value)
kubernetes.core.k8s_scale:
replicas: 2
current_replicas: 4
kind: Deployment
namespace: "{{ scale_namespace }}"
label_selectors:
- app=nginx
register: scale_out
- assert:
that:
- not scale_out.changed
- scale_out.results | selectattr('warning', 'defined') | list | length == 2
- name: scale deployment using current replicas (right value)
kubernetes.core.k8s_scale:
replicas: 2
current_replicas: 3
kind: Deployment
namespace: "{{ scale_namespace }}"
label_selectors:
- app=nginx
register: scale_out
- assert:
that:
- scale_out.changed
- scale_out.results | map(attribute='result.status.replicas') | list | unique == [2]
always:
- name: Remove namespace
k8s:

View File

@@ -40,4 +40,10 @@ options:
is ignored.
type: int
default: 20
wait_sleep:
description:
- Number of seconds to sleep between checks.
default: 5
type: int
version_added: 2.0.0
'''

View File

@@ -23,6 +23,7 @@ author:
description:
- Similar to the kubectl scale command. Use to set the number of replicas for a Deployment, ReplicaSet,
or Replication Controller, or the parallelism attribute of a Job. Supports check mode.
- C(wait) parameter is not supported for Jobs.
extends_documentation_fragment:
- kubernetes.core.k8s_name_options
@@ -30,6 +31,19 @@ extends_documentation_fragment:
- kubernetes.core.k8s_resource_options
- kubernetes.core.k8s_scale_options
options:
label_selectors:
description: List of label selectors to use to filter results.
type: list
elements: str
version_added: 2.0.0
continue_on_error:
description:
- Whether to continue on errors when multiple resources are defined.
type: bool
default: False
version_added: 2.0.0
requirements:
- "python >= 3.6"
- "kubernetes >= 12.0.0"
@@ -82,6 +96,15 @@ EXAMPLES = r'''
resource_definition: "{{ lookup('file', '/myproject/elastic_deployment.yml') | from_yaml }}"
replicas: 3
wait: no
- name: Scale deployment using label selectors (continue operation in case an error occurred on one resource)
kubernetes.core.k8s_scale:
replicas: 3
kind: Deployment
namespace: test
label_selectors:
- app=test
continue_on_error: true
'''
RETURN = r'''
@@ -131,6 +154,7 @@ SCALE_ARG_SPEC = {
'resource_version': {},
'wait': {'type': 'bool', 'default': True},
'wait_timeout': {'type': 'int', 'default': 20},
'wait_sleep': {'type': 'int', 'default': 5},
}
@@ -147,11 +171,17 @@ def execute_module(module, k8s_ansible_mixin,):
replicas = module.params.get('replicas')
resource_version = module.params.get('resource_version')
label_selectors = module.params.get('label_selectors')
if not label_selectors:
label_selectors = []
continue_on_error = module.params.get('continue_on_error')
wait = module.params.get('wait')
wait_time = module.params.get('wait_timeout')
wait_sleep = module.params.get('wait_sleep')
existing = None
existing_count = None
return_attributes = dict(changed=False, result=dict(), diff=dict())
return_attributes = dict(result=dict(), diff=dict())
if wait:
return_attributes['duration'] = 0
@@ -159,37 +189,83 @@ def execute_module(module, k8s_ansible_mixin,):
from ansible_collections.kubernetes.core.plugins.module_utils.common import NotFoundError
multiple_scale = False
try:
existing = resource.get(name=name, namespace=namespace)
return_attributes['result'] = existing.to_dict()
existing = resource.get(name=name, namespace=namespace, label_selector=','.join(label_selectors))
if existing.kind.endswith('List'):
existing_items = existing.items
multiple_scale = len(existing_items) > 1
else:
existing_items = [existing]
except NotFoundError as exc:
module.fail_json(msg='Failed to retrieve requested object: {0}'.format(exc),
error=exc.value.get('status'))
if module.params['kind'] == 'job':
existing_count = existing.spec.parallelism
elif hasattr(existing.spec, 'replicas'):
existing_count = existing.spec.replicas
if multiple_scale:
# when scaling multiple resources, the 'result' is changed to 'results' and is a list
return_attributes = {'results': []}
changed = False
if existing_count is None:
module.fail_json(msg='Failed to retrieve the available count for the requested object.')
def _continue_or_fail(error):
if multiple_scale and continue_on_error:
if "errors" not in return_attributes:
return_attributes['errors'] = []
return_attributes['errors'].append({'error': error, 'failed': True})
else:
module.fail_json(msg=error, **return_attributes)
if resource_version and resource_version != existing.metadata.resourceVersion:
module.exit_json(**return_attributes)
def _continue_or_exit(warn):
if multiple_scale:
return_attributes['results'].append({'warning': warn, 'changed': False})
else:
module.exit_json(warning=warn, **return_attributes)
if current_replicas is not None and existing_count != current_replicas:
module.exit_json(**return_attributes)
for existing in existing_items:
if module.params['kind'] == 'job':
existing_count = existing.spec.parallelism
elif hasattr(existing.spec, 'replicas'):
existing_count = existing.spec.replicas
if existing_count != replicas:
return_attributes['changed'] = True
if not module.check_mode:
if module.params['kind'] == 'job':
existing.spec.parallelism = replicas
return_attributes['result'] = resource.patch(existing.to_dict()).to_dict()
else:
return_attributes = scale(module, k8s_ansible_mixin, resource, existing, replicas, wait, wait_time)
if existing_count is None:
error = 'Failed to retrieve the available count for object kind={0} name={1} namespace={2}.'.format(
existing.kind, existing.metadata.name, existing.metadata.namespace)
_continue_or_fail(error)
continue
module.exit_json(**return_attributes)
if resource_version and resource_version != existing.metadata.resourceVersion:
warn = 'expected resource version {0} does not match with actual {1} for object kind={2} name={3} namespace={4}.'.format(
resource_version, existing.metadata.resourceVersion, existing.kind, existing.metadata.name, existing.metadata.namespace)
_continue_or_exit(warn)
continue
if current_replicas is not None and existing_count != current_replicas:
warn = 'current replicas {0} does not match with actual {1} for object kind={2} name={3} namespace={4}.'.format(
current_replicas, existing_count, existing.kind, existing.metadata.name, existing.metadata.namespace)
_continue_or_exit(warn)
continue
if existing_count != replicas:
if not module.check_mode:
if module.params['kind'] == 'job':
existing.spec.parallelism = replicas
result = resource.patch(existing.to_dict()).to_dict()
else:
result = scale(module, k8s_ansible_mixin, resource, existing, replicas, wait, wait_time, wait_sleep)
changed = changed or result['changed']
else:
name = existing.metadata.name
namespace = existing.metadata.namespace
existing = resource.get(name=name, namespace=namespace)
result = {'changed': False, 'result': existing.to_dict(), 'diff': {}}
if wait:
result['duration'] = 0
# append result to the return attribute
if multiple_scale:
return_attributes['results'].append(result)
else:
module.exit_json(**result)
module.exit_json(changed=changed, **return_attributes)
def argspec():
@@ -197,10 +273,12 @@ def argspec():
args.update(RESOURCE_ARG_SPEC)
args.update(NAME_ARG_SPEC)
args.update(AUTH_ARG_SPEC)
args.update({'label_selectors': {'type': 'list', 'elements': 'str', 'default': []}})
args.update(({'continue_on_error': {'type': 'bool', 'default': False}}))
return args
def scale(module, k8s_ansible_mixin, resource, existing_object, replicas, wait, wait_time):
def scale(module, k8s_ansible_mixin, resource, existing_object, replicas, wait, wait_time, wait_sleep):
name = existing_object.metadata.name
namespace = existing_object.metadata.namespace
kind = existing_object.kind
@@ -227,17 +305,19 @@ def scale(module, k8s_ansible_mixin, resource, existing_object, replicas, wait,
result['diff'] = diffs
if wait:
success, result['result'], result['duration'] = k8s_ansible_mixin.wait(resource, scale_obj, 5, wait_time)
success, result['result'], result['duration'] = k8s_ansible_mixin.wait(resource, scale_obj, wait_sleep, wait_time)
if not success:
module.fail_json(msg="Resource scaling timed out", **result)
return result
def main():
module = AnsibleModule(argument_spec=argspec(), supports_check_mode=True)
mutually_exclusive = [
('resource_definition', 'src'),
]
module = AnsibleModule(argument_spec=argspec(), mutually_exclusive=mutually_exclusive, supports_check_mode=True)
from ansible_collections.kubernetes.core.plugins.module_utils.common import (
K8sAnsibleMixin, get_api_client)
k8s_ansible_mixin = K8sAnsibleMixin(module)
k8s_ansible_mixin.client = get_api_client(module=module)
execute_module(module, k8s_ansible_mixin)

View File

@@ -15,3 +15,4 @@ plugins/module_utils/client/discovery.py future-import-boilerplate!skip
plugins/module_utils/client/discovery.py metaclass-boilerplate!skip
tests/unit/module_utils/test_discoverer.py future-import-boilerplate!skip
tests/unit/module_utils/test_discoverer.py metaclass-boilerplate!skip
molecule/default/files/deployment.yaml yamllint!skip

View File

@@ -15,3 +15,4 @@ plugins/module_utils/client/discovery.py future-import-boilerplate!skip
plugins/module_utils/client/discovery.py metaclass-boilerplate!skip
tests/unit/module_utils/test_discoverer.py future-import-boilerplate!skip
tests/unit/module_utils/test_discoverer.py metaclass-boilerplate!skip
molecule/default/files/deployment.yaml yamllint!skip

View File

@@ -13,3 +13,4 @@ plugins/module_utils/client/discovery.py import-3.7!skip
plugins/module_utils/client/resource.py import-3.7!skip
plugins/module_utils/client/discovery.py future-import-boilerplate!skip
plugins/module_utils/client/discovery.py metaclass-boilerplate!skip
molecule/default/files/deployment.yaml yamllint!skip

View File

@@ -12,3 +12,4 @@ plugins/module_utils/client/discovery.py future-import-boilerplate!skip
plugins/module_utils/client/discovery.py metaclass-boilerplate!skip
tests/unit/module_utils/test_discoverer.py future-import-boilerplate!skip
tests/unit/module_utils/test_discoverer.py metaclass-boilerplate!skip
molecule/default/files/deployment.yaml yamllint!skip