Don't update image field when we can't manage it (#29)

* If deploymentconfigs are configured to trigger on image stream updates don't try to replace image field

* First pass at parsing the trigger annotation

* First draft of IS idempotence tests

* Found even more not idempotent stuff

* Separate handling of annotation and dc spec

* handle malformed annotations

* refactor incluster integration test to catch last flake

* Add proper DNS01 regex for container names

* fix broken conditional for trigger annotations

* Handle namespace field that is added to trigger

* deduplicate shared code

* Set namespace in incluster script

* Give high permissions to test pod

* Still working on permissions issues in prow

* Fix inventory test

* add namespace to watch

* run in default namespace

* fix recursive call

* Fix ansible collection path for downstream test

* Clone the proper ansible collection
This commit is contained in:
Fabian von Feilitzsch
2020-09-17 13:21:00 -04:00
committed by GitHub
parent 1339e2bdf7
commit f52d63c83f
6 changed files with 317 additions and 53 deletions

View File

@@ -133,9 +133,8 @@ f_install_kubernetes_core_from_src()
# ansible-galaxy collection install -p "${install_collections_dir}" "${community_k8s_tmpdir}"/kubernetes-core-*.tar.gz
# popd
#popd
git clone https://github.com/maxamillion/community.kubernetes "${community_k8s_tmpdir}"
git clone https://github.com/ansible-collections/community.kubernetes "${community_k8s_tmpdir}"
pushd "${community_k8s_tmpdir}"
git checkout downstream/fix_collection_name
make downstream-build
ansible-galaxy collection install -p "${install_collections_dir}" "${community_k8s_tmpdir}"/kubernetes-core-*.tar.gz
popd
@@ -189,7 +188,7 @@ f_test_integration_option()
f_install_kubernetes_core_from_src
pushd "${_build_dir}" || return
f_log_info "INTEGRATION TEST WD: ${PWD}"
make test-integration-incluster
OVERRIDE_COLLECTION_PATH="${_tmp_dir}" molecule test
popd || return
f_cleanup
}

View File

@@ -2,6 +2,8 @@
set -x
NAMESPACE=${NAMESPACE:-default}
# IMAGE_FORMAT is in the form $registry/$org/$image:$$component, ie
# quay.io/openshift/release:$component
# To test with your own image, build and push the test image
@@ -14,6 +16,15 @@ eval IMAGE=$IMAGE_FORMAT
PULL_POLICY=${PULL_POLICY:-IfNotPresent}
if ! oc get namespace $NAMESPACE
then
oc create namespace $NAMESPACE
fi
oc project $NAMESPACE
oc adm policy add-cluster-role-to-user cluster-admin -z default
oc adm policy who-can create projectrequests
echo "Deleting test job if it exists"
oc delete job molecule-integration-test --wait --ignore-not-found
@@ -40,22 +51,36 @@ spec:
parallelism: 1
EOF
function wait_for_success {
oc wait --for=condition=complete job/molecule-integration-test --timeout 5m
echo "Molecule integration tests ran successfully"
exit 0
function check_success {
oc wait --for=condition=complete job/molecule-integration-test --timeout 5s -n $NAMESPACE \
&& oc logs job/molecule-integration-test \
&& echo "Molecule integration tests ran successfully" \
&& return 0
return 1
}
function wait_for_failure {
oc wait --for=condition=failed job/molecule-integration-test --timeout 5m
oc logs job/molecule-integration-test
echo "Molecule integration tests failed, see logs for more information..."
exit 1
function check_failure {
oc wait --for=condition=failed job/molecule-integration-test --timeout 5s -n $NAMESPACE \
&& oc logs job/molecule-integration-test \
&& echo "Molecule integration tests failed, see logs for more information..." \
&& return 0
return 1
}
# Ensure the child processes are killed
trap 'kill -SIGTERM 0' SIGINT EXIT
runtime="15 minute"
endtime=$(date -ud "$runtime" +%s)
echo "Waiting for test job to complete"
wait_for_success &
wait_for_failure
while [[ $(date -u +%s) -le $endtime ]]
do
if check_success
then
exit 0
elif check_failure
then
exit 1
fi
sleep 10
done
exit 1

View File

@@ -5,6 +5,8 @@
gather_facts: no
vars:
ansible_python_interpreter: '{{ virtualenv_interpreter }}'
vars_files:
- vars/main.yml
tasks:
# OpenShift Resources
- name: Create a project
@@ -22,41 +24,76 @@
- name: Create deployment config
community.okd.k8s:
state: present
inline: &dc
apiVersion: v1
kind: DeploymentConfig
metadata:
name: hello-world
labels:
app: galaxy
service: hello-world
namespace: testing
spec:
template:
metadata:
labels:
app: galaxy
service: hello-world
spec:
containers:
- name: hello-world
image: python
command:
- python
- '-m'
- http.server
env:
- name: TEST
value: test
replicas: 1
strategy:
type: Recreate
name: hello-world
namespace: testing
definition: '{{ okd_dc_template }}'
wait: yes
wait_condition:
type: Available
status: True
vars:
k8s_pod_name: hello-world
k8s_pod_image: python
k8s_pod_command:
- python
- '-m'
- http.server
k8s_pod_env:
- name: TEST
value: test
okd_dc_triggers:
- type: ConfigChange
register: output
- name: Show output
debug:
var: output
- vars:
image: docker.io/python
image_name: python
image_tag: latest
k8s_pod_image: python
k8s_pod_command:
- python
- '-m'
- http.server
namespace: idempotence-testing
block:
- name: Create a namespace
community.okd.k8s:
name: '{{ namespace }}'
kind: Namespace
api_version: v1
- name: Create imagestream
community.okd.k8s:
namespace: '{{ namespace }}'
definition: '{{ okd_imagestream_template }}'
- name: Create DeploymentConfig to reference ImageStream
community.okd.k8s:
name: '{{ k8s_pod_name }}'
namespace: '{{ namespace }}'
definition: '{{ okd_dc_template }}'
vars:
k8s_pod_name: is-idempotent-dc
- name: Create Deployment to reference ImageStream
community.okd.k8s:
name: '{{ k8s_pod_name }}'
namespace: '{{ namespace }}'
definition: '{{ k8s_deployment_template | combine(metadata) }}'
vars:
k8s_pod_annotations:
"alpha.image.policy.openshift.io/resolve-names": "*"
k8s_pod_name: is-idempotent-deployment
annotation:
- from:
kind: ImageStreamTag
name: "{{ image_name }}:{{ image_tag }}"
fieldPath: 'spec.template.spec.containers[?(@.name=="{{ k8s_pod_name }}")].image'
metadata:
metadata:
annotations:
image.openshift.io/triggers: '{{ annotation | to_json }}'

View File

@@ -9,6 +9,9 @@ platforms:
- k8s
provisioner:
name: ansible
log: true
options:
vvv: True
config_options:
inventory:
enable_plugins: community.okd.openshift
@@ -26,7 +29,7 @@ provisioner:
playbook_namespace: molecule-tests
env:
ANSIBLE_FORCE_COLOR: 'true'
ANSIBLE_COLLECTIONS_PATHS: ${MOLECULE_PROJECT_DIRECTORY}
ANSIBLE_COLLECTIONS_PATHS: ${OVERRIDE_COLLECTION_PATH:-$MOLECULE_PROJECT_DIRECTORY}
verifier:
name: ansible
lint: |

View File

@@ -0,0 +1,94 @@
---
# Default pod annotations; override per-play when trigger annotations are needed.
k8s_pod_annotations: {}

k8s_pod_metadata:
  labels:
    app: '{{ k8s_pod_name }}'
  annotations: '{{ k8s_pod_annotations }}'

k8s_pod_spec:
  serviceAccount: "{{ k8s_pod_service_account }}"
  containers:
    - image: "{{ k8s_pod_image }}"
      imagePullPolicy: Always
      name: "{{ k8s_pod_name }}"
      command: "{{ k8s_pod_command }}"
      readinessProbe:
        initialDelaySeconds: 15
        exec:
          command:
            - /bin/true
      resources: "{{ k8s_pod_resources }}"
      ports: "{{ k8s_pod_ports }}"
      env: "{{ k8s_pod_env }}"

k8s_pod_service_account: default

k8s_pod_resources:
  limits:
    cpu: "100m"
    memory: "100Mi"

k8s_pod_command: []
k8s_pod_ports: []
k8s_pod_env: []

# Shared pod template consumed by both the Deployment and DeploymentConfig.
k8s_pod_template:
  metadata: "{{ k8s_pod_metadata }}"
  spec: "{{ k8s_pod_spec }}"

k8s_deployment_spec:
  template: '{{ k8s_pod_template }}'
  selector:
    matchLabels:
      app: '{{ k8s_pod_name }}'
  replicas: 1

k8s_deployment_template:
  apiVersion: apps/v1
  kind: Deployment
  spec: '{{ k8s_deployment_spec }}'

# Default DeploymentConfig triggers: redeploy on config change and on
# ImageStreamTag updates for the test container.
okd_dc_triggers:
  - type: ConfigChange
  - type: ImageChange
    imageChangeParams:
      automatic: true
      containerNames:
        - '{{ k8s_pod_name }}'
      from:
        kind: ImageStreamTag
        name: '{{ image_name }}:{{ image_tag }}'

okd_dc_spec:
  template: '{{ k8s_pod_template }}'
  triggers: '{{ okd_dc_triggers }}'
  replicas: 1
  strategy:
    type: Recreate

okd_dc_template:
  apiVersion: v1
  kind: DeploymentConfig
  spec: '{{ okd_dc_spec }}'

okd_imagestream_template:
  apiVersion: image.openshift.io/v1
  kind: ImageStream
  metadata:
    name: '{{ image_name }}'
  spec:
    lookupPolicy:
      local: true
    tags:
      - annotations: null
        from:
          kind: DockerImage
          name: '{{ image }}'
        name: '{{ image_tag }}'
        referencePolicy:
          type: Source

image_tag: latest

View File

@@ -265,7 +265,10 @@ result:
sample: 48
'''
import re
import operator
import traceback
from functools import reduce
from ansible.module_utils._text import to_native
@@ -279,11 +282,15 @@ except ImportError as e:
from ansible.module_utils.basic import AnsibleModule as KubernetesRawModule
try:
import yaml
from openshift.dynamic.exceptions import DynamicApiError, NotFoundError, ForbiddenError
except ImportError:
# Exceptions handled in common
pass
TRIGGER_ANNOTATION = 'image.openshift.io/triggers'
TRIGGER_CONTAINER = re.compile(r"(?P<path>.*)\[((?P<index>[0-9]+)|\?\(@\.name==[\"'\\]*(?P<name>[a-z0-9]([-a-z0-9]*[a-z0-9])?))")
class OKDRawModule(KubernetesRawModule):
@@ -301,17 +308,116 @@ class OKDRawModule(KubernetesRawModule):
name = definition['metadata'].get('name')
namespace = definition['metadata'].get('namespace')
if definition['kind'] in ['Project', 'ProjectRequest'] and state != 'absent':
if state != 'absent':
if resource.kind in ['Project', 'ProjectRequest']:
try:
resource.get(name, namespace)
except (NotFoundError, ForbiddenError):
return self.create_project_request(definition)
except DynamicApiError as exc:
self.fail_json(msg='Failed to retrieve requested object: {0}'.format(exc.body),
error=exc.status, status=exc.status, reason=exc.reason)
try:
resource.get(name, namespace)
except (NotFoundError, ForbiddenError):
return self.create_project_request(definition)
except DynamicApiError as exc:
self.fail_json(msg='Failed to retrieve requested object: {0}'.format(exc.body),
error=exc.status, status=exc.status, reason=exc.reason)
existing = resource.get(name=name, namespace=namespace).to_dict()
except Exception:
existing = None
if existing:
if resource.kind == 'DeploymentConfig':
if definition.get('spec', {}).get('triggers'):
definition = self.resolve_imagestream_triggers(existing, definition)
elif existing['metadata'].get('annotations', '{}').get(TRIGGER_ANNOTATION):
definition = self.resolve_imagestream_trigger_annotation(existing, definition)
return super(OKDRawModule, self).perform_action(resource, definition)
@staticmethod
def get_index(desired, objects, keys):
    """Return the index of the first entry in ``objects`` whose values for
    every key in ``keys`` match the corresponding values in ``desired``.

    Falsy entries are skipped.  The asymmetric defaults (``True`` on the
    desired side, ``False`` on the candidate side) ensure a key missing
    from both mappings never counts as a match.  Returns ``None`` when no
    entry matches.
    """
    for position, candidate in enumerate(objects):
        if not candidate:
            continue
        if all(desired.get(key, True) == candidate.get(key, False) for key in keys):
            return position
    return None
def resolve_imagestream_trigger_annotation(self, existing, definition):
    """Preserve container images that are managed by the
    ``image.openshift.io/triggers`` annotation.

    For each trigger in the annotation (taken from ``definition`` when
    present, otherwise from ``existing``), the container image at the
    trigger's ``fieldPath`` is copied from the existing object into the
    new definition so this module does not fight the image trigger
    controller.  Returns the (possibly modified) definition.
    """

    def get_from_fields(d, fields):
        # Walk a nested structure by successive subscripts; None if any
        # step is missing or of the wrong type.
        try:
            return reduce(operator.getitem, fields, d)
        except Exception:
            return None

    def set_from_fields(d, fields, value):
        get_from_fields(d, fields[:-1])[fields[-1]] = value

    if TRIGGER_ANNOTATION in definition['metadata'].get('annotations', {}).keys():
        triggers = yaml.safe_load(definition['metadata']['annotations'][TRIGGER_ANNOTATION] or '[]')
    else:
        # Bug fix: the fallback default must be a dict, not the string
        # '{}' -- str has no .get(), so a missing annotations field on the
        # existing object raised AttributeError here.
        triggers = yaml.safe_load(existing['metadata'].get('annotations', {}).get(TRIGGER_ANNOTATION, '[]'))

    if not isinstance(triggers, list):
        # Malformed annotation: leave the definition untouched.
        return definition

    for trigger in triggers:
        if trigger.get('fieldPath'):
            parsed = self.parse_trigger_fieldpath(trigger['fieldPath'])
            path = parsed.get('path', '').split('.')
            if path:
                existing_containers = get_from_fields(existing, path)
                new_containers = get_from_fields(definition, path)
                # Bug fix: either container list may be absent (None);
                # previously this crashed in get_index/len below.
                if not existing_containers or not new_containers:
                    continue
                if parsed.get('name'):
                    existing_index = self.get_index({'name': parsed['name']}, existing_containers, ['name'])
                    new_index = self.get_index({'name': parsed['name']}, new_containers, ['name'])
                elif parsed.get('index') is not None:
                    existing_index = new_index = int(parsed['index'])
                else:
                    existing_index = new_index = None
                if existing_index is not None and new_index is not None:
                    if existing_index < len(existing_containers) and new_index < len(new_containers):
                        set_from_fields(definition, path + [new_index, 'image'],
                                        get_from_fields(existing, path + [existing_index, 'image']))
    return definition
def resolve_imagestream_triggers(self, existing, definition):
    """Merge server-managed ImageChange trigger state from ``existing``
    into ``definition``.

    For each ImageChange trigger in the new definition this copies the
    already-resolved container image, the ``lastTriggeredImage`` field,
    and any server-injected ``from.namespace`` from the matching existing
    trigger, so that re-applying the definition is idempotent.  Returns
    the (possibly modified) definition.
    """
    # Bug fix: the existing object may have no spec.triggers at all (or an
    # explicit null); iterating None crashed with TypeError below.
    existing_triggers = existing.get('spec', {}).get('triggers') or []
    new_triggers = definition['spec']['triggers']
    existing_containers = existing.get('spec', {}).get('template', {}).get('spec', {}).get('containers', [])
    new_containers = definition.get('spec', {}).get('template', {}).get('spec', {}).get('containers', [])

    for i, trigger in enumerate(new_triggers):
        if trigger.get('type') == 'ImageChange' and trigger.get('imageChangeParams'):
            names = trigger['imageChangeParams'].get('containerNames', [])
            for name in names:
                # Keep the image the cluster already resolved for this container.
                old_container_index = self.get_index({'name': name}, existing_containers, ['name'])
                new_container_index = self.get_index({'name': name}, new_containers, ['name'])
                if old_container_index is not None and new_container_index is not None:
                    image = existing['spec']['template']['spec']['containers'][old_container_index]['image']
                    definition['spec']['template']['spec']['containers'][new_container_index]['image'] = image

            existing_index = self.get_index(trigger['imageChangeParams'],
                                            [x.get('imageChangeParams') for x in existing_triggers],
                                            ['containerNames'])
            if existing_index is not None:
                existing_image = existing_triggers[existing_index].get('imageChangeParams', {}).get('lastTriggeredImage')
                if existing_image:
                    definition['spec']['triggers'][i]['imageChangeParams']['lastTriggeredImage'] = existing_image
                existing_from = existing_triggers[existing_index].get('imageChangeParams', {}).get('from', {})
                new_from = trigger['imageChangeParams'].get('from', {})
                existing_namespace = existing_from.get('namespace')
                # Asymmetric defaults: if 'name' is missing on both sides the
                # comparison is False, so we never copy a namespace blindly.
                existing_name = existing_from.get('name', False)
                new_name = new_from.get('name', True)
                # Re-add the namespace the server injected, but only when the
                # user did not set one and the referenced tag is unchanged.
                add_namespace = existing_namespace and 'namespace' not in new_from.keys() and existing_name == new_name
                if add_namespace:
                    definition['spec']['triggers'][i]['imageChangeParams']['from']['namespace'] = existing_from['namespace']
    return definition
def parse_trigger_fieldpath(self, expression):
    """Parse a trigger ``fieldPath`` expression with TRIGGER_CONTAINER.

    Returns a dict containing ``path`` plus either ``name`` (a container
    name selector) or ``index`` (a numeric selector, converted to int).
    Returns an empty dict when the expression does not match, so callers
    skip malformed annotations gracefully.
    """
    match = TRIGGER_CONTAINER.search(expression)
    if match is None:
        # Bug fix: .groupdict() was previously called on None, raising
        # AttributeError for malformed fieldPath expressions.
        return {}
    parsed = match.groupdict()
    if parsed.get('index'):
        parsed['index'] = int(parsed['index'])
    return parsed
def create_project_request(self, definition):
definition['kind'] = 'ProjectRequest'
result = {'changed': False, 'result': {}}