Handles deleting and recreating statefulset and deployment when needed

This commit is contained in:
Marcelo Moreira de Mello
2021-04-21 14:58:21 -04:00
parent 09652056b7
commit 597356f317
6 changed files with 61 additions and 57 deletions

View File

@@ -483,11 +483,9 @@ Example spec file for volumes and volume mounts
### From Older Versions
For `AWX` instances created by the `awx-operator<=0.0.7` a manual upgrade will be required. On version `0.0.8` a new set of `labels` were introduced to the PostgreSQL `statefulset` (when applied) and the AWX `deployment`. Since `selector.matchLabels` are `read-only` in the Kubernetes API, the resource must be deleted and recreated.
For `AWX` instances created by the `awx-operator<0.0.8`, both the PostgreSQL `statefulset` and the AWX `deployment` resources must be deleted and recreated. This is required because new labels were added to both resources, and the Kubernetes API enforces `selector.matchLabels` attributes to be `read-only`.
To avoid deleting data from environments in which the `persistence` layer is not configured to retain data, the `awx-operator` will not delete these resources automatically; it is up to the system administrator to perform this operation. For instances created with `awx-operator>=0.0.8`, no manual intervention shall be necessary.
For example, if you are running a `managed` PostgreSQL configured instance and have the cluster storage layer to maintain the `PV` storage area upon the `statefulset` deletion, you could just delete the `awx` resource and recreate it on the newer version.
The `awx-operator` will handle upgrading both resources. Note that only the `statefulset` and `deployment` will be recreated. Therefore, any `persistent volume` used by either of these two resources **shall not be deleted**.
## Development

View File

@@ -0,0 +1,19 @@
---
- name: Check for presence of Deployment
k8s_info:
api_version: v1
kind: Deployment
name: "{{ meta.name }}"
namespace: "{{ meta.namespace }}"
register: tower_deployment
- name: Scale down Deployment for migration
k8s_scale:
api_version: v1
kind: Deployment
name: "{{ meta.name }}"
namespace: "{{ meta.namespace }}"
replicas: 0
wait: yes
when: tower_deployment['resources'] | length

View File

@@ -72,28 +72,32 @@
name: '{{ meta.name }}-postgres'
register: _postgres_statefulset_result
- name: Set manual_statefulset_upgrade to false since PostgreSQL statefulset does not exist yet
- name: Determine if PostgreSQL statefulset must be recreated
set_fact:
manual_statefulset_upgrade: False
when: not _postgres_statefulset_result['resources'] | length
# yamllint disable-line rule:line-length
recreate_statefulset: '{{ _postgres_statefulset_result["resources"][0]["metadata"]["labels"]["app.kubernetes.io/managed-by"] | default("") | ternary("False", "True") }}' # noqa 204
when: _postgres_statefulset_result['resources'] | length
- name: Determine if PostgreSQL statefulset requires manual upgrade
set_fact:
manual_statefulset_upgrade: '{{ _postgres_statefulset_result["resources"][0]["metadata"]["labels"]["app.kubernetes.io/managed-by"] | default("") | ternary("False", "True") }}'
when: manual_statefulset_upgrade is not defined
- block:
- name: Check and scale down deployment
include_tasks: check_and_scale_down_deployment.yml
- name: Show help message since PostgreSQL statefulset was created by a legacy version
debug:
var: manual_upgrade_msg
when: manual_statefulset_upgrade|bool
- name: Delete PostgreSQL statefulset to ensure expected labels
k8s:
state: absent
api_version: v1
kind: StatefulSet
namespace: '{{ meta.namespace }}'
name: '{{ meta.name }}-postgres'
wait: yes
when: recreate_statefulset is defined and recreate_statefulset|bool
- name: Create Database if no database is specified
k8s:
apply: true
definition: "{{ lookup('template', 'tower_postgres.yaml.j2') }}"
when:
- pg_config['resources'][0]['data']['type'] | default('') | b64decode == 'managed'
- not manual_statefulset_upgrade|bool
- pg_config['resources'][0]['data']['type'] | default('') | b64decode == 'managed' or recreate_statefulset|bool
- name: Store Database Configuration
set_fact:

View File

@@ -24,22 +24,8 @@
set_fact:
postgres_pod_name: "{{ postgres_pod['resources'][0]['metadata']['name'] }}"
- name: Check for presence of Deployment
k8s_info:
api_version: v1
kind: Deployment
name: "{{ meta.name }}"
namespace: "{{ meta.namespace }}"
register: tower_deployment
- name: Scale down Deployment for migration
k8s_scale:
api_version: v1
kind: Deployment
name: "{{ meta.name }}"
namespace: "{{ meta.namespace }}"
replicas: 0
when: tower_deployment['resources'] | length
- name: Check and scale down deployment
include_tasks: check_and_scale_down_deployment.yml
- name: Set pg_dump command
set_fact:

View File

@@ -8,24 +8,11 @@
name: '{{ meta.name }}'
register: _deployment_result
- name: Set manual_deployment_upgrade to false since deployment does not exist yet
- name: Determine if deployment must be recreated
set_fact:
manual_deployment_upgrade: False
when: not _deployment_result['resources'] | length
- name: Determine if deployment requires manual upgrade
set_fact:
manual_deployment_upgrade: '{{ _deployment_result["resources"][0]["metadata"]["labels"]["app.kubernetes.io/managed-by"] | default("") | ternary("False", "True") }}'
when: manual_deployment_upgrade is not defined
- name: Determine pod label_selector to be used
set_fact:
tower_pod_label_selector: '{{ manual_deployment_upgrade|bool | default("false"|bool)| ternary("app={{ deployment_type }}", "app.kubernetes.io/name={{ meta.name }}") }}'
- name: Show help message since deployment was created by a legacy version
debug:
var: manual_upgrade_msg
when: manual_deployment_upgrade|bool
# yamllint disable-line rule:line-length
recreate_deployment: '{{ _deployment_result["resources"][0]["metadata"]["labels"]["app.kubernetes.io/managed-by"] | default("") | ternary("False", "True") }}' # noqa 204
when: _deployment_result['resources'] | length
- name: Get the current resource pod information.
k8s_info:
@@ -33,7 +20,9 @@
kind: Pod
namespace: '{{ meta.namespace }}'
label_selectors:
- '{{ tower_pod_label_selector }}'
- "app.kubernetes.io/name={{ meta.name }}"
- "app.kubernetes.io/managed-by=awx-operator"
- "app.kubernetes.io/component=awx"
field_selectors:
- status.phase=Running
register: tower_pods
@@ -43,6 +32,16 @@
set_fact:
tower_pod_name: "{{ tower_pods['resources'][0]['metadata']['name'] | default('') }}"
- name: Delete deployment to ensure expected labels
k8s:
state: absent
api_version: v1
kind: Deployment
namespace: '{{ meta.namespace }}'
name: '{{ meta.name }}'
wait: yes
when: recreate_deployment is defined and recreate_deployment|bool
- name: Apply Resources
k8s:
apply: yes
@@ -56,7 +55,6 @@
- 'tower_persistent'
- 'tower_service'
- 'tower_ingress'
when: not manual_deployment_upgrade|bool
- name: Apply deployment resources
k8s:
@@ -64,10 +62,9 @@
definition: "{{ lookup('template', 'tower_deployment.yaml.j2') | from_yaml }}"
wait: yes
register: tower_deployment_result
when: not manual_deployment_upgrade|bool
- block:
- name: Delete pod to reload configuration
- name: Delete pod to reload a resource configuration
k8s:
api_version: v1
state: absent
@@ -84,7 +81,9 @@
kind: Pod
namespace: '{{ meta.namespace }}'
label_selectors:
- '{{ tower_pod_label_selector }}'
- "app.kubernetes.io/name={{ meta.name }}"
- "app.kubernetes.io/managed-by=awx-operator"
- "app.kubernetes.io/component=awx"
field_selectors:
- status.phase=Running
register: _new_pod
@@ -93,7 +92,6 @@
- _new_pod['resources'][0]['metadata']['name'] != tower_pod_name
delay: 5
retries: 60
when: not manual_deployment_upgrade|bool
- name: Update new resource pod name as a variable.
set_fact:

View File

@@ -3,4 +3,3 @@ postgres_initdb_args: '--auth-host=scram-sha-256'
postgres_host_auth_method: 'scram-sha-256'
ldap_cacert_ca_crt: ''
tower_projects_existing_claim: ''
manual_upgrade_msg: '{{ meta.name }} requires a manual upgrade. Please refer to the upgrade documentation at https://github.com/ansible/awx-operator#upgrade-notes'