mirror of
https://github.com/openshift/community.okd.git
synced 2026-05-07 21:52:37 +00:00
* Upgrade Ansible and OKD versions for CI * Use ubi9 and fix sanity * Use correct pip install * Try using quotes * Ensure python3.9 * Upgrade ansible and molecule versions * Remove DeploymentConfig DeploymentConfigs are deprecated and seem to now be causing idempotence problems. Replacing them with Deployments fixes it. * Attempt to fix ldap integration tests Signed-off-by: Alina Buzachis <abuzachis@redhat.com> * Move sanity and unit tests to GH actions Signed-off-by: Alina Buzachis <abuzachis@redhat.com> * First round of sanity fixes Signed-off-by: Alina Buzachis <abuzachis@redhat.com> * Add kubernetes.core collection as sanity requirement Signed-off-by: Alina Buzachis <abuzachis@redhat.com> * Add ignore-2.16.txt Signed-off-by: Alina Buzachis <abuzachis@redhat.com> * Attempt to fix units Signed-off-by: Alina Buzachis <abuzachis@redhat.com> * Add ignore-2.17 Signed-off-by: Alina Buzachis <abuzachis@redhat.com> * Attempt to fix unit tests Signed-off-by: Alina Buzachis <abuzachis@redhat.com> * Add pytest-ansible to test-requirements.txt Signed-off-by: Alina Buzachis <abuzachis@redhat.com> * Add changelog fragment Signed-off-by: Alina Buzachis <abuzachis@redhat.com> * Add workflow for ansible-lint Signed-off-by: Alina Buzachis <abuzachis@redhat.com> * Apply black Signed-off-by: Alina Buzachis <abuzachis@redhat.com> * Fix linters Signed-off-by: Alina Buzachis <abuzachis@redhat.com> * Add # fmt: skip Signed-off-by: Alina Buzachis <abuzachis@redhat.com> * Yet another round of linting Signed-off-by: Alina Buzachis <abuzachis@redhat.com> * Yet another round of linting Signed-off-by: Alina Buzachis <abuzachis@redhat.com> * Remove setup.cfg Signed-off-by: Alina Buzachis <abuzachis@redhat.com> * Revert #fmt Signed-off-by: Alina Buzachis <abuzachis@redhat.com> * Use ansible-core 2.14 Signed-off-by: Alina Buzachis <abuzachis@redhat.com> * Cleanup ansible-lint ignores Signed-off-by: Alina Buzachis <abuzachis@redhat.com> * Try using service instead of pod IP * Fix typo * Actually 
use the correct port * See if NetworkPolicy is preventing connection * using Pod internal IP * fix adm prune auth roles syntax * adding some retry steps * fix: openshift_builds target * add flag --force-with-deps when building downstream collection * Remove yamllint from tox linters, bump minimum python supported version to 3.9, Remove support for ansible-core < 2.14 --------- Signed-off-by: Alina Buzachis <abuzachis@redhat.com> Co-authored-by: Mike Graves <mgraves@redhat.com> Co-authored-by: Alina Buzachis <abuzachis@redhat.com>
This commit is contained in:
@@ -1,14 +1,16 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
from __future__ import absolute_import, division, print_function
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
from datetime import datetime, timezone
|
||||
import traceback
|
||||
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
from ansible_collections.community.okd.plugins.module_utils.openshift_common import AnsibleOpenshiftModule
|
||||
from ansible_collections.community.okd.plugins.module_utils.openshift_common import (
|
||||
AnsibleOpenshiftModule,
|
||||
)
|
||||
|
||||
try:
|
||||
from kubernetes import client
|
||||
@@ -23,7 +25,9 @@ def get_deploymentconfig_for_replicationcontroller(replica_controller):
|
||||
# This is set on replication controller pod template by deployer controller.
|
||||
DeploymentConfigAnnotation = "openshift.io/deployment-config.name"
|
||||
try:
|
||||
deploymentconfig_name = replica_controller['metadata']['annotations'].get(DeploymentConfigAnnotation)
|
||||
deploymentconfig_name = replica_controller["metadata"]["annotations"].get(
|
||||
DeploymentConfigAnnotation
|
||||
)
|
||||
if deploymentconfig_name is None or deploymentconfig_name == "":
|
||||
return None
|
||||
return deploymentconfig_name
|
||||
@@ -32,7 +36,6 @@ def get_deploymentconfig_for_replicationcontroller(replica_controller):
|
||||
|
||||
|
||||
class OpenShiftAdmPruneDeployment(AnsibleOpenshiftModule):
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
super(OpenShiftAdmPruneDeployment, self).__init__(**kwargs)
|
||||
|
||||
@@ -41,27 +44,33 @@ class OpenShiftAdmPruneDeployment(AnsibleOpenshiftModule):
|
||||
return get_deploymentconfig_for_replicationcontroller(obj) is not None
|
||||
|
||||
def _zeroReplicaSize(obj):
|
||||
return obj['spec']['replicas'] == 0 and obj['status']['replicas'] == 0
|
||||
return obj["spec"]["replicas"] == 0 and obj["status"]["replicas"] == 0
|
||||
|
||||
def _complete_failed(obj):
|
||||
DeploymentStatusAnnotation = "openshift.io/deployment.phase"
|
||||
try:
|
||||
# validate that replication controller status is either 'Complete' or 'Failed'
|
||||
deployment_phase = obj['metadata']['annotations'].get(DeploymentStatusAnnotation)
|
||||
return deployment_phase in ('Failed', 'Complete')
|
||||
deployment_phase = obj["metadata"]["annotations"].get(
|
||||
DeploymentStatusAnnotation
|
||||
)
|
||||
return deployment_phase in ("Failed", "Complete")
|
||||
except Exception:
|
||||
return False
|
||||
|
||||
def _younger(obj):
|
||||
creation_timestamp = datetime.strptime(obj['metadata']['creationTimestamp'], '%Y-%m-%dT%H:%M:%SZ')
|
||||
creation_timestamp = datetime.strptime(
|
||||
obj["metadata"]["creationTimestamp"], "%Y-%m-%dT%H:%M:%SZ"
|
||||
)
|
||||
now = datetime.now(timezone.utc).replace(tzinfo=None)
|
||||
age = (now - creation_timestamp).seconds / 60
|
||||
return age > self.params['keep_younger_than']
|
||||
return age > self.params["keep_younger_than"]
|
||||
|
||||
def _orphan(obj):
|
||||
try:
|
||||
# verify if the deploymentconfig associated to the replication controller is still existing
|
||||
deploymentconfig_name = get_deploymentconfig_for_replicationcontroller(obj)
|
||||
deploymentconfig_name = get_deploymentconfig_for_replicationcontroller(
|
||||
obj
|
||||
)
|
||||
params = dict(
|
||||
kind="DeploymentConfig",
|
||||
api_version="apps.openshift.io/v1",
|
||||
@@ -69,14 +78,14 @@ class OpenShiftAdmPruneDeployment(AnsibleOpenshiftModule):
|
||||
namespace=obj["metadata"]["name"],
|
||||
)
|
||||
exists = self.kubernetes_facts(**params)
|
||||
return not (exists.get['api_found'] and len(exists['resources']) > 0)
|
||||
return not (exists.get["api_found"] and len(exists["resources"]) > 0)
|
||||
except Exception:
|
||||
return False
|
||||
|
||||
predicates = [_deployment, _zeroReplicaSize, _complete_failed]
|
||||
if self.params['orphans']:
|
||||
if self.params["orphans"]:
|
||||
predicates.append(_orphan)
|
||||
if self.params['keep_younger_than']:
|
||||
if self.params["keep_younger_than"]:
|
||||
predicates.append(_younger)
|
||||
|
||||
results = replicacontrollers.copy()
|
||||
@@ -86,8 +95,8 @@ class OpenShiftAdmPruneDeployment(AnsibleOpenshiftModule):
|
||||
|
||||
def execute_module(self):
|
||||
# list replicationcontroller candidate for pruning
|
||||
kind = 'ReplicationController'
|
||||
api_version = 'v1'
|
||||
kind = "ReplicationController"
|
||||
api_version = "v1"
|
||||
resource = self.find_resource(kind=kind, api_version=api_version, fail=True)
|
||||
|
||||
# Get ReplicationController
|
||||
@@ -103,7 +112,7 @@ class OpenShiftAdmPruneDeployment(AnsibleOpenshiftModule):
|
||||
self.exit_json(changed=False, replication_controllers=[])
|
||||
|
||||
changed = True
|
||||
delete_options = client.V1DeleteOptions(propagation_policy='Background')
|
||||
delete_options = client.V1DeleteOptions(propagation_policy="Background")
|
||||
replication_controllers = []
|
||||
for replica in candidates:
|
||||
try:
|
||||
@@ -111,12 +120,18 @@ class OpenShiftAdmPruneDeployment(AnsibleOpenshiftModule):
|
||||
if not self.check_mode:
|
||||
name = replica["metadata"]["name"]
|
||||
namespace = replica["metadata"]["namespace"]
|
||||
result = resource.delete(name=name, namespace=namespace, body=delete_options).to_dict()
|
||||
result = resource.delete(
|
||||
name=name, namespace=namespace, body=delete_options
|
||||
).to_dict()
|
||||
replication_controllers.append(result)
|
||||
except DynamicApiError as exc:
|
||||
msg = "Failed to delete ReplicationController {namespace}/{name} due to: {msg}".format(namespace=namespace, name=name, msg=exc.body)
|
||||
msg = "Failed to delete ReplicationController {namespace}/{name} due to: {msg}".format(
|
||||
namespace=namespace, name=name, msg=exc.body
|
||||
)
|
||||
self.fail_json(msg=msg)
|
||||
except Exception as e:
|
||||
msg = "Failed to delete ReplicationController {namespace}/{name} due to: {msg}".format(namespace=namespace, name=name, msg=to_native(e))
|
||||
msg = "Failed to delete ReplicationController {namespace}/{name} due to: {msg}".format(
|
||||
namespace=namespace, name=name, msg=to_native(e)
|
||||
)
|
||||
self.fail_json(msg=msg)
|
||||
self.exit_json(changed=changed, replication_controllers=replication_controllers)
|
||||
|
||||
Reference in New Issue
Block a user