Update CI - Continue work from #195 (#202)

* Upgrade Ansible and OKD versions for CI

* Use ubi9 and fix sanity

* Use correct pip install

* Try using quotes

* Ensure python3.9

* Upgrade ansible and molecule versions

* Remove DeploymentConfig

DeploymentConfigs are deprecated and now seem to be causing idempotence
problems. Replacing them with Deployments fixes this.
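
For illustration only (the resource names and image below are made up, not taken from this change), a DeploymentConfig in the test scenarios can be replaced by a plain apps/v1 Deployment applied through kubernetes.core.k8s, which reconciles idempotently:

    - name: Create the test workload as a Deployment instead of a DeploymentConfig
      kubernetes.core.k8s:
        state: present
        definition:
          apiVersion: apps/v1
          kind: Deployment
          metadata:
            name: example-app        # hypothetical name
            namespace: testing       # hypothetical namespace
          spec:
            replicas: 1
            selector:
              matchLabels:
                app: example-app
            template:
              metadata:
                labels:
                  app: example-app
              spec:
                containers:
                  - name: example-app
                    image: registry.access.redhat.com/ubi9/ubi-minimal:latest
                    command: ["sleep", "infinity"]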

* Attempt to fix ldap integration tests

Signed-off-by: Alina Buzachis <abuzachis@redhat.com>

* Move sanity and unit tests to GH actions

Signed-off-by: Alina Buzachis <abuzachis@redhat.com>
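
As a rough sketch only (runner image, action versions, and install steps are illustrative assumptions, not the exact workflow added here), a GitHub Actions sanity job for a collection generally looks like this:

    name: Sanity tests
    on:
      pull_request:
      push:
    jobs:
      sanity:
        runs-on: ubuntu-latest
        steps:
          - uses: actions/checkout@v4
            with:
              path: ansible_collections/community/okd
          - uses: actions/setup-python@v4
            with:
              python-version: "3.9"
          - name: Install ansible-core
            run: pip install "ansible-core>=2.14"
          - name: Run sanity tests
            run: ansible-test sanity --docker -v --color
            working-directory: ansible_collections/community/okd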

* First round of sanity fixes

Signed-off-by: Alina Buzachis <abuzachis@redhat.com>

* Add kubernetes.core collection as sanity requirement

Signed-off-by: Alina Buzachis <abuzachis@redhat.com>

* Add ignore-2.16.txt

Signed-off-by: Alina Buzachis <abuzachis@redhat.com>

* Attempt to fix units

Signed-off-by: Alina Buzachis <abuzachis@redhat.com>

* Add ignore-2.17

Signed-off-by: Alina Buzachis <abuzachis@redhat.com>

* Attempt to fix unit tests

Signed-off-by: Alina Buzachis <abuzachis@redhat.com>

* Add pytest-ansible to test-requirements.txt

Signed-off-by: Alina Buzachis <abuzachis@redhat.com>

* Add changelog fragment

Signed-off-by: Alina Buzachis <abuzachis@redhat.com>

* Add workflow for ansible-lint

Signed-off-by: Alina Buzachis <abuzachis@redhat.com>
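
For reference, a bare-bones version of such a workflow might look like the following (the actual job may call a shared or reusable ansible-lint workflow instead):

    name: ansible-lint
    on:
      pull_request:
    jobs:
      lint:
        runs-on: ubuntu-latest
        steps:
          - uses: actions/checkout@v4
          - uses: actions/setup-python@v4
            with:
              python-version: "3.9"
          - name: Install ansible-lint
            run: pip install ansible-lint
          - name: Run ansible-lint
            run: ansible-lint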

* Apply black

Signed-off-by: Alina Buzachis <abuzachis@redhat.com>

* Fix linters

Signed-off-by: Alina Buzachis <abuzachis@redhat.com>

* Add # fmt: skip

Signed-off-by: Alina Buzachis <abuzachis@redhat.com>

* Yet another round of linting

Signed-off-by: Alina Buzachis <abuzachis@redhat.com>

* Yet another round of linting

Signed-off-by: Alina Buzachis <abuzachis@redhat.com>

* Remove setup.cfg

Signed-off-by: Alina Buzachis <abuzachis@redhat.com>

* Revert #fmt

Signed-off-by: Alina Buzachis <abuzachis@redhat.com>

* Use ansible-core 2.14

Signed-off-by: Alina Buzachis <abuzachis@redhat.com>

* Clean up ansible-lint ignores

Signed-off-by: Alina Buzachis <abuzachis@redhat.com>

* Try using service instead of pod IP
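
The idea, sketched here with made-up variable and service names, is to point the LDAP test at the service's stable in-cluster DNS name rather than an ephemeral pod IP:

    # Hypothetical wiring; the real test uses its own variable names.
    ldap_server_uri: "ldap://openldap.{{ test_namespace }}.svc.cluster.local:389"
    # instead of something like:
    # ldap_server_uri: "ldap://{{ openldap_pod.status.podIP }}:389"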

* Fix typo

* Actually use the correct port

* See if NetworkPolicy is preventing connection

* Use Pod internal IP

* Fix adm prune auth roles syntax

* Add some retry steps

* Fix openshift_builds target

* Add the --force-with-deps flag when building the downstream collection

* Remove yamllint from the tox linters, bump the minimum supported Python version to 3.9, and drop support for ansible-core < 2.14
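
The ansible-core floor for a collection is typically declared in meta/runtime.yml; the line below is only a sketch of where such a change usually lands:

    # meta/runtime.yml (sketch)
    requires_ansible: ">=2.14.0"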

---------

Signed-off-by: Alina Buzachis <abuzachis@redhat.com>
Co-authored-by: Mike Graves <mgraves@redhat.com>
Co-authored-by: Alina Buzachis <abuzachis@redhat.com>
Author: Bikouo Aubin
Date: 2023-11-15 18:00:38 +01:00 (committed by GitHub)
Commit: a63e5b7b36 (parent: cb796e1298)
76 changed files with 4364 additions and 3510 deletions


@@ -1,11 +1,11 @@
 # Copyright (c) 2018 Ansible Project
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
 
-from __future__ import (absolute_import, division, print_function)
+from __future__ import absolute_import, division, print_function
 
 __metaclass__ = type
 
-DOCUMENTATION = '''
+DOCUMENTATION = """
     name: openshift
     author:
       - Chris Houseknecht (@chouseknecht)
@@ -94,34 +94,41 @@ DOCUMENTATION = '''
     - "python >= 3.6"
     - "kubernetes >= 12.0.0"
     - "PyYAML >= 3.11"
-'''
+"""
 
-EXAMPLES = '''
+EXAMPLES = """
 # File must be named openshift.yaml or openshift.yml
 
-# Authenticate with token, and return all pods and services for all namespaces
-plugin: community.okd.openshift
-connections:
-  - host: https://192.168.64.4:8443
-    api_key: xxxxxxxxxxxxxxxx
-    verify_ssl: false
+- name: Authenticate with token, and return all pods and services for all namespaces
+  plugin: community.okd.openshift
+  connections:
+    - host: https://192.168.64.4:8443
+      api_key: xxxxxxxxxxxxxxxx
+      verify_ssl: false
 
-# Use default config (~/.kube/config) file and active context, and return objects for a specific namespace
-plugin: community.okd.openshift
-connections:
-  - namespaces:
-    - testing
+- name: Use default config (~/.kube/config) file and active context, and return objects for a specific namespace
+  plugin: community.okd.openshift
+  connections:
+    - namespaces:
+        - testing
 
-# Use a custom config file, and a specific context.
-plugin: community.okd.openshift
-connections:
-  - kubeconfig: /path/to/config
-    context: 'awx/192-168-64-4:8443/developer'
-'''
+- name: Use a custom config file, and a specific context.
+  plugin: community.okd.openshift
+  connections:
+    - kubeconfig: /path/to/config
+      context: 'awx/192-168-64-4:8443/developer'
+"""
 
 try:
-    from ansible_collections.kubernetes.core.plugins.inventory.k8s import K8sInventoryException, InventoryModule as K8sInventoryModule, format_dynamic_api_exc
-    from ansible_collections.kubernetes.core.plugins.module_utils.k8s.client import get_api_client
+    from ansible_collections.kubernetes.core.plugins.inventory.k8s import (
+        K8sInventoryException,
+        InventoryModule as K8sInventoryModule,
+        format_dynamic_api_exc,
+    )
+    from ansible_collections.kubernetes.core.plugins.module_utils.k8s.client import (
+        get_api_client,
+    )
 
     HAS_KUBERNETES_COLLECTION = True
 except ImportError as e:
     HAS_KUBERNETES_COLLECTION = False
@@ -134,22 +141,26 @@ except ImportError:
 class InventoryModule(K8sInventoryModule):
-    NAME = 'community.okd.openshift'
+    NAME = "community.okd.openshift"
 
-    connection_plugin = 'community.okd.oc'
-    transport = 'oc'
+    connection_plugin = "community.okd.oc"
+    transport = "oc"
 
     def check_kubernetes_collection(self):
         if not HAS_KUBERNETES_COLLECTION:
-            K8sInventoryException("The kubernetes.core collection must be installed")
+            raise K8sInventoryException(
+                "The kubernetes.core collection must be installed"
+            )
 
     def fetch_objects(self, connections):
         self.check_kubernetes_collection()
         super(InventoryModule, self).fetch_objects(connections)
 
-        self.display.deprecated("The 'openshift' inventory plugin has been deprecated and will be removed in release 4.0.0",
-                                version='4.0.0', collection_name='community.okd')
+        self.display.deprecated(
+            "The 'openshift' inventory plugin has been deprecated and will be removed in release 4.0.0",
+            version="4.0.0",
+            collection_name="community.okd",
+        )
 
         if connections:
             if not isinstance(connections, list):
@@ -157,9 +168,11 @@ class InventoryModule(K8sInventoryModule):
             for connection in connections:
                 client = get_api_client(**connection)
-                name = connection.get('name', self.get_default_host_name(client.configuration.host))
-                if connection.get('namespaces'):
-                    namespaces = connection['namespaces']
+                name = connection.get(
+                    "name", self.get_default_host_name(client.configuration.host)
+                )
+                if connection.get("namespaces"):
+                    namespaces = connection["namespaces"]
                 else:
                     namespaces = self.get_available_namespaces(client)
                 for namespace in namespaces:
@@ -173,15 +186,19 @@ class InventoryModule(K8sInventoryModule):
     def get_routes_for_namespace(self, client, name, namespace):
         self.check_kubernetes_collection()
-        v1_route = client.resources.get(api_version='route.openshift.io/v1', kind='Route')
+        v1_route = client.resources.get(
+            api_version="route.openshift.io/v1", kind="Route"
+        )
         try:
             obj = v1_route.get(namespace=namespace)
         except DynamicApiError as exc:
             self.display.debug(exc)
-            raise K8sInventoryException('Error fetching Routes list: %s' % format_dynamic_api_exc(exc))
+            raise K8sInventoryException(
+                "Error fetching Routes list: %s" % format_dynamic_api_exc(exc)
+            )
 
-        namespace_group = 'namespace_{0}'.format(namespace)
-        namespace_routes_group = '{0}_routes'.format(namespace_group)
+        namespace_group = "namespace_{0}".format(namespace)
+        namespace_routes_group = "{0}_routes".format(namespace_group)
 
         self.inventory.add_group(name)
         self.inventory.add_group(namespace_group)
@@ -190,14 +207,18 @@ class InventoryModule(K8sInventoryModule):
         self.inventory.add_child(namespace_group, namespace_routes_group)
         for route in obj.items:
             route_name = route.metadata.name
-            route_annotations = {} if not route.metadata.annotations else dict(route.metadata.annotations)
+            route_annotations = (
+                {}
+                if not route.metadata.annotations
+                else dict(route.metadata.annotations)
+            )
 
             self.inventory.add_host(route_name)
 
             if route.metadata.labels:
                 # create a group for each label_value
                 for key, value in route.metadata.labels:
-                    group_name = 'label_{0}_{1}'.format(key, value)
+                    group_name = "label_{0}_{1}".format(key, value)
                     self.inventory.add_group(group_name)
                     self.inventory.add_child(group_name, route_name)
                 route_labels = dict(route.metadata.labels)
@@ -207,19 +228,25 @@ class InventoryModule(K8sInventoryModule):
             self.inventory.add_child(namespace_routes_group, route_name)
 
             # add hostvars
-            self.inventory.set_variable(route_name, 'labels', route_labels)
-            self.inventory.set_variable(route_name, 'annotations', route_annotations)
-            self.inventory.set_variable(route_name, 'cluster_name', route.metadata.clusterName)
-            self.inventory.set_variable(route_name, 'object_type', 'route')
-            self.inventory.set_variable(route_name, 'self_link', route.metadata.selfLink)
-            self.inventory.set_variable(route_name, 'resource_version', route.metadata.resourceVersion)
-            self.inventory.set_variable(route_name, 'uid', route.metadata.uid)
+            self.inventory.set_variable(route_name, "labels", route_labels)
+            self.inventory.set_variable(route_name, "annotations", route_annotations)
+            self.inventory.set_variable(
+                route_name, "cluster_name", route.metadata.clusterName
+            )
+            self.inventory.set_variable(route_name, "object_type", "route")
+            self.inventory.set_variable(
+                route_name, "self_link", route.metadata.selfLink
+            )
+            self.inventory.set_variable(
+                route_name, "resource_version", route.metadata.resourceVersion
+            )
+            self.inventory.set_variable(route_name, "uid", route.metadata.uid)
 
             if route.spec.host:
-                self.inventory.set_variable(route_name, 'host', route.spec.host)
+                self.inventory.set_variable(route_name, "host", route.spec.host)
 
             if route.spec.path:
-                self.inventory.set_variable(route_name, 'path', route.spec.path)
+                self.inventory.set_variable(route_name, "path", route.spec.path)
 
-            if hasattr(route.spec.port, 'targetPort') and route.spec.port.targetPort:
-                self.inventory.set_variable(route_name, 'port', dict(route.spec.port))
+            if hasattr(route.spec.port, "targetPort") and route.spec.port.targetPort:
+                self.inventory.set_variable(route_name, "port", dict(route.spec.port))