Compare commits

..

1 Commits

Author SHA1 Message Date
Hao Liu
d2818931c9 Update Matrix section with CI-TEST channel
Added CI-TEST channel to Matrix section.
2025-10-30 10:44:26 -04:00
42 changed files with 725 additions and 873 deletions

View File

@@ -16,7 +16,7 @@ jobs:
- --skip-tags=replicas
- -t replicas
env:
DOCKER_API_VERSION: "1.44"
DOCKER_API_VERSION: "1.41"
DEBUG_OUTPUT_DIR: /tmp/awx_operator_molecule_test
steps:
- uses: actions/checkout@v4

View File

@@ -0,0 +1,86 @@
name: Publish AWX Operator on operator-hub
on:
release:
types: [published]
workflow_dispatch:
inputs:
tag_name:
description: 'Name for the tag of the release.'
required: true
operator_hub_fork:
description: 'Fork of operator-hub where the PR will be created from. default: awx-auto'
required: true
default: 'awx-auto'
image_registry:
description: 'Image registry where the image is published to. default: quay.io'
required: true
default: 'quay.io'
image_registry_organization:
description: 'Image registry organization where the image is published to. default: ansible'
required: true
default: 'ansible'
community_operator_github_org:
description: 'Github organization for community-opeartor project. default: k8s-operatorhub'
required: true
default: 'k8s-operatorhub'
community_operator_prod_github_org:
description: 'GitHub organization for community-operator-prod project. default: redhat-openshift-ecosystem'
required: true
default: 'redhat-openshift-ecosystem'
jobs:
promote:
runs-on: ubuntu-latest
steps:
- name: Set GITHUB_ENV from workflow_dispatch event
if: ${{ github.event_name == 'workflow_dispatch' }}
run: |
echo "VERSION=${{ github.event.inputs.tag_name }}" >> $GITHUB_ENV
echo "IMAGE_REGISTRY=${{ github.event.inputs.image_registry }}" >> $GITHUB_ENV
echo "IMAGE_REGISTRY_ORGANIZATION=${{ github.event.inputs.image_registry_organization }}" >> $GITHUB_ENV
echo "COMMUNITY_OPERATOR_GITHUB_ORG=${{ github.event.inputs.community_operator_github_org }}" >> $GITHUB_ENV
echo "COMMUNITY_OPERATOR_PROD_GITHUB_ORG=${{ github.event.inputs.community_operator_prod_github_org }}" >> $GITHUB_ENV
- name: Set GITHUB_ENV for release event
if: ${{ github.event_name == 'release' }}
run: |
echo "VERSION=${{ github.event.release.tag_name }}" >> $GITHUB_ENV
echo "IMAGE_REGISTRY=quay.io" >> $GITHUB_ENV
echo "IMAGE_REGISTRY_ORGANIZATION=ansible" >> $GITHUB_ENV
echo "COMMUNITY_OPERATOR_GITHUB_ORG=k8s-operatorhub" >> $GITHUB_ENV
echo "COMMUNITY_OPERATOR_PROD_GITHUB_ORG=redhat-openshift-ecosystem" >> $GITHUB_ENV
- name: Log in to image registry
run: |
echo ${{ secrets.QUAY_TOKEN }} | docker login ${{ env.IMAGE_REGISTRY }} -u ${{ secrets.QUAY_USER }} --password-stdin
- name: Checkout awx-operator at workflow branch
uses: actions/checkout@v4
with:
path: awx-operator
- name: Checkout awx-opearator at ${{ env.VERSION }}
uses: actions/checkout@v4
with:
fetch-tags: true
ref: ${{ env.VERSION }}
path: awx-operator-${{ env.VERSION }}
fetch-depth: 0 # fetch all history so that git describe works
- name: Copy scripts to awx-operator-${{ env.VERSION }}
run: |
cp -f \
awx-operator/hack/publish-to-operator-hub.sh \
awx-operator-${{ env.VERSION }}/hack/publish-to-operator-hub.sh
cp -f \
awx-operator/Makefile \
awx-operator-${{ env.VERSION }}/Makefile
- name: Build and publish bundle to operator-hub
working-directory: awx-operator-${{ env.VERSION }}
env:
IMG_REPOSITORY: ${{ env.IMAGE_REGISTRY }}/${{ env.IMAGE_REGISTRY_ORGANIZATION }}
GITHUB_TOKEN: ${{ secrets.AWX_AUTO_GITHUB_TOKEN }}
run: |
git config --global user.email "awx-automation@redhat.com"
git config --global user.name "AWX Automation"
./hack/publish-to-operator-hub.sh

View File

@@ -1,58 +1,147 @@
# Contributing to AWX Operator
# AWX-Operator Contributing Guidelines
Hi there! We're excited to have you as a contributor.
Have questions about this document or anything not covered here? Please file an issue at [https://github.com/ansible/awx-operator/issues](https://github.com/ansible/awx-operator/issues).
Have questions about this document or anything not covered here? Please file a new at [https://github.com/ansible/awx-operator/issues](https://github.com/ansible/awx-operator/issues).
## Table of contents
- [AWX-Operator Contributing Guidelines](#awx-operator-contributing-guidelines)
- [Table of contents](#table-of-contents)
- [Things to know prior to submitting code](#things-to-know-prior-to-submitting-code)
- [Submmiting your work](#submmiting-your-work)
- [Development](#development)
- [Testing](#testing)
- [Testing in Kind](#testing-in-kind)
- [Testing in Minikube](#testing-in-minikube)
- [Generating a bundle](#generating-a-bundle)
- [Reporting Issues](#reporting-issues)
## Things to know prior to submitting code
- All code submissions are done through pull requests against the `devel` branch.
- All PRs must have a single commit. Make sure to `squash` any changes into a single commit.
- Take care to make sure no merge commits are in the submission, and use `git rebase` vs `git merge` for this reason.
- If collaborating with someone else on the same branch, consider using `--force-with-lease` instead of `--force`. This will prevent you from accidentally overwriting commits pushed by someone else. For more information, see [git push --force-with-lease](https://git-scm.com/docs/git-push#git-push---force-with-leaseltrefnamegt).
- We ask all of our community members and contributors to adhere to the [Ansible code of conduct](http://docs.ansible.com/ansible/latest/community/code_of_conduct.html). If you have questions, or need assistance, please reach out to our community team at [codeofconduct@ansible.com](mailto:codeofconduct@ansible.com).
- If collaborating with someone else on the same branch, consider using `--force-with-lease` instead of `--force`. This will prevent you from accidentally overwriting commits pushed by someone else. For more information, see https://git-scm.com/docs/git-push#git-push---force-with-leaseltrefnamegt
- We ask all of our community members and contributors to adhere to the [Ansible code of conduct](http://docs.ansible.com/ansible/latest/community/code_of_conduct.html). If you have questions, or need assistance, please reach out to our community team at [codeofconduct@ansible.com](mailto:codeofconduct@ansible.com)
## Setting up your development environment
See [docs/development.md](docs/development.md) for prerequisites, build/deploy instructions, and available Makefile targets.
## Submitting your work
1. From your fork's `devel` branch, create a new branch to stage your changes.
## Submmiting your work
1. From your fork `devel` branch, create a new branch to stage your changes.
```sh
git checkout -b <branch-name>
#> git checkout -b <branch-name>
```
2. Make your changes.
3. Test your changes (see [Testing](#testing) below).
4. Commit your changes.
3. Test your changes according described on the Testing section.
4. If everything looks correct, commit your changes.
```sh
git add <FILES>
git commit -m "My message here"
#> git add <FILES>
#> git commit -m "My message here"
```
5. Create your [pull request](https://github.com/ansible/awx-operator/pulls).
5. Create your [pull request](https://github.com/ansible/awx-operator/pulls)
> **Note**: If you have multiple commits, make sure to `squash` them into a single commit before submitting.
**Note**: If you have multiple commits, make sure to `squash` your commits into a single commit which will facilitate our release process.
## Development
The development environment consists of running an [`up.sh`](./up.sh) and a [`down.sh`](./down.sh) script, which applies or deletes yaml on the Openshift or K8s cluster you are connected to. See the [development.md](docs/development.md) for information on how to deploy and test changes from your branch.
## Testing
All changes must be tested before submission:
This Operator includes a [Molecule](https://ansible.readthedocs.io/projects/molecule/)-based test environment, which can be executed standalone in Docker (e.g. in CI or in a single Docker container anywhere), or inside any kind of Kubernetes cluster (e.g. Minikube).
You need to make sure you have Molecule installed before running the following commands. You can install Molecule with:
```sh
#> python -m pip install molecule-plugins[docker]
```
Running `molecule test` sets up a clean environment, builds the operator, runs all configured tests on an example operator instance, then tears down the environment (at least in the case of Docker).
If you want to actively develop the operator, use `molecule converge`, which does everything but tear down the environment at the end.
#### Testing in Kind
Testing with a kind cluster is the recommended way to test the awx-operator locally. First, you need to install kind if you haven't already. Please see these docs for setting that up:
* https://kind.sigs.k8s.io/docs/user/quick-start/
To run the tests, from the root of your checkout, run the following command:
```sh
#> molecule test -s kind
```
#### Testing in Minikube
```sh
#> minikube start --memory 8g --cpus 4
#> minikube addons enable ingress
#> molecule test -s test-minikube
```
[Minikube](https://kubernetes.io/docs/tasks/tools/install-minikube/) is a more full-featured test environment running inside a full VM on your computer, with an assigned IP address. This makes it easier to test things like NodePort services and Ingress from outside the Kubernetes cluster (e.g. in a browser on your computer).
Once the operator is deployed, you can visit the AWX UI in your browser by following these steps:
1. Make sure you have an entry like `IP_ADDRESS example-awx.test` in your `/etc/hosts` file. (Get the IP address with `minikube ip`.)
2. Visit `http://example-awx.test/` in your browser. (Default admin login is `test`/`changeme`.)
Alternatively, you can also update the service `awx-service` in your namespace to use the type `NodePort` and use following command to get the URL to access your AWX instance:
```sh
#> minikube service <serviceName> -n <namespaceName> --url
```
## Generating a bundle
> :warning: operator-sdk version 0.19.4 is needed to run the following commands
If one has the Operator Lifecycle Manager (OLM) installed, the following steps is the process to generate the bundle that would nicely display in the OLM interface.
At the root of this directory:
1. Build and publish the operator
```
#> operator-sdk build registry.example.com/ansible/awx-operator:mytag
#> podman push registry.example.com/ansible/awx-operator:mytag
```
2. Build and publish the bundle
```
#> podman build . -f bundle.Dockerfile -t registry.example.com/ansible/awx-operator-bundle:mytag
#> podman push registry.example.com/ansible/awx-operator-bundle:mytag
```
3. Build and publish an index with your bundle in it
```
#> opm index add --bundles registry.example.com/ansible/awx-operator-bundle:mytag --tag registry.example.com/ansible/awx-operator-catalog:mytag
#> podman push registry.example.com/ansible/awx-operator-catalog:mytag
```
4. In your Kubernetes create a new CatalogSource pointing to `registry.example.com/ansible/awx-operator-catalog:mytag`
```
---
apiVersion: operators.coreos.com/v1alpha1
kind: CatalogSource
metadata:
name: <catalogsource-name>
namespace: <namespace>
spec:
displayName: 'myoperatorhub'
image: registry.example.com/ansible/awx-operator-catalog:mytag
publisher: 'myoperatorhub'
sourceType: grpc
```
Applying this template will do it. Once the CatalogSource is in a READY state, the bundle should be available on the OperatorHub tab (as part of the custom CatalogSource that just got added)
5. Enjoy
- **Linting** (required for all PRs): `make lint`
- **Molecule tests** (recommended): The operator includes a [Molecule](https://ansible.readthedocs.io/projects/molecule/)-based test environment for integration testing. See the [Testing section in docs/development.md](docs/development.md#testing) for detailed instructions on running tests locally.
## Reporting Issues
We welcome your feedback, and encourage you to file an issue when you run into a problem at [https://github.com/ansible/awx-operator/issues](https://github.com/ansible/awx-operator/issues).
## Getting Help
### Forum
Join the [Ansible Forum](https://forum.ansible.com) for questions, help, and development discussions. Search for posts tagged with [`awx-operator`](https://forum.ansible.com/tag/awx-operator) or start a new discussion.
### Matrix
For real-time conversations:
* [#awx:ansible.com](https://matrix.to/#/#awx:ansible.com) — AWX and AWX Operator discussions
* [#docs:ansible.im](https://matrix.to/#/#docs:ansible.im) — Documentation discussions
We welcome your feedback, and encourage you to file an issue when you run into a problem.

View File

@@ -1,8 +1,8 @@
FROM quay.io/operator-framework/ansible-operator:v1.40.0
FROM quay.io/operator-framework/ansible-operator:v1.36.1
USER root
RUN dnf update --security --bugfix -y --disableplugin=subscription-manager && \
dnf install -y --disableplugin=subscription-manager openssl
RUN dnf update --security --bugfix -y && \
dnf install -y openssl
USER 1001

109
Makefile
View File

@@ -3,7 +3,10 @@
# To re-generate a bundle for another specific version without changing the standard setup, you can:
# - use the VERSION as arg of the bundle target (e.g make bundle VERSION=0.0.2)
# - use environment variables to overwrite this value (e.g export VERSION=0.0.2)
# VERSION ?= 0.0.1 # Set in operator.mk
VERSION ?= $(shell git describe --tags)
PREV_VERSION ?= $(shell git describe --abbrev=0 --tags $(shell git rev-list --tags --skip=1 --max-count=1))
CONTAINER_CMD ?= docker
# CHANNELS define the bundle channels used in the bundle.
# Add a new line here if you would like to change its default config. (E.g CHANNELS = "candidate,fast,stable")
@@ -28,8 +31,8 @@ BUNDLE_METADATA_OPTS ?= $(BUNDLE_CHANNELS) $(BUNDLE_DEFAULT_CHANNEL)
# This variable is used to construct full image tags for bundle and catalog images.
#
# For example, running 'make bundle-build bundle-push catalog-build catalog-push' will build and push both
# example.com/temp-operator-bundle:$VERSION and example.com/temp-operator-catalog:$VERSION.
# IMAGE_TAG_BASE ?= quay.io/<org>/<operator-name> # Set in operator.mk
# ansible.com/awx-operator-bundle:$VERSION and ansible.com/awx-operator-catalog:$VERSION.
IMAGE_TAG_BASE ?= quay.io/ansible/awx-operator
# BUNDLE_IMG defines the image:tag used for the bundle.
# You can use it as an arg. (E.g make bundle-build BUNDLE_IMG=<some-registry>/<project-name-bundle>:<tag>)
@@ -43,16 +46,12 @@ BUNDLE_GEN_FLAGS ?= -q --overwrite --version $(VERSION) $(BUNDLE_METADATA_OPTS)
# To enable set flag to true
USE_IMAGE_DIGESTS ?= false
ifeq ($(USE_IMAGE_DIGESTS), true)
BUNDLE_GEN_FLAGS += --use-image-digests
BUNDLE_GEN_FLAGS += --use-image-digests
endif
# Set the Operator SDK version to use. By default, what is installed on the system is used.
# This is useful for CI or a project to utilize a specific version of the operator-sdk toolkit.
OPERATOR_SDK_VERSION ?= v1.40.0
CONTAINER_TOOL ?= podman
# Image URL to use all building/pushing image targets
IMG ?= $(IMAGE_TAG_BASE):$(VERSION)
NAMESPACE ?= awx
.PHONY: all
all: docker-build
@@ -74,20 +73,23 @@ all: docker-build
help: ## Display this help.
@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m<target>\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
.PHONY: print-%
print-%: ## Print any variable from the Makefile. Use as `make print-VARIABLE`
@echo $($*)
##@ Build
.PHONY: run
ANSIBLE_ROLES_PATH?="$(shell pwd)/roles"
run: ansible-operator ## Run against the configured Kubernetes cluster in ~/.kube/config
$(ANSIBLE_OPERATOR) run
ANSIBLE_ROLES_PATH="$(ANSIBLE_ROLES_PATH):$(shell pwd)/roles" $(ANSIBLE_OPERATOR) run
.PHONY: docker-build
docker-build: ## Build docker image with the manager.
docker build $(BUILD_ARGS) -t ${IMG} .
${CONTAINER_CMD} build $(BUILD_ARGS) -t ${IMG} .
.PHONY: docker-push
docker-push: ## Push docker image with the manager.
docker push ${IMG}
${CONTAINER_CMD} push ${IMG}
# PLATFORMS defines the target platforms for the manager image be build to provide support to multiple
# architectures. (i.e. make docker-buildx IMG=myregistry/mypoperator:0.0.1). To use this option you need to:
@@ -95,6 +97,7 @@ docker-push: ## Push docker image with the manager.
# - have enable BuildKit, More info: https://docs.docker.com/develop/develop-images/build_enhancements/
# - be able to push the image for your registry (i.e. if you do not inform a valid value via IMG=<myregistry/image:<tag>> than the export will fail)
# To properly provided solutions that supports more than one platform you should use this option.
PLATFORMS ?= linux/arm64,linux/amd64,linux/s390x,linux/ppc64le
.PHONY: docker-buildx
docker-buildx: ## Build and push docker image for the manager for cross-platform support
- docker buildx create --name project-v3-builder
@@ -102,11 +105,8 @@ docker-buildx: ## Build and push docker image for the manager for cross-platform
- docker buildx build --push $(BUILD_ARGS) --platform=$(PLATFORMS) --tag ${IMG} -f Dockerfile .
- docker buildx rm project-v3-builder
##@ Deployment
ifndef ignore-not-found
ignore-not-found = false
endif
##@ Deployment
.PHONY: install
install: kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config.
@@ -114,22 +114,28 @@ install: kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/con
.PHONY: uninstall
uninstall: kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config.
$(KUSTOMIZE) build config/crd | kubectl delete --ignore-not-found=$(ignore-not-found) -f -
$(KUSTOMIZE) build config/crd | kubectl delete -f -
.PHONY: gen-resources
gen-resources: kustomize ## Generate resources for controller and print to stdout
@cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
@cd config/default && $(KUSTOMIZE) edit set namespace ${NAMESPACE}
@$(KUSTOMIZE) build config/default
.PHONY: deploy
deploy: kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config.
cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
$(KUSTOMIZE) build config/default | kubectl apply -f -
@cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
@cd config/default && $(KUSTOMIZE) edit set namespace ${NAMESPACE}
@$(KUSTOMIZE) build config/default | kubectl apply -f -
.PHONY: undeploy
undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config.
$(KUSTOMIZE) build config/default | kubectl delete --ignore-not-found=$(ignore-not-found) -f -
## Location for locally installed tools
LOCALBIN ?= $(shell pwd)/bin
@cd config/default && $(KUSTOMIZE) edit set namespace ${NAMESPACE}
$(KUSTOMIZE) build config/default | kubectl delete -f -
OS := $(shell uname -s | tr '[:upper:]' '[:lower:]')
ARCH := $(shell uname -m | sed 's/x86_64/amd64/' | sed 's/aarch64/arm64/')
ARCHA := $(shell uname -m | sed -e 's/x86_64/amd64/' -e 's/aarch64/arm64/')
ARCHX := $(shell uname -m | sed -e 's/amd64/x86_64/' -e 's/aarch64/arm64/')
.PHONY: kustomize
KUSTOMIZE = $(shell pwd)/bin/kustomize
@@ -139,7 +145,7 @@ ifeq (,$(shell which kustomize 2>/dev/null))
@{ \
set -e ;\
mkdir -p $(dir $(KUSTOMIZE)) ;\
curl -sSLo - https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize/v5.6.0/kustomize_v5.6.0_$(OS)_$(ARCH).tar.gz | \
curl -sSLo - https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize/v5.0.1/kustomize_v5.0.1_$(OS)_$(ARCHA).tar.gz | \
tar xzf - -C bin/ ;\
}
else
@@ -147,6 +153,22 @@ KUSTOMIZE = $(shell which kustomize)
endif
endif
.PHONY: operator-sdk
OPERATOR_SDK = $(shell pwd)/bin/operator-sdk
operator-sdk: ## Download operator-sdk locally if necessary, preferring the $(pwd)/bin path over global if both exist.
ifeq (,$(wildcard $(OPERATOR_SDK)))
ifeq (,$(shell which operator-sdk 2>/dev/null))
@{ \
set -e ;\
mkdir -p $(dir $(OPERATOR_SDK)) ;\
curl -sSLo $(OPERATOR_SDK) https://github.com/operator-framework/operator-sdk/releases/download/v1.36.1/operator-sdk_$(OS)_$(ARCHA) ;\
chmod +x $(OPERATOR_SDK) ;\
}
else
OPERATOR_SDK = $(shell which operator-sdk)
endif
endif
.PHONY: ansible-operator
ANSIBLE_OPERATOR = $(shell pwd)/bin/ansible-operator
ansible-operator: ## Download ansible-operator locally if necessary, preferring the $(pwd)/bin path over global if both exist.
@@ -155,7 +177,7 @@ ifeq (,$(shell which ansible-operator 2>/dev/null))
@{ \
set -e ;\
mkdir -p $(dir $(ANSIBLE_OPERATOR)) ;\
curl -sSLo $(ANSIBLE_OPERATOR) https://github.com/operator-framework/ansible-operator-plugins/releases/download/$(OPERATOR_SDK_VERSION)/ansible-operator_$(OS)_$(ARCH) ;\
curl -sSLo $(ANSIBLE_OPERATOR) https://github.com/operator-framework/ansible-operator-plugins/releases/download/v1.36.1/ansible-operator_$(OS)_$(ARCHA) ;\
chmod +x $(ANSIBLE_OPERATOR) ;\
}
else
@@ -163,47 +185,30 @@ ANSIBLE_OPERATOR = $(shell which ansible-operator)
endif
endif
.PHONY: operator-sdk
OPERATOR_SDK ?= $(LOCALBIN)/operator-sdk
operator-sdk: ## Download operator-sdk locally if necessary.
ifeq (,$(wildcard $(OPERATOR_SDK)))
ifeq (, $(shell which operator-sdk 2>/dev/null))
@{ \
set -e ;\
mkdir -p $(dir $(OPERATOR_SDK)) ;\
curl -sSLo $(OPERATOR_SDK) https://github.com/operator-framework/operator-sdk/releases/download/$(OPERATOR_SDK_VERSION)/operator-sdk_$(OS)_$(ARCH) ;\
chmod +x $(OPERATOR_SDK) ;\
}
else
OPERATOR_SDK = $(shell which operator-sdk)
endif
endif
.PHONY: bundle
bundle: kustomize operator-sdk ## Generate bundle manifests and metadata, then validate generated files.
$(OPERATOR_SDK) generate kustomize manifests -q
cd config/manager && $(KUSTOMIZE) edit set image controller=$(IMG)
$(KUSTOMIZE) build config/manifests | $(OPERATOR_SDK) generate bundle $(BUNDLE_GEN_FLAGS)
$(KUSTOMIZE) build config/manifests | $(OPERATOR_SDK) generate bundle -q --overwrite --version $(VERSION) $(BUNDLE_METADATA_OPTS)
$(OPERATOR_SDK) bundle validate ./bundle
.PHONY: bundle-build
bundle-build: ## Build the bundle image.
$(CONTAINER_TOOL) build -f bundle.Dockerfile -t $(BUNDLE_IMG) .
${CONTAINER_CMD} build -f bundle.Dockerfile -t $(BUNDLE_IMG) .
.PHONY: bundle-push
bundle-push: ## Push the bundle image.
$(MAKE) docker-push IMG=$(BUNDLE_IMG)
.PHONY: opm
OPM = $(LOCALBIN)/opm
OPM = ./bin/opm
opm: ## Download opm locally if necessary.
ifeq (,$(wildcard $(OPM)))
ifeq (,$(shell which opm 2>/dev/null))
@{ \
set -e ;\
mkdir -p $(dir $(OPM)) ;\
curl -sSLo $(OPM) https://github.com/operator-framework/operator-registry/releases/download/v1.55.0/$(OS)-$(ARCH)-opm ;\
curl -sSLo $(OPM) https://github.com/operator-framework/operator-registry/releases/download/v1.26.0/$(OS)-$(ARCHA)-opm ;\
chmod +x $(OPM) ;\
}
else
@@ -228,15 +233,9 @@ endif
# https://github.com/operator-framework/community-operators/blob/7f1438c/docs/packaging-operator.md#updating-your-existing-operator
.PHONY: catalog-build
catalog-build: opm ## Build a catalog image.
$(OPM) index add --container-tool $(CONTAINER_TOOL) --mode semver --tag $(CATALOG_IMG) --bundles $(BUNDLE_IMGS) $(FROM_INDEX_OPT)
$(OPM) index add --container-tool ${CONTAINER_CMD} --mode semver --tag $(CATALOG_IMG) --bundles $(BUNDLE_IMGS) $(FROM_INDEX_OPT)
# Push the catalog image.
.PHONY: catalog-push
catalog-push: ## Push a catalog image.
$(MAKE) docker-push IMG=$(CATALOG_IMG)
##@ Includes
# Operator-specific targets and variables
-include makefiles/operator.mk
# Shared dev workflow targets (synced across all operator repos)
-include makefiles/common.mk

View File

@@ -16,7 +16,11 @@ The AWX Operator documentation is available at <https://ansible.readthedocs.io/p
## Contributing
Please visit our [contributing guidelines](https://github.com/ansible/awx-operator/blob/devel/CONTRIBUTING.md) and [development guide](https://github.com/ansible/awx-operator/blob/devel/docs/development.md) for information on how to set up your environment, build and deploy the operator, and submit changes.
Please visit [our contributing guidelines](https://github.com/ansible/awx-operator/blob/devel/CONTRIBUTING.md).
For docs changes, create PRs on the appropriate files in the `/docs` folder.
The development environment consists of running an [`up.sh`](https://github.com/ansible/awx-operator/blob/devel/up.sh) and a [`down.sh`](https://github.com/ansible/awx-operator/blob/devel/down.sh) script, which applies or deletes yaml on the Openshift or K8s cluster you are connected to. See the [development.md](https://github.com/ansible/awx-operator/blob/devel/docs/development.md) for information on how to deploy and test changes from your branch.
## Author
@@ -46,6 +50,8 @@ For more information on the forum navigation, see [Navigating the Ansible forum]
For real-time interactions, conversations in the community happen over the Matrix protocol in the following channels:
<CI-TEST>
* [#awx:ansible.com](https://matrix.to/#/#awx:ansible.com): AWX and AWX-Operator project-related discussions.
* [#docs:ansible.im](https://matrix.to/#/#docs:ansible.im): Ansible, AWX and AWX-Operator documentation-related discussions.

View File

@@ -37,9 +37,6 @@ spec:
metadata:
type: object
spec:
x-kubernetes-validations:
- rule: "has(self.postgres_image) && has(self.postgres_image_version) || !has(self.postgres_image) && !has(self.postgres_image_version)"
message: "Both postgres_image and postgres_image_version must be set when required"
type: object
x-kubernetes-preserve-unknown-fields: true
required:
@@ -51,10 +48,6 @@ spec:
backup_pvc:
description: Name of the backup PVC
type: string
create_backup_pvc:
description: If true (default), automatically create the backup PVC if it does not exist
type: boolean
default: true
backup_pvc_namespace:
description: (Deprecated) Namespace the PVC is in
type: string
@@ -88,10 +81,6 @@ spec:
pg_dump_suffix:
description: Additional parameters for the pg_dump command
type: string
use_db_compression:
description: Enable compression for database dumps using pg_dump built-in compression.
type: boolean
default: true
postgres_label_selector:
description: Label selector used to identify postgres pod for backing up data
type: string

View File

@@ -37,9 +37,6 @@ spec:
metadata:
type: object
spec:
x-kubernetes-validations:
- rule: "has(self.postgres_image) && has(self.postgres_image_version) || !has(self.postgres_image) && !has(self.postgres_image_version)"
message: "Both postgres_image and postgres_image_version must be set when required"
type: object
x-kubernetes-preserve-unknown-fields: true
required:

View File

@@ -20,11 +20,11 @@ resources:
- ../manager
# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'.
#- ../prometheus
- metrics_service.yaml
# Protect the /metrics endpoint by putting it behind auth.
# If you want your controller-manager to expose the /metrics
# endpoint w/o any authn/z, please comment the following line.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
patches:
- path: manager_metrics_patch.yaml
target:
kind: Deployment
- path: manager_auth_proxy_patch.yaml

View File

@@ -0,0 +1,40 @@
# This patch inject a sidecar container which is a HTTP proxy for the
# controller manager, it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews.
apiVersion: apps/v1
kind: Deployment
metadata:
name: controller-manager
namespace: system
spec:
template:
spec:
containers:
- name: kube-rbac-proxy
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- "ALL"
image: gcr.io/kubebuilder/kube-rbac-proxy:v0.15.0
args:
- "--secure-listen-address=0.0.0.0:8443"
- "--upstream=http://127.0.0.1:8080/"
- "--logtostderr=true"
- "--v=0"
ports:
- containerPort: 8443
protocol: TCP
name: https
resources:
limits:
cpu: 500m
memory: 128Mi
requests:
cpu: 5m
memory: 64Mi
- name: awx-manager
args:
- "--health-probe-bind-address=:6789"
- "--metrics-bind-address=127.0.0.1:8080"
- "--leader-elect"
- "--leader-election-id=awx-operator"

View File

@@ -1,12 +0,0 @@
# This patch adds the args to allow exposing the metrics endpoint using HTTPS
- op: add
path: /spec/template/spec/containers/0/args/0
value: --metrics-bind-address=:8443
# This patch adds the args to allow securing the metrics endpoint
- op: add
path: /spec/template/spec/containers/0/args/0
value: --metrics-secure
# This patch adds the args to allow RBAC-based authn/authz for the metrics endpoint
- op: add
path: /spec/template/spec/containers/0/args/0
value: --metrics-require-rbac

View File

@@ -38,7 +38,6 @@ spec:
- args:
- --leader-elect
- --leader-election-id=awx-operator
- --health-probe-bind-address=:6789
image: controller:latest
imagePullPolicy: IfNotPresent
name: awx-manager

View File

@@ -175,12 +175,6 @@ spec:
path: additional_labels
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:advanced
- description: Enable compression for database dumps using pg_dump built-in compression
displayName: Use DB Compression
path: use_db_compression
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:advanced
- urn:alm:descriptor:com.tectonic.ui:booleanSwitch
- displayName: Node Selector for backup management pod
path: db_management_pod_node_selector
x-descriptors:

View File

@@ -1,7 +1,7 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: metrics-auth-role
name: proxy-role
rules:
- apiGroups:
- authentication.k8s.io

View File

@@ -1,11 +1,11 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: metrics-auth-rolebinding
name: proxy-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: metrics-auth-role
name: proxy-role
subjects:
- kind: ServiceAccount
name: controller-manager

View File

@@ -3,8 +3,6 @@ kind: Service
metadata:
labels:
control-plane: controller-manager
app.kubernetes.io/name: awx-operator
app.kubernetes.io/managed-by: kustomize
name: controller-manager-metrics-service
namespace: system
spec:
@@ -12,7 +10,6 @@ spec:
- name: https
port: 8443
protocol: TCP
targetPort: 8443
targetPort: https
selector:
control-plane: controller-manager
app.kubernetes.io/name: awx-operator

View File

@@ -9,6 +9,10 @@ resources:
- role_binding.yaml
- leader_election_role.yaml
- leader_election_role_binding.yaml
- metrics_auth_role.yaml
- metrics_auth_role_binding.yaml
- metrics_reader_role.yaml
# Comment the following 4 lines if you want to disable
# the auth proxy (https://github.com/brancz/kube-rbac-proxy)
# which protects your /metrics endpoint.
- auth_proxy_service.yaml
- auth_proxy_role.yaml
- auth_proxy_role_binding.yaml
- auth_proxy_client_clusterrole.yaml

View File

@@ -14,13 +14,10 @@ resources:
- ../crd
- ../rbac
- ../manager
- ../default/metrics_service.yaml
images:
- name: testing
newName: testing-operator
patches:
- path: manager_image.yaml
- path: debug_logs_patch.yaml
- path: ../default/manager_metrics_patch.yaml
target:
kind: Deployment
- path: ../default/manager_auth_proxy_patch.yaml

View File

@@ -8,3 +8,20 @@ After the draft release is created, publish it and the [Promote AWX Operator ima
- Publish image to Quay
- Release Helm chart
After the GHA is complete, the final step is to run the [publish-to-operator-hub.sh](https://github.com/ansible/awx-operator/blob/devel/hack/publish-to-operator-hub.sh) script, which will create a PR in the following repos to add the new awx-operator bundle version to OperatorHub:
- <https://github.com/k8s-operatorhub/community-operators> (community operator index)
- <https://github.com/redhat-openshift-ecosystem/community-operators-prod> (operator index shipped with Openshift)
!!! note
The usage is documented in the script itself, but here is an example of how you would use the script to publish the 2.5.3 awx-opeator bundle to OperatorHub.
Note that you need to specify the version being released, as well as the previous version. This is because the bundle has a pointer to the previous version that is it being upgrade from. This is used by OLM to create a dependency graph.
```bash
VERSION=2.5.3 PREV_VERSION=2.5.2 ./hack/publish-to-operator-hub.sh
```
There are some quirks with running this on OS X that still need to be fixed, but the script runs smoothly on linux.
As soon as CI completes successfully, the PR's will be auto-merged. Please remember to monitor those PR's to make sure that CI passes, sometimes it needs a retry.

View File

@@ -1,8 +1,6 @@
# Development Guide
There are development yaml examples in the [`dev/`](../dev) directory and Makefile targets that can be used to build, deploy and test changes made to the awx-operator.
Run `make help` to see all available targets and options.
There are development scripts and yaml examples in the [`dev/`](../dev) directory that, along with the up.sh and down.sh scripts in the root of the repo, can be used to build, deploy and test changes made to the awx-operator.
## Prerequisites
@@ -12,218 +10,95 @@ You will need to have the following tools installed:
* [git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git)
* [podman](https://podman.io/docs/installation) or [docker](https://docs.docker.com/get-docker/)
* [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
* [oc](https://docs.openshift.com/container-platform/4.11/cli_reference/openshift_cli/getting-started-cli.html) (if using OpenShift)
* [oc](https://docs.openshift.com/container-platform/4.11/cli_reference/openshift_cli/getting-started-cli.html) (if using Openshift)
You will also need a container registry account. This guide uses [quay.io](https://quay.io), but any container registry will work.
You will also need to have a container registry account. This guide uses quay.io, but any container registry will work. You will need to create a robot account and login at the CLI with `podman login` or `docker login`.
## Quay.io Setup for Development
## Registry Setup
Before using the development scripts, you'll need to set up a Quay.io repository and pull secret:
### Create a Quay.io Repository
### 1. Create a Private Quay.io Repository
- Go to [quay.io](https://quay.io) and create a private repository named `awx-operator` under your username
- The repository URL should be `quay.io/username/awx-operator`
1. Go to [quay.io](https://quay.io) and create a repository named `awx-operator` under your username.
2. Login at the CLI:
```sh
podman login quay.io
```
### 2. Create a Bot Account
- In your Quay.io repository, go to Settings → Robot Accounts
- Create a new robot account with write permissions to your repository
- Click on the robot account name to view its credentials
### Pull Secret (optional)
### 3. Generate Kubernetes Pull Secret
- In the robot account details, click "Kubernetes Secret"
- Copy the generated YAML content from the pop-up
If your repository is private, you'll need to configure a pull secret so the cluster can pull your operator image:
### 4. Create Local Pull Secret File
- Create a file at `hacking/pull-secret.yml` in your awx-operator checkout
- Paste the Kubernetes secret YAML content into this file
- **Important**: Change the `name` field in the secret from the default to `redhat-operators-pull-secret`
- The `hacking/` directory is in `.gitignore`, so this file won't be committed to git
1. In your Quay.io repository, go to Settings → Robot Accounts.
2. Create a robot account with write permissions.
3. Click the robot account name, then click "Kubernetes Secret" and copy the YAML.
4. Save it to `hacking/pull-secret.yml` in your checkout (this path is in `.gitignore`).
5. Change the `name` field to `redhat-operators-pull-secret`.
Example:
Example `hacking/pull-secret.yml`:
```yaml
apiVersion: v1
kind: Secret
metadata:
name: redhat-operators-pull-secret
name: redhat-operators-pull-secret # Change this name
namespace: awx
type: kubernetes.io/dockerconfigjson
data:
.dockerconfigjson: <base64-encoded-credentials>
```
If a pull secret file is found at `hacking/pull-secret.yml` (or the path set by `PULL_SECRET_FILE`), `make up` will apply it automatically. Otherwise, you can make your quay.io repos public or create a global pull secret on your cluster.
## Build and Deploy
Make sure you are logged into your cluster (`oc login` or `kubectl` configured), then run:
```sh
QUAY_USER=username make up
If you clone the repo, and make sure you are logged in at the CLI with oc and your cluster, you can run:
```
export QUAY_USER=username
export NAMESPACE=awx
export TAG=test
./up.sh
```
This will:
1. Login to container registries
2. Create the target namespace
3. Build the operator image and push it to your registry
4. Deploy the operator via kustomize
5. Apply dev secrets and create a dev AWX instance
You can add those variables to your .bashrc file so that you can just run `./up.sh` in the future.
### Customization Options
> Note: the first time you run this, it will create quay.io repos on your fork. If you followed the Quay.io setup steps above and created the `hacking/pull-secret.yml` file, the script will automatically handle the pull secret. Otherwise, you will need to either make those repos public, or create a global pull secret on your cluster.
| Variable | Default | Description |
|----------|---------|-------------|
| `QUAY_USER` | _(required)_ | Your quay.io username |
| `NAMESPACE` | `awx` | Target namespace |
| `DEV_TAG` | `dev` | Image tag for dev builds |
| `CONTAINER_TOOL` | `podman` | Container engine (`podman` or `docker`) |
| `PLATFORM` | _(auto-detected)_ | Target platform (e.g., `linux/amd64`) |
| `MULTI_ARCH` | `false` | Build multi-arch image (`linux/arm64,linux/amd64`) |
| `DEV_IMG` | `quay.io/<QUAY_USER>/awx-operator` | Override full image path (skips QUAY_USER) |
| `BUILD_IMAGE` | `true` | Set to `false` to skip image build (use existing image) |
| `CREATE_CR` | `true` | Set to `false` to skip creating the dev AWX instance |
| `CREATE_SECRETS` | `true` | Set to `false` to skip creating dev secrets |
| `IMAGE_PULL_POLICY` | `Always` | Set to `Never` for local builds without push |
| `BUILD_ARGS` | _(empty)_ | Extra args passed to container build (e.g., `--no-cache`) |
| `DEV_CR` | `dev/awx-cr/awx-openshift-cr.yml` | Path to the dev CR to apply |
| `PULL_SECRET_FILE` | `dev/pull-secret.yml` | Path to pull secret YAML |
| `PODMAN_CONNECTION` | _(empty)_ | Remote podman connection name |
To get the URL, if on **Openshift**, run:
Examples:
```bash
# Use a specific namespace and tag
QUAY_USER=username NAMESPACE=awx DEV_TAG=mytag make up
# Use docker instead of podman
CONTAINER_TOOL=docker QUAY_USER=username make up
# Build for a specific platform (e.g., when on ARM building for x86)
PLATFORM=linux/amd64 QUAY_USER=username make up
# Deploy without building (use an existing image)
BUILD_IMAGE=false DEV_IMG=quay.io/myuser/awx-operator DEV_TAG=latest make up
# Build without pushing (local cluster like kind/minikube)
IMAGE_PULL_POLICY=Never QUAY_USER=username make up
```
$ oc get route
```
### Accessing the Deployment
On **k8s with ingress**, run:
On **OpenShift**:
```sh
oc get route
```
$ kubectl get ing
```
On **k8s with ingress**:
```sh
kubectl get ing
On **k8s with nodeport**, run:
```
$ kubectl get svc
```
On **k8s with nodeport**:
```sh
kubectl get svc
```
The URL is then `http://<Node-IP>:<NodePort>`.
The URL is then `http://<Node-IP>:<NodePort>`
> **Note**: NodePort will only work if you expose that port on your underlying k8s node, or are accessing it from localhost.
### Default Credentials
The dev CR pre-creates an admin password secret. Default credentials are:
- **Username**: `admin`
- **Password**: `password`
Without the dev CR, a password would be generated and stored in a secret named `<deployment-name>-admin-password`.
> Note: NodePort will only work if you expose that port on your underlying k8s node, or are accessing it from localhost.
By default, the username and password will be admin and password if using the `up.sh` script because it pre-creates a custom admin password k8s secret and specifies it on the AWX custom resource spec. Without that, a password would have been generated and stored in a k8s secret named `<deployment-name>-admin-password`.
## Clean up
To tear down your development deployment:
```sh
make down
Same thing for cleanup, just run ./down.sh and it will clean up your namespace on that cluster
```
./down.sh
```
### Teardown Options
## Running CI tests locally
| Variable | Default | Description |
|----------|---------|-------------|
| `KEEP_NAMESPACE` | `false` | Set to `true` to keep the namespace for reuse |
| `DELETE_PVCS` | `true` | Set to `false` to preserve PersistentVolumeClaims |
| `DELETE_SECRETS` | `true` | Set to `false` to preserve secrets |
Examples:
```bash
# Keep the namespace for faster redeploy
KEEP_NAMESPACE=true make down
# Keep PVCs (preserve database data between deploys)
DELETE_PVCS=false make down
```
## Testing
### Linting
Run linting checks (required for all PRs):
```sh
make lint
```
This runs `ansible-lint` on roles, playbooks, and config samples, and checks that `no_log` statements use the `{{ no_log }}` variable.
### Molecule Tests
The operator includes a [Molecule](https://ansible.readthedocs.io/projects/molecule/)-based test environment for integration testing. Molecule can run standalone in Docker or inside a Kubernetes cluster.
Install Molecule:
```sh
python -m pip install molecule-plugins[docker]
```
#### Testing in Kind (recommended)
[Kind](https://kind.sigs.k8s.io/docs/user/quick-start/) is the recommended way to test locally:
```sh
molecule test -s kind
```
#### Testing in Minikube
```sh
minikube start --memory 8g --cpus 4
minikube addons enable ingress
molecule test -s test-minikube
```
[Minikube](https://kubernetes.io/docs/tasks/tools/install-minikube/) runs a full VM with an assigned IP address, making it easier to test NodePort services and Ingress from outside the cluster.
Once deployed, access the AWX UI:
1. Add `<minikube-ip> example-awx.test` to your `/etc/hosts` file (get the IP with `minikube ip`).
2. Visit `http://example-awx.test/` (default login: `test`/`changeme`).
#### Active Development
Use `molecule converge` instead of `molecule test` to keep the environment running after tests complete — useful for iterating on changes.
## Bundle Generation
If you have the Operator Lifecycle Manager (OLM) installed, you can generate and deploy an operator bundle:
```bash
# Generate bundle manifests and validate
make bundle
# Build and push the bundle image
make bundle-build bundle-push
# Build and push a catalog image
make catalog-build catalog-push
```
After pushing the catalog, create a `CatalogSource` in your cluster pointing to the catalog image. Once the CatalogSource is in a READY state, the operator will be available in OperatorHub.
More tests coming soon...

View File

@@ -24,6 +24,13 @@ Past that, it is often useful to inspect various resources the AWX Operator mana
* secrets
* serviceaccount
And if installing via OperatorHub and OLM:
* subscription
* csv
* installPlan
* catalogSource
To inspect these resources you can use these commands
```sh

36
down.sh Executable file
View File

@@ -0,0 +1,36 @@
#!/bin/bash
# AWX Operator down.sh
# Purpose:
#   Cleanup and delete the namespace you deployed in
# -- Usage
#   NAMESPACE=awx ./down.sh
# -- Variables
TAG=${TAG:-dev}
AWX_CR=${AWX_CR:-awx}
CLEAN_DB=${CLEAN_DB:-false}
# -- Check for required variables
# Set the following environment variables
#   export NAMESPACE=awx
if [ -z "$NAMESPACE" ]; then
  echo "Error: NAMESPACE env variable is not set. Run the following with your namespace:"
  echo "  export NAMESPACE=developer"
  exit 1
fi
# -- Delete Backups
# Scope deletes to $NAMESPACE explicitly; previously these ran against the
# current kubectl context namespace, which could delete the wrong resources.
# `|| true` keeps teardown going if the CRD is already gone.
kubectl delete awxbackup --all -n "$NAMESPACE" || true
# -- Delete Restores
kubectl delete awxrestore --all -n "$NAMESPACE" || true
# -- Undeploy Operator (comment previously said "Deploy")
make undeploy NAMESPACE="$NAMESPACE"
# -- Remove PVCs (quote the name in case AWX_CR contains shell specials)
kubectl delete pvc "postgres-15-$AWX_CR-postgres-15-0" -n "$NAMESPACE" --ignore-not-found=true

123
hack/publish-to-operator-hub.sh Executable file
View File

@@ -0,0 +1,123 @@
#!/bin/bash
# Create PR to Publish to community-operators and community-operators-prod
#
# Steps performed:
#   * Check out the awx-operator release tag (e.g. 1.1.2) before running.
#   * Run `make bundle` for VERSION and build/push the bundle + catalog images.
#   * Patch the CSV: containerImage, namespace, and a `replaces:` entry that
#     points at PREV_VERSION so OLM can build the upgrade dependency graph.
#   * Clone each community-operators repo, copy the bundle into
#     operators/awx-operator/<VERSION>/, push a branch to FORK and open a PR.
#
# Usage:
#   First, check out the awx-operator tag you intend to release, in this case, 1.1.2
#   $ VERSION=1.1.2 PREV_VERSION=1.1.1 FORK=<your-fork> ./hack/publish-to-operator-hub.sh
#
# Remember to update the VERSION and PREV_VERSION before running!!!

set -e

VERSION=${VERSION:-$(make print-VERSION)}
PREV_VERSION=${PREV_VERSION:-$(make print-PREV_VERSION)}
BRANCH=publish-awx-operator-$VERSION
FORK=${FORK:-awx-auto}
GITHUB_TOKEN=${GITHUB_TOKEN:-$AWX_AUTO_GITHUB_TOKEN}
IMG_REPOSITORY=${IMG_REPOSITORY:-quay.io/ansible}
OPERATOR_IMG=$IMG_REPOSITORY/awx-operator:$VERSION
CATALOG_IMG=$IMG_REPOSITORY/awx-operator-catalog:$VERSION
BUNDLE_IMG=$IMG_REPOSITORY/awx-operator-bundle:$VERSION
COMMUNITY_OPERATOR_GITHUB_ORG=${COMMUNITY_OPERATOR_GITHUB_ORG:-k8s-operatorhub}
COMMUNITY_OPERATOR_PROD_GITHUB_ORG=${COMMUNITY_OPERATOR_PROD_GITHUB_ORG:-redhat-openshift-ecosystem}

CSV_FILE=bundle/manifests/awx-operator.clusterserviceversion.yaml

# Build bundle directory
make bundle IMG="$OPERATOR_IMG"

# Build bundle and catalog images
make bundle-build bundle-push BUNDLE_IMG="$BUNDLE_IMG" IMG="$OPERATOR_IMG"
make catalog-build catalog-push CATALOG_IMG="$CATALOG_IMG" BUNDLE_IMGS="$BUNDLE_IMG" BUNDLE_IMG="$BUNDLE_IMG" IMG="$OPERATOR_IMG"

# Set containerImage & namespace variables in CSV
sed -i.bak -e "s|containerImage: quay.io/ansible/awx-operator:devel|containerImage: ${OPERATOR_IMG}|g" "$CSV_FILE"
sed -i.bak -e "s|namespace: placeholder|namespace: awx|g" "$CSV_FILE"

# Add replaces to dependency graph for upgrade path.
# BUGFIX: the pattern must be double-quoted so ${PREV_VERSION} expands; with
# single quotes the guard matched the literal string '${PREV_VERSION}', never
# succeeded, and a duplicate replaces entry was appended on every re-run.
if ! grep -qF "replaces: awx-operator.v${PREV_VERSION}" "$CSV_FILE"; then
    sed -i.bak -e "/version: ${VERSION}/a \\
replaces: awx-operator.v$PREV_VERSION" "$CSV_FILE"
fi

# Rename CSV to contain version in name
mv "$CSV_FILE" "bundle/manifests/awx-operator.v${VERSION}.clusterserviceversion.yaml"

# Set OpenShift Support Range (bump minKubeVersion in CSV when changing)
if ! grep -qF 'openshift.versions' bundle/metadata/annotations.yaml; then
    sed -i.bak -e "/annotations:/a \\
com.redhat.openshift.versions: v4.11" bundle/metadata/annotations.yaml
fi

# Remove .bak files from bundle left behind by the sed commands
find bundle -name "*.bak" -type f -delete

# publish_to_repo <github-org> <repo-name>
# Clones the upstream repo, copies the freshly built bundle into
# operators/awx-operator/$VERSION/, pushes a branch to the $FORK remote
# (force-replacing any stale branch from a previous run) and opens a PR.
publish_to_repo() {
    local org=$1
    local repo=$2
    echo "-- Create branch on $repo fork --"
    git clone "https://github.com/$org/$repo.git"
    mkdir -p "$repo/operators/awx-operator/$VERSION/"
    cp -r bundle/* "$repo/operators/awx-operator/$VERSION/"
    pushd "$repo/operators/awx-operator/$VERSION/"
    git checkout -b "$BRANCH"
    git add ./
    git status
    local message='operator [N] [CI] awx-operator'
    local commitMessage="${message} ${VERSION}"
    git commit -m "$commitMessage" -s
    git remote add upstream "https://$GITHUB_TOKEN@github.com/$FORK/$repo.git"
    git push upstream --delete "$BRANCH" || true
    git push upstream "$BRANCH"
    gh pr create \
        --title "operator awx-operator (${VERSION})" \
        --body "operator awx-operator (${VERSION})" \
        --base main \
        --head "$FORK:$BRANCH" \
        --repo "$org/$repo"
    popd
}

publish_to_repo "$COMMUNITY_OPERATOR_GITHUB_ORG" community-operators
publish_to_repo "$COMMUNITY_OPERATOR_PROD_GITHUB_ORG" community-operators-prod

View File

@@ -1,439 +0,0 @@
# common.mk — Shared dev workflow targets for AAP operators
#
# Synced across all operator repos via GHA.
# Operator-specific customization goes in operator.mk.
#
# Usage:
# make up # Full dev deploy
# make down # Full dev undeploy
#
# Required variables (set in operator.mk):
# NAMESPACE — target namespace
# DEPLOYMENT_NAME — operator deployment name
# VERSION — operator version
#
# Optional overrides:
# CONTAINER_TOOL=docker make up # use docker instead of podman (default in Makefile)
# QUAY_USER=myuser make up
# DEV_TAG=mytag make up
# DEV_IMG=registry.example.com/my-operator make up # override image (skips QUAY_USER)
# IMAGE_PULL_POLICY=Never make up # set imagePullPolicy (e.g. for local builds)
# PODMAN_CONNECTION=aap-lab make up # use remote podman connection
# KEEP_NAMESPACE=true make down # undeploy but keep namespace
# PLATFORM=linux/amd64 make up # build for specific platform (auto-detected from cluster)
# MULTI_ARCH=true make up # build multi-arch image (PLATFORMS=linux/arm64,linux/amd64)
# Suppress "Entering/Leaving directory" messages from recursive make calls
MAKEFLAGS += --no-print-directory
#@ Common Variables
# Kube CLI auto-detect (oc preferred, kubectl fallback)
KUBECTL ?= $(shell command -v oc 2>/dev/null || command -v kubectl 2>/dev/null)
# Dev workflow
QUAY_USER ?=
REGISTRIES ?= registry.redhat.io $(if $(QUAY_USER),quay.io/$(QUAY_USER))
DEV_TAG ?= dev
PULL_SECRET_FILE ?= dev/pull-secret.yml
CREATE_PULL_SECRET ?= true
IMAGE_PULL_POLICY ?=
PODMAN_CONNECTION ?=
# Dev image: defaults to quay.io/<user>/<operator-name>, overridable via DEV_IMG
_OPERATOR_NAME = $(notdir $(IMAGE_TAG_BASE))
DEV_IMG ?= $(if $(QUAY_USER),quay.io/$(QUAY_USER)/$(_OPERATOR_NAME),$(IMAGE_TAG_BASE))
# Build platform (auto-detected from cluster, override with PLATFORM=linux/amd64)
MULTI_ARCH ?= false
PLATFORMS ?= linux/arm64,linux/amd64
# Auto-detect registry auth config
REGISTRY_AUTH_CONFIG ?= $(shell \
if [ "$(CONTAINER_TOOL)" = "podman" ]; then \
for f in "$${XDG_RUNTIME_DIR}/containers/auth.json" \
"$${HOME}/.config/containers/auth.json" \
"$${HOME}/.docker/config.json"; do \
[ -f "$$f" ] && echo "$$f" && break; \
done; \
else \
[ -f "$${HOME}/.docker/config.json" ] && echo "$${HOME}/.docker/config.json"; \
fi)
# Container tool with optional remote connection (podman only)
_CONTAINER_CMD = $(CONTAINER_TOOL)$(if $(and $(filter podman,$(CONTAINER_TOOL)),$(PODMAN_CONNECTION)), --connection $(PODMAN_CONNECTION),)
# Portable sed -i (GNU vs BSD)
_SED_I = $(shell if sed --version >/dev/null 2>&1; then echo 'sed -i'; else echo 'sed -i ""'; fi)
# Custom configs to apply during post-deploy (secrets, configmaps, etc.)
DEV_CUSTOM_CONFIG ?=
# Dev CR to apply after deployment (set in operator.mk)
DEV_CR ?=
CREATE_CR ?= true
# Teardown configuration (set in operator.mk)
TEARDOWN_CR_KINDS ?=
TEARDOWN_BACKUP_KINDS ?=
TEARDOWN_RESTORE_KINDS ?=
OLM_SUBSCRIPTIONS ?=
DELETE_PVCS ?= true
DELETE_SECRETS ?= true
KEEP_NAMESPACE ?= false
##@ Dev Workflow
.PHONY: up
up: _require-img _require-namespace ## Full dev deploy
@$(MAKE) registry-login
@$(MAKE) ns-wait
@$(MAKE) ns-create
@$(MAKE) ns-security
@$(MAKE) pull-secret
@$(MAKE) patch-pull-policy
@$(MAKE) operator-up
.PHONY: down
down: _require-namespace ## Full dev undeploy
@echo "=== Tearing down dev environment ==="
@$(MAKE) _teardown-restores
@$(MAKE) _teardown-backups
@$(MAKE) _teardown-operands
@$(MAKE) _teardown-pvcs
@$(MAKE) _teardown-secrets
@$(MAKE) _teardown-olm
@$(MAKE) _teardown-namespace
#@ Operator Deploy Building Blocks
#
# Composable targets for operator-up. Each operator.mk wires these
# together in its own operator-up target, adding repo-specific steps.
#
# Kustomize repos:
# operator-up: _operator-build-and-push _operator-deploy _operator-wait-ready _operator-post-deploy
#
# OLM repos (gateway):
# operator-up: _olm-cleanup _olm-deploy _operator-build-and-inject _operator-wait-ready <custom> _operator-post-deploy
.PHONY: _operator-build-and-push
_operator-build-and-push:
@if [ "$(BUILD_IMAGE)" != "true" ]; then \
echo "Skipping image build (BUILD_IMAGE=false)"; \
exit 0; \
fi; \
$(MAKE) dev-build; \
echo "Pushing $(DEV_IMG):$(DEV_TAG)..."; \
$(_CONTAINER_CMD) push $(DEV_IMG):$(DEV_TAG)
.PHONY: _operator-deploy
_operator-deploy:
@$(MAKE) pre-deploy-cleanup
@cd config/default && $(KUSTOMIZE) edit set namespace $(NAMESPACE)
@$(MAKE) deploy IMG=$(DEV_IMG):$(DEV_TAG)
.PHONY: _operator-wait-ready
_operator-wait-ready:
@echo "Waiting for operator pods to be ready..."
@ATTEMPTS=0; \
while [ $$ATTEMPTS -lt 30 ]; do \
READY=$$($(KUBECTL) get deployment $(DEPLOYMENT_NAME) -n $(NAMESPACE) \
-o jsonpath='{.status.readyReplicas}' 2>/dev/null); \
DESIRED=$$($(KUBECTL) get deployment $(DEPLOYMENT_NAME) -n $(NAMESPACE) \
-o jsonpath='{.status.replicas}' 2>/dev/null); \
if [ -n "$$READY" ] && [ -n "$$DESIRED" ] && [ "$$READY" = "$$DESIRED" ] && [ "$$READY" -gt 0 ]; then \
echo "All pods ready ($$READY/$$DESIRED)."; \
break; \
fi; \
echo "Pods not ready ($$READY/$$DESIRED). Waiting..."; \
ATTEMPTS=$$((ATTEMPTS + 1)); \
sleep 10; \
done; \
if [ $$ATTEMPTS -ge 30 ]; then \
echo "ERROR: Timed out waiting for operator pods to be ready (5 minutes)." >&2; \
exit 1; \
fi
@$(KUBECTL) config set-context --current --namespace=$(NAMESPACE)
.PHONY: _operator-post-deploy
_operator-post-deploy:
@# Apply dev custom configs (secrets, configmaps, etc.) from DEV_CUSTOM_CONFIG
@$(MAKE) _apply-custom-config
@if [ "$(CREATE_CR)" = "true" ] && [ -f "$(DEV_CR)" ]; then \
echo "Applying dev CR: $(DEV_CR)"; \
$(KUBECTL) apply -n $(NAMESPACE) -f $(DEV_CR); \
fi
#@ Teardown
.PHONY: _teardown-restores
_teardown-restores:
@for kind in $(TEARDOWN_RESTORE_KINDS); do \
echo "Deleting $$kind resources..."; \
$(KUBECTL) delete $$kind -n $(NAMESPACE) --all --wait=true --ignore-not-found=true || true; \
done
.PHONY: _teardown-backups
_teardown-backups:
@for kind in $(TEARDOWN_BACKUP_KINDS); do \
echo "Deleting $$kind resources..."; \
$(KUBECTL) delete $$kind -n $(NAMESPACE) --all --wait=true --ignore-not-found=true || true; \
done
.PHONY: _teardown-operands
_teardown-operands:
@for kind in $(TEARDOWN_CR_KINDS); do \
echo "Deleting $$kind resources..."; \
$(KUBECTL) delete $$kind -n $(NAMESPACE) --all --wait=true --ignore-not-found=true || true; \
done
.PHONY: _teardown-pvcs
_teardown-pvcs:
@if [ "$(DELETE_PVCS)" = "true" ]; then \
echo "Deleting PVCs..."; \
$(KUBECTL) delete pvc -n $(NAMESPACE) --all --ignore-not-found=true; \
else \
echo "Keeping PVCs (DELETE_PVCS=false)"; \
fi
.PHONY: _teardown-secrets
_teardown-secrets:
@if [ "$(DELETE_SECRETS)" = "true" ]; then \
echo "Deleting secrets..."; \
$(KUBECTL) delete secrets -n $(NAMESPACE) --all --ignore-not-found=true; \
else \
echo "Keeping secrets (DELETE_SECRETS=false)"; \
fi
# Remove OLM artifacts (subscriptions listed in OLM_SUBSCRIPTIONS, plus the
# operator CSV) from NAMESPACE. Failures are tolerated so teardown continues.
# NOTE(review): the CSV lookup greps for 'aap-operator', but this repo's CSVs
# appear to be named 'awx-operator.v*' — confirm this is intentional for the
# synced common.mk; otherwise CSV cleanup is a no-op here.
.PHONY: _teardown-olm
_teardown-olm:
	@for sub in $(OLM_SUBSCRIPTIONS); do \
		echo "Deleting subscription $$sub..."; \
		$(KUBECTL) delete subscription $$sub -n $(NAMESPACE) --ignore-not-found=true || true; \
	done
	@CSV=$$($(KUBECTL) get csv -n $(NAMESPACE) --no-headers -o custom-columns=":metadata.name" 2>/dev/null | grep aap-operator || true); \
	if [ -n "$$CSV" ]; then \
		echo "Deleting CSV: $$CSV"; \
		$(KUBECTL) delete csv $$CSV -n $(NAMESPACE) --ignore-not-found=true; \
	fi
.PHONY: _teardown-namespace
_teardown-namespace:
@if [ "$(KEEP_NAMESPACE)" != "true" ]; then \
echo "Deleting namespace $(NAMESPACE)..."; \
$(KUBECTL) delete namespace $(NAMESPACE) --ignore-not-found=true; \
else \
echo "Keeping namespace $(NAMESPACE) (KEEP_NAMESPACE=true)"; \
fi
##@ Registry
.PHONY: registry-login
registry-login: ## Login to container registries
@for registry in $(REGISTRIES); do \
echo "Logging into $$registry..."; \
$(_CONTAINER_CMD) login $$registry; \
done
##@ Namespace
.PHONY: ns-wait
ns-wait: ## Wait for namespace to finish terminating
@if $(KUBECTL) get namespace $(NAMESPACE) 2>/dev/null | grep -q 'Terminating'; then \
echo "Namespace $(NAMESPACE) is terminating. Waiting..."; \
while $(KUBECTL) get namespace $(NAMESPACE) 2>/dev/null | grep -q 'Terminating'; do \
sleep 5; \
done; \
echo "Namespace $(NAMESPACE) terminated."; \
fi
.PHONY: ns-create
ns-create: ## Create namespace if it does not exist
@if ! $(KUBECTL) get namespace $(NAMESPACE) --no-headers 2>/dev/null | grep -q .; then \
echo "Creating namespace $(NAMESPACE)"; \
$(KUBECTL) create namespace $(NAMESPACE); \
else \
echo "Namespace $(NAMESPACE) already exists"; \
fi
.PHONY: ns-security
ns-security: ## Configure namespace security for OLM bundle unpacking
@if ! oc get scc anyuid >/dev/null 2>&1; then \
echo "No SCC support detected (vanilla Kubernetes), applying pod security labels..."; \
$(KUBECTL) label namespace "$(NAMESPACE)" \
pod-security.kubernetes.io/enforce=privileged \
pod-security.kubernetes.io/audit=privileged \
pod-security.kubernetes.io/warn=privileged --overwrite; \
elif $(KUBECTL) get namespace openshift-apiserver >/dev/null 2>&1; then \
echo "Full OpenShift detected — skipping SCC grants (OLM handles bundle unpacking)"; \
else \
echo "MicroShift detected — granting SCCs for bundle unpack pods in $(NAMESPACE)..."; \
oc adm policy add-scc-to-user privileged -z default -n "$(NAMESPACE)" 2>/dev/null || true; \
oc adm policy add-scc-to-user anyuid -z default -n "$(NAMESPACE)" 2>/dev/null || true; \
fi
##@ Secrets
.PHONY: pull-secret
pull-secret: ## Apply pull secret from file or create from auth config
@if [ "$(CREATE_PULL_SECRET)" != "true" ]; then \
echo "Pull secret creation disabled (CREATE_PULL_SECRET=false)"; \
exit 0; \
fi; \
if [ -f "$(PULL_SECRET_FILE)" ]; then \
echo "Applying pull secret from $(PULL_SECRET_FILE)"; \
$(KUBECTL) apply -n $(NAMESPACE) -f $(PULL_SECRET_FILE); \
elif [ -n "$(REGISTRY_AUTH_CONFIG)" ] && [ -f "$(REGISTRY_AUTH_CONFIG)" ]; then \
if ! $(KUBECTL) get secret redhat-operators-pull-secret -n $(NAMESPACE) 2>/dev/null | grep -q .; then \
echo "Creating pull secret from $(REGISTRY_AUTH_CONFIG)"; \
$(KUBECTL) create secret generic redhat-operators-pull-secret \
--from-file=.dockerconfigjson="$(REGISTRY_AUTH_CONFIG)" \
--type=kubernetes.io/dockerconfigjson \
-n $(NAMESPACE); \
else \
echo "Pull secret already exists"; \
fi; \
else \
echo "No pull secret file or registry auth config found, skipping"; \
exit 0; \
fi; \
echo "Linking pull secret to default service account..."; \
$(KUBECTL) patch serviceaccount default -n $(NAMESPACE) \
-p '{"imagePullSecrets": [{"name": "redhat-operators-pull-secret"}]}' 2>/dev/null \
&& echo "Pull secret linked to default SA" \
|| echo "Warning: could not link pull secret to default SA"
##@ Build
.PHONY: podman-build
podman-build: ## Build image with podman
$(_CONTAINER_CMD) build $(BUILD_ARGS) -t ${IMG} .
.PHONY: podman-push
podman-push: ## Push image with podman
$(_CONTAINER_CMD) push ${IMG}
.PHONY: podman-buildx
podman-buildx: ## Build multi-arch image with podman
$(_CONTAINER_CMD) build $(BUILD_ARGS) --platform=$(PLATFORMS) --manifest ${IMG} -f Dockerfile .
.PHONY: podman-buildx-push
podman-buildx-push: podman-buildx ## Build and push multi-arch image with podman
$(_CONTAINER_CMD) manifest push --all ${IMG}
.PHONY: dev-build
dev-build: ## Build dev image (auto-detects arch of connected cluster, cross-compiles if needed)
@HOST_ARCH=$$(uname -m | sed 's/x86_64/amd64/' | sed 's/aarch64/arm64/'); \
CLUSTER_ARCH=$$($(KUBECTL) get nodes -o jsonpath='{.items[0].status.nodeInfo.architecture}' 2>/dev/null); \
if [ -z "$$CLUSTER_ARCH" ]; then \
echo "WARNING: Could not detect cluster architecture. Is the cluster reachable?"; \
echo " Falling back to host architecture ($$HOST_ARCH)"; \
CLUSTER_ARCH="$$HOST_ARCH"; \
fi; \
echo "Building $(DEV_IMG):$(DEV_TAG) with $(CONTAINER_TOOL)..."; \
echo " Host arch: $$HOST_ARCH"; \
echo " Cluster arch: $$CLUSTER_ARCH"; \
if [ "$(MULTI_ARCH)" = "true" ]; then \
echo " Build mode: multi-arch ($(PLATFORMS))"; \
$(MAKE) $(CONTAINER_TOOL)-buildx IMG=$(DEV_IMG):$(DEV_TAG) PLATFORMS=$(PLATFORMS); \
elif [ -n "$(PLATFORM)" ]; then \
echo " Build mode: cross-arch ($(PLATFORM))"; \
$(MAKE) $(CONTAINER_TOOL)-buildx IMG=$(DEV_IMG):$(DEV_TAG) PLATFORMS=$(PLATFORM); \
elif [ "$$HOST_ARCH" != "$$CLUSTER_ARCH" ]; then \
echo " Build mode: cross-arch (linux/$$CLUSTER_ARCH)"; \
$(MAKE) $(CONTAINER_TOOL)-buildx-push IMG=$(DEV_IMG):$(DEV_TAG) PLATFORMS=linux/$$CLUSTER_ARCH; \
else \
echo " Build mode: local ($$HOST_ARCH)"; \
$(MAKE) $(CONTAINER_TOOL)-build IMG=$(DEV_IMG):$(DEV_TAG); \
if [ "$(IMAGE_PULL_POLICY)" != "Never" ]; then \
echo "WARNING: Local build without push. Set IMAGE_PULL_POLICY=Never or the kubelet"; \
echo " will attempt to pull $(DEV_IMG):$(DEV_TAG) from a registry and fail."; \
fi; \
fi
##@ Deployment Helpers
.PHONY: patch-pull-policy
patch-pull-policy: ## Patch imagePullPolicy in manager config (default: Always, override with IMAGE_PULL_POLICY)
@_POLICY="$(if $(IMAGE_PULL_POLICY),$(IMAGE_PULL_POLICY),Always)"; \
for file in config/manager/manager.yaml; do \
if [ -f "$$file" ] && grep -q 'imagePullPolicy: IfNotPresent' "$$file"; then \
echo "Patching imagePullPolicy to $$_POLICY in $$file"; \
$(_SED_I) "s|imagePullPolicy: IfNotPresent|imagePullPolicy: $$_POLICY|g" "$$file"; \
fi; \
done
.PHONY: pre-deploy-cleanup
pre-deploy-cleanup: ## Delete existing operator deployment (safe)
@if [ -n "$(DEPLOYMENT_NAME)" ]; then \
echo "Cleaning up deployment $(DEPLOYMENT_NAME)..."; \
$(KUBECTL) delete deployment $(DEPLOYMENT_NAME) \
-n $(NAMESPACE) --ignore-not-found=true; \
fi
.PHONY: _apply-custom-config
_apply-custom-config: ## Apply custom configs (secrets, configmaps, etc.)
@for f in $(DEV_CUSTOM_CONFIG); do \
if [ -f "$$f" ]; then \
echo "Applying custom config: $$f"; \
$(KUBECTL) apply -n $(NAMESPACE) -f $$f; \
else \
echo "WARNING: Custom config not found: $$f"; \
fi; \
done
#@ Validation
.PHONY: _require-img
_require-img:
@if [ -z "$(DEV_IMG)" ]; then \
echo "Error: Set QUAY_USER or DEV_IMG."; \
echo " export QUAY_USER=<your-quay-username>"; \
echo " or: DEV_IMG=registry.example.com/my-operator make up"; \
exit 1; \
fi
@if echo "$(DEV_IMG)" | grep -q '^registry\.redhat\.io'; then \
echo "Error: Cannot push to registry.redhat.io (production registry)."; \
echo " Set QUAY_USER or DEV_IMG to use a personal registry."; \
exit 1; \
fi
@if echo "$(DEV_IMG)" | grep -q '^quay\.io/'; then \
if [ -z "$(QUAY_USER)" ]; then \
echo "Error: Cannot push to quay.io without QUAY_USER."; \
echo " export QUAY_USER=<your-quay-username>"; \
echo " or: DEV_IMG=<your-registry>/<image> make up"; \
exit 1; \
fi; \
if ! echo "$(DEV_IMG)" | grep -q '^quay\.io/$(QUAY_USER)/'; then \
echo "Error: DEV_IMG ($(DEV_IMG)) does not match QUAY_USER ($(QUAY_USER))."; \
echo " Expected: quay.io/$(QUAY_USER)/<image>"; \
echo " Either fix QUAY_USER or set DEV_IMG explicitly."; \
exit 1; \
fi; \
fi
.PHONY: _require-namespace
_require-namespace:
@if [ -z "$(NAMESPACE)" ]; then \
echo "Error: NAMESPACE is required. Set it in operator.mk or run: export NAMESPACE=<namespace>"; \
exit 1; \
fi
##@ Linting
LINT_PATHS ?= roles/ playbooks/ config/samples/ config/manager/
.PHONY: lint
lint: ## Run ansible-lint and check no_log usage
@echo "Checking if ansible-lint is installed..."
@which ansible-lint > /dev/null || (echo "ansible-lint not found, installing..." && pip install --user ansible-lint)
@echo "Running ansible-lint..."
@ansible-lint $(LINT_PATHS)
@if [ -d "roles/" ]; then \
echo "Checking for no_log instances that need to use the variable..."; \
if grep -nr ' no_log:' roles | grep -qv '"{{ no_log }}"'; then \
echo 'Please update the following no_log statement(s) with the "{{ no_log }}" value'; \
grep -nr ' no_log:' roles | grep -v '"{{ no_log }}"'; \
exit 1; \
fi; \
fi

View File

@@ -1,39 +0,0 @@
# operator.mk — AWX Operator specific targets and variables
#
# This file is NOT synced across repos. Each operator maintains its own.
#@ Operator Variables
VERSION ?= $(shell git describe --tags 2>/dev/null || echo 0.0.1)
PREV_VERSION ?= $(shell git describe --abbrev=0 --tags $(shell git rev-list --tags --skip=1 --max-count=1) 2>/dev/null)
IMAGE_TAG_BASE ?= quay.io/ansible/awx-operator
NAMESPACE ?= awx
DEPLOYMENT_NAME ?= awx-operator-controller-manager
# Dev CR to apply after deployment
DEV_CR ?= dev/awx-cr/awx-openshift-cr.yml
# Custom configs to apply during post-deploy (secrets, configmaps, etc.)
DEV_CUSTOM_CONFIG ?= dev/secrets/custom-secret-key.yml dev/secrets/admin-password-secret.yml
# Feature flags
BUILD_IMAGE ?= true
CREATE_CR ?= true
# Teardown configuration
TEARDOWN_CR_KINDS ?= awx
TEARDOWN_BACKUP_KINDS ?= awxbackup
TEARDOWN_RESTORE_KINDS ?= awxrestore
OLM_SUBSCRIPTIONS ?=
##@ AWX Operator
.PHONY: operator-up
operator-up: _operator-build-and-push _operator-deploy _operator-wait-ready _operator-post-deploy ## AWX-specific deploy
@:
##@ Utilities
.PHONY: print-%
print-%: ## Print any variable from the Makefile. Use as `make print-VARIABLE`
@echo $($*)

View File

@@ -5,21 +5,10 @@
name: '{{ item.metadata.name }}'
all_containers: true
register: all_container_logs
ignore_errors: yes
- name: Store logs in file
ansible.builtin.copy:
content: |-
{% if all_container_logs is failed %}
Failed to retrieve logs for pod {{ item.metadata.name }}:
{{ all_container_logs.msg | default(all_container_logs.stderr | default('No additional details provided.')) }}
{% elif all_container_logs.log_lines is defined %}
{{ all_container_logs.log_lines | join('\n') }}
{% elif all_container_logs.log is defined %}
{{ all_container_logs.log }}
{% else %}
No log content returned by kubernetes.core.k8s_log.
{% endif %}
content: "{{ all_container_logs.log_lines | join('\n') }}"
dest: '{{ debug_output_dir }}/{{ item.metadata.name }}.log'
# TODO: the all_containers option dumps all of the output into a single stream, which makes it hard to read; we should probably iterate over each container to fetch container-specific logs

View File

@@ -8,9 +8,6 @@ api_version: '{{ deployment_type }}.ansible.com/v1beta1'
backup_pvc: ''
backup_pvc_namespace: "{{ ansible_operator_meta.namespace }}"
# If true (default), automatically create the backup PVC if it does not exist
create_backup_pvc: true
# Size of backup PVC if created dynamically
backup_storage_requirements: ''
@@ -42,9 +39,6 @@ backup_resource_requirements:
# Allow additional parameters to be added to the pg_dump backup command
pg_dump_suffix: ''
# Enable compression for database dumps (pg_dump -F custom built-in compression)
use_db_compression: true
# Labels defined on the resource, which should be propagated to child resources
additional_labels: []

View File

@@ -22,18 +22,17 @@
block:
- name: Set error message
set_fact:
error_msg: "{{ backup_pvc }} does not exist, please create this pvc first or ensure create_backup_pvc is set to true (default) for automatic backup_pvc creation."
error_msg: "{{ backup_pvc }} does not exist, please create this pvc first."
- name: Handle error
import_tasks: error_handling.yml
- name: Fail early if pvc is defined but does not exist
fail:
msg: "{{ backup_pvc }} does not exist, please create this pvc first or ensure create_backup_pvc is set to true (default) for automatic backup_pvc creation."
msg: "{{ backup_pvc }} does not exist, please create this pvc first."
when:
- backup_pvc != ''
- provided_pvc.resources | length == 0
- not create_backup_pvc | bool
# If backup_pvc is defined, use in management-pod.yml.j2
- name: Set default pvc name
@@ -43,7 +42,7 @@
# by default, it will re-use the old pvc if already created (unless a pvc is provided)
- name: Set PVC to use for backup
set_fact:
backup_pvc: "{{ backup_pvc | default(_default_backup_pvc, true) }}"
backup_claim: "{{ backup_pvc | default(_default_backup_pvc, true) }}"
- block:
- name: Create PVC for backup
@@ -57,11 +56,11 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: "{{ backup_pvc }}"
name: "{{ deployment_name }}-backup-claim"
namespace: "{{ backup_pvc_namespace }}"
ownerReferences: null
when:
- (backup_pvc == '' or backup_pvc is not defined) or (create_backup_pvc | bool)
- backup_pvc == '' or backup_pvc is not defined
- name: Set default postgres image
set_fact:

View File

@@ -121,7 +121,6 @@
-d {{ awx_postgres_database }}
-p {{ awx_postgres_port }}
-F custom
{{ use_db_compression | bool | ternary('', '-Z 0') }}
{{ pg_dump_suffix }}
no_log: "{{ no_log }}"

View File

@@ -9,5 +9,5 @@
namespace: "{{ ansible_operator_meta.namespace }}"
status:
backupDirectory: "{{ backup_dir }}"
backupClaim: "{{ backup_pvc }}"
backupClaim: "{{ backup_claim }}"
when: backup_complete

View File

@@ -2,7 +2,7 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{ backup_pvc }}
name: {{ deployment_name }}-backup-claim
namespace: "{{ backup_pvc_namespace }}"
ownerReferences: null
labels:

View File

@@ -12,6 +12,6 @@ involvedObject:
message: {{ error_msg }}
reason: BackupFailed
type: Warning
firstTimestamp: "{{ now }}"
lastTimestamp: "{{ now }}"
firstTimestamp: {{ now }}
lastTimestamp: {{ now }}
count: 1

View File

@@ -27,6 +27,6 @@ spec:
volumes:
- name: {{ ansible_operator_meta.name }}-backup
persistentVolumeClaim:
claimName: {{ backup_pvc }}
claimName: {{ backup_claim }}
readOnly: false
restartPolicy: Never

View File

@@ -8,7 +8,7 @@
bash -c "echo 'from django.contrib.auth.models import User;
nsu = User.objects.filter(is_superuser=True, username=\"{{ admin_user }}\").count();
exit(0 if nsu > 0 else 1)'
| awx-manage shell --no-imports"
| awx-manage shell"
ignore_errors: true
register: users_result
changed_when: users_result.return_code > 0

View File

@@ -111,23 +111,11 @@ data:
server_tokens off;
client_max_body_size {{ nginx_client_max_body_size }}M;
map $http_x_trusted_proxy $trusted_proxy_present {
default "trusted-proxy";
"" "-";
}
map $http_x_dab_jw_token $dab_jwt_present {
default "dab-jwt";
"" "-";
}
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for" '
'$trusted_proxy_present $dab_jwt_present';
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /dev/stdout main;
error_log /dev/stderr warn;
map $http_upgrade $connection_upgrade {
default upgrade;

View File

@@ -202,7 +202,7 @@ spec:
volumeMounts:
{% if public_base_url is defined %}
- name: redirect-page
mountPath: '/var/lib/awx/venv/awx/lib/python3.12/site-packages/awx/ui/build/index.html'
mountPath: '/var/lib/awx/venv/awx/lib/python3.11/site-packages/awx/ui/build/index.html'
subPath: redirect-page.html
{% endif %}
{% if bundle_ca_crt %}

View File

@@ -40,8 +40,5 @@ additional_labels: []
# Maintain some of the recommended `app.kubernetes.io/*` labels on the resource (self)
set_self_labels: true
# If set to true, the restore process will drop and recreate the database schema before restoring
force_drop_db: false
spec_overrides: {}
...

View File

@@ -83,24 +83,13 @@
- name: Set pg_restore command
set_fact:
pg_restore: >-
pg_restore {{ force_drop_db | bool | ternary('', '--clean --if-exists') }} --no-owner --no-acl
pg_restore --clean --if-exists --no-owner --no-acl
-U {{ awx_postgres_user }}
-h {{ resolvable_db_host }}
-d {{ awx_postgres_database }}
-p {{ awx_postgres_port }}
no_log: "{{ no_log }}"
- name: Grant CREATEDB privilege to database user for force_drop_db
kubernetes.core.k8s_exec:
namespace: "{{ ansible_operator_meta.namespace }}"
pod: "{{ postgres_pod_name }}"
container: postgres
command: >-
psql -c "ALTER USER {{ awx_postgres_user }} CREATEDB;"
when:
- force_drop_db | bool
- awx_postgres_type == 'managed'
- name: Force drop and create database if force_drop_db is true
block:
- name: Set drop db command
@@ -166,14 +155,3 @@
"
register: data_migration
no_log: "{{ no_log }}"
- name: Revoke CREATEDB privilege from database user
kubernetes.core.k8s_exec:
namespace: "{{ ansible_operator_meta.namespace }}"
pod: "{{ postgres_pod_name }}"
container: postgres
command: >-
psql -c "ALTER USER {{ awx_postgres_user }} NOCREATEDB;"
when:
- force_drop_db | bool
- awx_postgres_type == 'managed'

View File

@@ -12,6 +12,6 @@ involvedObject:
message: {{ error_msg }}
reason: RestoreFailed
type: Warning
firstTimestamp: "{{ now }}"
lastTimestamp: "{{ now }}"
firstTimestamp: {{ now }}
lastTimestamp: {{ now }}
count: 1

View File

@@ -14,4 +14,7 @@ broadcast_websocket_secret: '{{ deployment_name }}-broadcast-websocket'
postgres_configuration_secret: '{{ deployment_name }}-postgres-configuration'
supported_pg_version: 15
image_pull_policy: IfNotPresent
# If set to true, the restore process will delete the existing database and create a new one
force_drop_db: false
pg_drop_create: ''

140
up.sh Executable file
View File

@@ -0,0 +1,140 @@
#!/bin/bash
# AWX Operator up.sh
# Purpose:
# Build operator image from your local checkout, push to quay.io/youruser/awx-operator:dev, and deploy operator
# -- Usage
# NAMESPACE=awx TAG=dev QUAY_USER=developer ./up.sh
# NAMESPACE=awx TAG=dev QUAY_USER=developer PULL_SECRET_FILE=my-secret.yml ./up.sh
# -- User Variables
NAMESPACE=${NAMESPACE:-awx}
QUAY_USER=${QUAY_USER:-developer}
TAG=${TAG:-$(git rev-parse --short HEAD)}
DEV_TAG=${DEV_TAG:-dev}
DEV_TAG_PUSH=${DEV_TAG_PUSH:-true}
PULL_SECRET_FILE=${PULL_SECRET_FILE:-hacking/pull-secret.yml}
# -- Check for required variables
# Set the following environment variables
#   export NAMESPACE=awx
#   export QUAY_USER=developer
# NOTE(review): both variables are defaulted above, so these guards only fire
# if a caller explicitly clears them (e.g. `QUAY_USER= ./up.sh`).
if [ -z "$QUAY_USER" ]; then
    echo "Error: QUAY_USER env variable is not set."
    echo "    export QUAY_USER=developer"
    exit 1
fi
if [ -z "$NAMESPACE" ]; then
    echo "Error: NAMESPACE env variable is not set. Run the following with your namespace:"
    echo "    export NAMESPACE=awx"
    exit 1
fi
# -- Container Build Engine (podman or docker)
ENGINE=${ENGINE:-podman}
# -- Variables
IMG=quay.io/$QUAY_USER/awx-operator
KUBE_APPLY="kubectl apply -n $NAMESPACE -f"
# -- Wait for existing project to be deleted
# Succeed (exit 0) while the target namespace is listed as Terminating.
# The function's status is the status of the pipeline, so no explicit
# `return` is needed.
is_namespace_terminating() {
  kubectl get namespace $NAMESPACE 2>/dev/null | grep -q 'Terminating'
}
# Check if the namespace exists and is in terminating state
if kubectl get namespace $NAMESPACE 2>/dev/null; then
echo "Namespace $NAMESPACE exists."
if is_namespace_terminating; then
echo "Namespace $NAMESPACE is in terminating state. Waiting for it to be fully terminated..."
while is_namespace_terminating; do
sleep 5
done
echo "Namespace $NAMESPACE has been terminated."
fi
fi
# -- Create namespace
kubectl create namespace $NAMESPACE
# -- Prepare
# Set imagePullPolicy to Always
files=(
config/manager/manager.yaml
)
for file in "${files[@]}"; do
if grep -qF 'imagePullPolicy: IfNotPresent' ${file}; then
sed -i -e "s|imagePullPolicy: IfNotPresent|imagePullPolicy: Always|g" ${file};
fi
done
# Create redhat-operators-pull-secret if pull credentials file exists
if [ -f "$PULL_SECRET_FILE" ]; then
$KUBE_APPLY $PULL_SECRET_FILE
fi
# Delete any old operator deployment in the target namespace so the freshly
# pushed image rolls out cleanly. Scope to $NAMESPACE (the bare form deleted
# from the kubectl current-context namespace instead) and tolerate absence on
# a first run with --ignore-not-found.
kubectl delete deployment awx-operator-controller-manager -n $NAMESPACE --ignore-not-found
# Create secrets
$KUBE_APPLY dev/secrets/custom-secret-key.yml
$KUBE_APPLY dev/secrets/admin-password-secret.yml
# (Optional) Create external-pg-secret
# $KUBE_APPLY dev/secrets/external-pg-secret.yml
# -- Login to Quay.io
$ENGINE login quay.io
if [ $ENGINE = 'podman' ]; then
if [ -f "$XDG_RUNTIME_DIR/containers/auth.json" ] ; then
REGISTRY_AUTH_CONFIG=$XDG_RUNTIME_DIR/containers/auth.json
echo "Found registry auth config: $REGISTRY_AUTH_CONFIG"
elif [ -f $HOME/.config/containers/auth.json ] ; then
REGISTRY_AUTH_CONFIG=$HOME/.config/containers/auth.json
echo "Found registry auth config: $REGISTRY_AUTH_CONFIG"
elif [ -f "/home/$USER/.docker/config.json" ] ; then
REGISTRY_AUTH_CONFIG=/home/$USER/.docker/config.json
echo "Found registry auth config: $REGISTRY_AUTH_CONFIG"
else
echo "No Podman configuration files were found."
fi
fi
if [ $ENGINE = 'docker' ]; then
if [ -f "/home/$USER/.docker/config.json" ] ; then
REGISTRY_AUTH_CONFIG=/home/$USER/.docker/config.json
echo "Found registry auth config: $REGISTRY_AUTH_CONFIG"
else
echo "No Docker configuration files were found."
fi
fi
# -- Build & Push Operator Image
echo "Preparing to build $IMG:$TAG ($IMG:$DEV_TAG) with $ENGINE..."
sleep 3
make docker-build docker-push IMG=$IMG:$TAG
# Tag and push the DEV_TAG image when DEV_TAG_PUSH is 'true'.
# Compare as a string: the previous form (`if $DEV_TAG_PUSH`) executed the
# environment-supplied value as a shell command, and the comment's suggested
# value 'True' would never have resolved to a command at all.
if [ "$DEV_TAG_PUSH" = "true" ]; then
  $ENGINE tag $IMG:$TAG $IMG:$DEV_TAG
  make docker-push IMG=$IMG:$DEV_TAG
fi
# -- Deploy Operator
make deploy IMG=$IMG:$TAG NAMESPACE=$NAMESPACE
# -- Create CR
# uncomment the CR you want to use
$KUBE_APPLY dev/awx-cr/awx-openshift-cr.yml
# $KUBE_APPLY dev/awx-cr/awx-cr-settings.yml
# $KUBE_APPLY dev/awx-cr/awx-k8s-ingress.yml