diff --git a/.github/stale.yml b/.github/stale.yml new file mode 100644 index 00000000..230cf78a --- /dev/null +++ b/.github/stale.yml @@ -0,0 +1,60 @@ +--- +# Configuration for probot-stale - https://github.com/probot/stale + +# Number of days of inactivity before an Issue or Pull Request becomes stale +daysUntilStale: 90 + +# Number of days of inactivity before an Issue or Pull Request with the stale +# label is closed. Set to false to disable. If disabled, issues still need to be +# closed manually, but will remain marked as stale. +daysUntilClose: 30 + +# Only issues or pull requests with all of these labels are checked if stale. +# Defaults to `[]` (disabled) +onlyLabels: [] + +# Issues or Pull Requests with these labels will never be considered stale. Set +# to `[]` to disable +exemptLabels: + - security + - planned + - priority/critical + - lifecycle/frozen + - verified + +# Set to true to ignore issues in a project (defaults to false) +exemptProjects: false + +# Set to true to ignore issues in a milestone (defaults to false) +exemptMilestones: true + +# Set to true to ignore issues with an assignee (defaults to false) +exemptAssignees: false + +# Label to use when marking as stale +staleLabel: lifecycle/stale + +# Limit the number of actions per hour, from 1-30. Default is 30 +limitPerRun: 30 + +pulls: + markComment: |- + PRs go stale after 90 days of inactivity. + If there is no further activity, the PR will be closed in another 30 days. + + unmarkComment: >- + This pull request is no longer stale. + + closeComment: >- + This pull request has been closed due to inactivity. + +issues: + markComment: |- + Issues go stale after 90 days of inactivity. + If there is no further activity, the issue will be closed in another 30 days. + + unmarkComment: >- + This issue is no longer stale. + + closeComment: >- + This issue has been closed due to inactivity. 
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e3fe7c20..5eb9b67f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -3,7 +3,7 @@ name: CI 'on': push: branches: - - master + - main pull_request: schedule: - cron: '0 6 * * *' @@ -30,7 +30,7 @@ jobs: run: pip install https://github.com/ansible/ansible/archive/devel.tar.gz --disable-pip-version-check - name: Run sanity tests on Python ${{ matrix.python_version }} - run: ansible-test sanity --docker -v --color --python ${{ matrix.python_version }} + run: make test-sanity PYTHON_VERSION=${{ matrix.python_version }} working-directory: ./ansible_collections/community/kubernetes integration: @@ -53,7 +53,7 @@ jobs: run: pip install https://github.com/ansible/ansible/archive/devel.tar.gz --disable-pip-version-check - name: Run integration tests on Python ${{ matrix.python_version }} - run: ansible-test integration --docker -v --color --retry-on-error --python ${{ matrix.python_version }} --continue-on-error --diff --coverage + run: make test-integration PYTHON_VERSION=${{ matrix.python_version }} working-directory: ./ansible_collections/community/kubernetes - name: Generate coverage report. @@ -76,7 +76,7 @@ jobs: path: ansible_collections/community/kubernetes - name: Set up KinD cluster - uses: engineerd/setup-kind@v0.3.0 + uses: engineerd/setup-kind@v0.4.0 - name: Set up Python ${{ matrix.python_version }} uses: actions/setup-python@v1 @@ -84,10 +84,21 @@ jobs: python-version: ${{ matrix.python_version }} - name: Install molecule and openshift dependencies - run: pip install molecule yamllint openshift + run: pip install ansible molecule yamllint openshift flake8 - - name: Install ansible base (devel branch) - run: pip install https://github.com/ansible/ansible/archive/devel.tar.gz --disable-pip-version-check + # The latest release doesn't work with Molecule currently. 
+ # See: https://github.com/ansible-community/molecule/issues/2757 + # - name: Install ansible base, latest release. + # run: | + # pip uninstall -y ansible + # pip install --pre ansible-base + + # The devel branch doesn't work with Molecule currently. + # See: https://github.com/ansible-community/molecule/issues/2757 + # - name: Install ansible base (devel branch) + # run: | + # pip uninstall -y ansible + # pip install https://github.com/ansible/ansible/archive/devel.tar.gz --disable-pip-version-check - name: Create default collection path symlink run: | mkdir -p /home/runner/.ansible ln -s /home/runner/work/kubernetes/kubernetes /home/runner/.ansible/collections - name: Run molecule default test scenario - run: molecule test + run: make test-molecule + working-directory: ./ansible_collections/community/kubernetes + + downstream-sanity-29: + runs-on: ubuntu-latest + strategy: + matrix: + python_version: ['3.6'] + steps: + - name: Check out code + uses: actions/checkout@v2 + with: + path: ansible_collections/community/kubernetes + + - name: Set up Python ${{ matrix.python_version }} + uses: actions/setup-python@v1 + with: + python-version: ${{ matrix.python_version }} + + - name: Install ansible (2.9 release) + run: pip install "ansible>=2.9.0,<2.10.0" + + - name: Run sanity tests on Python ${{ matrix.python_version }} + run: make downstream-test-sanity + working-directory: ./ansible_collections/community/kubernetes + + downstream-integration-29: + runs-on: ubuntu-latest + strategy: + matrix: + python_version: ['3.6'] + steps: + - name: Check out code + uses: actions/checkout@v2 + with: + path: ansible_collections/community/kubernetes + + - name: Set up Python ${{ matrix.python_version }} + uses: actions/setup-python@v1 + with: + python-version: ${{ matrix.python_version }} + + - name: Install ansible (2.9 release) + run: pip install "ansible>=2.9.0,<2.10.0" + + - name: Run integration tests on Python ${{ matrix.python_version }} + run: make 
downstream-test-integration + working-directory: ./ansible_collections/community/kubernetes + + downstream-molecule-29: + runs-on: ubuntu-latest + strategy: + matrix: + python_version: ['3.7'] + steps: + - name: Check out code + uses: actions/checkout@v2 + with: + path: ansible_collections/community/kubernetes + + - name: Set up KinD cluster + uses: engineerd/setup-kind@v0.4.0 + + - name: Set up Python ${{ matrix.python_version }} + uses: actions/setup-python@v1 + with: + python-version: ${{ matrix.python_version }} + + - name: Install molecule and openshift dependencies + run: pip install "ansible>=2.9.0,<2.10.0" molecule yamllint openshift flake8 + + - name: Create default collection path symlink + run: | + mkdir -p /home/runner/.ansible + ln -s /home/runner/work/kubernetes/kubernetes /home/runner/.ansible/collections + + - name: Run molecule default test scenario + run: make downstream-test-molecule working-directory: ./ansible_collections/community/kubernetes diff --git a/.gitignore b/.gitignore index 0460287a..f97b7875 100644 --- a/.gitignore +++ b/.gitignore @@ -6,6 +6,9 @@ __pycache__/ # Galaxy artifacts. *.tar.gz +# Changelog cache files. +changelogs/.plugin-cache.yaml + # Temporary test files. tests/output tests/integration/cloud-config-* diff --git a/.yamllint b/.yamllint index c5ae64be..45b68407 100644 --- a/.yamllint +++ b/.yamllint @@ -8,5 +8,9 @@ rules: brackets: max-spaces-inside: 1 level: error + document-start: disable line-length: disable truthy: disable + indentation: + spaces: 2 + indent-sequences: consistent diff --git a/CHANGELOG.md b/CHANGELOG.md deleted file mode 100644 index aeccbad0..00000000 --- a/CHANGELOG.md +++ /dev/null @@ -1,54 +0,0 @@ -# Kubernetes Collection Changes - -## 0.11.0 - -### New Features - - - PR #61: Add `helm`, `helm_info`, and `helm_repository` modules. - - PR #81: Rename repository to `community.kubernetes`. - -### Bug Fixes - - - PR #78: Update GitHub Actions workflow for better CI stability. 
- - PR #69: k8s_log no longer attempts to parse log as JSON. - - PR #85: Make sure extra files are not included in built collection. - -## 0.10.0 - -### New Features - - - PR #14: Add `k8s_exec` module for executing commands on pods via Kubernetes API. - - PR #16: Add `k8s_log` module for retrieving pod logs. - - Issue #49, PR #55: Add `persist_config` option for persisting refreshed tokens. - -### Security Fixes - - - PR #51: Warn about disclosure when using options like `kubectl_password`, `kubectl_extra_args`, and `kubectl_token` to pass data through to the command line using the `kubectl` connection plugin. - -### Bug Fixes - - - Issue #13: Fix argspec for 'elements'. - - Issue #33, PR #34: Fix argspec in `k8s_service`. - - Issue #10, PR #22: Test collection in a Kind cluster in CI using Molecule. - - PR #52: Documentation fix in `kubectl.py`. - - PR #54: Add exception handling when retrieving k8s client. - - PR #56: Use from_yaml filter with lookup examples in `k8s` module documentation examples. - -## 0.9.0 - - - Initial migration of Kubernetes content from Ansible core (2.9 / devel), including content: - - **Connection Plugins**: - - `kubectl` - - **Filter Plugins**: - - `k8s_config_resource_name` - - **Inventory Source**: - - `k8s` - - `openshift` - - **Lookup Plugins**: - - `k8s` - - **Modules**: - - `k8s` - - `k8s_auth` - - `k8s_info` - - `k8s_scale` - - `k8s_service` diff --git a/CHANGELOG.rst b/CHANGELOG.rst new file mode 100644 index 00000000..8a9bd301 --- /dev/null +++ b/CHANGELOG.rst @@ -0,0 +1,167 @@ +=================================== +Kubernetes Collection Release Notes +=================================== + +.. contents:: Topics + + +v1.0.0 +====== + +Major Changes +------------- + +- helm_plugin - new module to manage Helm plugins (https://github.com/ansible-collections/community.kubernetes/pull/154). 
+- helm_plugin_info - new modules to gather information about Helm plugins (https://github.com/ansible-collections/community.kubernetes/pull/154). +- k8s_exec - Return rc for the command executed (https://github.com/ansible-collections/community.kubernetes/pull/158). + +Minor Changes +------------- + +- Ensure check mode results are as expected (https://github.com/ansible-collections/community.kubernetes/pull/155). +- Update base branch to 'main' (https://github.com/ansible-collections/community.kubernetes/issues/148). +- helm - Add support for K8S_AUTH_CONTEXT, K8S_AUTH_KUBECONFIG env (https://github.com/ansible-collections/community.kubernetes/pull/141). +- helm - Allow creating namespaces with Helm (https://github.com/ansible-collections/community.kubernetes/pull/157). +- helm - add aliases context for kube_context (https://github.com/ansible-collections/community.kubernetes/pull/152). +- helm - add support for K8S_AUTH_KUBECONFIG and K8S_AUTH_CONTEXT environment variable (https://github.com/ansible-collections/community.kubernetes/issues/140). +- helm_info - add aliases context for kube_context (https://github.com/ansible-collections/community.kubernetes/pull/152). +- helm_info - add support for K8S_AUTH_KUBECONFIG and K8S_AUTH_CONTEXT environment variable (https://github.com/ansible-collections/community.kubernetes/issues/140). +- k8s_exec - return RC for the command executed (https://github.com/ansible-collections/community.kubernetes/issues/122). +- k8s_info - Update example using vars (https://github.com/ansible-collections/community.kubernetes/pull/156). + +Security Fixes +-------------- + +- kubectl - connection plugin now redact kubectl_token and kubectl_password in console log (https://github.com/ansible-collections/community.kubernetes/issues/65). +- kubectl - redacted token and password from console log (https://github.com/ansible-collections/community.kubernetes/pull/159). 
+ +Bugfixes +-------- + +- Test against stable ansible branch so molecule tests work (https://github.com/ansible-collections/community.kubernetes/pull/168). +- Update openshift requirements in k8s module doc (https://github.com/ansible-collections/community.kubernetes/pull/153). + +New Modules +----------- + +- helm_plugin - Manage Helm plugins +- helm_plugin_info - Gather information about Helm plugins + +v0.11.1 +======= + +Major Changes +------------- + +- Add changelog and fragments and document changelog process (https://github.com/ansible-collections/community.kubernetes/pull/131). + +Minor Changes +------------- + +- Add action groups for playbooks with module_defaults (https://github.com/ansible-collections/community.kubernetes/pull/107). +- Add requires_ansible version constraints to runtime.yml (https://github.com/ansible-collections/community.kubernetes/pull/126). +- Add sanity test ignore file for Ansible 2.11 (https://github.com/ansible-collections/community.kubernetes/pull/130). +- Add test for openshift apply bug (https://github.com/ansible-collections/community.kubernetes/pull/94). +- Add version_added to each new collection module (https://github.com/ansible-collections/community.kubernetes/pull/98). +- Check Python code using flake8 (https://github.com/ansible-collections/community.kubernetes/pull/123). +- Don't require project coverage check on PRs (https://github.com/ansible-collections/community.kubernetes/pull/102). +- Improve k8s Deployment and Daemonset wait conditions (https://github.com/ansible-collections/community.kubernetes/pull/35). +- Minor documentation fixes and use of FQCN in some examples (https://github.com/ansible-collections/community.kubernetes/pull/114). +- Remove action_groups_redirection entry from meta/runtime.yml (https://github.com/ansible-collections/community.kubernetes/pull/127). +- Remove deprecated ANSIBLE_METADATA field (https://github.com/ansible-collections/community.kubernetes/pull/95). 
+- Use FQCN in module docs and plugin examples (https://github.com/ansible-collections/community.kubernetes/pull/146). +- Use improved kubernetes diffs where possible (https://github.com/ansible-collections/community.kubernetes/pull/105). +- helm - add 'atomic' option (https://github.com/ansible-collections/community.kubernetes/pull/115). +- helm - minor code refactoring (https://github.com/ansible-collections/community.kubernetes/pull/110). +- helm_info and helm_repository - minor code refactor (https://github.com/ansible-collections/community.kubernetes/pull/117). +- k8s - Handle set object retrieved from lookup plugin (https://github.com/ansible-collections/community.kubernetes/pull/118). + +Bugfixes +-------- + +- Fix suboption docs structure for inventory plugins (https://github.com/ansible-collections/community.kubernetes/pull/103). +- Handle invalid kubeconfig parsing error (https://github.com/ansible-collections/community.kubernetes/pull/119). +- Make sure Service changes run correctly in check_mode (https://github.com/ansible-collections/community.kubernetes/pull/84). +- k8s_info - remove unneccessary k8s_facts deprecation notice (https://github.com/ansible-collections/community.kubernetes/pull/97). +- k8s_scale - Fix scale wait and add tests (https://github.com/ansible-collections/community.kubernetes/pull/100). +- raw - handle condition when definition is none (https://github.com/ansible-collections/community.kubernetes/pull/139). + +v0.11.0 +======= + +Major Changes +------------- + +- helm - New module for managing Helm charts (https://github.com/ansible-collections/community.kubernetes/pull/61). +- helm_info - New module for retrieving Helm chart information (https://github.com/ansible-collections/community.kubernetes/pull/61). +- helm_repository - New module for managing Helm repositories (https://github.com/ansible-collections/community.kubernetes/pull/61). 
+ +Minor Changes +------------- + +- Rename repository to ``community.kubernetes`` (https://github.com/ansible-collections/community.kubernetes/pull/81). + +Bugfixes +-------- + +- Make sure extra files are not included in built collection (https://github.com/ansible-collections/community.kubernetes/pull/85). +- Update GitHub Actions workflow for better CI stability (https://github.com/ansible-collections/community.kubernetes/pull/78). +- k8s_log - Module no longer attempts to parse log as JSON (https://github.com/ansible-collections/community.kubernetes/pull/69). + +New Modules +----------- + +- helm - Manages Kubernetes packages with the Helm package manager +- helm_info - Get information from Helm package deployed inside the cluster +- helm_repository - Add and remove Helm repository + +v0.10.0 +======= + +Major Changes +------------- + +- k8s_exec - New module for executing commands on pods via Kubernetes API (https://github.com/ansible-collections/community.kubernetes/pull/14). +- k8s_log - New module for retrieving pod logs (https://github.com/ansible-collections/community.kubernetes/pull/16). + +Minor Changes +------------- + +- k8s - Added ``persist_config`` option for persisting refreshed tokens (https://github.com/ansible-collections/community.kubernetes/issues/49). + +Security Fixes +-------------- + +- kubectl - Warn about information disclosure when using options like ``kubectl_password``, ``kubectl_extra_args``, and ``kubectl_token`` to pass data through to the command line using the ``kubectl`` connection plugin (https://github.com/ansible-collections/community.kubernetes/pull/51). + +Bugfixes +-------- + +- k8s - Add exception handling when retrieving k8s client (https://github.com/ansible-collections/community.kubernetes/pull/54). +- k8s - Fix argspec for 'elements' (https://github.com/ansible-collections/community.kubernetes/issues/13). 
+- k8s - Use ``from_yaml`` filter with lookup examples in ``k8s`` module documentation examples (https://github.com/ansible-collections/community.kubernetes/pull/56). +- k8s_service - Fix argspec (https://github.com/ansible-collections/community.kubernetes/issues/33). +- kubectl - Fix documentation in kubectl connection plugin (https://github.com/ansible-collections/community.kubernetes/pull/52). + +New Modules +----------- + +- k8s_exec - Execute command in Pod +- k8s_log - Fetch logs from Kubernetes resources + +v0.9.0 +====== + +Major Changes +------------- + +- k8s - Inventory source migrated from Ansible 2.9 to Kubernetes collection. +- k8s - Lookup plugin migrated from Ansible 2.9 to Kubernetes collection. +- k8s - Module migrated from Ansible 2.9 to Kubernetes collection. +- k8s_auth - Module migrated from Ansible 2.9 to Kubernetes collection. +- k8s_config_resource_name - Filter plugin migrated from Ansible 2.9 to Kubernetes collection. +- k8s_info - Module migrated from Ansible 2.9 to Kubernetes collection. +- k8s_scale - Module migrated from Ansible 2.9 to Kubernetes collection. +- k8s_service - Module migrated from Ansible 2.9 to Kubernetes collection. +- kubectl - Connection plugin migrated from Ansible 2.9 to Kubernetes collection. +- openshift - Inventory source migrated from Ansible 2.9 to Kubernetes collection. diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 00000000..0fcae83f --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,65 @@ +# Contributing + +## Getting Started + +General information about setting up your Python environment, testing modules, +Ansible coding styles, and more can be found in the [Ansible Community Guide]( +https://docs.ansible.com/ansible/latest/community/index.html). + + +## Kubernetes Collections + +### community.kubernetes + +This collection contains modules and plugins contributed and maintained by the Ansible Kubernetes +community. 
+ +New modules and plugins developed by the community should be proposed to `community.kubernetes`. + +## Submitting Issues +All software has bugs, and the `community.kubernetes` collection is no exception. When you find a bug, +you can help tremendously by [telling us about it](https://github.com/ansible-collections/community.kubernetes/issues/new/choose). + +If you should discover that the bug you're trying to file already exists in an issue, +you can help by verifying the behavior of the reported bug with a comment in that +issue, or by reporting any additional information. + +## Pull Requests + +All modules MUST have integration tests for new features. +Bug fixes for modules that currently have integration tests SHOULD have tests added. +New modules should be submitted to the [community.kubernetes](https://github.com/ansible-collections/community.kubernetes) collection and MUST have integration tests. + +Expected test criteria: +* Resource creation under check mode +* Resource creation +* Resource creation again (idempotency) under check mode +* Resource creation again (idempotency) +* Resource modification under check mode +* Resource modification +* Resource modification again (idempotency) under check mode +* Resource modification again (idempotency) +* Resource deletion under check mode +* Resource deletion +* Resource deletion (of a non-existent resource) under check mode +* Resource deletion (of a non-existent resource) + +Where modules have multiple parameters we recommend running through the 4-step modification cycle for each parameter the module accepts, as well as a modification cycle where most, if not all, parameters are modified at the same time. + +For general information on running the integration tests see the +[Integration Tests page of the Module Development Guide](https://docs.ansible.com/ansible/devel/dev_guide/testing_integration.html#testing-integration), +especially the section on configuration for cloud tests. 
For questions about writing tests the Ansible Kubernetes community can be found on Freenode IRC as detailed below. + + +### Code of Conduct +The `community.kubernetes` collection follows the Ansible project's +[Code of Conduct](https://docs.ansible.com/ansible/devel/community/code_of_conduct.html). +Please read and familiarize yourself with this document. + +### IRC +Our IRC channels may require you to register your nickname. If you receive an error when you connect, see +[Freenode's Nickname Registration guide](https://freenode.net/kb/answer/registration) for instructions. + +The `#ansible-kubernetes` channel on Freenode IRC is the main and official place to discuss use and development of the `community.kubernetes` collection. + +For more information about Ansible's Kubernetes integration, browse the resources in the [Kubernetes Working Group](https://github.com/ansible/community/wiki/Kubernetes) Community wiki page. diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..a6f0938c --- /dev/null +++ b/Makefile @@ -0,0 +1,43 @@ +# Also needs to be updated in galaxy.yml +VERSION = 1.0.0 + +TEST_ARGS ?= +PYTHON_VERSION ?= `python -c 'import platform; print("{0}.{1}".format(platform.python_version_tuple()[0], platform.python_version_tuple()[1]))'` + +clean: + rm -f community-kubernetes-${VERSION}.tar.gz + rm -rf ansible_collections + rm -rf tests/output + +build: clean + ansible-galaxy collection build + +release: build + ansible-galaxy collection publish community-kubernetes-${VERSION}.tar.gz + +install: build + ansible-galaxy collection install -p ansible_collections community-kubernetes-${VERSION}.tar.gz + +test-sanity: + ansible-test sanity --docker -v --color --python $(PYTHON_VERSION) $(TEST_ARGS) + +test-integration: + ansible-test integration --docker -v --color --retry-on-error --python $(PYTHON_VERSION) --continue-on-error --diff --coverage $(TEST_ARGS) + +test-molecule: + molecule test + +downstream-test-sanity: + ./utils/downstream.sh -s 
+ +downstream-test-integration: + ./utils/downstream.sh -i + +downstream-test-molecule: + ./utils/downstream.sh -m + +downstream-build: + ./utils/downstream.sh -b + +downstream-release: + ./utils/downstream.sh -r diff --git a/README.md b/README.md index 65ccc2ba..e3d7c6b9 100644 --- a/README.md +++ b/README.md @@ -11,31 +11,33 @@ The collection includes a variety of Ansible content to help automate the manage Click on the name of a plugin or module to view that content's documentation: - **Connection Plugins**: - - [kubectl](https://docs.ansible.com/ansible/latest/plugins/connection/kubectl.html) + - [kubectl](https://docs.ansible.com/ansible/2.10/collections/community/kubernetes/kubectl_connection.html) - **Filter Plugins**: - [k8s_config_resource_name](https://docs.ansible.com/ansible/latest/user_guide/playbooks_filters.html#kubernetes-filters) - **Inventory Source**: - - [k8s](https://docs.ansible.com/ansible/latest/plugins/inventory/k8s.html) - - [openshift](https://docs.ansible.com/ansible/latest/plugins/inventory/openshift.html) + - [k8s](https://docs.ansible.com/ansible/2.10/collections/community/kubernetes/k8s_inventory.html) + - [openshift](https://docs.ansible.com/ansible/2.10/collections/community/kubernetes/openshift_inventory.html) - **Lookup Plugins**: - - [k8s](https://docs.ansible.com/ansible/latest/plugins/lookup/k8s.html) + - [k8s](https://docs.ansible.com/ansible/2.10/collections/community/kubernetes/k8s_lookup.html) - **Modules**: - - [k8s](https://docs.ansible.com/ansible/latest/modules/k8s_module.html) - - [k8s_auth](https://docs.ansible.com/ansible/latest/modules/k8s_auth_module.html) - - [k8s_exec](https://github.com/ansible-collections/community.kubernetes/blob/master/plugins/modules/k8s_exec.py) - - [k8s_log](https://github.com/ansible-collections/community.kubernetes/blob/master/plugins/modules/k8s_log.py) - - [k8s_info](https://docs.ansible.com/ansible/latest/modules/k8s_info_module.html) - - 
[k8s_scale](https://docs.ansible.com/ansible/latest/modules/k8s_scale_module.html) - - [k8s_service](https://docs.ansible.com/ansible/latest/modules/k8s_service_module.html) - - [helm](https://github.com/ansible-collections/community.kubernetes/blob/master/plugins/modules/helm.py) - - [helm_info](https://github.com/ansible-collections/community.kubernetes/blob/master/plugins/modules/helm_info.py) - - [helm_repository](https://github.com/ansible-collections/community.kubernetes/blob/master/plugins/modules/helm_repository.py) + - [k8s](https://docs.ansible.com/ansible/2.10/collections/community/kubernetes/k8s_module.html) + - [k8s_auth](https://docs.ansible.com/ansible/2.10/collections/community/kubernetes/k8s_auth_module.html) + - [k8s_exec](https://docs.ansible.com/ansible/2.10/collections/community/kubernetes/k8s_exec_module.html) + - [k8s_info](https://docs.ansible.com/ansible/2.10/collections/community/kubernetes/k8s_info_module.html) + - [k8s_log](https://docs.ansible.com/ansible/2.10/collections/community/kubernetes/k8s_log_module.html) + - [k8s_scale](https://docs.ansible.com/ansible/2.10/collections/community/kubernetes/k8s_scale_module.html) + - [k8s_service](https://docs.ansible.com/ansible/2.10/collections/community/kubernetes/k8s_service_module.html) + - [helm](https://docs.ansible.com/ansible/2.10/collections/community/kubernetes/helm_module.html) + - [helm_info](https://docs.ansible.com/ansible/2.10/collections/community/kubernetes/helm_info_module.html) + - [helm_plugin](https://github.com/ansible-collections/community.kubernetes/blob/main/plugins/modules/helm_plugin.py) + - [helm_plugin_info](https://github.com/ansible-collections/community.kubernetes/blob/main/plugins/modules/helm_plugin_info.py) + - [helm_repository](https://docs.ansible.com/ansible/2.10/collections/community/kubernetes/helm_repository_module.html) ## Installation and Usage ### Installing the Collection from Ansible Galaxy -Before using the Kuberentes collection, you need to 
install it with the Ansible Galaxy CLI: +Before using the Kubernetes collection, you need to install it with the Ansible Galaxy CLI: ansible-galaxy collection install community.kubernetes @@ -45,7 +47,7 @@ You can also include it in a `requirements.yml` file and install it via `ansible --- collections: - name: community.kubernetes - version: 0.11.0 + version: 1.0.0 ``` ### Installing the OpenShift Python Library @@ -56,7 +58,51 @@ Content in this collection requires the [OpenShift Python client](https://pypi.o ### Using modules from the Kubernetes Collection in your playbooks -You can either call modules by their Fully Qualified Collection Namespace (FQCN), like `community.kubernetes.k8s_info`, or you can call modules by their short name if you list the `community.kubernetes` collection in the playbook's `collections`, like so: +It's preferable to use content in this collection using their Fully Qualified Collection Namespace (FQCN), for example `community.kubernetes.k8s_info`: + +```yaml +--- +- hosts: localhost + gather_facts: false + connection: local + + tasks: + - name: Ensure the myapp Namespace exists. + community.kubernetes.k8s: + api_version: v1 + kind: Namespace + name: myapp + state: present + + - name: Ensure the myapp Service exists in the myapp Namespace. + community.kubernetes.k8s: + state: present + definition: + apiVersion: v1 + kind: Service + metadata: + name: myapp + namespace: myapp + spec: + type: LoadBalancer + ports: + - port: 8080 + targetPort: 8080 + selector: + app: myapp + + - name: Get a list of all Services in the myapp namespace. + community.kubernetes.k8s_info: + kind: Service + namespace: myapp + register: myapp_services + + - name: Display number of Services in the myapp namespace. 
+ debug: + var: myapp_services.resources | count +``` + +If upgrading older playbooks which were built prior to Ansible 2.10 and this collection's existence, you can also define `collections` in your play and refer to this collection's modules as you did in Ansible 2.9 and below, as in this example: ```yaml --- @@ -74,34 +120,6 @@ You can either call modules by their Fully Qualified Collection Namespace (FQCN) kind: Namespace name: myapp state: present - - - name: Ensure the myapp Service exists in the myapp Namespace. - k8s: - state: present - definition: - apiVersion: v1 - kind: Service - metadata: - name: myapp - namespace: myapp - spec: - type: LoadBalancer - ports: - - port: 8080 - targetPort: 8080 - selector: - app: myapp - - - name: Get a list of all Services in the myapp namespace. - k8s_info: - kind: Service - namespace: myapp - register: myapp_services - - - name: Display number of Services in the myapp namespace. - debug: - var: myapp_services.resources | count - ``` For documentation on how to use individual modules and other content included in this collection, please see the links in the 'Included content' section earlier in this README. @@ -110,43 +128,41 @@ For documentation on how to use individual modules and other content included in If you want to develop new content for this collection or improve what's already here, the easiest way to work on the collection is to clone it into one of the configured [`COLLECTIONS_PATHS`](https://docs.ansible.com/ansible/latest/reference_appendices/config.html#collections-paths), and work on it there. +See [Contributing to community.kubernetes](CONTRIBUTING.md). + ### Testing with `ansible-test` The `tests` directory contains configuration for running sanity and integration tests using [`ansible-test`](https://docs.ansible.com/ansible/latest/dev_guide/testing_integration.html). 
You can run the collection's test suites with the commands: - ansible-test sanity --docker -v --color - ansible-test integration --docker -v --color + make test-sanity + make test-integration ### Testing with `molecule` -There are also integration tests in the `molecule` directory which are meant to be run against a local Kubernetes cluster, e.g. using [KinD](https://kind.sigs.k8s.io) or [Minikube](https://minikube.sigs.k8s.io). To run the tests, set up a local cluster, then run Molecule: +There are also integration tests in the `molecule` directory which are meant to be run against a local Kubernetes cluster, e.g. using [KinD](https://kind.sigs.k8s.io) or [Minikube](https://minikube.sigs.k8s.io). To set up a local cluster using KinD and run Molecule: kind create cluster - molecule test + make test-molecule ## Publishing New Versions -The current process for publishing new versions of the Kubernetes Collection is manual, and requires a user who has access to the `community.kubernetes` namespace on Ansible Galaxy to publish the build artifact. See [Issue #43](https://github.com/ansible-collections/community.kubernetes/issues/43) for progress in automating this process. +Releases are automatically built and pushed to Ansible Galaxy for any new tag. Before tagging a release, make sure to do the following: - 1. Ensure you're running Ansible from devel, so the [`build_ignore` key](https://github.com/ansible/ansible/issues/67130) in `galaxy.yml` is used. - 1. Run `git clean -x -d -f` in this repository's directory to clean out any extra files which should not be included. - 1. Ensure `CHANGELOG.md` contains all the latest changes. 1. Update `galaxy.yml` and this README's `requirements.yml` example with the new `version` for the collection. + 1. Update the CHANGELOG: + 1. Make sure you have [`antsibull-changelog`](https://pypi.org/project/antsibull-changelog/) installed. + 1. Make sure there are fragments for all known changes in `changelogs/fragments`. + 1. 
Run `antsibull-changelog release`. + 1. Commit the changes and create a PR with the changes. Wait for tests to pass, then merge it once they have. 1. Tag the version in Git and push to GitHub. - 1. Run the following commands to build and release the new version on Galaxy: - - ``` - ansible-galaxy collection build - ansible-galaxy collection publish ./community-kubernetes-$VERSION_HERE.tar.gz - ``` After the version is published, verify it exists on the [Kubernetes Collection Galaxy page](https://galaxy.ansible.com/community/kubernetes). ## More Information -For more information about Ansible's Kubernetes integration, join the `#ansible-community` channel on Freenode IRC, and browse the resources in the [Kubernetes Working Group](https://github.com/ansible/community/wiki/Kubernetes) Community wiki page. +For more information about Ansible's Kubernetes integration, join the `#ansible-kubernetes` channel on Freenode IRC, and browse the resources in the [Kubernetes Working Group](https://github.com/ansible/community/wiki/Kubernetes) Community wiki page. ## License diff --git a/changelogs/changelog.yaml b/changelogs/changelog.yaml new file mode 100644 index 00000000..65d8e949 --- /dev/null +++ b/changelogs/changelog.yaml @@ -0,0 +1,188 @@ +ancestor: null +releases: + 0.10.0: + changes: + bugfixes: + - k8s - Add exception handling when retrieving k8s client (https://github.com/ansible-collections/community.kubernetes/pull/54). + - k8s - Fix argspec for 'elements' (https://github.com/ansible-collections/community.kubernetes/issues/13). + - k8s - Use ``from_yaml`` filter with lookup examples in ``k8s`` module documentation + examples (https://github.com/ansible-collections/community.kubernetes/pull/56). + - k8s_service - Fix argspec (https://github.com/ansible-collections/community.kubernetes/issues/33). + - kubectl - Fix documentation in kubectl connection plugin (https://github.com/ansible-collections/community.kubernetes/pull/52). 
+ major_changes: + - k8s_exec - New module for executing commands on pods via Kubernetes API (https://github.com/ansible-collections/community.kubernetes/pull/14). + - k8s_log - New module for retrieving pod logs (https://github.com/ansible-collections/community.kubernetes/pull/16). + minor_changes: + - k8s - Added ``persist_config`` option for persisting refreshed tokens (https://github.com/ansible-collections/community.kubernetes/issues/49). + security_fixes: + - kubectl - Warn about information disclosure when using options like ``kubectl_password``, + ``kubectl_extra_args``, and ``kubectl_token`` to pass data through to the + command line using the ``kubectl`` connection plugin (https://github.com/ansible-collections/community.kubernetes/pull/51). + fragments: + - 13-fix-elements-argspec.yaml + - 14-k8s_exec-new-module.yaml + - 16-k8s_log-new-module.yaml + - 33-k8s_service-fix-argspec.yaml + - 49-k8s-add-persist_config-option.yaml + - 51-kubectl-security-disclosure.yaml + - 52-kubectl-connection-docsfix.yaml + - 54-k8s-add-exception-handling.yaml + - 56-k8s-from_yaml-docs-examples.yaml + modules: + - description: Execute command in Pod + name: k8s_exec + namespace: '' + - description: Fetch logs from Kubernetes resources + name: k8s_log + namespace: '' + release_date: '2020-03-23' + 0.11.0: + changes: + bugfixes: + - Make sure extra files are not included in built collection (https://github.com/ansible-collections/community.kubernetes/pull/85). + - Update GitHub Actions workflow for better CI stability (https://github.com/ansible-collections/community.kubernetes/pull/78). + - k8s_log - Module no longer attempts to parse log as JSON (https://github.com/ansible-collections/community.kubernetes/pull/69). + major_changes: + - helm - New module for managing Helm charts (https://github.com/ansible-collections/community.kubernetes/pull/61). 
+ - helm_info - New module for retrieving Helm chart information (https://github.com/ansible-collections/community.kubernetes/pull/61). + - helm_repository - New module for managing Helm repositories (https://github.com/ansible-collections/community.kubernetes/pull/61). + minor_changes: + - Rename repository to ``community.kubernetes`` (https://github.com/ansible-collections/community.kubernetes/pull/81). + fragments: + - 61-helm-new-modules.yaml + - 69-k8s_log-dont-parse-as-json.yaml + - 78-github-actions-workflow.yaml + - 81-rename-repository.yaml + - 85-exclude-unnecessary-files-when-building.yaml + modules: + - description: Manages Kubernetes packages with the Helm package manager + name: helm + namespace: '' + - description: Get information from Helm package deployed inside the cluster + name: helm_info + namespace: '' + - description: Add and remove Helm repository + name: helm_repository + namespace: '' + release_date: '2020-05-04' + 0.11.1: + changes: + bugfixes: + - Fix suboption docs structure for inventory plugins (https://github.com/ansible-collections/community.kubernetes/pull/103). + - Handle invalid kubeconfig parsing error (https://github.com/ansible-collections/community.kubernetes/pull/119). + - Make sure Service changes run correctly in check_mode (https://github.com/ansible-collections/community.kubernetes/pull/84). + - k8s_info - remove unnecessary k8s_facts deprecation notice (https://github.com/ansible-collections/community.kubernetes/pull/97). + - k8s_scale - Fix scale wait and add tests (https://github.com/ansible-collections/community.kubernetes/pull/100). + - raw - handle condition when definition is none (https://github.com/ansible-collections/community.kubernetes/pull/139). + major_changes: + - Add changelog and fragments and document changelog process (https://github.com/ansible-collections/community.kubernetes/pull/131).
+ minor_changes: + - Add action groups for playbooks with module_defaults (https://github.com/ansible-collections/community.kubernetes/pull/107). + - Add requires_ansible version constraints to runtime.yml (https://github.com/ansible-collections/community.kubernetes/pull/126). + - Add sanity test ignore file for Ansible 2.11 (https://github.com/ansible-collections/community.kubernetes/pull/130). + - Add test for openshift apply bug (https://github.com/ansible-collections/community.kubernetes/pull/94). + - Add version_added to each new collection module (https://github.com/ansible-collections/community.kubernetes/pull/98). + - Check Python code using flake8 (https://github.com/ansible-collections/community.kubernetes/pull/123). + - Don't require project coverage check on PRs (https://github.com/ansible-collections/community.kubernetes/pull/102). + - Improve k8s Deployment and Daemonset wait conditions (https://github.com/ansible-collections/community.kubernetes/pull/35). + - Minor documentation fixes and use of FQCN in some examples (https://github.com/ansible-collections/community.kubernetes/pull/114). + - Remove action_groups_redirection entry from meta/runtime.yml (https://github.com/ansible-collections/community.kubernetes/pull/127). + - Remove deprecated ANSIBLE_METADATA field (https://github.com/ansible-collections/community.kubernetes/pull/95). + - Use FQCN in module docs and plugin examples (https://github.com/ansible-collections/community.kubernetes/pull/146). + - Use improved kubernetes diffs where possible (https://github.com/ansible-collections/community.kubernetes/pull/105). + - helm - add 'atomic' option (https://github.com/ansible-collections/community.kubernetes/pull/115). + - helm - minor code refactoring (https://github.com/ansible-collections/community.kubernetes/pull/110). + - helm_info and helm_repository - minor code refactor (https://github.com/ansible-collections/community.kubernetes/pull/117). 
+ - k8s - Handle set object retrieved from lookup plugin (https://github.com/ansible-collections/community.kubernetes/pull/118). + fragments: + - 100-k8s_scale-fix-wait.yaml + - 102-dont-require-codecov-check-prs.yaml + - 103-fix-inventory-docs-structure.yaml + - 105-improved-k8s-diffs.yaml + - 107-action-groups-module_defaults.yaml + - 110-helm-minor-refactor.yaml + - 114-minor-docs-fixes.yaml + - 115-helm-add-atomic.yaml + - 117-helm-minor-refactor.yaml + - 118-k8s-lookup-handle-set-object.yaml + - 119-handle-kubeconfig-error.yaml + - 123-flake8.yaml + - 126-requires_ansible-version-constraints.yaml + - 127-remove-action_groups_redirection.yaml + - 130-add-sanity-ignore-211.yaml + - 131-changelog-fragments.yaml + - 139-fix-manifest-ends-with-separator.yml + - 146-fqcn-in-docs.yaml + - 35-wait-conditions.yaml + - 84-check_mode-service-change.yaml + - 94-openshift-apply-test.yaml + - 95-remove-ANSIBLE_METADATA.yaml + - 97-remove-k8s_facts-deprecation.yaml + - 98-add-version_added.yaml + release_date: '2020-07-01' + 0.9.0: + changes: + major_changes: + - k8s - Inventory source migrated from Ansible 2.9 to Kubernetes collection. + - k8s - Lookup plugin migrated from Ansible 2.9 to Kubernetes collection. + - k8s - Module migrated from Ansible 2.9 to Kubernetes collection. + - k8s_auth - Module migrated from Ansible 2.9 to Kubernetes collection. + - k8s_config_resource_name - Filter plugin migrated from Ansible 2.9 to Kubernetes + collection. + - k8s_info - Module migrated from Ansible 2.9 to Kubernetes collection. + - k8s_scale - Module migrated from Ansible 2.9 to Kubernetes collection. + - k8s_service - Module migrated from Ansible 2.9 to Kubernetes collection. + - kubectl - Connection plugin migrated from Ansible 2.9 to Kubernetes collection. + - openshift - Inventory source migrated from Ansible 2.9 to Kubernetes collection. 
+ fragments: + - 4-k8s-prepare-collection-for-release.yaml + release_date: '2020-02-05' + 1.0.0: + changes: + bugfixes: + - Test against stable ansible branch so molecule tests work (https://github.com/ansible-collections/community.kubernetes/pull/168). + - Update openshift requirements in k8s module doc (https://github.com/ansible-collections/community.kubernetes/pull/153). + major_changes: + - helm_plugin - new module to manage Helm plugins (https://github.com/ansible-collections/community.kubernetes/pull/154). + - helm_plugin_info - new modules to gather information about Helm plugins (https://github.com/ansible-collections/community.kubernetes/pull/154). + - k8s_exec - Return rc for the command executed (https://github.com/ansible-collections/community.kubernetes/pull/158). + minor_changes: + - Ensure check mode results are as expected (https://github.com/ansible-collections/community.kubernetes/pull/155). + - Update base branch to 'main' (https://github.com/ansible-collections/community.kubernetes/issues/148). + - helm - Add support for K8S_AUTH_CONTEXT, K8S_AUTH_KUBECONFIG env (https://github.com/ansible-collections/community.kubernetes/pull/141). + - helm - Allow creating namespaces with Helm (https://github.com/ansible-collections/community.kubernetes/pull/157). + - helm - add aliases context for kube_context (https://github.com/ansible-collections/community.kubernetes/pull/152). + - helm - add support for K8S_AUTH_KUBECONFIG and K8S_AUTH_CONTEXT environment + variable (https://github.com/ansible-collections/community.kubernetes/issues/140). + - helm_info - add aliases context for kube_context (https://github.com/ansible-collections/community.kubernetes/pull/152). + - helm_info - add support for K8S_AUTH_KUBECONFIG and K8S_AUTH_CONTEXT environment + variable (https://github.com/ansible-collections/community.kubernetes/issues/140). + - k8s_exec - return RC for the command executed (https://github.com/ansible-collections/community.kubernetes/issues/122). 
+ - k8s_info - Update example using vars (https://github.com/ansible-collections/community.kubernetes/pull/156). + security_fixes: + - kubectl - connection plugin now redact kubectl_token and kubectl_password + in console log (https://github.com/ansible-collections/community.kubernetes/issues/65). + - kubectl - redacted token and password from console log (https://github.com/ansible-collections/community.kubernetes/pull/159). + fragments: + - 122_k8s_exec_rc.yml + - 140-kubeconfig-env.yaml + - 141-helm-add-k8s-env-vars.yaml + - 148-update-base-branch-main.yaml + - 152-helm-context-aliases.yml + - 153-update-openshift-requirements.yaml + - 154-helm_plugin-helm_plugin_info-new-modules.yaml + - 155-ensure-check-mode-waits.yaml + - 156-k8s_info-vars-example.yaml + - 157-helm-create-namespace.yaml + - 158-k8s_exec-return-rc.yaml + - 159-kubectl-redact-token-and-password.yaml + - 168-test-stable-ansible.yaml + - 65_kubectl.yml + modules: + - description: Manage Helm plugins + name: helm_plugin + namespace: '' + - description: Gather information about Helm plugins + name: helm_plugin_info + namespace: '' + release_date: '2020-07-28' diff --git a/changelogs/config.yaml b/changelogs/config.yaml new file mode 100644 index 00000000..519494a3 --- /dev/null +++ b/changelogs/config.yaml @@ -0,0 +1,30 @@ +--- +changelog_filename_template: ../CHANGELOG.rst +changelog_filename_version_depth: 0 +changes_file: changelog.yaml +changes_format: combined +keep_fragments: true +mention_ancestor: true +new_plugins_after_name: removed_features +notesdir: fragments +prelude_section_name: release_summary +prelude_section_title: Release Summary +sections: +- - major_changes + - Major Changes +- - minor_changes + - Minor Changes +- - breaking_changes + - Breaking Changes / Porting Guide +- - deprecated_features + - Deprecated Features +- - removed_features + - Removed Features (previously deprecated) +- - security_fixes + - Security Fixes +- - bugfixes + - Bugfixes +- - known_issues + - Known 
Issues +title: Kubernetes Collection +trivial_section_name: trivial diff --git a/changelogs/fragments/100-k8s_scale-fix-wait.yaml b/changelogs/fragments/100-k8s_scale-fix-wait.yaml new file mode 100644 index 00000000..9dc860fa --- /dev/null +++ b/changelogs/fragments/100-k8s_scale-fix-wait.yaml @@ -0,0 +1,3 @@ +--- +bugfixes: + - k8s_scale - Fix scale wait and add tests (https://github.com/ansible-collections/community.kubernetes/pull/100). diff --git a/changelogs/fragments/102-dont-require-codecov-check-prs.yaml b/changelogs/fragments/102-dont-require-codecov-check-prs.yaml new file mode 100644 index 00000000..829caf09 --- /dev/null +++ b/changelogs/fragments/102-dont-require-codecov-check-prs.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: + - Don't require project coverage check on PRs (https://github.com/ansible-collections/community.kubernetes/pull/102). diff --git a/changelogs/fragments/103-fix-inventory-docs-structure.yaml b/changelogs/fragments/103-fix-inventory-docs-structure.yaml new file mode 100644 index 00000000..d26d99c1 --- /dev/null +++ b/changelogs/fragments/103-fix-inventory-docs-structure.yaml @@ -0,0 +1,3 @@ +--- +bugfixes: + - Fix suboption docs structure for inventory plugins (https://github.com/ansible-collections/community.kubernetes/pull/103). diff --git a/changelogs/fragments/105-improved-k8s-diffs.yaml b/changelogs/fragments/105-improved-k8s-diffs.yaml new file mode 100644 index 00000000..5e1f74ea --- /dev/null +++ b/changelogs/fragments/105-improved-k8s-diffs.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: + - Use improved kubernetes diffs where possible (https://github.com/ansible-collections/community.kubernetes/pull/105). 
diff --git a/changelogs/fragments/106-helm_replace.yml b/changelogs/fragments/106-helm_replace.yml new file mode 100644 index 00000000..6fd7b654 --- /dev/null +++ b/changelogs/fragments/106-helm_replace.yml @@ -0,0 +1,2 @@ +bugfixes: +- helm - add replace parameter (https://github.com/ansible-collections/community.kubernetes/issues/106). diff --git a/changelogs/fragments/107-action-groups-module_defaults.yaml b/changelogs/fragments/107-action-groups-module_defaults.yaml new file mode 100644 index 00000000..06672d98 --- /dev/null +++ b/changelogs/fragments/107-action-groups-module_defaults.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: + - Add action groups for playbooks with module_defaults (https://github.com/ansible-collections/community.kubernetes/pull/107). diff --git a/changelogs/fragments/110-helm-minor-refactor.yaml b/changelogs/fragments/110-helm-minor-refactor.yaml new file mode 100644 index 00000000..4e929176 --- /dev/null +++ b/changelogs/fragments/110-helm-minor-refactor.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: + - helm - minor code refactoring (https://github.com/ansible-collections/community.kubernetes/pull/110). diff --git a/changelogs/fragments/114-minor-docs-fixes.yaml b/changelogs/fragments/114-minor-docs-fixes.yaml new file mode 100644 index 00000000..106841a9 --- /dev/null +++ b/changelogs/fragments/114-minor-docs-fixes.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: + - Minor documentation fixes and use of FQCN in some examples (https://github.com/ansible-collections/community.kubernetes/pull/114). diff --git a/changelogs/fragments/115-helm-add-atomic.yaml b/changelogs/fragments/115-helm-add-atomic.yaml new file mode 100644 index 00000000..68f5bab3 --- /dev/null +++ b/changelogs/fragments/115-helm-add-atomic.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: + - helm - add 'atomic' option (https://github.com/ansible-collections/community.kubernetes/pull/115). 
diff --git a/changelogs/fragments/117-helm-minor-refactor.yaml b/changelogs/fragments/117-helm-minor-refactor.yaml new file mode 100644 index 00000000..391932fe --- /dev/null +++ b/changelogs/fragments/117-helm-minor-refactor.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: + - helm_info and helm_repository - minor code refactor (https://github.com/ansible-collections/community.kubernetes/pull/117). diff --git a/changelogs/fragments/118-k8s-lookup-handle-set-object.yaml b/changelogs/fragments/118-k8s-lookup-handle-set-object.yaml new file mode 100644 index 00000000..6580f873 --- /dev/null +++ b/changelogs/fragments/118-k8s-lookup-handle-set-object.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: + - k8s - Handle set object retrieved from lookup plugin (https://github.com/ansible-collections/community.kubernetes/pull/118). diff --git a/changelogs/fragments/119-handle-kubeconfig-error.yaml b/changelogs/fragments/119-handle-kubeconfig-error.yaml new file mode 100644 index 00000000..97c2201f --- /dev/null +++ b/changelogs/fragments/119-handle-kubeconfig-error.yaml @@ -0,0 +1,3 @@ +--- +bugfixes: + - Handle invalid kubeconfig parsing error (https://github.com/ansible-collections/community.kubernetes/pull/119). diff --git a/changelogs/fragments/122_k8s_exec_rc.yml b/changelogs/fragments/122_k8s_exec_rc.yml new file mode 100644 index 00000000..91d09b67 --- /dev/null +++ b/changelogs/fragments/122_k8s_exec_rc.yml @@ -0,0 +1,2 @@ +minor_changes: +- k8s_exec - return RC for the command executed (https://github.com/ansible-collections/community.kubernetes/issues/122). diff --git a/changelogs/fragments/123-flake8.yaml b/changelogs/fragments/123-flake8.yaml new file mode 100644 index 00000000..3f9abc20 --- /dev/null +++ b/changelogs/fragments/123-flake8.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: + - Check Python code using flake8 (https://github.com/ansible-collections/community.kubernetes/pull/123). 
diff --git a/changelogs/fragments/126-requires_ansible-version-constraints.yaml b/changelogs/fragments/126-requires_ansible-version-constraints.yaml new file mode 100644 index 00000000..3ff31dfa --- /dev/null +++ b/changelogs/fragments/126-requires_ansible-version-constraints.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: + - Add requires_ansible version constraints to runtime.yml (https://github.com/ansible-collections/community.kubernetes/pull/126). diff --git a/changelogs/fragments/127-remove-action_groups_redirection.yaml b/changelogs/fragments/127-remove-action_groups_redirection.yaml new file mode 100644 index 00000000..1827457c --- /dev/null +++ b/changelogs/fragments/127-remove-action_groups_redirection.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: + - Remove action_groups_redirection entry from meta/runtime.yml (https://github.com/ansible-collections/community.kubernetes/pull/127). diff --git a/changelogs/fragments/13-fix-elements-argspec.yaml b/changelogs/fragments/13-fix-elements-argspec.yaml new file mode 100644 index 00000000..a9e7ef89 --- /dev/null +++ b/changelogs/fragments/13-fix-elements-argspec.yaml @@ -0,0 +1,3 @@ +--- +bugfixes: + - k8s - Fix argspec for 'elements' (https://github.com/ansible-collections/community.kubernetes/issues/13). diff --git a/changelogs/fragments/130-add-sanity-ignore-211.yaml b/changelogs/fragments/130-add-sanity-ignore-211.yaml new file mode 100644 index 00000000..ad7af68a --- /dev/null +++ b/changelogs/fragments/130-add-sanity-ignore-211.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: + - Add sanity test ignore file for Ansible 2.11 (https://github.com/ansible-collections/community.kubernetes/pull/130). 
diff --git a/changelogs/fragments/131-changelog-fragments.yaml b/changelogs/fragments/131-changelog-fragments.yaml new file mode 100644 index 00000000..3b3d0397 --- /dev/null +++ b/changelogs/fragments/131-changelog-fragments.yaml @@ -0,0 +1,3 @@ +--- +major_changes: + - Add changelog and fragments and document changelog process (https://github.com/ansible-collections/community.kubernetes/pull/131). diff --git a/changelogs/fragments/139-fix-manifest-ends-with-separator.yml b/changelogs/fragments/139-fix-manifest-ends-with-separator.yml new file mode 100644 index 00000000..2742241d --- /dev/null +++ b/changelogs/fragments/139-fix-manifest-ends-with-separator.yml @@ -0,0 +1,3 @@ +--- +bugfixes: + - raw - handle condition when definition is none (https://github.com/ansible-collections/community.kubernetes/pull/139). diff --git a/changelogs/fragments/14-k8s_exec-new-module.yaml b/changelogs/fragments/14-k8s_exec-new-module.yaml new file mode 100644 index 00000000..40594b80 --- /dev/null +++ b/changelogs/fragments/14-k8s_exec-new-module.yaml @@ -0,0 +1,3 @@ +--- +major_changes: + - k8s_exec - New module for executing commands on pods via Kubernetes API (https://github.com/ansible-collections/community.kubernetes/pull/14). diff --git a/changelogs/fragments/140-kubeconfig-env.yaml b/changelogs/fragments/140-kubeconfig-env.yaml new file mode 100644 index 00000000..0d00dd7b --- /dev/null +++ b/changelogs/fragments/140-kubeconfig-env.yaml @@ -0,0 +1,4 @@ +--- +minor_changes: + - helm - add support for K8S_AUTH_KUBECONFIG and K8S_AUTH_CONTEXT environment variable (https://github.com/ansible-collections/community.kubernetes/issues/140). + - helm_info - add support for K8S_AUTH_KUBECONFIG and K8S_AUTH_CONTEXT environment variable (https://github.com/ansible-collections/community.kubernetes/issues/140). 
diff --git a/changelogs/fragments/141-helm-add-k8s-env-vars.yaml b/changelogs/fragments/141-helm-add-k8s-env-vars.yaml new file mode 100644 index 00000000..e6518d49 --- /dev/null +++ b/changelogs/fragments/141-helm-add-k8s-env-vars.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: + - helm - Add support for K8S_AUTH_CONTEXT, K8S_AUTH_KUBECONFIG env (https://github.com/ansible-collections/community.kubernetes/pull/141). diff --git a/changelogs/fragments/146-fqcn-in-docs.yaml b/changelogs/fragments/146-fqcn-in-docs.yaml new file mode 100644 index 00000000..0ff70860 --- /dev/null +++ b/changelogs/fragments/146-fqcn-in-docs.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: + - Use FQCN in module docs and plugin examples (https://github.com/ansible-collections/community.kubernetes/pull/146). diff --git a/changelogs/fragments/148-update-base-branch-main.yaml b/changelogs/fragments/148-update-base-branch-main.yaml new file mode 100644 index 00000000..268200c5 --- /dev/null +++ b/changelogs/fragments/148-update-base-branch-main.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: + - Update base branch to 'main' (https://github.com/ansible-collections/community.kubernetes/issues/148). diff --git a/changelogs/fragments/152-helm-context-aliases.yml b/changelogs/fragments/152-helm-context-aliases.yml new file mode 100644 index 00000000..4a3d0876 --- /dev/null +++ b/changelogs/fragments/152-helm-context-aliases.yml @@ -0,0 +1,4 @@ +--- +minor_changes: + - helm - add aliases context for kube_context (https://github.com/ansible-collections/community.kubernetes/pull/152). + - helm_info - add aliases context for kube_context (https://github.com/ansible-collections/community.kubernetes/pull/152). 
diff --git a/changelogs/fragments/153-update-openshift-requirements.yaml b/changelogs/fragments/153-update-openshift-requirements.yaml new file mode 100644 index 00000000..502449b0 --- /dev/null +++ b/changelogs/fragments/153-update-openshift-requirements.yaml @@ -0,0 +1,3 @@ +--- +bugfixes: + - Update openshift requirements in k8s module doc (https://github.com/ansible-collections/community.kubernetes/pull/153). diff --git a/changelogs/fragments/154-helm_plugin-helm_plugin_info-new-modules.yaml b/changelogs/fragments/154-helm_plugin-helm_plugin_info-new-modules.yaml new file mode 100644 index 00000000..33a550f8 --- /dev/null +++ b/changelogs/fragments/154-helm_plugin-helm_plugin_info-new-modules.yaml @@ -0,0 +1,4 @@ +--- +major_changes: + - helm_plugin - new module to manage Helm plugins (https://github.com/ansible-collections/community.kubernetes/pull/154). + - helm_plugin_info - new modules to gather information about Helm plugins (https://github.com/ansible-collections/community.kubernetes/pull/154). diff --git a/changelogs/fragments/155-ensure-check-mode-waits.yaml b/changelogs/fragments/155-ensure-check-mode-waits.yaml new file mode 100644 index 00000000..5a68c343 --- /dev/null +++ b/changelogs/fragments/155-ensure-check-mode-waits.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: + - Ensure check mode results are as expected (https://github.com/ansible-collections/community.kubernetes/pull/155). diff --git a/changelogs/fragments/156-k8s_info-vars-example.yaml b/changelogs/fragments/156-k8s_info-vars-example.yaml new file mode 100644 index 00000000..0ebe4318 --- /dev/null +++ b/changelogs/fragments/156-k8s_info-vars-example.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: + - k8s_info - Update example using vars (https://github.com/ansible-collections/community.kubernetes/pull/156). 
diff --git a/changelogs/fragments/157-helm-create-namespace.yaml b/changelogs/fragments/157-helm-create-namespace.yaml new file mode 100644 index 00000000..8e3ace60 --- /dev/null +++ b/changelogs/fragments/157-helm-create-namespace.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: + - helm - Allow creating namespaces with Helm (https://github.com/ansible-collections/community.kubernetes/pull/157). diff --git a/changelogs/fragments/158-k8s_exec-return-rc.yaml b/changelogs/fragments/158-k8s_exec-return-rc.yaml new file mode 100644 index 00000000..f8a10fb7 --- /dev/null +++ b/changelogs/fragments/158-k8s_exec-return-rc.yaml @@ -0,0 +1,3 @@ +--- +major_changes: + - k8s_exec - Return rc for the command executed (https://github.com/ansible-collections/community.kubernetes/pull/158). diff --git a/changelogs/fragments/159-kubectl-redact-token-and-password.yaml b/changelogs/fragments/159-kubectl-redact-token-and-password.yaml new file mode 100644 index 00000000..b58ffd97 --- /dev/null +++ b/changelogs/fragments/159-kubectl-redact-token-and-password.yaml @@ -0,0 +1,3 @@ +--- +security_fixes: + - kubectl - redacted token and password from console log (https://github.com/ansible-collections/community.kubernetes/pull/159). diff --git a/changelogs/fragments/16-k8s_log-new-module.yaml b/changelogs/fragments/16-k8s_log-new-module.yaml new file mode 100644 index 00000000..e6ae7335 --- /dev/null +++ b/changelogs/fragments/16-k8s_log-new-module.yaml @@ -0,0 +1,3 @@ +--- +major_changes: + - k8s_log - New module for retrieving pod logs (https://github.com/ansible-collections/community.kubernetes/pull/16). 
diff --git a/changelogs/fragments/168-test-stable-ansible.yaml b/changelogs/fragments/168-test-stable-ansible.yaml new file mode 100644 index 00000000..d05a4b0a --- /dev/null +++ b/changelogs/fragments/168-test-stable-ansible.yaml @@ -0,0 +1,3 @@ +--- +bugfixes: + - Test against stable ansible branch so molecule tests work (https://github.com/ansible-collections/community.kubernetes/pull/168). diff --git a/changelogs/fragments/200_kubectl_fix.yml b/changelogs/fragments/200_kubectl_fix.yml new file mode 100644 index 00000000..91430dc1 --- /dev/null +++ b/changelogs/fragments/200_kubectl_fix.yml @@ -0,0 +1,2 @@ +minor_changes: +- kubectl plugin - correct console log (https://github.com/ansible-collections/community.kubernetes/issues/200). diff --git a/changelogs/fragments/202_k8s_info.yml b/changelogs/fragments/202_k8s_info.yml new file mode 100644 index 00000000..78f26256 --- /dev/null +++ b/changelogs/fragments/202_k8s_info.yml @@ -0,0 +1,2 @@ +minor_changes: +- k8s_info - update custom resource example (https://github.com/ansible-collections/community.kubernetes/issues/202). diff --git a/changelogs/fragments/33-k8s_service-fix-argspec.yaml b/changelogs/fragments/33-k8s_service-fix-argspec.yaml new file mode 100644 index 00000000..91daa0d6 --- /dev/null +++ b/changelogs/fragments/33-k8s_service-fix-argspec.yaml @@ -0,0 +1,3 @@ +--- +bugfixes: + - k8s_service - Fix argspec (https://github.com/ansible-collections/community.kubernetes/issues/33). diff --git a/changelogs/fragments/35-wait-conditions.yaml b/changelogs/fragments/35-wait-conditions.yaml new file mode 100644 index 00000000..2f6dd67c --- /dev/null +++ b/changelogs/fragments/35-wait-conditions.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: + - Improve k8s Deployment and Daemonset wait conditions (https://github.com/ansible-collections/community.kubernetes/pull/35). 
diff --git a/changelogs/fragments/4-k8s-prepare-collection-for-release.yaml b/changelogs/fragments/4-k8s-prepare-collection-for-release.yaml new file mode 100644 index 00000000..4f9a7bcd --- /dev/null +++ b/changelogs/fragments/4-k8s-prepare-collection-for-release.yaml @@ -0,0 +1,12 @@ +--- +major_changes: + - k8s - Inventory source migrated from Ansible 2.9 to Kubernetes collection. + - k8s - Module migrated from Ansible 2.9 to Kubernetes collection. + - k8s - Lookup plugin migrated from Ansible 2.9 to Kubernetes collection. + - k8s_auth - Module migrated from Ansible 2.9 to Kubernetes collection. + - k8s_info - Module migrated from Ansible 2.9 to Kubernetes collection. + - k8s_scale - Module migrated from Ansible 2.9 to Kubernetes collection. + - k8s_service - Module migrated from Ansible 2.9 to Kubernetes collection. + - k8s_config_resource_name - Filter plugin migrated from Ansible 2.9 to Kubernetes collection. + - kubectl - Connection plugin migrated from Ansible 2.9 to Kubernetes collection. + - openshift - Inventory source migrated from Ansible 2.9 to Kubernetes collection. diff --git a/changelogs/fragments/49-k8s-add-persist_config-option.yaml b/changelogs/fragments/49-k8s-add-persist_config-option.yaml new file mode 100644 index 00000000..ef4c3f4d --- /dev/null +++ b/changelogs/fragments/49-k8s-add-persist_config-option.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: + - k8s - Added ``persist_config`` option for persisting refreshed tokens (https://github.com/ansible-collections/community.kubernetes/issues/49). 
diff --git a/changelogs/fragments/51-kubectl-security-disclosure.yaml b/changelogs/fragments/51-kubectl-security-disclosure.yaml new file mode 100644 index 00000000..0d2fd915 --- /dev/null +++ b/changelogs/fragments/51-kubectl-security-disclosure.yaml @@ -0,0 +1,3 @@ +--- +security_fixes: + - kubectl - Warn about information disclosure when using options like ``kubectl_password``, ``kubectl_extra_args``, and ``kubectl_token`` to pass data through to the command line using the ``kubectl`` connection plugin (https://github.com/ansible-collections/community.kubernetes/pull/51). diff --git a/changelogs/fragments/52-kubectl-connection-docsfix.yaml b/changelogs/fragments/52-kubectl-connection-docsfix.yaml new file mode 100644 index 00000000..ff71021c --- /dev/null +++ b/changelogs/fragments/52-kubectl-connection-docsfix.yaml @@ -0,0 +1,3 @@ +--- +bugfixes: + - kubectl - Fix documentation in kubectl connection plugin (https://github.com/ansible-collections/community.kubernetes/pull/52). diff --git a/changelogs/fragments/54-k8s-add-exception-handling.yaml b/changelogs/fragments/54-k8s-add-exception-handling.yaml new file mode 100644 index 00000000..73fd6c54 --- /dev/null +++ b/changelogs/fragments/54-k8s-add-exception-handling.yaml @@ -0,0 +1,3 @@ +--- +bugfixes: + - k8s - Add exception handling when retrieving k8s client (https://github.com/ansible-collections/community.kubernetes/pull/54). diff --git a/changelogs/fragments/56-k8s-from_yaml-docs-examples.yaml b/changelogs/fragments/56-k8s-from_yaml-docs-examples.yaml new file mode 100644 index 00000000..399b1ef8 --- /dev/null +++ b/changelogs/fragments/56-k8s-from_yaml-docs-examples.yaml @@ -0,0 +1,3 @@ +--- +bugfixes: + - k8s - Use ``from_yaml`` filter with lookup examples in ``k8s`` module documentation examples (https://github.com/ansible-collections/community.kubernetes/pull/56). 
diff --git a/changelogs/fragments/61-helm-new-modules.yaml b/changelogs/fragments/61-helm-new-modules.yaml new file mode 100644 index 00000000..d741500d --- /dev/null +++ b/changelogs/fragments/61-helm-new-modules.yaml @@ -0,0 +1,5 @@ +--- +major_changes: + - helm - New module for managing Helm charts (https://github.com/ansible-collections/community.kubernetes/pull/61). + - helm_info - New module for retrieving Helm chart information (https://github.com/ansible-collections/community.kubernetes/pull/61). + - helm_repository - New module for managing Helm repositories (https://github.com/ansible-collections/community.kubernetes/pull/61). diff --git a/changelogs/fragments/65_kubectl.yml b/changelogs/fragments/65_kubectl.yml new file mode 100644 index 00000000..0c30a61b --- /dev/null +++ b/changelogs/fragments/65_kubectl.yml @@ -0,0 +1,2 @@ +security_fixes: +- kubectl - connection plugin now redacts kubectl_token and kubectl_password in console log (https://github.com/ansible-collections/community.kubernetes/issues/65). diff --git a/changelogs/fragments/69-k8s_log-dont-parse-as-json.yaml b/changelogs/fragments/69-k8s_log-dont-parse-as-json.yaml new file mode 100644 index 00000000..bf34a741 --- /dev/null +++ b/changelogs/fragments/69-k8s_log-dont-parse-as-json.yaml @@ -0,0 +1,3 @@ +--- +bugfixes: + - k8s_log - Module no longer attempts to parse log as JSON (https://github.com/ansible-collections/community.kubernetes/pull/69). diff --git a/changelogs/fragments/78-github-actions-workflow.yaml b/changelogs/fragments/78-github-actions-workflow.yaml new file mode 100644 index 00000000..7985746e --- /dev/null +++ b/changelogs/fragments/78-github-actions-workflow.yaml @@ -0,0 +1,3 @@ +--- +bugfixes: + - Update GitHub Actions workflow for better CI stability (https://github.com/ansible-collections/community.kubernetes/pull/78). 
diff --git a/changelogs/fragments/81-rename-repository.yaml b/changelogs/fragments/81-rename-repository.yaml new file mode 100644 index 00000000..dd77b24e --- /dev/null +++ b/changelogs/fragments/81-rename-repository.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: + - Rename repository to ``community.kubernetes`` (https://github.com/ansible-collections/community.kubernetes/pull/81). diff --git a/changelogs/fragments/84-check_mode-service-change.yaml b/changelogs/fragments/84-check_mode-service-change.yaml new file mode 100644 index 00000000..974b2ace --- /dev/null +++ b/changelogs/fragments/84-check_mode-service-change.yaml @@ -0,0 +1,3 @@ +--- +bugfixes: + - Make sure Service changes run correctly in check_mode (https://github.com/ansible-collections/community.kubernetes/pull/84). diff --git a/changelogs/fragments/85-exclude-unnecessary-files-when-building.yaml b/changelogs/fragments/85-exclude-unnecessary-files-when-building.yaml new file mode 100644 index 00000000..f7f4be75 --- /dev/null +++ b/changelogs/fragments/85-exclude-unnecessary-files-when-building.yaml @@ -0,0 +1,3 @@ +--- +bugfixes: + - Make sure extra files are not included in built collection (https://github.com/ansible-collections/community.kubernetes/pull/85). diff --git a/changelogs/fragments/94-openshift-apply-test.yaml b/changelogs/fragments/94-openshift-apply-test.yaml new file mode 100644 index 00000000..e5a53376 --- /dev/null +++ b/changelogs/fragments/94-openshift-apply-test.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: + - Add test for openshift apply bug (https://github.com/ansible-collections/community.kubernetes/pull/94). 
diff --git a/changelogs/fragments/95-remove-ANSIBLE_METADATA.yaml b/changelogs/fragments/95-remove-ANSIBLE_METADATA.yaml new file mode 100644 index 00000000..e0370031 --- /dev/null +++ b/changelogs/fragments/95-remove-ANSIBLE_METADATA.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: + - Remove deprecated ANSIBLE_METADATA field (https://github.com/ansible-collections/community.kubernetes/pull/95). diff --git a/changelogs/fragments/97-remove-k8s_facts-deprecation.yaml b/changelogs/fragments/97-remove-k8s_facts-deprecation.yaml new file mode 100644 index 00000000..5fa7abcb --- /dev/null +++ b/changelogs/fragments/97-remove-k8s_facts-deprecation.yaml @@ -0,0 +1,3 @@ +--- +bugfixes: + - k8s_info - remove unnecessary k8s_facts deprecation notice (https://github.com/ansible-collections/community.kubernetes/pull/97). diff --git a/changelogs/fragments/98-add-version_added.yaml b/changelogs/fragments/98-add-version_added.yaml new file mode 100644 index 00000000..1c82b0e9 --- /dev/null +++ b/changelogs/fragments/98-add-version_added.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: + - Add version_added to each new collection module (https://github.com/ansible-collections/community.kubernetes/pull/98). 
diff --git a/codecov.yml b/codecov.yml index 33c8f6ee..71e957c6 100644 --- a/codecov.yml +++ b/codecov.yml @@ -3,3 +3,6 @@ coverage: precision: 2 round: down range: "70...100" + status: + project: + default: false diff --git a/galaxy.yml b/galaxy.yml index c17e1d06..fbebf31b 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -26,7 +26,7 @@ tags: - openshift - okd - cluster -version: 0.11.0 +version: 1.0.0 build_ignore: - .DS_Store - '*.tar.gz' diff --git a/meta/runtime.yml b/meta/runtime.yml new file mode 100644 index 00000000..0e5e3690 --- /dev/null +++ b/meta/runtime.yml @@ -0,0 +1,43 @@ +--- +requires_ansible: '>=2.9' + +action_groups: + helm: + - helm + - helm_info + - helm_repository + k8s: + - k8s + - k8s_auth + - k8s_exec + - k8s_facts + - k8s_info + - k8s_log + - k8s_scale + - k8s_service + +plugin_routing: + modules: + # k8s_facts was originally slated for removal in Ansible 2.13. + k8s_facts: + redirect: community.kubernetes.k8s_info + deprecation: + removal_version: 2.0.0 + warning_text: Use community.kubernetes.k8s_info instead. + k8s_raw: + tombstone: + removal_version: 0.1.0 + warning_text: The k8s_raw module was slated for deprecation in Ansible 2.10 and has been removed. Use community.kubernetes.k8s instead. + openshift_raw: + tombstone: + removal_version: 0.1.0 + warning_text: The openshift_raw module was slated for deprecation in Ansible 2.10 and has been removed. Use community.kubernetes.k8s instead. + openshift_scale: + tombstone: + removal_version: 0.1.0 + warning_text: The openshift_scale module was slated for deprecation in Ansible 2.10 and has been removed. Use community.kubernetes.k8s_scale instead. + lookup: + openshift: + tombstone: + removal_version: 0.1.0 + warning_text: The openshift lookup plugin was slated for deprecation in Ansible 2.10 and has been removed. Use community.kubernetes.k8s instead. 
diff --git a/molecule/default/converge.yml b/molecule/default/converge.yml index ebac0811..f1aa68fc 100644 --- a/molecule/default/converge.yml +++ b/molecule/default/converge.yml @@ -21,6 +21,7 @@ that: (pod_list.resources | count) > 5 - include_tasks: tasks/delete.yml + - include_tasks: tasks/scale.yml - include_tasks: tasks/apply.yml - include_tasks: tasks/waiter.yml - include_tasks: tasks/full.yml @@ -29,3 +30,87 @@ roles: - helm + + post_tasks: + - name: Ensure namespace exists + k8s: + api_version: v1 + kind: Namespace + name: inventory + + - name: Add a deployment + k8s: + definition: + apiVersion: apps/v1 + kind: Deployment + metadata: + name: inventory + namespace: inventory + spec: + replicas: 1 + selector: + matchLabels: + app: "{{ k8s_pod_name }}" + template: "{{ k8s_pod_template }}" + wait: yes + wait_timeout: 120 + vars: + k8s_pod_name: inventory + k8s_pod_image: python + k8s_pod_command: + - python + - '-m' + - http.server + k8s_pod_env: + - name: TEST + value: test + + - meta: refresh_inventory + +- name: Verify inventory and connection plugins + hosts: namespace_inventory_pods + gather_facts: no + + vars: + file_content: | + Hello world + + tasks: + - name: End play if host not running (TODO should we not add these to the inventory?) 
+ meta: end_host + when: pod_phase != "Running" + + - debug: var=hostvars + - setup: + + - debug: var=ansible_facts + + - name: Assert the TEST environment variable was retrieved + assert: + that: ansible_facts.env.TEST == 'test' + + - name: Copy a file into the host + copy: + content: '{{ file_content }}' + dest: /tmp/test_file + + - name: Retrieve the file from the host + slurp: + src: /tmp/test_file + register: slurped_file + + - name: Assert the file content matches expectations + assert: + that: (slurped_file.content|b64decode) == file_content + +- name: Delete inventory namespace + hosts: localhost + connection: local + gather_facts: no + tasks: + - name: Remove inventory namespace + k8s: + api_version: v1 + kind: Namespace + name: inventory + state: absent diff --git a/molecule/default/molecule.yml b/molecule/default/molecule.yml index 90ba2e53..693cd351 100644 --- a/molecule/default/molecule.yml +++ b/molecule/default/molecule.yml @@ -9,18 +9,26 @@ driver: lint: | set -e yamllint . 
+ flake8 platforms: - name: instance-kind provisioner: name: ansible log: true + config_options: + inventory: + enable_plugins: community.kubernetes.k8s lint: {} inventory: + hosts: + plugin: community.kubernetes.k8s host_vars: localhost: ansible_python_interpreter: '{{ ansible_playbook_python }}' env: ANSIBLE_FORCE_COLOR: 'true' + options: + vvv: True scenario: name: default test_sequence: diff --git a/molecule/default/roles/helm/tasks/main.yml b/molecule/default/roles/helm/tasks/main.yml index 458775de..e86d33df 100644 --- a/molecule/default/roles/helm/tasks/main.yml +++ b/molecule/default/roles/helm/tasks/main.yml @@ -4,4 +4,4 @@ loop_control: loop_var: helm_version with_items: - - "v3.1.2" + - "v3.2.4" diff --git a/molecule/default/roles/helm/tasks/run_test.yml b/molecule/default/roles/helm/tasks/run_test.yml index 42c54d09..0384a2e4 100644 --- a/molecule/default/roles/helm/tasks/run_test.yml +++ b/molecule/default/roles/helm/tasks/run_test.yml @@ -24,6 +24,9 @@ - from_repository - from_url +- name: Test helm plugin + include_tasks: tests_helm_plugin.yml + - name: Clean helm install file: path: "{{ item }}" diff --git a/molecule/default/roles/helm/tasks/tests_chart.yml b/molecule/default/roles/helm/tasks/tests_chart.yml index eb6d518f..26cf19f0 100644 --- a/molecule/default/roles/helm/tasks/tests_chart.yml +++ b/molecule/default/roles/helm/tasks/tests_chart.yml @@ -1,11 +1,4 @@ --- -- name: Create helm namespace - k8s: - api_version: v1 - kind: Namespace - name: "{{ helm_namespace }}" - wait: true - - name: Check helm_info empty helm_info: binary_path: "{{ helm_binary }}" @@ -18,6 +11,22 @@ that: - empty_info.status is undefined +- name: "Install fail {{ chart_test }} from {{ source }}" + helm: + binary_path: "{{ helm_binary }}" + name: test + chart_ref: "{{ chart_source }}" + chart_version: "{{ chart_source_version | default(omit) }}" + namespace: "{{ helm_namespace }}" + ignore_errors: yes + register: install_fail + +- name: "Assert that Install fail {{ 
chart_test }} from {{ source }}" + assert: + that: + - install_fail is failed + - "'Error: create: failed to create: namespaces \"' + helm_namespace + '\" not found' in install_fail.stderr" + - name: "Install {{ chart_test }} from {{ source }}" helm: binary_path: "{{ helm_binary }}" @@ -25,6 +34,7 @@ chart_ref: "{{ chart_source }}" chart_version: "{{ chart_source_version | default(omit) }}" namespace: "{{ helm_namespace }}" + create_namespace: true register: install - name: "Assert that {{ chart_test }} chart is installed from {{ source }}" @@ -191,6 +201,63 @@ that: - install is not changed +# Test --replace +- name: Install chart for replace option + helm: + binary_path: "{{ helm_binary }}" + name: test-0001 + chart_ref: "{{ chart_source }}" + chart_version: "{{ chart_source_version | default(omit) }}" + namespace: "{{ helm_namespace }}" + register: install + +- name: "Assert that {{ chart_test }} chart is installed from {{ source }}" + assert: + that: + - install is changed + +- name: Remove {{ chart_test }} with --purge + helm: + binary_path: "{{ helm_binary }}" + state: absent + name: test-0001 + purge: False + namespace: "{{ helm_namespace }}" + register: install + +- name: Check if chart is removed + assert: + that: + - install is changed + +- name: Install chart again with same name test-0001 + helm: + binary_path: "{{ helm_binary }}" + name: test-0001 + chart_ref: "{{ chart_source }}" + chart_version: "{{ chart_source_version | default(omit) }}" + namespace: "{{ helm_namespace }}" + replace: True + register: install + +- name: "Assert that {{ chart_test }} chart is installed from {{ source }}" + assert: + that: + - install is changed + +- name: Remove {{ chart_test }} (cleanup) + helm: + binary_path: "{{ helm_binary }}" + state: absent + name: test-0001 + namespace: "{{ helm_namespace }}" + register: install + +- name: Check if chart is removed + assert: + that: + - install is changed + - name: Remove helm namespace k8s: api_version: v1 diff --git 
a/molecule/default/roles/helm/tasks/tests_helm_plugin.yml b/molecule/default/roles/helm/tasks/tests_helm_plugin.yml new file mode 100644 index 00000000..720a06d5 --- /dev/null +++ b/molecule/default/roles/helm/tasks/tests_helm_plugin.yml @@ -0,0 +1,84 @@ +--- +- name: Install env plugin in check mode + helm_plugin: + binary_path: "{{ helm_binary }}" + namespace: "{{ helm_namespace }}" + state: present + plugin_path: https://github.com/adamreese/helm-env + register: check_install_env + check_mode: true + +- assert: + that: + - check_install_env.changed + +- name: Install env plugin + helm_plugin: + binary_path: "{{ helm_binary }}" + namespace: "{{ helm_namespace }}" + state: present + plugin_path: https://github.com/adamreese/helm-env + register: install_env + +- assert: + that: + - install_env.changed + +- name: Gather info about all plugin + helm_plugin_info: + binary_path: "{{ helm_binary }}" + namespace: "{{ helm_namespace }}" + register: plugin_info + +- assert: + that: + - plugin_info.plugin_list is defined + +- name: Install env plugin again + helm_plugin: + binary_path: "{{ helm_binary }}" + namespace: "{{ helm_namespace }}" + state: present + plugin_path: https://github.com/adamreese/helm-env + register: install_env + +- assert: + that: + - not install_env.changed + +- name: Uninstall env plugin in check mode + helm_plugin: + binary_path: "{{ helm_binary }}" + namespace: "{{ helm_namespace }}" + state: absent + plugin_name: env + register: check_uninstall_env + check_mode: true + +- assert: + that: + - check_uninstall_env.changed + +- name: Uninstall env plugin + helm_plugin: + binary_path: "{{ helm_binary }}" + namespace: "{{ helm_namespace }}" + state: absent + plugin_name: env + register: uninstall_env + +- assert: + that: + - uninstall_env.changed + +- name: Uninstall env plugin again + helm_plugin: + binary_path: "{{ helm_binary }}" + namespace: "{{ helm_namespace }}" + state: absent + plugin_name: env + register: uninstall_env + +- assert: + that: + - 
not uninstall_env.changed diff --git a/molecule/default/tasks/apply.yml b/molecule/default/tasks/apply.yml index f5362220..2f579755 100644 --- a/molecule/default/tasks/apply.yml +++ b/molecule/default/tasks/apply.yml @@ -154,6 +154,30 @@ that: - k8s_service_2 is not changed + - name: Add exactly same service in check mode + k8s: + definition: + apiVersion: v1 + kind: Service + metadata: + name: apply-svc + namespace: "{{ apply_namespace }}" + spec: + selector: + app: whatever + ports: + - name: http + port: 8080 + targetPort: 8080 + apply: yes + register: k8s_service_3 + check_mode: yes + + - name: Check nothing changed + assert: + that: + - k8s_service_3 is not changed + - name: Change service ports k8s: definition: @@ -170,14 +194,572 @@ port: 8081 targetPort: 8081 apply: yes - register: k8s_service_3 + register: k8s_service_4 - name: Check ports are correct assert: that: - - k8s_service_3 is changed - - k8s_service_3.result.spec.ports | length == 1 - - k8s_service_3.result.spec.ports[0].port == 8081 + - k8s_service_4 is changed + - k8s_service_4.result.spec.ports | length == 1 + - k8s_service_4.result.spec.ports[0].port == 8081 + + - name: Insert new service port + k8s: + definition: + apiVersion: v1 + kind: Service + metadata: + name: apply-svc + namespace: "{{ apply_namespace }}" + spec: + selector: + app: whatever + ports: + - name: mesh + port: 8080 + targetPort: 8080 + - name: http + port: 8081 + targetPort: 8081 + apply: yes + register: k8s_service_4 + + - name: Check ports are correct + assert: + that: + - k8s_service_4 is changed + - k8s_service_4.result.spec.ports | length == 2 + - k8s_service_4.result.spec.ports[0].port == 8080 + - k8s_service_4.result.spec.ports[1].port == 8081 + + - name: Remove new service port (check mode) + k8s: + definition: + apiVersion: v1 + kind: Service + metadata: + name: apply-svc + namespace: "{{ apply_namespace }}" + spec: + selector: + app: whatever + ports: + - name: http + port: 8081 + targetPort: 8081 + apply: yes + 
check_mode: yes + register: k8s_service_check + + - name: Check ports are correct + assert: + that: + - k8s_service_check is changed + - k8s_service_check.result.spec.ports | length == 1 + - k8s_service_check.result.spec.ports[0].port == 8081 + + - name: Remove new service port + k8s: + definition: + apiVersion: v1 + kind: Service + metadata: + name: apply-svc + namespace: "{{ apply_namespace }}" + spec: + selector: + app: whatever + ports: + - name: http + port: 8081 + targetPort: 8081 + apply: yes + register: k8s_service_5 + + - name: Check ports are correct + assert: + that: + - k8s_service_5 is changed + - k8s_service_5.result.spec.ports | length == 1 + - k8s_service_5.result.spec.ports[0].port == 8081 + + - name: Add a serviceaccount + k8s: + definition: + apiVersion: v1 + kind: ServiceAccount + metadata: + name: apply-deploy + namespace: "{{ apply_namespace }}" + + - name: Add a deployment + k8s: + definition: + apiVersion: apps/v1 + kind: Deployment + metadata: + name: apply-deploy + namespace: "{{ apply_namespace }}" + spec: + replicas: 1 + selector: + matchLabels: + app: "{{ k8s_pod_name }}" + template: "{{ k8s_pod_template }}" + wait: yes + apply: yes + vars: + k8s_pod_name: apply-deploy + k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-green + k8s_pod_service_account: apply-deploy + k8s_pod_ports: + - containerPort: 8080 + name: http + protocol: TCP + k8s_pod_resources: + requests: + cpu: 100m + memory: 100Mi + limits: + cpu: 100m + memory: 100Mi + + - name: Update the earlier deployment in check mode + k8s: + definition: + apiVersion: apps/v1 + kind: Deployment + metadata: + name: apply-deploy + namespace: "{{ apply_namespace }}" + spec: + replicas: 1 + selector: + matchLabels: + app: "{{ k8s_pod_name }}" + template: "{{ k8s_pod_template }}" + wait: yes + apply: yes + check_mode: yes + vars: + k8s_pod_name: apply-deploy + k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-purple + k8s_pod_service_account: apply-deploy + k8s_pod_ports: + - 
containerPort: 8080 + name: http + protocol: TCP + k8s_pod_resources: + requests: + cpu: 50m + limits: + cpu: 50m + memory: 50Mi + register: update_deploy_check_mode + + - name: Ensure check mode change took + assert: + that: + - update_deploy_check_mode is changed + - "update_deploy_check_mode.result.spec.template.spec.containers[0].image == 'gcr.io/kuar-demo/kuard-amd64:v0.10.0-purple'" + + - name: Update the earlier deployment + k8s: + definition: + apiVersion: apps/v1 + kind: Deployment + metadata: + name: apply-deploy + namespace: "{{ apply_namespace }}" + spec: + replicas: 1 + selector: + matchLabels: + app: "{{ k8s_pod_name }}" + template: "{{ k8s_pod_template }}" + wait: yes + apply: yes + vars: + k8s_pod_name: apply-deploy + k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-purple + k8s_pod_service_account: apply-deploy + k8s_pod_ports: + - containerPort: 8080 + name: http + protocol: TCP + k8s_pod_resources: + requests: + cpu: 50m + limits: + cpu: 50m + memory: 50Mi + register: update_deploy_for_real + + - name: Ensure change took + assert: + that: + - update_deploy_for_real is changed + - "update_deploy_for_real.result.spec.template.spec.containers[0].image == 'gcr.io/kuar-demo/kuard-amd64:v0.10.0-purple'" + + - name: Remove the serviceaccount + k8s: + state: absent + definition: + apiVersion: v1 + kind: ServiceAccount + metadata: + name: apply-deploy + namespace: "{{ apply_namespace }}" + + - name: Apply deployment after service account removed + k8s: + definition: + apiVersion: apps/v1 + kind: Deployment + metadata: + name: apply-deploy + namespace: "{{ apply_namespace }}" + spec: + replicas: 1 + selector: + matchLabels: + app: "{{ k8s_pod_name }}" + template: "{{ k8s_pod_template }}" + wait: yes + apply: yes + vars: + k8s_pod_name: apply-deploy + k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-green + k8s_pod_service_account: apply-deploy + k8s_pod_ports: + - containerPort: 8080 + name: http + protocol: TCP + k8s_pod_resources: + requests: + 
cpu: 50m + limits: + cpu: 50m + memory: 50Mi + register: deploy_after_serviceaccount_removal + ignore_errors: yes + + - name: Ensure that updating deployment after service account removal failed + assert: + that: + - deploy_after_serviceaccount_removal is failed + + - name: Insert new service port + k8s: + definition: + apiVersion: v1 + kind: Service + metadata: + name: apply-svc + namespace: "{{ apply_namespace }}" + spec: + selector: + app: whatever + ports: + - name: mesh + port: 8080 + targetPort: 8080 + - name: http + port: 8081 + targetPort: 8081 + apply: yes + register: k8s_service_4 + + - name: Check ports are correct + assert: + that: + - k8s_service_4 is changed + - k8s_service_4.result.spec.ports | length == 2 + - k8s_service_4.result.spec.ports[0].port == 8080 + - k8s_service_4.result.spec.ports[1].port == 8081 + + - name: Remove new service port (check mode) + k8s: + definition: + apiVersion: v1 + kind: Service + metadata: + name: apply-svc + namespace: "{{ apply_namespace }}" + spec: + selector: + app: whatever + ports: + - name: http + port: 8081 + targetPort: 8081 + apply: yes + check_mode: yes + register: k8s_service_check + + - name: Check ports are correct + assert: + that: + - k8s_service_check is changed + - k8s_service_check.result.spec.ports | length == 1 + - k8s_service_check.result.spec.ports[0].port == 8081 + + - name: Remove new service port + k8s: + definition: + apiVersion: v1 + kind: Service + metadata: + name: apply-svc + namespace: "{{ apply_namespace }}" + spec: + selector: + app: whatever + ports: + - name: http + port: 8081 + targetPort: 8081 + apply: yes + register: k8s_service_5 + + - name: Check ports are correct + assert: + that: + - k8s_service_5 is changed + - k8s_service_5.result.spec.ports | length == 1 + - k8s_service_5.result.spec.ports[0].port == 8081 + + - name: Add a serviceaccount + k8s: + definition: + apiVersion: v1 + kind: ServiceAccount + metadata: + name: apply-deploy + namespace: "{{ apply_namespace }}" + + - 
name: Add a deployment + k8s: + definition: + apiVersion: apps/v1 + kind: Deployment + metadata: + name: apply-deploy + namespace: "{{ apply_namespace }}" + spec: + replicas: 1 + selector: + matchLabels: + app: "{{ k8s_pod_name }}" + template: "{{ k8s_pod_template }}" + wait: yes + apply: yes + vars: + k8s_pod_name: apply-deploy + k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-green + k8s_pod_service_account: apply-deploy + k8s_pod_ports: + - containerPort: 8080 + name: http + protocol: TCP + + - name: Remove the serviceaccount + k8s: + state: absent + definition: + apiVersion: v1 + kind: ServiceAccount + metadata: + name: apply-deploy + namespace: "{{ apply_namespace }}" + + - name: Update the earlier deployment + k8s: + definition: + apiVersion: apps/v1 + kind: Deployment + metadata: + name: apply-deploy + namespace: "{{ apply_namespace }}" + spec: + replicas: 2 + selector: + matchLabels: + app: "{{ k8s_pod_name }}" + template: "{{ k8s_pod_template }}" + wait: yes + apply: yes + vars: + k8s_pod_name: apply-deploy + k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-purple + k8s_pod_service_account: apply-deploy + k8s_pod_ports: + - containerPort: 8080 + name: http + protocol: TCP + register: deploy_after_serviceaccount_removal + ignore_errors: yes + + - name: Ensure that updating deployment after service account removal failed + assert: + that: + - deploy_after_serviceaccount_removal is failed + + - name: Insert new service port + k8s: + definition: + apiVersion: v1 + kind: Service + metadata: + name: apply-svc + namespace: "{{ apply_namespace }}" + spec: + selector: + app: whatever + ports: + - name: mesh + port: 8080 + targetPort: 8080 + - name: http + port: 8081 + targetPort: 8081 + apply: yes + register: k8s_service_4 + + - name: Check ports are correct + assert: + that: + - k8s_service_4 is changed + - k8s_service_4.result.spec.ports | length == 2 + - k8s_service_4.result.spec.ports[0].port == 8080 + - k8s_service_4.result.spec.ports[1].port == 8081 + 
+ - name: Remove new service port (check mode) + k8s: + definition: + apiVersion: v1 + kind: Service + metadata: + name: apply-svc + namespace: "{{ apply_namespace }}" + spec: + selector: + app: whatever + ports: + - name: http + port: 8081 + targetPort: 8081 + apply: yes + check_mode: yes + register: k8s_service_check + + - name: Check ports are correct + assert: + that: + - k8s_service_check is changed + - k8s_service_check.result.spec.ports | length == 1 + - k8s_service_check.result.spec.ports[0].port == 8081 + + - name: Remove new service port + k8s: + definition: + apiVersion: v1 + kind: Service + metadata: + name: apply-svc + namespace: "{{ apply_namespace }}" + spec: + selector: + app: whatever + ports: + - name: http + port: 8081 + targetPort: 8081 + apply: yes + register: k8s_service_5 + + - name: Check ports are correct + assert: + that: + - k8s_service_5 is changed + - k8s_service_5.result.spec.ports | length == 1 + - k8s_service_5.result.spec.ports[0].port == 8081 + + - name: Add a serviceaccount + k8s: + definition: + apiVersion: v1 + kind: ServiceAccount + metadata: + name: apply-deploy + namespace: "{{ apply_namespace }}" + + - name: Add a deployment + k8s: + definition: + apiVersion: apps/v1 + kind: Deployment + metadata: + name: apply-deploy + namespace: "{{ apply_namespace }}" + spec: + replicas: 1 + selector: + matchLabels: + app: "{{ k8s_pod_name }}" + template: "{{ k8s_pod_template }}" + wait: yes + apply: yes + vars: + k8s_pod_name: apply-deploy + k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-green + k8s_pod_service_account: apply-deploy + k8s_pod_ports: + - containerPort: 8080 + name: http + protocol: TCP + + - name: Remove the serviceaccount + k8s: + state: absent + definition: + apiVersion: v1 + kind: ServiceAccount + metadata: + name: apply-deploy + namespace: "{{ apply_namespace }}" + + - name: Update the earlier deployment + k8s: + definition: + apiVersion: apps/v1 + kind: Deployment + metadata: + name: apply-deploy + namespace: 
"{{ apply_namespace }}" + spec: + replicas: 2 + selector: + matchLabels: + app: "{{ k8s_pod_name }}" + template: "{{ k8s_pod_template }}" + wait: yes + apply: yes + vars: + k8s_pod_name: apply-deploy + k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-purple + k8s_pod_service_account: apply-deploy + k8s_pod_ports: + - containerPort: 8080 + name: http + protocol: TCP + register: deploy_after_serviceaccount_removal + ignore_errors: yes + + - name: Ensure that updating deployment after service account removal failed + assert: + that: + - deploy_after_serviceaccount_removal is failed always: - name: Remove namespace diff --git a/molecule/default/tasks/exec.yml b/molecule/default/tasks/exec.yml index 77a69f3e..5397ab95 100644 --- a/molecule/default/tasks/exec.yml +++ b/molecule/default/tasks/exec.yml @@ -43,6 +43,19 @@ that: - "'nameserver' in output.stdout" + - name: Check if rc is returned for the given command + k8s_exec: + namespace: "{{ exec_namespace }}" + pod: "{{ pod }}" + command: 'false' + register: command_status + ignore_errors: True + + - name: Check last command status + assert: + that: + - command_status.return_code != 0 + always: - name: "Cleanup namespace" k8s: diff --git a/molecule/default/tasks/full.yml b/molecule/default/tasks/full.yml index 85d5d7d4..d2666797 100644 --- a/molecule/default/tasks/full.yml +++ b/molecule/default/tasks/full.yml @@ -185,6 +185,13 @@ metadata: name: testing1 + ### https://github.com/ansible-collections/community.kubernetes/issues/111 + - set_fact: + api_groups: "{{ lookup('k8s', cluster_info='api_groups') }}" + + - debug: + var: api_groups + - name: Namespace should exist k8s_info: kind: Namespace diff --git a/molecule/default/tasks/scale.yml b/molecule/default/tasks/scale.yml new file mode 100644 index 00000000..32b718df --- /dev/null +++ b/molecule/default/tasks/scale.yml @@ -0,0 +1,210 @@ +--- +- block: + - set_fact: + scale_namespace: scale + + - name: Ensure namespace exists + k8s: + definition: + apiVersion: v1 + 
kind: Namespace + metadata: + name: "{{ scale_namespace }}" + + - name: Add a deployment + k8s: + definition: + apiVersion: apps/v1 + kind: Deployment + metadata: + name: scale-deploy + namespace: "{{ scale_namespace }}" + spec: + replicas: 1 + selector: + matchLabels: + app: "{{ k8s_pod_name }}" + template: "{{ k8s_pod_template }}" + wait: yes + wait_timeout: 60 + apply: yes + vars: + k8s_pod_name: scale-deploy + k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-green + k8s_pod_ports: + - containerPort: 8080 + name: http + protocol: TCP + + - name: Get pods in scale-deploy + k8s_info: + kind: Pod + label_selectors: + - app=scale-deploy + namespace: "{{ scale_namespace }}" + field_selectors: + - status.phase=Running + + - name: Scale the deployment + k8s_scale: + api_version: apps/v1 + kind: Deployment + name: scale-deploy + namespace: "{{ scale_namespace }}" + replicas: 0 + wait: yes + register: scale_down + + - name: Get pods in scale-deploy + k8s_info: + kind: Pod + label_selectors: + - app=scale-deploy + namespace: "{{ scale_namespace }}" + field_selectors: + - status.phase=Running + register: scale_down_deploy_pods + until: "{{ scale_down_deploy_pods.resources | length == 0 }}" + retries: 6 + delay: 5 + + - name: Ensure that scale down took effect + assert: + that: + - scale_down is changed + - '"duration" in scale_down' + - scale_down.diff + + - name: Reapply the earlier deployment + k8s: + definition: + api_version: apps/v1 + kind: Deployment + metadata: + name: scale-deploy + namespace: "{{ scale_namespace }}" + spec: + replicas: 1 + selector: + matchLabels: + app: "{{ k8s_pod_name }}" + template: "{{ k8s_pod_template }}" + wait: yes + wait_timeout: 60 + apply: yes + vars: + k8s_pod_name: scale-deploy + k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-green + k8s_pod_ports: + - containerPort: 8080 + name: http + protocol: TCP + register: reapply_after_scale + + - name: Get pods in scale-deploy + k8s_info: + kind: Pod + label_selectors: + - 
app=scale-deploy + namespace: "{{ scale_namespace }}" + field_selectors: + - status.phase=Running + register: scale_up_deploy_pods + + - name: Ensure that reapply after scale worked + assert: + that: + - reapply_after_scale is changed + - scale_up_deploy_pods.resources | length == 1 + + - name: Scale the deployment up + k8s_scale: + api_version: apps/v1 + kind: Deployment + name: scale-deploy + namespace: "{{ scale_namespace }}" + replicas: 2 + wait: yes + wait_timeout: 60 + register: scale_up + + - name: Get pods in scale-deploy + k8s_info: + kind: Pod + label_selectors: + - app=scale-deploy + field_selectors: + - status.phase=Running + namespace: "{{ scale_namespace }}" + register: scale_up_further_deploy_pods + + - name: Ensure that scale up worked + assert: + that: + - scale_up is changed + - '"duration" in scale_up' + - scale_up.diff + - scale_up_further_deploy_pods.resources | length == 2 + + - name: Don't scale the deployment up + k8s_scale: + api_version: apps/v1 + kind: Deployment + name: scale-deploy + namespace: "{{ scale_namespace }}" + replicas: 2 + wait: yes + register: scale_up_noop + + - name: Get pods in scale-deploy + k8s_info: + kind: Pod + label_selectors: + - app=scale-deploy + field_selectors: + - status.phase=Running + namespace: "{{ scale_namespace }}" + register: scale_up_noop_pods + + - name: Ensure that no-op scale up worked + assert: + that: + - scale_up_noop is not changed + - not scale_up_noop.diff + - scale_up_noop_pods.resources | length == 2 + - '"duration" in scale_up_noop' + + - name: Scale deployment down without wait + k8s_scale: + api_version: apps/v1 + kind: Deployment + name: scale-deploy + namespace: "{{ scale_namespace }}" + replicas: 1 + wait: no + register: scale_down_no_wait + + - name: Ensure that scale down succeeds + k8s_info: + kind: Pod + label_selectors: + - app=scale-deploy + namespace: "{{ scale_namespace }}" + register: scale_down_no_wait_pods + retries: 6 + delay: 5 + until: "{{ 
scale_down_no_wait_pods.resources | length == 1 }}" + + - name: Ensure that scale down without wait worked + assert: + that: + - scale_down_no_wait is changed + - scale_down_no_wait.diff + - scale_down_no_wait_pods.resources | length == 1 + + always: + - name: Remove namespace + k8s: + kind: Namespace + name: "{{ scale_namespace }}" + state: absent diff --git a/molecule/default/tasks/waiter.yml b/molecule/default/tasks/waiter.yml index 98ee6416..4049f6ef 100644 --- a/molecule/default/tasks/waiter.yml +++ b/molecule/default/tasks/waiter.yml @@ -83,11 +83,13 @@ k8s_pod_name: wait-ds k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:2 register: update_ds_check_mode + check_mode: yes - - name: Check that check_mode returned changed + - name: Check that check_mode result contains the changes assert: that: - update_ds_check_mode is changed + - "update_ds_check_mode.result.spec.template.spec.containers[0].image == 'gcr.io/kuar-demo/kuard-amd64:2'" - name: Update a daemonset k8s: @@ -119,6 +121,8 @@ namespace: "{{ wait_namespace }}" label_selectors: - app=wait-ds + field_selectors: + - status.phase=Running register: updated_ds_pods - name: Check that daemonset wait worked @@ -229,6 +233,12 @@ protocol: TCP register: update_deploy + # It looks like the Deployment is updated to have the desired state *before* the pods are terminated + # Wait a couple of seconds to allow the old pods to at least get to Terminating state + - name: Avoid race condition + pause: + seconds: 2 + - name: Get updated pods k8s_info: api_version: v1 @@ -236,13 +246,17 @@ namespace: "{{ wait_namespace }}" label_selectors: - app=wait-deploy + field_selectors: + - status.phase=Running register: updated_deploy_pods + until: "{{ updated_deploy_pods.resources[0].spec.containers[0].image.endswith(':2') }}" + retries: 6 + delay: 5 - name: Check that deployment wait worked assert: that: - deploy.result.status.availableReplicas == deploy.result.status.replicas - - 
updated_deploy_pods.resources[0].spec.containers[0].image.endswith(":2") - name: Pause a deployment k8s: diff --git a/molecule/default/vars/main.yml b/molecule/default/vars/main.yml index 8368ae27..a478de97 100644 --- a/molecule/default/vars/main.yml +++ b/molecule/default/vars/main.yml @@ -4,6 +4,7 @@ k8s_pod_metadata: app: "{{ k8s_pod_name }}" k8s_pod_spec: + serviceAccount: "{{ k8s_pod_service_account }}" containers: - image: "{{ k8s_pod_image }}" imagePullPolicy: Always @@ -14,16 +15,24 @@ k8s_pod_spec: exec: command: - /bin/true - resources: - limits: - cpu: "100m" - memory: "100Mi" + resources: "{{ k8s_pod_resources }}" ports: "{{ k8s_pod_ports }}" + env: "{{ k8s_pod_env }}" + + +k8s_pod_service_account: default + +k8s_pod_resources: + limits: + cpu: "100m" + memory: "100Mi" k8s_pod_command: [] k8s_pod_ports: [] +k8s_pod_env: [] + k8s_pod_template: metadata: "{{ k8s_pod_metadata }}" spec: "{{ k8s_pod_spec }}" diff --git a/plugins/action/helm.py b/plugins/action/helm.py new file mode 120000 index 00000000..667980ed --- /dev/null +++ b/plugins/action/helm.py @@ -0,0 +1 @@ +k8s_info.py \ No newline at end of file diff --git a/plugins/action/helm_info.py b/plugins/action/helm_info.py new file mode 120000 index 00000000..667980ed --- /dev/null +++ b/plugins/action/helm_info.py @@ -0,0 +1 @@ +k8s_info.py \ No newline at end of file diff --git a/plugins/action/helm_plugin.py b/plugins/action/helm_plugin.py new file mode 120000 index 00000000..667980ed --- /dev/null +++ b/plugins/action/helm_plugin.py @@ -0,0 +1 @@ +k8s_info.py \ No newline at end of file diff --git a/plugins/action/helm_plugin_info.py b/plugins/action/helm_plugin_info.py new file mode 120000 index 00000000..667980ed --- /dev/null +++ b/plugins/action/helm_plugin_info.py @@ -0,0 +1 @@ +k8s_info.py \ No newline at end of file diff --git a/plugins/action/helm_repository.py b/plugins/action/helm_repository.py new file mode 120000 index 00000000..667980ed --- /dev/null +++ 
b/plugins/action/helm_repository.py @@ -0,0 +1 @@ +k8s_info.py \ No newline at end of file diff --git a/plugins/action/k8s.py b/plugins/action/k8s.py new file mode 120000 index 00000000..667980ed --- /dev/null +++ b/plugins/action/k8s.py @@ -0,0 +1 @@ +k8s_info.py \ No newline at end of file diff --git a/plugins/action/k8s_auth.py b/plugins/action/k8s_auth.py new file mode 120000 index 00000000..667980ed --- /dev/null +++ b/plugins/action/k8s_auth.py @@ -0,0 +1 @@ +k8s_info.py \ No newline at end of file diff --git a/plugins/action/k8s_exec.py b/plugins/action/k8s_exec.py new file mode 120000 index 00000000..667980ed --- /dev/null +++ b/plugins/action/k8s_exec.py @@ -0,0 +1 @@ +k8s_info.py \ No newline at end of file diff --git a/plugins/action/k8s_info.py b/plugins/action/k8s_info.py new file mode 100644 index 00000000..2b9b84ad --- /dev/null +++ b/plugins/action/k8s_info.py @@ -0,0 +1,82 @@ +# Copyright (c) 2012-2014, Michael DeHaan +# Copyright (c) 2017, Toshio Kuratomi +# Copyright (c) 2020, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import copy +import traceback + +from ansible.module_utils._text import to_text +from ansible.plugins.action import ActionBase +from ansible.errors import AnsibleError + + +class ActionModule(ActionBase): + + TRANSFERS_FILES = True + + def _ensure_invocation(self, result): + # NOTE: adding invocation arguments here needs to be kept in sync with + # any no_log specified in the argument_spec in the module. 
+ if 'invocation' not in result: + if self._play_context.no_log: + result['invocation'] = "CENSORED: no_log is set" + else: + result['invocation'] = self._task.args.copy() + result['invocation']['module_args'] = self._task.args.copy() + + return result + + def run(self, tmp=None, task_vars=None): + ''' handler for k8s options ''' + if task_vars is None: + task_vars = dict() + + result = super(ActionModule, self).run(tmp, task_vars) + del tmp # tmp no longer has any effect + + new_module_args = copy.deepcopy(self._task.args) + kubeconfig = self._task.args.get('kubeconfig', None) + # find the file in the expected search path + if kubeconfig: + try: + # find in expected paths + kubeconfig = self._find_needle('files', kubeconfig) + except AnsibleError as e: + result['failed'] = True + result['msg'] = to_text(e) + result['exception'] = traceback.format_exc() + return result + + if kubeconfig: + # decrypt kubeconfig found + actual_file = self._loader.get_real_file(kubeconfig, decrypt=True) + new_module_args['kubeconfig'] = actual_file + + # find the file in the expected search path + src = self._task.args.get('src', None) + if src: + try: + # find in expected paths + src = self._find_needle('files', src) + except AnsibleError as e: + result['failed'] = True + result['msg'] = to_text(e) + result['exception'] = traceback.format_exc() + return result + + if src: + new_module_args['src'] = src + + # Execute the k8s_* module. 
+ module_return = self._execute_module(module_name=self._task.action, module_args=new_module_args, task_vars=task_vars) + + # Delete tmp path + self._remove_tmp_path(self._connection._shell.tmpdir) + + result.update(module_return) + + return self._ensure_invocation(result) diff --git a/plugins/action/k8s_log.py b/plugins/action/k8s_log.py new file mode 120000 index 00000000..667980ed --- /dev/null +++ b/plugins/action/k8s_log.py @@ -0,0 +1 @@ +k8s_info.py \ No newline at end of file diff --git a/plugins/action/k8s_scale.py b/plugins/action/k8s_scale.py new file mode 120000 index 00000000..667980ed --- /dev/null +++ b/plugins/action/k8s_scale.py @@ -0,0 +1 @@ +k8s_info.py \ No newline at end of file diff --git a/plugins/action/k8s_service.py b/plugins/action/k8s_service.py new file mode 120000 index 00000000..667980ed --- /dev/null +++ b/plugins/action/k8s_service.py @@ -0,0 +1 @@ +k8s_info.py \ No newline at end of file diff --git a/plugins/connection/kubectl.py b/plugins/connection/kubectl.py index b320c494..5c16fe9e 100644 --- a/plugins/connection/kubectl.py +++ b/plugins/connection/kubectl.py @@ -20,7 +20,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = """ +DOCUMENTATION = r""" author: - xuxinkun @@ -38,7 +38,8 @@ DOCUMENTATION = """ options: kubectl_pod: description: - - Pod name. Required when the host name does not match pod name. + - Pod name. + - Required when the host name does not match pod name. default: '' vars: - name: ansible_kubectl_pod @@ -46,7 +47,8 @@ DOCUMENTATION = """ - name: K8S_AUTH_POD kubectl_container: description: - - Container name. Required when a pod contains more than one container. + - Container name. + - Required when a pod contains more than one container. 
default: '' vars: - name: ansible_kubectl_container @@ -173,7 +175,6 @@ import os import os.path import subprocess -import ansible.constants as C from ansible.parsing.yaml.loader import AnsibleLoader from ansible.errors import AnsibleError, AnsibleFileNotFound from ansible.module_utils.six.moves import shlex_quote @@ -228,6 +229,7 @@ class Connection(ConnectionBase): """ Build the local kubectl exec command to run cmd on remote_host """ local_cmd = [self.transport_cmd] + censored_local_cmd = [self.transport_cmd] # Build command options based on doc string doc_yaml = AnsibleLoader(self.documentation).get_single_data() @@ -236,28 +238,36 @@ class Connection(ConnectionBase): # Translate verify_ssl to skip_verify_ssl, and output as string skip_verify_ssl = not self.get_option(key) local_cmd.append(u'{0}={1}'.format(self.connection_options[key], str(skip_verify_ssl).lower())) + censored_local_cmd.append(u'{0}={1}'.format(self.connection_options[key], str(skip_verify_ssl).lower())) elif not key.endswith('container') and self.get_option(key) and self.connection_options.get(key): cmd_arg = self.connection_options[key] local_cmd += [cmd_arg, self.get_option(key)] + # Redact password and token from console log + if key.endswith(('_token', '_password')): + censored_local_cmd += [cmd_arg, '********'] extra_args_name = u'{0}_extra_args'.format(self.transport) if self.get_option(extra_args_name): local_cmd += self.get_option(extra_args_name).split(' ') + censored_local_cmd += self.get_option(extra_args_name).split(' ') pod = self.get_option(u'{0}_pod'.format(self.transport)) if not pod: pod = self._play_context.remote_addr # -i is needed to keep stdin open which allows pipelining to work local_cmd += ['exec', '-i', pod] + censored_local_cmd += ['exec', '-i', pod] # if the pod has more than one container, then container is required container_arg_name = u'{0}_container'.format(self.transport) if self.get_option(container_arg_name): local_cmd += ['-c', 
self.get_option(container_arg_name)] + censored_local_cmd += ['-c', self.get_option(container_arg_name)] local_cmd += ['--'] + cmd + censored_local_cmd += ['--'] + cmd - return local_cmd + return local_cmd, censored_local_cmd def _connect(self, port=None): """ Connect to the container. Nothing to do """ @@ -270,9 +280,9 @@ class Connection(ConnectionBase): """ Run a command in the container """ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) - local_cmd = self._build_exec_cmd([self._play_context.executable, '-c', cmd]) + local_cmd, censored_local_cmd = self._build_exec_cmd([self._play_context.executable, '-c', cmd]) - display.vvv("EXEC %s" % (local_cmd,), host=self._play_context.remote_addr) + display.vvv("EXEC %s" % (censored_local_cmd,), host=self._play_context.remote_addr) local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd] p = subprocess.Popen(local_cmd, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) @@ -312,7 +322,7 @@ class Connection(ConnectionBase): count = ' count=0' else: count = '' - args = self._build_exec_cmd([self._play_context.executable, "-c", "dd of=%s bs=%s%s" % (out_path, BUFSIZE, count)]) + args, dummy = self._build_exec_cmd([self._play_context.executable, "-c", "dd of=%s bs=%s%s" % (out_path, BUFSIZE, count)]) args = [to_bytes(i, errors='surrogate_or_strict') for i in args] try: p = subprocess.Popen(args, stdin=in_file, @@ -334,7 +344,7 @@ class Connection(ConnectionBase): # kubectl doesn't have native support for fetching files from # running containers, so we use kubectl exec to implement this - args = self._build_exec_cmd([self._play_context.executable, "-c", "dd if=%s bs=%s" % (in_path, BUFSIZE)]) + args, dummy = self._build_exec_cmd([self._play_context.executable, "-c", "dd if=%s bs=%s" % (in_path, BUFSIZE)]) args = [to_bytes(i, errors='surrogate_or_strict') for i in args] actual_out_path = os.path.join(out_dir, os.path.basename(in_path)) with 
open(to_bytes(actual_out_path, errors='surrogate_or_strict'), 'wb') as out_file: diff --git a/plugins/doc_fragments/helm_common_options.py b/plugins/doc_fragments/helm_common_options.py new file mode 100644 index 00000000..f13bc1e3 --- /dev/null +++ b/plugins/doc_fragments/helm_common_options.py @@ -0,0 +1,34 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2020, Ansible Project +# Copyright: (c) 2020, Red Hat Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Options for common Helm modules + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +class ModuleDocFragment(object): + + DOCUMENTATION = r''' +options: + binary_path: + description: + - The path of a helm binary to use. + required: false + type: path + context: + description: + - Helm option to specify which kubeconfig context to use. + - If the value is not specified in the task, the value of environment variable C(K8S_AUTH_CONTEXT) will be used instead. + type: str + aliases: [ kube_context ] + kubeconfig: + description: + - Helm option to specify kubeconfig path to use. + - If the value is not specified in the task, the value of environment variable C(K8S_AUTH_KUBECONFIG) will be used instead. + type: path + aliases: [ kubeconfig_path ] +''' diff --git a/plugins/doc_fragments/k8s_name_options.py b/plugins/doc_fragments/k8s_name_options.py index 088a5637..fe4e5c47 100644 --- a/plugins/doc_fragments/k8s_name_options.py +++ b/plugins/doc_fragments/k8s_name_options.py @@ -15,9 +15,10 @@ class ModuleDocFragment(object): options: api_version: description: - - Use to specify the API version. Use to create, delete, or discover an object without providing a full - resource definition. Use in conjunction with I(kind), I(name), and I(namespace) to identify a - specific object. If I(resource definition) is provided, the I(apiVersion) from the I(resource_definition) + - Use to specify the API version. 
+ - Use to create, delete, or discover an object without providing a full resource definition. + - Use in conjunction with I(kind), I(name), and I(namespace) to identify a specific object. + - If I(resource definition) is provided, the I(apiVersion) value from the I(resource_definition) will override this option. type: str default: v1 @@ -26,23 +27,26 @@ options: - version kind: description: - - Use to specify an object model. Use to create, delete, or discover an object without providing a full - resource definition. Use in conjunction with I(api_version), I(name), and I(namespace) to identify a - specific object. If I(resource definition) is provided, the I(kind) from the I(resource_definition) + - Use to specify an object model. + - Use to create, delete, or discover an object without providing a full resource definition. + - Use in conjunction with I(api_version), I(name), and I(namespace) to identify a specific object. + - If I(resource definition) is provided, the I(kind) value from the I(resource_definition) will override this option. type: str name: description: - - Use to specify an object name. Use to create, delete, or discover an object without providing a full - resource definition. Use in conjunction with I(api_version), I(kind) and I(namespace) to identify a - specific object. If I(resource definition) is provided, the I(metadata.name) value from the - I(resource_definition) will override this option. + - Use to specify an object name. + - Use to create, delete, or discover an object without providing a full resource definition. + - Use in conjunction with I(api_version), I(kind) and I(namespace) to identify a specific object. + - If I(resource definition) is provided, the I(metadata.name) value from the I(resource_definition) + will override this option. type: str namespace: description: - - Use to specify an object namespace. Useful when creating, deleting, or discovering an object without - providing a full resource definition. 
Use in conjunction with I(api_version), I(kind), and I(name) - to identify a specfic object. If I(resource definition) is provided, the I(metadata.namespace) value - from the I(resource_definition) will override this option. + - Use to specify an object namespace. + - Useful when creating, deleting, or discovering an object without providing a full resource definition. + - Use in conjunction with I(api_version), I(kind), and I(name) to identify a specific object. + - If I(resource definition) is provided, the I(metadata.namespace) value from the I(resource_definition) + will override this option. type: str ''' diff --git a/plugins/doc_fragments/k8s_resource_options.py b/plugins/doc_fragments/k8s_resource_options.py index 0742eed4..b5721453 100644 --- a/plugins/doc_fragments/k8s_resource_options.py +++ b/plugins/doc_fragments/k8s_resource_options.py @@ -15,8 +15,8 @@ class ModuleDocFragment(object): options: resource_definition: description: - - "Provide a valid YAML definition (either as a string, list, or dict) for an object when creating or updating. NOTE: I(kind), I(api_version), I(name), - and I(namespace) will be overwritten by corresponding values found in the provided I(resource_definition)." + - Provide a valid YAML definition (either as a string, list, or dict) for an object when creating or updating. + - "NOTE: I(kind), I(api_version), I(name), and I(namespace) will be overwritten by corresponding values found in the provided I(resource_definition)." 
aliases: - definition - inline diff --git a/plugins/filter/k8s.py b/plugins/filter/k8s.py index f6cb0579..3597b852 100644 --- a/plugins/filter/k8s.py +++ b/plugins/filter/k8s.py @@ -6,13 +6,6 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' -} - - try: from openshift.helper.hashes import generate_hash HAS_GENERATE_HASH = True diff --git a/plugins/inventory/k8s.py b/plugins/inventory/k8s.py index 6f845c4e..4c9bab82 100644 --- a/plugins/inventory/k8s.py +++ b/plugins/inventory/k8s.py @@ -28,60 +28,61 @@ DOCUMENTATION = ''' - Optional list of cluster connection settings. If no connections are provided, the default I(~/.kube/config) and active context will be used, and objects will be returned for all namespaces the active user is authorized to access. - name: - description: - - Optional name to assign to the cluster. If not provided, a name is constructed from the server - and port. - kubeconfig: - description: - - Path to an existing Kubernetes config file. If not provided, and no other connection - options are provided, the OpenShift client will attempt to load the default - configuration file from I(~/.kube/config.json). Can also be specified via K8S_AUTH_KUBECONFIG - environment variable. - context: - description: - - The name of a context found in the config file. Can also be specified via K8S_AUTH_CONTEXT environment - variable. - host: - description: - - Provide a URL for accessing the API. Can also be specified via K8S_AUTH_HOST environment variable. - api_key: - description: - - Token used to authenticate with the API. Can also be specified via K8S_AUTH_API_KEY environment - variable. - username: - description: - - Provide a username for authenticating with the API. Can also be specified via K8S_AUTH_USERNAME - environment variable. - password: - description: - - Provide a password for authenticating with the API. 
Can also be specified via K8S_AUTH_PASSWORD - environment variable. - client_cert: - description: - - Path to a certificate used to authenticate with the API. Can also be specified via K8S_AUTH_CERT_FILE - environment variable. - aliases: [ cert_file ] - client_key: - description: - - Path to a key file used to authenticate with the API. Can also be specified via K8S_AUTH_KEY_FILE - environment variable. - aliases: [ key_file ] - ca_cert: - description: - - Path to a CA certificate used to authenticate with the API. Can also be specified via - K8S_AUTH_SSL_CA_CERT environment variable. - aliases: [ ssl_ca_cert ] - validate_certs: - description: - - "Whether or not to verify the API server's SSL certificates. Can also be specified via - K8S_AUTH_VERIFY_SSL environment variable." - type: bool - aliases: [ verify_ssl ] - namespaces: - description: - - List of namespaces. If not specified, will fetch all containers for all namespaces user is authorized - to access. + suboptions: + name: + description: + - Optional name to assign to the cluster. If not provided, a name is constructed from the server + and port. + kubeconfig: + description: + - Path to an existing Kubernetes config file. If not provided, and no other connection + options are provided, the OpenShift client will attempt to load the default + configuration file from I(~/.kube/config.json). Can also be specified via K8S_AUTH_KUBECONFIG + environment variable. + context: + description: + - The name of a context found in the config file. Can also be specified via K8S_AUTH_CONTEXT environment + variable. + host: + description: + - Provide a URL for accessing the API. Can also be specified via K8S_AUTH_HOST environment variable. + api_key: + description: + - Token used to authenticate with the API. Can also be specified via K8S_AUTH_API_KEY environment + variable. + username: + description: + - Provide a username for authenticating with the API. Can also be specified via K8S_AUTH_USERNAME + environment variable. 
+ password: + description: + - Provide a password for authenticating with the API. Can also be specified via K8S_AUTH_PASSWORD + environment variable. + client_cert: + description: + - Path to a certificate used to authenticate with the API. Can also be specified via K8S_AUTH_CERT_FILE + environment variable. + aliases: [ cert_file ] + client_key: + description: + - Path to a key file used to authenticate with the API. Can also be specified via K8S_AUTH_KEY_FILE + environment variable. + aliases: [ key_file ] + ca_cert: + description: + - Path to a CA certificate used to authenticate with the API. Can also be specified via + K8S_AUTH_SSL_CA_CERT environment variable. + aliases: [ ssl_ca_cert ] + validate_certs: + description: + - "Whether or not to verify the API server's SSL certificates. Can also be specified via + K8S_AUTH_VERIFY_SSL environment variable." + type: bool + aliases: [ verify_ssl ] + namespaces: + description: + - List of namespaces. If not specified, will fetch all containers for all namespaces user is authorized + to access. requirements: - "python >= 2.7" @@ -93,20 +94,20 @@ EXAMPLES = ''' # File must be named k8s.yaml or k8s.yml # Authenticate with token, and return all pods and services for all namespaces -plugin: k8s +plugin: community.kubernetes.k8s connections: - host: https://192.168.64.4:8443 - token: xxxxxxxxxxxxxxxx + api_key: xxxxxxxxxxxxxxxx validate_certs: false # Use default config (~/.kube/config) file and active context, and return objects for a specific namespace -plugin: k8s +plugin: community.kubernetes.k8s connections: - namespaces: - testing # Use a custom config file, and a specific context. 
-plugin: k8s +plugin: community.kubernetes.k8s connections: - kubeconfig: /path/to/config context: 'awx/192-168-64-4:8443/developer' @@ -142,6 +143,7 @@ class K8sInventoryException(Exception): class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable, K8sAnsibleMixin): NAME = 'community.kubernetes.k8s' + connection_plugin = 'kubectl' transport = 'kubectl' def parse(self, inventory, loader, path, cache=True): @@ -275,7 +277,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable, K8sAnsibleM self.inventory.set_variable(container_name, 'container_state', 'Waiting') self.inventory.set_variable(container_name, 'container_ready', container.ready) self.inventory.set_variable(container_name, 'ansible_remote_tmp', '/tmp/') - self.inventory.set_variable(container_name, 'ansible_connection', self.transport) + self.inventory.set_variable(container_name, 'ansible_connection', self.connection_plugin) self.inventory.set_variable(container_name, 'ansible_{0}_pod'.format(self.transport), pod_name) self.inventory.set_variable(container_name, 'ansible_{0}_container'.format(self.transport), @@ -316,7 +318,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable, K8sAnsibleM try: self.inventory.add_child(namespace_services_group, service_name) - except AnsibleError as e: + except AnsibleError: raise ports = [{'name': port.name, diff --git a/plugins/inventory/openshift.py b/plugins/inventory/openshift.py index ddee2836..f6c393bd 100644 --- a/plugins/inventory/openshift.py +++ b/plugins/inventory/openshift.py @@ -28,60 +28,61 @@ DOCUMENTATION = ''' - Optional list of cluster connection settings. If no connections are provided, the default I(~/.kube/config) and active context will be used, and objects will be returned for all namespaces the active user is authorized to access. - name: - description: - - Optional name to assign to the cluster. If not provided, a name is constructed from the server - and port. 
- kubeconfig: - description: - - Path to an existing Kubernetes config file. If not provided, and no other connection - options are provided, the OpenShift client will attempt to load the default - configuration file from I(~/.kube/config.json). Can also be specified via K8S_AUTH_KUBECONFIG - environment variable. - context: - description: - - The name of a context found in the config file. Can also be specified via K8S_AUTH_CONTEXT environment - variable. - host: - description: - - Provide a URL for accessing the API. Can also be specified via K8S_AUTH_HOST environment variable. - api_key: - description: - - Token used to authenticate with the API. Can also be specified via K8S_AUTH_API_KEY environment - variable. - username: - description: - - Provide a username for authenticating with the API. Can also be specified via K8S_AUTH_USERNAME - environment variable. - password: - description: - - Provide a password for authenticating with the API. Can also be specified via K8S_AUTH_PASSWORD - environment variable. - client_cert: - description: - - Path to a certificate used to authenticate with the API. Can also be specified via K8S_AUTH_CERT_FILE - environment variable. - aliases: [ cert_file ] - client_key: - description: - - Path to a key file used to authenticate with the API. Can also be specified via K8S_AUTH_KEY_FILE - environment variable. - aliases: [ key_file ] - ca_cert: - description: - - Path to a CA certificate used to authenticate with the API. Can also be specified via - K8S_AUTH_SSL_CA_CERT environment variable. - aliases: [ ssl_ca_cert ] - validate_certs: - description: - - "Whether or not to verify the API server's SSL certificates. Can also be specified via - K8S_AUTH_VERIFY_SSL environment variable." - type: bool - aliases: [ verify_ssl ] - namespaces: - description: - - List of namespaces. If not specified, will fetch all containers for all namespaces user is authorized - to access. 
+ suboptions: + name: + description: + - Optional name to assign to the cluster. If not provided, a name is constructed from the server + and port. + kubeconfig: + description: + - Path to an existing Kubernetes config file. If not provided, and no other connection + options are provided, the OpenShift client will attempt to load the default + configuration file from I(~/.kube/config.json). Can also be specified via K8S_AUTH_KUBECONFIG + environment variable. + context: + description: + - The name of a context found in the config file. Can also be specified via K8S_AUTH_CONTEXT environment + variable. + host: + description: + - Provide a URL for accessing the API. Can also be specified via K8S_AUTH_HOST environment variable. + api_key: + description: + - Token used to authenticate with the API. Can also be specified via K8S_AUTH_API_KEY environment + variable. + username: + description: + - Provide a username for authenticating with the API. Can also be specified via K8S_AUTH_USERNAME + environment variable. + password: + description: + - Provide a password for authenticating with the API. Can also be specified via K8S_AUTH_PASSWORD + environment variable. + client_cert: + description: + - Path to a certificate used to authenticate with the API. Can also be specified via K8S_AUTH_CERT_FILE + environment variable. + aliases: [ cert_file ] + client_key: + description: + - Path to a key file used to authenticate with the API. Can also be specified via K8S_AUTH_KEY_FILE + environment variable. + aliases: [ key_file ] + ca_cert: + description: + - Path to a CA certificate used to authenticate with the API. Can also be specified via + K8S_AUTH_SSL_CA_CERT environment variable. + aliases: [ ssl_ca_cert ] + validate_certs: + description: + - "Whether or not to verify the API server's SSL certificates. Can also be specified via + K8S_AUTH_VERIFY_SSL environment variable." + type: bool + aliases: [ verify_ssl ] + namespaces: + description: + - List of namespaces. 
If not specified, will fetch all containers for all namespaces user is authorized + to access. requirements: - "python >= 2.7" @@ -93,20 +94,20 @@ EXAMPLES = ''' # File must be named openshift.yaml or openshift.yml # Authenticate with token, and return all pods and services for all namespaces -plugin: openshift +plugin: community.kubernetes.openshift connections: - host: https://192.168.64.4:8443 api_key: xxxxxxxxxxxxxxxx verify_ssl: false # Use default config (~/.kube/config) file and active context, and return objects for a specific namespace -plugin: openshift +plugin: community.kubernetes.openshift connections: - namespaces: - testing # Use a custom config file, and a specific context. -plugin: openshift +plugin: community.kubernetes.openshift connections: - kubeconfig: /path/to/config context: 'awx/192-168-64-4:8443/developer' diff --git a/plugins/lookup/k8s.py b/plugins/lookup/k8s.py index 314c52c0..68849053 100644 --- a/plugins/lookup/k8s.py +++ b/plugins/lookup/k8s.py @@ -133,23 +133,23 @@ DOCUMENTATION = ''' EXAMPLES = """ - name: Fetch a list of namespaces set_fact: - projects: "{{ lookup('k8s', api_version='v1', kind='Namespace') }}" + projects: "{{ lookup('community.kubernetes.k8s', api_version='v1', kind='Namespace') }}" - name: Fetch all deployments set_fact: - deployments: "{{ lookup('k8s', kind='Deployment') }}" + deployments: "{{ lookup('community.kubernetes.k8s', kind='Deployment') }}" - name: Fetch all deployments in a namespace set_fact: - deployments: "{{ lookup('k8s', kind='Deployment', namespace='testing') }}" + deployments: "{{ lookup('community.kubernetes.k8s', kind='Deployment', namespace='testing') }}" - name: Fetch a specific deployment by name set_fact: - deployments: "{{ lookup('k8s', kind='Deployment', namespace='testing', resource_name='elastic') }}" + deployments: "{{ lookup('community.kubernetes.k8s', kind='Deployment', namespace='testing', resource_name='elastic') }}" - name: Fetch with label selector set_fact: - service: "{{ 
lookup('k8s', kind='Service', label_selector='app=galaxy') }}" + service: "{{ lookup('community.kubernetes.k8s', kind='Service', label_selector='app=galaxy') }}" # Use parameters from a YAML config @@ -159,11 +159,11 @@ EXAMPLES = """ - name: Using the config (loaded from a file in prior task), fetch the latest version of the object set_fact: - service: "{{ lookup('k8s', resource_definition=config) }}" + service: "{{ lookup('community.kubernetes.k8s', resource_definition=config) }}" - name: Use a config from the local filesystem set_fact: - service: "{{ lookup('k8s', src='service.yml') }}" + service: "{{ lookup('community.kubernetes.k8s', src='service.yml') }}" """ RETURN = """ @@ -194,15 +194,14 @@ RETURN = """ type: complex """ +from ansible.errors import AnsibleError +from ansible.module_utils.common._collections_compat import KeysView from ansible.plugins.lookup import LookupBase from ansible_collections.community.kubernetes.plugins.module_utils.common import K8sAnsibleMixin -from ansible.errors import AnsibleError - try: - from openshift.dynamic import DynamicClient from openshift.dynamic.exceptions import NotFoundError HAS_K8S_MODULE_HELPER = True k8s_import_exception = None @@ -210,12 +209,6 @@ except ImportError as e: HAS_K8S_MODULE_HELPER = False k8s_import_exception = e -try: - import yaml - HAS_YAML = True -except ImportError: - HAS_YAML = False - class KubernetesLookup(K8sAnsibleMixin): @@ -226,11 +219,6 @@ class KubernetesLookup(K8sAnsibleMixin): "Requires the OpenShift Python client. Try `pip install openshift`. Detail: {0}".format(k8s_import_exception) ) - if not HAS_YAML: - raise Exception( - "Requires PyYAML. 
Try `pip install PyYAML`" - ) - self.kind = None self.name = None self.namespace = None @@ -253,6 +241,8 @@ class KubernetesLookup(K8sAnsibleMixin): if cluster_info == 'version': return [self.client.version] if cluster_info == 'api_groups': + if isinstance(self.client.resources.api_groups, KeysView): + return [list(self.client.resources.api_groups)] return [self.client.resources.api_groups] self.kind = kwargs.get('kind') diff --git a/plugins/module_utils/common.py b/plugins/module_utils/common.py index 7c88f5be..58bb0855 100644 --- a/plugins/module_utils/common.py +++ b/plugins/module_utils/common.py @@ -18,14 +18,13 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -import copy -import json +from datetime import datetime +import time import os import traceback from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.dict_transformations import recursive_diff from ansible.module_utils.six import iteritems, string_types from ansible.module_utils._text import to_native @@ -34,7 +33,7 @@ try: import kubernetes import openshift from openshift.dynamic import DynamicClient - from openshift.dynamic.exceptions import ResourceNotFoundError, ResourceNotUniqueError + from openshift.dynamic.exceptions import ResourceNotFoundError, ResourceNotUniqueError, NotFoundError HAS_K8S_MODULE_HELPER = True k8s_import_exception = None except ImportError as e: @@ -56,13 +55,14 @@ try: except ImportError: pass +try: + from openshift.dynamic.apply import recursive_diff +except ImportError: + from ansible.module_utils.common.dict_transformations import recursive_diff + def list_dict_str(value): - if isinstance(value, list): - return value - elif isinstance(value, dict): - return value - elif isinstance(value, string_types): + if isinstance(value, (list, dict, string_types)): return value raise TypeError @@ -78,6 +78,9 @@ COMMON_ARG_SPEC = { 'type': 'bool', 'default': False, }, +} + +RESOURCE_ARG_SPEC = 
{ 'resource_definition': { 'type': list_dict_str, 'aliases': ['definition', 'inline'] @@ -85,6 +88,9 @@ COMMON_ARG_SPEC = { 'src': { 'type': 'path', }, +} + +NAME_ARG_SPEC = { 'kind': {}, 'name': {}, 'namespace': {}, @@ -149,20 +155,15 @@ AUTH_ARG_MAP = { class K8sAnsibleMixin(object): - _argspec_cache = None - @property - def argspec(self): - """ - Introspect the model properties, and return an Ansible module arg_spec dict. - :return: dict - """ - if self._argspec_cache: - return self._argspec_cache - argument_spec = copy.deepcopy(COMMON_ARG_SPEC) - argument_spec.update(copy.deepcopy(AUTH_ARG_SPEC)) - self._argspec_cache = argument_spec - return self._argspec_cache + def __init__(self, *args, **kwargs): + if not HAS_K8S_MODULE_HELPER: + self.fail_json(msg=missing_required_lib('openshift'), exception=K8S_IMP_ERR, + error=to_native(k8s_import_exception)) + self.openshift_version = openshift.__version__ + + if not HAS_YAML: + self.fail_json(msg=missing_required_lib("PyYAML"), exception=YAML_IMP_ERR) def get_api_client(self, **auth_params): auth_params = auth_params or getattr(self, 'params', {}) @@ -186,13 +187,19 @@ class K8sAnsibleMixin(object): # We have enough in the parameters to authenticate, no need to load incluster or kubeconfig pass elif auth_set('kubeconfig') or auth_set('context'): - kubernetes.config.load_kube_config(auth.get('kubeconfig'), auth.get('context'), persist_config=auth.get('persist_config')) + try: + kubernetes.config.load_kube_config(auth.get('kubeconfig'), auth.get('context'), persist_config=auth.get('persist_config')) + except Exception as err: + self.fail(msg='Failed to load kubeconfig due to %s' % to_native(err)) else: # First try to do incluster config, then kubeconfig try: kubernetes.config.load_incluster_config() except kubernetes.config.ConfigException: - kubernetes.config.load_kube_config(auth.get('kubeconfig'), auth.get('context'), persist_config=auth.get('persist_config')) + try: + 
kubernetes.config.load_kube_config(auth.get('kubeconfig'), auth.get('context'), persist_config=auth.get('persist_config')) + except Exception as err: + self.fail(msg='Failed to load kubeconfig due to %s' % to_native(err)) # Override any values in the default configuration with Ansible parameters configuration = kubernetes.client.Configuration() @@ -204,7 +211,10 @@ class K8sAnsibleMixin(object): setattr(configuration, key, value) kubernetes.client.Configuration.set_default(configuration) - return DynamicClient(kubernetes.client.ApiClient(configuration)) + try: + return DynamicClient(kubernetes.client.ApiClient(configuration)) + except Exception as err: + self.fail(msg='Failed to get client due to %s' % to_native(err)) def find_resource(self, kind, api_version, fail=False): for attribute in ['kind', 'name', 'singular_name']: @@ -258,36 +268,165 @@ class K8sAnsibleMixin(object): self.fail(msg="Error loading resource_definition: {0}".format(exc)) return result - @staticmethod - def diff_objects(existing, new): + def diff_objects(self, existing, new): result = dict() diff = recursive_diff(existing, new) - if diff: - result['before'] = diff[0] - result['after'] = diff[1] - return not diff, result + if not diff: + return True, result + result['before'] = diff[0] + result['after'] = diff[1] -class KubernetesAnsibleModule(AnsibleModule, K8sAnsibleMixin): - resource_definition = None - api_version = None - kind = None + # If only metadata.generation and metadata.resourceVersion changed, ignore it + ignored_keys = set(['generation', 'resourceVersion']) - def __init__(self, *args, **kwargs): + if list(result['after'].keys()) != ['metadata'] or list(result['before'].keys()) != ['metadata']: + return False, result - kwargs['argument_spec'] = self.argspec - AnsibleModule.__init__(self, *args, **kwargs) + if not set(result['after']['metadata'].keys()).issubset(ignored_keys): + return False, result + if not set(result['before']['metadata'].keys()).issubset(ignored_keys): + return 
False, result - if not HAS_K8S_MODULE_HELPER: - self.fail_json(msg=missing_required_lib('openshift'), exception=K8S_IMP_ERR, - error=to_native(k8s_import_exception)) - self.openshift_version = openshift.__version__ + if hasattr(self, 'warn'): + self.warn('No meaningful diff was generated, but the API may not be idempotent (only metadata.generation or metadata.resourceVersion were changed)') - if not HAS_YAML: - self.fail_json(msg=missing_required_lib("PyYAML"), exception=YAML_IMP_ERR) - - def execute_module(self): - raise NotImplementedError() + return True, result def fail(self, msg=None): self.fail_json(msg=msg) + + def _wait_for(self, resource, name, namespace, predicate, sleep, timeout, state): + start = datetime.now() + + def _wait_for_elapsed(): + return (datetime.now() - start).seconds + + response = None + while _wait_for_elapsed() < timeout: + try: + response = resource.get(name=name, namespace=namespace) + if predicate(response): + if response: + return True, response.to_dict(), _wait_for_elapsed() + else: + return True, {}, _wait_for_elapsed() + time.sleep(sleep) + except NotFoundError: + if state == 'absent': + return True, {}, _wait_for_elapsed() + if response: + response = response.to_dict() + return False, response, _wait_for_elapsed() + + def wait(self, resource, definition, sleep, timeout, state='present', condition=None): + + def _deployment_ready(deployment): + # FIXME: frustratingly bool(deployment.status) is True even if status is empty + # Furthermore deployment.status.availableReplicas == deployment.status.replicas == None if status is empty + # deployment.status.replicas is None is perfectly ok if desired replicas == 0 + # Scaling up means that we also need to check that we're not in a + # situation where status.replicas == status.availableReplicas + # but spec.replicas != status.replicas + return (deployment.status + and deployment.spec.replicas == (deployment.status.replicas or 0) + and deployment.status.availableReplicas == 
deployment.status.replicas + and deployment.status.observedGeneration == deployment.metadata.generation + and not deployment.status.unavailableReplicas) + + def _pod_ready(pod): + return (pod.status and pod.status.containerStatuses is not None + and all([container.ready for container in pod.status.containerStatuses])) + + def _daemonset_ready(daemonset): + return (daemonset.status and daemonset.status.desiredNumberScheduled is not None + and daemonset.status.numberReady == daemonset.status.desiredNumberScheduled + and daemonset.status.observedGeneration == daemonset.metadata.generation + and not daemonset.status.unavailableReplicas) + + def _custom_condition(resource): + if not resource.status or not resource.status.conditions: + return False + match = [x for x in resource.status.conditions if x.type == condition['type']] + if not match: + return False + # There should never be more than one condition of a specific type + match = match[0] + if match.status == 'Unknown': + if match.status == condition['status']: + if 'reason' not in condition: + return True + if condition['reason']: + return match.reason == condition['reason'] + return False + status = True if match.status == 'True' else False + if status == condition['status']: + if condition.get('reason'): + return match.reason == condition['reason'] + return True + return False + + def _resource_absent(resource): + return not resource + + waiter = dict( + Deployment=_deployment_ready, + DaemonSet=_daemonset_ready, + Pod=_pod_ready + ) + kind = definition['kind'] + if state == 'present' and not condition: + predicate = waiter.get(kind, lambda x: x) + elif state == 'present' and condition: + predicate = _custom_condition + else: + predicate = _resource_absent + return self._wait_for(resource, definition['metadata']['name'], definition['metadata'].get('namespace'), predicate, sleep, timeout, state) + + def set_resource_definitions(self): + resource_definition = self.params.get('resource_definition') + + 
self.resource_definitions = [] + + if resource_definition: + if isinstance(resource_definition, string_types): + try: + self.resource_definitions = yaml.safe_load_all(resource_definition) + except (IOError, yaml.YAMLError) as exc: + self.fail(msg="Error loading resource_definition: {0}".format(exc)) + elif isinstance(resource_definition, list): + self.resource_definitions = resource_definition + else: + self.resource_definitions = [resource_definition] + + src = self.params.get('src') + if src: + self.resource_definitions = self.load_resource_definitions(src) + try: + self.resource_definitions = [item for item in self.resource_definitions if item] + except AttributeError: + pass + + if not resource_definition and not src: + implicit_definition = dict( + kind=self.kind, + apiVersion=self.api_version, + metadata=dict(name=self.name) + ) + if self.namespace: + implicit_definition['metadata']['namespace'] = self.namespace + self.resource_definitions = [implicit_definition] + + +class KubernetesAnsibleModule(AnsibleModule, K8sAnsibleMixin): + # NOTE: This class KubernetesAnsibleModule is deprecated in favor of + # class K8sAnsibleMixin and will be removed 2.0.0 release. + # Please use K8sAnsibleMixin instead. + + def __init__(self, *args, **kwargs): + kwargs['argument_spec'] = self.argspec + AnsibleModule.__init__(self, *args, **kwargs) + K8sAnsibleMixin.__init__(self, *args, **kwargs) + + self.warn("class KubernetesAnsibleModule is deprecated" + " and will be removed in 2.0.0. 
Please use K8sAnsibleMixin instead.") diff --git a/plugins/module_utils/raw.py b/plugins/module_utils/raw.py index 767ab11b..85181600 100644 --- a/plugins/module_utils/raw.py +++ b/plugins/module_utils/raw.py @@ -20,32 +20,24 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type import copy -from datetime import datetime from distutils.version import LooseVersion -import time import sys import traceback -from ansible.module_utils.basic import missing_required_lib -from ansible_collections.community.kubernetes.plugins.module_utils.common import AUTH_ARG_SPEC, COMMON_ARG_SPEC -from ansible.module_utils.six import string_types -from ansible_collections.community.kubernetes.plugins.module_utils.common import KubernetesAnsibleModule +from ansible.module_utils.basic import missing_required_lib, AnsibleModule +from ansible.module_utils._text import to_native from ansible.module_utils.common.dict_transformations import dict_merge +from ansible_collections.community.kubernetes.plugins.module_utils.common import ( + AUTH_ARG_SPEC, COMMON_ARG_SPEC, RESOURCE_ARG_SPEC, NAME_ARG_SPEC, K8sAnsibleMixin) try: - import yaml from openshift.dynamic.exceptions import DynamicApiError, NotFoundError, ConflictError, ForbiddenError, KubernetesValidateMissing import urllib3 except ImportError: # Exceptions handled in common pass -try: - import kubernetes_validate - HAS_KUBERNETES_VALIDATE = True -except ImportError: - HAS_KUBERNETES_VALIDATE = False K8S_CONFIG_HASH_IMP_ERR = None try: @@ -63,7 +55,7 @@ except ImportError: HAS_K8S_APPLY = False -class KubernetesRawModule(KubernetesAnsibleModule): +class KubernetesRawModule(K8sAnsibleMixin): @property def validate_spec(self): @@ -84,6 +76,8 @@ class KubernetesRawModule(KubernetesAnsibleModule): @property def argspec(self): argument_spec = copy.deepcopy(COMMON_ARG_SPEC) + argument_spec.update(copy.deepcopy(NAME_ARG_SPEC)) + argument_spec.update(copy.deepcopy(RESOURCE_ARG_SPEC)) 
argument_spec.update(copy.deepcopy(AUTH_ARG_SPEC)) argument_spec['merge_type'] = dict(type='list', elements='str', choices=['json', 'merge', 'strategic-merge']) argument_spec['wait'] = dict(type='bool', default=False) @@ -104,15 +98,25 @@ class KubernetesRawModule(KubernetesAnsibleModule): ('merge_type', 'apply'), ] - KubernetesAnsibleModule.__init__(self, *args, - mutually_exclusive=mutually_exclusive, - supports_check_mode=True, - **kwargs) + module = AnsibleModule( + argument_spec=self.argspec, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True, + ) + + self.module = module + self.check_mode = self.module.check_mode + self.params = self.module.params + self.fail_json = self.module.fail_json + self.fail = self.module.fail_json + self.exit_json = self.module.exit_json + + super(KubernetesRawModule, self).__init__() + self.kind = k8s_kind or self.params.get('kind') self.api_version = self.params.get('api_version') self.name = self.params.get('name') self.namespace = self.params.get('namespace') - resource_definition = self.params.get('resource_definition') validate = self.params.get('validate') if validate: if LooseVersion(self.openshift_version) < LooseVersion("0.8.0"): @@ -129,34 +133,7 @@ class KubernetesRawModule(KubernetesAnsibleModule): if self.apply: if not HAS_K8S_APPLY: self.fail_json(msg=missing_required_lib("openshift >= 0.9.2", reason="for apply")) - - if resource_definition: - if isinstance(resource_definition, string_types): - try: - self.resource_definitions = yaml.safe_load_all(resource_definition) - except (IOError, yaml.YAMLError) as exc: - self.fail(msg="Error loading resource_definition: {0}".format(exc)) - elif isinstance(resource_definition, list): - self.resource_definitions = resource_definition - else: - self.resource_definitions = [resource_definition] - src = self.params.get('src') - if src: - self.resource_definitions = self.load_resource_definitions(src) - try: - self.resource_definitions = [item for item in 
self.resource_definitions if item] - except AttributeError: - pass - - if not resource_definition and not src: - implicit_definition = dict( - kind=self.kind, - apiVersion=self.api_version, - metadata=dict(name=self.name) - ) - if self.namespace: - implicit_definition['metadata']['namespace'] = self.namespace - self.resource_definitions = [implicit_definition] + self.set_resource_definitions() def flatten_list_kind(self, list_resource, definitions): flattened = [] @@ -178,9 +155,11 @@ class KubernetesRawModule(KubernetesAnsibleModule): flattened_definitions = [] for definition in self.resource_definitions: + if definition is None: + continue kind = definition.get('kind', self.kind) api_version = definition.get('apiVersion', self.api_version) - if kind.endswith('List'): + if kind and kind.endswith('List'): resource = self.find_resource(kind, api_version, fail=False) flattened_definitions.extend(self.flatten_list_kind(resource, definition)) else: @@ -274,6 +253,9 @@ class KubernetesRawModule(KubernetesAnsibleModule): except DynamicApiError as exc: self.fail_json(msg='Failed to retrieve requested object: {0}'.format(exc.body), error=exc.status, status=exc.status, reason=exc.reason) + except Exception as exc: + self.fail_json(msg='Failed to retrieve requested object: {0}'.format(to_native(exc)), + error='', status='', reason='') if state == 'absent': result['method'] = "delete" @@ -299,7 +281,11 @@ class KubernetesRawModule(KubernetesAnsibleModule): else: if self.apply: if self.check_mode: - ignored, k8s_obj = apply_object(resource, definition) + ignored, patch = apply_object(resource, definition) + if existing: + k8s_obj = dict_merge(existing.to_dict(), patch) + else: + k8s_obj = patch else: try: k8s_obj = resource.apply(definition, namespace=namespace).to_dict() @@ -310,7 +296,7 @@ class KubernetesRawModule(KubernetesAnsibleModule): self.fail_json(msg=msg, error=exc.status, status=exc.status, reason=exc.reason) success = True result['result'] = k8s_obj - if wait: + 
if wait and not self.check_mode: success, result['result'], result['duration'] = self.wait(resource, definition, wait_sleep, wait_timeout, condition=wait_condition) if existing: existing = existing.to_dict() @@ -369,7 +355,7 @@ class KubernetesRawModule(KubernetesAnsibleModule): match, diffs = self.diff_objects(existing.to_dict(), k8s_obj) success = True result['result'] = k8s_obj - if wait: + if wait and not self.check_mode: success, result['result'], result['duration'] = self.wait(resource, definition, wait_sleep, wait_timeout, condition=wait_condition) match, diffs = self.diff_objects(existing.to_dict(), result['result']) result['changed'] = not match @@ -397,7 +383,7 @@ class KubernetesRawModule(KubernetesAnsibleModule): success = True result['result'] = k8s_obj - if wait: + if wait and not self.check_mode: success, result['result'], result['duration'] = self.wait(resource, definition, wait_sleep, wait_timeout, condition=wait_condition) match, diffs = self.diff_objects(existing.to_dict(), result['result']) result['changed'] = not match @@ -428,6 +414,12 @@ class KubernetesRawModule(KubernetesAnsibleModule): msg += "\n" + "\n ".join(self.warnings) error = dict(msg=msg, error=exc.status, status=exc.status, reason=exc.reason, warnings=self.warnings) return None, error + except Exception as exc: + msg = "Failed to patch object: {0}".format(exc) + if self.warnings: + msg += "\n" + "\n ".join(self.warnings) + error = dict(msg=msg, error=to_native(exc), status='', reason='', warnings=self.warnings) + return None, error def create_project_request(self, definition): definition['kind'] = 'ProjectRequest' @@ -443,83 +435,3 @@ class KubernetesRawModule(KubernetesAnsibleModule): result['changed'] = True result['method'] = 'create' return result - - def _wait_for(self, resource, name, namespace, predicate, sleep, timeout, state): - start = datetime.now() - - def _wait_for_elapsed(): - return (datetime.now() - start).seconds - - response = None - while _wait_for_elapsed() < 
timeout: - try: - response = resource.get(name=name, namespace=namespace) - if predicate(response): - if response: - return True, response.to_dict(), _wait_for_elapsed() - else: - return True, {}, _wait_for_elapsed() - time.sleep(sleep) - except NotFoundError: - if state == 'absent': - return True, {}, _wait_for_elapsed() - if response: - response = response.to_dict() - return False, response, _wait_for_elapsed() - - def wait(self, resource, definition, sleep, timeout, state='present', condition=None): - - def _deployment_ready(deployment): - # FIXME: frustratingly bool(deployment.status) is True even if status is empty - # Furthermore deployment.status.availableReplicas == deployment.status.replicas == None if status is empty - return (deployment.status and deployment.status.replicas is not None and - deployment.status.availableReplicas == deployment.status.replicas and - deployment.status.observedGeneration == deployment.metadata.generation) - - def _pod_ready(pod): - return (pod.status and pod.status.containerStatuses is not None and - all([container.ready for container in pod.status.containerStatuses])) - - def _daemonset_ready(daemonset): - return (daemonset.status and daemonset.status.desiredNumberScheduled is not None and - daemonset.status.numberReady == daemonset.status.desiredNumberScheduled and - daemonset.status.observedGeneration == daemonset.metadata.generation) - - def _custom_condition(resource): - if not resource.status or not resource.status.conditions: - return False - match = [x for x in resource.status.conditions if x.type == condition['type']] - if not match: - return False - # There should never be more than one condition of a specific type - match = match[0] - if match.status == 'Unknown': - if match.status == condition['status']: - if 'reason' not in condition: - return True - if condition['reason']: - return match.reason == condition['reason'] - return False - status = True if match.status == 'True' else False - if status == 
condition['status']: - if condition.get('reason'): - return match.reason == condition['reason'] - return True - return False - - def _resource_absent(resource): - return not resource - - waiter = dict( - Deployment=_deployment_ready, - DaemonSet=_daemonset_ready, - Pod=_pod_ready - ) - kind = definition['kind'] - if state == 'present' and not condition: - predicate = waiter.get(kind, lambda x: x) - elif state == 'present' and condition: - predicate = _custom_condition - else: - predicate = _resource_absent - return self._wait_for(resource, definition['metadata']['name'], definition['metadata'].get('namespace'), predicate, sleep, timeout, state) diff --git a/plugins/module_utils/scale.py b/plugins/module_utils/scale.py index 4b798bb2..55bab010 100644 --- a/plugins/module_utils/scale.py +++ b/plugins/module_utils/scale.py @@ -20,21 +20,15 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type import copy -import math -import time -from ansible_collections.community.kubernetes.plugins.module_utils.common import AUTH_ARG_SPEC, COMMON_ARG_SPEC -from ansible_collections.community.kubernetes.plugins.module_utils.common import KubernetesAnsibleModule -from ansible.module_utils.six import string_types +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.kubernetes.plugins.module_utils.common import ( + AUTH_ARG_SPEC, RESOURCE_ARG_SPEC, NAME_ARG_SPEC, K8sAnsibleMixin) try: - import yaml - from openshift import watch - from openshift.dynamic.client import ResourceInstance - from openshift.helper.exceptions import KubernetesException -except ImportError as exc: - class KubernetesException(Exception): - pass + from openshift.dynamic.exceptions import NotFoundError +except ImportError: + pass SCALE_ARG_SPEC = { @@ -46,7 +40,7 @@ SCALE_ARG_SPEC = { } -class KubernetesAnsibleScaleModule(KubernetesAnsibleModule): +class KubernetesAnsibleScaleModule(K8sAnsibleMixin): def __init__(self, k8s_kind=None, *args, 
**kwargs): self.client = None @@ -56,39 +50,25 @@ class KubernetesAnsibleScaleModule(KubernetesAnsibleModule): ('resource_definition', 'src'), ] - KubernetesAnsibleModule.__init__(self, *args, - mutually_exclusive=mutually_exclusive, - supports_check_mode=True, - **kwargs) + module = AnsibleModule( + argument_spec=self.argspec, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True, + ) + + self.module = module + self.params = self.module.params + self.check_mode = self.module.check_mode + self.fail_json = self.module.fail_json + self.fail = self.module.fail_json + self.exit_json = self.module.exit_json + super(KubernetesAnsibleScaleModule, self).__init__() + self.kind = k8s_kind or self.params.get('kind') self.api_version = self.params.get('api_version') self.name = self.params.get('name') self.namespace = self.params.get('namespace') - resource_definition = self.params.get('resource_definition') - - if resource_definition: - if isinstance(resource_definition, string_types): - try: - self.resource_definitions = yaml.safe_load_all(resource_definition) - except (IOError, yaml.YAMLError) as exc: - self.fail(msg="Error loading resource_definition: {0}".format(exc)) - elif isinstance(resource_definition, list): - self.resource_definitions = resource_definition - else: - self.resource_definitions = [resource_definition] - src = self.params.get('src') - if src: - self.resource_definitions = self.load_resource_definitions(src) - - if not resource_definition and not src: - implicit_definition = dict( - kind=self.kind, - apiVersion=self.api_version, - metadata=dict(name=self.name) - ) - if self.namespace: - implicit_definition['metadata']['namespace'] = self.namespace - self.resource_definitions = [implicit_definition] + self.set_resource_definitions() def execute_module(self): definition = self.resource_definitions[0] @@ -107,14 +87,16 @@ class KubernetesAnsibleScaleModule(KubernetesAnsibleModule): wait_time = self.params.get('wait_timeout') existing = None 
existing_count = None - return_attributes = dict(changed=False, result=dict()) + return_attributes = dict(changed=False, result=dict(), diff=dict()) + if wait: + return_attributes['duration'] = 0 resource = self.find_resource(kind, api_version, fail=True) try: existing = resource.get(name=name, namespace=namespace) return_attributes['result'] = existing.to_dict() - except KubernetesException as exc: + except NotFoundError as exc: self.fail_json(msg='Failed to retrieve requested object: {0}'.format(exc), error=exc.value.get('status')) @@ -137,108 +119,48 @@ class KubernetesAnsibleScaleModule(KubernetesAnsibleModule): if not self.check_mode: if self.kind == 'job': existing.spec.parallelism = replicas - k8s_obj = resource.patch(existing.to_dict()) + return_attributes['result'] = resource.patch(existing.to_dict()).to_dict() else: - k8s_obj = self.scale(resource, existing, replicas, wait, wait_time) - return_attributes['result'] = k8s_obj.to_dict() + return_attributes = self.scale(resource, existing, replicas, wait, wait_time) self.exit_json(**return_attributes) @property def argspec(self): - args = copy.deepcopy(COMMON_ARG_SPEC) - args.pop('state') - args.pop('force') + args = copy.deepcopy(SCALE_ARG_SPEC) + args.update(RESOURCE_ARG_SPEC) + args.update(NAME_ARG_SPEC) args.update(AUTH_ARG_SPEC) - args.update(SCALE_ARG_SPEC) return args def scale(self, resource, existing_object, replicas, wait, wait_time): name = existing_object.metadata.name namespace = existing_object.metadata.namespace + kind = existing_object.kind if not hasattr(resource, 'scale'): self.fail_json( msg="Cannot perform scale on resource of kind {0}".format(resource.kind) ) - scale_obj = {'metadata': {'name': name, 'namespace': namespace}, 'spec': {'replicas': replicas}} + scale_obj = {'kind': kind, 'metadata': {'name': name, 'namespace': namespace}, 'spec': {'replicas': replicas}} - return_obj = None - stream = None - - if wait: - w, stream = self._create_stream(resource, namespace, wait_time) + 
existing = resource.get(name=name, namespace=namespace) try: resource.scale.patch(body=scale_obj) except Exception as exc: - self.fail_json( - msg="Scale request failed: {0}".format(exc) - ) + self.fail_json(msg="Scale request failed: {0}".format(exc)) - if wait and stream is not None: - return_obj = self._read_stream(resource, w, stream, name, replicas) + k8s_obj = resource.get(name=name, namespace=namespace).to_dict() + match, diffs = self.diff_objects(existing.to_dict(), k8s_obj) + result = dict() + result['result'] = k8s_obj + result['changed'] = not match + result['diff'] = diffs - if not return_obj: - return_obj = self._wait_for_response(resource, name, namespace) - - return return_obj - - def _create_stream(self, resource, namespace, wait_time): - """ Create a stream of events for the object """ - w = None - stream = None - try: - w = watch.Watch() - w._api_client = self.client.client - if namespace: - stream = w.stream(resource.get, serialize=False, namespace=namespace, timeout_seconds=wait_time) - else: - stream = w.stream(resource.get, serialize=False, namespace=namespace, timeout_seconds=wait_time) - except KubernetesException: - pass - return w, stream - - def _read_stream(self, resource, watcher, stream, name, replicas): - """ Wait for ready_replicas to equal the requested number of replicas. """ - return_obj = None - try: - for event in stream: - if event.get('object'): - obj = ResourceInstance(resource, event['object']) - if obj.metadata.name == name and hasattr(obj, 'status'): - if replicas == 0: - if not hasattr(obj.status, 'readyReplicas') or not obj.status.readyReplicas: - return_obj = obj - watcher.stop() - break - if hasattr(obj.status, 'readyReplicas') and obj.status.readyReplicas == replicas: - return_obj = obj - watcher.stop() - break - except Exception as exc: - self.fail_json(msg="Exception reading event stream: {0}".format(exc)) - - if not return_obj: - self.fail_json(msg="Error fetching the patched object. 
Try a higher wait_timeout value.") - if replicas and return_obj.status.readyReplicas is None: - self.fail_json(msg="Failed to fetch the number of ready replicas. Try a higher wait_timeout value.") - if replicas and return_obj.status.readyReplicas != replicas: - self.fail_json(msg="Number of ready replicas is {0}. Failed to reach {1} ready replicas within " - "the wait_timeout period.".format(return_obj.status.ready_replicas, replicas)) - return return_obj - - def _wait_for_response(self, resource, name, namespace): - """ Wait for an API response """ - tries = 0 - half = math.ceil(20 / 2) - obj = None - - while tries <= half: - obj = resource.get(name=name, namespace=namespace) - if obj: - break - tries += 2 - time.sleep(2) - return obj + if wait: + success, result['result'], result['duration'] = self.wait(resource, scale_obj, 5, wait_time) + if not success: + self.fail_json(msg="Resource scaling timed out", **result) + return result diff --git a/plugins/modules/helm.py b/plugins/modules/helm.py index 2cdd9672..c6870efa 100644 --- a/plugins/modules/helm.py +++ b/plugins/modules/helm.py @@ -6,16 +6,15 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: helm short_description: Manages Kubernetes packages with the Helm package manager +version_added: "0.11.0" + author: - Lucas Boisserie (@LucasBoisserie) - Matthieu Diehr (@d-matt) @@ -28,11 +27,6 @@ description: - Install, upgrade, delete packages with the Helm package manager. options: - binary_path: - description: - - The path of a helm binary to use. - required: false - type: path chart_ref: description: - chart_reference on chart repository. @@ -96,15 +90,6 @@ options: - Helm option to force reinstall, ignore on new install. 
default: False type: bool - kube_context: - description: - - Helm option to specify which kubeconfig context to use. - type: str - kubeconfig_path: - description: - - Helm option to specify kubeconfig path to use. - type: path - aliases: [ kubeconfig ] purge: description: - Remove the release from the store and make its name free for later use. @@ -119,24 +104,44 @@ options: description: - Timeout when wait option is enabled (helm2 is a number of seconds, helm3 is a duration). type: str + atomic: + description: + - If set, the installation process deletes the installation on failure. + type: bool + default: False + create_namespace: + description: + - Create the release namespace if not present. + type: bool + default: False + version_added: "0.11.1" + replace: + description: + - Reuse the given name, only if that name is a deleted release which remains in the history. + - This is unsafe in production environment. + type: bool + default: False + version_added: "1.11.0" +extends_documentation_fragment: + - community.kubernetes.helm_common_options ''' -EXAMPLES = ''' -- name: Create helm namespace as HELM 3 doesn't create it automatically - k8s: - api_version: v1 - kind: Namespace - name: "monitoring" - wait: true +EXAMPLES = r''' +- name: Deploy latest version of Prometheus chart inside monitoring namespace (and create it) + community.kubernetes.helm: + name: test + chart_ref: stable/prometheus + release_namespace: monitoring + create_namespace: true # From repository - name: Add stable chart repo - helm_repository: + community.kubernetes.helm_repository: name: stable repo_url: "https://kubernetes-charts.storage.googleapis.com" - name: Deploy latest version of Grafana chart inside monitoring namespace with values - helm: + community.kubernetes.helm: name: test chart_ref: stable/grafana release_namespace: monitoring @@ -144,39 +149,39 @@ EXAMPLES = ''' replicas: 2 - name: Deploy Grafana chart on 5.0.12 with values loaded from template - helm: + 
community.kubernetes.helm: name: test chart_ref: stable/grafana chart_version: 5.0.12 values: "{{ lookup('template', 'somefile.yaml') | from_yaml }}" - name: Remove test release and waiting suppression ending - helm: + community.kubernetes.helm: name: test state: absent wait: true # From git - name: Git clone stable repo on HEAD - git: + ansible.builtin.git: repo: "http://github.com/helm/charts.git" dest: /tmp/helm_repo - name: Deploy Grafana chart from local path - helm: + community.kubernetes.helm: name: test chart_ref: /tmp/helm_repo/stable/grafana release_namespace: monitoring # From url - name: Deploy Grafana chart on 5.0.12 from url - helm: + community.kubernetes.helm: name: test chart_ref: "https://kubernetes-charts.storage.googleapis.com/grafana-5.0.12.tgz" release_namespace: monitoring ''' -RETURN = """ +RETURN = r""" status: type: complex description: A dictionary of status output @@ -231,6 +236,7 @@ command: sample: helm upgrade ... """ +import tempfile import traceback try: @@ -240,32 +246,42 @@ except ImportError: IMP_YAML_ERR = traceback.format_exc() IMP_YAML = False -from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.basic import AnsibleModule, missing_required_lib, env_fallback module = None -# Get Values from deployed release -def get_values(command, release_name): - get_command = command + " get values --output=yaml " + release_name - - rc, out, err = module.run_command(get_command) - +def exec_command(command): + rc, out, err = module.run_command(command) if rc != 0: module.fail_json( msg="Failure when executing Helm command. 
Exited {0}.\nstdout: {1}\nstderr: {2}".format(rc, out, err), - command=get_command + stdout=out, + stderr=err, + command=command, ) + return rc, out, err + +def get_values(command, release_name): + """ + Get Values from deployed release + """ + + get_command = command + " get values --output=yaml " + release_name + + rc, out, err = exec_command(get_command) # Helm 3 return "null" string when no values are set if out.rstrip("\n") == "null": return {} - else: - return yaml.safe_load(out) + return yaml.safe_load(out) -# Get Release from all deployed releases def get_release(state, release_name): + """ + Get Release from all deployed releases + """ + if state is not None: for release in state: if release['name'] == release_name: @@ -273,17 +289,14 @@ def get_release(state, release_name): return None -# Get Release state from deployed release def get_release_status(command, release_name): + """ + Get Release state from deployed release + """ + list_command = command + " list --output=yaml --filter " + release_name - rc, out, err = module.run_command(list_command) - - if rc != 0: - module.fail_json( - msg="Failure when executing Helm command. Exited {0}.\nstdout: {1}\nstderr: {2}".format(rc, out, err), - command=list_command - ) + rc, out, err = exec_command(list_command) release = get_release(yaml.safe_load(out), release_name) @@ -295,56 +308,61 @@ def get_release_status(command, release_name): return release -# Run Repo update def run_repo_update(command): + """ + Run Repo update + """ repo_update_command = command + " repo update" - - rc, out, err = module.run_command(repo_update_command) - if rc != 0: - module.fail_json( - msg="Failure when executing Helm command. 
Exited {0}.\nstdout: {1}\nstderr: {2}".format(rc, out, err), - command=repo_update_command - ) + rc, out, err = exec_command(repo_update_command) -# Get chart info def fetch_chart_info(command, chart_ref): + """ + Get chart info + """ inspect_command = command + " show chart " + chart_ref - rc, out, err = module.run_command(inspect_command) - if rc != 0: - module.fail_json( - msg="Failure when executing Helm command. Exited {0}.\nstdout: {1}\nstderr: {2}".format(rc, out, err), - command=inspect_command - ) + rc, out, err = exec_command(inspect_command) return yaml.safe_load(out) -# Install/upgrade/rollback release chart -def deploy(command, release_name, release_values, chart_name, wait, wait_timeout, disable_hook, force): - deploy_command = command + " upgrade -i" # install/upgrade +def deploy(command, release_name, release_values, chart_name, wait, + wait_timeout, disable_hook, force, atomic=False, create_namespace=False, + replace=False): + """ + Install/upgrade/rollback release chart + """ + if replace: + # '--replace' is not supported by 'upgrade -i' + deploy_command = command + " install" + else: + deploy_command = command + " upgrade -i" # install/upgrade - # Always reset values to keep release_values equal to values released - deploy_command += " --reset-values" + # Always reset values to keep release_values equal to values released + deploy_command += " --reset-values" if wait: deploy_command += " --wait" if wait_timeout is not None: deploy_command += " --timeout " + wait_timeout + if atomic: + deploy_command += " --atomic" + if force: deploy_command += " --force" + if replace: + deploy_command += " --replace" + if disable_hook: deploy_command += " --no-hooks" - if release_values != {}: - try: - import tempfile - except ImportError: - module.fail_json(msg=missing_required_lib("tempfile"), exception=traceback.format_exc()) + if create_namespace: + deploy_command += " --create-namespace" + if release_values != {}: fd, path = tempfile.mkstemp(suffix='.yml') 
with open(path, 'w') as yaml_file: yaml.dump(release_values, yaml_file, default_flow_style=False) @@ -355,8 +373,11 @@ def deploy(command, release_name, release_values, chart_name, wait, wait_timeout return deploy_command -# Delete release chart def delete(command, release_name, purge, disable_hook): + """ + Delete release chart + """ + delete_command = command + " uninstall " if not purge: @@ -387,11 +408,14 @@ def main(): # Helm options disable_hook=dict(type='bool', default=False), force=dict(type='bool', default=False), - kube_context=dict(type='str'), - kubeconfig_path=dict(type='path', aliases=['kubeconfig']), + kube_context=dict(type='str', aliases=['context'], fallback=(env_fallback, ['K8S_AUTH_CONTEXT'])), + kubeconfig_path=dict(type='path', aliases=['kubeconfig'], fallback=(env_fallback, ['K8S_AUTH_KUBECONFIG'])), purge=dict(type='bool', default=True), wait=dict(type='bool', default=False), wait_timeout=dict(type='str'), + atomic=dict(type='bool', default=False), + create_namespace=dict(type='bool', default=False), + replace=dict(type='bool', default=False), ), required_if=[ ('release_state', 'present', ['release_name', 'chart_ref']), @@ -423,6 +447,9 @@ def main(): purge = module.params.get('purge') wait = module.params.get('wait') wait_timeout = module.params.get('wait_timeout') + atomic = module.params.get('atomic') + create_namespace = module.params.get('create_namespace') + replace = module.params.get('replace') if bin_path is not None: helm_cmd_common = bin_path @@ -446,6 +473,9 @@ def main(): # keep helm_cmd_common for get_release_status in module_exit_json helm_cmd = helm_cmd_common if release_state == "absent" and release_status is not None: + if replace: + module.fail_json(msg="replace is not applicable when state is absent") + helm_cmd = delete(helm_cmd, release_name, purge, disable_hook) changed = True elif release_state == "present": @@ -461,30 +491,42 @@ def main(): if release_status is None: # Not installed helm_cmd = deploy(helm_cmd, 
release_name, release_values, chart_ref, wait, wait_timeout, - disable_hook, False) + disable_hook, False, atomic=atomic, create_namespace=create_namespace, + replace=replace) changed = True elif force or release_values != release_status['values'] \ or (chart_info['name'] + '-' + chart_info['version']) != release_status["chart"]: helm_cmd = deploy(helm_cmd, release_name, release_values, chart_ref, wait, wait_timeout, - disable_hook, force) + disable_hook, force, atomic=atomic, create_namespace=create_namespace, + replace=replace) changed = True if module.check_mode: - module.exit_json(changed=changed) + module.exit_json( + changed=changed, + command=helm_cmd, + stdout='', + stderr='', + ) elif not changed: - module.exit_json(changed=False, status=release_status) - - rc, out, err = module.run_command(helm_cmd) - - if rc != 0: - module.fail_json( - msg="Failure when executing Helm command. Exited {0}.\nstdout: {1}\nstderr: {2}".format(rc, out, err), - command=helm_cmd + module.exit_json( + changed=False, + status=release_status, + stdout='', + stderr='', + command=helm_cmd, ) - module.exit_json(changed=changed, stdout=out, stderr=err, - status=get_release_status(helm_cmd_common, release_name), command=helm_cmd) + rc, out, err = exec_command(helm_cmd) + + module.exit_json( + changed=changed, + stdout=out, + stderr=err, + status=get_release_status(helm_cmd_common, release_name), + command=helm_cmd, + ) if __name__ == '__main__': diff --git a/plugins/modules/helm_info.py b/plugins/modules/helm_info.py index 5d773c21..03ebdde3 100644 --- a/plugins/modules/helm_info.py +++ b/plugins/modules/helm_info.py @@ -1,20 +1,19 @@ #!/usr/bin/python # -*- coding: utf-8 -*- -# Copyright: Ansible Project +# Copyright: (c) 2020, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': 
['preview'], - 'supported_by': 'community'} -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: helm_info -short_description: Get informations from Helm package deployed inside the cluster +short_description: Get information from Helm package deployed inside the cluster + +version_added: "0.11.0" author: - Lucas Boisserie (@LucasBoisserie) @@ -24,14 +23,9 @@ requirements: - "yaml (https://pypi.org/project/PyYAML/)" description: - - Get informations (values, states, ...) from Helm package deployed inside the cluster + - Get information (values, states, ...) from Helm package deployed inside the cluster. options: - binary_path: - description: - - The path of a helm binary to use. - required: false - type: path release_name: description: - Release name to manage. @@ -44,27 +38,18 @@ options: required: true type: str aliases: [ namespace ] - -#Helm options - kube_context: - description: - - Helm option to specify which kubeconfig context to use. - type: str - kubeconfig_path: - description: - - Helm option to specify kubeconfig path to use. 
- type: path - aliases: [ kubeconfig ] +extends_documentation_fragment: + - community.kubernetes.helm_common_options ''' -EXAMPLES = ''' +EXAMPLES = r''' - name: Deploy latest version of Grafana chart inside monitoring namespace - helm_info: + community.kubernetes.helm_info: name: test release_namespace: monitoring ''' -RETURN = """ +RETURN = r''' status: type: complex description: A dictionary of status output @@ -102,7 +87,7 @@ status: type: str returned: always description: Dict of Values used to deploy -""" +''' import traceback @@ -113,7 +98,7 @@ except ImportError: IMP_YAML_ERR = traceback.format_exc() IMP_YAML = False -from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.basic import AnsibleModule, missing_required_lib, env_fallback module = None @@ -178,8 +163,8 @@ def main(): release_namespace=dict(type='str', required=True, aliases=['namespace']), # Helm options - kube_context=dict(type='str'), - kubeconfig_path=dict(type='path', aliases=['kubeconfig']), + kube_context=dict(type='str', aliases=['context'], fallback=(env_fallback, ['K8S_AUTH_CONTEXT'])), + kubeconfig_path=dict(type='path', aliases=['kubeconfig'], fallback=(env_fallback, ['K8S_AUTH_KUBECONFIG'])), ), supports_check_mode=True, ) @@ -212,8 +197,8 @@ def main(): if release_status is not None: module.exit_json(changed=False, status=release_status) - else: - module.exit_json(changed=False) + + module.exit_json(changed=False) if __name__ == '__main__': diff --git a/plugins/modules/helm_plugin.py b/plugins/modules/helm_plugin.py new file mode 100644 index 00000000..a212394f --- /dev/null +++ b/plugins/modules/helm_plugin.py @@ -0,0 +1,242 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2020, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: 
helm_plugin +short_description: Manage Helm plugins +version_added: "1.0.0" +author: + - Abhijeet Kasurde (@Akasurde) +requirements: + - "helm (https://github.com/helm/helm/releases)" +description: + - Install, uninstall Helm plugins. +options: + release_namespace: + description: + - Kubernetes namespace where the helm plugin should be installed. + required: true + type: str + aliases: [ namespace ] + +#Helm options + state: + description: + - If C(state=present), Helm plugin will be installed. + - If C(state=absent), Helm plugin will be uninstalled. + choices: [ absent, present ] + default: present + type: str + plugin_name: + description: + - Name of Helm plugin. + - Required only if C(state=absent). + type: str + plugin_path: + description: + - Plugin path to a plugin on your local file system or a url of a remote VCS repo. + - If plugin path from file system is provided, make sure that tar is present on remote + machine and not on Ansible controller. + - Required only if C(state=present). 
+ type: str +extends_documentation_fragment: + - community.kubernetes.helm_common_options +''' + +EXAMPLES = r''' +- name: Install Helm env plugin + community.kubernetes.helm_plugin: + plugin_path: https://github.com/adamreese/helm-env + state: present + +- name: Install Helm plugin from local filesystem + community.kubernetes.helm_plugin: + plugin_path: https://domain/path/to/plugin.tar.gz + state: present + +- name: Uninstall Helm env plugin + community.kubernetes.helm_plugin: + plugin_name: env + state: absent +''' + +RETURN = r''' +stdout: + type: str + description: Full `helm` command stdout, in case you want to display it or examine the event log + returned: always + sample: '' +stderr: + type: str + description: Full `helm` command stderr, in case you want to display it or examine the event log + returned: always + sample: '' +command: + type: str + description: Full `helm` command built by this module, in case you want to re-run the command outside the module or debug a problem. + returned: always + sample: helm plugin list ... 
+msg: + type: str + description: Info about successful command + returned: always + sample: "Plugin installed successfully" +rc: + type: int + description: Helm plugin command return code + returned: always + sample: 1 +''' + +from ansible.module_utils.basic import AnsibleModule, env_fallback + + +def main(): + module = AnsibleModule( + argument_spec=dict( + binary_path=dict(type='path'), + release_namespace=dict(type='str', required=True, aliases=['namespace']), + state=dict(type='str', default='present', choices=['present', 'absent']), + plugin_path=dict(type='str',), + plugin_name=dict(type='str',), + # Helm options + context=dict(type='str', aliases=['kube_context'], fallback=(env_fallback, ['K8S_AUTH_CONTEXT'])), + kubeconfig=dict(type='path', aliases=['kubeconfig_path'], fallback=(env_fallback, ['K8S_AUTH_KUBECONFIG'])), + ), + supports_check_mode=True, + required_if=[ + ("state", "present", ("plugin_path",)), + ("state", "absent", ("plugin_name",)), + ], + mutually_exclusive=[ + ['plugin_name', 'plugin_path'], + ], + ) + + bin_path = module.params.get('binary_path') + release_namespace = module.params.get('release_namespace') + state = module.params.get('state') + + # Helm options + kube_context = module.params.get('context') + kubeconfig_path = module.params.get('kubeconfig') + + if bin_path is not None: + helm_cmd_common = bin_path + else: + helm_cmd_common = 'helm' + + helm_cmd_common = module.get_bin_path(helm_cmd_common, required=True) + + helm_cmd_common += " plugin" + + if kube_context is not None: + helm_cmd_common += " --kube-context " + kube_context + + if kubeconfig_path is not None: + helm_cmd_common += " --kubeconfig " + kubeconfig_path + + helm_cmd_common += " --namespace=" + release_namespace + + if state == 'present': + helm_cmd_common += " install %s" % module.params.get('plugin_path') + if not module.check_mode: + rc, out, err = module.run_command(helm_cmd_common) + else: + rc, out, err = (0, '', '') + + if rc == 1 and 'plugin already 
exists' in err: + module.exit_json( + failed=False, + changed=False, + msg="Plugin already exists", + command=helm_cmd_common, + stdout=out, + stderr=err, + rc=rc + ) + elif rc == 0: + module.exit_json( + failed=False, + changed=True, + msg="Plugin installed successfully", + command=helm_cmd_common, + stdout=out, + stderr=err, + rc=rc, + ) + else: + module.fail_json( + msg="Failure when executing Helm command.", + command=helm_cmd_common, + stdout=out, + stderr=err, + rc=rc, + ) + elif state == 'absent': + plugin_name = module.params.get('plugin_name') + helm_plugin_list = helm_cmd_common + " list" + rc, out, err = module.run_command(helm_plugin_list) + if rc != 0 or (out == '' and err == ''): + module.fail_json( + msg="Failed to get Helm plugin info", + command=helm_plugin_list, + stdout=out, + stderr=err, + rc=rc, + ) + + if out: + found = False + for line in out.splitlines(): + if line.startswith("NAME"): + continue + name, dummy, dummy = line.split('\t', 3) + name = name.strip() + if name == plugin_name: + found = True + break + if found: + helm_uninstall_cmd = "%s uninstall %s" % (helm_cmd_common, plugin_name) + if not module.check_mode: + rc, out, err = module.run_command(helm_uninstall_cmd) + else: + rc, out, err = (0, '', '') + + if rc == 0: + module.exit_json( + changed=True, + msg="Plugin uninstalled successfully", + command=helm_uninstall_cmd, + stdout=out, + stderr=err, + rc=rc + ) + module.fail_json( + msg="Failed to get Helm plugin uninstall", + command=helm_uninstall_cmd, + stdout=out, + stderr=err, + rc=rc, + ) + else: + module.exit_json( + failed=False, + changed=False, + msg="Plugin not found or is already uninstalled", + command=helm_plugin_list, + stdout=out, + stderr=err, + rc=rc + ) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/helm_plugin_info.py b/plugins/modules/helm_plugin_info.py new file mode 100644 index 00000000..951f1ed9 --- /dev/null +++ b/plugins/modules/helm_plugin_info.py @@ -0,0 +1,167 @@ 
+#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2020, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: helm_plugin_info +short_description: Gather information about Helm plugins +version_added: "1.0.0" +author: + - Abhijeet Kasurde (@Akasurde) +requirements: + - "helm (https://github.com/helm/helm/releases)" +description: + - Gather information about Helm plugins installed in namespace. +options: + release_namespace: + description: + - Kubernetes namespace where the helm plugins are installed. + required: true + type: str + aliases: [ namespace ] + +#Helm options + plugin_name: + description: + - Name of Helm plugin, to gather particular plugin info. + type: str +extends_documentation_fragment: + - community.kubernetes.helm_common_options +''' + +EXAMPLES = r''' +- name: Gather Helm plugin info + community.kubernetes.helm_plugin_info: + +- name: Gather Helm plugin info + community.kubernetes.helm_plugin_info: + plugin_name: env +''' + +RETURN = r''' +stdout: + type: str + description: Full `helm` command stdout, in case you want to display it or examine the event log + returned: always + sample: '' +stderr: + type: str + description: Full `helm` command stderr, in case you want to display it or examine the event log + returned: always + sample: '' +command: + type: str + description: Full `helm` command built by this module, in case you want to re-run the command outside the module or debug a problem. + returned: always + sample: helm plugin list ... +plugin_list: + type: list + description: Helm plugin dict inside a list + returned: always + sample: { + "name": "env", + "version": "0.1.0", + "description": "Print out the helm environment." 
+ } +rc: + type: int + description: Helm plugin command return code + returned: always + sample: 1 +''' + +from ansible.module_utils.basic import AnsibleModule, env_fallback + + +def main(): + module = AnsibleModule( + argument_spec=dict( + binary_path=dict(type='path'), + release_namespace=dict(type='str', required=True, aliases=['namespace']), + plugin_name=dict(type='str',), + # Helm options + context=dict(type='str', aliases=['kube_context'], fallback=(env_fallback, ['K8S_AUTH_CONTEXT'])), + kubeconfig=dict(type='path', aliases=['kubeconfig_path'], fallback=(env_fallback, ['K8S_AUTH_KUBECONFIG'])), + ), + supports_check_mode=True, + ) + + bin_path = module.params.get('binary_path') + release_namespace = module.params.get('release_namespace') + + # Helm options + kube_context = module.params.get('context') + kubeconfig_path = module.params.get('kubeconfig') + + if bin_path is not None: + helm_cmd_common = bin_path + else: + helm_cmd_common = 'helm' + + helm_cmd_common = module.get_bin_path(helm_cmd_common, required=True) + + helm_cmd_common += " plugin" + + if kube_context is not None: + helm_cmd_common += " --kube-context " + kube_context + + if kubeconfig_path is not None: + helm_cmd_common += " --kubeconfig " + kubeconfig_path + + helm_cmd_common += " --namespace=" + release_namespace + + plugin_name = module.params.get('plugin_name') + helm_plugin_list = helm_cmd_common + " list" + rc, out, err = module.run_command(helm_plugin_list) + if rc != 0 or (out == '' and err == ''): + module.fail_json( + msg="Failed to get Helm plugin info", + command=helm_plugin_list, + stdout=out, + stderr=err, + rc=rc, + ) + + plugin_list = [] + if out: + for line in out.splitlines(): + if line.startswith("NAME"): + continue + name, version, description = line.split('\t', 3) + name = name.strip() + version = version.strip() + description = description.strip() + if plugin_name is None: + plugin_list.append({ + 'name': name, + 'version': version, + 'description': description, + }) 
+ continue + + if plugin_name == name: + plugin_list.append({ + 'name': name, + 'version': version, + 'description': description, + }) + break + + module.exit_json( + changed=True, + command=helm_plugin_list, + stdout=out, + stderr=err, + rc=rc, + plugin_list=plugin_list, + ) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/helm_repository.py b/plugins/modules/helm_repository.py index 908c0418..cc0cadbb 100644 --- a/plugins/modules/helm_repository.py +++ b/plugins/modules/helm_repository.py @@ -1,21 +1,20 @@ #!/usr/bin/python # -*- coding: utf-8 -*- -# Copyright: Ansible Project +# Copyright: (c) 2020, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: helm_repository short_description: Add and remove Helm repository +version_added: "0.11.0" + author: - Lucas Boisserie (@LucasBoisserie) @@ -24,7 +23,7 @@ requirements: - "yaml (https://pypi.org/project/PyYAML/)" description: - - Manage Helm repositories + - Manage Helm repositories. options: binary_path: @@ -60,21 +59,21 @@ options: repo_state: choices: ['present', 'absent'] description: - - Desirated state of repositoriy. + - Desirated state of repository. 
required: false default: present aliases: [ state ] type: str ''' -EXAMPLES = ''' +EXAMPLES = r''' - name: Add default repository - helm_repository: + community.kubernetes.helm_repository: name: stable repo_url: https://kubernetes-charts.storage.googleapis.com ''' -RETURN = ''' # ''' +RETURN = r''' # ''' import traceback diff --git a/plugins/modules/k8s.py b/plugins/modules/k8s.py index 13cb985a..23cf2f45 100644 --- a/plugins/modules/k8s.py +++ b/plugins/modules/k8s.py @@ -9,11 +9,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - -DOCUMENTATION = ''' +DOCUMENTATION = r''' module: k8s @@ -28,7 +24,7 @@ description: - Pass the object definition from a source file or inline. See examples for reading files and using Jinja templates or vault-encrypted files. - Access to the full range of K8s APIs. - - Use the M(k8s_info) module to obtain a list of items about an object of type C(kind) + - Use the M(community.kubernetes.k8s_info) module to obtain a list of items about an object of type C(kind) - Authenticate using either a config file, certificates, password or token. - Supports check mode. @@ -115,7 +111,7 @@ options: validate: description: - how (if at all) to validate the resource definition against the kubernetes schema. - Requires the kubernetes-validate python module + Requires the kubernetes-validate python module and openshift >= 0.8.0 suboptions: fail_on_error: description: whether to fail on validation errors. 
@@ -136,12 +132,14 @@ options: - The full definition of an object is needed to generate the hash - this means that deleting an object created with append_hash will only work if the same object is passed with state=absent (alternatively, just use state=absent with the name including the generated hash and append_hash=no) + - Requires openshift >= 0.7.2 type: bool apply: description: - C(apply) compares the desired resource definition with the previously supplied resource definition, ignoring properties that are automatically generated - C(apply) works better with Services than 'force=yes' + - Requires openshift >= 0.9.2 - mutually exclusive with C(merge_type) type: bool @@ -151,16 +149,16 @@ requirements: - "PyYAML >= 3.11" ''' -EXAMPLES = ''' +EXAMPLES = r''' - name: Create a k8s namespace - k8s: + community.kubernetes.k8s: name: testing api_version: v1 kind: Namespace state: present - name: Create a Service object from an inline definition - k8s: + community.kubernetes.k8s: state: present definition: apiVersion: v1 @@ -182,7 +180,7 @@ EXAMPLES = ''' port: 8000 - name: Remove an existing Service object - k8s: + community.kubernetes.k8s: state: absent api_version: v1 kind: Service @@ -192,31 +190,31 @@ EXAMPLES = ''' # Passing the object definition from a file - name: Create a Deployment by reading the definition from a local file - k8s: + community.kubernetes.k8s: state: present src: /testing/deployment.yml - name: >- Read definition file from the Ansible controller file system. If the definition file has been encrypted with Ansible Vault it will automatically be decrypted. 
- k8s: + community.kubernetes.k8s: state: present definition: "{{ lookup('file', '/testing/deployment.yml') | from_yaml }}" - name: Read definition file from the Ansible controller file system after Jinja templating - k8s: + community.kubernetes.k8s: state: present definition: "{{ lookup('template', '/testing/deployment.yml') | from_yaml }}" - name: fail on validation errors - k8s: + community.kubernetes.k8s: state: present definition: "{{ lookup('template', '/testing/deployment.yml') | from_yaml }}" validate: fail_on_error: yes - name: warn on validation errors, check for unexpected properties - k8s: + community.kubernetes.k8s: state: present definition: "{{ lookup('template', '/testing/deployment.yml') | from_yaml }}" validate: @@ -224,7 +222,7 @@ EXAMPLES = ''' strict: yes ''' -RETURN = ''' +RETURN = r''' result: description: - The created, patched, or otherwise present object. Will be empty in the case of a deletion. diff --git a/plugins/modules/k8s_auth.py b/plugins/modules/k8s_auth.py index 4184ff40..3af297ba 100644 --- a/plugins/modules/k8s_auth.py +++ b/plugins/modules/k8s_auth.py @@ -9,11 +9,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - -DOCUMENTATION = ''' +DOCUMENTATION = r''' module: k8s_auth @@ -79,7 +75,7 @@ requirements: - requests-oauthlib ''' -EXAMPLES = ''' +EXAMPLES = r''' - hosts: localhost module_defaults: group/k8s: @@ -92,7 +88,7 @@ EXAMPLES = ''' - include_vars: k8s_passwords.yml - name: Log in (obtain access token) - k8s_auth: + community.kubernetes.k8s_auth: username: admin password: "{{ k8s_admin_password }}" register: k8s_auth_results @@ -100,7 +96,7 @@ EXAMPLES = ''' # Previous task provides the token/api_key, while all other parameters # are taken from module_defaults - name: Get a list of all pods from any namespace - k8s_info: + community.kubernetes.k8s_info: api_key: "{{ 
k8s_auth_results.k8s_auth.api_key }}" kind: Pod register: pod_list @@ -108,7 +104,7 @@ EXAMPLES = ''' always: - name: If login succeeded, try to log out (revoke access token) when: k8s_auth_results.k8s_auth.api_key is defined - k8s_auth: + community.kubernetes.k8s_auth: state: absent api_key: "{{ k8s_auth_results.k8s_auth.api_key }}" ''' @@ -116,7 +112,7 @@ EXAMPLES = ''' # Returned value names need to match k8s modules parameter names, to make it # easy to pass returned values of k8s_auth to other k8s modules. # Discussion: https://github.com/ansible/ansible/pull/50807#discussion_r248827899 -RETURN = ''' +RETURN = r''' k8s_auth: description: Kubernetes authentication facts. returned: success @@ -255,7 +251,7 @@ class KubernetesAuthModule(AnsibleModule): self.openshift_auth_endpoint = oauth_info['authorization_endpoint'] self.openshift_token_endpoint = oauth_info['token_endpoint'] - except Exception as e: + except Exception: self.fail_json(msg="Something went wrong discovering OpenShift OAuth details.", exception=traceback.format_exc()) @@ -315,7 +311,7 @@ class KubernetesAuthModule(AnsibleModule): "kind": "DeleteOptions" } - ret = requests.delete(url, headers=headers, json=json, verify=self.con_verify_ca) + requests.delete(url, headers=headers, json=json, verify=self.con_verify_ca) # Ignore errors, the token will time out eventually anyway def fail(self, msg=None): diff --git a/plugins/modules/k8s_exec.py b/plugins/modules/k8s_exec.py index 649b58ef..e540b9b6 100644 --- a/plugins/modules/k8s_exec.py +++ b/plugins/modules/k8s_exec.py @@ -9,16 +9,14 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - -DOCUMENTATION = ''' +DOCUMENTATION = r''' module: k8s_exec short_description: Execute command in Pod +version_added: "0.10.0" + author: "Tristan de Cacqueray (@tristanC)" description: @@ -32,10 +30,13 @@ requirements: - 
"openshift == 0.4.3" - "PyYAML >= 3.11" +notes: +- Return code C(return_code) for the command executed is added in output in version 1.0.0. options: proxy: description: - - The URL of an HTTP proxy to use for the connection. Can also be specified via K8S_AUTH_PROXY environment variable. + - The URL of an HTTP proxy to use for the connection. + - Can also be specified via I(K8S_AUTH_PROXY) environment variable. - Please note that this module does not pick up typical proxy settings from the environment (e.g. HTTP_PROXY). type: str namespace: @@ -50,7 +51,8 @@ options: required: yes container: description: - - The name of the container in the pod to connect to. Defaults to only container if there is only one container in the pod. + - The name of the container in the pod to connect to. + - Defaults to only container if there is only one container in the pod. type: str required: no command: @@ -60,15 +62,28 @@ options: required: yes ''' -EXAMPLES = ''' +EXAMPLES = r''' - name: Execute a command - k8s_exec: + community.kubernetes.k8s_exec: namespace: myproject pod: zuul-scheduler command: zuul-scheduler full-reconfigure + +- name: Check RC status of command executed + community.kubernetes.k8s_exec: + namespace: myproject + pod: busybox-test + command: cmd_with_non_zero_exit_code + register: command_status + ignore_errors: True + +- name: Check last command status + debug: + msg: "cmd failed" + when: command_status.return_code != 0 ''' -RETURN = ''' +RETURN = r''' result: description: - The command object @@ -87,12 +102,25 @@ result: stderr_lines: description: The command stderr type: str + return_code: + description: The command status code + type: int ''' import copy import shlex -from ansible_collections.community.kubernetes.plugins.module_utils.common import KubernetesAnsibleModule -from ansible_collections.community.kubernetes.plugins.module_utils.common import AUTH_ARG_SPEC + +try: + import yaml +except ImportError: + # ImportError are managed by the common module 
already. + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +from ansible_collections.community.kubernetes.plugins.module_utils.common import ( + K8sAnsibleMixin, AUTH_ARG_SPEC +) try: from kubernetes.client.apis import core_v1_api @@ -102,7 +130,18 @@ except ImportError: pass -class KubernetesExecCommand(KubernetesAnsibleModule): +class KubernetesExecCommand(K8sAnsibleMixin): + + def __init__(self): + module = AnsibleModule( + argument_spec=self.argspec, + supports_check_mode=True, + ) + self.module = module + self.params = self.module.params + self.fail_json = self.module.fail_json + super(KubernetesExecCommand, self).__init__() + @property def argspec(self): spec = copy.deepcopy(AUTH_ARG_SPEC) @@ -112,36 +151,54 @@ class KubernetesExecCommand(KubernetesAnsibleModule): spec['command'] = dict(type='str', required=True) return spec + def execute_module(self): + # Load kubernetes.client.Configuration + self.get_api_client() + api = core_v1_api.CoreV1Api() + + # hack because passing the container as None breaks things + optional_kwargs = {} + if self.params.get('container'): + optional_kwargs['container'] = self.params['container'] + try: + resp = stream( + api.connect_get_namespaced_pod_exec, + self.params["pod"], + self.params["namespace"], + command=shlex.split(self.params["command"]), + stdout=True, + stderr=True, + stdin=False, + tty=False, + _preload_content=False, **optional_kwargs) + except Exception as e: + self.module.fail_json(msg="Failed to execute on pod %s" + " due to : %s" % (self.params.get('pod'), to_native(e))) + stdout, stderr, rc = [], [], 0 + while resp.is_open(): + resp.update(timeout=1) + if resp.peek_stdout(): + stdout.append(resp.read_stdout()) + if resp.peek_stderr(): + stderr.append(resp.read_stderr()) + err = resp.read_channel(3) + err = yaml.safe_load(err) + if err['status'] == 'Success': + rc = 0 + else: + rc = int(err['details']['causes'][0]['message']) + + 
self.module.exit_json( + # Some command might change environment, but ultimately failing at end + changed=True, + stdout="".join(stdout), + stderr="".join(stderr), + return_code=rc + ) + def main(): - module = KubernetesExecCommand() - # Load kubernetes.client.Configuration - module.get_api_client() - api = core_v1_api.CoreV1Api() - - # hack because passing the container as None breaks things - optional_kwargs = {} - if module.params.get('container'): - optional_kwargs['container'] = module.params['container'] - resp = stream( - api.connect_get_namespaced_pod_exec, - module.params["pod"], - module.params["namespace"], - command=shlex.split(module.params["command"]), - stdout=True, - stderr=True, - stdin=False, - tty=False, - _preload_content=False, **optional_kwargs) - stdout, stderr = [], [] - while resp.is_open(): - resp.update(timeout=1) - if resp.peek_stdout(): - stdout.append(resp.read_stdout()) - if resp.peek_stderr(): - stderr.append(resp.read_stderr()) - module.exit_json( - changed=True, stdout="".join(stdout), stderr="".join(stderr)) + KubernetesExecCommand().execute_module() if __name__ == '__main__': diff --git a/plugins/modules/k8s_info.py b/plugins/modules/k8s_info.py index 4498cab8..219c4eec 100644 --- a/plugins/modules/k8s_info.py +++ b/plugins/modules/k8s_info.py @@ -9,11 +9,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - -DOCUMENTATION = ''' +DOCUMENTATION = r''' module: k8s_info short_description: Describe Kubernetes (K8s) objects @@ -29,31 +25,15 @@ description: - This module was called C(k8s_facts) before Ansible 2.9. The usage did not change. options: - api_version: - description: - - Use to specify the API version. in conjunction with I(kind), I(name), and I(namespace) to identify a - specific object. 
- default: v1 - aliases: - - api - - version - type: str kind: description: - - Use to specify an object model. Use in conjunction with I(api_version), I(name), and I(namespace) to identify a - specific object. - required: yes - type: str - name: - description: - - Use to specify an object name. Use in conjunction with I(api_version), I(kind) and I(namespace) to identify a - specific object. - type: str - namespace: - description: - - Use to specify an object namespace. Use in conjunction with I(api_version), I(kind), and I(name) - to identify a specific object. + - Use to specify an object model. + - Use to create, delete, or discover an object without providing a full resource definition. + - Use in conjunction with I(api_version), I(name), and I(namespace) to identify a specific object. + - If I(resource definition) is provided, the I(kind) value from the I(resource_definition) + will override this option. type: str + required: True label_selectors: description: List of label selectors to use to filter results type: list @@ -65,6 +45,7 @@ options: extends_documentation_fragment: - community.kubernetes.k8s_auth_options + - community.kubernetes.k8s_name_options requirements: - "python >= 2.7" @@ -72,9 +53,9 @@ requirements: - "PyYAML >= 3.11" ''' -EXAMPLES = ''' +EXAMPLES = r''' - name: Get an existing Service object - k8s_info: + community.kubernetes.k8s_info: api_version: v1 kind: Service name: web @@ -82,32 +63,45 @@ EXAMPLES = ''' register: web_service - name: Get a list of all service objects - k8s_info: + community.kubernetes.k8s_info: api_version: v1 kind: Service namespace: testing register: service_list - name: Get a list of all pods from any namespace - k8s_info: + community.kubernetes.k8s_info: kind: Pod register: pod_list - name: Search for all Pods labelled app=web - k8s_info: + community.kubernetes.k8s_info: kind: Pod label_selectors: - app = web - tier in (dev, test) +- name: Using vars while using label_selectors + community.kubernetes.k8s_info: + 
kind: Pod + label_selectors: + - "app = {{ app_label_web }}" + vars: + app_label_web: web + - name: Search for all running pods - k8s_info: + community.kubernetes.k8s_info: kind: Pod field_selectors: - status.phase=Running + +- name: List custom objects created using CRD + community.kubernetes.k8s_info: + kind: MyCustomObject + api_version: "stable.example.com/v1" ''' -RETURN = ''' +RETURN = r''' resources: description: - The object(s) that exists @@ -136,19 +130,25 @@ resources: type: dict ''' - -from ansible_collections.community.kubernetes.plugins.module_utils.common import KubernetesAnsibleModule, AUTH_ARG_SPEC import copy +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.kubernetes.plugins.module_utils.common import ( + K8sAnsibleMixin, AUTH_ARG_SPEC) -class KubernetesInfoModule(KubernetesAnsibleModule): + +class KubernetesInfoModule(K8sAnsibleMixin): def __init__(self, *args, **kwargs): - KubernetesAnsibleModule.__init__(self, *args, - supports_check_mode=True, - **kwargs) - if self._name == 'k8s_facts': - self.deprecate("The 'k8s_facts' module has been renamed to 'k8s_info'", version='2.13') + module = AnsibleModule( + argument_spec=self.argspec, + supports_check_mode=True, + ) + self.module = module + self.params = self.module.params + self.fail_json = self.module.fail_json + self.exit_json = self.module.exit_json + super(KubernetesInfoModule, self).__init__() def execute_module(self): self.client = self.get_api_client() diff --git a/plugins/modules/k8s_log.py b/plugins/modules/k8s_log.py index 02f08640..e7b75711 100644 --- a/plugins/modules/k8s_log.py +++ b/plugins/modules/k8s_log.py @@ -9,15 +9,13 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - -DOCUMENTATION = ''' +DOCUMENTATION = r''' module: k8s_log short_description: Fetch logs from Kubernetes resources 
+version_added: "0.10.0" + author: - "Fabian von Feilitzsch (@fabianvf)" @@ -28,40 +26,25 @@ description: - Analogous to `kubectl logs` or `oc logs` extends_documentation_fragment: - community.kubernetes.k8s_auth_options + - community.kubernetes.k8s_name_options options: - api_version: - description: - - Use to specify the API version. in conjunction with I(kind), I(name), and I(namespace) to identify a - specific object. - - If using I(label_selector), cannot be overridden - default: v1 - aliases: - - api - - version - type: str kind: description: - - Use to specify an object model. Use in conjunction with I(api_version), I(name), and I(namespace) to identify a - specific object. - - If using I(label_selector), cannot be overridden - required: no + - Use to specify an object model. + - Use in conjunction with I(api_version), I(name), and I(namespace) to identify a specific object. + - If using I(label_selectors), cannot be overridden. + type: str default: Pod - type: str - namespace: - description: - - Use to specify an object namespace. Use in conjunction with I(api_version), I(kind), and I(name) - to identify a specfic object. - type: str name: description: - - Use to specify an object name. Use in conjunction with I(api_version), I(kind) and I(namespace) to identify a - specific object. - - Only one of I(name) or I(label_selector) may be provided + - Use to specify an object name. + - Use in conjunction with I(api_version), I(kind) and I(namespace) to identify a specific object. + - Only one of I(name) or I(label_selectors) may be provided. type: str label_selectors: description: - List of label selectors to use to filter results - - Only one of I(name) or I(label_selector) may be provided + - Only one of I(name) or I(label_selectors) may be provided. 
type: list elements: str container: @@ -78,16 +61,16 @@ requirements: - "PyYAML >= 3.11" ''' -EXAMPLES = ''' +EXAMPLES = r''' - name: Get a log from a Pod - k8s_log: + community.kubernetes.k8s_log: name: example-1 namespace: testing register: log # This will get the log from the first Pod found matching the selector - name: Log a Pod matching a label selector - k8s_log: + community.kubernetes.k8s_log: namespace: testing label_selectors: - app=example @@ -95,7 +78,7 @@ EXAMPLES = ''' # This will get the log from a single Pod managed by this Deployment - name: Get a log from a Deployment - k8s_log: + community.kubernetes.k8s_log: api_version: apps/v1 kind: Deployment namespace: testing @@ -104,7 +87,7 @@ EXAMPLES = ''' # This will get the log from a single Pod managed by this DeploymentConfig - name: Get a log from a DeploymentConfig - k8s_log: + community.kubernetes.k8s_log: api_version: apps.openshift.io/v1 kind: DeploymentConfig namespace: testing @@ -112,7 +95,7 @@ EXAMPLES = ''' register: log ''' -RETURN = ''' +RETURN = r''' log: type: str description: @@ -128,28 +111,34 @@ log_lines: import copy +from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.six import PY2 -from ansible_collections.community.kubernetes.plugins.module_utils.common import KubernetesAnsibleModule -from ansible_collections.community.kubernetes.plugins.module_utils.common import AUTH_ARG_SPEC +from ansible_collections.community.kubernetes.plugins.module_utils.common import ( + K8sAnsibleMixin, AUTH_ARG_SPEC, NAME_ARG_SPEC) -class KubernetesLogModule(KubernetesAnsibleModule): +class KubernetesLogModule(K8sAnsibleMixin): - def __init__(self, *args, **kwargs): - KubernetesAnsibleModule.__init__(self, *args, - supports_check_mode=True, - **kwargs) + def __init__(self): + module = AnsibleModule( + argument_spec=self.argspec, + supports_check_mode=True, + ) + self.module = module + self.params = self.module.params + self.fail_json = self.module.fail_json + self.fail = 
self.module.fail_json + self.exit_json = self.module.exit_json + super(KubernetesLogModule, self).__init__() @property def argspec(self): args = copy.deepcopy(AUTH_ARG_SPEC) + args.update(NAME_ARG_SPEC) args.update( dict( - kind=dict(default='Pod'), - api_version=dict(default='v1', aliases=['api', 'version']), - name=dict(), - namespace=dict(), + kind=dict(type='str', default='Pod'), container=dict(), label_selectors=dict(type='list', elements='str', default=[]), ) @@ -158,6 +147,7 @@ class KubernetesLogModule(KubernetesAnsibleModule): def execute_module(self): name = self.params.get('name') + namespace = self.params.get('namespace') label_selector = ','.join(self.params.get('label_selectors', {})) if name and label_selector: self.fail(msg='Only one of name or label_selectors can be provided') @@ -167,16 +157,16 @@ class KubernetesLogModule(KubernetesAnsibleModule): v1_pods = self.find_resource('Pod', 'v1', fail=True) if 'log' not in resource.subresources: - if not self.params.get('name'): + if not name: self.fail(msg='name must be provided for resources that do not support the log subresource') - instance = resource.get(name=self.params['name'], namespace=self.params.get('namespace')) + instance = resource.get(name=name, namespace=namespace) label_selector = ','.join(self.extract_selectors(instance)) resource = v1_pods if label_selector: - instances = v1_pods.get(namespace=self.params['namespace'], label_selector=label_selector) + instances = v1_pods.get(namespace=namespace, label_selector=label_selector) if not instances.items: - self.fail(msg='No pods in namespace {0} matched selector {1}'.format(self.params['namespace'], label_selector)) + self.fail(msg='No pods in namespace {0} matched selector {1}'.format(namespace, label_selector)) # This matches the behavior of kubectl when logging pods via a selector name = instances.items[0].metadata.name resource = v1_pods @@ -187,7 +177,7 @@ class KubernetesLogModule(KubernetesAnsibleModule): log = 
serialize_log(resource.log.get( name=name, - namespace=self.params.get('namespace'), + namespace=namespace, serialize=False, **kwargs )) diff --git a/plugins/modules/k8s_scale.py b/plugins/modules/k8s_scale.py index ad4cbd18..9e63366a 100644 --- a/plugins/modules/k8s_scale.py +++ b/plugins/modules/k8s_scale.py @@ -9,11 +9,8 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} -DOCUMENTATION = ''' +DOCUMENTATION = r''' module: k8s_scale @@ -39,9 +36,9 @@ requirements: - "PyYAML >= 3.11" ''' -EXAMPLES = ''' +EXAMPLES = r''' - name: Scale deployment up, and extend timeout - k8s_scale: + community.kubernetes.k8s_scale: api_version: v1 kind: Deployment name: elastic @@ -50,7 +47,7 @@ EXAMPLES = ''' wait_timeout: 60 - name: Scale deployment down when current replicas match - k8s_scale: + community.kubernetes.k8s_scale: api_version: v1 kind: Deployment name: elastic @@ -59,7 +56,7 @@ EXAMPLES = ''' replicas: 2 - name: Increase job parallelism - k8s_scale: + community.kubernetes.k8s_scale: api_version: batch/v1 kind: job name: pi-with-timeout @@ -69,25 +66,25 @@ EXAMPLES = ''' # Match object using local file or inline definition - name: Scale deployment based on a file from the local filesystem - k8s_scale: + community.kubernetes.k8s_scale: src: /myproject/elastic_deployment.yml replicas: 3 wait: no - name: Scale deployment based on a template output - k8s_scale: + community.kubernetes.k8s_scale: resource_definition: "{{ lookup('template', '/myproject/elastic_deployment.yml') | from_yaml }}" replicas: 3 wait: no - name: Scale deployment based on a file from the Ansible controller filesystem - k8s_scale: + community.kubernetes.k8s_scale: resource_definition: "{{ lookup('file', '/myproject/elastic_deployment.yml') | from_yaml }}" replicas: 3 wait: no ''' -RETURN = ''' +RETURN = r''' result: description: - If a change was made, will 
return the patched object, otherwise returns the existing object. @@ -114,6 +111,11 @@ result: description: Current status details for the object. returned: success type: complex + duration: + description: elapsed time of task in seconds + returned: when C(wait) is true + type: int + sample: 48 ''' from ansible_collections.community.kubernetes.plugins.module_utils.scale import KubernetesAnsibleScaleModule diff --git a/plugins/modules/k8s_service.py b/plugins/modules/k8s_service.py index 97d3ce0c..932825f7 100644 --- a/plugins/modules/k8s_service.py +++ b/plugins/modules/k8s_service.py @@ -9,11 +9,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - -DOCUMENTATION = ''' +DOCUMENTATION = r''' module: k8s_service @@ -26,43 +22,10 @@ description: extends_documentation_fragment: - community.kubernetes.k8s_auth_options + - community.kubernetes.k8s_resource_options + - community.kubernetes.k8s_state_options options: - resource_definition: - description: - - A partial YAML definition of the Service object being created/updated. Here you can define Kubernetes - Service Resource parameters not covered by this module's parameters. - - "NOTE: I(resource_definition) has lower priority than module parameters. If you try to define e.g. - I(metadata.namespace) here, that value will be ignored and I(metadata) used instead." - aliases: - - definition - - inline - type: dict - src: - description: - - "Provide a path to a file containing a valid YAML definition of an object dated. Mutually - exclusive with I(resource_definition). NOTE: I(kind), I(api_version), I(resource_name), and I(namespace) - will be overwritten by corresponding values found in the configuration read in from the I(src) file." - - Reads from the local file system. 
To read from the Ansible controller's file system, use the file lookup - plugin or template lookup plugin, combined with the from_yaml filter, and pass the result to - I(resource_definition). See Examples below. - type: path - state: - description: - - Determines if an object should be created, patched, or deleted. When set to C(present), an object will be - created, if it does not already exist. If set to C(absent), an existing object will be deleted. If set to - C(present), an existing object will be patched, if its attributes differ from those specified using - module options and I(resource_definition). - default: present - choices: - - present - - absent - type: str - force: - description: - - If set to C(True), and I(state) is C(present), an existing object will be replaced. - default: false - type: bool merge_type: description: - Whether to override the default patch merge approach with a specific type. By default, the strategic @@ -125,9 +88,9 @@ requirements: - openshift >= 0.6.2 ''' -EXAMPLES = ''' +EXAMPLES = r''' - name: Expose https port with ClusterIP - k8s_service: + community.kubernetes.k8s_service: state: present name: test-https namespace: default @@ -138,7 +101,7 @@ EXAMPLES = ''' key: special - name: Expose https port with ClusterIP using spec - k8s_service: + community.kubernetes.k8s_service: state: present name: test-https namespace: default @@ -151,7 +114,7 @@ EXAMPLES = ''' key: special ''' -RETURN = ''' +RETURN = r''' result: description: - The created, patched, or otherwise present Service object. Will be empty in the case of a deletion. 
@@ -185,7 +148,7 @@ import traceback from collections import defaultdict -from ansible_collections.community.kubernetes.plugins.module_utils.common import AUTH_ARG_SPEC, COMMON_ARG_SPEC +from ansible_collections.community.kubernetes.plugins.module_utils.common import AUTH_ARG_SPEC, COMMON_ARG_SPEC, RESOURCE_ARG_SPEC from ansible_collections.community.kubernetes.plugins.module_utils.raw import KubernetesRawModule @@ -194,25 +157,10 @@ SERVICE_ARG_SPEC = { 'type': 'bool', 'default': False, }, - 'state': { - 'default': 'present', - 'choices': ['present', 'absent'], - }, - 'force': { - 'type': 'bool', - 'default': False, - }, - 'resource_definition': { - 'type': 'dict', - 'aliases': ['definition', 'inline'] - }, 'name': {'required': True}, 'namespace': {'required': True}, 'merge_type': {'type': 'list', 'elements': 'str', 'choices': ['json', 'merge', 'strategic-merge']}, 'selector': {'type': 'dict'}, - 'src': { - 'type': 'path', - }, 'type': { 'type': 'str', 'choices': [ @@ -244,6 +192,8 @@ class KubernetesService(KubernetesRawModule): def argspec(self): """ argspec property builder """ argument_spec = copy.deepcopy(AUTH_ARG_SPEC) + argument_spec.update(COMMON_ARG_SPEC) + argument_spec.update(RESOURCE_ARG_SPEC) argument_spec.update(SERVICE_ARG_SPEC) return argument_spec diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 00000000..29c924b8 --- /dev/null +++ b/setup.cfg @@ -0,0 +1,3 @@ +[flake8] +max-line-length = 160 +ignore = W503,E402 diff --git a/tests/integration/targets/kubernetes/library/test_tempfile.py b/tests/integration/targets/kubernetes/library/test_tempfile.py index 820f5f59..c89f5a31 100644 --- a/tests/integration/targets/kubernetes/library/test_tempfile.py +++ b/tests/integration/targets/kubernetes/library/test_tempfile.py @@ -8,9 +8,6 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} DOCUMENTATION = ''' 
--- @@ -21,8 +18,8 @@ short_description: Creates temporary files and directories description: - The C(test_tempfile) module creates temporary files and directories. C(mktemp) command takes different parameters on various systems, this module helps to avoid troubles related to that. Files/directories created by module are accessible only by creator. In case you need to make them world-accessible - you need to use M(file) module. - - For Windows targets, use the M(win_tempfile) module instead. + you need to use M(ansible.builtin.file) module. + - For Windows targets, use the M(ansible.builtin.win_tempfile) module instead. options: state: diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index 0fa8c1c3..3f72c0e6 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -1,5 +1,8 @@ +plugins/modules/helm.py validate-modules:parameter-type-not-in-doc +plugins/modules/helm_info.py validate-modules:parameter-type-not-in-doc plugins/modules/k8s.py validate-modules:parameter-type-not-in-doc plugins/modules/k8s.py validate-modules:return-syntax-error plugins/modules/k8s_scale.py validate-modules:parameter-type-not-in-doc plugins/modules/k8s_scale.py validate-modules:return-syntax-error plugins/modules/k8s_service.py validate-modules:return-syntax-error +plugins/modules/k8s_service.py validate-modules:parameter-type-not-in-doc diff --git a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt new file mode 100644 index 00000000..326af051 --- /dev/null +++ b/tests/sanity/ignore-2.11.txt @@ -0,0 +1,6 @@ +plugins/modules/k8s.py validate-modules:parameter-type-not-in-doc +plugins/modules/k8s.py validate-modules:return-syntax-error +plugins/modules/k8s_scale.py validate-modules:parameter-type-not-in-doc +plugins/modules/k8s_scale.py validate-modules:return-syntax-error +plugins/modules/k8s_service.py validate-modules:return-syntax-error +plugins/modules/k8s_service.py validate-modules:parameter-type-not-in-doc diff --git 
a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index bbd327f7..9739b5d8 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -1,2 +1,5 @@ +plugins/modules/helm.py validate-modules:parameter-type-not-in-doc +plugins/modules/helm_info.py validate-modules:parameter-type-not-in-doc plugins/modules/k8s.py validate-modules:parameter-type-not-in-doc plugins/modules/k8s_scale.py validate-modules:parameter-type-not-in-doc +plugins/modules/k8s_service.py validate-modules:parameter-type-not-in-doc diff --git a/utils/downstream.sh b/utils/downstream.sh new file mode 100755 index 00000000..ce7d539a --- /dev/null +++ b/utils/downstream.sh @@ -0,0 +1,223 @@ +#!/bin/bash -eu + +# Script to dual-home the upstream and downstream Collection in a single repo +# +# This script will build or test a downstream collection, removing any +# upstream components that will not ship in the downstream release +# +# NOTES: +# - All functions are prefixed with f_ so it's obvious where they come +# from when in use throughout the script + +DOWNSTREAM_VERSION="1.0.0" +KEEP_DOWNSTREAM_TMPDIR="${KEEP_DOWNSTREAM_TMPDIR:-''}" + + +f_log_info() +{ + printf "%s:LOG:INFO: %s\n" "${0}" "${1}\n" +} + +f_prep() +{ + f_log_info "${FUNCNAME[0]}" + # Array of excluded files from downstream build (relative path) + _file_exclude=( + ) + + # Files to copy downstream (relative repo root dir path) + _file_manifest=( + CHANGELOG.rst + galaxy.yml + LICENSE + README.md + Makefile + setup.cfg + .yamllint + ) + + # Directories to recursively copy downstream (relative repo root dir path) + _dir_manifest=( + changelogs + meta + plugins + tests + molecule + ) + + # Temp build dir + _tmp_dir=$(mktemp -d) + _build_dir="${_tmp_dir}/ansible_collections/kubernetes/core" + mkdir -p "${_build_dir}" +} + +f_show_help() +{ + printf "Usage: downstream.sh [OPTION]\n" + printf "\t-s\t\tCreate a temporary downstream release and perform sanity tests.\n" + printf "\t-i\t\tCreate a temporary downstream 
release and perform integration tests.\n" + printf "\t-m\t\tCreate a temporary downstream release and perform molecule tests.\n" + printf "\t-b\t\tCreate a downstream release and stage for release.\n" + printf "\t-r\t\tCreate a downstream release and publish release.\n" +} + +f_text_sub() +{ + # Switch FQCN and dependent components + sed -i "s/community-kubernetes/kubernetes-core/" "${_build_dir}/Makefile" + sed -i "s/community\/kubernetes/kubernetes\/core/" "${_build_dir}/Makefile" + sed -i "s/^VERSION\:/VERSION: ${DOWNSTREAM_VERSION}/" "${_build_dir}/Makefile" + sed -i "s/community.kubernetes/kubernetes.core/" "${_build_dir}/galaxy.yml" + sed -i "s/name\:.*$/name: core/" "${_build_dir}/galaxy.yml" + sed -i "s/namespace\:.*$/namespace: kubernetes/" "${_build_dir}/galaxy.yml" + sed -i "s/^version\:.*$/version: ${DOWNSTREAM_VERSION}/" "${_build_dir}/galaxy.yml" + find "${_build_dir}" -type f -exec sed -i "s/community\.kubernetes/kubernetes\.core/g" {} \; +} + +f_cleanup() +{ + f_log_info "${FUNCNAME[0]}" + if [[ -n ${KEEP_DOWNSTREAM_TMPDIR} ]]; then + if [[ -d ${_build_dir} ]]; then + rm -fr "${_build_dir}" + fi + fi +} + +# Exit and handle cleanup processes if needed +f_exit() +{ + f_cleanup + exit "$0" +} + +f_create_collection_dir_structure() +{ + f_log_info "${FUNCNAME[0]}" + # Create the Collection + for f_name in "${_file_manifest[@]}"; + do + cp "./${f_name}" "${_build_dir}/${f_name}" + done + for d_name in "${_dir_manifest[@]}"; + do + cp -r "./${d_name}" "${_build_dir}/${d_name}" + done + for exclude_file in "${_file_exclude[@]}"; + do + if [[ -f "${_build_dir}/${exclude_file}" ]]; then + rm -f "${_build_dir}/${exclude_file}" + fi + done +} + +f_copy_collection_to_working_dir() +{ + f_log_info "${FUNCNAME[0]}" + # Copy the Collection build result into original working dir + cp "${_build_dir}"/*.tar.gz ./ +} + +f_common_steps() +{ + f_log_info "${FUNCNAME[0]}" + f_prep + f_create_collection_dir_structure + f_text_sub +} + +# Run the test sanity scanerio 
+f_test_sanity_option() +{ + f_log_info "${FUNCNAME[0]}" + f_common_steps + pushd "${_build_dir}" || return + f_log_info "SANITY TEST PWD: ${PWD}" + make test-sanity + popd || return + f_cleanup +} + +# Run the test integration +f_test_integration_option() +{ + f_log_info "${FUNCNAME[0]}" + f_common_steps + pushd "${_build_dir}" || return + f_log_info "INTEGRATION TEST WD: ${PWD}" + make test-integration + popd || return + f_cleanup +} + +# Run the molecule tests +f_test_molecule_option() +{ + f_log_info "${FUNCNAME[0]}" + f_common_steps + pushd "${_build_dir}" || return + f_log_info "MOLECULE TEST WD: ${PWD}" + make test-molecule + popd || return + f_cleanup +} + +# Run the release scanerio +f_release_option() +{ + f_log_info "${FUNCNAME[0]}" + f_common_steps + pushd "${_build_dir}" || return + f_log_info "RELEASE WD: ${PWD}" + make release + popd || return + f_cleanup +} + +# Run the build scanerio +f_build_option() +{ + f_log_info "${FUNCNAME[0]}" + f_common_steps + pushd "${_build_dir}" || return + f_log_info "BUILD WD: ${PWD}" + make build + popd || return + f_copy_collection_to_working_dir + f_cleanup +} + +# If no options are passed, display usage and exit +if [[ "${#}" -eq "0" ]]; then + f_show_help + f_exit 0 +fi + +# Handle options +while getopts ":simrb" option +do + case $option in + s) + f_test_sanity_option + ;; + i) + f_test_integration_option + ;; + m) + f_test_molecule_option + ;; + r) + f_release_option + ;; + b) + f_build_option + ;; + *) + printf "ERROR: Unimplemented option chosen.\n" + f_show_help + f_exit 1 + ;; # Default. + esac +done + +# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4