first commit

Signed-off-by: Felix Matouschek <fmatouschek@redhat.com>
This commit is contained in:
Felix Matouschek
2023-07-14 10:30:20 +02:00
commit 216df38df9
45 changed files with 3390 additions and 0 deletions

120
.github/workflows/ci.yml vendored Normal file
View File

@@ -0,0 +1,120 @@
name: CI
on:
push:
branches:
- main
pull_request:
schedule:
- cron: '0 6 * * *'
jobs:
sanity:
uses: ansible-network/github_actions/.github/workflows/sanity.yml@main
with:
matrix_include: "[]"
matrix_exclude: >-
[
{
"ansible-version": "stable-2.9"
},
{
"ansible-version": "stable-2.12",
"python-version": "3.7"
},
{
"ansible-version": "stable-2.12",
"python-version": "3.11"
},
{
"ansible-version": "stable-2.13",
"python-version": "3.7"
},
{
"ansible-version": "stable-2.13",
"python-version": "3.11"
},
{
"ansible-version": "stable-2.14",
"python-version": "3.7"
},
{
"ansible-version": "stable-2.14",
"python-version": "3.8"
},
{
"ansible-version": "stable-2.14",
"python-version": "3.11"
},
{
"ansible-version": "stable-2.15",
"python-version": "3.7"
},
{
"ansible-version": "stable-2.15",
"python-version": "3.8"
},
{
"ansible-version": "stable-2.15",
"python-version": "3.11"
},
{
"ansible-version": "milestone",
"python-version": "3.7"
},
{
"ansible-version": "milestone",
"python-version": "3.8"
},
{
"ansible-version": "devel",
"python-version": "3.7"
},
{
"ansible-version": "devel",
"python-version": "3.8"
}
]
integration:
runs-on: ubuntu-latest
name: I (${{ matrix.ansible }}+py${{ matrix.python }})
strategy:
fail-fast: false
matrix:
ansible:
- stable-2.9
- stable-2.11
- stable-2.12
- stable-2.13
- devel
python:
- '3.8'
- '3.9'
- '3.10'
exclude:
# Because ansible-test doesn't support Python 3.9 for Ansible 2.9
# and Python 3.10 is supported in 2.12 or later.
- ansible: stable-2.9
python: '3.9'
- ansible: stable-2.9
python: '3.10'
- ansible: stable-2.10
python: '3.10'
- ansible: stable-2.11
python: '3.10'
steps:
- name: >-
Perform integration testing against
Ansible version ${{ matrix.ansible }}
under Python ${{ matrix.python }}
uses: ansible-community/ansible-test-gh-action@release/v1
with:
ansible-core-version: ${{ matrix.ansible }}
# OPTIONAL command to run before invoking `ansible-test integration`
# pre-test-cmd:
target-python-version: ${{ matrix.python }}
testing-type: integration
# OPTIONAL If your integration tests require code
# from other collections, install them like this
test-deps: >-
ansible.netcommon
kubernetes.core

67
.github/workflows/docs.yml vendored Normal file
View File

@@ -0,0 +1,67 @@
---
name: Documentation
on:
push:
branches:
- main
tags:
- "[0-9]+.[0-9]+.[0-9]+"
workflow_dispatch:
env:
COLORTERM: 'yes'
TERM: 'xterm-256color'
PYTEST_ADDOPTS: '--color=yes'
jobs:
docs:
runs-on: ubuntu-latest
if: github.repository == 'kubevirt/kubernetes.kubevirt'
permissions:
actions: write
checks: write
contents: write
deployments: write
packages: write
pages: write
steps:
- name: Check out code
uses: actions/checkout@v2
with:
path: ansible_collections/kubernetes/kubevirt
fetch-depth: 0
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: 3.9
cache: 'pip'
- name: Install doc dependencies
run: |
python -m pip install --upgrade pip
pip install -r ansible_collections/kubernetes/kubevirt/docs/requirements.txt
pip install -r ansible_collections/kubernetes/kubevirt/requirements.txt
ansible-galaxy collection install -r ansible_collections/kubernetes/kubevirt/requirements.yml -p /home/runner/.ansible/collections --force-with-deps
sudo apt install -y sed hub
- name: Create default collection path
run: |
mkdir -p /home/runner/.ansible/
cp -rp /home/runner/work/kubevirt/kubevirt/ansible_collections /home/runner/.ansible/collections/
ls -l /home/runner/.ansible/collections/ansible_collections/
ls -l /home/runner/.ansible/collections/ansible_collections/kubernetes/
ls -l /home/runner/.ansible/collections/ansible_collections/kubernetes/kubevirt/
- name: Create changelog and documentation
uses: ansible-middleware/collection-docs-action@main
with:
collection_fqcn: kubernetes.kubevirt
collection_repo: kubevirt/kubernetes.kubevirt
dependencies: false
commit_changelog: false
commit_ghpages: true
changelog_release: false
generate_docs: true
path: /home/runner/.ansible/collections/ansible_collections/kubernetes/kubevirt
token: ${{ secrets.GITHUB_TOKEN }}

View File

@@ -0,0 +1,34 @@
name: Lint extra docsite docs and links
on:
# Run CI against all pushes (direct commits, also merged PRs), Pull Requests
push:
branches:
- main
- stable-*
pull_request:
# Run CI once per day (at 06:00 UTC)
# This ensures that even if there haven't been commits that we are still testing against latest version of ansible-test for each ansible-base version
schedule:
- cron: '0 6 * * *'
jobs:
docsite:
name: Lint extra docsite docs and links
permissions:
contents: read
runs-on: ubuntu-latest
steps:
- name: Check out code
uses: actions/checkout@v3
- name: Set up Python
uses: actions/setup-python@v3
with:
python-version: '3.10'
- name: Install antsibull-docs
run: pip install antsibull-docs --disable-pip-version-check
- name: Run collection docs linter
run: antsibull-docs lint-collection-docs .

90
.github/workflows/release.yml vendored Normal file
View File

@@ -0,0 +1,90 @@
---
name: Release collection
on:
workflow_dispatch:
jobs:
release:
runs-on: ubuntu-latest
if: github.repository == 'kubevirt/kubernetes.kubevirt'
permissions:
actions: write
checks: write
contents: write
deployments: write
packages: write
pages: write
outputs:
tag_version: ${{ steps.get_version.outputs.TAG_VERSION }}
steps:
- name: Checkout code
uses: actions/checkout@v3
with:
fetch-depth: 0
token: ${{ secrets.KUBEVIRT_PAT }}
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: "3.x"
cache: 'pip'
- name: Get current version
id: get_version
run: echo "::set-output name=TAG_VERSION::$(grep version galaxy.yml | awk -F'"' '{ print $2 }')"
- name: Check if tag exists
id: check_tag
run: echo "::set-output name=TAG_EXISTS::$(git tag | grep ${{ steps.get_version.outputs.TAG_VERSION }})"
- name: Fail if tag exists
if: ${{ steps.get_version.outputs.TAG_VERSION == steps.check_tag.outputs.TAG_EXISTS }}
uses: actions/github-script@v3
with:
script: |
core.setFailed('Release tag already exists')
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install ansible-core antsibull
ansible-galaxy collection install -r requirements.yml -p /home/runner/.ansible/collections --force-with-deps
sudo apt install -y sed hub
- name: Build collection
run: |
ansible-galaxy collection build .
- name: Create changelog and documentation
uses: ansible-middleware/collection-docs-action@main
with:
collection_fqcn: kubernetes.kubevirt
collection_repo: kubevirt/kubernetes.kubevirt
dependencies: false
commit_changelog: true
commit_ghpages: false
changelog_release: true
generate_docs: false
token: ${{ secrets.GITHUB_TOKEN }}
- name: Publish collection
env:
ANSIBLE_GALAXY_API_KEY: ${{ secrets.ANSIBLE_GALAXY_API_KEY }}
run: |
ansible-galaxy collection publish *.tar.gz --api-key $ANSIBLE_GALAXY_API_KEY
- name: Create release tag
run: |
git config user.name github-actions
git config user.email github-actions@github.com
git tag -a ${{ steps.get_version.outputs.TAG_VERSION }} -m "Release v${{ steps.get_version.outputs.TAG_VERSION }}" || true
git push origin --tags
- name: Publish Release
uses: softprops/action-gh-release@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
tag_name: ${{ steps.get_version.outputs.TAG_VERSION }}
files: "*.tar.gz"
body_path: gh-release.md

138
.gitignore vendored Normal file
View File

@@ -0,0 +1,138 @@
/tests/output/
/changelogs/.plugin-cache.yaml
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
docs/plugins/
docs/roles/
# PyBuilder
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
.python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
.vscode/
.idea/
bin/

9
CHANGELOG.rst Normal file
View File

@@ -0,0 +1,9 @@
=============================================
KubeVirt Collection for Ansible Release Notes
=============================================
.. contents:: Topics
v0.1.0
======

3
CODE_OF_CONDUCT.md Normal file
View File

@@ -0,0 +1,3 @@
# Community Code of Conduct
Please see the official [KubeVirt Code of Conduct](https://github.com/kubevirt/kubevirt/blob/main/CODE_OF_CONDUCT.md).

3
CONTRIBUTING.md Normal file
View File

@@ -0,0 +1,3 @@
# Contributing
Refer to the [KubeVirt Contributing guidelines](https://github.com/kubevirt/kubevirt/blob/main/CONTRIBUTING.md).

202
LICENSE Normal file
View File

@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2017 The KubeVirt Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

3
MAINTAINING.md Normal file
View File

@@ -0,0 +1,3 @@
# Maintaining this collection
Refer to the [Maintainer guidelines](https://github.com/ansible/community-docs/blob/main/maintaining.rst).

9
Makefile Normal file
View File

@@ -0,0 +1,9 @@
all:
.PHONY: cluster-up
cluster-up:
hack/e2e-setup.sh
.PHONY: cluster-down
cluster-down:
hack/e2e-setup.sh --cleanup

11
OWNERS Normal file
View File

@@ -0,0 +1,11 @@
approvers:
- 0xFelix
- lyarwood
reviewers:
- akrejcir
- ksimon1
- 0xFelix
- lyarwood
- jcanocan
- opokornyy
- codingben

141
README.md Normal file
View File

@@ -0,0 +1,141 @@
# Lean Ansible bindings for KubeVirt
<!-- Add CI and code coverage badges here. Samples included below. -->
[![CI](https://github.com/kubevirt/kubernetes.kubevirt/workflows/CI/badge.svg?event=push)](https://github.com/kubevirt/kubernetes.kubevirt/actions)
<!-- Describe the collection and why a user would want to use it. What does the collection do? -->
This repo hosts the kubernetes.kubevirt Ansible Collection.
The collection includes an inventory plugin for Ansible to automate the management of VMs running on KubeVirt.
## Code of Conduct
We follow the [Ansible Code of Conduct](https://docs.ansible.com/ansible/devel/community/code_of_conduct.html) in all our interactions within this project.
If you encounter abusive behavior, please refer to the [policy violations](https://docs.ansible.com/ansible/devel/community/code_of_conduct.html#policy-violations) section of the Code for information on how to raise a complaint.
## Communication
<!--List available communication channels. In addition to channels specific to your collection, we also recommend to use the following ones.-->
We announce releases and important changes through Ansible's [The Bullhorn newsletter](https://github.com/ansible/community/wiki/News#the-bullhorn). Be sure you are [subscribed](https://eepurl.com/gZmiEP).
Join us in the `#ansible` (general use questions and support), `#ansible-community` (community and collection development questions), and other [IRC channels](https://docs.ansible.com/ansible/devel/community/communication.html#irc-channels).
We take part in the global quarterly [Ansible Contributor Summit](https://github.com/ansible/community/wiki/Contributor-Summit) virtually or in-person. Track [The Bullhorn newsletter](https://eepurl.com/gZmiEP) and join us.
For more information about communication, refer to the [Ansible Communication guide](https://docs.ansible.com/ansible/devel/community/communication.html).
## Contributing to this collection
<!--Describe how the community can contribute to your collection. At a minimum, fill up and include the CONTRIBUTING.md file containing how and where users can create issues to report problems or request features for this collection. List contribution requirements, including preferred workflows and necessary testing, so you can benefit from community PRs. If you are following general Ansible contributor guidelines, you can link to - [Ansible Community Guide](https://docs.ansible.com/ansible/devel/community/index.html). List the current maintainers (contributors with write or higher access to the repository). The following can be included:-->
The content of this collection is made by people like you, a community of individuals collaborating on making the world better through developing automation software.
We are actively accepting new contributors.
Any kind of contribution is very welcome.
You don't know how to start? Refer to our [contribution guide](CONTRIBUTING.md)!
We use the following guidelines:
* [CONTRIBUTING.md](CONTRIBUTING.md)
* [REVIEW_CHECKLIST.md](REVIEW_CHECKLIST.md)
* [Ansible Community Guide](https://docs.ansible.com/ansible/latest/community/index.html)
* [Ansible Development Guide](https://docs.ansible.com/ansible/devel/dev_guide/index.html)
* [Ansible Collection Development Guide](https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html#contributing-to-collections)
## Collection maintenance
The current maintainers are listed in the [MAINTAINERS](MAINTAINERS) file. If you have questions or need help, feel free to mention them in the proposals.
To learn how to maintain / become a maintainer of this collection, refer to the [Maintainer guidelines](MAINTAINING.md).
## Governance
<!--Describe how the collection is governed. Here can be the following text:-->
The process of decision making in this collection is based on discussing and finding consensus among participants.
Every voice is important. If you have something on your mind, create an issue or dedicated discussion and let's discuss it!
## Tested with Ansible
<!-- List the versions of Ansible the collection has been tested with. Must match what is in galaxy.yml. -->
## External requirements
<!-- List any external resources the collection depends on, for example minimum versions of an OS, libraries, or utilities. Do not list other Ansible collections here. -->
- python >= 3.6
Python libraries:
- kubernetes
- PyYaml
- jsonpatch
- jinja2
### Supported connections
<!-- Optional. If your collection supports only specific connection types (such as HTTPAPI, netconf, or others), list them here. -->
## Included content
<!-- Galaxy will eventually list the module docs within the UI, but until that is ready, you may need to either describe your plugins etc here, or point to an external docsite to cover that information. -->
## Using this collection
<!--Include some quick examples that cover the most common use cases for your collection content. It can include the following examples of installation and upgrade (change NAMESPACE.COLLECTION_NAME correspondingly):-->
### Installing the Collection from Ansible Galaxy
Before using this collection, you need to install it with the Ansible Galaxy command-line tool:
```bash
ansible-galaxy collection install kubernetes.kubevirt
```
You can also include it in a `requirements.yml` file and install it with `ansible-galaxy collection install -r requirements.yml`, using the format:
```yaml
---
collections:
- name: kubernetes.kubevirt
```
Note that if you install the collection from Ansible Galaxy, it will not be upgraded automatically when you upgrade the `ansible` package. To upgrade the collection to the latest available version, run the following command:
```bash
ansible-galaxy collection install kubernetes.kubevirt --upgrade
```
You can also install a specific version of the collection, for example, if you need to downgrade when something is broken in the latest version (please report an issue in this repository). Use the following syntax to install version `0.1.0`:
```bash
ansible-galaxy collection install kubernetes.kubevirt:==0.1.0
```
See [Ansible Using collections](https://docs.ansible.com/ansible/devel/user_guide/collections_using.html) for more details.
## Release notes
See the [changelog](https://github.com/ansible-collections/REPONAMEHERE/tree/main/CHANGELOG.rst).
## Roadmap
<!-- Optional. Include the roadmap for this collection, and the proposed release/versioning strategy so users can anticipate the upgrade/update cycle. -->
## More information
<!-- List out where the user can find additional information, such as working group meeting times, slack/IRC channels, or documentation for the product this collection automates. At a minimum, link to: -->
- [Ansible Collection overview](https://github.com/ansible-collections/overview)
- [Ansible User guide](https://docs.ansible.com/ansible/devel/user_guide/index.html)
- [Ansible Developer guide](https://docs.ansible.com/ansible/devel/dev_guide/index.html)
- [Ansible Collections Checklist](https://github.com/ansible-collections/overview/blob/main/collection_requirements.rst)
- [Ansible Community code of conduct](https://docs.ansible.com/ansible/devel/community/code_of_conduct.html)
- [The Bullhorn (the Ansible Contributor newsletter)](https://us19.campaign-archive.com/home/?u=56d874e027110e35dea0e03c1&id=d6635f5420)
- [News for Maintainers](https://github.com/ansible-collections/news-for-maintainers)
## Licensing
<!-- Include the appropriate license information here and a pointer to the full licensing details. If the collection contains modules migrated from the ansible/ansible repo, you must use the same license that existed in the ansible/ansible repo. See the GNU license example below. -->
Apache License 2.0
See [LICENSE](./LICENSE) to see the full text.

3
REVIEW_CHECKLIST.md Normal file
View File

@@ -0,0 +1,3 @@
# Review Checklist
Refer to the [Collection review checklist](https://github.com/ansible/community-docs/blob/main/review_checklist.rst).

View File

@@ -0,0 +1,4 @@
ancestor: null
releases:
0.1.0:
release_date: '2023-07-06'

29
changelogs/config.yaml Normal file
View File

@@ -0,0 +1,29 @@
changelog_filename_template: ../CHANGELOG.rst
changelog_filename_version_depth: 0
changes_file: changelog.yaml
changes_format: combined
keep_fragments: false
mention_ancestor: true
new_plugins_after_name: removed_features
notesdir: fragments
prelude_section_name: release_summary
prelude_section_title: Release Summary
sections:
- - major_changes
- Major Changes
- - minor_changes
- Minor Changes
- - breaking_changes
- Breaking Changes / Porting Guide
- - deprecated_features
- Deprecated Features
- - removed_features
- Removed Features (previously deprecated)
- - security_fixes
- Security Fixes
- - bugfixes
- Bugfixes
- - known_issues
- Known Issues
title: KubeVirt Collection for Ansible
trivial_section_name: trivial

View File

1
docs/CHANGELOG.rst Symbolic link
View File

@@ -0,0 +1 @@
../CHANGELOG.rst

1
docs/CONTRIBUTING.md Symbolic link
View File

@@ -0,0 +1 @@
../CONTRIBUTING.md

1
docs/README.md Symbolic link
View File

@@ -0,0 +1 @@
../README.md

View File

@@ -0,0 +1,21 @@
</ul>
</div>
</section>
</div>
</div>
<footer><div class="rst-footer-buttons" role="navigation" aria-label="Footer">
</div>
<hr/>
<div role="contentinfo">
<p>&#169; Copyright 2023 Red Hat, Inc.</p>
</div>
Built with <a href="https://www.sphinx-doc.org/">Sphinx</a> using a
<a href="https://github.com/readthedocs/sphinx_rtd_theme">theme</a>
provided by <a href="https://readthedocs.org">Read the Docs</a>.
</footer>
</div>
</div>
</section>
</div>
</body>
</html>

View File

@@ -0,0 +1,43 @@
<!doctype html>
<html>
<head>
<title>Kubevirt Ansible Collection documentation index</title>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<link rel="stylesheet" href="https://ansible-middleware.github.io/amq/main/_static/pygments.css" type="text/css" />
<link rel="stylesheet" href="https://ansible-middleware.github.io/amq/main/_static/css/theme.css" type="text/css" />
<link rel="stylesheet" href="https://ansible-middleware.github.io/amq/main/_static/ansible-basic-sphinx-ext.css" type="text/css" />
<script data-url_root="./" id="documentation_options" src="https://ansible-middleware.github.io/amq/main/_static/documentation_options.js"></script>
<script src="https://ansible-middleware.github.io/amq/main/_static/jquery.js"></script>
<script src="https://ansible-middleware.github.io/amq/main/_static/underscore.js"></script>
<script src="https://ansible-middleware.github.io/amq/main/_static/doctools.js"></script>
<script src="https://ansible-middleware.github.io/amq/main/_static/js/theme.js"></script>
</head>
<body class="wy-body-for-nav">
<div class="wy-grid-for-nav">
<nav data-toggle="wy-nav-shift" class="wy-nav-side">
<div class="wy-side-scroll">
<div class="wy-side-nav-search" >
<a href="#" class="icon icon-home"> Kubevirt Ansible Collection</a>
</div>
</div>
</nav>
<section data-toggle="wy-nav-shift" class="wy-nav-content-wrap">
<div class="wy-nav-content">
<div class="rst-content">
<div role="navigation" aria-label="Page navigation">
<ul class="wy-breadcrumbs">
<li><a href="#" class="icon icon-home"></a> &raquo;</li>
<li>Welcome to Kubevirt Collection documentation</li>
<li class="wy-breadcrumbs-aside"></li>
</ul>
<hr/>
</div>
<div role="main" class="document" itemscope="itemscope" itemtype="http://schema.org/Article">
<div itemprop="articleBody">
<section id="welcome-to-amq-collection-documentation">
<h1>Welcome to Kubevirt Collection documentation<a class="headerlink" href="#welcome-to-amq-collection-documentation" title="Permalink to this headline"></a></h1>
<div class="toctree-wrapper compound">
<p class="caption" role="heading"><span class="caption-text">Pick collection version:</span></p>
<ul>

170
docs/conf.py Normal file
View File

@@ -0,0 +1,170 @@
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config

# -- Path setup --------------------------------------------------------------

# Make the collection's module_utils importable so autodoc can resolve them.
import datetime
import os
import sys

sys.path.insert(0, os.path.abspath('../plugins/module_utils/'))

# -- Project information -----------------------------------------------------

project = 'Kubevirt Ansible Collection'
# Keep the copyright year current at build time.
copyright = '{y} Red Hat, Inc.'.format(y=datetime.date.today().year)
author = 'Red Hat, Inc.'

# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = ''

# -- General configuration ---------------------------------------------------

# Sphinx extension module names; either bundled ('sphinx.ext.*') or custom.
extensions = [
    'myst_parser',
    'sphinx.ext.autodoc',
    'sphinx.ext.intersphinx',
    'ansible_basic_sphinx_ext',
]

# Paths that contain templates, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
source_suffix = ['.rst', '.md']

# The master toctree document.
master_doc = 'index'

# Sphinx >= 5 warns on (and no longer accepts) language = None;
# use an explicit language code instead.
language = 'en'

# Patterns, relative to the source directory, to ignore when looking for
# source files. Also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '.tmp']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
highlight_language = 'YAML+Jinja'

# -- Options for HTML output -------------------------------------------------

html_theme_path = ['_themes']
html_theme = 'sphinx_rtd_theme'

# Custom static files (copied after the builtin static files).
html_static_path = []

# -- Options for HTMLHelp output ---------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'KubevirtCollectionDoc'

# -- Options for LaTeX output ------------------------------------------------

# All entries left at their defaults (papersize, pointsize, preamble, ...).
latex_elements = {}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'KubevirtCollection.tex', 'Red Hat Kubevirt Ansible Collection Documentation',
     'Red Hat, Inc.', 'manual'),
]

# -- Options for manual page output ------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'KubevirtCollection', 'Red Hat Kubevirt Ansible Collection Documentation',
     [author], 1)
]

# -- Options for Texinfo output ----------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'KubevirtCollection', 'Red Hat Kubevirt Ansible Collection Documentation',
     author, 'KubevirtCollection', 'One line description of project.',
     'Miscellaneous'),
]

# -- Extension configuration -------------------------------------------------

# -- Options for intersphinx extension ---------------------------------------

# Point the Python inventory at Python 3 — the collection requires
# python >= 3.6, so linking the Python 2 docs was incorrect.
intersphinx_mapping = {
    'python': ('https://docs.python.org/3', None),
    'ansible': ('https://docs.ansible.com/ansible/latest/', None),
}

13
docs/developing.md Normal file
View File

@@ -0,0 +1,13 @@
## Contributor's Guidelines
- All YAML files named with `.yml` extension
- Use spaces around jinja variables. `{{ var }}` over `{{var}}`
- Variables that are internal to the role should be lowercase and start with the role name
- Keep roles self contained - Roles should avoid including tasks from other roles when possible
- Plays should do nothing more than include a list of roles except where `pre_tasks` and `post_tasks` are required when possible
- Separators - Use valid name, ie. underscores (e.g. `my_role` `my_playbook`) not dashes (`my-role`)
- Paths - When defining paths, do not include trailing slashes (e.g. `my_path: /foo` not `my_path: /foo/`). When concatenating paths, follow the same convention (e.g. `{{ my_path }}/bar` not `{{ my_path }}bar`)
- Indentation - Use 2 spaces for each indent
- `vars/` vs `defaults/` - internal or interpolated variables that don't need to change or be overridden by user go in `vars/`, those that a user would likely override, go under `defaults/` directory
- All arguments have a specification in `meta/argument_specs.yml`
- All playbooks/roles should be focused on compatibility with Ansible Automation Platform

45
docs/docsite/links.yml Normal file
View File

@@ -0,0 +1,45 @@
---
# This makes sure that plugin and module documentation gets "Edit on GitHub"
# links that allow users to directly create a PR for this plugin or module
# in GitHub's UI.
edit_on_github:
  # Filled in with the collection's actual repository (see galaxy.yml);
  # the community.REPO_NAME template placeholder must not be shipped.
  repository: kubevirt/kubernetes.kubevirt
  branch: main
  # galaxy.yml lives at the repository root, so no prefix is required.
  path_prefix: ''

# Arbitrary extra links rendered as buttons. Keep the number of links and the
# description length down. Some links are added automatically from galaxy.yml:
#   1. the issue tracker (if `issues` is specified);
#   2. the homepage (if it differs from `documentation`/`repository`);
#   3. the collection's repository (if `repository` is specified).
extra_links:
  - description: Report an issue
    url: https://github.com/kubevirt/kubernetes.kubevirt/issues/new/choose

# Communication channels for this collection. Avoid listing more than one
# place per communication tool to prevent confusion.
communication:
  matrix_rooms:
    - topic: General usage and support questions
      room: '#users:ansible.im'
  irc_channels:
    - topic: General usage and support questions
      network: Libera
      channel: '#ansible'
  mailing_lists:
    - topic: Ansible Project List
      url: https://groups.google.com/g/ansible-project
      # A `subscribe` field with a subscription URI may be added; for
      # groups.google.com lists a subscribe link is generated automatically.

26
docs/index.rst Normal file
View File

@@ -0,0 +1,26 @@
.. Red Hat kubernetes kubevirt Ansible Collection documentation main file
Welcome to Kubevirt Collection documentation
============================================
.. toctree::
:maxdepth: 2
:caption: User documentation
README
plugins/index
roles/index
.. toctree::
:maxdepth: 2
:caption: Developer documentation
testing
developing
releasing
.. toctree::
:maxdepth: 2
:caption: General
Changelog <CHANGELOG>

61
docs/releasing.md Normal file
View File

@@ -0,0 +1,61 @@
# Collection Versioning Strategy
Each supported collection maintained by Ansible follows Semantic Versioning 2.0.0 (https://semver.org/), for example:
Given a version number MAJOR.MINOR.PATCH, the following is incremented:
MAJOR version: when making incompatible API changes (see Feature Release scenarios below for examples)
MINOR version: when adding features or functionality in a backwards compatible manner, or updating testing matrix and/or metadata (deprecation)
PATCH version: when adding backwards compatible bug fixes or security fixes (strict).
Additional labels for pre-release and build metadata are available as extensions to the MAJOR.MINOR.PATCH format.
The first version of a generally available supported collection on Ansible Automation Hub shall be version 1.0.0. NOTE: By default, all newly created collections may begin with a smaller default version of 0.1.0, and therefore a version of 1.0.0 should be explicitly stated by the collection maintainer.
## New content is added to an existing collection
Assuming the current release is 1.0.0, and a new module is ready to be added to the collection, the minor version would be incremented to 1.1.0. The change in the MINOR version indicates an additive change was made while maintaining backward compatibility for existing content within the collection.
## New feature to existing plugin or role within a collection (backwards compatible)
Assuming the current release is 1.0.0, and new features for an existing module are ready for release, we would increment the MINOR version to 1.1.0. The change in the MINOR version indicates an additive change was made while maintaining backward compatibility for existing content within the collection.
## Bug fix or security fix to existing content within a collection
Assuming the current release is 1.0.0 and a bug is fixed prior to the next minor release, the PATCH version would be incremented to 1.0.1. The patch indicates only a bug was fixed within a current version. The PATCH release does not contain new content, nor was functionality removed. Bug fixes may be included in a MINOR or MAJOR feature release if the timing allows, eliminating the need for a PATCH dedicated to the fix.
## Breaking change to any content within a collection
Assuming the current release is 1.0.0, and a breaking change (API or module) is introduced for a user or developer. The MAJOR version would be incremented to 2.0.0.
Examples of breaking changes within a collection may include but are not limited to:
- Argspec changes for a module that require either inventory structure or playbook changes.
- A change in the shape of either the inbound or returned payload of a filter plugin.
- Changes to a connection plugin that require additional inventory parameters or ansible.cfg entries.
- New functionality added to a module that changes the outcome of that module as released in previous versions.
- The removal of plugins from a collection.
## Content removed from a collection
Deleting a module or API is a breaking change. Please see the 'Breaking change' section for how to version this.
## A typographical error was fixed in the documentation for a collection
A correction to the README would be considered a bug fix and the PATCH incremented. See 'Bug fix' above.
## Documentation added/removed/modified within a collection
Only the PATCH version should be increased for a release that contains changes limited to revised documentation.
## Release automation
New releases are triggered by annotated git tags named after semantic versioning. The automation publishes the built artifacts to ansible-galaxy and github releases page.

7
docs/requirements.txt Normal file
View File

@@ -0,0 +1,7 @@
antsibull>=0.17.0
antsibull-docs
antsibull-changelog
ansible-core>=2.14.1
sphinx-rtd-theme
git+https://github.com/felixfontein/ansible-basic-sphinx-ext
myst-parser

4
docs/roles.rst.template Normal file
View File

@@ -0,0 +1,4 @@
Role Index
==========
.. toctree::

35
docs/testing.md Normal file
View File

@@ -0,0 +1,35 @@
# Testing
## Continuous integration
The collection is tested with a [molecule](https://github.com/ansible-community/molecule) setup covering the included roles and verifying correct installation and idempotency.
In order to run the molecule tests locally with python 3.9 available, after cloning the repository:
```
pip install yamllint 'molecule[docker]~=3.5.2' ansible-core flake8 ansible-lint voluptuous
molecule test --all
```
## Test playbooks
Sample playbooks are provided in the `examples/` directory; to run the playbooks locally (requires access to a Kubernetes cluster with KubeVirt installed, python 3.9+, and ansible) the steps are as follows:
```
# setup environment
pip install ansible-core
# clone the repository
git clone https://github.com/kubevirt/kubernetes.kubevirt
cd kubernetes.kubevirt
# install collection dependencies
ansible-galaxy collection install -r requirements.yml
# install collection python deps
pip install -r requirements.txt
# run the playbook
ansible-playbook examples/play-create.yml
```

View File

@@ -0,0 +1,6 @@
plugin: kubernetes.kubevirt.kubevirt
connections:
- namespaces:
- default
network_name: bridge-network
label_selector: app=test

View File

@@ -0,0 +1,6 @@
---
# Inventory source using the kubevirt plugin with kubesecondarydns-derived
# host names for the secondary network interface.
plugin: kubernetes.kubevirt.kubevirt
connections:
  - namespaces:
      - default
    # Report the address of this interface as the primary IP.
    network_name: bridge-network
    # Use a real boolean instead of the ambiguous YAML 1.1 truthy "yes".
    kube_secondary_dns: true

34
examples/play-create.yml Normal file
View File

@@ -0,0 +1,34 @@
---
# Example playbook: create a Fedora VM attached to both the pod network and a
# secondary bridge network, and wait until it is ready.
- hosts: localhost
  tasks:
    - name: Create VM
      kubernetes.kubevirt.kubevirt_vm:
        state: present
        name: testvm
        namespace: default
        labels:
          app: test
        instancetype: u1.medium
        preference: fedora
        interfaces:
          - name: default
            masquerade: {}
          - name: bridge-network
            bridge: {}
        networks:
          - name: default
            pod: {}
          - name: bridge-network
            multus:
              networkName: kindexgw
        volumes:
          - containerDisk:
              image: quay.io/containerdisks/fedora:latest
            name: containerdisk
          - cloudInitNoCloud:
              userData: |-
                #cloud-config
                # The default username is: fedora
                ssh_authorized_keys:
                  - ssh-ed25519 AAAA...
            name: cloudinit
        # Real boolean instead of the ambiguous YAML 1.1 truthy "yes".
        wait: true

8
examples/play-delete.yml Normal file
View File

@@ -0,0 +1,8 @@
---
# Example playbook: delete the test VM and wait for the removal to complete.
- hosts: localhost
  tasks:
    - name: Delete VM
      kubernetes.kubevirt.kubevirt_vm:
        name: testvm
        namespace: default
        state: absent
        # Real boolean instead of the ambiguous YAML 1.1 truthy "yes".
        wait: true

View File

@@ -0,0 +1,5 @@
---
# Inventory source using the kubevirt plugin, connecting to VMs through
# Kubernetes services.
plugin: kubernetes.kubevirt.kubevirt
connections:
  - namespaces:
      - default
    # Use a real boolean instead of the ambiguous YAML 1.1 truthy "yes".
    use_service: true

40
galaxy.yml Normal file
View File

@@ -0,0 +1,40 @@
---
# See https://docs.ansible.com/ansible/latest/dev_guide/collections_galaxy_meta.html
namespace: kubernetes
name: kubevirt
version: "0.1.0"
readme: README.md
authors:
  - KubeVirt Project (kubevirt.io)
dependencies:
  kubernetes.core: '>=2.0.0'
description: Lean Ansible bindings for KubeVirt
license_file: LICENSE
tags:
  # tags so people can search for collections https://galaxy.ansible.com/search
  # tags are all lower-case, no spaces, no dashes.
  - api
  - k8s
  - kubernetes
  - kubevirt
  - virtualization
  - cloud
  - infrastructure
repository: https://github.com/kubevirt/kubernetes.kubevirt
documentation: https://github.com/kubevirt/kubernetes.kubevirt/tree/main/docs
homepage: https://kubevirt.io
issues: https://github.com/kubevirt/kubernetes.kubevirt/issues
# Files and directories excluded from the built collection artifact.
build_ignore:
  - .gitignore
  - changelogs/.plugin-cache.yaml
  - .github
  - .ansible-lint
  - .yamllint
  - '*.tar.gz'
  - '*.zip'
  - molecule
  - changelogs
  - docs/_gh_include
  - docs/conf.py
  - docs/roles.rst.template
  # The docs pip requirements file is requirements.txt (requirements.yml
  # does not exist under docs/), so ignore the file that actually exists.
  - docs/requirements.txt

369
hack/e2e-setup.sh Executable file
View File

@@ -0,0 +1,369 @@
#!/usr/bin/env bash
#
# This file is part of the KubeVirt project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2023 Red Hat, Inc.
#
# This script is based on:
# - https://github.com/ovn-org/ovn-kubernetes/blob/master/contrib/kind.sh
# - https://github.com/kiagnose/kiagnose/blob/main/automation/e2e.sh
# - https://github.com/kiagnose/kiagnose/blob/main/checkups/kubevirt-vm-latency/automation/e2e.sh
ARGCOUNT=$#
# Returns the full directory name of the script
DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &>/dev/null && pwd)"
ARCH=""
case $(uname -m) in
x86_64) ARCH="amd64" ;;
aarch64) ARCH="arm64" ;;
esac
# Initialize all tunables, honoring values already set in the environment.
# Every variable can be overridden via its own name; SECONDARY_NETWORK_NAME
# additionally falls back to the legacy NETWORK_NAME variable for backwards
# compatibility.
set_default_params() {
    BIN_DIR=${BIN_DIR:-$DIR/../bin}
    KIND=${KIND:-$BIN_DIR/kind}
    KIND_VERSION=${KIND_VERSION:-v0.20.0}
    KUBECTL=${KUBECTL:-$BIN_DIR/kubectl}
    KUBECTL_VERSION=${KUBECTL_VERSION:-v1.27.3}
    KUBEVIRT_VERSION=${KUBEVIRT_VERSION:-v1.0.0}
    KUBEVIRT_COMMON_INSTANCETYPES_VERSION=${KUBEVIRT_COMMON_INSTANCETYPES_VERSION:-v0.3.0}
    KUBEVIRT_USE_EMULATION=${KUBEVIRT_USE_EMULATION:-"false"}
    CNAO_VERSION=${CNAO_VERSION:-v0.87.0}
    CLUSTER_NAME=${CLUSTER_NAME:-kind}
    # Bug fix: previously only NETWORK_NAME was consulted, so exporting
    # SECONDARY_NETWORK_NAME (the variable's own name) had no effect.
    SECONDARY_NETWORK_NAME=${SECONDARY_NETWORK_NAME:-${NETWORK_NAME:-kindexgw}}
    SECONDARY_NETWORK_SUBNET=${SECONDARY_NETWORK_SUBNET:-172.19.0.0/16}
    SECONDARY_NETWORK_RANGE_START=${SECONDARY_NETWORK_RANGE_START:-172.19.1.1}
    SECONDARY_NETWORK_RANGE_END=${SECONDARY_NETWORK_RANGE_END:-172.19.255.254}
    SECONDARY_NETWORK_GATEWAY=${SECONDARY_NETWORK_GATEWAY:-172.19.0.1}
    NAMESPACE=${NAMESPACE:-default}
}
# Taken from:
# https://github.com/kubevirt/kubevirtci/blob/f661bfe0e3678e5409c057855951c50a912571a0/cluster-up/cluster/ephemeral-provider-common.sh#L26C1-L45C1
# Select the container runtime used to drive kind. Honors an explicit
# CRI=podman or CRI=docker; otherwise auto-detects by probing the podman
# socket first and falling back to `docker ps`.
# Sets: _cri_bin (command prefix) and _cri_socket (runtime socket path).
# Exits with status 1 when no working runtime is found.
detect_cri() {
    PODMAN_SOCKET=${PODMAN_SOCKET:-"/run/podman/podman.sock"}
    if [ "${CRI}" = "podman" ]; then
        _cri_socket=$(detect_podman_socket)
        _cri_bin="podman --remote --url=unix://$_cri_socket"
    elif [ "${CRI}" = "docker" ]; then
        _cri_bin=docker
        _cri_socket="/var/run/docker.sock"
    else
        # No runtime requested explicitly: prefer podman if its socket responds.
        _cri_socket=$(detect_podman_socket)
        if [ -n "$_cri_socket" ]; then
            _cri_bin="podman --remote --url=unix://$_cri_socket"
            echo >&2 "selecting podman as container runtime"
        elif docker ps >/dev/null 2>&1; then
            _cri_bin=docker
            _cri_socket="/var/run/docker.sock"
            echo >&2 "selecting docker as container runtime"
        else
            echo >&2 "no working container runtime found. Neither docker nor podman seems to work."
            exit 1
        fi
    fi
}
# Taken from:
# https://github.com/kubevirt/kubevirtci/blob/f661bfe0e3678e5409c057855951c50a912571a0/cluster-up/cluster/ephemeral-provider-common.sh#L20
# Print the podman socket path when the podman REST API answers on it;
# prints nothing when the socket is absent or unresponsive.
detect_podman_socket() {
    if curl --unix-socket "${PODMAN_SOCKET}" http://d/v3.0.0/libpod/info >/dev/null 2>&1; then
        echo "${PODMAN_SOCKET}"
    fi
}
install_kind() {
if [ ! -f "${KIND}" ]; then
echo "Installing kind"
mkdir -p "${BIN_DIR}"
curl -Lo "${KIND}" "https://kind.sigs.k8s.io/dl/${KIND_VERSION}/kind-linux-${ARCH}"
chmod +x "${KIND}"
echo "Successfully installed kind at ${KIND}:"
${KIND} version
fi
}
install_kubectl() {
if [ ! -f "${KUBECTL}" ]; then
echo "Installing kubectl"
mkdir -p "${BIN_DIR}"
curl -Lo "${KUBECTL}" "https://dl.k8s.io/release/${KUBECTL_VERSION}/bin/linux/${ARCH}/kubectl"
chmod +x "${KUBECTL}"
echo "Successfully installed kubectl at ${KUBECTL}:"
${KUBECTL} version --client
fi
}
configure_inotify_limits() {
echo "Configuring inotify limits"
sudo sysctl fs.inotify.max_user_instances=512
sudo sysctl fs.inotify.max_user_watches=1048576
}
create_cluster() {
echo "Creating cluster with kind"
DOCKER_HOST=unix://${_cri_socket} ${KIND} create cluster --wait 2m --name "${CLUSTER_NAME}"
echo "Waiting for the network to be ready"
${KUBECTL} wait --for=condition=ready pods --namespace=kube-system -l k8s-app=kube-dns --timeout=2m
echo "K8S cluster is up:"
${KUBECTL} get nodes -o wide
}
configure_secondary_network() {
echo "Configuring secondary network"
# Name of the single kind node
local node=${CLUSTER_NAME}-control-plane
# Interface added when connecting the secondary network
local secondary_interface=eth1
${_cri_bin} network create "${SECONDARY_NETWORK_NAME}" --driver=bridge --subnet="${SECONDARY_NETWORK_SUBNET}"
${_cri_bin} network connect "${SECONDARY_NETWORK_NAME}" "${node}"
# Get the ip address assigned to the interface of the secondary network on the node
local ip
ip=$(
${_cri_bin} exec "${node}" ip ad show dev "${secondary_interface}" scope global |
sed -n 's/^ inet \([[:digit:]]\{1,3\}\.[[:digit:]]\{1,3\}\.[[:digit:]]\{1,3\}\.[[:digit:]]\{1,3\}\/[[:digit:]]\{1,2\}\).*$/\1/p'
)
# Configure a bridge inside the node that workloads can attach to
${_cri_bin} exec "${node}" ip link add "${SECONDARY_NETWORK_NAME}" type bridge
${_cri_bin} exec "${node}" ip link set "${secondary_interface}" master "${SECONDARY_NETWORK_NAME}"
${_cri_bin} exec "${node}" ip link set up "${SECONDARY_NETWORK_NAME}"
# Move the ip address from the secondary interface to the newly created bridge
${_cri_bin} exec "${node}" ip address del "${ip}" dev "${secondary_interface}"
${_cri_bin} exec "${node}" ip address add "${ip}" dev "${SECONDARY_NETWORK_NAME}"
}
deploy_kubevirt() {
echo "Deploying KubeVirt"
${KUBECTL} apply -f "https://github.com/kubevirt/kubevirt/releases/download/${KUBEVIRT_VERSION}/kubevirt-operator.yaml"
${KUBECTL} apply -f "https://github.com/kubevirt/kubevirt/releases/download/${KUBEVIRT_VERSION}/kubevirt-cr.yaml"
if ! is_nested_virt_enabled; then
echo "Configuring Kubevirt to use emulation"
${KUBECTL} patch kubevirt kubevirt --namespace kubevirt --type=merge --patch '{"spec":{"configuration":{"developerConfiguration":{"useEmulation":true}}}}'
fi
echo "Waiting for KubeVirt to be ready"
${KUBECTL} wait --for=condition=Available kubevirt kubevirt --namespace=kubevirt --timeout=5m
echo "Successfully deployed KubeVirt:"
${KUBECTL} get pods -n kubevirt
}
# Taken from:
# https://github.com/ovn-org/ovn-kubernetes/blob/59e0b62f4048be3df5b364b894b495f52f729cf1/contrib/kind.sh#L1241
# Succeed (exit 0) when nested virtualization is enabled on the host, as
# reported by the kvm_intel or kvm_amd kernel module parameters; kvm_intel
# is consulted first, matching the original if/elif order.
is_nested_virt_enabled() {
    local param
    local kvm_nested="unknown"
    for param in /sys/module/kvm_intel/parameters/nested /sys/module/kvm_amd/parameters/nested; do
        if [ -f "$param" ]; then
            kvm_nested=$(cat "$param")
            break
        fi
    done
    case "$kvm_nested" in
        1 | Y | y) return 0 ;;
        *) return 1 ;;
    esac
}
deploy_kubevirt_common_instancetypes() {
echo "Deploying KubeVirt common-instancetypes"
${KUBECTL} apply -f "https://github.com/kubevirt/common-instancetypes/releases/download/${KUBEVIRT_COMMON_INSTANCETYPES_VERSION}/common-instancetypes-all-bundle-${KUBEVIRT_COMMON_INSTANCETYPES_VERSION}.yaml"
}
deploy_cnao() {
echo "Deploying CNAO (with multus and bridge CNIs)"
${KUBECTL} apply -f "https://github.com/kubevirt/cluster-network-addons-operator/releases/download/${CNAO_VERSION}/namespace.yaml"
${KUBECTL} apply -f "https://github.com/kubevirt/cluster-network-addons-operator/releases/download/${CNAO_VERSION}/network-addons-config.crd.yaml"
${KUBECTL} apply -f "https://github.com/kubevirt/cluster-network-addons-operator/releases/download/${CNAO_VERSION}/operator.yaml"
cat <<EOF | ${KUBECTL} apply -f -
apiVersion: networkaddonsoperator.network.kubevirt.io/v1
kind: NetworkAddonsConfig
metadata:
name: cluster
spec:
imagePullPolicy: IfNotPresent
linuxBridge: {}
multus: {}
EOF
echo "Waiting for CNAO to be ready"
${KUBECTL} wait --for condition=Available networkaddonsconfig cluster --timeout=5m
echo "Successfully deployed CNAO:"
${KUBECTL} get networkaddonsconfig cluster -o yaml
}
create_nad() {
echo "Creating NetworkAttachmentDefinition (with bridge CNI)"
cat <<EOF | ${KUBECTL} apply -f -
apiVersion: k8s.cni.cncf.io/v1
kind: NetworkAttachmentDefinition
metadata:
name: ${SECONDARY_NETWORK_NAME}
namespace: ${NAMESPACE}
spec:
config: |
{
"cniVersion": "0.3.1",
"name": "${SECONDARY_NETWORK_NAME}",
"type": "bridge",
"bridge": "${SECONDARY_NETWORK_NAME}",
"ipam": {
"type": "host-local",
"ranges": [
[
{
"subnet": "${SECONDARY_NETWORK_SUBNET}",
"rangeStart": "${SECONDARY_NETWORK_RANGE_START}",
"rangeEnd": "${SECONDARY_NETWORK_RANGE_END}",
"gateway": "${SECONDARY_NETWORK_GATEWAY}"
}
]
]
}
}
EOF
echo "Successfully created NetworkAttachmentDefinition:"
${KUBECTL} get networkattachmentdefinition.k8s.cni.cncf.io "${SECONDARY_NETWORK_NAME}" --namespace "${NAMESPACE}" -o yaml
}
cleanup() {
DOCKER_HOST=unix://${_cri_socket} ${KIND} delete cluster --name "${CLUSTER_NAME}"
${_cri_bin} network rm "${SECONDARY_NETWORK_NAME}"
}
# Print a one-line synopsis of all supported command line flags.
# Fixes: the --configure-secondary-network flag handled by parse_args was
# missing from the synopsis, and `echo -n` suppressed the trailing newline.
usage() {
    echo "$0 [--install-kind] [--install-kubectl] [--configure-inotify-limits] [--create-cluster] [--configure-secondary-network] [--deploy-kubevirt] [--deploy-kubevirt-common-instancetypes] [--deploy-cnao] [--create-nad] [--cleanup] [--namespace <name>]"
}
set_default_options() {
OPT_INSTALL_KIND=false
OPT_INSTALL_KUBECTL=false
OPT_CONFIGURE_INOTIFY_LIMITS=false
OPT_CREATE_CLUSTER=false
OPT_CONFIGURE_SECONDARY_NETWORK=false
OPT_DEPLOY_KUBEVIRT=false
OPT_DEPLOY_KUBEVIRT_COMMON_INSTANCETYPES=false
OPT_DEPLOY_CNAO=false
OPT_CREATE_NAD=false
OPT_CLEANUP=false
}
parse_args() {
while [ "$1" != "" ]; do
case "$1" in
--install-kind) OPT_INSTALL_KIND=true ;;
--install-kubectl) OPT_INSTALL_KUBECTL=true ;;
--configure-inotify-limits) OPT_CONFIGURE_INOTIFY_LIMITS=true ;;
--create-cluster) OPT_CREATE_CLUSTER=true ;;
--configure-secondary-network) OPT_CONFIGURE_SECONDARY_NETWORK=true ;;
--deploy-kubevirt) OPT_DEPLOY_KUBEVIRT=true ;;
--deploy-kubevirt-common-instancetypes) OPT_DEPLOY_KUBEVIRT_COMMON_INSTANCETYPES=true ;;
--deploy-cnao) OPT_DEPLOY_CNAO=true ;;
--create-nad) OPT_CREATE_NAD=true ;;
--cleanup) OPT_CLEANUP=true ;;
--namespace)
shift
NAMESPACE=$1
;;
-v | --verbose)
set -x
;;
--help)
usage
exit
;;
*)
usage
exit 1
;;
esac
shift
done
}
set_default_params
set_default_options
parse_args "$@"
# Detect the CRI to use, can be rootful podman or docker
detect_cri
set -euo pipefail
# Set defaults if no args were passed to script
if [ "${ARGCOUNT}" -eq "0" ]; then
OPT_INSTALL_KIND=true
OPT_INSTALL_KUBECTL=true
OPT_CREATE_CLUSTER=true
OPT_CONFIGURE_SECONDARY_NETWORK=true
OPT_DEPLOY_KUBEVIRT=true
OPT_DEPLOY_KUBEVIRT_COMMON_INSTANCETYPES=true
OPT_DEPLOY_CNAO=true
OPT_CREATE_NAD=true
fi
if [ "${OPT_CLEANUP}" == true ]; then
cleanup
exit 0
fi
if [ "${OPT_INSTALL_KIND}" == true ]; then
install_kind
fi
if [ "${OPT_INSTALL_KUBECTL}" == true ]; then
install_kubectl
fi
if [ "${OPT_CONFIGURE_INOTIFY_LIMITS}" == true ]; then
configure_inotify_limits
fi
if [ "${OPT_CREATE_CLUSTER}" == true ]; then
create_cluster
fi
if [ "${OPT_CONFIGURE_SECONDARY_NETWORK}" == true ]; then
configure_secondary_network
fi
if [ "${OPT_DEPLOY_KUBEVIRT}" == true ]; then
deploy_kubevirt
fi
if [ "${OPT_DEPLOY_KUBEVIRT_COMMON_INSTANCETYPES}" == true ]; then
deploy_kubevirt_common_instancetypes
fi
if [ "${OPT_DEPLOY_CNAO}" == true ]; then
deploy_cnao
fi
if [ "${OPT_CREATE_NAD}" == true ]; then
create_nad
fi

2
meta/runtime.yml Normal file
View File

@@ -0,0 +1,2 @@
---
requires_ansible: '>= 2.11.0' # Use '>= 2.9.10' instead, if needed

View File

@@ -0,0 +1,687 @@
# -*- coding: utf-8 -*-
# Copyright 2023 Red Hat, Inc.
# Based on the kubernetes.core.k8s inventory
# Apache License 2.0 (see LICENSE)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
name: kubevirt
short_description: KubeVirt inventory source
author:
- "KubeVirt Project (kubevirt.io)"
description:
- Fetch running VirtualMachineInstances for one or more namespaces with an optional label selector.
- Groups by namespace, namespace_vmis and labels.
- Uses the kubectl connection plugin to access the Kubernetes cluster.
- Uses kubevirt.(yml|yaml) YAML configuration file to set parameter values.
extends_documentation_fragment:
- inventory_cache
- constructed
options:
plugin:
description: Token that ensures this is a source file for the "kubevirt" plugin.
required: True
choices: ["kubevirt", "kubernetes.kubevirt.kubevirt"]
host_format:
description:
- 'Specify the format of the host in the inventory group. Available specifiers: name, namespace, uid.'
default: "{namespace}-{name}"
connections:
description:
- Optional list of cluster connection settings. If no connections are provided, the default
I(~/.kube/config) and active context will be used, and objects will be returned for all namespaces
the active user is authorized to access.
suboptions:
name:
description:
- Optional name to assign to the cluster. If not provided, a name is constructed from the server
and port.
kubeconfig:
description:
- Path to an existing Kubernetes config file. If not provided, and no other connection
options are provided, the Kubernetes client will attempt to load the default
configuration file from I(~/.kube/config). Can also be specified via K8S_AUTH_KUBECONFIG
environment variable.
context:
description:
- The name of a context found in the config file. Can also be specified via K8S_AUTH_CONTEXT environment
variable.
host:
description:
- Provide a URL for accessing the API. Can also be specified via K8S_AUTH_HOST environment variable.
api_key:
description:
- Token used to authenticate with the API. Can also be specified via K8S_AUTH_API_KEY environment
variable.
username:
description:
- Provide a username for authenticating with the API. Can also be specified via K8S_AUTH_USERNAME
environment variable.
password:
description:
- Provide a password for authenticating with the API. Can also be specified via K8S_AUTH_PASSWORD
environment variable.
client_cert:
description:
- Path to a certificate used to authenticate with the API. Can also be specified via K8S_AUTH_CERT_FILE
environment variable.
aliases: [ cert_file ]
client_key:
description:
- Path to a key file used to authenticate with the API. Can also be specified via K8S_AUTH_KEY_FILE
environment variable.
aliases: [ key_file ]
ca_cert:
description:
- Path to a CA certificate used to authenticate with the API. Can also be specified via
K8S_AUTH_SSL_CA_CERT environment variable.
aliases: [ ssl_ca_cert ]
validate_certs:
description:
- Whether or not to verify the API server's SSL certificates. Can also be specified via
K8S_AUTH_VERIFY_SSL environment variable.
type: bool
aliases: [ verify_ssl ]
namespaces:
description:
- List of namespaces. If not specified, will fetch all VirtualMachineInstances for all namespaces
the user is authorized to access.
label_selector:
description:
- Define a label selector to select a subset of the fetched VirtualMachineInstances.
network_name:
description:
- In case multiple networks are attached to a VirtualMachineInstance, define which interface should
be returned as primary IP address.
aliases: [ interface_name ]
kube_secondary_dns:
description:
- Enable kubesecondarydns derived host names when using a secondary network interface.
type: bool
default: False
use_service:
description:
- Enable the use of services to establish an SSH connection to the VirtualMachine.
type: bool
default: True
api_version:
description:
- Specify the used KubeVirt API version.
default: "kubevirt.io/v1"
requirements:
- "python >= 3.6"
- "kubernetes >= 12.0.0"
- "PyYAML >= 3.11"
"""
EXAMPLES = """
# Filename must end with kubevirt.[yml|yaml]
# Authenticate with token, and return all VirtualMachineInstances for all accessible namespaces
plugin: kubernetes.kubevirt.kubevirt
connections:
- host: https://192.168.64.4:8443
api_key: xxxxxxxxxxxxxxxx
validate_certs: false
# Use default config (~/.kube/config) file and active context, and return VirtualMachineInstances
# from namespace testing with interfaces connected to network bridge-network
plugin: kubernetes.kubevirt.kubevirt
connections:
- namespaces:
- testing
network_name: bridge-network
# Use default config (~/.kube/config) file and active context, and return VirtualMachineInstances
# from namespace testing with label app=test
plugin: kubernetes.kubevirt.kubevirt
connections:
- namespaces:
- testing
label_selector: app=test
# Use a custom config file, and a specific context.
plugin: kubernetes.kubevirt.kubevirt
connections:
- kubeconfig: /path/to/config
context: 'awx/192-168-64-4:8443/developer'
"""
from dataclasses import dataclass
from json import loads
from typing import (
Any,
Dict,
List,
Optional,
Tuple,
Union,
)
import traceback
try:
from kubernetes.dynamic.resource import ResourceField
from kubernetes.dynamic.exceptions import DynamicApiError
except ImportError:
HAS_K8S = False
K8S_IMPORT_ERROR = traceback.format_exc()
else:
HAS_K8S = True
K8S_IMPORT_ERROR = None
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
from ansible_collections.kubernetes.core.plugins.module_utils.common import (
HAS_K8S_MODULE_HELPER,
k8s_import_exception,
)
from ansible_collections.kubernetes.core.plugins.module_utils.k8s.client import (
get_api_client,
K8SClient,
)
LABEL_KUBEVIRT_IO_DOMAIN = "kubevirt.io/domain"
TYPE_LOADBALANCER = "LoadBalancer"
TYPE_NODEPORT = "NodePort"
class KubeVirtInventoryException(Exception):
    """Raised for unrecoverable errors while building the KubeVirt inventory."""

    pass
@dataclass
class GetVmiOptions:
    """
    This class holds the options defined by the user.

    Per-connection settings controlling how VirtualMachineInstances are
    fetched and how inventory host names are built.
    """

    # KubeVirt API group/version used to look up the VMI resource.
    api_version: Optional[str] = None
    # Kubernetes label selector limiting the fetched VMIs.
    label_selector: Optional[str] = None
    # Name of the interface whose IP is used as the primary address.
    network_name: Optional[str] = None
    # Whether to build kubesecondarydns-derived host names.
    kube_secondary_dns: Optional[bool] = None
    # Whether SSH-exposing services may provide ansible_host/ansible_port.
    use_service: Optional[bool] = None
    # Cluster base domain appended to secondary DNS host names.
    base_domain: Optional[str] = None
    # Format string for inventory host names ({namespace}, {name}, {uid}).
    host_format: Optional[str] = None

    def __post_init__(self):
        # Set defaults in __post_init__ to allow instantiating the class with None values
        if self.api_version is None:
            self.api_version = "kubevirt.io/v1"
        if self.kube_secondary_dns is None:
            self.kube_secondary_dns = False
        if self.use_service is None:
            self.use_service = True
        if self.host_format is None:
            self.host_format = "{namespace}-{name}"
class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
    """
    This class implements the actual inventory module.

    It populates hosts and groups from KubeVirt VirtualMachineInstances
    found in one or more configured Kubernetes clusters.
    """

    NAME = "kubernetes.kubevirt.kubevirt"

    # Connection plugin and transport advertised for kubectl-based access.
    connection_plugin = "kubernetes.core.kubectl"
    transport = "kubectl"
@staticmethod
def get_default_host_name(host: str) -> str:
"""
get_default_host_name strips URL schemes from the host name and
replaces invalid characters.
"""
return (
host.replace("https://", "")
.replace("http://", "")
.replace(".", "-")
.replace(":", "_")
)
@staticmethod
def format_dynamic_api_exc(exc: DynamicApiError) -> str:
"""
format_dynamic_api_exc tries to extract the message from the JSON body
of a DynamicApiError.
"""
if exc.body:
if exc.headers and exc.headers.get("Content-Type") == "application/json":
message = loads(exc.body).get("message")
if message:
return message
return exc.body
return f"{exc.status} Reason: {exc.reason}"
@staticmethod
def get_host_from_service(service: Dict, node_name: str) -> Optional[str]:
"""
get_host_from_service extracts the hostname to be used from the
passed in service.
"""
# LoadBalancer services can return a hostname or an IP address
if service["spec"]["type"] == TYPE_LOADBALANCER:
ingress = service["status"]["loadBalancer"].get("ingress")
if ingress is not None and len(ingress) > 0:
hostname = ingress[0].get("hostname")
ip_address = ingress[0].get("ip")
return hostname if hostname is not None else ip_address
# NodePort services use the node name as host
if service["spec"]["type"] == TYPE_NODEPORT:
return node_name
return None
    @staticmethod
    def get_port_from_service(service: Dict) -> Optional[int]:
        """
        get_port_from_service extracts the port to be used from the
        passed in service.

        Returns the service port for LoadBalancers, the nodePort for
        NodePorts and None for any other service type.
        """
        # LoadBalancer services use the port attribute
        if service["spec"]["type"] == TYPE_LOADBALANCER:
            return service["spec"]["ports"][0]["port"]
        # NodePort services use the nodePort attribute
        if service["spec"]["type"] == TYPE_NODEPORT:
            return service["spec"]["ports"][0]["nodePort"]
        return None
    def __init__(self) -> None:
        super().__init__()
        # Host name format string; populated from the config file in parse().
        self.host_format = None
    def verify_file(self, path: str) -> bool:
        """
        verify_file ensures the inventory file is compatible with this plugin.

        Only files named *kubevirt.yml or *kubevirt.yaml are accepted.
        """
        return super().verify_file(path) and path.endswith(
            ("kubevirt.yml", "kubevirt.yaml")
        )
    def parse(self, inventory: Any, loader: Any, path: str, cache: bool = True) -> None:
        """
        parse runs basic setup of the inventory.

        Reads the inventory config file, remembers the host_format option
        and delegates the actual work to setup().
        """
        super().parse(inventory, loader, path)
        cache_key = self._get_cache_prefix(path)
        config_data = self._read_config_data(path)
        self.host_format = config_data.get("host_format")
        self.setup(config_data, cache, cache_key)
    def setup(self, config_data: Dict, cache: bool, cache_key: str) -> None:
        """
        setup checks for availability of the Kubernetes Python client,
        gets the configured connections and runs fetch_objects on them.
        If there is a cache it is returned instead.
        """
        connections = config_data.get("connections")
        if not HAS_K8S_MODULE_HELPER:
            raise KubeVirtInventoryException(
                "This module requires the Kubernetes Python client. "
                + f"Try `pip install kubernetes`. Detail: {k8s_import_exception}"
            )
        source_data = None
        if cache and cache_key in self._cache:
            try:
                source_data = self._cache[cache_key]
            except KeyError:
                # Cache entry disappeared between the membership check and the read.
                pass
        # NOTE(review): when cached data is found, fetch_objects is skipped but
        # source_data is never used to populate the inventory — confirm whether
        # cached runs are expected to yield an empty inventory.
        if not source_data:
            self.fetch_objects(connections)
    def fetch_objects(self, connections: Optional[List]) -> None:
        """
        fetch_objects populates the inventory with every configured connection.

        Each connection entry may override client settings and filters;
        without any connections a default client (e.g. ~/.kube/config) is
        used with default options.
        """
        if connections:
            if not isinstance(connections, list):
                raise KubeVirtInventoryException("Expecting connections to be a list.")
            for connection in connections:
                if not isinstance(connection, dict):
                    raise KubeVirtInventoryException(
                        "Expecting connection to be a dictionary."
                    )
                client = get_api_client(**connection)
                # Fall back to a name derived from the API server URL.
                name = connection.get(
                    "name", self.get_default_host_name(client.configuration.host)
                )
                if connection.get("namespaces"):
                    namespaces = connection["namespaces"]
                else:
                    namespaces = self.get_available_namespaces(client)
                opts = GetVmiOptions(
                    connection.get("api_version"),
                    connection.get("label_selector"),
                    # interface_name is the documented alias of network_name.
                    connection.get("network_name", connection.get("interface_name")),
                    connection.get("kube_secondary_dns"),
                    connection.get("use_service"),
                    connection.get("base_domain", self.get_cluster_domain(client)),
                    self.host_format,
                )
                for namespace in namespaces:
                    self.get_vmis_for_namespace(client, name, namespace, opts)
        else:
            # No connections configured: default client, all accessible
            # namespaces, default options (note: base_domain stays None here).
            client = get_api_client()
            name = self.get_default_host_name(client.configuration.host)
            namespaces = self.get_available_namespaces(client)
            opts = GetVmiOptions(host_format=self.host_format)
            for namespace in namespaces:
                self.get_vmis_for_namespace(client, name, namespace, opts)
    def get_cluster_domain(self, client: K8SClient) -> Optional[str]:
        """
        get_cluster_domain tries to get the base domain of an OpenShift cluster.

        Returns None on clusters where the config.openshift.io DNS resource
        does not exist (plain Kubernetes) or when the lookup fails.
        """
        try:
            v1_dns = client.resources.get(
                api_version="config.openshift.io/v1", kind="DNS"
            )
        except Exception:
            # If resource not found return None
            return None
        try:
            obj = v1_dns.get(name="cluster")
        except DynamicApiError as exc:
            self.display.debug(
                f"Failed to fetch cluster DNS config: {self.format_dynamic_api_exc(exc)}"
            )
            return None
        return obj.get("spec", {}).get("baseDomain")
def get_available_namespaces(self, client: K8SClient) -> List:
"""
get_available_namespaces lists all namespaces accessible with the
configured credentials and returns them.
"""
v1_namespace = client.resources.get(api_version="v1", kind="Namespace")
try:
obj = v1_namespace.get()
except DynamicApiError as exc:
self.display.debug(exc)
raise KubeVirtInventoryException(
f"Error fetching Namespace list: {self.format_dynamic_api_exc(exc)}"
) from exc
return [namespace.metadata.name for namespace in obj.items]
def get_vmis_for_namespace(
self, client: K8SClient, name: str, namespace: str, opts: GetVmiOptions
) -> None:
"""
get_vmis_for_namespace lists all VirtualMachineInstances in a namespace
and adds groups and hosts to the inventory.
"""
vmi_client = client.resources.get(
api_version=opts.api_version, kind="VirtualMachineInstance"
)
try:
vmi_list = vmi_client.get(
namespace=namespace, label_selector=opts.label_selector
)
except DynamicApiError as exc:
self.display.debug(exc)
raise KubeVirtInventoryException(
f"Error fetching VirtualMachineInstance list: {self.format_dynamic_api_exc(exc)}"
) from exc
services = self.get_ssh_services_for_namespace(client, namespace)
namespace_group = f"namespace_{namespace}"
namespace_vmis_group = f"{namespace_group}_vmis"
name = self._sanitize_group_name(name)
namespace_group = self._sanitize_group_name(namespace_group)
namespace_vmis_group = self._sanitize_group_name(namespace_vmis_group)
self.inventory.add_group(name)
self.inventory.add_group(namespace_group)
self.inventory.add_child(name, namespace_group)
self.inventory.add_group(namespace_vmis_group)
self.inventory.add_child(namespace_group, namespace_vmis_group)
for vmi in vmi_list.items:
if not (vmi.status and vmi.status.interfaces):
continue
# Find interface by its name:
if opts.network_name is None:
interface = vmi.status.interfaces[0]
else:
interface = next(
(i for i in vmi.status.interfaces if i.name == opts.network_name),
None,
)
# If interface is not found or IP address is not reported skip this VM:
if interface is None or interface.ipAddress is None:
continue
vmi_name = opts.host_format.format(
namespace=vmi.metadata.namespace,
name=vmi.metadata.name,
uid=vmi.metadata.uid,
)
vmi_groups = []
vmi_annotations = (
{}
if not vmi.metadata.annotations
else self.__resource_field_to_dict(vmi.metadata.annotations)
)
if vmi.metadata.labels:
# create a group for each label_value
for key, value in vmi.metadata.labels:
group_name = f"label_{key}_{value}"
group_name = self._sanitize_group_name(group_name)
if group_name not in vmi_groups:
vmi_groups.append(group_name)
self.inventory.add_group(group_name)
vmi_labels = self.__resource_field_to_dict(vmi.metadata.labels)
else:
vmi_labels = {}
# Add vmi to the namespace group, and to each label_value group
self.inventory.add_host(vmi_name)
self.inventory.add_child(namespace_vmis_group, vmi_name)
for group in vmi_groups:
self.inventory.add_child(group, vmi_name)
# Set up the connection
self.inventory.set_variable(vmi_name, "ansible_connection", "ssh")
self.set_ansible_host_and_port(
vmi,
vmi_name,
interface.ipAddress,
services.get(vmi.metadata.labels.get(LABEL_KUBEVIRT_IO_DOMAIN)),
opts,
)
# Add hostvars from metadata
self.inventory.set_variable(vmi_name, "object_type", "vmi")
self.inventory.set_variable(vmi_name, "labels", vmi_labels)
self.inventory.set_variable(vmi_name, "annotations", vmi_annotations)
self.inventory.set_variable(
vmi_name, "cluster_name", vmi.metadata.clusterName
)
self.inventory.set_variable(
vmi_name, "resource_version", vmi.metadata.resourceVersion
)
self.inventory.set_variable(vmi_name, "uid", vmi.metadata.uid)
# Add hostvars from status
vmi_active_pods = (
{}
if not vmi.status.activePods
else self.__resource_field_to_dict(vmi.status.activePods)
)
self.inventory.set_variable(vmi_name, "vmi_active_pods", vmi_active_pods)
vmi_conditions = (
[]
if not vmi.status.conditions
else [self.__resource_field_to_dict(c) for c in vmi.status.conditions]
)
self.inventory.set_variable(vmi_name, "vmi_conditions", vmi_conditions)
vmi_guest_os_info = (
{}
if not vmi.status.guestOSInfo
else self.__resource_field_to_dict(vmi.status.guestOSInfo)
)
self.inventory.set_variable(
vmi_name, "vmi_guest_os_info", vmi_guest_os_info
)
vmi_interfaces = (
[]
if not vmi.status.interfaces
else [self.__resource_field_to_dict(i) for i in vmi.status.interfaces]
)
self.inventory.set_variable(vmi_name, "vmi_interfaces", vmi_interfaces)
self.inventory.set_variable(
vmi_name,
"vmi_launcher_container_image_version",
vmi.status.launcherContainerImageVersion,
)
self.inventory.set_variable(
vmi_name, "vmi_migration_method", vmi.status.migrationMethod
)
self.inventory.set_variable(
vmi_name, "vmi_migration_transport", vmi.status.migrationTransport
)
self.inventory.set_variable(vmi_name, "vmi_node_name", vmi.status.nodeName)
self.inventory.set_variable(vmi_name, "vmi_phase", vmi.status.phase)
vmi_phase_transition_timestamps = (
[]
if not vmi.status.phaseTransitionTimestamps
else [
self.__resource_field_to_dict(p)
for p in vmi.status.phaseTransitionTimestamps
]
)
self.inventory.set_variable(
vmi_name,
"vmi_phase_transition_timestamps",
vmi_phase_transition_timestamps,
)
self.inventory.set_variable(vmi_name, "vmi_qos_class", vmi.status.qosClass)
self.inventory.set_variable(
vmi_name,
"vmi_virtual_machine_revision_name",
vmi.status.virtualMachineRevisionName,
)
vmi_volume_status = (
[]
if not vmi.status.volumeStatus
else [self.__resource_field_to_dict(v) for v in vmi.status.volumeStatus]
)
self.inventory.set_variable(
vmi_name, "vmi_volume_status", vmi_volume_status
)
def get_ssh_services_for_namespace(self, client: K8SClient, namespace: str) -> Dict:
"""
get_ssh_services_for_namespace retrieves all services of a namespace exposing port 22/ssh.
The services are mapped to the name of the corresponding domain.
"""
v1_service = client.resources.get(api_version="v1", kind="Service")
try:
service_list = v1_service.get(
namespace=namespace,
)
except DynamicApiError as exc:
self.display.debug(exc)
raise KubeVirtInventoryException(
f"Error fetching Service list: {self.format_dynamic_api_exc(exc)}"
) from exc
services = {}
for service in service_list.items:
# Continue if service is not of type LoadBalancer or NodePort
if service.get("spec", {}).get("type") not in (
TYPE_LOADBALANCER,
TYPE_NODEPORT,
):
continue
# Continue if ports are not defined, there are more than one port mapping
# or the target port is not port 22/ssh
ports = service["spec"].get("ports")
if ports is None or len(ports) != 1 or ports[0].get("targetPort") != 22:
continue
# Only add the service to the dict if the domain selector is present
domain = service["spec"].get("selector", {}).get(LABEL_KUBEVIRT_IO_DOMAIN)
if domain is not None:
services[domain] = service
return services
    def set_ansible_host_and_port(
        self,
        vmi: Dict,
        vmi_name: str,
        ip_address: str,
        service: Optional[Dict],
        opts: GetVmiOptions,
    ) -> None:
        """
        set_ansible_host_and_port sets the ansible_host and possibly the ansible_port var.
        Secondary interfaces have priority over a service exposing SSH

        Precedence: kubesecondarydns name (when enabled and network_name is
        set), then an SSH-exposing service, then the interface IP address.
        """
        ansible_host = None
        if opts.kube_secondary_dns and opts.network_name is not None:
            # Set ansible_host to the kubesecondarydns derived host name if enabled
            # See https://github.com/kubevirt/kubesecondarydns#parameters
            ansible_host = (
                f"{opts.network_name}.{vmi.metadata.name}.{vmi.metadata.namespace}.vm"
            )
            if opts.base_domain is not None:
                ansible_host += f".{opts.base_domain}"
        elif opts.use_service and service is not None:
            # Set ansible_host and ansible_port to the host and port from the LoadBalancer
            # or NodePort service exposing SSH
            host = self.get_host_from_service(service, vmi.status.nodeName)
            port = self.get_port_from_service(service)
            if host is not None and port is not None:
                ansible_host = host
                self.inventory.set_variable(vmi_name, "ansible_port", port)
        # Default to the IP address of the interface if ansible_host was not set prior
        if ansible_host is None:
            ansible_host = ip_address
        self.inventory.set_variable(vmi_name, "ansible_host", ansible_host)
    def __resource_field_to_dict(
        self, field: Union[Dict, List, ResourceField, Tuple]
    ) -> Any:
        """
        Recursively convert a ResourceField into plain dicts/lists; scalars
        are returned unchanged, so the result type mirrors the input.

        Replace this with ResourceField.to_dict() once available in a stable release of
        the Kubernetes Python client
        See
        https://github.com/kubernetes-client/python/blob/main/kubernetes/base/dynamic/resource.py#L393
        """
        if isinstance(field, ResourceField):
            return {
                k: self.__resource_field_to_dict(v) for k, v in field.__dict__.items()
            }
        if isinstance(field, (list, tuple)):
            # Lists and tuples are normalized to lists of converted items.
            return [self.__resource_field_to_dict(item) for item in field]
        return field

View File

@@ -0,0 +1,427 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2023 Red Hat, Inc.
# Based on the kubernetes.core.k8s module
# Apache License 2.0 (see LICENSE)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
module: kubevirt_vm
short_description: Create or delete KubeVirt VirtualMachines on Kubernetes
author:
- "KubeVirt Project (kubevirt.io)"
description:
- Use the Kubernetes Python client to perform create or delete operations on KubeVirt VirtualMachines.
- Pass options to create the VirtualMachine as module arguments.
- Authenticate using either a config file, certificates, password or token.
- Supports check mode.
extends_documentation_fragment:
- kubernetes.core.k8s_auth_options
- kubernetes.core.k8s_state_options
- kubernetes.core.k8s_delete_options
options:
api_version:
description:
- Use this to set the API version of KubeVirt.
type: str
default: kubevirt.io/v1
name:
description:
- Specify the name of the VirtualMachine.
- This option is ignored when I(state) is not set to C(present).
- mutually exclusive with C(generate_name).
type: str
generate_name:
description:
- Specify the basis of the VirtualMachine name and random characters will be added automatically on server to
generate a unique name.
- Only used when I(state=present).
- mutually exclusive with C(name).
type: str
namespace:
description:
- Specify the namespace of the VirtualMachine.
type: str
required: yes
annotations:
description:
- Specify annotations to set on the VirtualMachine.
- Only used when I(state=present).
type: dict
labels:
description:
- Specify labels to set on the VirtualMachine.
type: dict
running:
description:
- Specify whether the VirtualMachine should be running.
type: bool
default: yes
termination_grace_period:
description:
- Specify the termination grace period of the VirtualMachine to provide
time for shutting down the guest.
type: int
default: 180
instancetype:
description:
- Specify the instancetype of the VirtualMachine.
- Only used when I(state=present).
type: str
preference:
description:
- Specify the preference of the VirtualMachine.
- Only used when I(state=present).
type: str
infer_from_volume:
description:
- Specify volumes to infer an instancetype or a preference from.
- Only used when I(state=present).
type: dict
suboptions:
instancetype:
description:
- Name of the volume to infer the instancetype from.
type: str
preference:
description:
- Name of the volume to infer the preference from.
type: str
clear_revision_name:
description:
- Specify to clear the revision name of the instancetype or preference.
- Only used when I(state=present).
type: dict
suboptions:
instancetype:
description:
- Clear the revision name of the instancetype.
type: bool
default: no
preference:
description:
- Clear the revision name of the preference.
type: bool
default: no
interfaces:
description:
- Specify the interfaces of the VirtualMachine.
- 'See: https://kubevirt.io/api-reference/main/definitions.html#_v1_interface'
type: list
elements: 'dict'
networks:
description:
- Specify the networks of the VirtualMachine.
- 'See: https://kubevirt.io/api-reference/main/definitions.html#_v1_network'
type: list
elements: 'dict'
volumes:
description:
- Specify the volumes of the VirtualMachine.
- 'See: https://kubevirt.io/api-reference/main/definitions.html#_v1_volume'
type: list
elements: 'dict'
wait:
description:
- Whether to wait for the VirtualMachine to end up in the ready state.
type: bool
default: no
wait_sleep:
description:
- Number of seconds to sleep between checks.
- Ignored if C(wait) is not set.
default: 5
type: int
wait_timeout:
description:
- How long in seconds to wait for the resource to end up in the desired state.
- Ignored if C(wait) is not set.
default: 120
type: int
requirements:
- "python >= 3.6"
- "kubernetes >= 12.0.0"
- "PyYAML >= 3.11"
- "jsonpatch"
- "jinja2"
"""
EXAMPLES = """
- name: Create a VirtualMachine
kubernetes.kubevirt.kubevirt_vm:
state: present
name: testvm
namespace: default
labels:
app: test
instancetype: u1.medium
preference: fedora
interfaces:
- name: default
masquerade: {}
- name: bridge-network
bridge: {}
networks:
- name: default
pod: {}
- name: bridge-network
multus:
networkName: kindexgw
volumes:
- containerDisk:
image: quay.io/containerdisks/fedora:latest
name: containerdisk
- cloudInitNoCloud:
userData: |-
#cloud-config
# The default username is: fedora
ssh_authorized_keys:
- ssh-ed25519 AAAA...
name: cloudinit
- name: Delete a VirtualMachine
kubernetes.kubevirt.kubevirt_vm:
name: testvm
namespace: default
state: absent
"""
RETURN = """
result:
description:
- The created object. Will be empty in the case of a deletion.
type: complex
returned: success
contains:
changed:
description: Whether the VirtualMachine was changed
type: bool
sample: True
duration:
description: elapsed time of task in seconds
returned: when C(wait) is true
type: int
sample: 48
method:
description: Method executed on the Kubernetes API.
returned: success
type: str
"""
from copy import deepcopy
from typing import Dict
import traceback
from ansible_collections.kubernetes.core.plugins.module_utils.ansiblemodule import (
AnsibleModule,
)
from ansible_collections.kubernetes.core.plugins.module_utils.args_common import (
AUTH_ARG_SPEC,
COMMON_ARG_SPEC,
DELETE_OPTS_ARG_SPEC,
)
from ansible_collections.kubernetes.core.plugins.module_utils.k8s import (
runner,
)
from ansible_collections.kubernetes.core.plugins.module_utils.k8s.core import (
AnsibleK8SModule,
)
from ansible_collections.kubernetes.core.plugins.module_utils.k8s.exceptions import (
CoreException,
)
try:
import yaml
except ImportError:
HAS_YAML = False
YAML_IMPORT_ERROR = traceback.format_exc()
else:
HAS_YAML = True
YAML_IMPORT_ERROR = None
try:
from jinja2 import Environment
except ImportError:
HAS_JINJA = False
JINJA_IMPORT_ERROR = traceback.format_exc()
else:
HAS_JINJA = True
JINJA_IMPORT_ERROR = None
VM_TEMPLATE = """
apiVersion: {{ api_version }}
kind: VirtualMachine
metadata:
{% if name %}
name: "{{ name }}"
{% endif %}
{% if generate_name %}
generateName: "{{ generate_name }}"
{% endif %}
namespace: "{{ namespace }}"
{% if annotations %}
annotations:
{{ annotations | to_yaml | indent(4) }}
{%- endif %}
{% if labels %}
labels:
{{ labels | to_yaml | indent(4) }}
{%- endif %}
spec:
{% if instancetype or infer_from_volume.instancetype %}
instancetype:
{% if instancetype %}
name: "{{ instancetype }}"
{% endif %}
{% if infer_from_volume.instancetype %}
inferFromVolume: "{{ infer_from_volume.instancetype }}"
{% endif %}
{% if clear_revision_name.instancetype %}
revisionName: ""
{% endif %}
{% endif %}
{% if preference or infer_from_volume.preference %}
preference:
{% if preference %}
name: "{{ preference }}"
{% endif %}
{% if infer_from_volume.preference %}
inferFromVolume: "{{ infer_from_volume.preference }}"
{% endif %}
{% if clear_revision_name.preference %}
revisionName: ""
{% endif %}
{% endif %}
running: {{ running }}
template:
{% if annotations or labels %}
metadata:
{% if annotations %}
annotations:
{{ annotations | to_yaml | indent(8) }}
{%- endif %}
{% if labels %}
labels:
{{ labels | to_yaml | indent(8) }}
{%- endif %}
{% endif %}
spec:
domain:
{% if interfaces %}
devices:
interfaces:
{{ interfaces | to_yaml | indent(10) }}
{%- else %}
devices: {}
{% endif %}
{% if networks %}
networks:
{{ networks | to_yaml | indent(6) }}
{%- endif %}
{% if volumes %}
volumes:
{{ volumes | to_yaml | indent(6) }}
{%- endif %}
terminationGracePeriodSeconds: {{ termination_grace_period }}
"""
def render_template(params: Dict) -> str:
    """
    render_template uses Jinja2 to render the VM_TEMPLATE into a string.

    A "to_yaml" filter is registered so dict/list module parameters can be
    embedded as YAML; extra positional arguments are discarded and keyword
    arguments are forwarded to yaml.dump.
    """
    env = Environment(autoescape=False, trim_blocks=True, lstrip_blocks=True)
    env.filters["to_yaml"] = lambda data, *_, **kw: yaml.dump(
        data, allow_unicode=True, default_flow_style=False, **kw
    )
    template = env.from_string(VM_TEMPLATE.strip())
    return template.render(params)
def arg_spec() -> Dict:
    """
    arg_spec defines the argument spec of this module.

    Combines the module specific options with the common auth, resource and
    delete option specs shared by the kubernetes.core collection.
    """
    # Module specific options; keys mirror the DOCUMENTATION block.
    spec = {
        "api_version": {"default": "kubevirt.io/v1"},
        "name": {},
        "generate_name": {},
        "namespace": {"required": True},
        "annotations": {"type": "dict"},
        "labels": {"type": "dict"},
        "running": {"type": "bool", "default": True},
        "termination_grace_period": {"type": "int", "default": 180},
        "instancetype": {},
        "preference": {},
        "infer_from_volume": {
            "type": "dict",
            "options": {"instancetype": {}, "preference": {}},
        },
        "clear_revision_name": {
            "type": "dict",
            "options": {
                "instancetype": {"type": "bool", "default": False},
                "preference": {"type": "bool", "default": False},
            },
        },
        "interfaces": {"type": "list", "elements": "dict"},
        "networks": {"type": "list", "elements": "dict"},
        "volumes": {"type": "list", "elements": "dict"},
        "wait": {"type": "bool", "default": False},
        "wait_sleep": {"type": "int", "default": 5},
        "wait_timeout": {"type": "int", "default": 120},
    }
    # Shared specs are deep-copied so later mutation cannot leak upstream.
    spec.update(deepcopy(AUTH_ARG_SPEC))
    spec.update(deepcopy(COMMON_ARG_SPEC))
    spec["delete_options"] = {
        "type": "dict",
        "default": None,
        "options": deepcopy(DELETE_OPTS_ARG_SPEC),
    }
    return spec
def main() -> None:
    """
    main instantiates the AnsibleK8SModule, creates the resource
    definition and runs the module.

    The rendered VM manifest is injected as resource_definition so the
    generic kubernetes.core runner performs the actual API operations.
    """
    module = AnsibleK8SModule(
        module_class=AnsibleModule,
        argument_spec=arg_spec(),
        mutually_exclusive=[
            ("name", "generate_name"),
        ],
        required_one_of=[
            ("name", "generate_name"),
        ],
        required_together=[("interfaces", "networks")],
        supports_check_mode=True,
    )
    # Set resource_definition to our rendered template
    module.params["resource_definition"] = render_template(module.params)
    # Set wait_condition to allow waiting for the ready state of the VirtualMachine
    module.params["wait_condition"] = {"type": "Ready", "status": True}
    try:
        runner.run_module(module)
    except CoreException as exc:
        module.fail_from_exception(exc)
if __name__ == "__main__":
main()

4
requirements.txt Normal file
View File

@@ -0,0 +1,4 @@
kubernetes>=12.0.0
PyYAML>=3.11
jsonpatch
jinja2

3
requirements.yml Normal file
View File

@@ -0,0 +1,3 @@
collections:
- name: kubernetes.core
version: '>=2.0.0'

View File

@@ -0,0 +1,59 @@
#!/usr/bin/env bash
# Integration test: run the kubevirt inventory plugin against a mock
# Kubernetes API server and diff the result against a golden file.
set -eux

source virtualenv.sh

pip install kubernetes PyYAML jsonpatch Jinja2

# Start the mock API server (server.py) in the background.
./server.py &

cleanup() {
    # NOTE(review): "$(jobs -p)" expands all job PIDs as a single word; this
    # works while there is one background job — revisit if more are added.
    kill -9 "$(jobs -p)"
}
trap cleanup INT TERM EXIT

# Fake auth file
mkdir -p ~/.kube/
cat <<EOF > ~/.kube/config
apiVersion: v1
clusters:
- cluster:
    insecure-skip-tls-verify: true
    server: http://localhost:12345
  name: development
contexts:
- context:
    cluster: development
    user: developer
  name: dev-frontend
current-context: dev-frontend
kind: Config
preferences: {}
users:
- name: developer
  user:
    token: ZDNg7LzSlp8a0u0fht_tRnPMTOjxqgJGCyi_iy0ecUw
EOF

#################################################
# RUN THE PLUGIN
#################################################

# Run the inventory plugin against the mock server.
export ANSIBLE_INVENTORY_ENABLED=kubernetes.kubevirt.kubevirt

cat << EOF > "$OUTPUT_DIR/test.kubevirt.yml"
plugin: kubernetes.kubevirt.kubevirt
connections:
- namespaces:
  - default
EOF

ansible-inventory -vvvv -i "$OUTPUT_DIR/test.kubevirt.yml" --list --output="$OUTPUT_DIR/plugin.out"

#################################################
# DIFF THE RESULTS
#################################################
# test.out is the committed golden inventory for the mock server's data.
diff "$(pwd)/test.out" "$OUTPUT_DIR/plugin.out"

View File

@@ -0,0 +1,322 @@
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import json
import os
from http import HTTPStatus
from http.server import HTTPServer
from http.server import SimpleHTTPRequestHandler
from threading import Thread
from urllib.parse import urlparse
class TestHandler(SimpleHTTPRequestHandler):
    """
    Request handler that dispatches to per-path callables registered in the
    class-level 'handlers' mapping; GET falls back to file serving and POST
    to a 404 when no handler matches.
    """

    # Path handlers: maps a URL path to a callable taking this handler.
    handlers = {}

    def log_message(self, format, *args):
        """
        Empty method, so we don't mix output of HTTP server with tests
        """

    def do_GET(self):
        """Dispatch GET to a registered handler or serve files."""
        requested = urlparse(self.path)
        if requested.path in self.handlers:
            self.handlers[requested.path](self)
        else:
            SimpleHTTPRequestHandler.do_GET(self)

    def do_POST(self):
        """Dispatch POST to a registered handler or respond 404."""
        requested = urlparse(self.path)
        if requested.path in self.handlers:
            self.handlers[requested.path](self)
        else:
            self.send_error(HTTPStatus.NOT_FOUND)
class TestServer:
    """Runs an HTTPServer backed by TestHandler in a background thread."""

    # The host and port and path used by the embedded tests web server:
    PORT = None
    # The embedded web server:
    _httpd = None
    # Thread for http server:
    _thread = None

    def set_json_response(self, path, code, body):
        """Register a canned JSON response for the given URL path."""

        def _respond(handler):
            handler.send_response(code)
            handler.send_header("Content-Type", "application/json")
            handler.end_headers()
            payload = json.dumps(body, ensure_ascii=False).encode("utf-8")
            handler.wfile.write(payload)

        TestHandler.handlers[path] = _respond

    def start_server(self, host="localhost"):
        """Start serving on port 12345 in a background thread."""
        self._httpd = HTTPServer((host, 12345), TestHandler)
        self._thread = Thread(target=self._httpd.serve_forever)
        self._thread.start()

    def stop_server(self):
        """Shut the server down and wait for its thread to exit."""
        self._httpd.shutdown()
        self._thread.join()
if __name__ == "__main__":
    # Print the server's PID so it can be identified from outside.
    print(os.getpid())
    server = TestServer()
    server.start_server()
    # Empty /version response — presumably probed by the kubernetes client
    # before regular API calls; confirm against client behavior.
    server.set_json_response(path="/version", code=200, body={})
server.set_json_response(
path="/api",
code=200,
body={
"kind": "APIVersions",
"versions": ["v1"],
"serverAddressByClientCIDRs": [
{"clientCIDR": "0.0.0.0/0", "serverAddress": "localhost:12345"}
],
},
)
server.set_json_response(
path="/api/v1",
code=200,
body={
"kind": "APIResourceList",
"groupVersion": "v1",
"resources": [
{
"name": "services",
"singularName": "service",
"namespaced": True,
"kind": "Service",
"verbs": [
"create",
"delete",
"deletecollection",
"get",
"list",
"patch",
"update",
"watch",
],
"shortNames": ["svc"],
}
],
},
)
server.set_json_response(
path="/api/v1/namespaces/default/services",
code=200,
body={
"kind": "ServiceList",
"groupVersion": "v1",
"items": [],
},
)
server.set_json_response(
path="/apis",
code=200,
body={
"kind": "APIGroupList",
"apiVersion": "v1",
"groups": [
{
"name": "kubevirt.io",
"versions": [{"groupVersion": "kubevirt.io/v1", "version": "v1"}],
"preferredVersion": {
"groupVersion": "kubevirt.io/v1",
"version": "v1",
},
}
],
},
)
server.set_json_response(
path="/apis/kubevirt.io/v1",
code=200,
body={
"kind": "APIResourceList",
"apiVersion": "v1",
"groupVersion": "kubevirt.io/v1",
"resources": [
{
"name": "virtualmachineinstances",
"singularName": "virtualmachineinstance",
"namespaced": True,
"kind": "VirtualMachineInstance",
"verbs": [
"delete",
"deletecollection",
"get",
"list",
"patch",
"create",
"update",
"watch",
],
"shortNames": ["vmi", "vmis"],
}
],
},
)
server.set_json_response(
path="/apis/kubevirt.io/v1/namespaces/default/virtualmachineinstances",
code=200,
body={
"apiVersion": "v1",
"items": [
{
"apiVersion": "kubevirt.io/v1",
"kind": "VirtualMachineInstance",
"metadata": {
"annotations": {
"kubevirt.io/latest-observed-api-version": "v1",
"kubevirt.io/storage-observed-api-version": "v1alpha3",
},
"creationTimestamp": "2022-09-14T13:43:36Z",
"finalizers": [
"kubevirt.io/virtualMachineControllerFinalize",
"foregroundDeleteVirtualMachine",
],
"generation": 9,
"labels": {
"kubevirt.io/nodeName": "node01",
"kubevirt.io/vm": "vm-cirros",
},
"name": "vm-cirros",
"namespace": "default",
"ownerReferences": [
{
"apiVersion": "kubevirt.io/v1",
"blockOwnerDeletion": True,
"controller": True,
"kind": "VirtualMachine",
"name": "vm-cirros",
"uid": "4d1b1438-91ba-4c75-a211-566fc50a06f5",
}
],
"resourceVersion": "5387",
"uid": "7b3a8d94-bd7e-4c14-818a-89228172e4f1",
},
"spec": {
"domain": {
"cpu": {
"cores": 1,
"model": "host-model",
"sockets": 1,
"threads": 1,
},
"devices": {
"disks": [
{
"disk": {"bus": "virtio"},
"name": "containerdisk",
},
{
"disk": {"bus": "virtio"},
"name": "cloudinitdisk",
},
],
"interfaces": [{"bridge": {}, "name": "default"}],
},
"features": {"acpi": {"enabled": True}},
"firmware": {
"uuid": "0d2a2043-41c0-59c3-9b17-025022203668"
},
"machine": {"type": "q35"},
"resources": {"requests": {"memory": "128Mi"}},
},
"networks": [{"name": "default", "pod": {}}],
"terminationGracePeriodSeconds": 0,
"volumes": [
{
"containerDisk": {
"image": "registry:5000/kubevirt/cirros-container-disk-demo:devel",
"imagePullPolicy": "IfNotPresent",
},
"name": "containerdisk",
},
{
"cloudInitNoCloud": {
"userData": "#!/bin/sh\n\necho 'printed from cloud-init userdata'\n"
},
"name": "cloudinitdisk",
},
],
},
"status": {
"activePods": {
"a9a6c31b-8574-46f9-8bec-70ff091c3d97": "node01"
},
"conditions": [
{
"lastProbeTime": None,
"lastTransitionTime": "2022-09-14T13:43:39Z",
"status": "True",
"type": "Ready",
},
{
"lastProbeTime": None,
"lastTransitionTime": None,
"message": "cannot migrate VMI which does not use masquerade to connect to the pod network",
"reason": "InterfaceNotLiveMigratable",
"status": "False",
"type": "LiveMigratable",
},
],
"guestOSInfo": {},
"interfaces": [
{
"infoSource": "domain",
"ipAddress": "10.244.196.152",
"ipAddresses": ["10.244.196.152", "fd10:244::c497"],
"mac": "96:13:92:4f:05:d3",
"name": "default",
"queueCount": 1,
}
],
"launcherContainerImageVersion":
"registry:5000/kubevirt/virt-launcher@sha256:5c1474d240488c9a8e6e6e48b2ad446113744353b4cd2464baee3550e6b1829d",
"migrationMethod": "BlockMigration",
"migrationTransport": "Unix",
"nodeName": "node01",
"phase": "Running",
"phaseTransitionTimestamps": [
{
"phase": "Pending",
"phaseTransitionTimestamp": "2022-09-14T13:43:36Z",
},
{
"phase": "Scheduling",
"phaseTransitionTimestamp": "2022-09-14T13:43:36Z",
},
{
"phase": "Scheduled",
"phaseTransitionTimestamp": "2022-09-14T13:43:39Z",
},
{
"phase": "Running",
"phaseTransitionTimestamp": "2022-09-14T13:43:40Z",
},
],
"qosClass": "Burstable",
"runtimeUser": 0,
"virtualMachineRevisionName": "revision-start-vm-4d1b1438-91ba-4c75-a211-566fc50a06f5-9",
"volumeStatus": [
{"name": "cloudinitdisk", "size": 1048576, "target": "vdb"},
{"name": "containerdisk", "target": "vda"},
],
},
}
],
"kind": "List",
"metadata": {"resourceVersion": "", "selfLink": ""},
},
)

View File

@@ -0,0 +1,124 @@
{
"_meta": {
"hostvars": {
"default-vm-cirros": {
"annotations": {
"kubevirt.io/latest-observed-api-version": "v1",
"kubevirt.io/storage-observed-api-version": "v1alpha3"
},
"ansible_connection": "ssh",
"ansible_host": "10.244.196.152",
"cluster_name": null,
"labels": {
"kubevirt.io/nodeName": "node01",
"kubevirt.io/vm": "vm-cirros"
},
"object_type": "vmi",
"resource_version": "5387",
"uid": "7b3a8d94-bd7e-4c14-818a-89228172e4f1",
"vmi_active_pods": {
"a9a6c31b-8574-46f9-8bec-70ff091c3d97": "node01"
},
"vmi_conditions": [
{
"lastProbeTime": null,
"lastTransitionTime": "2022-09-14T13:43:39Z",
"status": "True",
"type": "Ready"
},
{
"lastProbeTime": null,
"lastTransitionTime": null,
"message": "cannot migrate VMI which does not use masquerade to connect to the pod network",
"reason": "InterfaceNotLiveMigratable",
"status": "False",
"type": "LiveMigratable"
}
],
"vmi_guest_os_info": {},
"vmi_interfaces": [
{
"infoSource": "domain",
"ipAddress": "10.244.196.152",
"ipAddresses": [
"10.244.196.152",
"fd10:244::c497"
],
"mac": "96:13:92:4f:05:d3",
"name": "default",
"queueCount": 1
}
],
"vmi_launcher_container_image_version": "registry:5000/kubevirt/virt-launcher@sha256:5c1474d240488c9a8e6e6e48b2ad446113744353b4cd2464baee3550e6b1829d",
"vmi_migration_method": "BlockMigration",
"vmi_migration_transport": "Unix",
"vmi_node_name": "node01",
"vmi_phase": "Running",
"vmi_phase_transition_timestamps": [
{
"phase": "Pending",
"phaseTransitionTimestamp": "2022-09-14T13:43:36Z"
},
{
"phase": "Scheduling",
"phaseTransitionTimestamp": "2022-09-14T13:43:36Z"
},
{
"phase": "Scheduled",
"phaseTransitionTimestamp": "2022-09-14T13:43:39Z"
},
{
"phase": "Running",
"phaseTransitionTimestamp": "2022-09-14T13:43:40Z"
}
],
"vmi_qos_class": "Burstable",
"vmi_virtual_machine_revision_name": "revision-start-vm-4d1b1438-91ba-4c75-a211-566fc50a06f5-9",
"vmi_volume_status": [
{
"name": "cloudinitdisk",
"size": 1048576,
"target": "vdb"
},
{
"name": "containerdisk",
"target": "vda"
}
]
}
}
},
"all": {
"children": [
"ungrouped",
"localhost_12345",
"label_kubevirt_io_nodeName_node01",
"label_kubevirt_io_vm_vm_cirros"
]
},
"label_kubevirt_io_nodeName_node01": {
"hosts": [
"default-vm-cirros"
]
},
"label_kubevirt_io_vm_vm_cirros": {
"hosts": [
"default-vm-cirros"
]
},
"localhost_12345": {
"children": [
"namespace_default"
]
},
"namespace_default": {
"children": [
"namespace_default_vmis"
]
},
"namespace_default_vmis": {
"hosts": [
"default-vm-cirros"
]
}
}