+
+
+

Welcome to Kubevirt Collection documentation

+
+

Pick collection version:

+
    diff --git a/docs/conf.py b/docs/conf.py new file mode 100644 index 0000000..1faf986 --- /dev/null +++ b/docs/conf.py @@ -0,0 +1,170 @@ +# -*- coding: utf-8 -*- +# +# Configuration file for the Sphinx documentation builder. +# +# This file does only contain a selection of the most common options. For a +# full list see the documentation: +# http://www.sphinx-doc.org/en/master/config + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +import datetime +import os +import sys +sys.path.insert(0, os.path.abspath('../plugins/module_utils/')) +# sys.path.insert(0, os.path.abspath('.')) + +# -- Project information ----------------------------------------------------- + +project = 'Kubevirt Ansible Collection' +copyright = '{y} Red Hat, Inc.'.format(y=datetime.date.today().year) +author = 'Red Hat, Inc.' + +# The short X.Y version +version = '' +# The full version, including alpha/beta/rc tags +release = '' + + +# -- General configuration --------------------------------------------------- + +# If your documentation needs a minimal Sphinx version, state it here. +# +# needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'myst_parser', + 'sphinx.ext.autodoc', + 'sphinx.ext.intersphinx', + 'ansible_basic_sphinx_ext', +] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# +# source_suffix = ['.rst', '.md'] +source_suffix = ['.rst', '.md'] + +# The master toctree document. 
+master_doc = 'index' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = None + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path . +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '.tmp'] + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +highlight_language = 'YAML+Jinja' + +# -- Options for HTML output ------------------------------------------------- +html_theme_path = ['_themes'] +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +# html_theme = 'alabaster' +html_theme = 'sphinx_rtd_theme' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +# +# html_theme_options = {} + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = [] + +# Custom sidebar templates, must be a dictionary that maps document names +# to template names. +# +# The default sidebars (for documents that don't match any pattern) are +# defined by theme itself. Builtin themes are using these templates by +# default: ``['localtoc.html', 'relations.html', 'sourcelink.html', +# 'searchbox.html']``. +# +# html_sidebars = {} + + +# -- Options for HTMLHelp output --------------------------------------------- + +# Output file base name for HTML help builder. 
+htmlhelp_basename = 'KubevirtCollectionDoc' + + +# -- Options for LaTeX output ------------------------------------------------ + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # + # 'papersize': 'letterpaper', + + # The font size ('10pt', '11pt' or '12pt'). + # + # 'pointsize': '10pt', + + # Additional stuff for the LaTeX preamble. + # + # 'preamble': '', + + # Latex figure (float) alignment + # + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + (master_doc, 'KubevirtCollection.tex', 'Red Hat Kubevirt Ansible Collection Documentation', + 'Red Hat, Inc.', 'manual'), +] + + +# -- Options for manual page output ------------------------------------------ + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + (master_doc, 'KubevirtCollection', 'Red Hat Kubevirt Ansible Collection Documentation', + [author], 1) +] + + +# -- Options for Texinfo output ---------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + (master_doc, 'KubevirtCollection', 'Red Hat Kubevirt Ansible Collection Documentation', + author, 'KubevirtCollection', 'One line description of project.', + 'Miscellaneous'), +] + + +# -- Extension configuration ------------------------------------------------- + +# -- Options for intersphinx extension --------------------------------------- + +# Example configuration for intersphinx: refer to the Python standard library. 
+intersphinx_mapping = {'python': ('https://docs.python.org/2', None), 'ansible': ('https://docs.ansible.com/ansible/latest/', None)} diff --git a/docs/developing.md b/docs/developing.md new file mode 100644 index 0000000..ca04283 --- /dev/null +++ b/docs/developing.md @@ -0,0 +1,13 @@ +## Contributor's Guidelines + +- All YAML files named with `.yml` extension +- Use spaces around jinja variables. `{{ var }}` over `{{var}}` +- Variables that are internal to the role should be lowercase and start with the role name +- Keep roles self contained - Roles should avoid including tasks from other roles when possible +- Plays should do nothing more than include a list of roles except where `pre_tasks` and `post_tasks` are required when possible +- Separators - Use valid name, ie. underscores (e.g. `my_role` `my_playbook`) not dashes (`my-role`) +- Paths - When defining paths, do not include trailing slashes (e.g. `my_path: /foo` not `my_path: /foo/`). When concatenating paths, follow the same convention (e.g. `{{ my_path }}/bar` not `{{ my_path }}bar`) +- Indentation - Use 2 spaces for each indent +- `vars/` vs `defaults/` - internal or interpolated variables that don't need to change or be overridden by user go in `vars/`, those that a user would likely override, go under `defaults/` directory +- All arguments have a specification in `meta/argument_specs.yml` +- All playbooks/roles should be focused on compatibility with Ansible Automation Platform diff --git a/docs/docsite/links.yml b/docs/docsite/links.yml new file mode 100644 index 0000000..d760eb5 --- /dev/null +++ b/docs/docsite/links.yml @@ -0,0 +1,45 @@ +--- +# This will make sure that plugin and module documentation gets Edit on GitHub links +# that allow users to directly create a PR for this plugin or module in GitHub's UI. +# Remove this section if the collection repository is not on GitHub, or if you do not want this +# functionality for your collection. 
+edit_on_github: + repository: ansible-collections/community.REPO_NAME + branch: main + # If your collection root (the directory containing galaxy.yml) does not coincide with your + # repository's root, you have to specify the path to the collection root here. For example, + # if the collection root is in a subdirectory ansible_collections/community/REPO_NAME + # in your repository, you have to set path_prefix to 'ansible_collections/community/REPO_NAME'. + path_prefix: '' + +# Here you can add arbitrary extra links. Please keep the number of links down to a +# minimum! Also please keep the description short, since this will be the text put on +# a button. +# +# Also note that some links are automatically added from information in galaxy.yml. +# The following are automatically added: +# 1. A link to the issue tracker (if `issues` is specified); +# 2. A link to the homepage (if `homepage` is specified and does not equal the +# `documentation` or `repository` link); +# 3. A link to the collection's repository (if `repository` is specified). + +extra_links: + - description: Report an issue + url: https://github.com/ansible-collections/community.REPO_NAME/issues/new/choose + +# Specify communication channels for your collection. We suggest to not specify more +# than one place for communication per communication tool to avoid confusion. +communication: + matrix_rooms: + - topic: General usage and support questions + room: '#users:ansible.im' + irc_channels: + - topic: General usage and support questions + network: Libera + channel: '#ansible' + mailing_lists: + - topic: Ansible Project List + url: https://groups.google.com/g/ansible-project + # You can also add a `subscribe` field with an URI that allows to subscribe + # to the mailing list. For lists on https://groups.google.com/ a subscribe link is + # automatically generated. 
diff --git a/docs/index.rst b/docs/index.rst new file mode 100644 index 0000000..5058f92 --- /dev/null +++ b/docs/index.rst @@ -0,0 +1,26 @@ +.. Red Hat kubernetes kubevirt Ansible Collection documentation main file + +Welcome to Kubevirt Collection documentation +============================================ + +.. toctree:: + :maxdepth: 2 + :caption: User documentation + + README + plugins/index + roles/index + +.. toctree:: + :maxdepth: 2 + :caption: Developer documentation + + testing + developing + releasing + +.. toctree:: + :maxdepth: 2 + :caption: General + + Changelog diff --git a/docs/releasing.md b/docs/releasing.md new file mode 100644 index 0000000..537ba1e --- /dev/null +++ b/docs/releasing.md @@ -0,0 +1,61 @@ +# Collection Versioning Strategy + +Each supported collection maintained by Ansible follows Semantic Versioning 2.0.0 (https://semver.org/), for example: +Given a version number MAJOR.MINOR.PATCH, the following is incremented: + +MAJOR version: when making incompatible API changes (see Feature Release scenarios below for examples) + +MINOR version: when adding features or functionality in a backwards compatible manner, or updating testing matrix and/or metadata (deprecation) + +PATCH version: when adding backwards compatible bug fixes or security fixes (strict). + +Additional labels for pre-release and build metadata are available as extensions to the MAJOR.MINOR.PATCH format. + +The first version of a generally available supported collection on Ansible Automation Hub shall be version 1.0.0. NOTE: By default, all newly created collections may begin with a smaller default version of 0.1.0, and therefore a version of 1.0.0 should be explicitly stated by the collection maintainer. + +## New content is added to an existing collection + +Assuming the current release is 1.0.0, and a new module is ready to be added to the collection, the minor version would be incremented to 1.1.0.
The change in the MINOR version indicates an additive change was made while maintaining backward compatibility for existing content within the collection. + + +## New feature to existing plugin or role within a collection (backwards compatible) + +Assuming the current release is 1.0.0, and new features for an existing module are ready for release, we would increment the MINOR version to 1.1.0. The change in the MINOR version indicates an additive change was made while maintaining backward compatibility for existing content within the collection. + + +## Bug fix or security fix to existing content within a collection + +Assuming the current release is 1.0.0 and a bug is fixed prior to the next minor release, the PATCH version would be incremented to 1.0.1. The patch indicates only a bug was fixed within a current version. The PATCH release does not contain new content, nor was functionality removed. Bug fixes may be included in a MINOR or MAJOR feature release if the timing allows, eliminating the need for a PATCH dedicated to the fix. + + +## Breaking change to any content within a collection + +Assuming the current release is 1.0.0, and a breaking change (API or module) is introduced for a user or developer, the MAJOR version would be incremented to 2.0.0. + +Examples of breaking changes within a collection may include but are not limited to: + + - Argspec changes for a module that require either inventory structure or playbook changes. + - A change in the shape of either the inbound or returned payload of a filter plugin. + - Changes to a connection plugin that require additional inventory parameters or ansible.cfg entries. + - New functionality added to a module that changes the outcome of that module as released in previous versions. + - The removal of plugins from a collection. + + +## Content removed from a collection + +Deleting a module or API is a breaking change. Please see the 'Breaking change' section for how to version this.
+ + +## A typographical error was fixed in the documentation for a collection + +A correction to the README would be considered a bug fix and the PATCH incremented. See 'Bug fix' above. + + +## Documentation added/removed/modified within a collection + +Only the PATCH version should be increased for a release that contains changes limited to revised documentation. + + +## Release automation + +New releases are triggered by annotated git tags named after semantic versioning. The automation publishes the built artifacts to ansible-galaxy and github releases page. diff --git a/docs/requirements.txt b/docs/requirements.txt new file mode 100644 index 0000000..c43be53 --- /dev/null +++ b/docs/requirements.txt @@ -0,0 +1,7 @@ +antsibull>=0.17.0 +antsibull-docs +antsibull-changelog +ansible-core>=2.14.1 +sphinx-rtd-theme +git+https://github.com/felixfontein/ansible-basic-sphinx-ext +myst-parser diff --git a/docs/roles.rst.template b/docs/roles.rst.template new file mode 100644 index 0000000..52dfdcd --- /dev/null +++ b/docs/roles.rst.template @@ -0,0 +1,4 @@ +Role Index +========== + +.. toctree:: diff --git a/docs/testing.md b/docs/testing.md new file mode 100644 index 0000000..f9343f2 --- /dev/null +++ b/docs/testing.md @@ -0,0 +1,35 @@ +# Testing + +## Continuous integration + +The collection is tested with a [molecule](https://github.com/ansible-community/molecule) setup covering the included roles and verifying correct installation and idempotency. 
+In order to run the molecule tests locally with python 3.9 available, after cloning the repository: + +``` +pip install yamllint 'molecule[docker]~=3.5.2' ansible-core flake8 ansible-lint voluptuous +molecule test --all +``` + +## Test playbooks + +Sample playbooks are provided in the `playbooks/` directory; to run the playbooks locally (requires a rhel system with python 3.9+, ansible, and systemd) the steps are as follows: + +``` +# setup environment +pip install ansible-core +# clone the repository +git clone https://github.com/ansible-middleware/amq +cd amq +# install collection dependencies +ansible-galaxy collection install -r requirements.yml +# install collection python deps +pip install -r requirements.txt +# create inventory for localhost +cat << EOF > inventory +[amq] +localhost ansible_connection=local +EOF +# run the playbook +ansible-playbook -i inventory playbooks/activemq.yml +``` + diff --git a/examples/inventory.kubevirt.yml b/examples/inventory.kubevirt.yml new file mode 100644 index 0000000..d3b0e09 --- /dev/null +++ b/examples/inventory.kubevirt.yml @@ -0,0 +1,6 @@ +plugin: kubernetes.kubevirt.kubevirt +connections: + - namespaces: + - default + network_name: bridge-network + label_selector: app=test diff --git a/examples/kubesecondarydns.kubevirt.yml b/examples/kubesecondarydns.kubevirt.yml new file mode 100644 index 0000000..94e5747 --- /dev/null +++ b/examples/kubesecondarydns.kubevirt.yml @@ -0,0 +1,6 @@ +plugin: kubernetes.kubevirt.kubevirt +connections: + - namespaces: + - default + network_name: bridge-network + kube_secondary_dns: yes diff --git a/examples/play-create.yml b/examples/play-create.yml new file mode 100644 index 0000000..441f380 --- /dev/null +++ b/examples/play-create.yml @@ -0,0 +1,34 @@ +- hosts: localhost + tasks: + - name: Create VM + kubernetes.kubevirt.kubevirt_vm: + state: present + name: testvm + namespace: default + labels: + app: test + instancetype: u1.medium + preference: fedora + interfaces: + - name: default 
+ masquerade: {} + - name: bridge-network + bridge: {} + networks: + - name: default + pod: {} + - name: bridge-network + multus: + networkName: kindexgw + volumes: + - containerDisk: + image: quay.io/containerdisks/fedora:latest + name: containerdisk + - cloudInitNoCloud: + userData: |- + #cloud-config + # The default username is: fedora + ssh_authorized_keys: + - ssh-ed25519 AAAA... + name: cloudinit + wait: yes diff --git a/examples/play-delete.yml b/examples/play-delete.yml new file mode 100644 index 0000000..a14e5bc --- /dev/null +++ b/examples/play-delete.yml @@ -0,0 +1,8 @@ +- hosts: localhost + tasks: + - name: Delete VM + kubernetes.kubevirt.kubevirt_vm: + name: testvm + namespace: default + state: absent + wait: yes diff --git a/examples/services.kubevirt.yml b/examples/services.kubevirt.yml new file mode 100644 index 0000000..e766204 --- /dev/null +++ b/examples/services.kubevirt.yml @@ -0,0 +1,5 @@ +plugin: kubernetes.kubevirt.kubevirt +connections: + - namespaces: + - default + use_service: yes \ No newline at end of file diff --git a/galaxy.yml b/galaxy.yml new file mode 100644 index 0000000..a3c8671 --- /dev/null +++ b/galaxy.yml @@ -0,0 +1,40 @@ +# See https://docs.ansible.com/ansible/latest/dev_guide/collections_galaxy_meta.html + +namespace: kubernetes +name: kubevirt +version: "0.1.0" +readme: README.md +authors: + - KubeVirt Project (kubevirt.io) +dependencies: + kubernetes.core: '>=2.0.0' +description: Lean Ansible bindings for KubeVirt +license_file: LICENSE +tags: +# tags so people can search for collections https://galaxy.ansible.com/search +# tags are all lower-case, no spaces, no dashes. 
+ - api + - k8s + - kubernetes + - kubevirt + - virtualization + - cloud + - infrastructure +repository: https://github.com/kubevirt/kubernetes.kubevirt +documentation: https://github.com/kubevirt/kubernetes.kubevirt/tree/main/docs +homepage: https://kubevirt.io +issues: https://github.com/kubevirt/kubernetes.kubevirt/issues +build_ignore: + - .gitignore + - changelogs/.plugin-cache.yaml + - .github + - .ansible-lint + - .yamllint + - '*.tar.gz' + - '*.zip' + - molecule + - changelogs + - docs/_gh_include + - docs/conf.py + - docs/roles.rst.template + - docs/requirements.yml diff --git a/hack/e2e-setup.sh b/hack/e2e-setup.sh new file mode 100755 index 0000000..fb9b4c0 --- /dev/null +++ b/hack/e2e-setup.sh @@ -0,0 +1,369 @@ +#!/usr/bin/env bash +# +# This file is part of the KubeVirt project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Copyright 2023 Red Hat, Inc. 
+# + +# This script is based on: +# - https://github.com/ovn-org/ovn-kubernetes/blob/master/contrib/kind.sh +# - https://github.com/kiagnose/kiagnose/blob/main/automation/e2e.sh +# - https://github.com/kiagnose/kiagnose/blob/main/checkups/kubevirt-vm-latency/automation/e2e.sh + +ARGCOUNT=$# +# Returns the full directory name of the script +DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &>/dev/null && pwd)" +ARCH="" +case $(uname -m) in +x86_64) ARCH="amd64" ;; +aarch64) ARCH="arm64" ;; +esac + +set_default_params() { + BIN_DIR=${BIN_DIR:-$DIR/../bin} + + KIND=${KIND:-$BIN_DIR/kind} + KIND_VERSION=${KIND_VERSION:-v0.20.0} + + KUBECTL=${KUBECTL:-$BIN_DIR/kubectl} + KUBECTL_VERSION=${KUBECTL_VERSION:-v1.27.3} + + KUBEVIRT_VERSION=${KUBEVIRT_VERSION:-v1.0.0} + KUBEVIRT_COMMON_INSTANCETYPES_VERSION=${KUBEVIRT_COMMON_INSTANCETYPES_VERSION:-v0.3.0} + KUBEVIRT_USE_EMULATION=${KUBEVIRT_USE_EMULATION:-"false"} + + CNAO_VERSION=${CNAO_VERSION:-v0.87.0} + + CLUSTER_NAME=${CLUSTER_NAME:-kind} + SECONDARY_NETWORK_NAME=${NETWORK_NAME:-kindexgw} + SECONDARY_NETWORK_SUBNET=${SECONDARY_NETWORK_SUBNET:-172.19.0.0/16} + SECONDARY_NETWORK_RANGE_START=${SECONDARY_NETWORK_RANGE_START:-172.19.1.1} + SECONDARY_NETWORK_RANGE_END=${SECONDARY_NETWORK_RANGE_END:-172.19.255.254} + SECONDARY_NETWORK_GATEWAY=${SECONDARY_NETWORK_GATEWAY:-172.19.0.1} + + NAMESPACE=${NAMESPACE:-default} +} + +# Taken from: +# https://github.com/kubevirt/kubevirtci/blob/f661bfe0e3678e5409c057855951c50a912571a0/cluster-up/cluster/ephemeral-provider-common.sh#L26C1-L45C1 +detect_cri() { + PODMAN_SOCKET=${PODMAN_SOCKET:-"/run/podman/podman.sock"} + + if [ "${CRI}" = "podman" ]; then + _cri_socket=$(detect_podman_socket) + _cri_bin="podman --remote --url=unix://$_cri_socket" + elif [ "${CRI}" = "docker" ]; then + _cri_bin=docker + _cri_socket="/var/run/docker.sock" + else + _cri_socket=$(detect_podman_socket) + if [ -n "$_cri_socket" ]; then + _cri_bin="podman --remote --url=unix://$_cri_socket" + echo >&2 "selecting 
podman as container runtime" + elif docker ps >/dev/null 2>&1; then + _cri_bin=docker + _cri_socket="/var/run/docker.sock" + echo >&2 "selecting docker as container runtime" + else + echo >&2 "no working container runtime found. Neither docker nor podman seems to work." + exit 1 + fi + fi +} + +# Taken from: +# https://github.com/kubevirt/kubevirtci/blob/f661bfe0e3678e5409c057855951c50a912571a0/cluster-up/cluster/ephemeral-provider-common.sh#L20 +detect_podman_socket() { + if curl --unix-socket "${PODMAN_SOCKET}" http://d/v3.0.0/libpod/info >/dev/null 2>&1; then + echo "${PODMAN_SOCKET}" + fi +} + +install_kind() { + if [ ! -f "${KIND}" ]; then + echo "Installing kind" + mkdir -p "${BIN_DIR}" + curl -Lo "${KIND}" "https://kind.sigs.k8s.io/dl/${KIND_VERSION}/kind-linux-${ARCH}" + chmod +x "${KIND}" + + echo "Successfully installed kind at ${KIND}:" + ${KIND} version + fi +} + +install_kubectl() { + if [ ! -f "${KUBECTL}" ]; then + echo "Installing kubectl" + mkdir -p "${BIN_DIR}" + curl -Lo "${KUBECTL}" "https://dl.k8s.io/release/${KUBECTL_VERSION}/bin/linux/${ARCH}/kubectl" + chmod +x "${KUBECTL}" + + echo "Successfully installed kubectl at ${KUBECTL}:" + ${KUBECTL} version --client + fi +} + +configure_inotify_limits() { + echo "Configuring inotify limits" + sudo sysctl fs.inotify.max_user_instances=512 + sudo sysctl fs.inotify.max_user_watches=1048576 +} + +create_cluster() { + echo "Creating cluster with kind" + DOCKER_HOST=unix://${_cri_socket} ${KIND} create cluster --wait 2m --name "${CLUSTER_NAME}" + + echo "Waiting for the network to be ready" + ${KUBECTL} wait --for=condition=ready pods --namespace=kube-system -l k8s-app=kube-dns --timeout=2m + + echo "K8S cluster is up:" + ${KUBECTL} get nodes -o wide +} + +configure_secondary_network() { + echo "Configuring secondary network" + # Name of the single kind node + local node=${CLUSTER_NAME}-control-plane + # Interface added when connecting the secondary network + local secondary_interface=eth1 + + 
${_cri_bin} network create "${SECONDARY_NETWORK_NAME}" --driver=bridge --subnet="${SECONDARY_NETWORK_SUBNET}" + ${_cri_bin} network connect "${SECONDARY_NETWORK_NAME}" "${node}" + + # Get the ip address assigned to the interface of the secondary network on the node + local ip + ip=$( + ${_cri_bin} exec "${node}" ip ad show dev "${secondary_interface}" scope global | + sed -n 's/^ inet \([[:digit:]]\{1,3\}\.[[:digit:]]\{1,3\}\.[[:digit:]]\{1,3\}\.[[:digit:]]\{1,3\}\/[[:digit:]]\{1,2\}\).*$/\1/p' + ) + + # Configure a bridge inside the node that workloads can attach to + ${_cri_bin} exec "${node}" ip link add "${SECONDARY_NETWORK_NAME}" type bridge + ${_cri_bin} exec "${node}" ip link set "${secondary_interface}" master "${SECONDARY_NETWORK_NAME}" + ${_cri_bin} exec "${node}" ip link set up "${SECONDARY_NETWORK_NAME}" + # Move the ip address from the secondary interface to the newly created bridge + ${_cri_bin} exec "${node}" ip address del "${ip}" dev "${secondary_interface}" + ${_cri_bin} exec "${node}" ip address add "${ip}" dev "${SECONDARY_NETWORK_NAME}" +} + +deploy_kubevirt() { + echo "Deploying KubeVirt" + ${KUBECTL} apply -f "https://github.com/kubevirt/kubevirt/releases/download/${KUBEVIRT_VERSION}/kubevirt-operator.yaml" + ${KUBECTL} apply -f "https://github.com/kubevirt/kubevirt/releases/download/${KUBEVIRT_VERSION}/kubevirt-cr.yaml" + + if ! 
is_nested_virt_enabled; then + echo "Configuring Kubevirt to use emulation" + ${KUBECTL} patch kubevirt kubevirt --namespace kubevirt --type=merge --patch '{"spec":{"configuration":{"developerConfiguration":{"useEmulation":true}}}}' + fi + + echo "Waiting for KubeVirt to be ready" + ${KUBECTL} wait --for=condition=Available kubevirt kubevirt --namespace=kubevirt --timeout=5m + + echo "Successfully deployed KubeVirt:" + ${KUBECTL} get pods -n kubevirt +} + +# Taken from: +# https://github.com/ovn-org/ovn-kubernetes/blob/59e0b62f4048be3df5b364b894b495f52f729cf1/contrib/kind.sh#L1241 +is_nested_virt_enabled() { + local kvm_nested="unknown" + if [ -f "/sys/module/kvm_intel/parameters/nested" ]; then + kvm_nested=$(cat /sys/module/kvm_intel/parameters/nested) + elif [ -f "/sys/module/kvm_amd/parameters/nested" ]; then + kvm_nested=$(cat /sys/module/kvm_amd/parameters/nested) + fi + [ "$kvm_nested" == "1" ] || [ "$kvm_nested" == "Y" ] || [ "$kvm_nested" == "y" ] +} + +deploy_kubevirt_common_instancetypes() { + echo "Deploying KubeVirt common-instancetypes" + ${KUBECTL} apply -f "https://github.com/kubevirt/common-instancetypes/releases/download/${KUBEVIRT_COMMON_INSTANCETYPES_VERSION}/common-instancetypes-all-bundle-${KUBEVIRT_COMMON_INSTANCETYPES_VERSION}.yaml" +} + +deploy_cnao() { + echo "Deploying CNAO (with multus and bridge CNIs)" + ${KUBECTL} apply -f "https://github.com/kubevirt/cluster-network-addons-operator/releases/download/${CNAO_VERSION}/namespace.yaml" + ${KUBECTL} apply -f "https://github.com/kubevirt/cluster-network-addons-operator/releases/download/${CNAO_VERSION}/network-addons-config.crd.yaml" + ${KUBECTL} apply -f "https://github.com/kubevirt/cluster-network-addons-operator/releases/download/${CNAO_VERSION}/operator.yaml" + + cat <= 2.11.0' # Use '>= 2.9.10' instead, if needed diff --git a/plugins/inventory/kubevirt.py b/plugins/inventory/kubevirt.py new file mode 100644 index 0000000..58308b7 --- /dev/null +++ b/plugins/inventory/kubevirt.py @@ -0,0 
+1,687 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Red Hat, Inc. +# Based on the kubernetes.core.k8s inventory +# Apache License 2.0 (see LICENSE) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = """ +name: kubevirt + +short_description: KubeVirt inventory source + +author: +- "KubeVirt Project (kubevirt.io)" + +description: +- Fetch running VirtualMachineInstances for one or more namespaces with an optional label selector. +- Groups by namespace, namespace_vmis and labels. +- Uses the kubectl connection plugin to access the Kubernetes cluster. +- Uses kubevirt.(yml|yaml) YAML configuration file to set parameter values. + +extends_documentation_fragment: +- inventory_cache +- constructed + +options: + plugin: + description: Token that ensures this is a source file for the "kubevirt" plugin. + required: True + choices: ["kubevirt", "kubernetes.kubevirt.kubevirt"] + host_format: + description: + - 'Specify the format of the host in the inventory group. Available specifiers: name, namespace, uid.' + default: "{namespace}-{name}" + connections: + description: + - Optional list of cluster connection settings. If no connections are provided, the default + I(~/.kube/config) and active context will be used, and objects will be returned for all namespaces + the active user is authorized to access. + suboptions: + name: + description: + - Optional name to assign to the cluster. If not provided, a name is constructed from the server + and port. + kubeconfig: + description: + - Path to an existing Kubernetes config file. If not provided, and no other connection + options are provided, the Kubernetes client will attempt to load the default + configuration file from I(~/.kube/config). Can also be specified via K8S_AUTH_KUBECONFIG + environment variable. + context: + description: + - The name of a context found in the config file. Can also be specified via K8S_AUTH_CONTEXT environment + variable. 
+ host: + description: + - Provide a URL for accessing the API. Can also be specified via K8S_AUTH_HOST environment variable. + api_key: + description: + - Token used to authenticate with the API. Can also be specified via K8S_AUTH_API_KEY environment + variable. + username: + description: + - Provide a username for authenticating with the API. Can also be specified via K8S_AUTH_USERNAME + environment variable. + password: + description: + - Provide a password for authenticating with the API. Can also be specified via K8S_AUTH_PASSWORD + environment variable. + client_cert: + description: + - Path to a certificate used to authenticate with the API. Can also be specified via K8S_AUTH_CERT_FILE + environment variable. + aliases: [ cert_file ] + client_key: + description: + - Path to a key file used to authenticate with the API. Can also be specified via K8S_AUTH_KEY_FILE + environment variable. + aliases: [ key_file ] + ca_cert: + description: + - Path to a CA certificate used to authenticate with the API. Can also be specified via + K8S_AUTH_SSL_CA_CERT environment variable. + aliases: [ ssl_ca_cert ] + validate_certs: + description: + - Whether or not to verify the API server's SSL certificates. Can also be specified via + K8S_AUTH_VERIFY_SSL environment variable. + type: bool + aliases: [ verify_ssl ] + namespaces: + description: + - List of namespaces. If not specified, will fetch all VirtualMachineInstances for all namespaces + the user is authorized to access. + label_selector: + description: + - Define a label selector to select a subset of the fetched VirtualMachineInstances. + network_name: + description: + - In case multiple networks are attached to a VirtualMachineInstance, define which interface should + be returned as primary IP address. + aliases: [ interface_name ] + kube_secondary_dns: + description: + - Enable kubesecondarydns derived host names when using a secondary network interface. 
+ type: bool + default: False + use_service: + description: + - Enable the use of services to establish an SSH connection to the VirtualMachine. + type: bool + default: True + api_version: + description: + - Specify the used KubeVirt API version. + default: "kubevirt.io/v1" + +requirements: +- "python >= 3.6" +- "kubernetes >= 12.0.0" +- "PyYAML >= 3.11" +""" + +EXAMPLES = """ +# Filename must end with kubevirt.[yml|yaml] + +# Authenticate with token, and return all VirtualMachineInstances for all accessible namespaces +plugin: kubernetes.kubevirt.kubevirt +connections: +- host: https://192.168.64.4:8443 + api_key: xxxxxxxxxxxxxxxx + validate_certs: false + +# Use default config (~/.kube/config) file and active context, and return VirtualMachineInstances +# from namespace testing with interfaces connected to network bridge-network +plugin: kubernetes.kubevirt.kubevirt +connections: +- namespaces: + - testing + network_name: bridge-network + +# Use default config (~/.kube/config) file and active context, and return VirtualMachineInstances +# from namespace testing with label app=test +plugin: kubernetes.kubevirt.kubevirt +connections: +- namespaces: + - testing + label_selector: app=test + +# Use a custom config file, and a specific context. 
+plugin: kubernetes.kubevirt.kubevirt +connections: +- kubeconfig: /path/to/config + context: 'awx/192-168-64-4:8443/developer' +""" + +from dataclasses import dataclass +from json import loads +from typing import ( + Any, + Dict, + List, + Optional, + Tuple, + Union, +) +import traceback + +try: + from kubernetes.dynamic.resource import ResourceField + from kubernetes.dynamic.exceptions import DynamicApiError +except ImportError: + HAS_K8S = False + K8S_IMPORT_ERROR = traceback.format_exc() +else: + HAS_K8S = True + K8S_IMPORT_ERROR = None + +from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable + +from ansible_collections.kubernetes.core.plugins.module_utils.common import ( + HAS_K8S_MODULE_HELPER, + k8s_import_exception, +) + +from ansible_collections.kubernetes.core.plugins.module_utils.k8s.client import ( + get_api_client, + K8SClient, +) + +LABEL_KUBEVIRT_IO_DOMAIN = "kubevirt.io/domain" +TYPE_LOADBALANCER = "LoadBalancer" +TYPE_NODEPORT = "NodePort" + + +class KubeVirtInventoryException(Exception): + pass + + +@dataclass +class GetVmiOptions: + """ + This class holds the options defined by the user. + """ + + api_version: Optional[str] = None + label_selector: Optional[str] = None + network_name: Optional[str] = None + kube_secondary_dns: Optional[bool] = None + use_service: Optional[bool] = None + base_domain: Optional[str] = None + host_format: Optional[str] = None + + def __post_init__(self): + # Set defaults in __post_init__ to allow instatiating class with None values + if self.api_version is None: + self.api_version = "kubevirt.io/v1" + if self.kube_secondary_dns is None: + self.kube_secondary_dns = False + if self.use_service is None: + self.use_service = True + if self.host_format is None: + self.host_format = "{namespace}-{name}" + + +class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): + """ + This class implements the actual inventory module. 
+ """ + + NAME = "kubernetes.kubevirt.kubevirt" + + connection_plugin = "kubernetes.core.kubectl" + transport = "kubectl" + + @staticmethod + def get_default_host_name(host: str) -> str: + """ + get_default_host_name strips URL schemes from the host name and + replaces invalid characters. + """ + return ( + host.replace("https://", "") + .replace("http://", "") + .replace(".", "-") + .replace(":", "_") + ) + + @staticmethod + def format_dynamic_api_exc(exc: DynamicApiError) -> str: + """ + format_dynamic_api_exc tries to extract the message from the JSON body + of a DynamicApiError. + """ + if exc.body: + if exc.headers and exc.headers.get("Content-Type") == "application/json": + message = loads(exc.body).get("message") + if message: + return message + return exc.body + + return f"{exc.status} Reason: {exc.reason}" + + @staticmethod + def get_host_from_service(service: Dict, node_name: str) -> Optional[str]: + """ + get_host_from_service extracts the hostname to be used from the + passed in service. + """ + # LoadBalancer services can return a hostname or an IP address + if service["spec"]["type"] == TYPE_LOADBALANCER: + ingress = service["status"]["loadBalancer"].get("ingress") + if ingress is not None and len(ingress) > 0: + hostname = ingress[0].get("hostname") + ip_address = ingress[0].get("ip") + return hostname if hostname is not None else ip_address + + # NodePort services use the node name as host + if service["spec"]["type"] == TYPE_NODEPORT: + return node_name + + return None + + @staticmethod + def get_port_from_service(service: Dict) -> Optional[str]: + """ + get_port_from_service extracts the port to be used from the + passed in service. 
+ """ + # LoadBalancer services use the port attribute + if service["spec"]["type"] == TYPE_LOADBALANCER: + return service["spec"]["ports"][0]["port"] + + # LoadBalancer services use the nodePort attribute + if service["spec"]["type"] == TYPE_NODEPORT: + return service["spec"]["ports"][0]["nodePort"] + + return None + + def __init__(self) -> None: + super().__init__() + self.host_format = None + + def verify_file(self, path: str) -> None: + """ + verify_file ensures the inventory file is compatible with this plugin. + """ + return super().verify_file(path) and path.endswith( + ("kubevirt.yml", "kubevirt.yaml") + ) + + def parse(self, inventory: Any, loader: Any, path: str, cache: bool = True) -> None: + """ + parse runs basic setup of the inventory. + """ + super().parse(inventory, loader, path) + cache_key = self._get_cache_prefix(path) + config_data = self._read_config_data(path) + self.host_format = config_data.get("host_format") + self.setup(config_data, cache, cache_key) + + def setup(self, config_data: Dict, cache: bool, cache_key: str) -> None: + """ + setup checks for availability of the Kubernetes Python client, + gets the configured connections and runs fetch_objects on them. + If there is a cache it is returned instead. + """ + connections = config_data.get("connections") + + if not HAS_K8S_MODULE_HELPER: + raise KubeVirtInventoryException( + "This module requires the Kubernetes Python client. " + + f"Try `pip install kubernetes`. Detail: {k8s_import_exception}" + ) + + source_data = None + if cache and cache_key in self._cache: + try: + source_data = self._cache[cache_key] + except KeyError: + pass + + if not source_data: + self.fetch_objects(connections) + + def fetch_objects(self, connections: Dict) -> None: + """ + fetch_objects populates the inventory with every configured connection. 
+ """ + if connections: + if not isinstance(connections, list): + raise KubeVirtInventoryException("Expecting connections to be a list.") + + for connection in connections: + if not isinstance(connection, dict): + raise KubeVirtInventoryException( + "Expecting connection to be a dictionary." + ) + client = get_api_client(**connection) + name = connection.get( + "name", self.get_default_host_name(client.configuration.host) + ) + if connection.get("namespaces"): + namespaces = connection["namespaces"] + else: + namespaces = self.get_available_namespaces(client) + + opts = GetVmiOptions( + connection.get("api_version"), + connection.get("label_selector"), + connection.get("network_name", connection.get("interface_name")), + connection.get("kube_secondary_dns"), + connection.get("use_service"), + connection.get("base_domain", self.get_cluster_domain(client)), + self.host_format, + ) + for namespace in namespaces: + self.get_vmis_for_namespace(client, name, namespace, opts) + else: + client = get_api_client() + name = self.get_default_host_name(client.configuration.host) + namespaces = self.get_available_namespaces(client) + opts = GetVmiOptions(host_format=self.host_format) + for namespace in namespaces: + self.get_vmis_for_namespace(client, name, namespace, opts) + + def get_cluster_domain(self, client: K8SClient) -> Optional[str]: + """ + get_cluster_domain tries to get the base domain of an OpenShift cluster. 
+ """ + try: + v1_dns = client.resources.get( + api_version="config.openshift.io/v1", kind="DNS" + ) + except Exception: + # If resource not found return None + return None + try: + obj = v1_dns.get(name="cluster") + except DynamicApiError as exc: + self.display.debug( + f"Failed to fetch cluster DNS config: {self.format_dynamic_api_exc(exc)}" + ) + return None + return obj.get("spec", {}).get("baseDomain") + + def get_available_namespaces(self, client: K8SClient) -> List: + """ + get_available_namespaces lists all namespaces accessible with the + configured credentials and returns them. + """ + v1_namespace = client.resources.get(api_version="v1", kind="Namespace") + try: + obj = v1_namespace.get() + except DynamicApiError as exc: + self.display.debug(exc) + raise KubeVirtInventoryException( + f"Error fetching Namespace list: {self.format_dynamic_api_exc(exc)}" + ) from exc + return [namespace.metadata.name for namespace in obj.items] + + def get_vmis_for_namespace( + self, client: K8SClient, name: str, namespace: str, opts: GetVmiOptions + ) -> None: + """ + get_vmis_for_namespace lists all VirtualMachineInstances in a namespace + and adds groups and hosts to the inventory. 
+ """ + vmi_client = client.resources.get( + api_version=opts.api_version, kind="VirtualMachineInstance" + ) + try: + vmi_list = vmi_client.get( + namespace=namespace, label_selector=opts.label_selector + ) + except DynamicApiError as exc: + self.display.debug(exc) + raise KubeVirtInventoryException( + f"Error fetching VirtualMachineInstance list: {self.format_dynamic_api_exc(exc)}" + ) from exc + + services = self.get_ssh_services_for_namespace(client, namespace) + + namespace_group = f"namespace_{namespace}" + namespace_vmis_group = f"{namespace_group}_vmis" + + name = self._sanitize_group_name(name) + namespace_group = self._sanitize_group_name(namespace_group) + namespace_vmis_group = self._sanitize_group_name(namespace_vmis_group) + + self.inventory.add_group(name) + self.inventory.add_group(namespace_group) + self.inventory.add_child(name, namespace_group) + self.inventory.add_group(namespace_vmis_group) + self.inventory.add_child(namespace_group, namespace_vmis_group) + + for vmi in vmi_list.items: + if not (vmi.status and vmi.status.interfaces): + continue + + # Find interface by its name: + if opts.network_name is None: + interface = vmi.status.interfaces[0] + else: + interface = next( + (i for i in vmi.status.interfaces if i.name == opts.network_name), + None, + ) + + # If interface is not found or IP address is not reported skip this VM: + if interface is None or interface.ipAddress is None: + continue + + vmi_name = opts.host_format.format( + namespace=vmi.metadata.namespace, + name=vmi.metadata.name, + uid=vmi.metadata.uid, + ) + vmi_groups = [] + vmi_annotations = ( + {} + if not vmi.metadata.annotations + else self.__resource_field_to_dict(vmi.metadata.annotations) + ) + + if vmi.metadata.labels: + # create a group for each label_value + for key, value in vmi.metadata.labels: + group_name = f"label_{key}_{value}" + group_name = self._sanitize_group_name(group_name) + if group_name not in vmi_groups: + vmi_groups.append(group_name) + 
self.inventory.add_group(group_name) + vmi_labels = self.__resource_field_to_dict(vmi.metadata.labels) + else: + vmi_labels = {} + + # Add vmi to the namespace group, and to each label_value group + self.inventory.add_host(vmi_name) + self.inventory.add_child(namespace_vmis_group, vmi_name) + for group in vmi_groups: + self.inventory.add_child(group, vmi_name) + + # Set up the connection + self.inventory.set_variable(vmi_name, "ansible_connection", "ssh") + self.set_ansible_host_and_port( + vmi, + vmi_name, + interface.ipAddress, + services.get(vmi.metadata.labels.get(LABEL_KUBEVIRT_IO_DOMAIN)), + opts, + ) + + # Add hostvars from metadata + self.inventory.set_variable(vmi_name, "object_type", "vmi") + self.inventory.set_variable(vmi_name, "labels", vmi_labels) + self.inventory.set_variable(vmi_name, "annotations", vmi_annotations) + self.inventory.set_variable( + vmi_name, "cluster_name", vmi.metadata.clusterName + ) + self.inventory.set_variable( + vmi_name, "resource_version", vmi.metadata.resourceVersion + ) + self.inventory.set_variable(vmi_name, "uid", vmi.metadata.uid) + + # Add hostvars from status + vmi_active_pods = ( + {} + if not vmi.status.activePods + else self.__resource_field_to_dict(vmi.status.activePods) + ) + self.inventory.set_variable(vmi_name, "vmi_active_pods", vmi_active_pods) + vmi_conditions = ( + [] + if not vmi.status.conditions + else [self.__resource_field_to_dict(c) for c in vmi.status.conditions] + ) + self.inventory.set_variable(vmi_name, "vmi_conditions", vmi_conditions) + vmi_guest_os_info = ( + {} + if not vmi.status.guestOSInfo + else self.__resource_field_to_dict(vmi.status.guestOSInfo) + ) + self.inventory.set_variable( + vmi_name, "vmi_guest_os_info", vmi_guest_os_info + ) + vmi_interfaces = ( + [] + if not vmi.status.interfaces + else [self.__resource_field_to_dict(i) for i in vmi.status.interfaces] + ) + self.inventory.set_variable(vmi_name, "vmi_interfaces", vmi_interfaces) + self.inventory.set_variable( + vmi_name, + 
"vmi_launcher_container_image_version", + vmi.status.launcherContainerImageVersion, + ) + self.inventory.set_variable( + vmi_name, "vmi_migration_method", vmi.status.migrationMethod + ) + self.inventory.set_variable( + vmi_name, "vmi_migration_transport", vmi.status.migrationTransport + ) + self.inventory.set_variable(vmi_name, "vmi_node_name", vmi.status.nodeName) + self.inventory.set_variable(vmi_name, "vmi_phase", vmi.status.phase) + vmi_phase_transition_timestamps = ( + [] + if not vmi.status.phaseTransitionTimestamps + else [ + self.__resource_field_to_dict(p) + for p in vmi.status.phaseTransitionTimestamps + ] + ) + self.inventory.set_variable( + vmi_name, + "vmi_phase_transition_timestamps", + vmi_phase_transition_timestamps, + ) + self.inventory.set_variable(vmi_name, "vmi_qos_class", vmi.status.qosClass) + self.inventory.set_variable( + vmi_name, + "vmi_virtual_machine_revision_name", + vmi.status.virtualMachineRevisionName, + ) + vmi_volume_status = ( + [] + if not vmi.status.volumeStatus + else [self.__resource_field_to_dict(v) for v in vmi.status.volumeStatus] + ) + self.inventory.set_variable( + vmi_name, "vmi_volume_status", vmi_volume_status + ) + + def get_ssh_services_for_namespace(self, client: K8SClient, namespace: str) -> Dict: + """ + get_ssh_services_for_namespace retrieves all services of a namespace exposing port 22/ssh. + The services are mapped to the name of the corresponding domain. 
+ """ + v1_service = client.resources.get(api_version="v1", kind="Service") + try: + service_list = v1_service.get( + namespace=namespace, + ) + except DynamicApiError as exc: + self.display.debug(exc) + raise KubeVirtInventoryException( + f"Error fetching Service list: {self.format_dynamic_api_exc(exc)}" + ) from exc + + services = {} + for service in service_list.items: + # Continue if service is not of type LoadBalancer or NodePort + if service.get("spec", {}).get("type") not in ( + TYPE_LOADBALANCER, + TYPE_NODEPORT, + ): + continue + + # Continue if ports are not defined, there are more than one port mapping + # or the target port is not port 22/ssh + ports = service["spec"].get("ports") + if ports is None or len(ports) != 1 or ports[0].get("targetPort") != 22: + continue + + # Only add the service to the dict if the domain selector is present + domain = service["spec"].get("selector", {}).get(LABEL_KUBEVIRT_IO_DOMAIN) + if domain is not None: + services[domain] = service + + return services + + def set_ansible_host_and_port( + self, + vmi: Dict, + vmi_name: str, + ip_address: str, + service: Optional[Dict], + opts: GetVmiOptions, + ) -> None: + """ + set_ansible_host_and_port sets the ansible_host and possibly the ansible_port var. 
+ Secondary interfaces have priority over a service exposing SSH + """ + ansible_host = None + if opts.kube_secondary_dns and opts.network_name is not None: + # Set ansible_host to the kubesecondarydns derived host name if enabled + # See https://github.com/kubevirt/kubesecondarydns#parameters + ansible_host = ( + f"{opts.network_name}.{vmi.metadata.name}.{vmi.metadata.namespace}.vm" + ) + if opts.base_domain is not None: + ansible_host += f".{opts.base_domain}" + elif opts.use_service and service is not None: + # Set ansible_host and ansible_port to the host and port from the LoadBalancer + # or NodePort service exposing SSH + host = self.get_host_from_service(service, vmi.status.nodeName) + port = self.get_port_from_service(service) + if host is not None and port is not None: + ansible_host = host + self.inventory.set_variable(vmi_name, "ansible_port", port) + + # Default to the IP address of the interface if ansible_host was not set prior + if ansible_host is None: + ansible_host = ip_address + + self.inventory.set_variable(vmi_name, "ansible_host", ansible_host) + + def __resource_field_to_dict( + self, field: Union[Dict, List, ResourceField, Tuple] + ) -> Dict: + """ + Replace this with ResourceField.to_dict() once available in a stable release of + the Kubernetes Python client + See + https://github.com/kubernetes-client/python/blob/main/kubernetes/base/dynamic/resource.py#L393 + """ + if isinstance(field, ResourceField): + return { + k: self.__resource_field_to_dict(v) for k, v in field.__dict__.items() + } + + if isinstance(field, (list, tuple)): + return [self.__resource_field_to_dict(item) for item in field] + + return field diff --git a/plugins/modules/kubevirt_vm.py b/plugins/modules/kubevirt_vm.py new file mode 100644 index 0000000..8630c55 --- /dev/null +++ b/plugins/modules/kubevirt_vm.py @@ -0,0 +1,427 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright 2023 Red Hat, Inc. 
+# Based on the kubernetes.core.k8s module +# Apache License 2.0 (see LICENSE) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = """ +module: kubevirt_vm + +short_description: Create or delete KubeVirt VirtualMachines on Kubernetes + +author: +- "KubeVirt Project (kubevirt.io)" + +description: +- Use the Kubernetes Python client to perform create or delete operations on KubeVirt VirtualMachines. +- Pass options to create the VirtualMachine as module arguments. +- Authenticate using either a config file, certificates, password or token. +- Supports check mode. + +extends_documentation_fragment: +- kubernetes.core.k8s_auth_options +- kubernetes.core.k8s_state_options +- kubernetes.core.k8s_delete_options + +options: + api_version: + description: + - Use this to set the API version of KubeVirt. + type: str + default: kubevirt.io/v1 + name: + description: + - Specify the name of the VirtualMachine. + - This option is ignored when I(state) is not set to C(present). + - mutually exclusive with C(generate_name). + type: str + generate_name: + description: + - Specify the basis of the VirtualMachine name and random characters will be added automatically on server to + generate a unique name. + - Only used when I(state=present). + - mutually exclusive with C(name). + type: str + namespace: + description: + - Specify the namespace of the VirtualMachine. + type: str + required: yes + annotations: + description: + - Specify annotations to set on the VirtualMachine. + - Only used when I(state=present). + type: dict + labels: + description: + - Specify labels to set on the VirtualMachine. + type: dict + running: + description: + - Specify whether the VirtualMachine should be running. + type: bool + default: yes + termination_grace_period: + description: + - Specify the termination grace period of the VirtualMachine to provide + time for shutting down the guest. 
+ type: int + default: 180 + instancetype: + description: + - Specify the instancetype of the VirtualMachine. + - Only used when I(state=present). + type: str + preference: + description: + - Specify the preference of the VirtualMachine. + - Only used when I(state=present). + type: str + infer_from_volume: + description: + - Specify volumes to infer an instancetype or a preference from. + - Only used when I(state=present). + type: dict + suboptions: + instancetype: + description: + - Name of the volume to infer the instancetype from. + type: str + preference: + description: + - Name of the volume to infer the preference from. + type: str + clear_revision_name: + description: + - Specify to clear the revision name of the instancetype or preference. + - Only used when I(state=present). + type: dict + suboptions: + instancetype: + description: + - Clear the revision name of the instancetype. + type: bool + default: no + preference: + description: + - Clear the revision name of the preference. + type: bool + default: no + interfaces: + description: + - Specify the interfaces of the VirtualMachine. + - 'See: https://kubevirt.io/api-reference/main/definitions.html#_v1_interface' + type: list + elements: 'dict' + networks: + description: + - Specify the networks of the VirtualMachine. + - 'See: https://kubevirt.io/api-reference/main/definitions.html#_v1_network' + type: list + elements: 'dict' + volumes: + description: + - Specify the volumes of the VirtualMachine. + - 'See: https://kubevirt.io/api-reference/main/definitions.html#_v1_volume' + type: list + elements: 'dict' + wait: + description: + - Whether to wait for the VirtualMachine to end up in the ready state. + type: bool + default: no + wait_sleep: + description: + - Number of seconds to sleep between checks. + - Ignored if C(wait) is not set. + default: 5 + type: int + wait_timeout: + description: + - How long in seconds to wait for the resource to end up in the desired state. + - Ignored if C(wait) is not set. 
+ default: 120 + type: int + +requirements: +- "python >= 3.6" +- "kubernetes >= 12.0.0" +- "PyYAML >= 3.11" +- "jsonpatch" +- "jinja2" +""" + +EXAMPLES = """ +- name: Create a VirtualMachine + kubernetes.kubevirt.kubevirt_vm: + state: present + name: testvm + namespace: default + labels: + app: test + instancetype: u1.medium + preference: fedora + interfaces: + - name: default + masquerade: {} + - name: bridge-network + bridge: {} + networks: + - name: default + pod: {} + - name: bridge-network + multus: + networkName: kindexgw + volumes: + - containerDisk: + image: quay.io/containerdisks/fedora:latest + name: containerdisk + - cloudInitNoCloud: + userData: |- + #cloud-config + # The default username is: fedora + ssh_authorized_keys: + - ssh-ed25519 AAAA... + name: cloudinit + +- name: Delete a VirtualMachine + kubernetes.kubevirt.kubevirt_vm: + name: testvm + namespace: default + state: absent +""" + +RETURN = """ +result: + description: + - The created object. Will be empty in the case of a deletion. + type: complex + returned: success + contains: + changed: + description: Whether the VirtualMachine was changed + type: bool + sample: True + duration: + description: elapsed time of task in seconds + returned: when C(wait) is true + type: int + sample: 48 + method: + description: Method executed on the Kubernetes API. 
+ returned: success + type: str +""" + +from copy import deepcopy +from typing import Dict +import traceback + +from ansible_collections.kubernetes.core.plugins.module_utils.ansiblemodule import ( + AnsibleModule, +) +from ansible_collections.kubernetes.core.plugins.module_utils.args_common import ( + AUTH_ARG_SPEC, + COMMON_ARG_SPEC, + DELETE_OPTS_ARG_SPEC, +) +from ansible_collections.kubernetes.core.plugins.module_utils.k8s import ( + runner, +) +from ansible_collections.kubernetes.core.plugins.module_utils.k8s.core import ( + AnsibleK8SModule, +) +from ansible_collections.kubernetes.core.plugins.module_utils.k8s.exceptions import ( + CoreException, +) + +try: + import yaml +except ImportError: + HAS_YAML = False + YAML_IMPORT_ERROR = traceback.format_exc() +else: + HAS_YAML = True + YAML_IMPORT_ERROR = None + +try: + from jinja2 import Environment +except ImportError: + HAS_JINJA = False + JINJA_IMPORT_ERROR = traceback.format_exc() +else: + HAS_JINJA = True + JINJA_IMPORT_ERROR = None + + +VM_TEMPLATE = """ +apiVersion: {{ api_version }} +kind: VirtualMachine +metadata: + {% if name %} + name: "{{ name }}" + {% endif %} + {% if generate_name %} + generateName: "{{ generate_name }}" + {% endif %} + namespace: "{{ namespace }}" + {% if annotations %} + annotations: + {{ annotations | to_yaml | indent(4) }} + {%- endif %} + {% if labels %} + labels: + {{ labels | to_yaml | indent(4) }} + {%- endif %} +spec: + {% if instancetype or infer_from_volume.instancetype %} + instancetype: + {% if instancetype %} + name: "{{ instancetype }}" + {% endif %} + {% if infer_from_volume.instancetype %} + inferFromVolume: "{{ infer_from_volume.instancetype }}" + {% endif %} + {% if clear_revision_name.instancetype %} + revisionName: "" + {% endif %} + {% endif %} + {% if preference or infer_from_volume.preference %} + preference: + {% if preference %} + name: "{{ preference }}" + {% endif %} + {% if infer_from_volume.preference %} + inferFromVolume: "{{ 
infer_from_volume.preference }}" + {% endif %} + {% if clear_revision_name.preference %} + revisionName: "" + {% endif %} + {% endif %} + running: {{ running }} + template: + {% if annotations or labels %} + metadata: + {% if annotations %} + annotations: + {{ annotations | to_yaml | indent(8) }} + {%- endif %} + {% if labels %} + labels: + {{ labels | to_yaml | indent(8) }} + {%- endif %} + {% endif %} + spec: + domain: + {% if interfaces %} + devices: + interfaces: + {{ interfaces | to_yaml | indent(10) }} + {%- else %} + devices: {} + {% endif %} + {% if networks %} + networks: + {{ networks | to_yaml | indent(6) }} + {%- endif %} + {% if volumes %} + volumes: + {{ volumes | to_yaml | indent(6) }} + {%- endif %} + terminationGracePeriodSeconds: {{ termination_grace_period }} +""" + + +def render_template(params: Dict) -> str: + """ + render_template uses Jinja2 to render the VM_TEMPLATE into a string. + """ + env = Environment(autoescape=False, trim_blocks=True, lstrip_blocks=True) + env.filters["to_yaml"] = lambda data, *_, **kw: yaml.dump( + data, allow_unicode=True, default_flow_style=False, **kw + ) + + template = env.from_string(VM_TEMPLATE.strip()) + return template.render(params) + + +def arg_spec() -> Dict: + """ + arg_spec defines the argument spec of this module. 
+ """ + spec = { + "api_version": {"default": "kubevirt.io/v1"}, + "name": {}, + "generate_name": {}, + "namespace": {"required": True}, + "annotations": {"type": "dict"}, + "labels": {"type": "dict"}, + "running": {"type": "bool", "default": True}, + "termination_grace_period": {"type": "int", "default": 180}, + "instancetype": {}, + "preference": {}, + "infer_from_volume": { + "type": "dict", + "options": {"instancetype": {}, "preference": {}}, + }, + "clear_revision_name": { + "type": "dict", + "options": { + "instancetype": {"type": "bool", "default": False}, + "preference": {"type": "bool", "default": False}, + }, + }, + "interfaces": {"type": "list", "elements": "dict"}, + "networks": {"type": "list", "elements": "dict"}, + "volumes": {"type": "list", "elements": "dict"}, + "wait": {"type": "bool", "default": False}, + "wait_sleep": {"type": "int", "default": 5}, + "wait_timeout": {"type": "int", "default": 120}, + } + spec.update(deepcopy(AUTH_ARG_SPEC)) + spec.update(deepcopy(COMMON_ARG_SPEC)) + spec["delete_options"] = { + "type": "dict", + "default": None, + "options": deepcopy(DELETE_OPTS_ARG_SPEC), + } + + return spec + + +def main() -> None: + """ + main instantiates the AnsibleK8SModule, creates the resource + definition and runs the module. 
+    """
+    module = AnsibleK8SModule(
+        module_class=AnsibleModule,
+        argument_spec=arg_spec(),
+        mutually_exclusive=[
+            ("name", "generate_name"),
+        ],
+        required_one_of=[
+            ("name", "generate_name"),
+        ],
+        required_together=[("interfaces", "networks")],
+        supports_check_mode=True,
+    )
+
+    # Set resource_definition to our rendered template
+    module.params["resource_definition"] = render_template(module.params)
+
+    # Set wait_condition to allow waiting for the ready state of the VirtualMachine
+    module.params["wait_condition"] = {"type": "Ready", "status": True}
+
+    try:
+        runner.run_module(module)
+    except CoreException as exc:
+        module.fail_from_exception(exc)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..a66dba7
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,4 @@
+kubernetes>=12.0.0
+PyYaml
+jsonpatch
+jinja2
diff --git a/requirements.yml b/requirements.yml
new file mode 100644
index 0000000..89ff613
--- /dev/null
+++ b/requirements.yml
@@ -0,0 +1,3 @@
+collections:
+  - name: kubernetes.core
+    version: '>=2.0.0'
diff --git a/tests/integration/targets/inventory_kubevirt/runme.sh b/tests/integration/targets/inventory_kubevirt/runme.sh
new file mode 100755
index 0000000..c8f629a
--- /dev/null
+++ b/tests/integration/targets/inventory_kubevirt/runme.sh
@@ -0,0 +1,59 @@
+#!/usr/bin/env bash
+
+set -eux
+
+source virtualenv.sh
+pip install kubernetes PyYAML jsonpatch Jinja2
+
+./server.py &
+
+cleanup() {
+    kill -9 "$(jobs -p)"
+}
+
+trap cleanup INT TERM EXIT
+
+# Fake auth file
+mkdir -p ~/.kube/
+cat <<EOF > ~/.kube/config
+apiVersion: v1
+clusters:
+- cluster:
+    insecure-skip-tls-verify: true
+    server: http://localhost:12345
+  name: development
+contexts:
+- context:
+    cluster: development
+    user: developer
+  name: dev-frontend
+current-context: dev-frontend
+kind: Config
+preferences: {}
+users:
+- name: developer
+  user:
+    token: ZDNg7LzSlp8a0u0fht_tRnPMTOjxqgJGCyi_iy0ecUw
+EOF
+
+################################################# +# RUN THE PLUGIN +################################################# + +# run the plugin second +export ANSIBLE_INVENTORY_ENABLED=kubernetes.kubevirt.kubevirt + +cat << EOF > "$OUTPUT_DIR/test.kubevirt.yml" +plugin: kubernetes.kubevirt.kubevirt +connections: + - namespaces: + - default +EOF + +ansible-inventory -vvvv -i "$OUTPUT_DIR/test.kubevirt.yml" --list --output="$OUTPUT_DIR/plugin.out" + +################################################# +# DIFF THE RESULTS +################################################# + +diff "$(pwd)/test.out" "$OUTPUT_DIR/plugin.out" diff --git a/tests/integration/targets/inventory_kubevirt/server.py b/tests/integration/targets/inventory_kubevirt/server.py new file mode 100755 index 0000000..ef1478c --- /dev/null +++ b/tests/integration/targets/inventory_kubevirt/server.py @@ -0,0 +1,322 @@ +#!/usr/bin/env python +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import json +import os +from http import HTTPStatus +from http.server import HTTPServer +from http.server import SimpleHTTPRequestHandler +from threading import Thread +from urllib.parse import urlparse + + +class TestHandler(SimpleHTTPRequestHandler): + # Path handlers: + handlers = {} + + def log_message(self, format, *args): + """ + Empty method, so we don't mix output of HTTP server with tests + """ + + def do_GET(self): + params = urlparse(self.path) + + if params.path in self.handlers: + self.handlers[params.path](self) + else: + SimpleHTTPRequestHandler.do_GET(self) + + def do_POST(self): + params = urlparse(self.path) + + if params.path in self.handlers: + self.handlers[params.path](self) + else: + self.send_error(HTTPStatus.NOT_FOUND) + + +class TestServer: + # The host and port and path used by the embedded tests web server: + PORT = None + + # The embedded web server: + _httpd = None + # Thread for http server: + _thread = None + + def set_json_response(self, path, code, 
body): + def _handle_request(handler): + handler.send_response(code) + handler.send_header("Content-Type", "application/json") + handler.end_headers() + + data = json.dumps(body, ensure_ascii=False).encode("utf-8") + handler.wfile.write(data) + + TestHandler.handlers[path] = _handle_request + + def start_server(self, host="localhost"): + self._httpd = HTTPServer((host, 12345), TestHandler) + self._thread = Thread(target=self._httpd.serve_forever) + self._thread.start() + + def stop_server(self): + self._httpd.shutdown() + self._thread.join() + + +if __name__ == "__main__": + print(os.getpid()) + server = TestServer() + server.start_server() + server.set_json_response(path="/version", code=200, body={}) + server.set_json_response( + path="/api", + code=200, + body={ + "kind": "APIVersions", + "versions": ["v1"], + "serverAddressByClientCIDRs": [ + {"clientCIDR": "0.0.0.0/0", "serverAddress": "localhost:12345"} + ], + }, + ) + server.set_json_response( + path="/api/v1", + code=200, + body={ + "kind": "APIResourceList", + "groupVersion": "v1", + "resources": [ + { + "name": "services", + "singularName": "service", + "namespaced": True, + "kind": "Service", + "verbs": [ + "create", + "delete", + "deletecollection", + "get", + "list", + "patch", + "update", + "watch", + ], + "shortNames": ["svc"], + } + ], + }, + ) + server.set_json_response( + path="/api/v1/namespaces/default/services", + code=200, + body={ + "kind": "ServiceList", + "groupVersion": "v1", + "items": [], + }, + ) + server.set_json_response( + path="/apis", + code=200, + body={ + "kind": "APIGroupList", + "apiVersion": "v1", + "groups": [ + { + "name": "kubevirt.io", + "versions": [{"groupVersion": "kubevirt.io/v1", "version": "v1"}], + "preferredVersion": { + "groupVersion": "kubevirt.io/v1", + "version": "v1", + }, + } + ], + }, + ) + server.set_json_response( + path="/apis/kubevirt.io/v1", + code=200, + body={ + "kind": "APIResourceList", + "apiVersion": "v1", + "groupVersion": "kubevirt.io/v1", + 
"resources": [ + { + "name": "virtualmachineinstances", + "singularName": "virtualmachineinstance", + "namespaced": True, + "kind": "VirtualMachineInstance", + "verbs": [ + "delete", + "deletecollection", + "get", + "list", + "patch", + "create", + "update", + "watch", + ], + "shortNames": ["vmi", "vmis"], + } + ], + }, + ) + server.set_json_response( + path="/apis/kubevirt.io/v1/namespaces/default/virtualmachineinstances", + code=200, + body={ + "apiVersion": "v1", + "items": [ + { + "apiVersion": "kubevirt.io/v1", + "kind": "VirtualMachineInstance", + "metadata": { + "annotations": { + "kubevirt.io/latest-observed-api-version": "v1", + "kubevirt.io/storage-observed-api-version": "v1alpha3", + }, + "creationTimestamp": "2022-09-14T13:43:36Z", + "finalizers": [ + "kubevirt.io/virtualMachineControllerFinalize", + "foregroundDeleteVirtualMachine", + ], + "generation": 9, + "labels": { + "kubevirt.io/nodeName": "node01", + "kubevirt.io/vm": "vm-cirros", + }, + "name": "vm-cirros", + "namespace": "default", + "ownerReferences": [ + { + "apiVersion": "kubevirt.io/v1", + "blockOwnerDeletion": True, + "controller": True, + "kind": "VirtualMachine", + "name": "vm-cirros", + "uid": "4d1b1438-91ba-4c75-a211-566fc50a06f5", + } + ], + "resourceVersion": "5387", + "uid": "7b3a8d94-bd7e-4c14-818a-89228172e4f1", + }, + "spec": { + "domain": { + "cpu": { + "cores": 1, + "model": "host-model", + "sockets": 1, + "threads": 1, + }, + "devices": { + "disks": [ + { + "disk": {"bus": "virtio"}, + "name": "containerdisk", + }, + { + "disk": {"bus": "virtio"}, + "name": "cloudinitdisk", + }, + ], + "interfaces": [{"bridge": {}, "name": "default"}], + }, + "features": {"acpi": {"enabled": True}}, + "firmware": { + "uuid": "0d2a2043-41c0-59c3-9b17-025022203668" + }, + "machine": {"type": "q35"}, + "resources": {"requests": {"memory": "128Mi"}}, + }, + "networks": [{"name": "default", "pod": {}}], + "terminationGracePeriodSeconds": 0, + "volumes": [ + { + "containerDisk": { + "image": 
"registry:5000/kubevirt/cirros-container-disk-demo:devel", + "imagePullPolicy": "IfNotPresent", + }, + "name": "containerdisk", + }, + { + "cloudInitNoCloud": { + "userData": "#!/bin/sh\n\necho 'printed from cloud-init userdata'\n" + }, + "name": "cloudinitdisk", + }, + ], + }, + "status": { + "activePods": { + "a9a6c31b-8574-46f9-8bec-70ff091c3d97": "node01" + }, + "conditions": [ + { + "lastProbeTime": None, + "lastTransitionTime": "2022-09-14T13:43:39Z", + "status": "True", + "type": "Ready", + }, + { + "lastProbeTime": None, + "lastTransitionTime": None, + "message": "cannot migrate VMI which does not use masquerade to connect to the pod network", + "reason": "InterfaceNotLiveMigratable", + "status": "False", + "type": "LiveMigratable", + }, + ], + "guestOSInfo": {}, + "interfaces": [ + { + "infoSource": "domain", + "ipAddress": "10.244.196.152", + "ipAddresses": ["10.244.196.152", "fd10:244::c497"], + "mac": "96:13:92:4f:05:d3", + "name": "default", + "queueCount": 1, + } + ], + "launcherContainerImageVersion": + "registry:5000/kubevirt/virt-launcher@sha256:5c1474d240488c9a8e6e6e48b2ad446113744353b4cd2464baee3550e6b1829d", + "migrationMethod": "BlockMigration", + "migrationTransport": "Unix", + "nodeName": "node01", + "phase": "Running", + "phaseTransitionTimestamps": [ + { + "phase": "Pending", + "phaseTransitionTimestamp": "2022-09-14T13:43:36Z", + }, + { + "phase": "Scheduling", + "phaseTransitionTimestamp": "2022-09-14T13:43:36Z", + }, + { + "phase": "Scheduled", + "phaseTransitionTimestamp": "2022-09-14T13:43:39Z", + }, + { + "phase": "Running", + "phaseTransitionTimestamp": "2022-09-14T13:43:40Z", + }, + ], + "qosClass": "Burstable", + "runtimeUser": 0, + "virtualMachineRevisionName": "revision-start-vm-4d1b1438-91ba-4c75-a211-566fc50a06f5-9", + "volumeStatus": [ + {"name": "cloudinitdisk", "size": 1048576, "target": "vdb"}, + {"name": "containerdisk", "target": "vda"}, + ], + }, + } + ], + "kind": "List", + "metadata": {"resourceVersion": "", 
"selfLink": ""}, + }, + ) diff --git a/tests/integration/targets/inventory_kubevirt/test.out b/tests/integration/targets/inventory_kubevirt/test.out new file mode 100644 index 0000000..943a82e --- /dev/null +++ b/tests/integration/targets/inventory_kubevirt/test.out @@ -0,0 +1,124 @@ +{ + "_meta": { + "hostvars": { + "default-vm-cirros": { + "annotations": { + "kubevirt.io/latest-observed-api-version": "v1", + "kubevirt.io/storage-observed-api-version": "v1alpha3" + }, + "ansible_connection": "ssh", + "ansible_host": "10.244.196.152", + "cluster_name": null, + "labels": { + "kubevirt.io/nodeName": "node01", + "kubevirt.io/vm": "vm-cirros" + }, + "object_type": "vmi", + "resource_version": "5387", + "uid": "7b3a8d94-bd7e-4c14-818a-89228172e4f1", + "vmi_active_pods": { + "a9a6c31b-8574-46f9-8bec-70ff091c3d97": "node01" + }, + "vmi_conditions": [ + { + "lastProbeTime": null, + "lastTransitionTime": "2022-09-14T13:43:39Z", + "status": "True", + "type": "Ready" + }, + { + "lastProbeTime": null, + "lastTransitionTime": null, + "message": "cannot migrate VMI which does not use masquerade to connect to the pod network", + "reason": "InterfaceNotLiveMigratable", + "status": "False", + "type": "LiveMigratable" + } + ], + "vmi_guest_os_info": {}, + "vmi_interfaces": [ + { + "infoSource": "domain", + "ipAddress": "10.244.196.152", + "ipAddresses": [ + "10.244.196.152", + "fd10:244::c497" + ], + "mac": "96:13:92:4f:05:d3", + "name": "default", + "queueCount": 1 + } + ], + "vmi_launcher_container_image_version": "registry:5000/kubevirt/virt-launcher@sha256:5c1474d240488c9a8e6e6e48b2ad446113744353b4cd2464baee3550e6b1829d", + "vmi_migration_method": "BlockMigration", + "vmi_migration_transport": "Unix", + "vmi_node_name": "node01", + "vmi_phase": "Running", + "vmi_phase_transition_timestamps": [ + { + "phase": "Pending", + "phaseTransitionTimestamp": "2022-09-14T13:43:36Z" + }, + { + "phase": "Scheduling", + "phaseTransitionTimestamp": "2022-09-14T13:43:36Z" + }, + { + "phase": 
"Scheduled", + "phaseTransitionTimestamp": "2022-09-14T13:43:39Z" + }, + { + "phase": "Running", + "phaseTransitionTimestamp": "2022-09-14T13:43:40Z" + } + ], + "vmi_qos_class": "Burstable", + "vmi_virtual_machine_revision_name": "revision-start-vm-4d1b1438-91ba-4c75-a211-566fc50a06f5-9", + "vmi_volume_status": [ + { + "name": "cloudinitdisk", + "size": 1048576, + "target": "vdb" + }, + { + "name": "containerdisk", + "target": "vda" + } + ] + } + } + }, + "all": { + "children": [ + "ungrouped", + "localhost_12345", + "label_kubevirt_io_nodeName_node01", + "label_kubevirt_io_vm_vm_cirros" + ] + }, + "label_kubevirt_io_nodeName_node01": { + "hosts": [ + "default-vm-cirros" + ] + }, + "label_kubevirt_io_vm_vm_cirros": { + "hosts": [ + "default-vm-cirros" + ] + }, + "localhost_12345": { + "children": [ + "namespace_default" + ] + }, + "namespace_default": { + "children": [ + "namespace_default_vmis" + ] + }, + "namespace_default_vmis": { + "hosts": [ + "default-vm-cirros" + ] + } +} \ No newline at end of file