Merge pull request #122 from 0xFelix/cleanups2

Several small cleanups
This commit is contained in:
kubevirt-bot
2024-07-12 14:05:06 +02:00
committed by GitHub
19 changed files with 635 additions and 742 deletions

View File

@@ -45,7 +45,7 @@ ansible-galaxy collection install kubevirt-kubevirt.core-*.tar.gz
<!--start collection_dependencies -->
#### Ansible collections
* [kubernetes.core](https://galaxy.ansible.com/ui/repo/published/kubernetes/core)>=3.1.0,<4.1.0
* [kubernetes.core](https://galaxy.ansible.com/ui/repo/published/kubernetes/core)>=3.1.0,<6.0.0
To install all the dependencies:
```bash

View File

@@ -256,12 +256,12 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
NAME = "kubevirt.core.kubevirt"
# Used to convert camel case variable names into snake case
snake_case_pattern = re_compile(r"(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])")
_snake_case_pattern = re_compile(r"(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])")
@staticmethod
def get_default_hostname(host: str) -> str:
def _get_default_hostname(host: str) -> str:
"""
get_default_host_name strips URL schemes from the host name and
_get_default_hostname strips URL schemes from the host name and
replaces invalid characters.
"""
return (
@@ -272,9 +272,9 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
)
@staticmethod
def format_dynamic_api_exc(exc: DynamicApiError) -> str:
def _format_dynamic_api_exc(exc: DynamicApiError) -> str:
"""
format_dynamic_api_exc tries to extract the message from the JSON body
_format_dynamic_api_exc tries to extract the message from the JSON body
of a DynamicApiError.
"""
if exc.body:
@@ -287,17 +287,17 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
return f"{exc.status} Reason: {exc.reason}"
@staticmethod
def format_var_name(name: str) -> str:
def _format_var_name(name: str) -> str:
"""
format_var_name formats a CamelCase variable name into a snake_case name
_format_var_name formats a CamelCase variable name into a snake_case name
suitable for use as an inventory variable name.
"""
return InventoryModule.snake_case_pattern.sub("_", name).lower()
return InventoryModule._snake_case_pattern.sub("_", name).lower()
@staticmethod
def obj_is_valid(obj: Dict) -> bool:
def _obj_is_valid(obj: Dict) -> bool:
"""
obj_is_valid ensures commonly used keys are present in the passed object.
_obj_is_valid ensures commonly used keys are present in the passed object.
"""
return bool(
"spec" in obj
@@ -309,9 +309,11 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
)
@staticmethod
def get_host_from_service(service: Dict, node_name: Optional[str]) -> Optional[str]:
def _get_host_from_service(
service: Dict, node_name: Optional[str]
) -> Optional[str]:
"""
get_host_from_service extracts the hostname to be used from the
_get_host_from_service extracts the hostname to be used from the
passed in service.
"""
service_type = service.get("spec", {}).get("type")
@@ -329,9 +331,9 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
return None
@staticmethod
def get_port_from_service(service: Dict) -> Optional[str]:
def _get_port_from_service(service: Dict) -> Optional[str]:
"""
get_port_from_service extracts the port to be used from the
_get_port_from_service extracts the port to be used from the
passed in service.
"""
ports = service.get("spec", {}).get("ports", [])
@@ -349,9 +351,9 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
return None
@staticmethod
def is_windows(guest_os_info: Optional[Dict], annotations: Optional[Dict]) -> bool:
def _is_windows(guest_os_info: Optional[Dict], annotations: Optional[Dict]) -> bool:
"""
is_windows checkes whether a given VM is running a Windows guest
_is_windows checks whether a given VM is running a Windows guest
by checking its GuestOSInfo and annotations.
"""
if guest_os_info and "id" in guest_os_info:
@@ -404,7 +406,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
attempt_to_read_cache = user_cache_setting and cache
cache_needs_update = user_cache_setting and not cache
self.connections_compatibility(config_data)
self._connections_compatibility(config_data)
opts = InventoryOptions(config_data=config_data)
results = {}
@@ -414,13 +416,17 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
except KeyError:
cache_needs_update = True
if not attempt_to_read_cache or cache_needs_update:
results = self.fetch_objects(get_api_client(**config_data), opts)
results = self._fetch_objects(get_api_client(**config_data), opts)
if cache_needs_update:
self._cache[cache_key] = results
self.populate_inventory(results, opts)
self._populate_inventory(results, opts)
def connections_compatibility(self, config_data: Dict) -> None:
def _connections_compatibility(self, config_data: Dict) -> None:
"""
_connections_compatibility ensures compatibility with the connection
parameter found in earlier versions of this inventory plugin (<1.5.0).
"""
collection_name = "kubevirt.core"
version_removed_in = "3.0.0"
@@ -457,7 +463,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
removed=True,
)
def fetch_objects(self, client: Any, opts: InventoryOptions) -> Dict:
def _fetch_objects(self, client: Any, opts: InventoryOptions) -> Dict:
"""
fetch_objects fetches all relevant objects from the K8S API.
"""
@@ -465,10 +471,10 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
for namespace in (
opts.namespaces
if opts.namespaces
else self.get_available_namespaces(client)
else self._get_available_namespaces(client)
):
vms = self.get_vms_for_namespace(client, namespace, opts)
vmis = self.get_vmis_for_namespace(client, namespace, opts)
vms = self._get_vms_for_namespace(client, namespace, opts)
vmis = self._get_vmis_for_namespace(client, namespace, opts)
if not vms and not vmis:
# Continue if no VMs and VMIs were found to avoid adding empty groups.
@@ -477,18 +483,18 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
namespaces[namespace] = {
"vms": vms,
"vmis": vmis,
"services": self.get_ssh_services_for_namespace(client, namespace),
"services": self._get_ssh_services_for_namespace(client, namespace),
}
return {
"default_hostname": self.get_default_hostname(client.configuration.host),
"cluster_domain": self.get_cluster_domain(client),
"default_hostname": self._get_default_hostname(client.configuration.host),
"cluster_domain": self._get_cluster_domain(client),
"namespaces": namespaces,
}
def get_cluster_domain(self, client: K8SClient) -> Optional[str]:
def _get_cluster_domain(self, client: K8SClient) -> Optional[str]:
"""
get_cluster_domain tries to get the base domain of an OpenShift cluster.
_get_cluster_domain tries to get the base domain of an OpenShift cluster.
"""
try:
v1_dns = client.resources.get(
@@ -501,16 +507,16 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
obj = v1_dns.get(name="cluster")
except DynamicApiError as exc:
self.display.debug(
f"Failed to fetch cluster DNS config: {self.format_dynamic_api_exc(exc)}"
f"Failed to fetch cluster DNS config: {self._format_dynamic_api_exc(exc)}"
)
return None
return obj.get("spec", {}).get("baseDomain")
def get_resources(
def _get_resources(
self, client: K8SClient, api_version: str, kind: str, **kwargs
) -> List[Dict]:
"""
get_resources uses a dynamic K8SClient to fetch resources from the K8S API.
_get_resources uses a dynamic K8SClient to fetch resources from the K8S API.
"""
client = client.resources.get(api_version=api_version, kind=kind)
try:
@@ -518,29 +524,29 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
except DynamicApiError as exc:
self.display.debug(exc)
raise KubeVirtInventoryException(
f"Error fetching {kind} list: {self.format_dynamic_api_exc(exc)}"
f"Error fetching {kind} list: {self._format_dynamic_api_exc(exc)}"
) from exc
return [item.to_dict() for item in result.items]
def get_available_namespaces(self, client: K8SClient) -> List[str]:
def _get_available_namespaces(self, client: K8SClient) -> List[str]:
"""
get_available_namespaces lists all namespaces accessible with the
_get_available_namespaces lists all namespaces accessible with the
configured credentials and returns them.
"""
return [
namespace["metadata"]["name"]
for namespace in self.get_resources(client, "v1", "Namespace")
for namespace in self._get_resources(client, "v1", "Namespace")
if "metadata" in namespace and "name" in namespace["metadata"]
]
def get_vms_for_namespace(
def _get_vms_for_namespace(
self, client: K8SClient, namespace: str, opts: InventoryOptions
) -> List[Dict]:
"""
get_vms_for_namespace returns a list of all VirtualMachines in a namespace.
_get_vms_for_namespace returns a list of all VirtualMachines in a namespace.
"""
return self.get_resources(
return self._get_resources(
client,
opts.api_version,
"VirtualMachine",
@@ -548,13 +554,13 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
label_selector=opts.label_selector,
)
def get_vmis_for_namespace(
def _get_vmis_for_namespace(
self, client: K8SClient, namespace: str, opts: InventoryOptions
) -> List[Dict]:
"""
get_vmis_for_namespace returns a list of all VirtualMachineInstances in a namespace.
_get_vmis_for_namespace returns a list of all VirtualMachineInstances in a namespace.
"""
return self.get_resources(
return self._get_resources(
client,
opts.api_version,
"VirtualMachineInstance",
@@ -562,12 +568,14 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
label_selector=opts.label_selector,
)
def get_ssh_services_for_namespace(self, client: K8SClient, namespace: str) -> Dict:
def _get_ssh_services_for_namespace(
self, client: K8SClient, namespace: str
) -> Dict:
"""
get_ssh_services_for_namespace retrieves all services of a namespace exposing port 22/ssh.
_get_ssh_services_for_namespace retrieves all services of a namespace exposing port 22/ssh.
The services are mapped to the name of the corresponding domain.
"""
items = self.get_resources(
items = self._get_resources(
client,
"v1",
"Service",
@@ -601,9 +609,9 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
return services
def populate_inventory(self, results: Dict, opts: InventoryOptions) -> None:
def _populate_inventory(self, results: Dict, opts: InventoryOptions) -> None:
"""
populate_inventory populates the inventory by completing the InventoryOptions
_populate_inventory populates the inventory by completing the InventoryOptions
and invoking _populate_inventory_from_namespace for every namespace in results.
"""
if opts.base_domain is None:
@@ -611,22 +619,22 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
if opts.name is None:
opts.name = results["default_hostname"]
for namespace, data in results["namespaces"].items():
self.populate_inventory_from_namespace(namespace, data, opts)
self._populate_inventory_from_namespace(namespace, data, opts)
def populate_inventory_from_namespace(
def _populate_inventory_from_namespace(
self, namespace: str, data: Dict, opts: InventoryOptions
) -> None:
"""
populate_inventory_from_namespace adds groups and hosts from a
_populate_inventory_from_namespace adds groups and hosts from a
namespace to the inventory.
"""
vms = {
vm["metadata"]["name"]: vm for vm in data["vms"] if self.obj_is_valid(vm)
vm["metadata"]["name"]: vm for vm in data["vms"] if self._obj_is_valid(vm)
}
vmis = {
vmi["metadata"]["name"]: vmi
for vmi in data["vmis"]
if self.obj_is_valid(vmi)
if self._obj_is_valid(vmi)
}
if not vms and not vmis:
@@ -636,7 +644,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
services = {
domain: service
for domain, service in data["services"].items()
if self.obj_is_valid(service)
if self._obj_is_valid(service)
}
name = self._sanitize_group_name(opts.name)
@@ -648,23 +656,25 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
# Add found VMs and optionally enhance with VMI data
for name, vm in vms.items():
hostname = self.add_host(vm["metadata"], opts.host_format, namespace_group)
self.set_vars_from_vm(hostname, vm, opts)
hostname = self._add_host(vm["metadata"], opts.host_format, namespace_group)
self._set_vars_from_vm(hostname, vm, opts)
if name in vmis:
self.set_vars_from_vmi(hostname, vmis[name], services, opts)
self.set_composable_vars(hostname)
self._set_vars_from_vmi(hostname, vmis[name], services, opts)
self._set_composable_vars(hostname)
# Add remaining VMIs without VM
for name, vmi in vmis.items():
if name in vms:
continue
hostname = self.add_host(vmi["metadata"], opts.host_format, namespace_group)
self.set_vars_from_vmi(hostname, vmi, services, opts)
self.set_composable_vars(hostname)
hostname = self._add_host(
vmi["metadata"], opts.host_format, namespace_group
)
self._set_vars_from_vmi(hostname, vmi, services, opts)
self._set_composable_vars(hostname)
def add_host(self, metadata: Dict, host_format: str, namespace_group: str) -> str:
def _add_host(self, metadata: Dict, host_format: str, namespace_group: str) -> str:
"""
add_hosts adds a host to the inventory.
_add_host adds a host to the inventory.
"""
hostname = host_format.format(
namespace=metadata["namespace"],
@@ -676,20 +686,22 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
return hostname
def set_vars_from_vm(self, hostname: str, vm: Dict, opts: InventoryOptions) -> None:
def _set_vars_from_vm(
self, hostname: str, vm: Dict, opts: InventoryOptions
) -> None:
"""
set_vars_from_vm sets inventory variables from a VM prefixed with vm_.
_set_vars_from_vm sets inventory variables from a VM prefixed with vm_.
"""
self.set_common_vars(hostname, "vm", vm, opts)
self._set_common_vars(hostname, "vm", vm, opts)
def set_vars_from_vmi(
def _set_vars_from_vmi(
self, hostname: str, vmi: Dict, services: Dict, opts: InventoryOptions
) -> None:
"""
set_vars_from_vmi sets inventory variables from a VMI prefixed with vmi_ and
_set_vars_from_vmi sets inventory variables from a VMI prefixed with vmi_ and
looks up the interface to set ansible_host and ansible_port.
"""
self.set_common_vars(hostname, "vmi", vmi, opts)
self._set_common_vars(hostname, "vmi", vmi, opts)
if not (interfaces := vmi["status"].get("interfaces")):
return
@@ -710,7 +722,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
# Set up the connection
service = None
if self.is_windows(
if self._is_windows(
vmi["status"].get("guestOSInfo", {}),
vmi["metadata"].get("annotations", {}),
):
@@ -719,7 +731,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
service = services.get(
vmi["metadata"].get("labels", {}).get(LABEL_KUBEVIRT_IO_DOMAIN)
)
self.set_ansible_host_and_port(
self._set_ansible_host_and_port(
vmi,
hostname,
interface["ipAddress"],
@@ -727,11 +739,11 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
opts,
)
def set_common_vars(
def _set_common_vars(
self, hostname: str, prefix: str, obj: Dict, opts: InventoryOptions
):
"""
set_common_vars sets common inventory variables from VMs or VMIs.
_set_common_vars sets common inventory variables from VMs or VMIs.
"""
# Add hostvars from metadata
if annotations := obj["metadata"].get("annotations"):
@@ -740,7 +752,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
self.inventory.set_variable(hostname, f"{prefix}_labels", labels)
# Create label groups and add vm to it if enabled
if opts.create_groups:
self.set_groups_from_labels(hostname, labels)
self._set_groups_from_labels(hostname, labels)
if resource_version := obj["metadata"].get("resourceVersion"):
self.inventory.set_variable(
hostname, f"{prefix}_resource_version", resource_version
@@ -751,12 +763,12 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
# Add hostvars from status
for key, value in obj["status"].items():
self.inventory.set_variable(
hostname, f"{prefix}_{self.format_var_name(key)}", value
hostname, f"{prefix}_{self._format_var_name(key)}", value
)
def set_groups_from_labels(self, hostname: str, labels: Dict) -> None:
def _set_groups_from_labels(self, hostname: str, labels: Dict) -> None:
"""
set_groups_from_labels adds groups for each label of a VM or VMI and
_set_groups_from_labels adds groups for each label of a VM or VMI and
adds the host to each group.
"""
groups = []
@@ -769,7 +781,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
self.inventory.add_group(group)
self.inventory.add_child(group, hostname)
def set_ansible_host_and_port(
def _set_ansible_host_and_port(
self,
vmi: Dict,
hostname: str,
@@ -778,7 +790,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
opts: InventoryOptions,
) -> None:
"""
set_ansible_host_and_port sets the ansible_host and possibly the ansible_port var.
_set_ansible_host_and_port sets the ansible_host and possibly the ansible_port var.
Secondary interfaces have priority over a service exposing SSH.
"""
ansible_host = None
@@ -795,8 +807,8 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
node_name = vmi["status"].get("nodeName")
if node_name and opts.append_base_domain and opts.base_domain:
node_name += f".{opts.base_domain}"
host = self.get_host_from_service(service, node_name)
port = self.get_port_from_service(service)
host = self._get_host_from_service(service, node_name)
port = self._get_port_from_service(service)
if host is not None and port is not None:
ansible_host = host
ansible_port = port
@@ -808,9 +820,9 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
self.inventory.set_variable(hostname, "ansible_host", ansible_host)
self.inventory.set_variable(hostname, "ansible_port", ansible_port)
def set_composable_vars(self, hostname: str) -> None:
def _set_composable_vars(self, hostname: str) -> None:
"""
set_composable_vars sets vars per
_set_composable_vars sets vars per
https://docs.ansible.com/ansible/latest/dev_guide/developing_inventory.html
"""
hostvars = self.inventory.get_host(hostname).get_vars()

View File

@@ -69,7 +69,7 @@ WINDOWS_VMI_4 = merge_dicts(
],
)
def test_ansible_connection_winrm(inventory, hosts, vmi, expected):
inventory.populate_inventory(
inventory._populate_inventory(
{
"default_hostname": "test",
"cluster_domain": "test.com",

View File

@@ -45,7 +45,7 @@ def test_set_composable_vars(
"keyed_groups": [{"prefix": "fedora", "key": "vmi_guest_os_info.versionId"}],
"strict": True,
}
inventory.populate_inventory(
inventory._populate_inventory(
{
"default_hostname": "test",
"cluster_domain": "test.com",

View File

@@ -44,7 +44,7 @@ VMI1 = {
def test_stopped_vm(inventory, hosts):
inventory.populate_inventory(
inventory._populate_inventory(
{
"default_hostname": "test",
"cluster_domain": "test.com",

View File

@@ -29,7 +29,7 @@ from ansible_collections.kubevirt.core.tests.unit.plugins.inventory.constants im
],
)
def test_get_default_hostname(host, expected):
assert InventoryModule.get_default_hostname(host) == expected
assert InventoryModule._get_default_hostname(host) == expected
@pytest.mark.parametrize(
@@ -44,7 +44,7 @@ def test_get_default_hostname(host, expected):
],
)
def test_format_var_name(name, expected):
assert InventoryModule.format_var_name(name) == expected
assert InventoryModule._format_var_name(name) == expected
@pytest.mark.parametrize(
@@ -93,7 +93,7 @@ def test_format_var_name(name, expected):
],
)
def test_obj_is_valid(obj, expected):
assert InventoryModule.obj_is_valid(obj) == expected
assert InventoryModule._obj_is_valid(obj) == expected
@pytest.mark.parametrize(
@@ -151,7 +151,7 @@ def test_obj_is_valid(obj, expected):
],
)
def test_get_host_from_service(service, node_name, expected):
assert InventoryModule.get_host_from_service(service, node_name) == expected
assert InventoryModule._get_host_from_service(service, node_name) == expected
@pytest.mark.parametrize(
@@ -199,7 +199,7 @@ def test_get_host_from_service(service, node_name, expected):
],
)
def test_port_from_service(service, expected):
assert InventoryModule.get_port_from_service(service) == expected
assert InventoryModule._get_port_from_service(service) == expected
@pytest.mark.parametrize(
@@ -251,11 +251,11 @@ def test_port_from_service(service, expected):
],
)
def test_is_windows(guest_os_info, annotations, expected):
assert InventoryModule.is_windows(guest_os_info, annotations) == expected
assert InventoryModule._is_windows(guest_os_info, annotations) == expected
def test_get_cluster_domain(inventory, client):
assert inventory.get_cluster_domain(client) == DEFAULT_BASE_DOMAIN
assert inventory._get_cluster_domain(client) == DEFAULT_BASE_DOMAIN
@pytest.mark.parametrize(
@@ -292,10 +292,10 @@ def test_get_cluster_domain(inventory, client):
)
def test_populate_inventory(mocker, inventory, results, expected):
populate_inventory_from_namespace = mocker.patch.object(
inventory, "populate_inventory_from_namespace"
inventory, "_populate_inventory_from_namespace"
)
inventory.populate_inventory(results, InventoryOptions())
inventory._populate_inventory(results, InventoryOptions())
opts = InventoryOptions(
base_domain=results["cluster_domain"], name=results["default_hostname"]
@@ -321,7 +321,7 @@ def test_populate_inventory(mocker, inventory, results, expected):
)
def test_set_groups_from_labels(inventory, groups, labels, expected):
hostname = "default-testvm"
inventory.set_groups_from_labels(hostname, labels)
inventory._set_groups_from_labels(hostname, labels)
for group in expected:
assert group in groups
assert hostname in groups[group]["children"]

View File

@@ -62,7 +62,7 @@ def test_add_host(inventory, groups, hosts, host_format, expected):
namespace_group = "namespace_default"
inventory.inventory.add_group(namespace_group)
inventory.add_host(
inventory._add_host(
{
"name": "testvm",
"namespace": DEFAULT_NAMESPACE,

View File

@@ -23,7 +23,7 @@ def test_config_data_without_connections_ignored(inventory):
"label_selector": "app=test",
}
inventory.connections_compatibility(config_data)
inventory._connections_compatibility(config_data)
assert config_data["name"] == "connection-1"
assert config_data["namespaces"] == ["default"]
@@ -46,7 +46,7 @@ def test_single_connection_supported(inventory):
"label_selector": "app=something",
}
inventory.connections_compatibility(config_data)
inventory._connections_compatibility(config_data)
assert config_data["name"] == "connection-1"
assert config_data["namespaces"] == ["default"]
@@ -58,7 +58,7 @@ def test_multiple_connections_not_supported(inventory):
with pytest.raises(
AnsibleError, match="Split your connections into multiple configuration files."
):
inventory.connections_compatibility(
inventory._connections_compatibility(
{
"connections": [
{
@@ -84,4 +84,4 @@ def test_multiple_connections_not_supported(inventory):
)
def test_connections_exceptions(inventory, config_data, expected):
with pytest.raises(KubeVirtInventoryException, match=expected):
inventory.connections_compatibility(config_data)
inventory._connections_compatibility(config_data)

View File

@@ -37,25 +37,25 @@ from ansible_collections.kubevirt.core.tests.unit.plugins.inventory.constants im
)
def test_fetch_objects(mocker, inventory, opts, namespaces):
get_available_namespaces = mocker.patch.object(
inventory, "get_available_namespaces", return_value=[DEFAULT_NAMESPACE]
inventory, "_get_available_namespaces", return_value=[DEFAULT_NAMESPACE]
)
get_vms_for_namespace = mocker.patch.object(
inventory, "get_vms_for_namespace", return_value=[{}]
inventory, "_get_vms_for_namespace", return_value=[{}]
)
get_vmis_for_namespace = mocker.patch.object(
inventory, "get_vmis_for_namespace", return_value=[{}]
inventory, "_get_vmis_for_namespace", return_value=[{}]
)
get_ssh_services_for_namespace = mocker.patch.object(
inventory, "get_ssh_services_for_namespace", return_value=[]
inventory, "_get_ssh_services_for_namespace", return_value=[]
)
get_default_hostname = mocker.patch.object(
inventory, "get_default_hostname", return_value="default-hostname"
inventory, "_get_default_hostname", return_value="default-hostname"
)
get_cluster_domain = mocker.patch.object(
inventory, "get_cluster_domain", return_value="test.com"
inventory, "_get_cluster_domain", return_value="test.com"
)
inventory.fetch_objects(mocker.Mock(), opts)
inventory._fetch_objects(mocker.Mock(), opts)
if opts.namespaces:
get_available_namespaces.assert_not_called()
@@ -77,25 +77,25 @@ def test_fetch_objects(mocker, inventory, opts, namespaces):
def test_fetch_objects_early_return(mocker, inventory):
get_available_namespaces = mocker.patch.object(
inventory, "get_available_namespaces", return_value=[DEFAULT_NAMESPACE]
inventory, "_get_available_namespaces", return_value=[DEFAULT_NAMESPACE]
)
get_vms_for_namespace = mocker.patch.object(
inventory, "get_vms_for_namespace", return_value=[]
inventory, "_get_vms_for_namespace", return_value=[]
)
get_vmis_for_namespace = mocker.patch.object(
inventory, "get_vmis_for_namespace", return_value=[]
inventory, "_get_vmis_for_namespace", return_value=[]
)
get_ssh_services_for_namespace = mocker.patch.object(
inventory, "get_ssh_services_for_namespace"
inventory, "_get_ssh_services_for_namespace"
)
get_default_hostname = mocker.patch.object(
inventory, "get_default_hostname", return_value="default-hostname"
inventory, "_get_default_hostname", return_value="default-hostname"
)
get_cluster_domain = mocker.patch.object(
inventory, "get_cluster_domain", return_value="test.com"
inventory, "_get_cluster_domain", return_value="test.com"
)
inventory.fetch_objects(mocker.Mock(), InventoryOptions())
inventory._fetch_objects(mocker.Mock(), InventoryOptions())
get_available_namespaces.assert_called_once()
get_vms_for_namespace.assert_called_once_with(

View File

@@ -52,5 +52,6 @@ def status_reason_error(mocker):
)
def test_format_dynamic_api_exc(request, exc, expected):
assert (
InventoryModule.format_dynamic_api_exc(request.getfixturevalue(exc)) == expected
InventoryModule._format_dynamic_api_exc(request.getfixturevalue(exc))
== expected
)

View File

@@ -30,19 +30,19 @@ from ansible_collections.kubevirt.core.tests.unit.plugins.inventory.constants im
indirect=["client"],
)
def test_get_resources(inventory, client):
assert inventory.get_resources(client, "v1", "Namespace") == [
assert inventory._get_resources(client, "v1", "Namespace") == [
{"metadata": {"name": DEFAULT_NAMESPACE}}
]
assert inventory.get_resources(client, "v1", "Service") == [
assert inventory._get_resources(client, "v1", "Service") == [
{"metadata": {"name": "testsvc"}}
]
assert inventory.get_resources(client, "config.openshift.io/v1", "DNS") == [
assert inventory._get_resources(client, "config.openshift.io/v1", "DNS") == [
{"spec": {"baseDomain": DEFAULT_BASE_DOMAIN}}
]
assert inventory.get_resources(client, "kubevirt.io/v1", "VirtualMachine") == [
assert inventory._get_resources(client, "kubevirt.io/v1", "VirtualMachine") == [
{"metadata": {"name": "testvm"}}
]
assert inventory.get_resources(
assert inventory._get_resources(
client, "kubevirt.io/v1", "VirtualMachineInstance"
) == [{"metadata": {"name": "testvmi"}}]
@@ -67,7 +67,7 @@ def test_get_resources(inventory, client):
indirect=["client"],
)
def test_get_available_namespaces(inventory, client, expected):
assert inventory.get_available_namespaces(client) == expected
assert inventory._get_available_namespaces(client) == expected
@pytest.mark.parametrize(
@@ -83,7 +83,7 @@ def test_get_available_namespaces(inventory, client, expected):
indirect=["client"],
)
def test_get_vms_for_namespace(inventory, client):
assert inventory.get_vms_for_namespace(
assert inventory._get_vms_for_namespace(
client, DEFAULT_NAMESPACE, InventoryOptions()
) == [{"metadata": {"name": "testvm1"}}, {"metadata": {"name": "testvm2"}}]
@@ -101,6 +101,6 @@ def test_get_vms_for_namespace(inventory, client):
indirect=["client"],
)
def test_get_vmis_for_namespace(inventory, client):
assert inventory.get_vmis_for_namespace(
assert inventory._get_vmis_for_namespace(
client, DEFAULT_NAMESPACE, InventoryOptions()
) == [{"metadata": {"name": "testvmi1"}}, {"metadata": {"name": "testvmi2"}}]

View File

@@ -57,7 +57,7 @@ SVC_NP_SSH = {
indirect=["client"],
)
def test_get_ssh_services_for_namespace(inventory, client):
assert inventory.get_ssh_services_for_namespace(client, DEFAULT_NAMESPACE) == {
assert inventory._get_ssh_services_for_namespace(client, DEFAULT_NAMESPACE) == {
"test-lb-ssh": SVC_LB_SSH,
"test-np-ssh": SVC_NP_SSH,
}
@@ -165,4 +165,4 @@ SVC_NO_SELECTOR = {
indirect=["client"],
)
def test_ignore_unwanted_services(inventory, client):
assert not inventory.get_ssh_services_for_namespace(client, DEFAULT_NAMESPACE)
assert not inventory._get_ssh_services_for_namespace(client, DEFAULT_NAMESPACE)

View File

@@ -102,8 +102,8 @@ def test_config_data_to_opts(mocker, inventory, config_data, expected):
mocker.patch.object(inventory, "get_cache_key")
mocker.patch.object(inventory, "get_option")
mocker.patch.object(kubevirt, "get_api_client")
mocker.patch.object(inventory, "fetch_objects")
populate_inventory = mocker.patch.object(inventory, "populate_inventory")
mocker.patch.object(inventory, "_fetch_objects")
populate_inventory = mocker.patch.object(inventory, "_populate_inventory")
inventory.parse(None, None, "", False)
@@ -139,8 +139,8 @@ def test_use_of_cache(
)
get_option = mocker.patch.object(inventory, "get_option", return_value=cache_option)
get_api_client = mocker.patch.object(kubevirt, "get_api_client")
fetch_objects = mocker.patch.object(inventory, "fetch_objects")
populate_inventory = mocker.patch.object(inventory, "populate_inventory")
fetch_objects = mocker.patch.object(inventory, "_fetch_objects")
populate_inventory = mocker.patch.object(inventory, "_populate_inventory")
if cache_parse is None:
inventory.parse(None, None, path)
@@ -173,7 +173,7 @@ def test_k8s_client_missing(mocker, inventory, present):
mocker.patch.object(inventory, "_read_config_data", return_value={})
mocker.patch.object(inventory, "get_cache_key")
mocker.patch.object(inventory, "get_option")
fetch_objects = mocker.patch.object(inventory, "fetch_objects")
fetch_objects = mocker.patch.object(inventory, "_fetch_objects")
if present:
inventory.parse(None, None, "", False)

View File

@@ -129,15 +129,15 @@ def test_populate_inventory_from_namespace(
set_vars_from_vmi_calls.append(mocker.call(hostname, vmi, {}, opts))
set_composable_vars_calls.append(mocker.call(hostname))
obj_is_valid = mocker.patch.object(inventory, "obj_is_valid", return_value=True)
obj_is_valid = mocker.patch.object(inventory, "_obj_is_valid", return_value=True)
add_host = mocker.patch.object(
inventory, "add_host", side_effect=add_host_side_effects
inventory, "_add_host", side_effect=add_host_side_effects
)
set_vars_from_vm = mocker.patch.object(inventory, "set_vars_from_vm")
set_vars_from_vmi = mocker.patch.object(inventory, "set_vars_from_vmi")
set_composable_vars = mocker.patch.object(inventory, "set_composable_vars")
set_vars_from_vm = mocker.patch.object(inventory, "_set_vars_from_vm")
set_vars_from_vmi = mocker.patch.object(inventory, "_set_vars_from_vmi")
set_composable_vars = mocker.patch.object(inventory, "_set_composable_vars")
inventory.populate_inventory_from_namespace(
inventory._populate_inventory_from_namespace(
DEFAULT_NAMESPACE, {"vms": vms, "vmis": vmis, "services": {}}, opts
)

View File

@@ -27,7 +27,7 @@ def test_use_ip_address_by_default(mocker, inventory, opts):
hostname = "default-testvm"
ip_address = "1.1.1.1"
inventory.set_ansible_host_and_port({}, hostname, ip_address, None, opts)
inventory._set_ansible_host_and_port({}, hostname, ip_address, None, opts)
set_variable.assert_has_calls(
[
@@ -53,7 +53,7 @@ def test_kube_secondary_dns(mocker, inventory, base_domain):
"status": {"interfaces": [{"name": "awesome"}]},
}
inventory.set_ansible_host_and_port(
inventory._set_ansible_host_and_port(
vmi,
hostname,
"1.1.1.1",
@@ -86,7 +86,7 @@ def test_kube_secondary_dns_precedence_over_service(mocker, inventory):
"status": {"interfaces": [{"name": "awesome"}]},
}
inventory.set_ansible_host_and_port(
inventory._set_ansible_host_and_port(
vmi,
hostname,
"1.1.1.1",
@@ -170,7 +170,7 @@ def test_service(mocker, inventory, service, expected_host, expected_port):
},
}
inventory.set_ansible_host_and_port(
inventory._set_ansible_host_and_port(
vmi,
hostname,
"1.1.1.1",
@@ -201,7 +201,7 @@ def test_service_append_base_domain(mocker, inventory):
"ports": [{"nodePort": 25}],
},
}
inventory.set_ansible_host_and_port(
inventory._set_ansible_host_and_port(
vmi,
hostname,
"1.1.1.1",
@@ -229,8 +229,8 @@ def test_service_append_base_domain(mocker, inventory):
)
def test_service_fallback(mocker, inventory, host, port):
set_variable = mocker.patch.object(inventory.inventory, "set_variable")
mocker.patch.object(inventory, "get_host_from_service", return_value=host)
mocker.patch.object(inventory, "get_port_from_service", return_value=port)
mocker.patch.object(inventory, "_get_host_from_service", return_value=host)
mocker.patch.object(inventory, "_get_port_from_service", return_value=port)
hostname = "default-testvm"
vmi = {
@@ -238,7 +238,7 @@ def test_service_fallback(mocker, inventory, host, port):
"nodeName": "testnode",
},
}
inventory.set_ansible_host_and_port(
inventory._set_ansible_host_and_port(
vmi,
hostname,
"1.1.1.1",
@@ -258,7 +258,7 @@ def test_no_service_if_network_name(mocker, inventory):
set_variable = mocker.patch.object(inventory.inventory, "set_variable")
hostname = "default-testvm"
inventory.set_ansible_host_and_port(
inventory._set_ansible_host_and_port(
{},
hostname,
"1.2.3.4",

View File

@@ -85,7 +85,7 @@ def test_set_common_vars(inventory, hosts, obj, expected):
hostname = "default-testvm"
prefix = "".join(choice(ascii_lowercase) for i in range(5))
inventory.inventory.add_host(hostname)
inventory.set_common_vars(hostname, prefix, obj, InventoryOptions())
inventory._set_common_vars(hostname, prefix, obj, InventoryOptions())
for key, value in expected.items():
prefixed_key = f"{prefix}_{key}"
@@ -102,13 +102,13 @@ def test_set_common_vars(inventory, hosts, obj, expected):
)
def test_set_common_vars_create_groups(mocker, inventory, create_groups):
mocker.patch.object(inventory.inventory, "set_variable")
set_groups_from_labels = mocker.patch.object(inventory, "set_groups_from_labels")
set_groups_from_labels = mocker.patch.object(inventory, "_set_groups_from_labels")
hostname = "default-testvm"
labels = {"testkey": "testval"}
opts = InventoryOptions(create_groups=create_groups)
inventory.set_common_vars(
inventory._set_common_vars(
hostname, "prefix", {"metadata": {"labels": labels}, "status": {}}, opts
)
@@ -123,10 +123,10 @@ def test_called_by_set_vars_from(mocker, inventory):
opts = InventoryOptions()
obj = {"status": {}}
set_common_vars = mocker.patch.object(inventory, "set_common_vars")
set_common_vars = mocker.patch.object(inventory, "_set_common_vars")
inventory.set_vars_from_vm(hostname, obj, opts)
inventory.set_vars_from_vmi(hostname, obj, {}, opts)
inventory._set_vars_from_vm(hostname, obj, opts)
inventory._set_vars_from_vmi(hostname, obj, {}, opts)
set_common_vars.assert_has_calls(
[

View File

@@ -13,21 +13,21 @@ from ansible_collections.kubevirt.core.plugins.inventory.kubevirt import (
def test_ignore_vmi_without_interface(mocker, inventory):
mocker.patch.object(inventory, "set_common_vars")
mocker.patch.object(inventory, "_set_common_vars")
set_ansible_host_and_port = mocker.patch.object(
inventory, "set_ansible_host_and_port"
inventory, "_set_ansible_host_and_port"
)
vmi = {"status": {}}
inventory.set_vars_from_vmi("default-testvm", vmi, {}, InventoryOptions())
inventory._set_vars_from_vmi("default-testvm", vmi, {}, InventoryOptions())
set_ansible_host_and_port.assert_not_called()
def test_use_first_interface_by_default(mocker, inventory):
mocker.patch.object(inventory, "set_common_vars")
mocker.patch.object(inventory, "_set_common_vars")
set_ansible_host_and_port = mocker.patch.object(
inventory, "set_ansible_host_and_port"
inventory, "_set_ansible_host_and_port"
)
hostname = "default-testvm"
@@ -36,7 +36,7 @@ def test_use_first_interface_by_default(mocker, inventory):
"status": {"interfaces": [{"ipAddress": "1.1.1.1"}, {"ipAddress": "2.2.2.2"}]},
}
opts = InventoryOptions()
inventory.set_vars_from_vmi(hostname, vmi, {}, opts)
inventory._set_vars_from_vmi(hostname, vmi, {}, opts)
set_ansible_host_and_port.assert_called_once_with(
vmi, hostname, "1.1.1.1", None, opts
@@ -44,9 +44,9 @@ def test_use_first_interface_by_default(mocker, inventory):
def test_use_named_interface(mocker, inventory):
mocker.patch.object(inventory, "set_common_vars")
mocker.patch.object(inventory, "_set_common_vars")
set_ansible_host_and_port = mocker.patch.object(
inventory, "set_ansible_host_and_port"
inventory, "_set_ansible_host_and_port"
)
hostname = "default-testvm"
@@ -60,7 +60,7 @@ def test_use_named_interface(mocker, inventory):
},
}
opts = InventoryOptions(network_name="second")
inventory.set_vars_from_vmi(hostname, vmi, {}, opts)
inventory._set_vars_from_vmi(hostname, vmi, {}, opts)
set_ansible_host_and_port.assert_called_once_with(
vmi, hostname, "2.2.2.2", None, opts
@@ -68,16 +68,16 @@ def test_use_named_interface(mocker, inventory):
def test_ignore_vmi_without_named_interface(mocker, inventory):
mocker.patch.object(inventory, "set_common_vars")
mocker.patch.object(inventory, "_set_common_vars")
set_ansible_host_and_port = mocker.patch.object(
inventory, "set_ansible_host_and_port"
inventory, "_set_ansible_host_and_port"
)
vmi = {
"metadata": {},
"status": {"interfaces": [{"name": "somename", "ipAddress": "1.1.1.1"}]},
}
inventory.set_vars_from_vmi(
inventory._set_vars_from_vmi(
"default-testvm", vmi, {}, InventoryOptions(network_name="awesome")
)
@@ -85,22 +85,22 @@ def test_ignore_vmi_without_named_interface(mocker, inventory):
def test_set_winrm_if_windows(mocker, inventory):
mocker.patch.object(inventory, "set_common_vars")
mocker.patch.object(inventory, "is_windows", return_value=True)
mocker.patch.object(inventory, "set_ansible_host_and_port")
mocker.patch.object(inventory, "_set_common_vars")
mocker.patch.object(inventory, "_is_windows", return_value=True)
mocker.patch.object(inventory, "_set_ansible_host_and_port")
set_variable = mocker.patch.object(inventory.inventory, "set_variable")
hostname = "default-testvm"
vmi = {"metadata": {}, "status": {"interfaces": [{"ipAddress": "1.1.1.1"}]}}
inventory.set_vars_from_vmi(hostname, vmi, {}, InventoryOptions())
inventory._set_vars_from_vmi(hostname, vmi, {}, InventoryOptions())
set_variable.assert_called_once_with(hostname, "ansible_connection", "winrm")
def test_service_lookup(mocker, inventory):
mocker.patch.object(inventory, "set_common_vars")
mocker.patch.object(inventory, "_set_common_vars")
set_ansible_host_and_port = mocker.patch.object(
inventory, "set_ansible_host_and_port"
inventory, "_set_ansible_host_and_port"
)
hostname = "default-testvm"
@@ -110,7 +110,7 @@ def test_service_lookup(mocker, inventory):
}
opts = InventoryOptions()
service = {"metadata": {"name": "testsvc"}}
inventory.set_vars_from_vmi(hostname, vmi, {"testdomain": service}, opts)
inventory._set_vars_from_vmi(hostname, vmi, {"testdomain": service}, opts)
set_ansible_host_and_port.assert_called_once_with(
vmi, hostname, "1.1.1.1", service, opts

View File

@@ -20,134 +20,26 @@ from ansible_collections.kubevirt.core.tests.unit.utils.ansible_module_mock impo
)
@pytest.fixture(scope="module")
def vm_definition_create():
return {
"apiVersion": "kubevirt.io/v1",
"kind": "VirtualMachine",
"metadata": {
"name": "testvm",
"namespace": "default",
"labels": {"environment": "staging", "service": "loadbalancer"},
},
"spec": {
"running": True,
"instancetype": {"name": "u1.medium"},
"preference": {"name": "fedora"},
"dataVolumeTemplates": [
{
"metadata": {"name": "testdv"},
"spec": {
"source": {
"registry": {
"url": "docker://quay.io/containerdisks/fedora:latest"
},
},
"storage": {
"accessModes": ["ReadWriteOnce"],
"resources": {"requests": {"storage": "5Gi"}},
},
},
}
],
"template": {
"metadata": {
"labels": {"environment": "staging", "service": "loadbalancer"}
},
"spec": {
"domain": {"devices": {}},
"terminationGracePeriodSeconds": 180,
},
},
},
}
def test_module_fails_when_required_args_missing(mocker):
mocker.patch.object(AnsibleModule, "fail_json", fail_json)
with pytest.raises(AnsibleFailJson):
set_module_args({})
kubevirt_vm.main()
@pytest.fixture(scope="module")
def vm_definition_running():
return {
"apiVersion": "kubevirt.io/v1",
"kind": "VirtualMachine",
"metadata": {
"name": "testvm",
"namespace": "default",
},
"spec": {
"running": True,
"template": {
"spec": {
"domain": {"devices": {}},
},
},
},
}
@pytest.fixture(scope="module")
def vm_definition_stopped():
return {
"apiVersion": "kubevirt.io/v1",
"kind": "VirtualMachine",
"metadata": {
"name": "testvm",
"namespace": "default",
},
"spec": {
"running": False,
"template": {
"spec": {
"domain": {"devices": {}},
},
},
},
}
@pytest.fixture(scope="module")
def module_params_default():
return {
"api_version": "kubevirt.io/v1",
"annotations": None,
"labels": None,
"running": True,
"instancetype": None,
"preference": None,
"data_volume_templates": None,
"spec": None,
"wait": False,
"wait_sleep": 5,
"wait_timeout": 5,
"kubeconfig": None,
"context": None,
"host": None,
"api_key": None,
"username": None,
"password": None,
"validate_certs": None,
"ca_cert": None,
"client_cert": None,
"client_key": None,
"proxy": None,
"no_proxy": None,
"proxy_headers": None,
"persist_config": None,
"impersonate_user": None,
"impersonate_groups": None,
"state": "present",
"force": False,
"delete_options": None,
}
@pytest.fixture(scope="module")
def module_params_create(module_params_default):
return module_params_default | {
VM_DEFINITION_CREATE = {
"apiVersion": "kubevirt.io/v1",
"kind": "VirtualMachine",
"metadata": {
"name": "testvm",
"namespace": "default",
"labels": {"service": "loadbalancer", "environment": "staging"},
"labels": {"environment": "staging", "service": "loadbalancer"},
},
"spec": {
"running": True,
"instancetype": {"name": "u1.medium"},
"preference": {"name": "fedora"},
"data_volume_templates": [
"dataVolumeTemplates": [
{
"metadata": {"name": "testdv"},
"spec": {
@@ -163,121 +55,187 @@ def module_params_create(module_params_default):
},
}
],
"spec": {
"domain": {"devices": {}},
"terminationGracePeriodSeconds": 180,
"template": {
"metadata": {
"labels": {"environment": "staging", "service": "loadbalancer"}
},
"spec": {
"domain": {"devices": {}},
"terminationGracePeriodSeconds": 180,
},
},
}
},
}
@pytest.fixture(scope="module")
def module_params_running(module_params_default):
return module_params_default | {
VM_DEFINITION_RUNNING = {
"apiVersion": "kubevirt.io/v1",
"kind": "VirtualMachine",
"metadata": {
"name": "testvm",
"namespace": "default",
},
"spec": {
"running": True,
}
"template": {
"spec": {
"domain": {"devices": {}},
},
},
},
}
@pytest.fixture(scope="module")
def module_params_stopped(module_params_default):
return module_params_default | {
VM_DEFINITION_STOPPED = {
"apiVersion": "kubevirt.io/v1",
"kind": "VirtualMachine",
"metadata": {
"name": "testvm",
"namespace": "default",
},
"spec": {
"running": False,
}
"template": {
"spec": {
"domain": {"devices": {}},
},
},
},
}
MODULE_PARAMS_DEFAULT = {
"api_version": "kubevirt.io/v1",
"annotations": None,
"labels": None,
"running": True,
"instancetype": None,
"preference": None,
"data_volume_templates": None,
"spec": None,
"wait": False,
"wait_sleep": 5,
"wait_timeout": 5,
"kubeconfig": None,
"context": None,
"host": None,
"api_key": None,
"username": None,
"password": None,
"validate_certs": None,
"ca_cert": None,
"client_cert": None,
"client_key": None,
"proxy": None,
"no_proxy": None,
"proxy_headers": None,
"persist_config": None,
"impersonate_user": None,
"impersonate_groups": None,
"state": "present",
"force": False,
"delete_options": None,
}
@pytest.fixture(scope="module")
def module_params_delete(module_params_default):
return module_params_default | {
"name": "testvm",
"namespace": "default",
"state": "absent",
"wait": True,
}
MODULE_PARAMS_CREATE = MODULE_PARAMS_DEFAULT | {
"name": "testvm",
"namespace": "default",
"labels": {"service": "loadbalancer", "environment": "staging"},
"instancetype": {"name": "u1.medium"},
"preference": {"name": "fedora"},
"data_volume_templates": [
{
"metadata": {"name": "testdv"},
"spec": {
"source": {
"registry": {
"url": "docker://quay.io/containerdisks/fedora:latest"
},
},
"storage": {
"accessModes": ["ReadWriteOnce"],
"resources": {"requests": {"storage": "5Gi"}},
},
},
}
],
"spec": {
"domain": {"devices": {}},
"terminationGracePeriodSeconds": 180,
},
}
MODULE_PARAMS_RUNNING = MODULE_PARAMS_DEFAULT | {
"name": "testvm",
"namespace": "default",
"running": True,
}
@pytest.fixture(scope="module")
def k8s_module_params_create(module_params_create, vm_definition_create):
return module_params_create | {
"generate_name": None,
"resource_definition": vm_definition_create,
"wait_condition": {"type": "Ready", "status": True},
}
MODULE_PARAMS_STOPPED = MODULE_PARAMS_DEFAULT | {
"name": "testvm",
"namespace": "default",
"running": False,
}
MODULE_PARAMS_DELETE = MODULE_PARAMS_DEFAULT | {
"name": "testvm",
"namespace": "default",
"state": "absent",
"wait": True,
}
@pytest.fixture(scope="module")
def k8s_module_params_running(module_params_running, vm_definition_running):
return module_params_running | {
"generate_name": None,
"resource_definition": vm_definition_running,
"wait_condition": {"type": "Ready", "status": True},
}
K8S_MODULE_PARAMS_CREATE = MODULE_PARAMS_CREATE | {
"generate_name": None,
"resource_definition": VM_DEFINITION_CREATE,
"wait_condition": {"type": "Ready", "status": True},
}
K8S_MODULE_PARAMS_RUNNING = MODULE_PARAMS_RUNNING | {
"generate_name": None,
"resource_definition": VM_DEFINITION_RUNNING,
"wait_condition": {"type": "Ready", "status": True},
}
@pytest.fixture(scope="module")
def k8s_module_params_stopped(module_params_stopped, vm_definition_stopped):
return module_params_stopped | {
"generate_name": None,
"resource_definition": vm_definition_stopped,
"wait_condition": {"type": "Ready", "status": False, "reason": "VMINotExists"},
}
K8S_MODULE_PARAMS_STOPPED = MODULE_PARAMS_STOPPED | {
"generate_name": None,
"resource_definition": VM_DEFINITION_STOPPED,
"wait_condition": {"type": "Ready", "status": False, "reason": "VMINotExists"},
}
@pytest.fixture(scope="module")
def k8s_module_params_delete(module_params_delete, vm_definition_running):
return module_params_delete | {
"generate_name": None,
"resource_definition": vm_definition_running,
"wait_condition": {"type": "Ready", "status": True},
}
def test_module_fails_when_required_args_missing(mocker):
mocker.patch.object(AnsibleModule, "fail_json", fail_json)
with pytest.raises(AnsibleFailJson):
set_module_args({})
kubevirt_vm.main()
K8S_MODULE_PARAMS_DELETE = MODULE_PARAMS_DELETE | {
"generate_name": None,
"resource_definition": VM_DEFINITION_RUNNING,
"wait_condition": {"type": "Ready", "status": True},
}
@pytest.mark.parametrize(
"module_params,k8s_module_params,vm_definition,method",
[
(
"module_params_create",
"k8s_module_params_create",
"vm_definition_create",
MODULE_PARAMS_CREATE,
K8S_MODULE_PARAMS_CREATE,
VM_DEFINITION_CREATE,
"create",
),
(
"module_params_running",
"k8s_module_params_running",
"vm_definition_running",
MODULE_PARAMS_RUNNING,
K8S_MODULE_PARAMS_RUNNING,
VM_DEFINITION_RUNNING,
"update",
),
(
"module_params_stopped",
"k8s_module_params_stopped",
"vm_definition_stopped",
MODULE_PARAMS_STOPPED,
K8S_MODULE_PARAMS_STOPPED,
VM_DEFINITION_STOPPED,
"update",
),
(
"module_params_delete",
"k8s_module_params_delete",
"vm_definition_running",
MODULE_PARAMS_DELETE,
K8S_MODULE_PARAMS_DELETE,
VM_DEFINITION_RUNNING,
"delete",
),
],
)
def test_module(
request,
mocker,
module_params,
k8s_module_params,
vm_definition,
method,
):
def test_module(mocker, module_params, k8s_module_params, vm_definition, method):
mocker.patch.object(AnsibleModule, "exit_json", exit_json)
mocker.patch.object(runner, "get_api_client")
@@ -292,57 +250,192 @@ def test_module(
)
with pytest.raises(AnsibleExitJson):
set_module_args(request.getfixturevalue(module_params))
set_module_args(module_params)
kubevirt_vm.main()
perform_action.assert_called_once_with(
mocker.ANY,
request.getfixturevalue(vm_definition),
request.getfixturevalue(k8s_module_params),
vm_definition,
k8s_module_params,
)
@pytest.fixture(scope="module")
def create_vm_params():
return {
"api_version": "kubevirt.io/v1",
"running": True,
CREATE_VM_PARAMS = {
"api_version": "kubevirt.io/v1",
"running": True,
"namespace": "default",
}
CREATE_VM_PARAMS_ANNOTATIONS = CREATE_VM_PARAMS | {
"annotations": {"test": "test"},
}
CREATE_VM_PARAMS_LABELS = CREATE_VM_PARAMS | {
"labels": {"test": "test"},
}
CREATE_VM_PARAMS_INSTANCETYPE = CREATE_VM_PARAMS | {
"instancetype": {"name": "u1.medium"},
}
CREATE_VM_PARAMS_PREFERENCE = CREATE_VM_PARAMS | {
"preference": {"name": "fedora"},
}
CREATE_VM_PARAMS_DATAVOLUMETEMPLATE = CREATE_VM_PARAMS | {
"data_volume_templates": [
{
"metadata": {"name": "testdv"},
"spec": {
"source": {
"registry": {
"url": "docker://quay.io/containerdisks/fedora:latest"
},
},
"storage": {
"accessModes": ["ReadWriteOnce"],
"resources": {"requests": {"storage": "5Gi"}},
},
},
},
],
}
CREATE_VM_PARAMS_NAME = CREATE_VM_PARAMS | {
"name": "testvm",
}
CREATE_VM_PARAMS_GENERATE_NAME = CREATE_VM_PARAMS | {
"generate_name": "testvm-1234",
}
CREATE_VM_PARAMS_SPECS = CREATE_VM_PARAMS | {
"spec": {
"domain": {
"devices": {
"cpu": {
"cores": 2,
"socket": 1,
"threads": 2,
}
}
}
}
}
CREATED_VM = {
"apiVersion": "kubevirt.io/v1",
"kind": "VirtualMachine",
"metadata": {
"namespace": "default",
}
},
"spec": {
"running": True,
"template": {
"spec": {
"domain": {
"devices": {},
},
},
},
},
}
CREATED_VM_LABELS = {
"apiVersion": "kubevirt.io/v1",
"kind": "VirtualMachine",
"metadata": {
"namespace": "default",
"labels": {
"test": "test",
},
},
"spec": {
"running": True,
"template": {
"metadata": {
"labels": {"test": "test"},
},
"spec": {
"domain": {
"devices": {},
},
},
},
},
}
@pytest.fixture(scope="module")
def create_vm_params_annotations(create_vm_params):
return create_vm_params | {
"annotations": {"test": "test"},
}
CREATED_VM_ANNOTATIONS = {
"apiVersion": "kubevirt.io/v1",
"kind": "VirtualMachine",
"metadata": {
"namespace": "default",
"annotations": {
"test": "test",
},
},
"spec": {
"running": True,
"template": {
"metadata": {
"annotations": {"test": "test"},
},
"spec": {
"domain": {
"devices": {},
},
},
},
},
}
@pytest.fixture(scope="module")
def create_vm_params_labels(create_vm_params):
return create_vm_params | {
"labels": {"test": "test"},
}
@pytest.fixture(scope="module")
def create_vm_params_instancetype(create_vm_params):
return create_vm_params | {
CREATED_VM_INSTANCETYPE = {
"apiVersion": "kubevirt.io/v1",
"kind": "VirtualMachine",
"metadata": {
"namespace": "default",
},
"spec": {
"running": True,
"instancetype": {"name": "u1.medium"},
}
"template": {
"spec": {
"domain": {
"devices": {},
},
},
},
},
}
@pytest.fixture(scope="module")
def create_vm_params_preference(create_vm_params):
return create_vm_params | {
CREATED_VM_PREFERENCE = {
"apiVersion": "kubevirt.io/v1",
"kind": "VirtualMachine",
"metadata": {
"namespace": "default",
},
"spec": {
"running": True,
"preference": {"name": "fedora"},
}
"template": {
"spec": {
"domain": {
"devices": {},
},
},
},
},
}
@pytest.fixture(scope="module")
def create_vm_params_datavolumetemplate(create_vm_params):
return create_vm_params | {
"data_volume_templates": [
CREATED_VM_DATAVOLUMETEMPLATE = {
"apiVersion": "kubevirt.io/v1",
"kind": "VirtualMachine",
"metadata": {
"namespace": "default",
},
"spec": {
"running": True,
"dataVolumeTemplates": [
{
"metadata": {"name": "testdv"},
"spec": {
@@ -358,282 +451,92 @@ def create_vm_params_datavolumetemplate(create_vm_params):
},
},
],
}
"template": {
"spec": {
"domain": {
"devices": {},
},
},
},
},
}
@pytest.fixture(scope="module")
def create_vm_params_name(create_vm_params):
return create_vm_params | {
CREATED_VM_NAME = {
"apiVersion": "kubevirt.io/v1",
"kind": "VirtualMachine",
"metadata": {
"name": "testvm",
}
@pytest.fixture(scope="module")
def create_vm_params_generate_name(create_vm_params):
return create_vm_params | {
"generate_name": "testvm-1234",
}
@pytest.fixture(scope="module")
def create_vm_params_specs(create_vm_params):
return create_vm_params | {
"spec": {
"domain": {
"devices": {
"cpu": {
"cores": 2,
"socket": 1,
"threads": 2,
}
}
}
}
}
@pytest.fixture(scope="module")
def created_vm():
return {
"apiVersion": "kubevirt.io/v1",
"kind": "VirtualMachine",
"metadata": {
"namespace": "default",
"namespace": "default",
},
"spec": {
"running": True,
"template": {
"spec": {
"domain": {
"devices": {},
},
},
},
"spec": {
"running": True,
"template": {
"spec": {
"domain": {
"devices": {},
},
}
CREATED_VM_GENERATE_NAME = {
"apiVersion": "kubevirt.io/v1",
"kind": "VirtualMachine",
"metadata": {
"generateName": "testvm-1234",
"namespace": "default",
},
"spec": {
"running": True,
"template": {
"spec": {
"domain": {
"devices": {},
},
},
},
},
}
CREATED_VM_SPECS = {
"apiVersion": "kubevirt.io/v1",
"kind": "VirtualMachine",
"metadata": {
"namespace": "default",
},
"spec": {
"running": True,
"template": {
"spec": {
"domain": {
"devices": {
"cpu": {
"cores": 2,
"socket": 1,
"threads": 2,
}
},
},
},
},
}
@pytest.fixture(scope="module")
def created_vm_labels():
return {
"apiVersion": "kubevirt.io/v1",
"kind": "VirtualMachine",
"metadata": {
"namespace": "default",
"labels": {
"test": "test",
},
},
"spec": {
"running": True,
"template": {
"metadata": {
"labels": {"test": "test"},
},
"spec": {
"domain": {
"devices": {},
},
},
},
},
}
@pytest.fixture(scope="module")
def created_vm_annotations():
return {
"apiVersion": "kubevirt.io/v1",
"kind": "VirtualMachine",
"metadata": {
"namespace": "default",
"annotations": {
"test": "test",
},
},
"spec": {
"running": True,
"template": {
"metadata": {
"annotations": {"test": "test"},
},
"spec": {
"domain": {
"devices": {},
},
},
},
},
}
@pytest.fixture(scope="module")
def created_vm_instancetype():
return {
"apiVersion": "kubevirt.io/v1",
"kind": "VirtualMachine",
"metadata": {
"namespace": "default",
},
"spec": {
"running": True,
"instancetype": {"name": "u1.medium"},
"template": {
"spec": {
"domain": {
"devices": {},
},
},
},
},
}
@pytest.fixture(scope="module")
def created_vm_preference():
return {
"apiVersion": "kubevirt.io/v1",
"kind": "VirtualMachine",
"metadata": {
"namespace": "default",
},
"spec": {
"running": True,
"preference": {"name": "fedora"},
"template": {
"spec": {
"domain": {
"devices": {},
},
},
},
},
}
@pytest.fixture(scope="module")
def created_vm_datavolumetemplate():
return {
"apiVersion": "kubevirt.io/v1",
"kind": "VirtualMachine",
"metadata": {
"namespace": "default",
},
"spec": {
"running": True,
"dataVolumeTemplates": [
{
"metadata": {"name": "testdv"},
"spec": {
"source": {
"registry": {
"url": "docker://quay.io/containerdisks/fedora:latest"
},
},
"storage": {
"accessModes": ["ReadWriteOnce"],
"resources": {"requests": {"storage": "5Gi"}},
},
},
},
],
"template": {
"spec": {
"domain": {
"devices": {},
},
},
},
},
}
@pytest.fixture(scope="module")
def created_vm_name():
return {
"apiVersion": "kubevirt.io/v1",
"kind": "VirtualMachine",
"metadata": {
"name": "testvm",
"namespace": "default",
},
"spec": {
"running": True,
"template": {
"spec": {
"domain": {
"devices": {},
},
},
},
},
}
@pytest.fixture(scope="module")
def created_vm_generate_name():
return {
"apiVersion": "kubevirt.io/v1",
"kind": "VirtualMachine",
"metadata": {
"generateName": "testvm-1234",
"namespace": "default",
},
"spec": {
"running": True,
"template": {
"spec": {
"domain": {
"devices": {},
},
},
},
},
}
@pytest.fixture(scope="module")
def created_vm_specs():
return {
"apiVersion": "kubevirt.io/v1",
"kind": "VirtualMachine",
"metadata": {
"namespace": "default",
},
"spec": {
"running": True,
"template": {
"spec": {
"domain": {
"devices": {
"cpu": {
"cores": 2,
"socket": 1,
"threads": 2,
}
},
},
},
},
},
}
},
}
@pytest.mark.parametrize(
"params,expected",
[
("create_vm_params", "created_vm"),
("create_vm_params_annotations", "created_vm_annotations"),
("create_vm_params_labels", "created_vm_labels"),
("create_vm_params_instancetype", "created_vm_instancetype"),
("create_vm_params_preference", "created_vm_preference"),
("create_vm_params_datavolumetemplate", "created_vm_datavolumetemplate"),
("create_vm_params_name", "created_vm_name"),
("create_vm_params_generate_name", "created_vm_generate_name"),
("create_vm_params_specs", "created_vm_specs"),
(CREATE_VM_PARAMS, CREATED_VM),
(CREATE_VM_PARAMS_ANNOTATIONS, CREATED_VM_ANNOTATIONS),
(CREATE_VM_PARAMS_LABELS, CREATED_VM_LABELS),
(CREATE_VM_PARAMS_INSTANCETYPE, CREATED_VM_INSTANCETYPE),
(CREATE_VM_PARAMS_PREFERENCE, CREATED_VM_PREFERENCE),
(CREATE_VM_PARAMS_DATAVOLUMETEMPLATE, CREATED_VM_DATAVOLUMETEMPLATE),
(CREATE_VM_PARAMS_NAME, CREATED_VM_NAME),
(CREATE_VM_PARAMS_GENERATE_NAME, CREATED_VM_GENERATE_NAME),
(CREATE_VM_PARAMS_SPECS, CREATED_VM_SPECS),
],
)
def test_create_vm(request, params, expected):
assert kubevirt_vm.create_vm(
request.getfixturevalue(params)
) == request.getfixturevalue(expected)
def test_create_vm(params, expected):
assert kubevirt_vm.create_vm(params) == expected

View File

@@ -24,85 +24,62 @@ from ansible_collections.kubevirt.core.tests.unit.utils.ansible_module_mock impo
)
@pytest.fixture(scope="module")
def find_args_default():
return {
"kind": "VirtualMachine",
"api_version": "kubevirt.io/v1",
"name": None,
"namespace": None,
"label_selectors": [],
"field_selectors": [],
"wait": None,
"wait_sleep": 5,
"wait_timeout": 120,
"condition": {"type": "Ready", "status": True},
}
@pytest.fixture(scope="module")
def find_args_name_namespace(find_args_default):
return find_args_default | {
"name": "testvm",
"namespace": "default",
}
@pytest.fixture(scope="module")
def find_args_label_selector(find_args_default):
return find_args_default | {
"label_selectors": ["app=test"],
}
@pytest.fixture(scope="module")
def find_args_field_selector(find_args_default):
return find_args_default | {
"field_selectors": ["app=test"],
}
@pytest.fixture(scope="module")
def find_args_running(find_args_default):
return find_args_default | {
"wait": True,
"condition": {"type": "Ready", "status": True},
}
@pytest.fixture(scope="module")
def find_args_stopped(find_args_default):
return find_args_default | {
"wait": True,
"condition": {"type": "Ready", "status": False, "reason": "VMINotExists"},
}
@pytest.mark.parametrize(
"module_args",
[
{"running": False},
],
)
def test_module_fails_when_required_args_missing(mocker, module_args):
def test_module_fails_when_required_args_missing(mocker):
mocker.patch.object(AnsibleModule, "fail_json", fail_json)
with pytest.raises(AnsibleFailJson):
set_module_args(module_args)
set_module_args({"running": False})
kubevirt_vm_info.main()
FIND_ARGS_DEFAULT = {
"kind": "VirtualMachine",
"api_version": "kubevirt.io/v1",
"name": None,
"namespace": None,
"label_selectors": [],
"field_selectors": [],
"wait": None,
"wait_sleep": 5,
"wait_timeout": 120,
"condition": {"type": "Ready", "status": True},
}
FIND_ARGS_NAME_NAMESPACE = FIND_ARGS_DEFAULT | {
"name": "testvm",
"namespace": "default",
}
FIND_ARGS_LABEL_SELECTOR = FIND_ARGS_DEFAULT | {
"label_selectors": ["app=test"],
}
FIND_ARGS_FIELD_SELECTOR = FIND_ARGS_DEFAULT | {
"field_selectors": ["app=test"],
}
FIND_ARGS_RUNNING = FIND_ARGS_DEFAULT | {
"wait": True,
"condition": {"type": "Ready", "status": True},
}
FIND_ARGS_STOPPED = FIND_ARGS_DEFAULT | {
"wait": True,
"condition": {"type": "Ready", "status": False, "reason": "VMINotExists"},
}
@pytest.mark.parametrize(
"module_args,find_args",
[
({}, "find_args_default"),
({"name": "testvm", "namespace": "default"}, "find_args_name_namespace"),
({"label_selectors": "app=test"}, "find_args_label_selector"),
({"field_selectors": "app=test"}, "find_args_field_selector"),
({"wait": True, "running": True}, "find_args_running"),
({"wait": True, "running": False}, "find_args_stopped"),
({}, FIND_ARGS_DEFAULT),
({"name": "testvm", "namespace": "default"}, FIND_ARGS_NAME_NAMESPACE),
({"label_selectors": "app=test"}, FIND_ARGS_LABEL_SELECTOR),
({"field_selectors": "app=test"}, FIND_ARGS_FIELD_SELECTOR),
({"wait": True, "running": True}, FIND_ARGS_RUNNING),
({"wait": True, "running": False}, FIND_ARGS_STOPPED),
],
)
def test_module(request, mocker, module_args, find_args):
def test_module(mocker, module_args, find_args):
mocker.patch.object(AnsibleModule, "exit_json", exit_json)
mocker.patch.object(kubevirt_vm_info, "get_api_client")
@@ -120,4 +97,4 @@ def test_module(request, mocker, module_args, find_args):
set_module_args(module_args)
kubevirt_vm_info.main()
find.assert_called_once_with(**request.getfixturevalue(find_args))
find.assert_called_once_with(**find_args)