diff --git a/harvester_e2e_tests/fixtures/base.py b/harvester_e2e_tests/fixtures/base.py
new file mode 100644
index 000000000..b13a1421d
--- /dev/null
+++ b/harvester_e2e_tests/fixtures/base.py
@@ -0,0 +1,18 @@
+from datetime import datetime, timedelta
+from time import sleep
+
+
+def wait_until(timeout, snooze=3):
+    def wait_until_decorator(api_func):
+        def wrapped(*args, **kwargs):
+            endtime = datetime.now() + timedelta(seconds=timeout)
+            while endtime > datetime.now():
+                qualified, (code, data) = api_func(*args, **kwargs)
+                if qualified:
+                    break
+                sleep(snooze)
+            return qualified, (code, data)
+
+        return wrapped
+
+    return wait_until_decorator
diff --git a/harvester_e2e_tests/fixtures/images.py b/harvester_e2e_tests/fixtures/images.py
index eeef88111..642e9df1e 100644
--- a/harvester_e2e_tests/fixtures/images.py
+++ b/harvester_e2e_tests/fixtures/images.py
@@ -1,6 +1,7 @@
 from urllib.parse import urlparse, urljoin
 
 import pytest
+from .base import wait_until
 
 pytest_plugins = ["harvester_e2e_tests.fixtures.api_client"]
 
@@ -68,3 +69,26 @@ def url(self):
         if self.is_file:
             return self.url_result.geturl().split("file://", 1)[-1]
         return self.url_result.geturl()
+
+
+@pytest.fixture(scope="session")
+def image_checker(api_client, wait_timeout, sleep_timeout):
+    class ImageChecker:
+        def __init__(self):
+            self.images = api_client.images
+
+        @wait_until(wait_timeout, sleep_timeout)
+        def wait_downloaded(self, image_name):
+            code, data = self.images.get(image_name)
+            if data.get('status', {}).get('progress') == 100:
+                return True, (code, data)
+            return False, (code, data)
+
+        @wait_until(wait_timeout, sleep_timeout)
+        def wait_deleted(self, image_name):
+            code, data = self.images.get(image_name)
+            if code == 404:
+                return True, (code, data)
+            return False, (code, data)
+
+    return ImageChecker()
diff --git a/harvester_e2e_tests/fixtures/networks.py b/harvester_e2e_tests/fixtures/networks.py
index 5d25b4eb1..48bcc8ab2 100644
--- a/harvester_e2e_tests/fixtures/networks.py
+++ b/harvester_e2e_tests/fixtures/networks.py
@@ -1,4 +1,7 @@
+import json
+
 import pytest
+from .base import wait_until
 
 
 @pytest.fixture(scope="session")
@@ -13,3 +16,21 @@ def vlan_nic(request):
     vlan_nic = request.config.getoption('--vlan-nic')
     assert vlan_nic, f"VLAN NIC {vlan_nic} not configured correctly."
     return vlan_nic
+
+
+@pytest.fixture(scope="session")
+def network_checker(api_client, wait_timeout, sleep_timeout):
+    class NetworkChecker:
+        def __init__(self):
+            self.networks = api_client.networks
+
+        @wait_until(wait_timeout, sleep_timeout)
+        def wait_routed(self, vnet_name):
+            code, data = self.networks.get(vnet_name)
+            annotations = data['metadata'].get('annotations', {})
+            route = json.loads(annotations.get('network.harvesterhci.io/route', '{}'))
+            if code == 200 and route.get('connectivity') == 'true':
+                return True, (code, data)
+            return False, (code, data)
+
+    return NetworkChecker()
diff --git a/harvester_e2e_tests/fixtures/settings.py b/harvester_e2e_tests/fixtures/settings.py
new file mode 100644
index 000000000..e1d3a6c3c
--- /dev/null
+++ b/harvester_e2e_tests/fixtures/settings.py
@@ -0,0 +1,93 @@
+import json
+from ipaddress import ip_address, ip_network
+
+import pytest
+from .base import wait_until
+
+
+@pytest.fixture(scope="session")
+def setting_checker(api_client, wait_timeout, sleep_timeout):
+    class SettingChecker:
+        def __init__(self):
+            self.settings = api_client.settings
+            self.network_annotation = 'k8s.v1.cni.cncf.io/network-status'
+
+        def _storage_net_configured(self):
+            code, data = self.settings.get('storage-network')
+            if (cs := data.get('status', {}).get('conditions')):
+                if 'True' == cs[-1].get('status') and 'Completed' == cs[-1].get('reason'):
+                    return True, (code, data)
+            return False, (code, data)
+
+        @wait_until(wait_timeout, sleep_timeout)
+        def wait_storage_net_enabled_on_harvester(self):
+            snet_configured, (code, data) = self._storage_net_configured()
+            if snet_configured and data.get('value'):
+                return True, (code, data)
+            return False, (code, data)
+
+        @wait_until(wait_timeout, sleep_timeout)
+        def wait_storage_net_disabled_on_harvester(self):
+            snet_configured, (code, data) = self._storage_net_configured()
+            if snet_configured and not data.get('value'):
+                return True, (code, data)
+            return False, (code, data)
+
+        def _lh_instance_mgrs_running(self):
+            code, data = api_client.get_pods(namespace='longhorn-system')
+            if not (code == 200):
+                return False, (code, data)
+
+            lh_instance_mgrs = [pod for pod in data['data'] if 'instance-manager' in pod['id']]
+            if not lh_instance_mgrs:
+                return False, ("No instance-manager pods", data)
+
+            for imgr in lh_instance_mgrs:
+                if 'Running' != imgr['status']['phase']:
+                    return False, (f"Pod {imgr['id']} is NOT Running", imgr)
+
+                if not (self.network_annotation in imgr['metadata']['annotations']):
+                    return False, (f"No annotation '{self.network_annotation}' on pod", imgr)
+
+                networks = json.loads(imgr['metadata']['annotations'][self.network_annotation])
+                if not networks:
+                    return False, (f"Pod annotation '{self.network_annotation}' is empty", imgr)
+
+            return True, (None, lh_instance_mgrs)
+
+        @wait_until(wait_timeout, sleep_timeout)
+        def wait_storage_net_enabled_on_longhorn(self, snet_cidr):
+            imgrs_running, (code, data) = self._lh_instance_mgrs_running()
+            if not imgrs_running:
+                return False, (code, data)
+
+            for imgr in data:
+                networks = json.loads(imgr['metadata']['annotations'][self.network_annotation])
+                try:
+                    snet_network = next(n for n in networks if 'lhnet1' == n.get('interface'))
+                except StopIteration:
+                    return False, ("No dedicated interface 'lhnet1'", imgr)
+
+                snet_ips = snet_network.get('ips', ['::1'])
+                if not all(ip_address(sip) in ip_network(snet_cidr) for sip in snet_ips):
+                    return False, (f"Dedicated IPs {snet_ips} do NOT fit {snet_cidr}", imgr)
+
+            return True, (None, None)
+
+        @wait_until(wait_timeout, sleep_timeout)
+        def wait_storage_net_disabled_on_longhorn(self):
+            imgrs_running, (code, data) = self._lh_instance_mgrs_running()
+            if not imgrs_running:
+                return False, (code, data)
+
+            for imgr in data:
+                networks = json.loads(imgr['metadata']['annotations'][self.network_annotation])
+                try:
+                    next(n for n in networks if 'lhnet1' == n.get('interface'))
+                    return False, ("Dedicated interface 'lhnet1' still exists", imgr)
+                except StopIteration:
+                    continue
+
+            return True, (None, None)
+
+    return SettingChecker()
diff --git a/harvester_e2e_tests/fixtures/volumes.py b/harvester_e2e_tests/fixtures/volumes.py
new file mode 100644
index 000000000..9251b285b
--- /dev/null
+++ b/harvester_e2e_tests/fixtures/volumes.py
@@ -0,0 +1,29 @@
+
+import pytest
+from .base import wait_until
+
+pytest_plugins = ["harvester_e2e_tests.fixtures.api_client"]
+
+
+@pytest.fixture(scope="session")
+def volume_checker(api_client, wait_timeout, sleep_timeout):
+    class VolumeChecker:
+        def __init__(self):
+            self.volumes = api_client.volumes
+            self.lhvolumes = api_client.lhvolumes
+
+        @wait_until(wait_timeout, sleep_timeout)
+        def wait_volumes_detached(self, vol_names):
+            for vol_name in vol_names:
+                code, data = self.volumes.get(name=vol_name)
+                if not (code == 200):
+                    return False, (code, data)
+
+                pvc_name = data["spec"]["volumeName"]
+                code, data = self.lhvolumes.get(pvc_name)
+                if not (200 == code and "detached" == data['status']['state']):
+                    return False, (code, data)
+
+            return True, (code, data)
+
+    return VolumeChecker()
diff --git a/harvester_e2e_tests/integrations/test_0_storage_network.py b/harvester_e2e_tests/integrations/test_0_storage_network.py
index 498221030..3f9ae360e 100644
--- a/harvester_e2e_tests/integrations/test_0_storage_network.py
+++ b/harvester_e2e_tests/integrations/test_0_storage_network.py
@@ -9,7 +9,8 @@
 
 pytest_plugins = [
     "harvester_e2e_tests.fixtures.api_client",
-    "harvester_e2e_tests.fixtures.networks"
+    "harvester_e2e_tests.fixtures.networks",
+    "harvester_e2e_tests.fixtures.settings"
 ]
 
 
@@ -78,7 +79,9 @@ def cluster_network(request, api_client, unique_name):
 @pytest.mark.settings
 @pytest.mark.networks
 @pytest.mark.skip_version_before('v1.0.3')
-def test_storage_network(api_client, cluster_network, vlan_id, unique_name, wait_timeout):
+def test_storage_network(
+    api_client, cluster_network, vlan_id, unique_name, wait_timeout, setting_checker
+):
     '''
     To cover test:
     - https://harvester.github.io/tests/manual/_incoming/1055_dedicated_storage_network/
@@ -129,12 +132,7 @@ def test_storage_network(api_client, cluster_network, vlan_id, unique_name, wait
     cidr = route['cidr']
 
     # Create storage-network
-    code, data = api_client.settings.get('storage-network')
-    assert 200 == code, (code, data)
-    origin_spec = api_client.settings.Spec.from_dict(data)
-    spec = api_client.settings.StorageNetworkSpec.enable_with(
-        vlan_id, cluster_network, cidr
-    )
+    spec = api_client.settings.StorageNetworkSpec.enable_with(vlan_id, cluster_network, cidr)
     code, data = api_client.settings.update('storage-network', spec)
     assert 200 == code, (code, data)
 
@@ -184,6 +182,11 @@
             f"Not completed: {retries}"
         )
 
-    # Teardown
-    code, data = api_client.settings.update('storage-network', origin_spec)
+    # teardown
+    disable_spec = api_client.settings.StorageNetworkSpec.disable()
+    code, data = api_client.settings.update('storage-network', disable_spec)
     assert 200 == code, (code, data)
+    snet_disabled, (code, data) = setting_checker.wait_storage_net_disabled_on_harvester()
+    assert snet_disabled, (code, data)
+    snet_disabled, (code, data) = setting_checker.wait_storage_net_disabled_on_longhorn()
+    assert snet_disabled, (code, data)
diff --git a/harvester_e2e_tests/integrations/test_1_images.py b/harvester_e2e_tests/integrations/test_1_images.py
index 6216dc395..44f88c180 100644
--- a/harvester_e2e_tests/integrations/test_1_images.py
+++ b/harvester_e2e_tests/integrations/test_1_images.py
@@ -3,18 +3,17 @@
 import re
 import zlib
 from datetime import datetime, timedelta
-from ipaddress import ip_address, ip_network
 from pathlib import Path
 from tempfile import NamedTemporaryFile
 from time import sleep
 
 import pytest
 
-
 pytest_plugins = [
     "harvester_e2e_tests.fixtures.api_client",
     "harvester_e2e_tests.fixtures.images",
-    "harvester_e2e_tests.fixtures.networks"
+    "harvester_e2e_tests.fixtures.networks",
+    "harvester_e2e_tests.fixtures.settings"
 ]
 
 
@@ -163,65 +162,29 @@ def vlan_cidr(api_client, cluster_network, vlan_id, wait_timeout, sleep_timeout)
 
 
 @pytest.fixture(scope="class")
-def storage_network(api_client, cluster_network, vlan_id, vlan_cidr, wait_timeout, sleep_timeout):
-    code, data = api_client.settings.get('storage-network')
-    assert 200 == code, (code, data)
-
-    # Enable from Harvester side
-    spec_orig = api_client.settings.Spec.from_dict(data)
-    spec = api_client.settings.StorageNetworkSpec.enable_with(vlan_id, cluster_network, vlan_cidr)
-    code, data = api_client.settings.update('storage-network', spec)
+def storage_network(api_client, cluster_network, vlan_id, vlan_cidr, setting_checker):
+    ''' Ref. https://docs.harvesterhci.io/v1.3/advanced/storagenetwork/#configuration-example
+    '''
+    enable_spec = api_client.settings.StorageNetworkSpec.enable_with(
+        vlan_id, cluster_network, vlan_cidr
+    )
+    code, data = api_client.settings.update('storage-network', enable_spec)
     assert 200 == code, (code, data)
-
-    endtime = datetime.now() + timedelta(seconds=wait_timeout)
-    while endtime > datetime.now():
-        code, data = api_client.settings.get('storage-network')
-        conds = data.get('status', {}).get('conditions', [])
-        if conds and 'True' == conds[-1].get('status') and 'Completed' == conds[-1].get('reason'):
-            break
-        sleep(sleep_timeout)
-    else:
-        raise AssertionError(
-            f"Fail to enable storage-network with error: {code}, {data}"
-        )
-
-    # Check on Longhorn side
-    done, ip_range = [], ip_network(vlan_cidr)
-    endtime = datetime.now() + timedelta(seconds=wait_timeout)
-    while endtime > datetime.now():
-        code, data = api_client.get_pods(namespace='longhorn-system')
-        lh_instance_mgrs = [d for d in data['data']
-                            if 'instance-manager' in d['id'] and d['id'] not in done]
-        retries = []
-        for im in lh_instance_mgrs:
-            if 'Running' != im['status']['phase']:
-                retries.append(im)
-                continue
-            nets = json.loads(im['metadata']['annotations']['k8s.v1.cni.cncf.io/network-status'])
-            try:
-                dedicated = next(n for n in nets if 'lhnet1' == n.get('interface'))
-            except StopIteration:
-                retries.append(im)
-                continue
-
-            if not all(ip_address(ip) in ip_range for ip in dedicated.get('ips', ['::1'])):
-                retries.append(im)
-                continue
-
-        if not retries:
-            break
-        sleep(sleep_timeout)
-    else:
-        raise AssertionError(
-            f"{len(retries)} Longhorn's instance manager not be updated after {wait_timeout}s\n"
-            f"Not completed: {retries}"
-        )
+    snet_enabled, (code, data) = setting_checker.wait_storage_net_enabled_on_harvester()
+    assert snet_enabled, (code, data)
+    snet_enabled, (code, data) = setting_checker.wait_storage_net_enabled_on_longhorn(vlan_cidr)
+    assert snet_enabled, (code, data)
 
     yield
 
     # Teardown
-    code, data = api_client.settings.update('storage-network', spec_orig)
+    disable_spec = api_client.settings.StorageNetworkSpec.disable()
+    code, data = api_client.settings.update('storage-network', disable_spec)
     assert 200 == code, (code, data)
+    snet_disabled, (code, data) = setting_checker.wait_storage_net_disabled_on_harvester()
+    assert snet_disabled, (code, data)
+    snet_disabled, (code, data) = setting_checker.wait_storage_net_disabled_on_longhorn()
+    assert snet_disabled, (code, data)
 
 
 @pytest.mark.p0
diff --git a/harvester_e2e_tests/integrations/test_3_vm.py b/harvester_e2e_tests/integrations/test_3_vm.py
index 41a140599..6cca59105 100644
--- a/harvester_e2e_tests/integrations/test_3_vm.py
+++ b/harvester_e2e_tests/integrations/test_3_vm.py
@@ -1,56 +1,44 @@
+import json
+import yaml
 from datetime import datetime, timedelta
 from time import sleep
+from types import SimpleNamespace
 
 import pytest
 
 pytest_plugins = [
-    "harvester_e2e_tests.fixtures.api_client"
+    "harvester_e2e_tests.fixtures.api_client",
+    "harvester_e2e_tests.fixtures.images",
+    "harvester_e2e_tests.fixtures.networks",
+    "harvester_e2e_tests.fixtures.settings",
+    "harvester_e2e_tests.fixtures.virtualmachines",
+    "harvester_e2e_tests.fixtures.volumes",
 ]
 
 
 @pytest.fixture(scope="module")
-def focal_image(api_client, unique_name, image_ubuntu, wait_timeout):
-    code, data = api_client.images.create_by_url(unique_name, image_ubuntu.url)
-    assert 201 == code, (
-        f"Failed to upload focal image with error: {code}, {data}"
-    )
+def ubuntu_image(api_client, unique_name, image_ubuntu, image_checker):
+    name = f"{image_ubuntu.name}-{unique_name}"
+    code, data = api_client.images.create_by_url(name, image_ubuntu.url)
+    assert 201 == code, (code, data)
 
-    endtime = datetime.now() + timedelta(seconds=wait_timeout)
-    while endtime > datetime.now():
-        code, data = api_client.images.get(unique_name)
-        if 'status' in data and 'progress' in data['status'] and \
-                data['status']['progress'] == 100:
-            break
-        sleep(5)
-    else:
-        raise AssertionError(
-            f"Image {unique_name} can't be ready with {wait_timeout} timed out\n"
-            f"Got error: {code}, {data}"
-        )
+    image_downloaded, (code, data) = image_checker.wait_downloaded(name)
+    assert image_downloaded, (code, data)
 
     namespace = data['metadata']['namespace']
-    name = data['metadata']['name']
+    assert name == data['metadata']['name'], data
 
-    yield dict(ssh_user=image_ubuntu.ssh_user, id=f"{namespace}/{name}")
+    yield SimpleNamespace(
+        name=name,
+        id=f"{namespace}/{name}",
+        ssh_user=image_ubuntu.ssh_user
+    )
 
-    is_delete = False
-    endtime = datetime.now() + timedelta(seconds=wait_timeout)
-    while endtime > datetime.now():
-        if not is_delete:
-            code, data = api_client.images.delete(name, namespace)
-            if code == 200:
-                is_delete = True
-
-        if is_delete:
-            code, data = api_client.images.get(unique_name)
-            if code == 404:
-                break
-        sleep(5)
-    else:
-        raise AssertionError(
-            f"Image {unique_name} can't be deleted with {wait_timeout} timed out\n"
-            f"Got error: {code}, {data}"
-        )
+    # teardown
+    code, data = api_client.images.delete(name, namespace)
+    assert 200 == code, (code, data)
+    image_deleted, (code, data) = image_checker.wait_deleted(name)
+    assert image_deleted, (code, data)
 
 
 @pytest.fixture(scope="class")
@@ -76,13 +64,117 @@ def available_node_names(api_client):
     yield node_names
 
 
+@pytest.fixture(scope="class")
+def cluster_network(api_client, vlan_nic):
+    name = f"cnet-{vlan_nic}"
f"cnet-{vlan_nic}" + code, data = api_client.clusternetworks.create(name) + assert 201 == code, (code, data) + code, data = api_client.clusternetworks.create_config(name, name, vlan_nic) + assert 201 == code, (code, data) + + yield name + + # teardown + code, data = api_client.clusternetworks.delete_config(name) + assert 200 == code, (code, data) + code, data = api_client.clusternetworks.delete(name) + assert 200 == code, (code, data) + + +@pytest.fixture(scope="class") +def vm_network(api_client, unique_name, cluster_network, vlan_id, network_checker): + name = f"vnet-{unique_name}" + code, data = api_client.networks.create(name, vlan_id, cluster_network=cluster_network) + assert 201 == code, (code, data) + + vnet_routed, (code, data) = network_checker.wait_routed(name) + assert vnet_routed, (code, data) + route = json.loads(data['metadata'].get('annotations').get('network.harvesterhci.io/route')) + + yield SimpleNamespace( + name=name, + vlan_id=vlan_id, + cidr=route['cidr'] + ) + + # teardown + code, data = api_client.networks.delete(name) + assert 200 == code, (code, data) + + +@pytest.fixture +def minimal_vm(api_client, unique_name, ubuntu_image, ssh_keypair, vm_checker): + unique_vm_name = f"vm-{unique_name}" + cpu, mem = 1, 2 + pub_key, pri_key = ssh_keypair + vm_spec = api_client.vms.Spec(cpu, mem) + vm_spec.add_image("disk-0", ubuntu_image.id) + + userdata = yaml.safe_load(vm_spec.user_data) + userdata['ssh_authorized_keys'] = [pub_key] + userdata['password'] = 'password' + userdata['chpasswd'] = dict(expire=False) + userdata['sshpwauth'] = True + vm_spec.user_data = yaml.dump(userdata) + code, data = api_client.vms.create(unique_vm_name, vm_spec) + + vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(unique_vm_name, ['default']) + assert vm_got_ips, ( + f"Fail to start VM and get IP with error: {code}, {data}" + ) + vm_ip = next(i['ipAddress'] for i in data['status']['interfaces'] if i['name'] == 'default') + + code, data = api_client.hosts.get(data['status']['nodeName']) + host_ip = next(a['address'] for a in data['status']['addresses'] if a['type'] == 'InternalIP') + + yield SimpleNamespace(**{ + "name": unique_vm_name, + "host_ip": host_ip, + "vm_ip": vm_ip, + "ssh_user": ubuntu_image.ssh_user + }) + + # teardown + code, data = api_client.vms.get(unique_vm_name) + vm_spec = api_client.vms.Spec.from_dict(data) + vm_deleted, (code, data) = vm_checker.wait_deleted(unique_vm_name) + assert vm_deleted, (code, data) + + for vol in vm_spec.volumes: + vol_name = vol['volume']['persistentVolumeClaim']['claimName'] + api_client.volumes.delete(vol_name) + + +@pytest.fixture +def storage_network(api_client, cluster_network, vm_network, setting_checker): + ''' Ref. 
+    ''' Ref. https://docs.harvesterhci.io/v1.3/advanced/storagenetwork/#configuration-example
+    '''
+    yield SimpleNamespace(**{
+        "vlan_id": vm_network.vlan_id,
+        "cluster_network": cluster_network,
+        "cidr": vm_network.cidr,
+        "enable_spec": api_client.settings.StorageNetworkSpec.enable_with(
+            vm_network.vlan_id, cluster_network, vm_network.cidr
+        )
+    })
+
+    # teardown
+    disable_spec = api_client.settings.StorageNetworkSpec.disable()
+    code, data = api_client.settings.update('storage-network', disable_spec)
+    assert 200 == code, (code, data)
+    snet_disabled, (code, data) = setting_checker.wait_storage_net_disabled_on_harvester()
+    assert snet_disabled, (code, data)
+    snet_disabled, (code, data) = setting_checker.wait_storage_net_disabled_on_longhorn()
+    assert snet_disabled, (code, data)
+
+
 @pytest.mark.p0
 @pytest.mark.virtualmachines
 def test_multiple_migrations(
-    api_client, unique_name, focal_image, wait_timeout, available_node_names
+    api_client, unique_name, ubuntu_image, wait_timeout, available_node_names
 ):
     vm_spec = api_client.vms.Spec(1, 1)
-    vm_spec.add_image('disk-0', focal_image['id'])
+    vm_spec.add_image('disk-0', ubuntu_image.id)
     vm_names = [f"migrate-1-{unique_name}", f"migrate-2-{unique_name}"]
     volumes = []
     for vm_name in vm_names:
@@ -106,6 +198,8 @@ def test_multiple_migrations(
             break
         sleep(5)
     else:
+        for vm_name in vm_names:
+            api_client.vms.delete(vm_name)
         raise AssertionError(
             f"Can't find VM {vm_name} with {wait_timeout} timed out\n"
             f"Got error: {code}, {data}"
         )
@@ -140,6 +234,8 @@ def test_multiple_migrations(
             break
         sleep(5)
     else:
+        for vm_name in vm_names:
+            api_client.vms.delete(vm_name)
         raise AssertionError("\n".join(fails))
 
     # teardown
@@ -171,10 +267,11 @@ def test_multiple_migrations(
 
 @pytest.mark.p0
 @pytest.mark.virtualmachines
-def test_migrate_vm_with_user_data(api_client, unique_name, focal_image, wait_timeout,
-                                   available_node_names):
+def test_migrate_vm_with_user_data(
+    api_client, unique_name, ubuntu_image, wait_timeout, available_node_names, vm_checker
+):
     vm_spec = api_client.vms.Spec(1, 1)
-    vm_spec.add_image('disk-0', focal_image['id'])
+    vm_spec.add_image('disk-0', ubuntu_image.id)
     vm_spec.user_data += (
         "password: test\n"
         "chpasswd:\n"
@@ -197,6 +294,7 @@ def test_migrate_vm_with_user_data(api_client, unique_name, focal_image, wait_ti
             break
         sleep(5)
     else:
+        vm_checker.wait_deleted(unique_name)
         raise AssertionError(
             f"Can't find VM {unique_name} with {wait_timeout} timed out\n"
             f"Got error: {code}, {data}"
         )
@@ -221,24 +319,15 @@ def test_migrate_vm_with_user_data(api_client, unique_name, focal_image, wait_ti
             break
         sleep(5)
     else:
+        vm_checker.wait_deleted(unique_name)
         raise AssertionError(
             f"The migration of VM {unique_name} is not completed with {wait_timeout} timed out"
             f"Got error: {code}, {data}"
         )
 
     # teardown
-    api_client.vms.delete(unique_name)
-    endtime = datetime.now() + timedelta(seconds=wait_timeout)
-    while endtime > datetime.now():
-        code, data = api_client.vms.get_status(unique_name)
-        if code == 404:
-            break
-        sleep(5)
-    else:
-        raise AssertionError(
-            f"VM {unique_name} can't be deleted with {wait_timeout} timed out"
-            f"Got error: {code}, {data}"
-        )
+    vm_deleted, (code, data) = vm_checker.wait_deleted(unique_name)
+    assert vm_deleted, (code, data)
 
     for vol in api_client.vms.Spec.from_dict(vm_data).volumes:
         if vol['volume'].get('persistentVolumeClaim', {}).get('claimName', "") != "":
@@ -247,10 +336,11 @@ def test_migrate_vm_with_user_data(api_client, unique_name, focal_image, wait_ti
 
 @pytest.mark.p0
 @pytest.mark.virtualmachines
-def test_migrate_vm_with_multiple_volumes(api_client, unique_name, focal_image, wait_timeout,
-                                          available_node_names):
+def test_migrate_vm_with_multiple_volumes(
+    api_client, unique_name, ubuntu_image, wait_timeout, available_node_names, vm_checker
+):
     vm_spec = api_client.vms.Spec(1, 1)
-    vm_spec.add_image('disk-0', focal_image['id'])
+    vm_spec.add_image('disk-0', ubuntu_image.id)
     vm_spec.add_volume('disk-1', 1)
     code, vm_data = api_client.vms.create(unique_name, vm_spec)
     assert code == 201, (
@@ -268,6 +358,7 @@ def test_migrate_vm_with_multiple_volumes(api_client, unique_name, focal_image,
             break
         sleep(5)
     else:
+        vm_checker.wait_deleted(unique_name)
         raise AssertionError(
             f"Can't find VM {unique_name} with {wait_timeout} timed out\n"
             f"Got error: {code}, {data}"
@@ -292,25 +383,107 @@ def test_migrate_vm_with_multiple_volumes(api_client, unique_name, focal_image,
             break
         sleep(5)
     else:
+        vm_checker.wait_deleted(unique_name)
         raise AssertionError(
             f"The migration of VM {unique_name} is not completed with {wait_timeout} timed out"
             f"Got error: {code}, {data}"
         )
 
     # teardown
-    api_client.vms.delete(unique_name)
-    endtime = datetime.now() + timedelta(seconds=wait_timeout)
-    while endtime > datetime.now():
-        code, data = api_client.vms.get_status(unique_name)
-        if code == 404:
-            break
-        sleep(5)
-    else:
-        raise AssertionError(
-            f"VM {unique_name} can't be deleted with {wait_timeout} timed out"
-            f"Got error: {code}, {data}"
-        )
+    vm_deleted, (code, data) = vm_checker.wait_deleted(unique_name)
+    assert vm_deleted, (code, data)
 
     for vol in api_client.vms.Spec.from_dict(vm_data).volumes:
         if vol['volume'].get('persistentVolumeClaim', {}).get('claimName', "") != "":
             api_client.volumes.delete(vol['volume']['persistentVolumeClaim']['claimName'])
+
+
+@pytest.mark.p0
+@pytest.mark.networks
+@pytest.mark.settings
+@pytest.mark.virtualmachines
+@pytest.mark.skip_version_if("< v1.0.3")
+class TestVMWithStorageNetwork:
+    def test_enable_storage_network_with_api_stopped_vm(
+        self, api_client, minimal_vm, storage_network, setting_checker, vm_checker, volume_checker
+    ):
+        '''
+        Steps:
+        1. Have at least one Running VM
+        2. Enable storage-network (should fail)
+        3. Stop all VMs via API
+        4. Enable storage-network
+        '''
+        code, data = api_client.settings.update('storage-network', storage_network.enable_spec)
+        assert 422 == code, (
+            f"storage-network should NOT be enabled with running VM: {code}, {data}"
+        )
+
+        # stop VM by API
+        vm_stopped, (code, data) = vm_checker.wait_status_stopped(minimal_vm.name)
+        assert vm_stopped, (code, data)
+
+        code, data = api_client.vms.get(minimal_vm.name)
+        spec = api_client.vms.Spec.from_dict(data)
+        vol_names = [vol['volume']['persistentVolumeClaim']['claimName'] for vol in spec.volumes]
+        vm_volumes_detached, (code, data) = volume_checker.wait_volumes_detached(vol_names)
+        assert vm_volumes_detached, (code, data)
+
+        # enable storage-network
+        code, data = api_client.settings.update('storage-network', storage_network.enable_spec)
+        assert 200 == code, (code, data)
+        snet_enabled, (code, data) = setting_checker.wait_storage_net_enabled_on_harvester()
+        assert snet_enabled, (code, data)
+        snet_enabled, (code, data) = setting_checker.wait_storage_net_enabled_on_longhorn(
+            storage_network.cidr
+        )
+        assert snet_enabled, (code, data)
+
+    def test_enable_storage_network_with_cli_stopped_vm(
+        self, api_client, ssh_keypair, minimal_vm, storage_network, setting_checker,
+        vm_shell_from_host, wait_timeout, volume_checker
+    ):
+        ''' Refer to https://github.com/harvester/tests/issues/1022
+        Steps:
+        1. Have at least one Running VM
+        2. Enable storage-network (should fail)
+        3. Stop all VMs via VM CLI
+        4. Enable storage-network
+        '''
+        code, data = api_client.settings.update('storage-network', storage_network.enable_spec)
+        assert 422 == code, (
+            f"storage-network should NOT be enabled with running VM: {code}, {data}"
+        )
+
+        # stop VM by CLI
+        with vm_shell_from_host(
+            minimal_vm.host_ip, minimal_vm.vm_ip, minimal_vm.ssh_user, pkey=ssh_keypair[1]
+        ) as sh:
+            sh.exec_command('sudo shutdown now')
+
+        endtime = datetime.now() + timedelta(seconds=wait_timeout)
+        while endtime > datetime.now():
+            code, data = api_client.vms.get(minimal_vm.name)
+            if 200 == code and "Stopped" == data.get('status', {}).get('printableStatus'):
+                break
+            sleep(3)
+        else:
+            raise AssertionError(
+                f"Fail to shutdown VM {minimal_vm.name} with error: {code}, {data}"
+            )
+
+        code, data = api_client.vms.get(minimal_vm.name)
+        spec = api_client.vms.Spec.from_dict(data)
+        vol_names = [vol['volume']['persistentVolumeClaim']['claimName'] for vol in spec.volumes]
+        vm_volumes_detached, (code, data) = volume_checker.wait_volumes_detached(vol_names)
+        assert vm_volumes_detached, (code, data)
+
+        # enable storage-network
+        code, data = api_client.settings.update('storage-network', storage_network.enable_spec)
+        assert 200 == code, (code, data)
+        snet_enabled, (code, data) = setting_checker.wait_storage_net_enabled_on_harvester()
+        assert snet_enabled, (code, data)
+        snet_enabled, (code, data) = setting_checker.wait_storage_net_enabled_on_longhorn(
+            storage_network.cidr
+        )
+        assert snet_enabled, (code, data)