diff --git a/test/integration/testpaths/testpath_storage_migration.py b/test/integration/testpaths/testpath_storage_migration.py index e3cf58c0dc7..c487c3ef691 100644 --- a/test/integration/testpaths/testpath_storage_migration.py +++ b/test/integration/testpaths/testpath_storage_migration.py @@ -18,9 +18,7 @@ """ from nose.plugins.attrib import attr from marvin.cloudstackTestCase import cloudstackTestCase, unittest -from marvin.lib.utils import (cleanup_resources, - random_gen, - format_volume_to_ext3) +from marvin.lib.utils import (cleanup_resources) from marvin.lib.base import (Account, ServiceOffering, DiskOffering, @@ -35,19 +33,19 @@ from marvin.lib.common import (get_domain, get_template, list_volumes, list_virtual_machines, + createChecksum, + compareChecksum list_storage_pools, list_clusters, list_hosts, validateList ) from marvin.codes import (PASS, - ZONETAG1, - CLUSTERTAG1) + ZONETAG1, + CLUSTERTAG1) from marvin.cloudstackAPI import (deleteVolume) -import hashlib -from marvin.sshClient import SshClient import time from threading import Thread @@ -72,6 +70,7 @@ def GetDestinationPool(self, return destinationPool + def MigrateRootVolume(self, vm, destinationPool, @@ -201,200 +200,6 @@ def MigrateDataVolume(self, return -def createChecksum(self, virtual_machine, disk, disk_type): - """ Write data on the disk and return the md5 checksum""" - - random_data_0 = random_gen(size=100) - # creating checksum(MD5) - m = hashlib.md5() - m.update(random_data_0) - checksum_random_data_0 = m.hexdigest() - try: - ssh_client = SshClient( - virtual_machine.ssh_ip, - virtual_machine.ssh_port, - virtual_machine.username, - virtual_machine.password - ) - except Exception as e: - self.fail("SSH failed for VM: %s" % - e) - - self.debug("Formatting volume: %s to ext3" % disk.id) - # Format partition using ext3 - # Note that this is the second data disk partition of virtual machine - # as it was already containing data disk before attaching the new volume, - # Hence datadiskdevice_2 - - format_volume_to_ext3( - ssh_client, - self.testdata["volume_write_path"][ - virtual_machine.hypervisor][disk_type] - ) - cmds = ["fdisk -l", - "mkdir -p %s" % self.testdata["data_write_paths"]["mount_dir"], - "mount -t ext3 %s1 %s" % ( - self.testdata["volume_write_path"][ - virtual_machine.hypervisor][disk_type], - self.testdata["data_write_paths"]["mount_dir"] - ), - "mkdir -p %s/%s/%s " % ( - self.testdata["data_write_paths"]["mount_dir"], - self.testdata["data_write_paths"]["sub_dir"], - self.testdata["data_write_paths"]["sub_lvl_dir1"], - ), - "echo %s > %s/%s/%s/%s" % ( - random_data_0, - self.testdata["data_write_paths"]["mount_dir"], - self.testdata["data_write_paths"]["sub_dir"], - self.testdata["data_write_paths"]["sub_lvl_dir1"], - self.testdata["data_write_paths"]["random_data"] - ), - "cat %s/%s/%s/%s" % ( - self.testdata["data_write_paths"]["mount_dir"], - self.testdata["data_write_paths"]["sub_dir"], - self.testdata["data_write_paths"]["sub_lvl_dir1"], - self.testdata["data_write_paths"]["random_data"] - ) - ] - - for c in cmds: - self.debug("Command: %s" % c) - result = ssh_client.execute(c) - self.debug(result) - - # Unmount the storage - cmds = [ - "umount %s" % (self.testdata["data_write_paths"]["mount_dir"]), - ] - - for c in cmds: - self.debug("Command: %s" % c) - ssh_client.execute(c) - - return checksum_random_data_0 - - -def compareChecksum( - self, - original_checksum, - disk_type, - virt_machine=None, - disk=None, - new_vm=False): - """ - Create md5 checksum of the data present on the disk and 
compare - it with the given checksum - """ - - if disk_type == "datadiskdevice_1" and new_vm: - new_virtual_machine = VirtualMachine.create( - self.userapiclient, - self.testdata["small"], - templateid=self.template.id, - accountid=self.account.name, - domainid=self.account.domainid, - serviceofferingid=self.service_offering_cluster1.id, - zoneid=self.zone.id, - mode=self.zone.networktype - ) - - new_virtual_machine.start(self.userapiclient) - - self.debug("Attaching volume: %s to VM: %s" % ( - disk.id, - new_virtual_machine.id - )) - - new_virtual_machine.attach_volume( - self.apiclient, - disk - ) - - # Rebooting is required so that newly attached disks are detected - self.debug("Rebooting : %s" % new_virtual_machine.id) - new_virtual_machine.reboot(self.apiclient) - - else: - # If the disk is root disk then no need to create new VM - # Just start the original machine on which root disk is - new_virtual_machine = virt_machine - if new_virtual_machine.state != "Running": - new_virtual_machine.start(self.userapiclient) - - try: - # Login to VM to verify test directories and files - - self.debug( - "SSH into (Public IP: ) %s " % new_virtual_machine.ssh_ip) - ssh = SshClient( - new_virtual_machine.ssh_ip, - new_virtual_machine.ssh_port, - new_virtual_machine.username, - new_virtual_machine.password - ) - except Exception as e: - self.fail("SSH access failed for VM: %s, Exception: %s" % - (new_virtual_machine.ipaddress, e)) - - # Mount datadiskdevice_1 because this is the first data disk of the new - # virtual machine - cmds = ["blkid", - "fdisk -l", - "mkdir -p %s" % self.testdata["data_write_paths"]["mount_dir"], - "mount -t ext3 %s1 %s" % ( - self.testdata["volume_write_path"][ - new_virtual_machine.hypervisor][disk_type], - self.testdata["data_write_paths"]["mount_dir"] - ), - ] - - for c in cmds: - self.debug("Command: %s" % c) - result = ssh.execute(c) - self.debug(result) - - returned_data_0 = ssh.execute( - "cat %s/%s/%s/%s" % ( - self.testdata["data_write_paths"]["mount_dir"], - self.testdata["data_write_paths"]["sub_dir"], - self.testdata["data_write_paths"]["sub_lvl_dir1"], - self.testdata["data_write_paths"]["random_data"] - )) - - n = hashlib.md5() - n.update(returned_data_0[0]) - checksum_returned_data_0 = n.hexdigest() - - self.debug("returned_data_0: %s" % returned_data_0[0]) - - # Verify returned data - self.assertEqual( - original_checksum, - checksum_returned_data_0, - "Cheskum does not match with checksum of original data" - ) - - # Unmount the Sec Storage - cmds = [ - "umount %s" % (self.testdata["data_write_paths"]["mount_dir"]), - ] - - for c in cmds: - self.debug("Command: %s" % c) - ssh.execute(c) - - if new_vm: - new_virtual_machine.detach_volume( - self.apiclient, - disk - ) - - new_virtual_machine.delete(self.apiclient) - - return - - class TestStorageMigration(cloudstackTestCase): @classmethod @@ -414,21 +219,19 @@ class TestStorageMigration(cloudstackTestCase): cls.testdata["ostype"]) cls._cleanup = [] - cls.unsupportedHypervisor = False - cls.insuffPools = False - if cls.hypervisor.lower() not in [ "vmware", "kvm", "xenserver", "hyper-v"]: - cls.unsupportedHypervisor = True - return + raise unittest.SkipTest( + "Storage migration not supported on %s" % + cls.hypervisor) + try: cls.pools = StoragePool.list(cls.apiclient, zoneid=cls.zone.id) except Exception as e: - cls.insuffPools = True - return + raise unittest.SkipTest(e) try: # Create an account @@ -485,6 +288,17 @@ class TestStorageMigration(cloudstackTestCase): ) 
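The checksum helpers deleted above now come from marvin.lib.common, and every call site in this patch switches to keyword arguments. A minimal sketch of the new calling convention, inferred from those call sites (the authoritative signatures live in marvin.lib.common and are not shown in this diff; data_volume stands for whichever Volume object is being verified):

    from marvin.lib.common import createChecksum, compareChecksum

    # Write random data to the disk and record its md5 checksum.
    checksum = createChecksum(
        service=self.testdata,            # parsed test data (mount/write paths)
        virtual_machine=vm_cluster,       # VM the disk is attached to
        disk=root_volume_cluster,         # Volume object under test
        disk_type="rootdiskdevice")       # or "datadiskdevice_1" / "datadiskdevice_2"

    # After migration, re-read the data and compare it with the recorded checksum.
    compareChecksum(
        self.apiclient,
        service=self.testdata,
        original_checksum=checksum,
        disk_type="rootdiskdevice",
        virt_machine=vm_cluster)

    # For data disks the patch now verifies the checksum on the shared helper VM
    # created in setUpClass (cls.new_virtual_machine) instead of deploying a fresh
    # VM inside compareChecksum:
    self.new_virtual_machine.attach_volume(self.apiclient, data_volume)
    self.new_virtual_machine.reboot(self.apiclient)  # so the attached disk is detected
    compareChecksum(
        self.apiclient,
        service=self.testdata,
        original_checksum=checksum,
        disk_type="datadiskdevice_1",
        virt_machine=self.new_virtual_machine)
    self.new_virtual_machine.detach_volume(self.apiclient, data_volume)
    self.new_virtual_machine.reboot(self.apiclient)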
cls._cleanup.append(cls.disk_offering_cluster1) + cls.new_virtual_machine = VirtualMachine.create( + cls.apiclient, + cls.testdata["small"], + templateid=cls.template.id, + accountid=cls.account.name, + domainid=cls.account.domainid, + serviceofferingid=cls.service_offering.id, + zoneid=cls.zone.id, + mode=cls.zone.networktype + ) + # If local storage is enabled, alter the offerings to use # localstorage if cls.zone.localstorageenabled: @@ -506,13 +320,10 @@ class TestStorageMigration(cloudstackTestCase): cleanup_resources(cls.apiclient, cls._cleanup) except Exception as e: raise Exception("Warning: Exception during cleanup : %s" % e) - def setUp(self): self.apiclient = self.testClient.getApiClient() self.dbclient = self.testClient.getDbConnection() - if self.unsupportedHypervisor or self.insuffPools: - self.skipTest("Skipping test because unsupported hypervisor\ - %s" % self.hypervisor) + self.cleanup = [] def tearDown(self): @@ -524,7 +335,7 @@ class TestStorageMigration(cloudstackTestCase): raise Exception("Warning: Exception during cleanup : %s" % e) return - @attr(tags=["advanced", "basic"], required_hardware="True") + @attr(tags=["advanced", "basic"],required_hardware="true") def test_01_migrate_root_and_data_disk_nonlive(self): """ Test migrate Volume (root and data disk) @@ -545,8 +356,10 @@ class TestStorageMigration(cloudstackTestCase): In addition to this, Create snapshot of root and data disk after migration. - For root disk, create template from snapshot, deploy Vm and compare checksum - For data disk, Create volume from snapshot, attach to VM and compare checksum + For root disk, create template from snapshot, + deploy Vm and compare checksum + For data disk, Create volume from snapshot, + attach to VM and compare checksum """ @@ -652,10 +465,10 @@ class TestStorageMigration(cloudstackTestCase): # 2. Migrate Volume # 3. 
Compare checksum with data on volume on new pool checksum_random_root_cluster = createChecksum( - self, - vm_cluster, - root_volume_cluster, - "rootdiskdevice") + service=self.testdata, + virtual_machine=vm_cluster, + disk=root_volume_cluster, + disk_type="rootdiskdevice") vm_cluster.stop(self.userapiclient) @@ -668,11 +481,11 @@ class TestStorageMigration(cloudstackTestCase): vm_cluster.start(self.userapiclient) compareChecksum( - self, - checksum_random_root_cluster, - "rootdiskdevice", - virt_machine=vm_cluster, - disk=None, + self.apiclient, + service=self.testdata, + original_checksum=checksum_random_root_cluster, + disk_type="rootdiskdevice", + virt_machine=vm_cluster ) self.debug("Done with compare checksum") @@ -691,11 +504,11 @@ class TestStorageMigration(cloudstackTestCase): vm_cluster.start(self.userapiclient) compareChecksum( - self, - checksum_random_root_cluster, - "rootdiskdevice", - virt_machine=vm_cluster, - disk=None, + self.apiclient, + service=self.testdata, + original_checksum=checksum_random_root_cluster, + disk_type="rootdiskdevice", + virt_machine=vm_cluster ) vm_cluster.stop(self.userapiclient) @@ -714,10 +527,10 @@ class TestStorageMigration(cloudstackTestCase): vm_cluster.start(self.userapiclient) checksum_random_data_cluster = createChecksum( - self, - vm_cluster, - data_volumes_cluster_list[0], - "datadiskdevice_1") + service=self.testdata, + virtual_machine=vm_cluster, + disk=data_volumes_cluster_list[0], + disk_type="datadiskdevice_1") vm_cluster.detach_volume( self.apiclient, @@ -741,14 +554,27 @@ class TestStorageMigration(cloudstackTestCase): vm_cluster.start(self.userapiclient) - compareChecksum( - self, - checksum_random_data_cluster, - "datadiskdevice_1", - virt_machine=None, - disk=data_volumes_cluster_list[0], - new_vm=True) + self.new_virtual_machine.attach_volume( + self.apiclient, + data_volumes_cluster_list[0] + ) + # Rebooting is required so that newly attached disks are detected + self.new_virtual_machine.reboot(self.apiclient) + + compareChecksum( + self.apiclient, + service=self.testdata, + original_checksum=checksum_random_data_cluster, + disk_type="datadiskdevice_1", + virt_machine=self.new_virtual_machine + ) + + self.new_virtual_machine.detach_volume( + self.apiclient, + data_volumes_cluster_list[0]) + + self.new_virtual_machine.reboot(self.apiclient) # Add more data to disks data_volume_clust_2 = Volume.create( self.apiclient, @@ -774,10 +600,10 @@ class TestStorageMigration(cloudstackTestCase): # Ensure we can add data to newly added disks createChecksum( - self, - vm_cluster, - data_disk_2_volumes_cluster_list[0], - "datadiskdevice_2") + service=self.testdata, + virtual_machine=vm_cluster, + disk=data_disk_2_volumes_cluster_list[0], + disk_type="datadiskdevice_2") vm_cluster.detach_volume( self.apiclient, @@ -810,10 +636,10 @@ class TestStorageMigration(cloudstackTestCase): root_volume_snap = root_volumes_snap_list[0] createChecksum( - self, - vm_from_temp, - root_volume_snap, - "rootdiskdevice") + service=self.testdata, + virtual_machine=vm_from_temp, + disk=root_volume_snap, + disk_type="rootdiskdevice") templateFromSnapshot.delete(self.apiclient) @@ -840,10 +666,10 @@ class TestStorageMigration(cloudstackTestCase): vm_from_temp.reboot(self.userapiclient) createChecksum( - self, - vm_from_temp, - data_from_snap[0], - "datadiskdevice_1") + service=self.testdata, + virtual_machine=vm_from_temp, + disk=data_from_snap[0], + disk_type="datadiskdevice_1") vm_from_temp.detach_volume( self.userapiclient, @@ -866,13 +692,24 @@ class 
TestStorageMigration(cloudstackTestCase): destinationPool) vm_cluster.start(self.userapiclient) + + self.new_virtual_machine.attach_volume( + self.apiclient, + data_volumes_cluster_list[0] + ) + + # Rebooting is required so that newly attached disks are detected + self.new_virtual_machine.reboot(self.apiclient) + compareChecksum( - self, - checksum_random_data_cluster, - "datadiskdevice_1", - virt_machine=None, - disk=data_volumes_cluster_list[0], - new_vm=True) + self.apiclient, + service=self.testdata, + original_checksum=checksum_random_data_cluster, + disk_type="datadiskdevice_1", + virt_machine=self.new_virtual_machine + ) + + self.new_virtual_machine.delete(self.apiclient) vm_cluster.stop(self.userapiclient) # Try to Migrate DATA Volume from CWPS to Local Storage @@ -921,8 +758,6 @@ class TestStorageMigration(cloudstackTestCase): mode=self.zone.networktype ) - vm_zone.start(self.userapiclient) - # Get ROOT Volume Id root_volumes_zone_list = list_volumes( self.apiclient, @@ -958,10 +793,10 @@ class TestStorageMigration(cloudstackTestCase): # Step 4 # Migrate ROOT Volume from ZWPS to other ZWPS checksum_random_root_zone = createChecksum( - self, - vm_zone, - data_volumes_zone_list[0], - "rootdiskdevice") + service=self.testdata, + virtual_machine=vm_zone, + disk=data_volumes_zone_list[0], + disk_type="rootdiskdevice") vm_zone.stop(self.userapiclient) @@ -973,11 +808,11 @@ class TestStorageMigration(cloudstackTestCase): vm_zone.start(self.userapiclient) compareChecksum( - self, - checksum_random_root_zone, - "rootdiskdevice", - virt_machine=vm_zone, - disk=None, + self.apiclient, + service=self.testdata, + original_checksum=checksum_random_root_zone, + disk_type="rootdiskdevice", + virt_machine=vm_zone ) vm_zone.stop(self.userapiclient) @@ -1000,18 +835,18 @@ class TestStorageMigration(cloudstackTestCase): vm_zone.start(self.userapiclient) compareChecksum( - self, - checksum_random_root_zone, - "rootdiskdevice", - virt_machine=vm_zone, - disk=None, + self.apiclient, + service=self.testdata, + original_checksum=checksum_random_root_zone, + disk_type="rootdiskdevice", + virt_machine=vm_zone ) checksum_random_data_zone = createChecksum( - self, - vm_zone, - data_volumes_zone_list[0], - "datadiskdevice_1") + service=self.testdata, + virtual_machine=vm_zone, + disk=data_volumes_zone_list[0], + disk_type="datadiskdevice_1") vm_zone.stop(self.userapiclient) @@ -1022,13 +857,25 @@ class TestStorageMigration(cloudstackTestCase): "ZONE") MigrateDataVolume(self, data_volumes_zone_list[0], destinationPool) + self.new_virtual_machine.attach_volume( + self.apiclient, + data_volumes_zone_list[0] + ) + + # Rebooting is required so that newly attached disks are detected + self.new_virtual_machine.reboot(self.apiclient) + compareChecksum( - self, - checksum_random_data_zone, - "datadiskdevice_1", - virt_machine=None, - disk=data_volumes_zone_list[0], - new_vm=True) + self.apiclient, + service=self.testdata, + original_checksum=checksum_random_data_zone, + disk_type="datadiskdevice_1", + virt_machine=self.new_virtual_machine + ) + + self.new_virtual_machine.detach_volume( + self.apiclient, + data_volumes_zone_list[0]) vm_zone.stop(self.userapiclient) # Try to Migrate DATA Volume from ZWPS to Local Storage @@ -1051,13 +898,26 @@ class TestStorageMigration(cloudstackTestCase): destinationPool) vm_zone.start(self.userapiclient) + + self.new_virtual_machine.attach_volume( + self.apiclient, + data_volumes_zone_list[0] + ) + + # Rebooting is required so that newly attached disks are detected + 
self.new_virtual_machine.reboot(self.apiclient) + compareChecksum( - self, - checksum_random_data_zone, - "datadiskdevice_1", - virt_machine=None, - disk=data_volumes_zone_list[0], - new_vm=True) + self.apiclient, + service=self.testdata, + original_checksum=checksum_random_data_zone, + disk_type="datadiskdevice_1", + virt_machine=self.new_virtual_machine + ) + + self.new_virtual_machine.detach_volume( + self.apiclient, + data_volumes_zone_list[0]) # Delete ROOT and DATA Volume from ZWPS @@ -1140,10 +1000,10 @@ class TestStorageMigration(cloudstackTestCase): # Step 6 # Migrate root and data volume from Local to another Local storage checksum_random_root_local = createChecksum( - self, - vm_local, - data_volumes_local_list[0], - "rootdiskdevice") + service=self.testdata, + virtual_machine=vm_local, + disk=data_volumes_local_list[0], + disk_type="rootdiskdevice") vm_local.stop(self.userapiclient) @@ -1155,18 +1015,18 @@ class TestStorageMigration(cloudstackTestCase): vm_local.start(self.userapiclient) compareChecksum( - self, - checksum_random_root_local, - "rootdiskdevice", - virt_machine=vm_cluster, - disk=None, + self.apiclient, + service=self.testdata, + original_checksum=checksum_random_root_local, + disk_type="rootdiskdevice", + virt_machine=vm_cluster ) checksum_random_data_local = createChecksum( - self, - vm_local, - data_volumes_local_list[0], - "datadiskdevice_1") + service=self.testdata, + virtual_machine=vm_local, + disk=data_volumes_local_list[0], + disk_type="datadiskdevice_1") vm_local.stop(self.userapiclient) destinationPool = GetDestinationPool( @@ -1178,15 +1038,28 @@ class TestStorageMigration(cloudstackTestCase): data_volumes_local_list[0], destinationPool) + self.new_virtual_machine.attach_volume( + self.apiclient, + data_volumes_local_list[0] + ) + + # Rebooting is required so that newly attached disks are detected + self.new_virtual_machine.reboot(self.apiclient) + vm_local.start(self.userapiclient) compareChecksum( - self, - checksum_random_data_local, - "datadiskdevice_1", - virt_machine=None, - disk=data_volumes_local_list[0], - new_vm=True) + self.apiclient, + service=self.testdata, + original_checksum=checksum_random_data_local, + disk_type="datadiskdevice_1", + virt_machine=self.new_virtual_machine + ) + self.new_virtual_machine.detach_volume( + self.apiclient, + data_volumes_local_list[0]) + + self.new_virtual_machine.reboot(self.apiclient) # Delete ROOT and DATA Volume from Local Storage self.debug("Deleting Volume %s" % data_volume_local.id) @@ -1211,7 +1084,7 @@ class TestStorageMigration(cloudstackTestCase): ), None, "VM list should be empty") return - @attr(tags=["advanced", "basic"], required_hardware="True") + @attr(tags=["advanced", "basic"],required_hardware="true") def test_02_migration_nonlive_xenserver_supported(self): """ Test migrate Volume (root and data disk) for Hypervisor Xenserver @@ -1316,10 +1189,10 @@ class TestStorageMigration(cloudstackTestCase): # Migrate ROOT Volume from CWPS to other CWPS checksum_random_root_cluster = createChecksum( - self, - vm_cluster, - root_volume_cluster, - "rootdiskdevice") + service=self.testdata, + virtual_machine=vm_cluster, + disk=root_volume_cluster, + disk_type="rootdiskdevice") vm_cluster.stop(self.userapiclient) @@ -1332,11 +1205,11 @@ class TestStorageMigration(cloudstackTestCase): vm_cluster.start(self.userapiclient) compareChecksum( - self, - checksum_random_root_cluster, - "rootdiskdevice", - virt_machine=vm_cluster, - disk=None, + self.apiclient, + service=self.testdata, + 
original_checksum=checksum_random_root_cluster, + disk_type="rootdiskdevice", + virt_machine=vm_cluster ) self.debug("Done with compare checksum after first checksum") @@ -1350,10 +1223,10 @@ class TestStorageMigration(cloudstackTestCase): vm_cluster.reboot(self.userapiclient) checksum_random_data_cluster = createChecksum( - self, - vm_cluster, - data_volumes_cluster_list[0], - "datadiskdevice_1") + service=self.testdata, + virtual_machine=vm_cluster, + disk=data_volumes_cluster_list[0], + disk_type="datadiskdevice_1") vm_cluster.stop(self.userapiclient) @@ -1369,13 +1242,27 @@ class TestStorageMigration(cloudstackTestCase): vm_cluster.detach_volume(self.apiclient, data_volumes_cluster_list[0]) + self.new_virtual_machine.attach_volume( + self.apiclient, + data_volumes_cluster_list[0] + ) + + # Rebooting is required so that newly attached disks are detected + self.new_virtual_machine.reboot(self.apiclient) + compareChecksum( - self, - checksum_random_data_cluster, - "datadiskdevice_1", - virt_machine=None, - disk=data_volumes_cluster_list[0], - new_vm=True) + self.apiclient, + service=self.testdata, + original_checksum=checksum_random_data_cluster, + disk_type="datadiskdevice_1", + virt_machine=self.new_virtual_machine + ) + + self.new_virtual_machine.detach_volume( + self.apiclient, + data_volumes_cluster_list[0]) + + self.new_virtual_machine.reboot(self.apiclient) # snapshot test case t14 compare checksum for same VM vm_cluster.attach_volume( @@ -1386,11 +1273,11 @@ class TestStorageMigration(cloudstackTestCase): vm_cluster.reboot(self.apiclient) compareChecksum( - self, - checksum_random_data_cluster, - "datadiskdevice_1", - virt_machine=vm_cluster, - disk=data_volumes_cluster_list[0] + self.apiclient, + service=self.testdata, + original_checksum=checksum_random_data_cluster, + disk_type="datadiskdevice_1", + virt_machine=vm_cluster ) # Add more data to disks @@ -1417,10 +1304,10 @@ class TestStorageMigration(cloudstackTestCase): ) createChecksum( - self, - vm_cluster, - data_disk_2_volumes_cluster_list[0], - "datadiskdevice_2") + service=self.testdata, + virtual_machine=vm_cluster, + disk=data_disk_2_volumes_cluster_list[0], + disk_type="datadiskdevice_2") vm_cluster.detach_volume( self.apiclient, @@ -1479,10 +1366,10 @@ class TestStorageMigration(cloudstackTestCase): root_volume_snap = root_volumes_snap_list[0] createChecksum( - self, - vm_from_temp, - root_volume_snap, - "rootdiskdevice") + service=self.testdata, + virtual_machine=vm_from_temp, + disk=root_volume_snap, + disk_type="rootdiskdevice") templateFromSnapshot.delete(self.apiclient) @@ -1509,10 +1396,10 @@ class TestStorageMigration(cloudstackTestCase): vm_from_temp.reboot(self.userapiclient) createChecksum( - self, - vm_from_temp, - data_from_snap[0], - "datadiskdevice_1") + service=self.testdata, + virtual_machine=vm_from_temp, + disk=data_from_snap[0], + disk_type="datadiskdevice_1") vm_from_temp.detach_volume( self.userapiclient, @@ -1603,10 +1490,10 @@ class TestStorageMigration(cloudstackTestCase): vm_local.reboot(self.userapiclient) createChecksum( - self, - vm_local, - root_volume_local, - "rootdiskdevice") + service=self.testdata, + virtual_machine=vm_local, + disk=root_volume_local, + disk_type="rootdiskdevice") vm_local.stop(self.userapiclient) @@ -1656,7 +1543,7 @@ class TestStorageMigration(cloudstackTestCase): return - @attr(tags=["advanced", "basic"], required_hardware="True") + @attr(tags=["advanced", "basic"],required_hardware="true") def test_03_migrate_root_and_data_disk_nonlive_cwps_vmware(self): """ Test 
migrate Volume (root and data disk) @@ -1764,10 +1651,10 @@ class TestStorageMigration(cloudstackTestCase): # Step 2 # Migrate ROOT Volume from CWPS to other CWPS checksum_random_root_cluster = createChecksum( - self, - vm_cluster, - root_volume_cluster, - "rootdiskdevice") + service=self.testdata, + virtual_machine=vm_cluster, + disk=root_volume_cluster, + disk_type="rootdiskdevice") vm_cluster.stop(self.userapiclient) @@ -1780,20 +1667,20 @@ class TestStorageMigration(cloudstackTestCase): vm_cluster.start(self.userapiclient) compareChecksum( - self, - checksum_random_root_cluster, - "rootdiskdevice", - virt_machine=vm_cluster, - disk=None, + self.apiclient, + service=self.testdata, + original_checksum=checksum_random_root_cluster, + disk_type="rootdiskdevice", + virt_machine=vm_cluster ) self.debug("Done with compare checksum") vm_cluster.start(self.userapiclient) checksum_random_data_cluster = createChecksum( - self, - vm_cluster, - data_volumes_cluster_list[0], - "datadiskdevice_1") + service=self.testdata, + virtual_machine=vm_cluster, + disk=data_volumes_cluster_list[0], + disk_type="datadiskdevice_1") vm_cluster.detach_volume( self.apiclient, @@ -1814,14 +1701,27 @@ class TestStorageMigration(cloudstackTestCase): vm_cluster.start(self.userapiclient) - compareChecksum( - self, - checksum_random_data_cluster, - "datadiskdevice_1", - virt_machine=None, - disk=data_volumes_cluster_list[0], - new_vm=True) + self.new_virtual_machine.attach_volume( + self.apiclient, + data_volumes_cluster_list[0] + ) + # Rebooting is required so that newly attached disks are detected + self.new_virtual_machine.reboot(self.apiclient) + + compareChecksum( + self.apiclient, + service=self.testdata, + original_checksum=checksum_random_data_cluster, + disk_type="datadiskdevice_1", + virt_machine=self.new_virtual_machine + ) + + self.new_virtual_machine.detach_volume( + self.apiclient, + data_volumes_cluster_list[0]) + + self.new_virtual_machine.reboot(self.apiclient) # snapshot test case t14 compare checksum for same VM vm_cluster.attach_volume( self.apiclient, @@ -1831,11 +1731,11 @@ class TestStorageMigration(cloudstackTestCase): vm_cluster.reboot(self.apiclient) compareChecksum( - self, - checksum_random_data_cluster, - "datadiskdevice_1", - virt_machine=vm_cluster, - disk=data_volumes_cluster_list[0] + self.apiclient, + service=self.testdata, + original_checksum=checksum_random_data_cluster, + disk_type="datadiskdevice_1", + virt_machine=vm_cluster ) # Add more data to disks @@ -1862,10 +1762,10 @@ class TestStorageMigration(cloudstackTestCase): ) createChecksum( - self, - vm_cluster, - data_disk_2_volumes_cluster_list[0], - "datadiskdevice_2") + service=self.testdata, + virtual_machine=vm_cluster, + disk=data_disk_2_volumes_cluster_list[0], + disk_type="datadiskdevice_2") vm_cluster.detach_volume( self.apiclient, @@ -1898,10 +1798,10 @@ class TestStorageMigration(cloudstackTestCase): root_volume_snap = root_volumes_snap_list[0] createChecksum( - self, - vm_from_temp, - root_volume_snap, - "rootdiskdevice") + service=self.testdata, + virtual_machine=vm_from_temp, + disk=root_volume_snap, + disk_type="rootdiskdevice") templateFromSnapshot.delete(self.apiclient) @@ -1928,10 +1828,10 @@ class TestStorageMigration(cloudstackTestCase): vm_from_temp.reboot(self.userapiclient) createChecksum( - self, - vm_from_temp, - data_from_snap[0], - "datadiskdevice_1") + service=self.testdata, + virtual_machine=vm_from_temp, + disk=data_from_snap[0], + disk_type="datadiskdevice_1") vm_from_temp.detach_volume( 
self.userapiclient, @@ -2023,10 +1923,10 @@ class TestStorageMigration(cloudstackTestCase): # Step 6 # Migrate root and data volume from Local to another Local storage checksum_random_root_local = createChecksum( - self, - vm_local, - data_volumes_local_list[0], - "rootdiskdevice") + service=self.testdata, + virtual_machine=vm_local, + disk=data_volumes_local_list[0], + disk_type="rootdiskdevice") vm_local.stop(self.userapiclient) @@ -2038,18 +1938,18 @@ class TestStorageMigration(cloudstackTestCase): vm_local.start(self.userapiclient) compareChecksum( - self, - checksum_random_root_local, - "rootdiskdevice", - virt_machine=vm_cluster, - disk=None, + self.apiclient, + service=self.testdata, + original_checksum=checksum_random_root_local, + disk_type="rootdiskdevice", + virt_machine=vm_cluster ) checksum_random_data_local = createChecksum( - self, - vm_local, - data_volumes_local_list[0], - "datadiskdevice_1") + service=self.testdata, + virtual_machine=vm_local, + disk=data_volumes_local_list[0], + disk_type="datadiskdevice_1") vm_local.stop(self.userapiclient) destinationPool = GetDestinationPool( @@ -2062,14 +1962,28 @@ class TestStorageMigration(cloudstackTestCase): destinationPool) vm_local.start(self.userapiclient) - compareChecksum( - self, - checksum_random_data_local, - "datadiskdevice_1", - virt_machine=None, - disk=data_volumes_local_list[0], - new_vm=True) + self.new_virtual_machine.attach_volume( + self.apiclient, + data_volumes_local_list[0] + ) + + # Rebooting is required so that newly attached disks are detected + self.new_virtual_machine.reboot(self.apiclient) + + compareChecksum( + self.apiclient, + service=self.testdata, + original_checksum=checksum_random_data_local, + disk_type="datadiskdevice_1", + virt_machine=self.new_virtual_machine + ) + + self.new_virtual_machine.detach_volume( + self.apiclient, + data_volumes_local_list[0]) + + self.new_virtual_machine.reboot(self.apiclient) # Delete ROOT and DATA Volume from Local Storage self.debug("Deleting Volume %s" % data_volume_local.id) @@ -2094,7 +2008,7 @@ class TestStorageMigration(cloudstackTestCase): ), None, "VM list should be empty") return - @attr(tags=["advanced", "basic"], required_hardware="True") + @attr(tags=["advanced", "basic"], required_hardware="true") def test_04_migrate_root_and_data_disk_nonlive_zwps_vmware(self): """ Test migrate Volume (root and data disk) @@ -2147,8 +2061,6 @@ class TestStorageMigration(cloudstackTestCase): mode=self.zone.networktype ) - vm_zone.start(self.userapiclient) - # Get ROOT Volume Id root_volumes_zone_list = list_volumes( self.apiclient, @@ -2184,10 +2096,10 @@ class TestStorageMigration(cloudstackTestCase): # Step 4 # Migrate ROOT Volume from ZWPS to other ZWPS checksum_random_root_zone = createChecksum( - self, - vm_zone, - data_volumes_zone_list[0], - "rootdiskdevice") + service=self.testdata, + virtual_machine=vm_zone, + disk=data_volumes_zone_list[0], + disk_type="rootdiskdevice") vm_zone.stop(self.userapiclient) @@ -2199,27 +2111,18 @@ class TestStorageMigration(cloudstackTestCase): vm_zone.start(self.userapiclient) compareChecksum( - self, - checksum_random_root_zone, - "rootdiskdevice", - virt_machine=vm_zone, - disk=None, - ) - vm_zone.start(self.userapiclient) - - compareChecksum( - self, - checksum_random_root_zone, - "rootdiskdevice", - virt_machine=vm_zone, - disk=None, + self.apiclient, + service=self.testdata, + original_checksum=checksum_random_root_zone, + disk_type="rootdiskdevice", + virt_machine=vm_zone ) checksum_random_data_zone = createChecksum( - 
self, - vm_zone, - data_volumes_zone_list[0], - "datadiskdevice_1") + service=self.testdata, + virtual_machine=vm_zone, + disk=data_volumes_zone_list[0], + disk_type="datadiskdevice_1") vm_zone.stop(self.userapiclient) @@ -2230,14 +2133,27 @@ class TestStorageMigration(cloudstackTestCase): "ZONE") MigrateDataVolume(self, data_volumes_zone_list[0], destinationPool) - compareChecksum( - self, - checksum_random_data_zone, - "datadiskdevice_1", - virt_machine=None, - disk=data_volumes_zone_list[0], - new_vm=True) + self.new_virtual_machine.attach_volume( + self.apiclient, + data_volumes_zone_list[0] + ) + # Rebooting is required so that newly attached disks are detected + self.new_virtual_machine.reboot(self.apiclient) + + compareChecksum( + self.apiclient, + service=self.testdata, + original_checksum=checksum_random_data_zone, + disk_type="datadiskdevice_1", + virt_machine=self.new_virtual_machine + ) + + self.new_virtual_machine.detach_volume( + self.apiclient, + data_volumes_zone_list[0]) + + self.new_virtual_machine.reboot(self.apiclient) # Delete ROOT and DATA Volume from ZWPS self.debug("Deleting Volume %s" % data_volume_zone.id) @@ -2285,15 +2201,15 @@ class NegativeTestStorageMigration(cloudstackTestCase): cls.testdata["ostype"]) cls._cleanup = [] - cls.unsupportedHypervisor = False - cls.insuffPools = False + if cls.hypervisor.lower() not in [ "vmware", "kvm", "xenserver", "hyper-v"]: - cls.unsupportedHypervisor = True - return + raise unittest.SkipTest( + "Storage migration not supported on %s" % + cls.hypervisor) try: cls.pools = StoragePool.list(cls.apiclient, zoneid=cls.zone.id) @@ -2306,8 +2222,7 @@ class NegativeTestStorageMigration(cloudstackTestCase): "There must be at least two cluster wide\ storage pools available in the setup" except Exception as e: - cls.insuffPools = True - return + raise unittest.SkipTest(e) try: # Create an account @@ -2389,9 +2304,6 @@ class NegativeTestStorageMigration(cloudstackTestCase): def setUp(self): self.apiclient = self.testClient.getApiClient() self.dbclient = self.testClient.getDbConnection() - if self.unsupportedHypervisor or self.insuffPools: - self.skipTest("Skipping test because unsupported hypervisor\ - %s" % self.hypervisor) self.cleanup = [] def tearDown(self): @@ -2444,14 +2356,15 @@ class NegativeTestStorageMigration(cloudstackTestCase): except Exception as e: self.exceptionList.append(e) - @attr(tags=["advanced", "basic"], required_hardware="True") + @attr(tags=["advanced", "basic"],required_hardware="true") def test_01_migrate_data_disk_negative_test(self): """ Negative test cases # 1. Deploy a VM on cluster wide primary storage. # 2. Add some data to disks and create checksum # 3. Migrate root and data volume from cluster-to-cluster wide storage pool - # 4. While migration(ROOT disk) is in progress try following scenarios, they should fail: + # 4. While migration(ROOT disk) is in progress try following scenarios, + they should fail: I. Take snapshot of the disk II. Create Template from the volume III. 
Destroy the instance @@ -2506,16 +2419,16 @@ class NegativeTestStorageMigration(cloudstackTestCase): # Calculate checksum of ROOT and DATA Disks checksum_root_disk = self.createChecksum( - self, - vm_cluster, - root_volume_cluster, - "rootdiskdevice") + service=self.testdata, + virtual_machine=vm_cluster, + disk=root_volume_cluster, + disk_type="rootdiskdevice") checksum_data_disk = self.createChecksum( - self, - vm_cluster, - data_disk, - "datadiskdevice_1") + service=self.testdata, + virtual_machine=vm_cluster, + disk=data_disk, + disk_type="datadiskdevice_1") volumes = Volume.list( self.userapiclient, @@ -2623,16 +2536,18 @@ class NegativeTestStorageMigration(cloudstackTestCase): self.debug("Done with create checksum") compareChecksum( - self, - checksum_root_disk, - "rootdiskdevice", + self.apiclient, + service=self.testdata, + original_checksum=checksum_root_disk, + disk_type="rootdiskdevice", virt_machine=vm_cluster ) compareChecksum( - self, - checksum_data_disk, - "datadiskdevice_1", + self.apiclient, + service=self.testdata, + original_checksum=checksum_data_disk, + disk_type="datadiskdevice_1", virt_machine=vm_cluster ) @@ -2699,17 +2614,16 @@ class TestLiveStorageMigration(cloudstackTestCase): cls.testdata["ostype"]) cls._cleanup = [] - cls.unsupportedHypervisor = False - cls.insuffPools = False + if cls.hypervisor.lower() in ["kvm", "lxc"]: - cls.unsupportedHypervisor = True - return + raise unittest.SkipTest( + "Live Storage migration not supported on %s" % + cls.hypervisor) try: cls.pools = StoragePool.list(cls.apiclient, zoneid=cls.zone.id) except Exception as e: - cls.insuffPools = True - return + raise unittest.SkipTest(e) try: # Create an account @@ -2791,9 +2705,6 @@ class TestLiveStorageMigration(cloudstackTestCase): def setUp(self): self.apiclient = self.testClient.getApiClient() self.dbclient = self.testClient.getDbConnection() - if self.unsupportedHypervisor or self.insuffPools: - self.skipTest("Skipping test because unsupported hypervisor\ - %s" % self.hypervisor) self.cleanup = [] def tearDown(self): @@ -2805,13 +2716,12 @@ class TestLiveStorageMigration(cloudstackTestCase): raise Exception("Warning: Exception during cleanup : %s" % e) return - @attr(tags=["advanced", "basic"], - required_hardware="True") + @attr(tags=["advanced", "basic"], required_hardware="true") def test_01_migrate_live(self): """ Test migrate Volume (root and data disk) # 1. Deploy a VM on cluster wide primary storage. - # 2. Migrate root and data volume to two different storage pools\ + # 2. Migrate root and data volume to two different storage pools in same cluster. 
""" @@ -2889,10 +2799,10 @@ class TestLiveStorageMigration(cloudstackTestCase): # Step 2 # Migrate ROOT Volume from CWPS to other CWPS checksum_random_root_cluster = createChecksum( - self, - vm_cluster, - root_volume_cluster, - "rootdiskdevice") + service=self.testdata, + virtual_machine=vm_cluster, + disk=root_volume_cluster, + disk_type="rootdiskdevice") # Get Destnation Pool # Avoid storage Pool on which ROOT disk exists @@ -2911,19 +2821,19 @@ class TestLiveStorageMigration(cloudstackTestCase): islive=True) compareChecksum( - self, - checksum_random_root_cluster, - "rootdiskdevice", - virt_machine=vm_cluster, - disk=None, + self.apiclient, + service=self.testdata, + original_checksum=checksum_random_root_cluster, + disk_type="rootdiskdevice", + virt_machine=vm_cluster ) # Migrate DATA Volume from CWPS to other CWPS checksum_random_data_cluster = createChecksum( - self, - vm_cluster, - data_volume_1_cluster, - "datadiskdevice_1") + service=self.testdata, + virtual_machine=vm_cluster, + disk=data_volume_1_cluster, + disk_type="datadiskdevice_1") # Get Destnation Pool # Avoid storage Pool allocated for ROOT disk, and Pool on which DATA @@ -2955,14 +2865,27 @@ class TestLiveStorageMigration(cloudstackTestCase): data_volume_clust_1 ) - compareChecksum( - self, - checksum_random_data_cluster, - "datadiskdevice_1", - virt_machine=None, - disk=data_volume_1_cluster, - new_vm=True) + self.new_virtual_machine.attach_volume( + self.apiclient, + data_volume_1_cluster + ) + # Rebooting is required so that newly attached disks are detected + self.new_virtual_machine.reboot(self.apiclient) + + compareChecksum( + self.apiclient, + service=self.testdata, + original_checksum=checksum_random_data_cluster, + disk_type="datadiskdevice_1", + virt_machine=self.new_virtual_machine + ) + + self.new_virtual_machine.detach_volume( + self.apiclient, + data_volume_1_cluster) + + self.new_virtual_machine.reboot(self.apiclient) # Destroy and expunge VM and data disk vm_cluster.delete(self.apiclient) @@ -2989,16 +2912,16 @@ class TestLiveStorageMigration(cloudstackTestCase): ), None, "Volume list should be empty") return - @unittest.skip( "Requires setup with 2 pods - Each pod having 2 clusters. \ Yet to be tested") - @attr(tags=["advanced", "basic"], required_hardware="True") + + @attr(tags=["advanced", "basic"],required_hardware="true") def test_02_migration_live_different_pods(self): """ Test migrate Volume (root and data disk) # 1. Deploy a VM on cluster wide primary storage. - # 2. Migrate root and data volume to two different storage pools\ + # 2. Migrate root and data volume to two different storage pools in same cluster. 
""" @@ -3088,10 +3011,10 @@ class TestLiveStorageMigration(cloudstackTestCase): # Step 2 # Migrate ROOT Volume from CWPS to other CWPS checksum_random_root_cluster = createChecksum( - self, - vm_cluster, - root_volume_cluster, - "rootdiskdevice") + service=self.testdata, + virtual_machine=vm_cluster, + disk=root_volume_cluster, + disk_type="rootdiskdevice") # Get Destnation Pool # Avoid storage Pool on which ROOT disk exists @@ -3110,19 +3033,19 @@ class TestLiveStorageMigration(cloudstackTestCase): islive=True) compareChecksum( - self, - checksum_random_root_cluster, - "rootdiskdevice", - virt_machine=vm_cluster, - disk=None, + self.apiclient, + service=self.testdata, + original_checksum=checksum_random_root_cluster, + disk_type="rootdiskdevice", + virt_machine=vm_cluster ) # Migrate DATA Volume from CWPS to other CWPS checksum_random_data_cluster = createChecksum( - self, - vm_cluster, - data_volume_1_cluster, - "datadiskdevice_1") + service=self.testdata, + virtual_machine=vm_cluster, + disk=data_volume_1_cluster, + disk_type="datadiskdevice_1") # Get Destnation Pool # Avoid storage Pool allocated for ROOT disk, and Pool on which DATA @@ -3155,14 +3078,27 @@ class TestLiveStorageMigration(cloudstackTestCase): data_volume_clust_1 ) - compareChecksum( - self, - checksum_random_data_cluster, - "datadiskdevice_1", - virt_machine=None, - disk=data_volume_1_cluster, - new_vm=True) + self.new_virtual_machine.attach_volume( + self.apiclient, + data_volume_1_cluster + ) + # Rebooting is required so that newly attached disks are detected + self.new_virtual_machine.reboot(self.apiclient) + + compareChecksum( + self.apiclient, + service=self.testdata, + original_checksum=checksum_random_data_cluster, + disk_type="datadiskdevice_1", + virt_machine=self.new_virtual_machine + ) + + self.new_virtual_machine.detach_volume( + self.apiclient, + data_volume_1_cluster) + + self.new_virtual_machine.reboot(self.apiclient) # Add disk 2 data_volume_clust_2 = Volume.create( @@ -3192,20 +3128,33 @@ class TestLiveStorageMigration(cloudstackTestCase): # Add data to second data disk checksum_random_data_cluster = createChecksum( - self, - vm_cluster, - data_volume_2_cluster, - "datadiskdevice_2") + service=self.testdata, + virtual_machine=vm_cluster, + disk=data_volume_2_cluster, + disk_type="datadiskdevice_2") # TO-DO Migration + self.new_virtual_machine.attach_volume( + self.apiclient, + data_volume_2_cluster + ) + + # Rebooting is required so that newly attached disks are detected + self.new_virtual_machine.reboot(self.apiclient) compareChecksum( - self, - checksum_random_data_cluster, - "datadiskdevice_2", - virt_machine=None, - disk=data_volume_2_cluster, - new_vm=True) + self.apiclient, + service=self.testdata, + original_checksum=checksum_random_data_cluster, + disk_type="datadiskdevice_2", + virt_machine=self.new_virtual_machine + ) + + self.new_virtual_machine.detach_volume( + self.apiclient, + data_volume_2_cluster) + + self.new_virtual_machine.reboot(self.apiclient) # TO-DO: Create Snapshot, Migrate and Restore Snapshot # But Restore snapshot to previous stage @@ -3224,8 +3173,6 @@ class TestLiveStorageMigration(cloudstackTestCase): mode=self.zone.networktype ) - vm_zone.start(self.userapiclient) - # Get ROOT Volume Id root_volumes_zone_list = list_volumes( self.apiclient, @@ -3261,20 +3208,20 @@ class TestLiveStorageMigration(cloudstackTestCase): # Step 4 # Migrate ROOT Volume from ZWPS to other ZWPS checksum_random_root_zone = createChecksum( - self, - vm_zone, - root_volume_zone, - "rootdiskdevice") + 
service=self.testdata, + virtual_machine=vm_zone, + disk=root_volume_zone, + disk_type="rootdiskdevice") destinationPool = GetDestinationPool(self, root_volume_zone, "ZONE") MigrateRootVolume(self, vm_zone, destinationPool) compareChecksum( - self, - checksum_random_root_zone, - "rootdiskdevice", - virt_machine=vm_zone, - disk=None, + self.apiclient, + service=self.testdata, + original_checksum=checksum_random_root_zone, + disk_type="rootdiskdevice", + virt_machine=vm_zone ) # Try to Migrate ROOT Volume from ZWPS to Cluster wide Storage @@ -3287,20 +3234,20 @@ class TestLiveStorageMigration(cloudstackTestCase): expectexception=True) compareChecksum( - self, - checksum_random_root_zone, - "rootdiskdevice", - virt_machine=vm_cluster, - disk=None, + self.apiclient, + service=self.testdata, + original_checksum=checksum_random_root_zone, + disk_type="rootdiskdevice", + virt_machine=vm_cluster ) # DATA Disk checksum_random_data_zone = createChecksum( - self, - vm_zone, - data_volumes_zone_list[0], - "datadiskdevice_1") + service=self.testdata, + virtual_machine=vm_zone, + disk=data_volumes_zone_list[0], + disk_type="datadiskdevice_1") # Migrate DATA Volume from ZWPS to other ZWPS destinationPool = GetDestinationPool( @@ -3309,13 +3256,27 @@ class TestLiveStorageMigration(cloudstackTestCase): "ZONE") MigrateDataVolume(self, data_volumes_zone_list[0], destinationPool) + self.new_virtual_machine.attach_volume( + self.apiclient, + data_volumes_zone_list[0] + ) + + # Rebooting is required so that newly attached disks are detected + self.new_virtual_machine.reboot(self.apiclient) + compareChecksum( - self, - checksum_random_data_zone, - "datadiskdevice_1", - virt_machine=None, - disk=data_volumes_zone_list[0], - new_vm=True) + self.apiclient, + service=self.testdata, + original_checksum=checksum_random_data_zone, + disk_type="datadiskdevice_1", + virt_machine=self.new_virtual_machine + ) + + self.new_virtual_machine.detach_volume( + self.apiclient, + data_volumes_zone_list[0]) + + self.new_virtual_machine.reboot(self.apiclient) # Try to Migrate DATA Volume from ZWPS to Cluster wide Storage destinationPool = GetDestinationPool(self, data_volume_zone, "CLUSTER") @@ -3325,13 +3286,26 @@ class TestLiveStorageMigration(cloudstackTestCase): destinationPool, expectexception=True) + self.new_virtual_machine.attach_volume( + self.apiclient, + data_volumes_zone_list[0] + ) + + # Rebooting is required so that newly attached disks are detected + self.new_virtual_machine.reboot(self.apiclient) compareChecksum( - self, - checksum_random_data_zone, - "datadiskdevice_1", - virt_machine=None, - disk=data_volumes_zone_list[0], - new_vm=True) + self.apiclient, + service=self.testdata, + original_checksum=checksum_random_data_zone, + disk_type="datadiskdevice_1", + virt_machine=self.new_virtual_machine + ) + + self.new_virtual_machine.detach_volume( + self.apiclient, + data_volumes_zone_list[0]) + + self.new_virtual_machine.reboot(self.apiclient) # Try to Migrate DATA Volume from ZWPS to Cluster wide Storage destinationPool = GetDestinationPool(self, data_volume_zone, "CLUSTER") @@ -3341,13 +3315,26 @@ class TestLiveStorageMigration(cloudstackTestCase): destinationPool, expectexception=True) + self.new_virtual_machine.attach_volume( + self.apiclient, + data_volumes_zone_list[0] + ) + + # Rebooting is required so that newly attached disks are detected + self.new_virtual_machine.reboot(self.apiclient) compareChecksum( - self, - checksum_random_data_zone, - "datadiskdevice_1", - virt_machine=None, - 
disk=data_volumes_zone_list[0], - new_vm=True) + self.apiclient, + service=self.testdata, + original_checksum=checksum_random_data_zone, + disk_type="datadiskdevice_1", + virt_machine=self.new_virtual_machine + ) + + self.new_virtual_machine.detach_volume( + self.apiclient, + data_volumes_zone_list[0]) + + self.new_virtual_machine.reboot(self.apiclient) # Destroy and expunge VM and data disk vm_zone.delete(self.apiclient) @@ -3407,10 +3394,10 @@ class TestLiveStorageMigration(cloudstackTestCase): self.apiclient, id=root_volume_cluster.id, ), None, "Volume list should be empty") + + return - return - -def MigrateVmWithVolume(self,vm,destinationHost,volumes,pools): + def MigrateVmWithVolume(self,vm,destinationHost,volumes,pools): """ This method is used to migrate a vm and its volumes using migrate virtual machine with volume API INPUTS: @@ -3868,3 +3855,4 @@ class TestStorageLiveMigrationVmware(cloudstackTestCase): vm = MigrateVmWithVolume(self, virtual_machine_1, destinationHost, vol_list, destinationPools) check_files(self, vm,destinationHost) + diff --git a/test/integration/testpaths/testpath_volume_recurring_snap.py b/test/integration/testpaths/testpath_volume_recurring_snap.py new file mode 100644 index 00000000000..23e0b3a73ac --- /dev/null +++ b/test/integration/testpaths/testpath_volume_recurring_snap.py @@ -0,0 +1,1043 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
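The new test path below exercises recurring snapshot policies through the Marvin helpers imported in this file. A minimal sketch of the lifecycle that each interval type (HOURLY, DAILY, WEEKLY, MONTHLY) is put through, assuming the self.volume and testdata["recurring_snapshot"] fixtures that setUpClass and setUp establish:

    # Create a recurring policy on the ROOT volume and confirm it is listed back.
    self.testdata["recurring_snapshot"]["intervaltype"] = "HOURLY"
    self.testdata["recurring_snapshot"]["schedule"] = 1
    policy = SnapshotPolicy.create(
        self.apiclient,
        self.volume[0].id,
        self.testdata["recurring_snapshot"])

    policies = list_snapshot_policy(
        self.apiclient,
        id=policy.id,
        volumeid=self.volume[0].id)
    self.assertEqual(validateList(policies)[0], PASS,
                     "snapshot policy list validation failed")

    # Deleting the policy should leave no row behind in snapshot_policy.
    policy.delete(self.apiclient)
    self.assertEqual(
        self.dbclient.execute(
            "select * from snapshot_policy where uuid='%s'" % policy.id),
        [])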
+""" Test cases for VM/Volume recurring snapshot Test Path +""" +from nose.plugins.attrib import attr +from marvin.cloudstackTestCase import cloudstackTestCase, unittest +from marvin.lib.utils import (cleanup_resources, + is_snapshot_on_nfs, + validateList + ) +from marvin.lib.base import (Account, + ServiceOffering, + DiskOffering, + VirtualMachine, + SnapshotPolicy, + Snapshot, + Configurations + ) +from marvin.lib.common import (get_domain, + get_zone, + get_template, + list_volumes, + list_snapshots, + list_snapshot_policy + ) + +from marvin.codes import PASS + +import time + + +class TestVolumeRecurringSnapshot(cloudstackTestCase): + + @classmethod + def setUpClass(cls): + testClient = super(TestVolumeRecurringSnapshot, cls).getClsTestClient() + cls.apiclient = testClient.getApiClient() + cls.testdata = testClient.getParsedTestDataConfig() + cls.hypervisor = cls.testClient.getHypervisorInfo() + + # Get Zone, Domain and templates + cls.domain = get_domain(cls.apiclient) + cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests()) + + cls.template = get_template( + cls.apiclient, + cls.zone.id, + cls.testdata["ostype"]) + + cls._cleanup = [] + + if cls.hypervisor.lower() not in [ + "vmware", + "kvm", + "xenserver"]: + raise unittest.SkipTest( + "Storage migration not supported on %s" % + cls.hypervisor) + + try: + # Create an account + cls.account = Account.create( + cls.apiclient, + cls.testdata["account"], + domainid=cls.domain.id + ) + cls._cleanup.append(cls.account) + # Create user api client of the account + cls.userapiclient = testClient.getUserApiClient( + UserName=cls.account.name, + DomainName=cls.account.domain + ) + # Create Service offering + cls.service_offering = ServiceOffering.create( + cls.apiclient, + cls.testdata["service_offering"], + ) + cls._cleanup.append(cls.service_offering) + # Create Disk offering + cls.disk_offering = DiskOffering.create( + cls.apiclient, + cls.testdata["disk_offering"], + ) + cls._cleanup.append(cls.disk_offering) + # Deploy A VM + cls.vm_1 = VirtualMachine.create( + cls.userapiclient, + cls.testdata["small"], + templateid=cls.template.id, + accountid=cls.account.name, + domainid=cls.account.domainid, + serviceofferingid=cls.service_offering.id, + zoneid=cls.zone.id, + diskofferingid=cls.disk_offering.id, + mode=cls.zone.networktype + ) + + cls.volume = list_volumes( + cls.apiclient, + virtualmachineid=cls.vm_1.id, + type='ROOT', + listall=True + ) + + cls.data_volume = list_volumes( + cls.apiclient, + virtualmachineid=cls.vm_1.id, + type='DATADISK', + listall=True + ) + + except Exception as e: + cls.tearDownClass() + raise e + return + + @classmethod + def tearDownClass(cls): + try: + cleanup_resources(cls.apiclient, cls._cleanup) + except Exception as e: + raise Exception("Warning: Exception during cleanup : %s" % e) + + def setUp(self): + self.apiclient = self.testClient.getApiClient() + self.dbclient = self.testClient.getDbConnection() + self.cleanup = [] + + def tearDown(self): + try: + cleanup_resources(self.apiclient, self.cleanup) + except Exception as e: + raise Exception("Warning: Exception during cleanup : %s" % e) + return + + @attr(tags=["advanced", "basic"],required_hardware="true") + def test_01_volume_snapshot(self): + """ Test Volume (root) Snapshot + # 1. Create Hourly, Daily,Weekly recurring snapshot policy for ROOT disk and + Verify the presence of the corresponding snapshots on the Secondary Storage + # 2. Delete the snapshot policy and verify the entry as Destroyed in snapshot_schedule + # 3. 
Verify that maxsnaps should not consider manual snapshots for deletion + # 4. Snapshot policy should reflect the correct timezone + # 5. Verify that listSnapshotPolicies() should return all snapshot policies + that belong to the account (both manual and recurring snapshots) + # 6. Verify that listSnapshotPolicies() should not return snapshot + policies that have been deleted + # 7. Verify that snapshot should not be created for VM in Destroyed state + # 8. Verify that snapshot should get created after resuming the VM + # 9. Verify that All the recurring policies associated with the VM should be + deleted after VM get destroyed. + """ + # Step 1 + self.testdata["recurring_snapshot"]["intervaltype"] = 'HOURLY' + + recurring_snapshot = SnapshotPolicy.create( + self.apiclient, + self.volume[0].id, + self.testdata["recurring_snapshot"] + ) + # ListSnapshotPolicy should return newly created policy + list_snapshots_policy = list_snapshot_policy( + self.apiclient, + id=recurring_snapshot.id, + volumeid=self.volume[0].id + ) + list_validation = validateList(list_snapshots_policy) + + self.assertEqual( + list_validation[0], + PASS, + "snapshot list validation failed due to %s" % + list_validation[2]) + + timeout = self.testdata["timeout"] + while True: + snapshots = list_snapshots( + self.apiclient, + volumeid=self.volume[0].id, + intervaltype=self.testdata[ + "recurring_snapshot"]["intervaltype"], + snapshottype='RECURRING', + listall=True + ) + + if isinstance(snapshots, list): + break + + elif timeout == 0: + raise Exception("List snapshots API call failed.") + + for snapshot in snapshots: + self.assertEqual( + self.dbclient.execute( + "select type_description from snapshots where name='%s'" % + snapshot.name)[0][0], + "HOURLY" + ) + + time.sleep(180) + + for snapshot in snapshots: + self.assertTrue( + is_snapshot_on_nfs( + self.apiclient, + self.dbclient, + self.config, + self.zone.id, + snapshot.id)) + + recurring_snapshot.delete(self.apiclient) + + self.assertEqual( + self.dbclient.execute( + "select * from snapshot_policy where uuid='%s'" % + recurring_snapshot.id), + [] + ) + + self.testdata["recurring_snapshot"]["intervaltype"] = 'DAILY' + self.testdata["recurring_snapshot"]["schedule"] = '00:00' + recurring_snapshot_daily = SnapshotPolicy.create( + self.apiclient, + self.volume[0].id, + self.testdata["recurring_snapshot"] + ) + + list_snapshots_policy_daily = list_snapshot_policy( + self.apiclient, + id=recurring_snapshot_daily.id, + volumeid=self.volume[0].id + ) + + list_validation = validateList(list_snapshots_policy_daily) + + self.assertEqual( + list_validation[0], + PASS, + "snapshot list validation failed due to %s" % + list_validation[2]) + + snap_db_daily = self.dbclient.execute( + "select * from snapshot_policy where uuid='%s'" % + recurring_snapshot_daily.id) + + validation_result_1 = validateList(snap_db_daily) + + self.assertEqual( + validation_result_1[0], + PASS, + "snapshot_policy list validation failed due to %s" % + validation_result_1[2]) + + self.assertNotEqual( + len(snap_db_daily), + 0, + "Check DB Query result set" + ) + + recurring_snapshot_daily.delete(self.apiclient) + + self.assertEqual( + self.dbclient.execute( + "select * from snapshot_policy where uuid='%s'" % + recurring_snapshot_daily.id), + [] + ) + + self.testdata["recurring_snapshot"]["intervaltype"] = 'WEEKLY' + self.testdata["recurring_snapshot"]["schedule"] = '00:00:1' + recurring_snapshot_weekly = SnapshotPolicy.create( + self.apiclient, + self.volume[0].id, + self.testdata["recurring_snapshot"] + ) + 
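        # The WEEKLY policy goes through the same create -> list -> validate ->
        # DB-row check -> delete cycle as the DAILY policy above; the MONTHLY
        # policy below repeats it once more.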
+ list_snapshots_policy_weekly = list_snapshot_policy( + self.apiclient, + id=recurring_snapshot_weekly.id, + volumeid=self.volume[0].id + ) + + list_validation = validateList(list_snapshots_policy_weekly) + + self.assertEqual( + list_validation[0], + PASS, + "snapshot list validation failed due to %s" % + list_validation[2]) + + snap_sch_2 = self.dbclient.execute( + "select * from snapshot_policy where uuid='%s'" % + recurring_snapshot_weekly.id) + + validation_result_2 = validateList(snap_sch_2) + + self.assertEqual( + validation_result_2[0], + PASS, + "snapshot_policy list validation failed due to %s" % + validation_result_2[2]) + + self.assertNotEqual( + len(snap_sch_2), + 0, + "Check DB Query result set" + ) + + recurring_snapshot_weekly.delete(self.apiclient) + + self.assertEqual( + self.dbclient.execute( + "select * from snapshot_policy where uuid='%s'" % + recurring_snapshot_weekly.id), + [] + ) + + self.testdata["recurring_snapshot"]["intervaltype"] = 'MONTHLY' + self.testdata["recurring_snapshot"]["schedule"] = '00:00:1' + recurring_snapshot_monthly = SnapshotPolicy.create( + self.apiclient, + self.volume[0].id, + self.testdata["recurring_snapshot"] + ) + + list_snapshots_policy_monthly = list_snapshot_policy( + self.apiclient, + id=recurring_snapshot_monthly.id, + volumeid=self.volume[0].id + ) + + list_validation = validateList(list_snapshots_policy_monthly) + + self.assertEqual( + list_validation[0], + PASS, + "snapshot list validation failed due to %s" % + list_validation[2]) + + snap_sch_3 = self.dbclient.execute( + "select * from snapshot_policy where uuid='%s'" % + recurring_snapshot_monthly.id) + + validation_result = validateList(snap_sch_3) + + self.assertEqual( + validation_result[0], + PASS, + "snapshot_policy list validation failed due to %s" % + validation_result[2]) + + self.assertNotEqual( + len(snap_sch_3), + 0, + "Check DB Query result set" + ) + + recurring_snapshot_monthly.delete(self.apiclient) + + self.assertEqual( + self.dbclient.execute( + "select * from snapshot_policy where uuid='%s'" % + recurring_snapshot_weekly.id), + [] + ) + + snapshots = list_snapshots( + self.apiclient, + volumeid=self.volume[0].id, + intervaltype=self.testdata["recurring_snapshot"]["intervaltype"], + snapshottype='RECURRING', + listall=True + ) + + # Step 3 + self.testdata["recurring_snapshot"]["intervaltype"] = 'HOURLY' + self.testdata["recurring_snapshot"]["schedule"] = 1 + recurring_snapshot_1 = SnapshotPolicy.create( + self.apiclient, + self.volume[0].id, + self.testdata["recurring_snapshot"] + ) + # ListSnapshotPolicy should return newly created policy + list_snapshots_policy = list_snapshot_policy( + self.apiclient, + id=recurring_snapshot_1.id, + volumeid=self.volume[0].id + ) + list_validation = validateList(list_snapshots_policy) + + self.assertEqual( + list_validation[0], + PASS, + "snapshot list validation failed due to %s" % + list_validation[2]) + + timeout = self.testdata["timeout"] + while True: + snapshots = list_snapshots( + self.apiclient, + volumeid=self.volume[0].id, + intervaltype=self.testdata[ + "recurring_snapshot"]["intervaltype"], + snapshottype='RECURRING', + listall=True + ) + + if isinstance(snapshots, list): + break + + elif timeout == 0: + raise Exception("List snapshots API call failed.") + + snap_to_delete = snapshots[0] + + time.sleep( + (self.testdata["recurring_snapshot"]["maxsnaps"]) * 3600 + ) + + snapshots_1 = list_snapshots( + self.apiclient, + volumeid=self.volume[0].id, + intervaltype=self.testdata["recurring_snapshot"]["intervaltype"], + 
snapshottype='RECURRING', + listall=True + ) + + self.assertTrue(snap_to_delete not in snapshots_1) + + time.sleep(360) + + self.assertEqual( + self.dbclient.execute( + "select status from snapshots where uuid='%s'" % + snap_to_delete.id)[0][0], + "Destroyed" + ) + + self.assertFalse( + is_snapshot_on_nfs( + self.apiclient, + self.dbclient, + self.config, + self.zone.id, + snap_to_delete.id)) + + # Step 4 + recurring_snapshot = SnapshotPolicy.create( + self.apiclient, + self.volume[0].id, + self.testdata["recurring_snapshot"] + ) + # ListSnapshotPolicy should return newly created policy + list_snapshots_policy = list_snapshot_policy( + self.apiclient, + id=recurring_snapshot.id, + volumeid=self.volume[0].id + ) + list_validation = validateList(list_snapshots_policy) + + self.assertEqual( + list_validation[0], + PASS, + "snapshot list validation failed due to %s" % + list_validation[2]) + + time.sleep(180) + snap_time_hourly = self.dbclient.execute( + "select scheduled_timestamp from \ + snapshot_schedule where uuid='%s'" % + recurring_snapshot.id) + + self.debug("Timestamp for hourly snapshot %s" % snap_time_hourly) + recurring_snapshot.delete(self.apiclient) + + self.testdata["recurring_snapshot"]["intervaltype"] = 'DAILY' + self.testdata["recurring_snapshot"]["schedule"] = '00:00' + recurring_snapshot_daily = SnapshotPolicy.create( + self.apiclient, + self.volume[0].id, + self.testdata["recurring_snapshot"] + ) + + list_snapshots_policy_daily = list_snapshot_policy( + self.apiclient, + id=recurring_snapshot_daily.id, + volumeid=self.volume[0].id + ) + + list_validation = validateList(list_snapshots_policy_daily) + + self.assertEqual( + list_validation[0], + PASS, + "snapshot list validation failed due to %s" % + list_validation[2]) + + time.sleep(180) + snap_time_daily = self.dbclient.execute( + "select scheduled_timestamp from \ + snapshot_schedule where uuid='%s'" % + recurring_snapshot_daily.id) + + self.debug("Timestamp for daily snapshot %s" % snap_time_daily) + recurring_snapshot_daily.delete(self.apiclient) + + self.testdata["recurring_snapshot"]["intervaltype"] = 'WEEKLY' + self.testdata["recurring_snapshot"]["schedule"] = '00:00:1' + recurring_snapshot_weekly = SnapshotPolicy.create( + self.apiclient, + self.volume[0].id, + self.testdata["recurring_snapshot"] + ) + + list_snapshots_policy_weekly = list_snapshot_policy( + self.apiclient, + id=recurring_snapshot_weekly.id, + volumeid=self.volume[0].id + ) + + list_validation = validateList(list_snapshots_policy_weekly) + + self.assertEqual( + list_validation[0], + PASS, + "snapshot list validation failed due to %s" % + list_validation[2]) + + time.sleep(180) + snap_time_weekly = self.dbclient.execute( + "select scheduled_timestamp from \ + snapshot_schedule where uuid='%s'" % + recurring_snapshot_weekly.id) + + self.debug("Timestamp for monthly snapshot %s" % snap_time_weekly) + recurring_snapshot_weekly.delete(self.apiclient) + + self.testdata["recurring_snapshot"]["intervaltype"] = 'MONTHLY' + self.testdata["recurring_snapshot"]["schedule"] = '00:00:1' + recurring_snapshot_monthly = SnapshotPolicy.create( + self.apiclient, + self.volume[0].id, + self.testdata["recurring_snapshot"] + ) + + list_snapshots_policy_monthly = list_snapshot_policy( + self.apiclient, + id=recurring_snapshot_monthly.id, + volumeid=self.volume[0].id + ) + + list_validation = validateList(list_snapshots_policy_monthly) + + self.assertEqual( + list_validation[0], + PASS, + "snapshot list validation failed due to %s" % + list_validation[2]) + + 
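Throughout these tests, snapshot availability is polled by calling list_snapshots in a "while True" loop that breaks once a list comes back; the timeout value read from testdata is compared against zero but never decremented, so the loop either exits on the first successful listing or keeps polling. A bounded helper along these lines (the retry count and interval are assumed values, not taken from the test data) expresses the intended wait:

def wait_for_recurring_snapshots(apiclient, volumeid, intervaltype,
                                 retries=60, interval=10):
    """Poll listSnapshots until it returns a list or the retries run out."""
    for _ in range(retries):
        snapshots = list_snapshots(
            apiclient,
            volumeid=volumeid,
            intervaltype=intervaltype,
            snapshottype='RECURRING',
            listall=True
        )
        if isinstance(snapshots, list):
            return snapshots
        time.sleep(interval)  # assumed pause between polls
    raise Exception("List snapshots API call failed.")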
+        time.sleep(180)
+        snap_time_monthly = self.dbclient.execute(
+            "select scheduled_timestamp from \
+            snapshot_schedule where uuid='%s'" %
+            recurring_snapshot_monthly.id)
+
+        self.debug("Timestamp for monthly snapshot %s" % snap_time_monthly)
+
+        recurring_snapshot_monthly.delete(self.apiclient)
+
+        # Step 5
+        self.testdata["recurring_snapshot"]["intervaltype"] = 'HOURLY'
+        self.testdata["recurring_snapshot"]["schedule"] = 1
+        recurring_snapshot_hourly = SnapshotPolicy.create(
+            self.apiclient,
+            self.volume[0].id,
+            self.testdata["recurring_snapshot"]
+        )
+        self.testdata["recurring_snapshot"]["intervaltype"] = 'MONTHLY'
+        self.testdata["recurring_snapshot"]["schedule"] = '00:00:1'
+        recurring_snapshot_monthly = SnapshotPolicy.create(
+            self.apiclient,
+            self.volume[0].id,
+            self.testdata["recurring_snapshot"]
+        )
+
+        list_snapshots_policy = list_snapshot_policy(
+            self.apiclient,
+            volumeid=self.volume[0].id
+        )
+
+        list_validation = validateList(list_snapshots_policy)
+
+        self.assertEqual(
+            list_validation[0],
+            PASS,
+            "snapshot list validation failed due to %s" %
+            list_validation[2])
+
+        for rec in [recurring_snapshot_hourly, recurring_snapshot_monthly]:
+            self.assertTrue(
+                any(policy.id == rec.id
+                    for policy in list_snapshots_policy))
+
+        recurring_snapshot_hourly.delete(self.apiclient)
+        recurring_snapshot_monthly.delete(self.apiclient)
+
+        # Step 6
+        self.testdata["recurring_snapshot"]["intervaltype"] = 'HOURLY'
+        self.testdata["recurring_snapshot"]["schedule"] = 1
+        recurring_snapshot_hourly = SnapshotPolicy.create(
+            self.apiclient,
+            self.volume[0].id,
+            self.testdata["recurring_snapshot"]
+        )
+
+        self.testdata["recurring_snapshot"]["intervaltype"] = 'MONTHLY'
+        self.testdata["recurring_snapshot"]["schedule"] = '00:00:1'
+        recurring_snapshot_monthly = SnapshotPolicy.create(
+            self.apiclient,
+            self.volume[0].id,
+            self.testdata["recurring_snapshot"]
+        )
+
+        recurring_snapshot_monthly.delete(self.apiclient)
+
+        list_snapshots_policy = list_snapshot_policy(
+            self.apiclient,
+            volumeid=self.volume[0].id
+        )
+
+        list_validation = validateList(list_snapshots_policy)
+
+        self.assertEqual(
+            list_validation[0],
+            PASS,
+            "snapshot list validation failed due to %s" %
+            list_validation[2])
+
+        self.assertTrue(
+            any(policy.id == recurring_snapshot_hourly.id
+                for policy in list_snapshots_policy))
+
+        self.assertFalse(
+            any(policy.id == recurring_snapshot_monthly.id
+                for policy in list_snapshots_policy))
+
+        # Step 7
+        self.testdata["recurring_snapshot"]["intervaltype"] = 'HOURLY'
+        self.testdata["recurring_snapshot"]["schedule"] = 1
+        recurring_snapshot = SnapshotPolicy.create(
+            self.apiclient,
+            self.volume[0].id,
+            self.testdata["recurring_snapshot"]
+        )
+        # ListSnapshotPolicy should return newly created policy
+        list_snapshots_policy = list_snapshot_policy(
+            self.apiclient,
+            id=recurring_snapshot.id,
+            volumeid=self.volume[0].id
+        )
+        list_validation = validateList(list_snapshots_policy)
+
+        self.assertEqual(
+            list_validation[0],
+            PASS,
+            "snapshot list validation failed due to %s" %
+            list_validation[2])
+
+        timeout = self.testdata["timeout"]
+        while True:
+            snapshots = list_snapshots(
+                self.apiclient,
+                volumeid=self.volume[0].id,
+                intervaltype=self.testdata[
+                    "recurring_snapshot"]["intervaltype"],
+                snapshottype='RECURRING',
+                listall=True
+            )
+
+            if isinstance(snapshots, list):
+                break
+
+            elif timeout == 0:
+                raise Exception("List snapshots API call failed.")
+
+        self.vm_1.delete(self.apiclient, expunge=False)
+
+        time.sleep(3600)
+        snapshot_list = Snapshot.list(
+            self.apiclient,
+            volumeid=self.volume[0].id
+        )
+
+        list_validation = validateList(snapshot_list)
+
+        self.assertEqual(
+            list_validation[0],
+            PASS,
+            "snapshot list validation failed due to %s" %
+            list_validation[2])
+
+        self.assertEqual(len(snapshot_list),
+                         1,
+                         "Verify that snapshot is not created after VM deletion"
+                         )
+        # Step 8
+        self.vm_1.recover(self.apiclient)
+        time.sleep(3600)
+
+        snapshot_list = Snapshot.list(
+            self.apiclient,
+            volumeid=self.volume[0].id
+        )
+
+        self.assertEqual(len(snapshot_list),
+                         2,
+                         "Verify that snapshot is created after VM recovery"
+                         )
+        # Step 9
+        self.vm_1.delete(self.apiclient)
+        time.sleep(180)
+        with self.assertRaises(Exception):
+            list_snapshots_policy = list_snapshot_policy(
+                self.apiclient,
+                volumeid=self.volume[0].id
+            )
+
+    @attr(tags=["advanced", "basic"], required_hardware="true")
+    def test_02_volume_max_snapshot(self):
+        """ Test Volume Snapshot
+        # 1. Create an hourly recurring snapshot policy with maxsnaps=2 and
+             verify that when the 3rd snapshot is taken the first
+             snapshot gets deleted
+        """
+
+        if self.hypervisor.lower() not in ["kvm", "vmware"]:
+            self.skipTest("Skip test for hypervisor other than KVM and VMware")
+
+        # Step 1
+        self.testdata["recurring_snapshot"]["intervaltype"] = 'HOURLY'
+        self.testdata["recurring_snapshot"]["schedule"] = 1
+        recurring_snapshot_1 = SnapshotPolicy.create(
+            self.apiclient,
+            self.volume[0].id,
+            self.testdata["recurring_snapshot"]
+        )
+        # ListSnapshotPolicy should return newly created policy
+        list_snapshots_policy = list_snapshot_policy(
+            self.apiclient,
+            id=recurring_snapshot_1.id,
+            volumeid=self.volume[0].id
+        )
+        list_validation = validateList(list_snapshots_policy)
+
+        self.assertEqual(
+            list_validation[0],
+            PASS,
+            "snapshot list validation failed due to %s" %
+            list_validation[2])
+
+        timeout = self.testdata["timeout"]
+        while True:
+            snapshots = list_snapshots(
+                self.apiclient,
+                volumeid=self.volume[0].id,
+                intervaltype=self.testdata[
+                    "recurring_snapshot"]["intervaltype"],
+                snapshottype='RECURRING',
+                listall=True
+            )
+
+            if isinstance(snapshots, list):
+                break
+
+            elif timeout == 0:
+                raise Exception("List snapshots API call failed.")
+
+        snap_to_delete = snapshots[0]
+
+        time.sleep(
+            (self.testdata["recurring_snapshot"]["maxsnaps"]) * 3600
+        )
+
+        snapshots_1 = list_snapshots(
+            self.apiclient,
+            volumeid=self.volume[0].id,
+            intervaltype=self.testdata["recurring_snapshot"]["intervaltype"],
+            snapshottype='RECURRING',
+            listall=True
+        )
+
+        self.assertTrue(snap_to_delete not in snapshots_1)
+
+        time.sleep(360)
+
+        self.assertEqual(
+            self.dbclient.execute(
+                "select status from snapshots where uuid='%s'" %
+                snap_to_delete.id)[0][0],
+            "Destroyed"
+        )
+
+        self.assertFalse(
+            is_snapshot_on_nfs(
+                self.apiclient,
+                self.dbclient,
+                self.config,
+                self.zone.id,
+                snap_to_delete.id))
+
+        # DATA DISK
+        recurring_snapshot_data = SnapshotPolicy.create(
+            self.apiclient,
+            self.data_volume[0].id,
+            self.testdata["recurring_snapshot"]
+        )
+        # ListSnapshotPolicy should return newly created policy
+        list_snapshots_policy = list_snapshot_policy(
+            self.apiclient,
+            id=recurring_snapshot_data.id,
+            volumeid=self.data_volume[0].id
+        )
+
+        list_validation = validateList(list_snapshots_policy)
+
+        self.assertEqual(
+            list_validation[0],
+            PASS,
+            "snapshot list validation failed due to %s" %
+            list_validation[2])
+
+        timeout = self.testdata["timeout"]
+        while True:
+            snapshots = list_snapshots(
+                self.apiclient,
+                volumeid=self.data_volume[0].id,
+                intervaltype=self.testdata[
+                    "recurring_snapshot"]["intervaltype"],
+                snapshottype='RECURRING',
+                listall=True
+            )
+
+            if isinstance(snapshots, list):
+                break
+
+            elif timeout == 0:
+                raise Exception("List snapshots API call failed.")
+
+        data_snap_to_delete = snapshots[0]
+
+        time.sleep(
+            (self.testdata["recurring_snapshot"]["maxsnaps"]) * 3600
+        )
+
+        data_snapshots_1 = list_snapshots(
+            self.apiclient,
+            volumeid=self.data_volume[0].id,
+            intervaltype=self.testdata["recurring_snapshot"]["intervaltype"],
+            snapshottype='RECURRING',
+            listall=True
+        )
+
+        self.assertTrue(data_snap_to_delete not in data_snapshots_1)
+
+        time.sleep(360)
+
+        self.assertEqual(
+            self.dbclient.execute(
+                "select status from snapshots where uuid='%s'" %
+                data_snap_to_delete.id)[0][0],
+            "Destroyed"
+        )
+
+        self.assertFalse(
+            is_snapshot_on_nfs(
+                self.apiclient,
+                self.dbclient,
+                self.config,
+                self.zone.id,
+                data_snap_to_delete.id))
+
+    @attr(tags=["advanced", "basic"], required_hardware="true")
+    def test_03_volume_rec_snapshot(self):
+        """ Test Volume (root) Snapshot
+        # 1. For snapshot.delta.max > maxsnaps, verify that when the number
+             of snapshots exceeds the maxsnaps value, the previous snapshot
+             is deleted from the database but remains on secondary storage,
+             and once the count exceeds snapshot.delta.max the snapshot is
+             deleted from secondary storage as well
+        """
+
+        if self.hypervisor.lower() != "xenserver":
+            self.skipTest("Skip test for hypervisor other than XenServer")
+
+        # Step 1
+        self.testdata["recurring_snapshot"]["intervaltype"] = 'HOURLY'
+        self.testdata["recurring_snapshot"]["schedule"] = 1
+        recurring_snapshot_root = SnapshotPolicy.create(
+            self.apiclient,
+            self.volume[0].id,
+            self.testdata["recurring_snapshot"]
+        )
+
+        Configurations.update(self.apiclient,
+                              name="snapshot.delta.max",
+                              value="3"
+                              )
+
+        list_snapshots_policy = list_snapshot_policy(
+            self.apiclient,
+            id=recurring_snapshot_root.id,
+            volumeid=self.volume[0].id
+        )
+        list_validation = validateList(list_snapshots_policy)
+
+        self.assertEqual(
+            list_validation[0],
+            PASS,
+            "snapshot list validation failed due to %s" %
+            list_validation[2])
+
+        timeout = self.testdata["timeout"]
+        while True:
+            snapshots = list_snapshots(
+                self.apiclient,
+                volumeid=self.volume[0].id,
+                intervaltype=self.testdata[
+                    "recurring_snapshot"]["intervaltype"],
+                snapshottype='RECURRING',
+                listall=True
+            )
+
+            if isinstance(snapshots, list):
+                break
+
+            elif timeout == 0:
+                raise Exception("List snapshots API call failed.")
+
+        time.sleep(3600 * 2)
+
+        snapshots_2 = list_snapshots(
+            self.apiclient,
+            volumeid=self.volume[0].id,
+            intervaltype=self.testdata["recurring_snapshot"]["intervaltype"],
+            snapshottype='RECURRING',
+            listall=True
+        )
+
+        self.assertTrue(snapshots[0] not in snapshots_2)
+
+        for snapshot in snapshots_2:
+            snapshots.append(snapshot)
+
+        time.sleep(360)
+        self.assertEqual(
+            self.dbclient.execute(
+                "select status from snapshots where uuid='%s'" %
+                snapshots[0].id)[0][0],
+            "Destroyed"
+        )
+
+        self.assertTrue(
+            is_snapshot_on_nfs(
+                self.apiclient,
+                self.dbclient,
+                self.config,
+                self.zone.id,
+                snapshots[0].id))
+
+        time.sleep(3600)
+
+        snapshots_3 = list_snapshots(
+            self.apiclient,
+            volumeid=self.volume[0].id,
+            intervaltype=self.testdata["recurring_snapshot"]["intervaltype"],
+            snapshottype='RECURRING',
+            listall=True
+        )
+
+        self.assertTrue(snapshots[1] not in snapshots_3)
+        snapshots.append(snapshots_3[1])
+        time.sleep(180)
+
+        self.assertEqual(
+            self.dbclient.execute(
+                "select status from snapshots where uuid='%s'" %
+                snapshots[1].id)[0][0],
"Destroyed" + ) + + for snapshot in [snapshots[0], snapshots[1]]: + self.assertTrue( + is_snapshot_on_nfs( + self.apiclient, + self.dbclient, + self.config, + self.zone.id, + snapshot.id)) + + time.sleep(3600) + + snapshots_4 = list_snapshots( + self.apiclient, + volumeid=self.volume[0].id, + intervaltype=self.testdata["recurring_snapshot"]["intervaltype"], + snapshottype='RECURRING', + listall=True + ) + + self.assertTrue(snapshots[2] not in snapshots_4) + + snapshots.append(snapshots_4[1]) + time.sleep(180) + + self.assertEqual( + self.dbclient.execute( + "select status from snapshots where uuid='%s'" % + snapshots[2].id)[0][0], + "Destroyed" + ) + + for snapshot in [snapshots[0], snapshots[1], snapshots[2]]: + self.assertFalse( + is_snapshot_on_nfs( + self.apiclient, + self.dbclient, + self.config, + self.zone.id, + snapshot.id)) + + return diff --git a/test/integration/testpaths/testpath_volume_snapshot.py b/test/integration/testpaths/testpath_volume_snapshot.py new file mode 100644 index 00000000000..1115968022a --- /dev/null +++ b/test/integration/testpaths/testpath_volume_snapshot.py @@ -0,0 +1,972 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+""" Test cases for VM/Volume snapshot Test Path +""" +from nose.plugins.attrib import attr +from marvin.cloudstackTestCase import cloudstackTestCase, unittest +from marvin.lib.utils import (cleanup_resources, + is_snapshot_on_nfs, + validateList) +from marvin.lib.base import (Account, + ServiceOffering, + DiskOffering, + Template, + VirtualMachine, + Snapshot, + Volume + ) +from marvin.lib.common import (get_domain, + get_zone, + get_template, + list_volumes, + list_snapshots, + list_events, + createChecksum, + compareChecksum + ) + +from marvin.codes import PASS +from threading import Thread + + +class TestVolumeSnapshot(cloudstackTestCase): + + @classmethod + def setUpClass(cls): + testClient = super(TestVolumeSnapshot, cls).getClsTestClient() + cls.apiclient = testClient.getApiClient() + cls.testdata = testClient.getParsedTestDataConfig() + cls.hypervisor = cls.testClient.getHypervisorInfo() + + # Get Zone, Domain and templates + cls.domain = get_domain(cls.apiclient) + cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests()) + + cls.template = get_template( + cls.apiclient, + cls.zone.id, + cls.testdata["ostype"]) + + cls._cleanup = [] + + if cls.hypervisor.lower() not in [ + "vmware", + "kvm", + "xenserver"]: + raise unittest.SkipTest( + "Storage migration not supported on %s" % + cls.hypervisor) + + try: + # Create an account + cls.account = Account.create( + cls.apiclient, + cls.testdata["account"], + domainid=cls.domain.id + ) + cls._cleanup.append(cls.account) + # Create user api client of the account + cls.userapiclient = testClient.getUserApiClient( + UserName=cls.account.name, + DomainName=cls.account.domain + ) + # Create Service offering + cls.service_offering = ServiceOffering.create( + cls.apiclient, + cls.testdata["service_offering"], + ) + cls._cleanup.append(cls.service_offering) + # Create Disk offering + cls.disk_offering = DiskOffering.create( + cls.apiclient, + cls.testdata["disk_offering"], + ) + cls._cleanup.append(cls.disk_offering) + #Create VM_1 and VM_2 + cls.vm_1 = VirtualMachine.create( + cls.userapiclient, + cls.testdata["small"], + templateid=cls.template.id, + accountid=cls.account.name, + domainid=cls.account.domainid, + serviceofferingid=cls.service_offering.id, + zoneid=cls.zone.id, + diskofferingid=cls.disk_offering.id, + mode=cls.zone.networktype + ) + + cls.vm_2 = VirtualMachine.create( + cls.userapiclient, + cls.testdata["small"], + templateid=cls.template.id, + accountid=cls.account.name, + domainid=cls.account.domainid, + serviceofferingid=cls.service_offering.id, + zoneid=cls.zone.id, + diskofferingid=cls.disk_offering.id, + mode=cls.zone.networktype + ) + + except Exception as e: + cls.tearDownClass() + raise e + return + + @classmethod + def tearDownClass(cls): + try: + cleanup_resources(cls.apiclient, cls._cleanup) + except Exception as e: + raise Exception("Warning: Exception during cleanup : %s" % e) + + def setUp(self): + self.apiclient = self.testClient.getApiClient() + self.dbclient = self.testClient.getDbConnection() + self.cleanup = [] + + def tearDown(self): + try: + root_volume = list_volumes( + self.apiclient, + virtualmachineid=self.vm_1.id, + type='ROOT', + listall=True + ) + + self.vm_1.stop(self.apiclient) + snaps = [] + for i in range(2): + + root_vol_snap = Snapshot.create( + self.apiclient, + root_volume[0].id) + + + self.assertEqual( + root_vol_snap.state, + "BackedUp", + "Check if the data vol snapshot state is correct " + ) + + snaps.append(root_vol_snap) + + for snap in snaps: + + self.assertNotEqual( + 
self.dbclient.execute( + "select status from snapshots where name='%s'" % + snap.name), + "Destroyed" + ) + + for snap in snaps: + self.assertTrue( + is_snapshot_on_nfs( + self.apiclient, + self.dbclient, + self.config, + self.zone.id, + snap.id)) + + self.account.delete(self.apiclient) + + for snap in snaps: + self.assertEqual( + self.dbclient.execute( + "select status from snapshots where name='%s'" % + snap.name)[0][0], + "Destroyed" + ) + + for snap in snaps: + self.assertFalse( + is_snapshot_on_nfs( + self.apiclient, + self.dbclient, + self.config, + self.zone.id, + snap.id)) + + cleanup_resources(self.apiclient, self.cleanup) + except Exception as e: + raise Exception("Warning: Exception during cleanup : %s" % e) + return + + @attr(tags=["advanced", "basic"], required_hardware="true") + def test_01_volume_snapshot(self): + """ Test Volume (root) Snapshot + # 1. Deploy a VM on primary storage and . + # 2. Take snapshot on root disk + # 3. Verify the snapshot's entry in the "snapshots" table + and presence of the corresponding + snapshot on the Secondary Storage + # 4. Create Template from the Snapshot and Deploy a + VM using the Template + # 5. Log in to the VM from template and make verify + the contents of the ROOT disk matches with the snapshot. + # 6. Delete Snapshot and Deploy a Linux VM from the + Template and verify the successful deployment of the VM. + # 7. Create multiple snapshots on the same volume and + Check the integrity of all the snapshots by creating + a template from the snapshot and deploying a Vm from it + and delete one of the snapshots + # 8. Verify that the original checksum matches with the checksum + of VM's created from remaning snapshots + # 9. Make verify the contents of the ROOT disk + matches with the snapshot + # 10.Verify that Snapshot of both DATA and ROOT volume should + succeed when snapshot of Data disk of a VM is taken + when snapshot of ROOT volume of VM is in progress + # 11.Create snapshot of data disk and verify the original checksum + matches with the volume created from snapshot + # 12.Verify that volume's state should not change when snapshot of + a DATA volume is taken that is attached to a VM + # 13.Verify that volume's state should not change when snapshot of + a DATA volume is taken that is not attached to a VM + # 14.Verify that create Snapshot with quiescevm=True should succeed + # 15.revertSnapshot() to revert VM to a specified + Volume snapshot for root volume + """ + + # Step 1 + # Get ROOT Volume Id + root_volumes_cluster_list = list_volumes( + self.apiclient, + virtualmachineid=self.vm_1.id, + type='ROOT', + listall=True + ) + + root_volume_cluster = root_volumes_cluster_list[0] + + disk_volumes_cluster_list = list_volumes( + self.apiclient, + virtualmachineid=self.vm_1.id, + type='DATADISK', + listall=True + ) + + data_disk = disk_volumes_cluster_list[0] + + root_vol_state = root_volume_cluster.state + + ckecksum_random_root_cluster = createChecksum( + service=self.testdata, + virtual_machine=self.vm_1, + disk=root_volume_cluster, + disk_type="rootdiskdevice") + + self.vm_1.stop(self.apiclient) + root_vol_snap = Snapshot.create( + self.apiclient, + root_volume_cluster.id) + + self.assertEqual( + root_vol_snap.state, + "BackedUp", + "Check if the snapshot state is correct " + ) + + self.assertEqual( + root_vol_state, + root_volume_cluster.state, + "Check if volume state has changed" + ) + + self.vm_1.start(self.apiclient) + # Step 2 + snapshot_list = list_snapshots( + self.apiclient, + id=root_vol_snap.id + ) + + 
self.assertNotEqual( + snapshot_list, + None, + "Check if result exists in list item call" + ) + self.assertEqual( + snapshot_list[0].id, + root_vol_snap.id, + "Check resource id in list resources call" + ) + + self.assertTrue( + is_snapshot_on_nfs( + self.apiclient, + self.dbclient, + self.config, + self.zone.id, + root_vol_snap.id)) + + events = list_events( + self.apiclient, + account=self.account.name, + domainid=self.account.domainid, + type='SNAPSHOT.CREATE') + + event_list_validation_result = validateList(events) + + self.assertEqual( + event_list_validation_result[0], + PASS, + "event list validation failed due to %s" % + event_list_validation_result[2]) + self.debug("Events list contains event SNAPSHOT.CREATE") + + qresultset = self.dbclient.execute( + "select * from event where type='SNAPSHOT.CREATE' AND \ + description like '%%%s%%' AND state='Completed';" % + root_volume_cluster.id) + + event_validation_result = validateList(qresultset) + + self.assertEqual( + event_validation_result[0], + PASS, + "event list validation failed due to %s" % + event_validation_result[2]) + + self.assertNotEqual( + len(qresultset), + 0, + "Check DB Query result set" + ) + + qresult = str(qresultset) + self.assertEqual( + qresult.count('SNAPSHOT.CREATE') > 0, + True, + "Check SNAPSHOT.CREATE event in events table" + ) + + #Usage_Event + qresultset = self.dbclient.execute( + "select * from usage_event where type='SNAPSHOT.CREATE' AND \ + resource_name='%s'" % + root_vol_snap.name) + + usage_event_validation_result = validateList(qresultset) + + self.assertEqual( + usage_event_validation_result[0], + PASS, + "event list validation failed due to %s" % + usage_event_validation_result[2]) + + self.assertNotEqual( + len(qresultset), + 0, + "Check DB Query result set" + ) + + self.assertEqual( + self.dbclient.execute("select size from usage_event where type='SNAPSHOT.CREATE' AND \ + resource_name='%s'" % + root_vol_snap.name)[0][0], + root_vol_snap.physicalsize) + + # Step 3 + # create template from snapshot root_vol_snap + templateFromSnapshot = Template.create_from_snapshot( + self.apiclient, + root_vol_snap, + self.testdata["template_2"]) + + self.assertNotEqual( + templateFromSnapshot, + None, + "Check if result exists in list item call" + ) + + vm_from_temp = VirtualMachine.create( + self.apiclient, + self.testdata["small"], + templateid=templateFromSnapshot.id, + accountid=self.account.name, + domainid=self.account.domainid, + serviceofferingid=self.service_offering.id, + zoneid=self.zone.id, + mode=self.zone.networktype + ) + + self.assertNotEqual( + vm_from_temp, + None, + "Check if result exists in list item call" + ) + + compareChecksum( + self.apiclient, + service=self.testdata, + original_checksum=ckecksum_random_root_cluster, + disk_type="rootdiskdevice", + virt_machine=vm_from_temp + ) + vm_from_temp.delete(self.apiclient) + # Step 4 + root_vol_snap.delete(self.userapiclient) + + self.assertEqual( + list_snapshots( + self.apiclient, + volumeid=root_volume_cluster.id, + ), None, "Snapshot list should be empty") + + events = list_events( + self.apiclient, + account=self.account.name, + domainid=self.account.domainid, + type='SNAPSHOT.DELETE') + + event_list_validation_result = validateList(events) + + self.assertEqual( + event_list_validation_result[0], + PASS, + "event list validation failed due to %s" % + event_list_validation_result[2]) + + self.debug("Events list contains event SNAPSHOT.DELETE") + + self.debug("select id from account where uuid = '%s';" + % self.account.id) + + qresultset 
= self.dbclient.execute( + "select id from account where uuid = '%s';" + % self.account.id + ) + + account_validation_result = validateList(qresultset) + + self.assertEqual( + account_validation_result[0], + PASS, + "event list validation failed due to %s" % + account_validation_result[2]) + + self.assertNotEqual( + len(qresultset), + 0, + "Check DB Query result set" + ) + qresult = qresultset[0] + + account_id = qresult[0] + + qresultset = self.dbclient.execute( + "select * from event where type='SNAPSHOT.DELETE' AND \ + account_id='%s' AND state='Completed';" % + account_id) + + delete_snap_validation_result = validateList(qresultset) + + self.assertEqual( + delete_snap_validation_result[0], + PASS, + "event list validation failed due to %s" % + delete_snap_validation_result[2]) + + self.assertNotEqual( + len(qresultset), + 0, + "Check DB Query result set" + ) + + qresult = str(qresultset) + self.assertEqual( + qresult.count('SNAPSHOT.DELETE') > 0, + True, + "Check SNAPSHOT.DELETE event in events table" + ) + + # Step 5 + # delete snapshot and deploy vm from snapshot + vm_from_temp_2 = VirtualMachine.create( + self.apiclient, + self.testdata["small"], + templateid=templateFromSnapshot.id, + accountid=self.account.name, + domainid=self.account.domainid, + serviceofferingid=self.service_offering.id, + zoneid=self.zone.id, + mode=self.zone.networktype + ) + + self.assertNotEqual( + vm_from_temp_2, + None, + "Check if result exists in list item call" + ) + + # Step 6: + compareChecksum( + self.apiclient, + service=self.testdata, + original_checksum=ckecksum_random_root_cluster, + disk_type="rootdiskdevice", + virt_machine=vm_from_temp_2 + ) + + vm_from_temp_2.delete(self.apiclient) + # Step 7 + # Multiple Snapshots + self.vm_1.stop(self.apiclient) + snaps = [] + for i in range(2): + + root_vol_snap = Snapshot.create( + self.apiclient, + root_volume_cluster.id) + + self.assertEqual( + root_vol_snap.state, + "BackedUp", + "Check if the data vol snapshot state is correct " + ) + + snaps.append(root_vol_snap) + + templateFromSnapshot = Template.create_from_snapshot( + self.apiclient, + root_vol_snap, + self.testdata["template_2"]) + + self.assertNotEqual( + templateFromSnapshot, + None, + "Check if result exists in list item call" + ) + + vm_from_temp = VirtualMachine.create( + self.apiclient, + self.testdata["small"], + templateid=templateFromSnapshot.id, + accountid=self.account.name, + domainid=self.account.domainid, + serviceofferingid=self.service_offering.id, + zoneid=self.zone.id, + mode=self.zone.networktype + ) + + self.assertNotEqual( + vm_from_temp, + None, + "Check if result exists in list item call" + ) + + compareChecksum( + self.apiclient, + service=self.testdata, + original_checksum=ckecksum_random_root_cluster, + disk_type="rootdiskdevice", + virt_machine=vm_from_temp + ) + vm_from_temp.delete(self.apiclient) + templateFromSnapshot.delete(self.apiclient) + + self.vm_1.start(self.apiclient) + + delete_snap = snaps.pop(1) + delete_snap.delete(self.apiclient) + + self.assertEqual( + Snapshot.list( + self.apiclient, + id=delete_snap.id + ), None, "Snapshot list should be empty") + + # Step 8 + for snap in snaps: + + templateFromSnapshot = Template.create_from_snapshot( + self.apiclient, + snap, + self.testdata["template_2"]) + + self.assertNotEqual( + templateFromSnapshot, + None, + "Check if result exists in list item call" + ) + + vm_from_temp = VirtualMachine.create( + self.apiclient, + self.testdata["small"], + templateid=templateFromSnapshot.id, + accountid=self.account.name, + 
domainid=self.account.domainid, + serviceofferingid=self.service_offering.id, + zoneid=self.zone.id, + mode=self.zone.networktype + ) + + self.assertNotEqual( + vm_from_temp, + None, + "Check if result exists in list item call" + ) + + compareChecksum( + self.apiclient, + service=self.testdata, + original_checksum=ckecksum_random_root_cluster, + disk_type="rootdiskdevice", + virt_machine=vm_from_temp + ) + + templateFromSnapshot.delete(self.apiclient) + vm_from_temp.delete(self.apiclient) + + for snap in snaps: + snap.delete(self.apiclient) + + # Step 9 + ckecksum_root_cluster = createChecksum( + service=self.testdata, + virtual_machine=self.vm_1, + disk=root_volume_cluster, + disk_type="rootdiskdevice") + + self.vm_1.stop(self.apiclient) + + root_vol_snap_2 = Snapshot.create( + self.apiclient, + root_volume_cluster.id) + + self.assertEqual( + root_vol_snap_2.state, + "BackedUp", + "Check if the data vol snapshot state is correct " + ) + snap_list_validation_result = validateList(events) + + self.assertEqual( + snap_list_validation_result[0], + PASS, + "snapshot list validation failed due to %s" % + snap_list_validation_result[2]) + + self.assertNotEqual( + snapshot_list, + None, + "Check if result exists in list item call" + ) + + templateFromSnapshot = Template.create_from_snapshot( + self.apiclient, + root_vol_snap_2, + self.testdata["template_2"]) + + self.debug( + "create template event comlites with template %s name" % + templateFromSnapshot.name) + + self.assertNotEqual( + templateFromSnapshot, + None, + "Check if result exists in list item call" + ) + + vm_from_temp_2 = VirtualMachine.create( + self.apiclient, + self.testdata["small"], + templateid=templateFromSnapshot.id, + accountid=self.account.name, + domainid=self.account.domainid, + serviceofferingid=self.service_offering.id, + zoneid=self.zone.id, + mode=self.zone.networktype + ) + + self.assertNotEqual( + vm_from_temp_2, + None, + "Check if result exists in list item call" + ) + + compareChecksum( + self.apiclient, + service=self.testdata, + original_checksum=ckecksum_root_cluster, + disk_type="rootdiskdevice", + virt_machine=vm_from_temp_2 + ) + + vm_from_temp_2.delete(self.apiclient) + + # Step 10 + # Take snapshot of Data disk of a VM , when snapshot of ROOT volume of + # VM is in progress + try: + self.vm_1.stop(self.apiclient) + + t1 = Thread( + target=Snapshot.create, + args=( + self.apiclient, + root_volume_cluster.id + )) + + t2 = Thread( + target=Snapshot.create, + args=( + self.apiclient, + data_disk.id + )) + + t1.start() + t2.start() + t1.join() + t2.join() + + except: + self.debug("Error: unable to start thread") + + # Step 11 + # Data Disk + self.vm_1.start(self.apiclient) + ckecksum_data_disk = createChecksum( + service=self.testdata, + virtual_machine=self.vm_1, + disk=data_disk, + disk_type="datadiskdevice_1") + + data_vol_state = data_disk.state + + self.vm_1.stop(self.apiclient) + + data_vol_snap = Snapshot.create( + self.apiclient, + data_disk.id) + + self.assertEqual( + data_vol_snap.state, + "BackedUp", + "Check if the data vol snapshot state is correct " + ) + + self.assertEqual( + data_vol_state, + data_disk.state, + "Check if volume state has changed" + ) + + data_snapshot_list = list_snapshots( + self.apiclient, + id=data_vol_snap.id + ) + + self.assertNotEqual( + data_snapshot_list, + None, + "Check if result exists in list item call" + ) + self.assertEqual( + data_snapshot_list[0].id, + data_vol_snap.id, + "Check resource id in list resources call" + ) + + self.assertTrue( + is_snapshot_on_nfs( + 
self.apiclient, + self.dbclient, + self.config, + self.zone.id, + data_vol_snap.id)) + + events = list_events( + self.apiclient, + account=self.account.name, + domainid=self.account.domainid, + type='SNAPSHOT.CREATE') + + event_list_validation_result = validateList(events) + + self.assertEqual( + event_list_validation_result[0], + PASS, + "event list validation failed due to %s" % + event_list_validation_result[2]) + self.debug("Events list contains event SNAPSHOT.CREATE") + + volumeFromSnap = Volume.create_from_snapshot( + self.apiclient, + data_vol_snap.id, + self.testdata["volume"], + account=self.account.name, + domainid=self.account.domainid, + zoneid=self.zone.id + ) + + self.assertTrue( + is_snapshot_on_nfs( + self.apiclient, + self.dbclient, + self.config, + self.zone.id, + data_vol_snap.id)) + + new_vm = VirtualMachine.create( + self.userapiclient, + self.testdata["small"], + templateid=self.template.id, + accountid=self.account.name, + domainid=self.account.domainid, + serviceofferingid=self.service_offering.id, + zoneid=self.zone.id, + mode=self.zone.networktype + ) + + new_vm.attach_volume( + self.apiclient, + volumeFromSnap + ) + + new_vm.reboot(self.apiclient) + + compareChecksum( + self.apiclient, + service=self.testdata, + original_checksum=ckecksum_data_disk, + disk_type="datadiskdevice_1", + virt_machine=new_vm + ) + + # Step 12 + data_volume_2 = Volume.create( + self.apiclient, + self.testdata["volume"], + zoneid=self.zone.id, + account=self.account.name, + domainid=self.account.domainid, + diskofferingid=self.disk_offering.id + ) + + self.vm_1.start(self.apiclient) + self.vm_1.attach_volume( + self.userapiclient, + data_volume_2 + ) + + self.vm_1.reboot(self.apiclient) + self.vm_1.stop(self.apiclient) + + data_vol_snap_1 = Snapshot.create( + self.apiclient, + data_volume_2.id) + + self.assertEqual( + data_vol_snap_1.state, + "BackedUp", + "Check if the snapshot state is correct " + ) + + data_disk_2_list = Volume.list( + self.userapiclient, + listall=self.testdata["listall"], + id=data_volume_2.id + ) + + self.vm_1.start(self.apiclient) + + checksum_data_2 = createChecksum( + service=self.testdata, + virtual_machine=self.vm_1, + disk=data_disk_2_list[0], + disk_type="datadiskdevice_2") + + # Step 13 + self.vm_1.detach_volume(self.apiclient, + data_volume_2) + + self.vm_1.reboot(self.apiclient) + + prev_state = data_volume_2.state + + data_vol_snap_2 = Snapshot.create( + self.apiclient, + data_volume_2.id) + + self.assertEqual( + data_vol_snap_2.state, + prev_state, + "Check if the volume state is correct " + ) + + data_snapshot_list_2 = list_snapshots( + self.apiclient, + id=data_vol_snap_2.id + ) + + self.assertNotEqual( + data_snapshot_list_2, + None, + "Check if result exists in list item call" + ) + + self.assertEqual( + data_snapshot_list_2[0].id, + data_vol_snap_2.id, + "Check resource id in list resources call" + ) + + volumeFromSnap_2 = Volume.create_from_snapshot( + self.apiclient, + data_vol_snap_2.id, + self.testdata["volume"], + account=self.account.name, + domainid=self.account.domainid, + zoneid=self.zone.id + ) + + self.vm_2.attach_volume( + self.userapiclient, + volumeFromSnap_2 + ) + + self.vm_2.reboot(self.apiclient) + + data_disk_2_list = Volume.list( + self.userapiclient, + listall=self.testdata["listall"], + id=volumeFromSnap_2.id + ) + + compareChecksum( + self.apiclient, + service=self.testdata, + original_checksum=checksum_data_2, + disk_type="datadiskdevice_2", + virt_machine=self.vm_2 + ) + + # Step 14 + self.vm_1.stop(self.apiclient) + with 
self.assertRaises(Exception): + root_vol_snap.revertVolToSnapshot(self.apiclient) + + # Step 15 + root_snap = Snapshot.create( + self.apiclient, + root_volume_cluster.id) + + with self.assertRaises(Exception): + root_snap.revertVolToSnapshot(self.apiclient) + + return diff --git a/tools/marvin/marvin/lib/common.py b/tools/marvin/marvin/lib/common.py index ca9d9e81470..b1efb626f68 100644 --- a/tools/marvin/marvin/lib/common.py +++ b/tools/marvin/marvin/lib/common.py @@ -64,9 +64,11 @@ from marvin.codes import (PASS, FAILED, ISOLATED_NETWORK, VPC_NETWORK, RESOURCE_CPU, RESOURCE_MEMORY, PUBLIC_TRAFFIC, GUEST_TRAFFIC, MANAGEMENT_TRAFFIC, STORAGE_TRAFFIC, VMWAREDVS) -from marvin.lib.utils import (validateList, - xsplit, - get_process_status) +from marvin.lib.utils import (validateList, + xsplit, + get_process_status, + random_gen, + format_volume_to_ext3) from marvin.lib.base import (PhysicalNetwork, PublicIPAddress, NetworkOffering, @@ -93,6 +95,9 @@ from netaddr import IPAddress import random import re import itertools +import random +import hashlib + # Import System modules import time @@ -1402,6 +1407,149 @@ def isNetworkDeleted(apiclient, networkid, timeout=600): #end while return networkDeleted + +def createChecksum(service=None, + virtual_machine=None, + disk=None, + disk_type=None): + + """ Calculate the MD5 checksum of the disk by writing \ + data on the disk where disk_type is either root disk or data disk + @return: returns the calculated checksum""" + + random_data_0 = random_gen(size=100) + # creating checksum(MD5) + m = hashlib.md5() + m.update(random_data_0) + ckecksum_random_data_0 = m.hexdigest() + try: + ssh_client = SshClient( + virtual_machine.ssh_ip, + virtual_machine.ssh_port, + virtual_machine.username, + virtual_machine.password + ) + except Exception: + raise Exception("SSH access failed for server with IP address: %s" % + virtual_machine.ssh_ip) + + # Format partition using ext3 + + format_volume_to_ext3( + ssh_client, + service["volume_write_path"][ + virtual_machine.hypervisor][disk_type] + ) + cmds = ["fdisk -l", + "mkdir -p %s" % service["data_write_paths"]["mount_dir"], + "mount -t ext3 %s1 %s" % ( + service["volume_write_path"][ + virtual_machine.hypervisor][disk_type], + service["data_write_paths"]["mount_dir"] + ), + "mkdir -p %s/%s/%s " % ( + service["data_write_paths"]["mount_dir"], + service["data_write_paths"]["sub_dir"], + service["data_write_paths"]["sub_lvl_dir1"], + ), + "echo %s > %s/%s/%s/%s" % ( + random_data_0, + service["data_write_paths"]["mount_dir"], + service["data_write_paths"]["sub_dir"], + service["data_write_paths"]["sub_lvl_dir1"], + service["data_write_paths"]["random_data"] + ), + "cat %s/%s/%s/%s" % ( + service["data_write_paths"]["mount_dir"], + service["data_write_paths"]["sub_dir"], + service["data_write_paths"]["sub_lvl_dir1"], + service["data_write_paths"]["random_data"] + ) + ] + + for c in cmds: + ssh_client.execute(c) + + # Unmount the storage + cmds = [ + "umount %s" % (service["data_write_paths"]["mount_dir"]), + ] + + for c in cmds: + ssh_client.execute(c) + + return ckecksum_random_data_0 + + +def compareChecksum( + apiclient, + service=None, + original_checksum=None, + disk_type=None, + virt_machine=None + ): + """ + Create md5 checksum of the data present on the disk and compare + it with the given checksum + """ + if virt_machine.state != "Running": + virt_machine.start(apiclient) + + try: + # Login to VM to verify test directories and files + ssh = SshClient( + virt_machine.ssh_ip, + virt_machine.ssh_port, + 
+            virt_machine.username,
+            virt_machine.password
+        )
+    except Exception:
+        raise Exception("SSH access failed for server with IP address: %s" %
+                        virt_machine.ssh_ip)
+
+    # Mount datadiskdevice_1 because this is the first data disk of the new
+    # virtual machine
+    cmds = ["blkid",
+            "fdisk -l",
+            "mkdir -p %s" % service["data_write_paths"]["mount_dir"],
+            "mount -t ext3 %s1 %s" % (
+                service["volume_write_path"][
+                    virt_machine.hypervisor][disk_type],
+                service["data_write_paths"]["mount_dir"]
+            ),
+            ]
+
+    for c in cmds:
+        ssh.execute(c)
+
+    returned_data_0 = ssh.execute(
+        "cat %s/%s/%s/%s" % (
+            service["data_write_paths"]["mount_dir"],
+            service["data_write_paths"]["sub_dir"],
+            service["data_write_paths"]["sub_lvl_dir1"],
+            service["data_write_paths"]["random_data"]
+        ))
+
+    n = hashlib.md5()
+    n.update(returned_data_0[0])
+    checksum_returned_data_0 = n.hexdigest()
+
+    # Verify returned data
+    assert original_checksum == checksum_returned_data_0, \
+        "Checksum does not match with checksum of original data"
+
+    # Unmount the storage
+    cmds = [
+        "umount %s" % (service["data_write_paths"]["mount_dir"]),
+    ]
+
+    for c in cmds:
+        ssh.execute(c)
+
+    return
+
+
+
 def verifyRouterState(apiclient, routerid, state, listall=True):
     """List router and check if the router state matches the given state"""
     retriesCount = 10
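The two helpers added to marvin.lib.common are meant to be used as a pair: write data and record its checksum before the operation under test, then compare against a VM that carries the resulting disk afterwards. A minimal calling sketch follows; the test case attributes, VM and volume names are placeholders, not code from this patch:

# Inside a cloudstackTestCase method; self.vm_1, root_volume and
# vm_from_template stand for objects created earlier by the test.
original_checksum = createChecksum(
    service=self.testdata,
    virtual_machine=self.vm_1,
    disk=root_volume,
    disk_type="rootdiskdevice")

# ... snapshot / template / migration operation under test ...

compareChecksum(
    self.apiclient,
    service=self.testdata,
    original_checksum=original_checksum,
    disk_type="rootdiskdevice",
    virt_machine=vm_from_template)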