diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java index 5483331cc33..3d398ca5dd9 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java @@ -1710,7 +1710,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } } } catch (final Throwable th) { - logger.warn("Caught: ", th); + logger.error("Caught: ", th); answer = new Answer(cmd, false, th.getMessage()); } answers[i] = answer; @@ -1725,7 +1725,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl try { link.send(response.toBytes()); } catch (final ClosedChannelException e) { - logger.warn("Unable to send response because connection is closed: {}", response); + logger.error("Unable to send response because connection is closed: {}", response); } } diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java index ba50d5ff9fa..7af9b6b8492 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java @@ -184,6 +184,7 @@ import com.cloud.vm.dao.SecondaryStorageVmDao; import com.cloud.vm.dao.UserVmCloneSettingDao; import com.cloud.vm.dao.UserVmDao; import com.cloud.vm.dao.VMInstanceDetailsDao; +import com.cloud.vm.dao.VMInstanceDao; public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrationService, Configurable { @@ -270,6 +271,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati ConfigDepot configDepot; @Inject ConfigurationDao configurationDao; + @Inject + VMInstanceDao vmInstanceDao; @Inject protected SnapshotHelper snapshotHelper; @@ -972,9 +975,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati // Create event and update resource count for volumes if vm is a user vm if (vm.getType() == VirtualMachine.Type.User) { - Long offeringId = null; - if (!offering.isComputeOnly()) { offeringId = offering.getId(); } @@ -1943,14 +1944,18 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati if (newSize != vol.getSize()) { DiskOfferingVO diskOffering = diskOfferingDao.findByIdIncludingRemoved(vol.getDiskOfferingId()); - if (newSize > vol.getSize()) { - _resourceLimitMgr.checkPrimaryStorageResourceLimit(_accountMgr.getActiveAccountById(vol.getAccountId()), - vol.isDisplay(), newSize - vol.getSize(), diskOffering); - _resourceLimitMgr.incrementVolumePrimaryStorageResourceCount(vol.getAccountId(), vol.isDisplay(), - newSize - vol.getSize(), diskOffering); - } else { - _resourceLimitMgr.decrementVolumePrimaryStorageResourceCount(vol.getAccountId(), vol.isDisplay(), - vol.getSize() - newSize, diskOffering); + VMInstanceVO vm = vol.getInstanceId() != null ? 
vmInstanceDao.findById(vol.getInstanceId()) : null; + if (vm == null || vm.getType() == VirtualMachine.Type.User) { + // Update the resource count only when the volume is detached or attached to a user vm (skip system vm volumes) + if (newSize > vol.getSize()) { + _resourceLimitMgr.checkPrimaryStorageResourceLimit(_accountMgr.getActiveAccountById(vol.getAccountId()), + vol.isDisplay(), newSize - vol.getSize(), diskOffering); + _resourceLimitMgr.incrementVolumePrimaryStorageResourceCount(vol.getAccountId(), vol.isDisplay(), + newSize - vol.getSize(), diskOffering); + } else { + _resourceLimitMgr.decrementVolumePrimaryStorageResourceCount(vol.getAccountId(), vol.isDisplay(), + vol.getSize() - newSize, diskOffering); + } } vol.setSize(newSize); _volsDao.persist(vol); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java index 11d7aa30bb8..a86efeb8a1f 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java @@ -3473,7 +3473,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv } if (vmSpec.getOs().toLowerCase().contains("window")) { - isWindowsTemplate =true; + isWindowsTemplate = true; } for (final DiskTO volume : disks) { KVMPhysicalDisk physicalDisk = null; @@ -3592,6 +3592,9 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv disk.defNetworkBasedDisk(physicalDisk.getPath().replace("rbd:", ""), pool.getSourceHost(), pool.getSourcePort(), pool.getAuthUserName(), pool.getUuid(), devId, diskBusType, DiskProtocol.RBD, DiskDef.DiskFmtType.RAW); } else if (pool.getType() == StoragePoolType.PowerFlex) { + if (isWindowsTemplate && isUefiEnabled) { + diskBusTypeData = DiskDef.DiskBus.SATA; + } disk.defBlockBasedDisk(physicalDisk.getPath(), devId, diskBusTypeData); if (physicalDisk.getFormat().equals(PhysicalDiskFormat.QCOW2)) { disk.setDiskFormatType(DiskDef.DiskFmtType.QCOW2); @@ -3622,7 +3625,6 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv disk.defFileBasedDisk(physicalDisk.getPath(), devId, diskBusType, DiskDef.DiskFmtType.QCOW2); } } - } pool.customizeLibvirtDiskDef(disk); } @@ -4911,6 +4913,14 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv return token[1]; } } else if (token.length > 3) { + // for powerflex/scaleio, path = /dev/disk/by-id/emc-vol-2202eefc4692120f-540fd8fa00000003 + if (token.length > 4 && StringUtils.isNotBlank(token[4]) && token[4].startsWith("emc-vol-")) { + final String[] emcVolToken = token[4].split("-"); + if (emcVolToken.length == 4) { + return emcVolToken[3]; + } + } + // for example, path = /mnt/pool_uuid/disk_path/ return token[3]; }
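
The PowerFlex branch above derives the ScaleIO volume id from the udev by-id path. A standalone sketch of that parsing, reusing the sample path from the hunk's comment (the class and method names here are illustrative, not CloudStack's):

    public class EmcVolPathParser {
        static String extractVolumeId(String path) {
            // "/dev/disk/by-id/emc-vol-<systemId>-<volumeId>" splits on "/" into five tokens,
            // the last being "emc-vol-2202eefc4692120f-540fd8fa00000003"
            final String[] token = path.split("/");
            if (token.length > 4 && token[4].startsWith("emc-vol-")) {
                final String[] emcVolToken = token[4].split("-");
                if (emcVolToken.length == 4) {
                    return emcVolToken[3]; // the trailing volume id, "540fd8fa00000003"
                }
            }
            return null;
        }

        public static void main(String[] args) {
            System.out.println(extractVolumeId("/dev/disk/by-id/emc-vol-2202eefc4692120f-540fd8fa00000003"));
        }
    }

diff --git a/plugins/metrics/src/main/java/org/apache/cloudstack/metrics/MetricsServiceImpl.java b/plugins/metrics/src/main/java/org/apache/cloudstack/metrics/MetricsServiceImpl.java index b2fc033268c..d2336f3cd58 100644 --- a/plugins/metrics/src/main/java/org/apache/cloudstack/metrics/MetricsServiceImpl.java +++ b/plugins/metrics/src/main/java/org/apache/cloudstack/metrics/MetricsServiceImpl.java @@ -235,7 +235,7 @@ public class MetricsServiceImpl extends MutualExclusiveIdsManagerBase implements @Override public ListResponse searchForVolumeMetricsStats(ListVolumesUsageHistoryCmd cmd) { Pair, Integer> volumeList =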
searchForVolumesInternal(cmd); - Map> volumeStatsList = searchForVolumeMetricsStatsInternal(cmd, volumeList.first()); + Map> volumeStatsList = searchForVolumeMetricsStatsInternal(cmd, volumeList.first()); return createVolumeMetricsStatsResponse(volumeList, volumeStatsList); } diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java index d68d34ce1c0..20ca292f9ec 100644 --- a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java @@ -573,8 +573,8 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver { } } } else { - logger.debug("No encryption configured for data volume [id: {}, uuid: {}, name: {}]", - volumeInfo.getId(), volumeInfo.getUuid(), volumeInfo.getName()); + logger.debug("No encryption configured for volume [id: {}, uuid: {}, name: {}]", + volumeInfo.getId(), volumeInfo.getUuid(), volumeInfo.getName()); } return answer; @@ -1592,7 +1592,7 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver { * @return true if resize is required */ private boolean needsExpansionForEncryptionHeader(long srcSize, long dstSize) { - int headerSize = 32<<20; // ensure we have 32MiB for encryption header + int headerSize = 32 << 20; // ensure we have 32MiB for encryption header return srcSize + headerSize > dstSize; } diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/manager/ScaleIOSDCManagerImpl.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/manager/ScaleIOSDCManagerImpl.java index c13ad61a6cd..3d7d1cf279c 100644 --- a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/manager/ScaleIOSDCManagerImpl.java +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/manager/ScaleIOSDCManagerImpl.java @@ -61,6 +61,15 @@ import com.cloud.utils.exception.CloudRuntimeException; public class ScaleIOSDCManagerImpl implements ScaleIOSDCManager, Configurable { private Logger logger = LogManager.getLogger(getClass()); + static ConfigKey ConnectOnDemand = new ConfigKey<>("Storage", + Boolean.class, + "powerflex.connect.on.demand", + Boolean.TRUE.toString(), + "Connect the PowerFlex client on a host when the first volume is mapped to the SDC and disconnect it when the last volume is unmapped," + + " otherwise take no action (the connection stays in whatever state it is in, connected or disconnected).", + Boolean.TRUE, + ConfigKey.Scope.Zone); + @Inject AgentManager agentManager; @Inject
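
A hedged sketch of how the new zone-scoped setting would typically be consumed; ConfigKey.value() and valueIn(scopeId) are the standard CloudStack accessors, while the wrapper class and zoneId argument here are illustrative:

    public class ConnectOnDemandUsage {
        // Sketch only: assumes ScaleIOSDCManagerImpl.ConnectOnDemand is visible from the
        // same package; valueIn(zoneId) resolves a per-zone override and falls back to
        // the global default (true).
        static boolean shouldManageSdcConnection(Long zoneId) {
            return Boolean.TRUE.equals(ScaleIOSDCManagerImpl.ConnectOnDemand.valueIn(zoneId));
        }
    }

diff --git a/scripts/util/keystore-cert-import b/scripts/util/keystore-cert-import index a7523ca51e2..a9465f273a3 100755 --- a/scripts/util/keystore-cert-import +++ b/scripts/util/keystore-cert-import @@ -122,7 +122,7 @@ if [ -f "$LIBVIRTD_FILE" ]; then ln -sf /etc/pki/libvirt/private/serverkey.pem /etc/pki/libvirt-vnc/server-key.pem cloudstack-setup-agent -s > /dev/null - QEMU_GROUP=$(sed -n 's/^group=//p' /etc/libvirt/qemu.conf | awk -F'"' '{print $2}' | tail -n1) + QEMU_GROUP=$(sed -n 's/^group\s*=//p' /etc/libvirt/qemu.conf | tr -d '"' | tr -d ' ' | tr -d "'" | tail -n1) if [ !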
-z "${QEMU_GROUP// }" ]; then chgrp $QEMU_GROUP /etc/pki/libvirt /etc/pki/libvirt-vnc /etc/pki/CA /etc/pki/libvirt/private /etc/pki/libvirt/servercert.pem /etc/pki/libvirt/private/serverkey.pem /etc/pki/CA/cacert.pem /etc/pki/libvirt-vnc/ca-cert.pem /etc/pki/libvirt-vnc/server-cert.pem /etc/pki/libvirt-vnc/server-key.pem chmod 750 /etc/pki/libvirt /etc/pki/libvirt-vnc /etc/pki/CA /etc/pki/libvirt/private /etc/pki/libvirt/servercert.pem /etc/pki/libvirt/private/serverkey.pem /etc/pki/CA/cacert.pem /etc/pki/libvirt-vnc/ca-cert.pem /etc/pki/libvirt-vnc/server-cert.pem /etc/pki/libvirt-vnc/server-key.pem diff --git a/server/conf/cloudstack-sudoers.in b/server/conf/cloudstack-sudoers.in index 710241022f5..6e799297574 100644 --- a/server/conf/cloudstack-sudoers.in +++ b/server/conf/cloudstack-sudoers.in @@ -18,7 +18,7 @@ # The CloudStack management server needs sudo permissions # without a password. -Cmnd_Alias CLOUDSTACK = /bin/mkdir, /bin/mount, /bin/umount, /bin/cp, /bin/chmod, /usr/bin/keytool, /bin/keytool, /bin/touch, /bin/find, /bin/df, /bin/ls, /bin/qemu-img +Cmnd_Alias CLOUDSTACK = /bin/mkdir, /bin/mount, /bin/umount, /bin/cp, /bin/chmod, /usr/bin/keytool, /bin/keytool, /bin/touch, /bin/find, /bin/df, /bin/ls, /bin/qemu-img, /usr/bin/qemu-img Defaults:@MSUSER@ !requiretty diff --git a/server/src/main/java/com/cloud/server/StatsCollector.java b/server/src/main/java/com/cloud/server/StatsCollector.java index e82d99028d7..70c95bdc70e 100644 --- a/server/src/main/java/com/cloud/server/StatsCollector.java +++ b/server/src/main/java/com/cloud/server/StatsCollector.java @@ -1462,7 +1462,7 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc for (VmDiskStats vmDiskStat : vmDiskStats) { VmDiskStatsEntry vmDiskStatEntry = (VmDiskStatsEntry)vmDiskStat; SearchCriteria sc_volume = _volsDao.createSearchCriteria(); - sc_volume.addAnd("path", SearchCriteria.Op.EQ, vmDiskStatEntry.getPath()); + sc_volume.addAnd("path", SearchCriteria.Op.LIKE, vmDiskStatEntry.getPath() + "%"); List volumes = _volsDao.search(sc_volume, null); if (CollectionUtils.isEmpty(volumes)) diff --git a/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java b/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java index e24c6db604d..a27885527d4 100755 --- a/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java @@ -1965,9 +1965,14 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement Type snapshotType = getSnapshotType(policyId); Account owner = _accountMgr.getAccount(volume.getAccountId()); + ResourceType storeResourceType = ResourceType.secondary_storage; + if (!isBackupSnapshotToSecondaryForZone(volume.getDataCenterId()) || + Snapshot.LocationType.PRIMARY.equals(locationType)) { + storeResourceType = ResourceType.primary_storage; + } try { _resourceLimitMgr.checkResourceLimit(owner, ResourceType.snapshot); - _resourceLimitMgr.checkResourceLimit(owner, ResourceType.secondary_storage, new Long(volume.getSize()).longValue()); + _resourceLimitMgr.checkResourceLimit(owner, storeResourceType, volume.getSize()); } catch (ResourceAllocationException e) { if (snapshotType != Type.MANUAL) { String msg = String.format("Snapshot resource limit exceeded for account %s. 
Failed to create recurring snapshots", owner); @@ -2018,7 +2023,7 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement } CallContext.current().putContextParameter(Snapshot.class, snapshot.getUuid()); _resourceLimitMgr.incrementResourceCount(volume.getAccountId(), ResourceType.snapshot); - _resourceLimitMgr.incrementResourceCount(volume.getAccountId(), ResourceType.secondary_storage, new Long(volume.getSize())); + _resourceLimitMgr.incrementResourceCount(volume.getAccountId(), storeResourceType, volume.getSize()); return snapshot; } diff --git a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java index 26072971f43..cff0b15f9dd 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java @@ -678,7 +678,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir "Wait Interval (in seconds) for shared network vm dhcp ip addr fetch for next iteration ", true); private static final ConfigKey VmIpFetchTrialMax = new ConfigKey("Advanced", Integer.class, "externaldhcp.vmip.max.retry", "10", - "The max number of retrieval times for shared entwork vm dhcp ip fetch, in case of failures", true); + "The max number of retrieval times for shared network vm dhcp ip fetch, in case of failures", true); private static final ConfigKey VmIpFetchThreadPoolMax = new ConfigKey("Advanced", Integer.class, "externaldhcp.vmipFetch.threadPool.max", "10", "number of threads for fetching vms ip address", true); @@ -2705,7 +2705,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir if (vmIdAndCount.getRetrievalCount() <= 0) { vmIdCountMap.remove(nicId); - logger.debug("Vm " + vmId +" nic "+nicId + " count is zero .. removing vm nic from map "); + logger.debug("Vm {} nic {} count is zero .. 
removing vm nic from map ", vmId, nicId); ActionEventUtils.onActionEvent(User.UID_SYSTEM, Account.ACCOUNT_ID_SYSTEM, Domain.ROOT_DOMAIN, EventTypes.EVENT_NETWORK_EXTERNAL_DHCP_VM_IPFETCH, @@ -2714,12 +2714,15 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir continue; } - UserVm userVm = _vmDao.findById(vmId); VMInstanceVO vmInstance = _vmInstanceDao.findById(vmId); NicVO nicVo = _nicDao.findById(nicId); - NetworkVO network = _networkDao.findById(nicVo.getNetworkId()); + if (ObjectUtils.anyNull(userVm, vmInstance, nicVo)) { + logger.warn("Couldn't fetch ip addr, Vm {} or nic {} doesn't exist", vmId, nicId); + continue; + } + NetworkVO network = _networkDao.findById(nicVo.getNetworkId()); VirtualMachineProfile vmProfile = new VirtualMachineProfileImpl(userVm); VirtualMachine vm = vmProfile.getVirtualMachine(); boolean isWindows = _guestOSCategoryDao.findById(_guestOSDao.findById(vm.getGuestOSId()).getCategoryId()).getName().equalsIgnoreCase("Windows"); @@ -5984,7 +5987,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir for (VmDiskStatsEntry vmDiskStat : vmDiskStats) { SearchCriteria sc_volume = _volsDao.createSearchCriteria(); - sc_volume.addAnd("path", SearchCriteria.Op.EQ, vmDiskStat.getPath()); + sc_volume.addAnd("path", SearchCriteria.Op.LIKE, vmDiskStat.getPath() + "%"); List volumes = _volsDao.search(sc_volume, null); if ((volumes == null) || (volumes.size() == 0)) { break; diff --git a/test/integration/smoke/test_deploy_vm_root_resize.py b/test/integration/smoke/test_deploy_vm_root_resize.py index 1ef5d7d6ea6..b9d14e5bdca 100644 --- a/test/integration/smoke/test_deploy_vm_root_resize.py +++ b/test/integration/smoke/test_deploy_vm_root_resize.py @@ -32,6 +32,7 @@ from marvin.codes import FAILED, INVALID_INPUT, PASS,\ RESOURCE_PRIMARY_STORAGE from nose.plugins.attrib import attr from marvin.sshClient import SshClient +import math import time import re from marvin.cloudstackAPI import updateTemplate,registerTemplate @@ -276,6 +277,14 @@ class TestDeployVmRootSize(cloudstackTestCase): self.assertNotEqual(res[2], INVALID_INPUT, "Invalid list VM " "response") rootvolume = list_volume_response[0] + list_volume_pool_response = list_storage_pools( + self.apiclient, + id=rootvolume.storageid + ) + rootvolume_pool = list_volume_pool_response[0] + if rootvolume_pool.type.lower() == "powerflex": + newrootsize = (int(math.ceil(newrootsize / 8) * 8)) + success = False if rootvolume is not None and rootvolume.size == (newrootsize << 30): success = True
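
The math.ceil(newrootsize / 8) * 8 adjustment above encodes PowerFlex's 8 GiB allocation granularity: a requested size is rounded up to the next multiple of 8 GiB before the test compares it against the provisioned volume. The same rounding in a minimal, runnable form:

    public class PowerFlexGranularity {
        // PowerFlex provisions volumes in multiples of 8 GiB, so the tests round the
        // requested size (in GiB) up to the next multiple of 8 before comparing.
        static long roundUpTo8GiB(long requestedGiB) {
            return ((requestedGiB + 7) / 8) * 8;
        }

        public static void main(String[] args) {
            System.out.println(roundUpTo8GiB(10)); // 16
            System.out.println(roundUpTo8GiB(16)); // 16
        }
    }

diff --git a/test/integration/smoke/test_import_unmanage_volumes.py b/test/integration/smoke/test_import_unmanage_volumes.py index 9001e97a79e..fc1c558d70f 100644 --- a/test/integration/smoke/test_import_unmanage_volumes.py +++ b/test/integration/smoke/test_import_unmanage_volumes.py @@ -26,7 +26,11 @@ from marvin.lib.base import (Account, ServiceOffering, DiskOffering, VirtualMachine) -from marvin.lib.common import (get_domain, get_zone, get_suitable_test_template) +from marvin.lib.common import (get_domain, + get_zone, + get_suitable_test_template, + list_volumes, + list_storage_pools) # Import System modules from nose.plugins.attrib import attr @@ -107,6 +111,22 @@ class TestImportAndUnmanageVolumes(cloudstackTestCase): def test_01_detach_unmanage_import_volume(self): """Test attach/detach/unmanage/import volume """ + + volumes = list_volumes( + self.apiclient, + virtualmachineid=self.virtual_machine.id, + type='ROOT', + listall=True + ) + volume = volumes[0] +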
volume_pool_response = list_storage_pools( + self.apiclient, + id=volume.storageid + ) + volume_pool = volume_pool_response[0] + if volume_pool.type.lower() == "powerflex": + self.skipTest("This test is not supported for storage pool type %s on hypervisor KVM" % volume_pool.type) + # Create DATA volume volume = Volume.create( self.apiclient, diff --git a/test/integration/smoke/test_over_provisioning.py b/test/integration/smoke/test_over_provisioning.py index 94e4096b1ef..c2b1a5ac205 100644 --- a/test/integration/smoke/test_over_provisioning.py +++ b/test/integration/smoke/test_over_provisioning.py @@ -60,9 +60,10 @@ class TestUpdateOverProvision(cloudstackTestCase): "The environment don't have storage pools required for test") for pool in storage_pools: - if pool.type == "NetworkFilesystem" or pool.type == "VMFS": + if pool.type == "NetworkFilesystem" or pool.type == "VMFS" or pool.type == "PowerFlex": break - if pool.type != "NetworkFilesystem" and pool.type != "VMFS": + + if pool.type != "NetworkFilesystem" and pool.type != "VMFS" and pool.type != "PowerFlex": raise self.skipTest("Storage overprovisioning currently not supported on " + pool.type + " pools") self.poolId = pool.id @@ -101,6 +102,9 @@ class TestUpdateOverProvision(cloudstackTestCase): """Reset the storage.overprovisioning.factor back to its original value @return: """ + if not hasattr(self, 'poolId'): + return + storage_pools = StoragePool.list( self.apiClient, id = self.poolId diff --git a/test/integration/smoke/test_restore_vm.py b/test/integration/smoke/test_restore_vm.py index 3798bef852a..b961bee39f2 100644 --- a/test/integration/smoke/test_restore_vm.py +++ b/test/integration/smoke/test_restore_vm.py @@ -16,10 +16,13 @@ # under the License. """ P1 tests for Scaling up Vm """ + +import math + # Import Local Modules from marvin.cloudstackTestCase import cloudstackTestCase from marvin.lib.base import (VirtualMachine, Volume, DiskOffering, ServiceOffering, Template) -from marvin.lib.common import (get_zone, get_domain) +from marvin.lib.common import (get_zone, get_domain, list_storage_pools) from nose.plugins.attrib import attr _multiprocess_shared_ = True @@ -78,8 +81,13 @@ class TestRestoreVM(cloudstackTestCase): self._cleanup.append(virtual_machine) old_root_vol = Volume.list(self.apiclient, virtualmachineid=virtual_machine.id)[0] + old_root_vol_pool_res = list_storage_pools(self.apiclient, id=old_root_vol.storageid) + old_root_vol_pool = old_root_vol_pool_res[0] + expected_old_root_vol_size = self.template_t1.size + if old_root_vol_pool.type.lower() == "powerflex": + expected_old_root_vol_size = (int(math.ceil((expected_old_root_vol_size / (1024 ** 3)) / 8) * 8)) * (1024 ** 3) self.assertEqual(old_root_vol.state, 'Ready', "Volume should be in Ready state") - self.assertEqual(old_root_vol.size, self.template_t1.size, "Size of volume and template should match") + self.assertEqual(old_root_vol.size, expected_old_root_vol_size, "Size of volume and template should match") virtual_machine.restore(self.apiclient, self.template_t2.id, expunge=True) @@ -88,8 +96,13 @@ class TestRestoreVM(cloudstackTestCase): self.assertEqual(restored_vm.templateid, self.template_t2.id, "VM's template after restore is incorrect") root_vol = Volume.list(self.apiclient, virtualmachineid=restored_vm.id)[0] + root_vol_pool_res = list_storage_pools(self.apiclient, id=root_vol.storageid) + root_vol_pool = root_vol_pool_res[0] + expected_root_vol_size = self.template_t2.size + if root_vol_pool.type.lower() == "powerflex": + expected_root_vol_size = 
(int(math.ceil((expected_root_vol_size / (1024 ** 3)) / 8) * 8)) * (1024 ** 3) self.assertEqual(root_vol.state, 'Ready', "Volume should be in Ready state") - self.assertEqual(root_vol.size, self.template_t2.size, "Size of volume and template should match") + self.assertEqual(root_vol.size, expected_root_vol_size, "Size of volume and template should match") old_root_vol = Volume.list(self.apiclient, id=old_root_vol.id) self.assertEqual(old_root_vol, None, "Old volume should be deleted") @@ -105,8 +118,13 @@ class TestRestoreVM(cloudstackTestCase): self._cleanup.append(virtual_machine) old_root_vol = Volume.list(self.apiclient, virtualmachineid=virtual_machine.id)[0] + old_root_vol_pool_res = list_storage_pools(self.apiclient, id=old_root_vol.storageid) + old_root_vol_pool = old_root_vol_pool_res[0] + expected_old_root_vol_size = self.template_t1.size + if old_root_vol_pool.type.lower() == "powerflex": + expected_old_root_vol_size = (int(math.ceil((expected_old_root_vol_size / (1024 ** 3)) / 8) * 8)) * (1024 ** 3) self.assertEqual(old_root_vol.state, 'Ready', "Volume should be in Ready state") - self.assertEqual(old_root_vol.size, self.template_t1.size, "Size of volume and template should match") + self.assertEqual(old_root_vol.size, expected_old_root_vol_size, "Size of volume and template should match") virtual_machine.restore(self.apiclient, self.template_t2.id, self.disk_offering.id, expunge=True) @@ -115,9 +133,14 @@ class TestRestoreVM(cloudstackTestCase): self.assertEqual(restored_vm.templateid, self.template_t2.id, "VM's template after restore is incorrect") root_vol = Volume.list(self.apiclient, virtualmachineid=restored_vm.id)[0] + root_vol_pool_res = list_storage_pools(self.apiclient, id=root_vol.storageid) + root_vol_pool = root_vol_pool_res[0] + expected_root_vol_size = self.disk_offering.disksize + if root_vol_pool.type.lower() == "powerflex": + expected_root_vol_size = (int(math.ceil(expected_root_vol_size / 8) * 8)) self.assertEqual(root_vol.diskofferingid, self.disk_offering.id, "Disk offering id should match") self.assertEqual(root_vol.state, 'Ready', "Volume should be in Ready state") - self.assertEqual(root_vol.size, self.disk_offering.disksize * 1024 * 1024 * 1024, + self.assertEqual(root_vol.size, expected_root_vol_size * 1024 * 1024 * 1024, "Size of volume and disk offering should match") old_root_vol = Volume.list(self.apiclient, id=old_root_vol.id) @@ -134,8 +157,13 @@ class TestRestoreVM(cloudstackTestCase): self._cleanup.append(virtual_machine) old_root_vol = Volume.list(self.apiclient, virtualmachineid=virtual_machine.id)[0] + old_root_vol_pool_res = list_storage_pools(self.apiclient, id=old_root_vol.storageid) + old_root_vol_pool = old_root_vol_pool_res[0] + expected_old_root_vol_size = self.template_t1.size + if old_root_vol_pool.type.lower() == "powerflex": + expected_old_root_vol_size = (int(math.ceil((expected_old_root_vol_size / (1024 ** 3)) / 8) * 8)) * (1024 ** 3) self.assertEqual(old_root_vol.state, 'Ready', "Volume should be in Ready state") - self.assertEqual(old_root_vol.size, self.template_t1.size, "Size of volume and template should match") + self.assertEqual(old_root_vol.size, expected_old_root_vol_size, "Size of volume and template should match") virtual_machine.restore(self.apiclient, self.template_t2.id, self.disk_offering.id, rootdisksize=16)
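
The restore tests work at the byte level: they convert the template size to GiB, round up to the 8 GiB granularity, and convert back to bytes. An equivalent sketch, with integer arithmetic standing in for the tests' float-based math.ceil expression:

    public class PowerFlexExpectedSize {
        static final long GiB = 1L << 30;

        // Round a byte size up to PowerFlex's 8 GiB granularity, mirroring
        // math.ceil((size / (1024 ** 3)) / 8) * 8 * (1024 ** 3) from the tests.
        static long expectedSizeBytes(long templateSizeBytes) {
            long gib = (templateSizeBytes + GiB - 1) / GiB; // ceil to whole GiB
            long roundedGib = ((gib + 7) / 8) * 8;          // ceil to a multiple of 8 GiB
            return roundedGib * GiB;
        }

        public static void main(String[] args) {
            System.out.println(expectedSizeBytes(10L * GiB)); // 17179869184 (16 GiB)
        }
    }

diff --git a/test/integration/smoke/test_sharedfs_lifecycle.py b/test/integration/smoke/test_sharedfs_lifecycle.py index f4b2c2fc593..4daf0d7696a 100644 ---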
a/test/integration/smoke/test_sharedfs_lifecycle.py +++ b/test/integration/smoke/test_sharedfs_lifecycle.py @@ -38,7 +38,8 @@ from marvin.lib.base import (Account, ) from marvin.lib.common import (get_domain, get_zone, - get_template) + get_template, + list_storage_pools) from marvin.codes import FAILED from marvin.lib.decoratorGenerators import skipTestIf @@ -258,15 +259,23 @@ class TestSharedFSLifecycle(cloudstackTestCase): def test_resize_shared_fs(self): """Resize the shared filesystem by changing the disk offering and validate """ + sharedfs_pool_response = list_storage_pools(self.apiclient, id=self.sharedfs.storageid) + sharedfs_pool = sharedfs_pool_response[0] + self.mountSharedFSOnVM(self.vm1_ssh_client, self.sharedfs) result = self.vm1_ssh_client.execute("df -Th /mnt/fs1 | grep nfs")[0] self.debug(result) size = result.split()[-5] self.debug("Size of the filesystem is " + size) - self.assertEqual(size, "2.0G", "SharedFS size should be 2.0G") + if sharedfs_pool.type.lower() == "powerflex": + self.assertEqual(size, "8.0G", "SharedFS size should be 8.0G") + new_size = 9 + else: + self.assertEqual(size, "2.0G", "SharedFS size should be 2.0G") + new_size = 3 response = SharedFS.stop(self.sharedfs, self.apiclient) - response = SharedFS.changediskoffering(self.sharedfs, self.apiclient, self.disk_offering.id, 3) + response = SharedFS.changediskoffering(self.sharedfs, self.apiclient, self.disk_offering.id, new_size) self.debug(response) response = SharedFS.start(self.sharedfs, self.apiclient) time.sleep(10) @@ -274,4 +283,7 @@ class TestSharedFSLifecycle(cloudstackTestCase): result = self.vm1_ssh_client.execute("df -Th /mnt/fs1 | grep nfs")[0] size = result.split()[-5] self.debug("Size of the filesystem is " + size) - self.assertEqual(size, "3.0G", "SharedFS size should be 3.0G") + if sharedfs_pool.type.lower() == "powerflex": + self.assertEqual(size, "16G", "SharedFS size should be 16G") + else: + self.assertEqual(size, "3.0G", "SharedFS size should be 3.0G") diff --git a/test/integration/smoke/test_snapshots.py b/test/integration/smoke/test_snapshots.py index f8346093c64..b1a2569d969 100644 --- a/test/integration/smoke/test_snapshots.py +++ b/test/integration/smoke/test_snapshots.py @@ -18,8 +18,10 @@ from marvin.codes import FAILED from nose.plugins.attrib import attr from marvin.cloudstackTestCase import cloudstackTestCase +from marvin.cloudstackException import CloudstackAPIException from marvin.lib.utils import (cleanup_resources, is_snapshot_on_nfs, + is_snapshot_on_powerflex, validateList) from marvin.lib.base import (VirtualMachine, Account, @@ -146,10 +148,16 @@ class TestSnapshotRootDisk(cloudstackTestCase): type='ROOT', listall=True ) + volume = volumes[0] + volume_pool_response = list_storage_pools( + self.apiclient, + id=volume.storageid + ) + volume_pool = volume_pool_response[0] snapshot = Snapshot.create( self.apiclient, - volumes[0].id, + volume.id, account=self.account.name, domainid=self.account.domainid ) @@ -209,6 +217,11 @@ class TestSnapshotRootDisk(cloudstackTestCase): "Check if backup_snap_id is not null" ) + if volume_pool.type.lower() == "powerflex": + self.assertTrue(is_snapshot_on_powerflex( + self.apiclient, self.dbclient, self.config, self.zone.id, snapshot.id)) + return + self.assertTrue(is_snapshot_on_nfs( self.apiclient, self.dbclient, self.config, self.zone.id, snapshot.id)) return @@ -246,6 +259,11 @@ class TestSnapshotRootDisk(cloudstackTestCase): PASS, "Invalid response returned for list volumes") vol_uuid = vol_res[0].id + volume_pool_response = 
list_storage_pools(self.apiclient, + id=vol_res[0].storageid) + volume_pool = volume_pool_response[0] + if volume_pool.type.lower() != 'networkfilesystem': + self.skipTest("This test is not supported for volume created on storage pool type %s" % volume_pool.type) clusters = list_clusters( self.apiclient, zoneid=self.zone.id ) @@ -437,15 +455,16 @@ class TestSnapshotStandaloneBackup(cloudstackTestCase): ) cls._cleanup.append(cls.virtual_machine) - volumes =Volume.list( + volumes = Volume.list( cls.userapiclient, virtualmachineid=cls.virtual_machine.id, type='ROOT', listall=True ) + cls.volume = volumes[0] cls.snapshot = Snapshot.create( cls.userapiclient, - volumes[0].id, + cls.volume.id, account=cls.account.name, domainid=cls.account.domainid ) @@ -475,13 +494,28 @@ class TestSnapshotStandaloneBackup(cloudstackTestCase): """Test creating volume from snapshot """ self.services['volume_from_snapshot']['zoneid'] = self.zone.id - self.volume_from_snap = Volume.create_from_snapshot( - self.userapiclient, - snapshot_id=self.snapshot.id, - services=self.services["volume_from_snapshot"], - account=self.account.name, - domainid=self.account.domainid + snapshot_volume_pool_response = list_storage_pools( + self.apiclient, + id=self.volume.storageid ) + snapshot_volume_pool = snapshot_volume_pool_response[0] + try: + self.volume_from_snap = Volume.create_from_snapshot( + self.userapiclient, + snapshot_id=self.snapshot.id, + services=self.services["volume_from_snapshot"], + account=self.account.name, + domainid=self.account.domainid + ) + except CloudstackAPIException as cs: + self.debug(cs.errorMsg) + if snapshot_volume_pool.type.lower() == "powerflex": + self.assertTrue( + cs.errorMsg.find("Create volume from snapshot is not supported for PowerFlex volume snapshots") > 0, + msg="Unexpected error while creating volume from snapshot for volume on PowerFlex pool") + return + self.fail("Failed to create volume from snapshot: %s" % cs) + self.cleanup.append(self.volume_from_snap) self.assertEqual( diff --git a/test/integration/smoke/test_usage.py b/test/integration/smoke/test_usage.py index a65e4917a46..fef0d8fe3c1 100644 --- a/test/integration/smoke/test_usage.py +++ b/test/integration/smoke/test_usage.py @@ -40,6 +40,7 @@ from marvin.lib.base import (Account, from marvin.lib.common import (get_zone, get_domain, get_suitable_test_template, + list_storage_pools, find_storage_pool_type) @@ -611,17 +612,17 @@ class TestVolumeUsage(cloudstackTestCase): except Exception as e: self.fail("Failed to stop instance: %s" % e) - volume_response = Volume.list( + data_volume_response = Volume.list( self.apiclient, virtualmachineid=self.virtual_machine.id, type='DATADISK', listall=True) self.assertEqual( - isinstance(volume_response, list), + isinstance(data_volume_response, list), True, "Check for valid list volumes response" ) - data_volume = volume_response[0] + data_volume = data_volume_response[0] # Detach data Disk self.debug("Detaching volume ID: %s VM with ID: %s" % ( @@ -769,7 +770,25 @@ class TestVolumeUsage(cloudstackTestCase): "Running", "VM state should be running after deployment" ) - self.virtual_machine.attach_volume(self.apiclient,volume_uploaded) + root_volume_response = Volume.list( + self.apiclient, + virtualmachineid=self.virtual_machine.id, + type='ROOT', + listall=True) + root_volume = root_volume_response[0] + root_volume_pool_response = list_storage_pools( + self.apiclient, + id=root_volume.storageid + ) + root_volume_pool = root_volume_pool_response[0] + try: +
self.virtual_machine.attach_volume(self.apiclient,volume_uploaded) + except Exception as e: + self.debug("Exception %s: " % e) + if root_volume_pool.type.lower() == "powerflex" and "this operation is unsupported on storage pool type PowerFlex" in str(e): + return + self.fail(e) + self.debug("select type from usage_event where offering_id = 6 and volume_id = '%s';" % volume_id) diff --git a/test/integration/smoke/test_vm_autoscaling.py b/test/integration/smoke/test_vm_autoscaling.py index 7ae61ce57da..782d2bce3ad 100644 --- a/test/integration/smoke/test_vm_autoscaling.py +++ b/test/integration/smoke/test_vm_autoscaling.py @@ -22,6 +22,7 @@ Tests of VM Autoscaling import logging import time import datetime +import math from nose.plugins.attrib import attr from marvin.cloudstackTestCase import cloudstackTestCase @@ -53,7 +54,8 @@ from marvin.lib.base import (Account, from marvin.lib.common import (get_domain, get_zone, - get_template) + get_template, + list_storage_pools) from marvin.lib.utils import wait_until MIN_MEMBER = 1 @@ -466,8 +468,10 @@ class TestVmAutoScaling(cloudstackTestCase): def verifyVmProfile(self, vm, autoscalevmprofileid, networkid=None, projectid=None): self.message("Verifying profiles of new VM %s (%s)" % (vm.name, vm.id)) datadisksizeInBytes = None + datadiskpoolid = None diskofferingid = None rootdisksizeInBytes = None + rootdiskpoolid = None sshkeypairs = None affinitygroupIdsArray = [] @@ -496,10 +500,24 @@ class TestVmAutoScaling(cloudstackTestCase): for volume in volumes: if volume.type == 'ROOT': rootdisksizeInBytes = volume.size + rootdiskpoolid = volume.storageid elif volume.type == 'DATADISK': datadisksizeInBytes = volume.size + datadiskpoolid = volume.storageid diskofferingid = volume.diskofferingid + rootdisk_pool_response = list_storage_pools( + self.apiclient, + id=rootdiskpoolid + ) + rootdisk_pool = rootdisk_pool_response[0] + + datadisk_pool_response = list_storage_pools( + self.apiclient, + id=datadiskpoolid + ) + datadisk_pool = datadisk_pool_response[0] + vmprofiles_list = AutoScaleVmProfile.list( self.regular_user_apiclient, listall=True, @@ -522,18 +540,26 @@ class TestVmAutoScaling(cloudstackTestCase): self.assertEquals(templateid, vmprofile.templateid) self.assertEquals(serviceofferingid, vmprofile.serviceofferingid) + rootdisksize = None if vmprofile_otherdeployparams.rootdisksize: - self.assertEquals(int(rootdisksizeInBytes), int(vmprofile_otherdeployparams.rootdisksize) * (1024 ** 3)) + rootdisksize = int(vmprofile_otherdeployparams.rootdisksize) elif vmprofile_otherdeployparams.overridediskofferingid: self.assertEquals(vmprofile_otherdeployparams.overridediskofferingid, self.disk_offering_override.id) - self.assertEquals(int(rootdisksizeInBytes), int(self.disk_offering_override.disksize) * (1024 ** 3)) + rootdisksize = int(self.disk_offering_override.disksize) else: - self.assertEquals(int(rootdisksizeInBytes), int(self.templatesize) * (1024 ** 3)) + rootdisksize = int(self.templatesize) + + if rootdisk_pool.type.lower() == "powerflex": + rootdisksize = (int(math.ceil(rootdisksize / 8) * 8)) + self.assertEquals(int(rootdisksizeInBytes), rootdisksize * (1024 ** 3)) if vmprofile_otherdeployparams.diskofferingid: + self.assertEquals(diskofferingid, vmprofile_otherdeployparams.diskofferingid) if vmprofile_otherdeployparams.disksize: - self.assertEquals(int(datadisksizeInBytes), int(vmprofile_otherdeployparams.disksize) * (1024 ** 3)) + datadisksize = int(vmprofile_otherdeployparams.disksize) + if datadisk_pool.type.lower() == "powerflex": +
datadisksize = (int(math.ceil(datadisksize / 8) * 8)) + self.assertEquals(int(datadisksizeInBytes), datadisksize * (1024 ** 3)) if vmprofile_otherdeployparams.keypairs: self.assertEquals(sshkeypairs, vmprofile_otherdeployparams.keypairs) diff --git a/test/integration/smoke/test_vm_life_cycle.py b/test/integration/smoke/test_vm_life_cycle.py index c7c9a01bd32..8df0b994a55 100644 --- a/test/integration/smoke/test_vm_life_cycle.py +++ b/test/integration/smoke/test_vm_life_cycle.py @@ -1710,8 +1710,8 @@ class TestKVMLiveMigration(cloudstackTestCase): def get_target_pool(self, volid): target_pools = StoragePool.listForMigration(self.apiclient, id=volid) - if len(target_pools) < 1: - self.skipTest("Not enough storage pools found") + if target_pools is None or len(target_pools) == 0: + self.skipTest("Not enough storage pools found for migration") return target_pools[0] diff --git a/test/integration/smoke/test_vm_snapshot_kvm.py b/test/integration/smoke/test_vm_snapshot_kvm.py index 5c133f6e762..9dd7c529de5 100644 --- a/test/integration/smoke/test_vm_snapshot_kvm.py +++ b/test/integration/smoke/test_vm_snapshot_kvm.py @@ -77,6 +77,18 @@ class TestVmSnapshot(cloudstackTestCase): Configurations.update(cls.apiclient, name = "kvm.vmstoragesnapshot.enabled", value = "true") + + cls.services["domainid"] = cls.domain.id + cls.services["small"]["zoneid"] = cls.zone.id + cls.services["zoneid"] = cls.zone.id + + cls.account = Account.create( + cls.apiclient, + cls.services["account"], + domainid=cls.domain.id + ) + cls._cleanup.append(cls.account) + #The version of CentOS has to be supported templ = { "name": "CentOS8", @@ -91,36 +103,33 @@ class TestVmSnapshot(cloudstackTestCase): "directdownload": True, } - template = Template.register(cls.apiclient, templ, zoneid=cls.zone.id, hypervisor=cls.hypervisor) + template = Template.register( + cls.apiclient, + templ, + zoneid=cls.zone.id, + account=cls.account.name, + domainid=cls.account.domainid, + hypervisor=cls.hypervisor + ) if template == FAILED: assert False, "get_template() failed to return template\ with description %s" % cls.services["ostype"] - cls.services["domainid"] = cls.domain.id - cls.services["small"]["zoneid"] = cls.zone.id cls.services["templates"]["ostypeid"] = template.ostypeid - cls.services["zoneid"] = cls.zone.id - cls.account = Account.create( - cls.apiclient, - cls.services["account"], - domainid=cls.domain.id - ) - cls._cleanup.append(cls.account) - - service_offerings_nfs = { + service_offering_nfs = { "name": "nfs", - "displaytext": "nfs", - "cpunumber": 1, - "cpuspeed": 500, - "memory": 512, - "storagetype": "shared", - "customizediops": False, - } + "displaytext": "nfs", + "cpunumber": 1, + "cpuspeed": 500, + "memory": 512, + "storagetype": "shared", + "customizediops": False, + } cls.service_offering = ServiceOffering.create( cls.apiclient, - service_offerings_nfs, + service_offering_nfs, ) cls._cleanup.append(cls.service_offering) @@ -138,7 +147,7 @@ class TestVmSnapshot(cloudstackTestCase): rootdisksize=20, ) cls.random_data_0 = random_gen(size=100) - cls.test_dir = "/tmp" + cls.test_dir = "$HOME" cls.random_data = "random.data" return @@ -201,8 +210,8 @@ class TestVmSnapshot(cloudstackTestCase): self.apiclient, self.virtual_machine.id, MemorySnapshot, - "TestSnapshot", - "Display Text" + "TestVmSnapshot", + "Test VM Snapshot" ) self.assertEqual( vm_snapshot.state, @@ -269,6 +278,8 @@ class TestVmSnapshot(cloudstackTestCase): self.virtual_machine.start(self.apiclient) + time.sleep(30) + try: ssh_client = 
self.virtual_machine.get_ssh_client(reconnect=True) @@ -288,7 +299,7 @@ class TestVmSnapshot(cloudstackTestCase): self.assertEqual( self.random_data_0, result[0], - "Check the random data is equal with the ramdom file!" + "Check the random data is equal with the random file!" ) @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true") @@ -320,7 +331,7 @@ class TestVmSnapshot(cloudstackTestCase): list_snapshot_response = VmSnapshot.list( self.apiclient, virtualmachineid=self.virtual_machine.id, - listall=False) + listall=True) self.debug('list_snapshot_response -------------------- %s' % list_snapshot_response) self.assertIsNone(list_snapshot_response, "snapshot is already deleted") diff --git a/test/integration/smoke/test_vm_snapshots.py b/test/integration/smoke/test_vm_snapshots.py index 07779e78c58..8c106f05a9f 100644 --- a/test/integration/smoke/test_vm_snapshots.py +++ b/test/integration/smoke/test_vm_snapshots.py @@ -27,7 +27,9 @@ from marvin.lib.base import (Account, from marvin.lib.common import (get_zone, get_domain, get_suitable_test_template, + list_volumes, list_snapshots, + list_storage_pools, list_virtual_machines) import time @@ -87,6 +89,18 @@ class TestVmSnapshot(cloudstackTestCase): serviceofferingid=cls.service_offering.id, mode=cls.zone.networktype ) + volumes = list_volumes( + cls.apiclient, + virtualmachineid=cls.virtual_machine.id, + type='ROOT', + listall=True + ) + volume = volumes[0] + volume_pool_response = list_storage_pools( + cls.apiclient, + id=volume.storageid + ) + cls.volume_pool = volume_pool_response[0] cls.random_data_0 = random_gen(size=100) cls.test_dir = "$HOME" cls.random_data = "random.data" @@ -146,15 +160,15 @@ class TestVmSnapshot(cloudstackTestCase): #KVM VM Snapshot needs to set snapshot with memory MemorySnapshot = False - if self.hypervisor.lower() in (KVM.lower()): + if self.hypervisor.lower() in (KVM.lower()) and self.volume_pool.type.lower() != "powerflex": MemorySnapshot = True vm_snapshot = VmSnapshot.create( self.apiclient, self.virtual_machine.id, MemorySnapshot, - "TestSnapshot", - "Display Text" + "TestVmSnapshot", + "Test VM Snapshot" ) self.assertEqual( vm_snapshot.state, @@ -214,7 +228,7 @@ class TestVmSnapshot(cloudstackTestCase): ) #We don't need to stop the VM when taking a VM Snapshot on KVM - if self.hypervisor.lower() in (KVM.lower()): + if self.hypervisor.lower() in (KVM.lower()) and self.volume_pool.type.lower() != "powerflex": pass else: self.virtual_machine.stop(self.apiclient) @@ -224,7 +238,7 @@ class TestVmSnapshot(cloudstackTestCase): list_snapshot_response[0].id) #We don't need to start the VM when taking a VM Snapshot on KVM - if self.hypervisor.lower() in (KVM.lower()): + if self.hypervisor.lower() in (KVM.lower()) and self.volume_pool.type.lower() != "powerflex": pass else: self.virtual_machine.start(self.apiclient) diff --git a/test/integration/smoke/test_volumes.py b/test/integration/smoke/test_volumes.py index 28a029adf70..6cf3f082bc2 100644 --- a/test/integration/smoke/test_volumes.py +++ b/test/integration/smoke/test_volumes.py @@ -19,6 +19,7 @@ import os import tempfile import time +import math import unittest import urllib.error import urllib.parse @@ -42,6 +43,7 @@ from marvin.lib.common import (get_domain, get_zone, find_storage_pool_type, get_pod, + list_storage_pools, list_disk_offering) from marvin.lib.utils import (cleanup_resources, checkVolumeSize) from marvin.lib.utils import (format_volume_to_ext3, @@ -235,7 +237,6 @@ class TestCreateVolume(cloudstackTestCase): "Failed to start VM 
(ID: %s) " % vm.id) timeout = timeout - 1 - vol_sz = str(list_volume_response[0].size) ssh = self.virtual_machine.get_ssh_client( reconnect=True ) @@ -243,6 +244,7 @@ class TestCreateVolume(cloudstackTestCase): list_volume_response = Volume.list( self.apiClient, id=volume.id) + vol_sz = str(list_volume_response[0].size) if list_volume_response[0].hypervisor.lower() == XEN_SERVER.lower(): volume_name = "/dev/xvd" + chr(ord('a') + int(list_volume_response[0].deviceid)) self.debug(" Using XenServer volume_name: %s" % (volume_name)) @@ -533,6 +535,17 @@ class TestVolumes(cloudstackTestCase): # Sleep to ensure the current state will reflected in other calls time.sleep(self.services["sleep"]) + list_volume_response = Volume.list( + self.apiClient, + id=self.volume.id + ) + volume = list_volume_response[0] + + list_volume_pool_response = list_storage_pools(self.apiClient, id=volume.storageid) + volume_pool = list_volume_pool_response[0] + if volume_pool.type.lower() == "powerflex": + self.skipTest("Extract volume operation is unsupported for volumes on storage pool type %s" % volume_pool.type) + cmd = extractVolume.extractVolumeCmd() cmd.id = self.volume.id cmd.mode = "HTTP_DOWNLOAD" @@ -658,7 +671,15 @@ class TestVolumes(cloudstackTestCase): type='DATADISK' ) for vol in list_volume_response: - if vol.id == self.volume.id and int(vol.size) == (int(disk_offering_20_GB.disksize) * (1024 ** 3)) and vol.state == 'Ready': + list_volume_pool_response = list_storage_pools( + self.apiClient, + id=vol.storageid + ) + volume_pool = list_volume_pool_response[0] + disksize = (int(disk_offering_20_GB.disksize)) + if volume_pool.type.lower() == "powerflex": + disksize = (int(math.ceil(disksize / 8) * 8)) + if vol.id == self.volume.id and int(vol.size) == disksize * (1024 ** 3) and vol.state == 'Ready': success = True if success: break @@ -925,7 +946,15 @@ class TestVolumes(cloudstackTestCase): type='DATADISK' ) for vol in list_volume_response: - if vol.id == self.volume.id and int(vol.size) == (20 * (1024 ** 3)) and vol.state == 'Ready': + list_volume_pool_response = list_storage_pools( + self.apiClient, + id=vol.storageid + ) + volume_pool = list_volume_pool_response[0] + disksize = 20 + if volume_pool.type.lower() == "powerflex": + disksize = (int(math.ceil(disksize / 8) * 8)) + if vol.id == self.volume.id and int(vol.size) == disksize * (1024 ** 3) and vol.state == 'Ready': success = True if success: break @@ -1283,7 +1312,6 @@ class TestVolumeEncryption(cloudstackTestCase): "Failed to start VM (ID: %s) " % vm.id) timeout = timeout - 1 - vol_sz = str(list_volume_response[0].size) ssh = virtual_machine.get_ssh_client( reconnect=True ) @@ -1292,6 +1320,7 @@ class TestVolumeEncryption(cloudstackTestCase): list_volume_response = Volume.list( self.apiclient, id=volume.id) + vol_sz = str(list_volume_response[0].size) volume_name = "/dev/vd" + chr(ord('a') + int(list_volume_response[0].deviceid)) self.debug(" Using KVM volume_name: %s" % (volume_name)) @@ -1410,7 +1439,6 @@ class TestVolumeEncryption(cloudstackTestCase): "Failed to start VM (ID: %s) " % vm.id) timeout = timeout - 1 - vol_sz = str(list_volume_response[0].size) ssh = virtual_machine.get_ssh_client( reconnect=True ) @@ -1419,6 +1447,12 @@ class TestVolumeEncryption(cloudstackTestCase): list_volume_response = Volume.list( self.apiclient, id=volume.id) + vol_sz = str(list_volume_response[0].size) + list_volume_pool_response = list_storage_pools(self.apiclient, id=list_volume_response[0].storageid) + volume_pool = list_volume_pool_response[0] + if 
volume_pool.type.lower() == "powerflex": + vol_sz = int(vol_sz) + vol_sz = str(vol_sz - (128 << 20) - ((vol_sz >> 30) * 200704)) volume_name = "/dev/vd" + chr(ord('a') + int(list_volume_response[0].deviceid)) self.debug(" Using KVM volume_name: %s" % (volume_name)) @@ -1543,7 +1577,6 @@ class TestVolumeEncryption(cloudstackTestCase): "Failed to start VM (ID: %s) " % vm.id) timeout = timeout - 1 - vol_sz = str(list_volume_response[0].size) ssh = virtual_machine.get_ssh_client( reconnect=True ) @@ -1552,6 +1585,12 @@ class TestVolumeEncryption(cloudstackTestCase): list_volume_response = Volume.list( self.apiclient, id=volume.id) + vol_sz = str(list_volume_response[0].size) + list_volume_pool_response = list_storage_pools(self.apiclient, id=list_volume_response[0].storageid) + volume_pool = list_volume_pool_response[0] + if volume_pool.type.lower() == "powerflex": + vol_sz = int(vol_sz) + vol_sz = str(vol_sz - (128 << 20) - ((vol_sz >> 30) * 200704)) volume_name = "/dev/vd" + chr(ord('a') + int(list_volume_response[0].deviceid)) self.debug(" Using KVM volume_name: %s" % (volume_name)) diff --git a/tools/marvin/marvin/lib/utils.py b/tools/marvin/marvin/lib/utils.py index f80eccf1159..c822a587dfc 100644 --- a/tools/marvin/marvin/lib/utils.py +++ b/tools/marvin/marvin/lib/utils.py @@ -300,12 +300,63 @@ def get_hypervisor_version(apiclient): assert hosts_list_validation_result[0] == PASS, "host list validation failed" return hosts_list_validation_result[1].hypervisorversion +def is_snapshot_on_powerflex(apiclient, dbconn, config, zoneid, snapshotid): + """ + Checks whether a snapshot with uuid `snapshotid` has its backing entry on the PowerFlex primary storage + + @param apiclient: api client connection + @param dbconn: connection to the cloudstack db + @param config: marvin configuration file + @param zoneid: uuid of the zone where the primary storage pool resides + @param snapshotid: uuid of the snapshot + @return: True if snapshot is found, False otherwise + """ + + qresultset = dbconn.execute( + "SELECT id FROM snapshots WHERE uuid = '%s';" \ + % str(snapshotid) + ) + if len(qresultset) == 0: + raise Exception( + "No snapshot found in cloudstack with id %s" % snapshotid) + + + snapshotid = qresultset[0][0] + qresultset = dbconn.execute( + "SELECT install_path, store_id FROM snapshot_store_ref WHERE snapshot_id='%s' AND store_role='Primary';" % snapshotid + ) + + assert isinstance(qresultset, list), "Invalid db query response for snapshot %s" % snapshotid + + if len(qresultset) == 0: + #Snapshot does not exist + return False + + from .base import StoragePool + #pass store_id to get the exact storage pool where snapshot is stored + primaryStores = StoragePool.list(apiclient, zoneid=zoneid, id=int(qresultset[0][1])) + + assert isinstance(primaryStores, list), "Not a valid response for listStoragePools" + assert len(primaryStores) != 0, "No storage pools found in zone %s" % zoneid + + primaryStore = primaryStores[0] + + if str(primaryStore.provider).lower() != "powerflex": + raise Exception( + "is_snapshot_on_powerflex works only against powerflex storage pool.
found %s" % str(primaryStore.provider)) + + snapshotPath = str(qresultset[0][0]) + if not snapshotPath: + return False + + return True + def is_snapshot_on_nfs(apiclient, dbconn, config, zoneid, snapshotid): """ Checks whether a snapshot with id (not UUID) `snapshotid` is present on the nfs storage @param apiclient: api client connection - @param @dbconn: connection to the cloudstack db + @param dbconn: connection to the cloudstack db @param config: marvin configuration file @param zoneid: uuid of the zone on which the secondary nfs storage pool is mounted @param snapshotid: uuid of the snapshot diff --git a/ui/src/components/view/ResourceView.vue b/ui/src/components/view/ResourceView.vue index 17d2a13e157..4359afd7868 100644 --- a/ui/src/components/view/ResourceView.vue +++ b/ui/src/components/view/ResourceView.vue @@ -33,17 +33,17 @@ :is="tabs[0].component" :resource="resource" :loading="loading" - :tab="tabs[0].name" /> + :tab="tabName(tabs[0])" /> -