Mirror of https://github.com/apache/cloudstack.git

commit ada750e391
Merge branch '4.20'
@@ -1710,7 +1710,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
                     }
                 }
             } catch (final Throwable th) {
-                logger.warn("Caught: ", th);
+                logger.error("Caught: ", th);
                 answer = new Answer(cmd, false, th.getMessage());
             }
             answers[i] = answer;
@@ -1725,7 +1725,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
             try {
                 link.send(response.toBytes());
             } catch (final ClosedChannelException e) {
-                logger.warn("Unable to send response because connection is closed: {}", response);
+                logger.error("Unable to send response because connection is closed: {}", response);
             }
         }
 
@@ -184,6 +184,7 @@ import com.cloud.vm.dao.SecondaryStorageVmDao;
 import com.cloud.vm.dao.UserVmCloneSettingDao;
 import com.cloud.vm.dao.UserVmDao;
 import com.cloud.vm.dao.VMInstanceDetailsDao;
+import com.cloud.vm.dao.VMInstanceDao;
 
 public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrationService, Configurable {
 
@@ -270,6 +271,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
     ConfigDepot configDepot;
     @Inject
     ConfigurationDao configurationDao;
+    @Inject
+    VMInstanceDao vmInstanceDao;
 
     @Inject
     protected SnapshotHelper snapshotHelper;
@@ -972,9 +975,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
 
         // Create event and update resource count for volumes if vm is a user vm
         if (vm.getType() == VirtualMachine.Type.User) {
-
             Long offeringId = null;
-
             if (!offering.isComputeOnly()) {
                 offeringId = offering.getId();
             }
@@ -1943,6 +1944,9 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
 
         if (newSize != vol.getSize()) {
             DiskOfferingVO diskOffering = diskOfferingDao.findByIdIncludingRemoved(vol.getDiskOfferingId());
+            VMInstanceVO vm = vol.getInstanceId() != null ? vmInstanceDao.findById(vol.getInstanceId()) : null;
+            if (vm == null || vm.getType() == VirtualMachine.Type.User) {
+                // Update resource count for user vm volumes when volume is attached
             if (newSize > vol.getSize()) {
                 _resourceLimitMgr.checkPrimaryStorageResourceLimit(_accountMgr.getActiveAccountById(vol.getAccountId()),
                         vol.isDisplay(), newSize - vol.getSize(), diskOffering);
@@ -1952,6 +1956,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
                 _resourceLimitMgr.decrementVolumePrimaryStorageResourceCount(vol.getAccountId(), vol.isDisplay(),
                         vol.getSize() - newSize, diskOffering);
             }
+            }
             vol.setSize(newSize);
             _volsDao.persist(vol);
         }
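
Note: with this change, resize accounting touches primary-storage resource counts only when the volume is detached or attached to a user VM. A minimal sketch of that guard, in Python for illustration (not the Java source):

    def should_adjust_resource_count(vm):
        # Detached volumes (vm is None) and user-VM volumes are counted;
        # system VM volumes are skipped.
        return vm is None or vm.type == "User"

    assert should_adjust_resource_count(None) is True
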
@@ -3592,6 +3592,9 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
                     disk.defNetworkBasedDisk(physicalDisk.getPath().replace("rbd:", ""), pool.getSourceHost(), pool.getSourcePort(), pool.getAuthUserName(),
                             pool.getUuid(), devId, diskBusType, DiskProtocol.RBD, DiskDef.DiskFmtType.RAW);
                 } else if (pool.getType() == StoragePoolType.PowerFlex) {
+                    if (isWindowsTemplate && isUefiEnabled) {
+                        diskBusTypeData = DiskDef.DiskBus.SATA;
+                    }
                     disk.defBlockBasedDisk(physicalDisk.getPath(), devId, diskBusTypeData);
                     if (physicalDisk.getFormat().equals(PhysicalDiskFormat.QCOW2)) {
                         disk.setDiskFormatType(DiskDef.DiskFmtType.QCOW2);
@@ -3622,7 +3625,6 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
                     disk.defFileBasedDisk(physicalDisk.getPath(), devId, diskBusType, DiskDef.DiskFmtType.QCOW2);
                 }
             }
-
         }
         pool.customizeLibvirtDiskDef(disk);
     }
@@ -4911,6 +4913,14 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
                 return token[1];
             }
         } else if (token.length > 3) {
+            // for powerflex/scaleio, path = /dev/disk/by-id/emc-vol-2202eefc4692120f-540fd8fa00000003
+            if (token.length > 4 && StringUtils.isNotBlank(token[4]) && token[4].startsWith("emc-vol-")) {
+                final String[] emcVolToken = token[4].split("-");
+                if (emcVolToken.length == 4) {
+                    return emcVolToken[3];
+                }
+            }
+
             // for example, path = /mnt/pool_uuid/disk_path/
             return token[3];
         }
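
Note: a worked example of the new PowerFlex/ScaleIO path parsing above, mirrored in plain Python for illustration:

    path = "/dev/disk/by-id/emc-vol-2202eefc4692120f-540fd8fa00000003"
    token = path.split("/")        # ['', 'dev', 'disk', 'by-id', 'emc-vol-...']
    assert token[4].startswith("emc-vol-")
    emc_vol_token = token[4].split("-")
    # ['emc', 'vol', '2202eefc4692120f', '540fd8fa00000003'] -> last element is the volume id
    assert emc_vol_token[3] == "540fd8fa00000003"
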
@@ -573,7 +573,7 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
                 }
             }
         } else {
-            logger.debug("No encryption configured for data volume [id: {}, uuid: {}, name: {}]",
+            logger.debug("No encryption configured for volume [id: {}, uuid: {}, name: {}]",
                     volumeInfo.getId(), volumeInfo.getUuid(), volumeInfo.getName());
         }
 
@@ -61,6 +61,15 @@ import com.cloud.utils.exception.CloudRuntimeException;
 public class ScaleIOSDCManagerImpl implements ScaleIOSDCManager, Configurable {
     private Logger logger = LogManager.getLogger(getClass());
 
+    static ConfigKey<Boolean> ConnectOnDemand = new ConfigKey<>("Storage",
+            Boolean.class,
+            "powerflex.connect.on.demand",
+            Boolean.TRUE.toString(),
+            "Connect PowerFlex client on Host when first Volume is mapped to SDC and disconnect when last Volume is unmapped from SDC," +
+                    " otherwise no action (that is connection remains in the same state whichever it is, connected or disconnected).",
+            Boolean.TRUE,
+            ConfigKey.Scope.Zone);
+
     @Inject
     AgentManager agentManager;
     @Inject
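
Note: the new zone-scoped powerflex.connect.on.demand setting defaults to true. Its documented behavior, paraphrased as Python pseudologic (illustrative only, not the Java implementation):

    def sdc_connected(connect_on_demand, mapped_volumes, currently_connected):
        if not connect_on_demand:
            return currently_connected      # leave the connection as-is
        return mapped_volumes > 0           # connect on first map, disconnect on last unmap
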
@@ -122,7 +122,7 @@ if [ -f "$LIBVIRTD_FILE" ]; then
   ln -sf /etc/pki/libvirt/private/serverkey.pem /etc/pki/libvirt-vnc/server-key.pem
   cloudstack-setup-agent -s > /dev/null
 
-  QEMU_GROUP=$(sed -n 's/^group=//p' /etc/libvirt/qemu.conf | awk -F'"' '{print $2}' | tail -n1)
+  QEMU_GROUP=$(sed -n 's/^group\s*=//p' /etc/libvirt/qemu.conf | tr -d '"' | tr -d ' ' | tr -d "'" | tail -n1)
   if [ ! -z "${QEMU_GROUP// }" ]; then
     chgrp $QEMU_GROUP /etc/pki/libvirt /etc/pki/libvirt-vnc /etc/pki/CA /etc/pki/libvirt/private /etc/pki/libvirt/servercert.pem /etc/pki/libvirt/private/serverkey.pem /etc/pki/CA/cacert.pem /etc/pki/libvirt-vnc/ca-cert.pem /etc/pki/libvirt-vnc/server-cert.pem /etc/pki/libvirt-vnc/server-key.pem
     chmod 750 /etc/pki/libvirt /etc/pki/libvirt-vnc /etc/pki/CA /etc/pki/libvirt/private /etc/pki/libvirt/servercert.pem /etc/pki/libvirt/private/serverkey.pem /etc/pki/CA/cacert.pem /etc/pki/libvirt-vnc/ca-cert.pem /etc/pki/libvirt-vnc/server-cert.pem /etc/pki/libvirt-vnc/server-key.pem
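
Note: the old sed pattern only matched group=... with no whitespace and relied on awk splitting on double quotes; the new pipeline also tolerates spaces and single quotes. Equivalent parsing sketched in Python (the config line is a hypothetical /etc/libvirt/qemu.conf entry):

    import re
    line = "group = 'qemu'"                       # hypothetical qemu.conf entry
    value = re.sub(r"^group\s*=", "", line)       # like: sed -n 's/^group\s*=//p'
    for ch in ('"', " ", "'"):                    # like: tr -d '"' | tr -d ' ' | tr -d "'"
        value = value.replace(ch, "")
    assert value == "qemu"
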
@@ -18,7 +18,7 @@
 # The CloudStack management server needs sudo permissions
 # without a password.
 
-Cmnd_Alias CLOUDSTACK = /bin/mkdir, /bin/mount, /bin/umount, /bin/cp, /bin/chmod, /usr/bin/keytool, /bin/keytool, /bin/touch, /bin/find, /bin/df, /bin/ls, /bin/qemu-img
+Cmnd_Alias CLOUDSTACK = /bin/mkdir, /bin/mount, /bin/umount, /bin/cp, /bin/chmod, /usr/bin/keytool, /bin/keytool, /bin/touch, /bin/find, /bin/df, /bin/ls, /bin/qemu-img, /usr/bin/qemu-img
 
 Defaults:@MSUSER@ !requiretty
 
@@ -1462,7 +1462,7 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc
             for (VmDiskStats vmDiskStat : vmDiskStats) {
                 VmDiskStatsEntry vmDiskStatEntry = (VmDiskStatsEntry)vmDiskStat;
                 SearchCriteria<VolumeVO> sc_volume = _volsDao.createSearchCriteria();
-                sc_volume.addAnd("path", SearchCriteria.Op.EQ, vmDiskStatEntry.getPath());
+                sc_volume.addAnd("path", SearchCriteria.Op.LIKE, vmDiskStatEntry.getPath() + "%");
                 List<VolumeVO> volumes = _volsDao.search(sc_volume, null);
 
                 if (CollectionUtils.isEmpty(volumes))
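
Note: switching from Op.EQ to Op.LIKE with a trailing % turns the volume lookup into a prefix match, so a stored volume path that extends the path reported in the disk stats still resolves. A hypothetical illustration (paths invented for the example):

    stored_path = "540fd8fa00000003-suffix"      # hypothetical DB value
    reported_path = "540fd8fa00000003"           # as reported in the disk stats
    assert stored_path != reported_path           # Op.EQ would find nothing
    assert stored_path.startswith(reported_path)  # Op.LIKE 'reported%' matches
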
@@ -1965,9 +1965,14 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement
         Type snapshotType = getSnapshotType(policyId);
         Account owner = _accountMgr.getAccount(volume.getAccountId());
 
+        ResourceType storeResourceType = ResourceType.secondary_storage;
+        if (!isBackupSnapshotToSecondaryForZone(volume.getDataCenterId()) ||
+                Snapshot.LocationType.PRIMARY.equals(locationType)) {
+            storeResourceType = ResourceType.primary_storage;
+        }
         try {
             _resourceLimitMgr.checkResourceLimit(owner, ResourceType.snapshot);
-            _resourceLimitMgr.checkResourceLimit(owner, ResourceType.secondary_storage, new Long(volume.getSize()).longValue());
+            _resourceLimitMgr.checkResourceLimit(owner, storeResourceType, volume.getSize());
         } catch (ResourceAllocationException e) {
             if (snapshotType != Type.MANUAL) {
                 String msg = String.format("Snapshot resource limit exceeded for account %s. Failed to create recurring snapshots", owner);
@@ -2018,7 +2023,7 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement
         }
         CallContext.current().putContextParameter(Snapshot.class, snapshot.getUuid());
         _resourceLimitMgr.incrementResourceCount(volume.getAccountId(), ResourceType.snapshot);
-        _resourceLimitMgr.incrementResourceCount(volume.getAccountId(), ResourceType.secondary_storage, new Long(volume.getSize()));
+        _resourceLimitMgr.incrementResourceCount(volume.getAccountId(), storeResourceType, volume.getSize());
         return snapshot;
     }
 
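
Note: the snapshot limit check and usage increment now charge the storage tier that actually holds the snapshot. The decision rule, paraphrased as a Python sketch (not the Java source):

    def store_resource_type(backup_to_secondary, location_type):
        if not backup_to_secondary or location_type == "PRIMARY":
            return "primary_storage"
        return "secondary_storage"

    assert store_resource_type(True, "SECONDARY") == "secondary_storage"
    assert store_resource_type(False, None) == "primary_storage"
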
@@ -678,7 +678,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
             "Wait Interval (in seconds) for shared network vm dhcp ip addr fetch for next iteration ", true);
 
     private static final ConfigKey<Integer> VmIpFetchTrialMax = new ConfigKey<Integer>("Advanced", Integer.class, "externaldhcp.vmip.max.retry", "10",
-            "The max number of retrieval times for shared entwork vm dhcp ip fetch, in case of failures", true);
+            "The max number of retrieval times for shared network vm dhcp ip fetch, in case of failures", true);
 
     private static final ConfigKey<Integer> VmIpFetchThreadPoolMax = new ConfigKey<Integer>("Advanced", Integer.class, "externaldhcp.vmipFetch.threadPool.max", "10",
             "number of threads for fetching vms ip address", true);
@@ -2705,7 +2705,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
 
                 if (vmIdAndCount.getRetrievalCount() <= 0) {
                     vmIdCountMap.remove(nicId);
-                    logger.debug("Vm " + vmId +" nic "+nicId + " count is zero .. removing vm nic from map ");
+                    logger.debug("Vm {} nic {} count is zero .. removing vm nic from map ", vmId, nicId);
 
                     ActionEventUtils.onActionEvent(User.UID_SYSTEM, Account.ACCOUNT_ID_SYSTEM,
                             Domain.ROOT_DOMAIN, EventTypes.EVENT_NETWORK_EXTERNAL_DHCP_VM_IPFETCH,
@@ -2714,12 +2714,15 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
                     continue;
                 }
 
 
                 UserVm userVm = _vmDao.findById(vmId);
                 VMInstanceVO vmInstance = _vmInstanceDao.findById(vmId);
                 NicVO nicVo = _nicDao.findById(nicId);
-                NetworkVO network = _networkDao.findById(nicVo.getNetworkId());
+                if (ObjectUtils.anyNull(userVm, vmInstance, nicVo)) {
+                    logger.warn("Couldn't fetch ip addr, Vm {} or nic {} doesn't exists", vmId, nicId);
+                    continue;
+                }
 
+                NetworkVO network = _networkDao.findById(nicVo.getNetworkId());
                 VirtualMachineProfile vmProfile = new VirtualMachineProfileImpl(userVm);
                 VirtualMachine vm = vmProfile.getVirtualMachine();
                 boolean isWindows = _guestOSCategoryDao.findById(_guestOSDao.findById(vm.getGuestOSId()).getCategoryId()).getName().equalsIgnoreCase("Windows");
@@ -5984,7 +5987,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
 
         for (VmDiskStatsEntry vmDiskStat : vmDiskStats) {
             SearchCriteria<VolumeVO> sc_volume = _volsDao.createSearchCriteria();
-            sc_volume.addAnd("path", SearchCriteria.Op.EQ, vmDiskStat.getPath());
+            sc_volume.addAnd("path", SearchCriteria.Op.LIKE, vmDiskStat.getPath() + "%");
             List<VolumeVO> volumes = _volsDao.search(sc_volume, null);
             if ((volumes == null) || (volumes.size() == 0)) {
                 break;
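
Note: ObjectUtils.anyNull guards the ip-fetch task against VM or nic rows that were removed between scheduling and execution. The equivalent check in Python, for illustration:

    def any_null(*objs):
        return any(o is None for o in objs)

    user_vm, vm_instance, nic_vo = None, object(), object()   # hypothetical lookups
    if any_null(user_vm, vm_instance, nic_vo):
        pass  # log a warning and continue with the next nic
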
@@ -32,6 +32,7 @@ from marvin.codes import FAILED, INVALID_INPUT, PASS,\
     RESOURCE_PRIMARY_STORAGE
 from nose.plugins.attrib import attr
 from marvin.sshClient import SshClient
+import math
 import time
 import re
 from marvin.cloudstackAPI import updateTemplate,registerTemplate
@@ -276,6 +277,14 @@ class TestDeployVmRootSize(cloudstackTestCase):
         self.assertNotEqual(res[2], INVALID_INPUT, "Invalid list VM "
                                                    "response")
         rootvolume = list_volume_response[0]
+        list_volume_pool_response = list_storage_pools(
+            self.apiclient,
+            id=rootvolume.storageid
+        )
+        rootvolume_pool = list_volume_pool_response[0]
+        if rootvolume_pool.type.lower() == "powerflex":
+            newrootsize = (int(math.ceil(newrootsize / 8) * 8))
+
         success = False
         if rootvolume is not None and rootvolume.size == (newrootsize << 30):
             success = True
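
Note: PowerFlex provisions volumes in 8 GiB increments, which is why this test rounds the expected root size up to the next multiple of 8. A worked example:

    import math
    newrootsize = 10                               # requested GiB
    assert int(math.ceil(newrootsize / 8) * 8) == 16
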
@@ -26,7 +26,11 @@ from marvin.lib.base import (Account,
                              ServiceOffering,
                              DiskOffering,
                              VirtualMachine)
-from marvin.lib.common import (get_domain, get_zone, get_suitable_test_template)
+from marvin.lib.common import (get_domain,
+                               get_zone,
+                               get_suitable_test_template,
+                               list_volumes,
+                               list_storage_pools)
 
 # Import System modules
 from nose.plugins.attrib import attr
@@ -107,6 +111,22 @@ class TestImportAndUnmanageVolumes(cloudstackTestCase):
     def test_01_detach_unmanage_import_volume(self):
         """Test attach/detach/unmanage/import volume
         """
+
+        volumes = list_volumes(
+            self.apiclient,
+            virtualmachineid=self.virtual_machine.id,
+            type='ROOT',
+            listall=True
+        )
+        volume = volumes[0]
+        volume_pool_response = list_storage_pools(
+            self.apiclient,
+            id=volume.storageid
+        )
+        volume_pool = volume_pool_response[0]
+        if volume_pool.type.lower() == "powerflex":
+            self.skipTest("This test is not supported for storage pool type %s on hypervisor KVM" % volume_pool.type)
+
         # Create DATA volume
         volume = Volume.create(
             self.apiclient,
@@ -60,9 +60,10 @@ class TestUpdateOverProvision(cloudstackTestCase):
                 "The environment don't have storage pools required for test")
 
         for pool in storage_pools:
-            if pool.type == "NetworkFilesystem" or pool.type == "VMFS":
+            if pool.type == "NetworkFilesystem" or pool.type == "VMFS" or pool.type == "PowerFlex":
                 break
-        if pool.type != "NetworkFilesystem" and pool.type != "VMFS":
+        if pool.type != "NetworkFilesystem" and pool.type != "VMFS" and pool.type != "PowerFlex":
             raise self.skipTest("Storage overprovisioning currently not supported on " + pool.type + " pools")
 
         self.poolId = pool.id
@@ -101,6 +102,9 @@ class TestUpdateOverProvision(cloudstackTestCase):
         """Reset the storage.overprovisioning.factor back to its original value
         @return:
         """
+        if not hasattr(self, 'poolId'):
+            return
+
         storage_pools = StoragePool.list(
             self.apiClient,
             id = self.poolId
@@ -16,10 +16,13 @@
 # under the License.
 """ P1 tests for Scaling up Vm
 """
+
+import math
+
 # Import Local Modules
 from marvin.cloudstackTestCase import cloudstackTestCase
 from marvin.lib.base import (VirtualMachine, Volume, DiskOffering, ServiceOffering, Template)
-from marvin.lib.common import (get_zone, get_domain)
+from marvin.lib.common import (get_zone, get_domain, list_storage_pools)
 from nose.plugins.attrib import attr
 
 _multiprocess_shared_ = True
@@ -78,8 +81,13 @@ class TestRestoreVM(cloudstackTestCase):
         self._cleanup.append(virtual_machine)
 
         old_root_vol = Volume.list(self.apiclient, virtualmachineid=virtual_machine.id)[0]
+        old_root_vol_pool_res = list_storage_pools(self.apiclient, id=old_root_vol.storageid)
+        old_root_vol_pool = old_root_vol_pool_res[0]
+        expected_old_root_vol_size = self.template_t1.size
+        if old_root_vol_pool.type.lower() == "powerflex":
+            expected_old_root_vol_size = (int(math.ceil((expected_old_root_vol_size / (1024 ** 3)) / 8) * 8)) * (1024 ** 3)
         self.assertEqual(old_root_vol.state, 'Ready', "Volume should be in Ready state")
-        self.assertEqual(old_root_vol.size, self.template_t1.size, "Size of volume and template should match")
+        self.assertEqual(old_root_vol.size, expected_old_root_vol_size, "Size of volume and template should match")
 
         virtual_machine.restore(self.apiclient, self.template_t2.id, expunge=True)
 
@@ -88,8 +96,13 @@ class TestRestoreVM(cloudstackTestCase):
         self.assertEqual(restored_vm.templateid, self.template_t2.id, "VM's template after restore is incorrect")
 
         root_vol = Volume.list(self.apiclient, virtualmachineid=restored_vm.id)[0]
+        root_vol_pool_res = list_storage_pools(self.apiclient, id=root_vol.storageid)
+        root_vol_pool = root_vol_pool_res[0]
+        expected_root_vol_size = self.template_t2.size
+        if root_vol_pool.type.lower() == "powerflex":
+            expected_root_vol_size = (int(math.ceil((expected_root_vol_size / (1024 ** 3)) / 8) * 8)) * (1024 ** 3)
         self.assertEqual(root_vol.state, 'Ready', "Volume should be in Ready state")
-        self.assertEqual(root_vol.size, self.template_t2.size, "Size of volume and template should match")
+        self.assertEqual(root_vol.size, expected_root_vol_size, "Size of volume and template should match")
 
         old_root_vol = Volume.list(self.apiclient, id=old_root_vol.id)
         self.assertEqual(old_root_vol, None, "Old volume should be deleted")
@@ -105,8 +118,13 @@ class TestRestoreVM(cloudstackTestCase):
         self._cleanup.append(virtual_machine)
 
         old_root_vol = Volume.list(self.apiclient, virtualmachineid=virtual_machine.id)[0]
+        old_root_vol_pool_res = list_storage_pools(self.apiclient, id=old_root_vol.storageid)
+        old_root_vol_pool = old_root_vol_pool_res[0]
+        expected_old_root_vol_size = self.template_t1.size
+        if old_root_vol_pool.type.lower() == "powerflex":
+            expected_old_root_vol_size = (int(math.ceil((expected_old_root_vol_size / (1024 ** 3)) / 8) * 8)) * (1024 ** 3)
         self.assertEqual(old_root_vol.state, 'Ready', "Volume should be in Ready state")
-        self.assertEqual(old_root_vol.size, self.template_t1.size, "Size of volume and template should match")
+        self.assertEqual(old_root_vol.size, expected_old_root_vol_size, "Size of volume and template should match")
 
         virtual_machine.restore(self.apiclient, self.template_t2.id, self.disk_offering.id, expunge=True)
 
@@ -115,9 +133,14 @@ class TestRestoreVM(cloudstackTestCase):
         self.assertEqual(restored_vm.templateid, self.template_t2.id, "VM's template after restore is incorrect")
 
         root_vol = Volume.list(self.apiclient, virtualmachineid=restored_vm.id)[0]
+        root_vol_pool_res = list_storage_pools(self.apiclient, id=root_vol.storageid)
+        root_vol_pool = root_vol_pool_res[0]
+        expected_root_vol_size = self.disk_offering.disksize
+        if root_vol_pool.type.lower() == "powerflex":
+            expected_root_vol_size = (int(math.ceil(expected_root_vol_size / 8) * 8))
         self.assertEqual(root_vol.diskofferingid, self.disk_offering.id, "Disk offering id should match")
         self.assertEqual(root_vol.state, 'Ready', "Volume should be in Ready state")
-        self.assertEqual(root_vol.size, self.disk_offering.disksize * 1024 * 1024 * 1024,
+        self.assertEqual(root_vol.size, expected_root_vol_size * 1024 * 1024 * 1024,
                          "Size of volume and disk offering should match")
 
         old_root_vol = Volume.list(self.apiclient, id=old_root_vol.id)
@@ -134,8 +157,13 @@ class TestRestoreVM(cloudstackTestCase):
         self._cleanup.append(virtual_machine)
 
         old_root_vol = Volume.list(self.apiclient, virtualmachineid=virtual_machine.id)[0]
+        old_root_vol_pool_res = list_storage_pools(self.apiclient, id=old_root_vol.storageid)
+        old_root_vol_pool = old_root_vol_pool_res[0]
+        expected_old_root_vol_size = self.template_t1.size
+        if old_root_vol_pool.type.lower() == "powerflex":
+            expected_old_root_vol_size = (int(math.ceil((expected_old_root_vol_size / (1024 ** 3)) / 8) * 8)) * (1024 ** 3)
         self.assertEqual(old_root_vol.state, 'Ready', "Volume should be in Ready state")
-        self.assertEqual(old_root_vol.size, self.template_t1.size, "Size of volume and template should match")
+        self.assertEqual(old_root_vol.size, expected_old_root_vol_size, "Size of volume and template should match")
 
         virtual_machine.restore(self.apiclient, self.template_t2.id, self.disk_offering.id, rootdisksize=16)
 
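
Note: the byte-level variant used in these tests converts to GiB, rounds up to a multiple of 8, and converts back; e.g. a 2 GiB template is expected to land on an 8 GiB volume:

    import math
    size = 2 * (1024 ** 3)
    expected = int(math.ceil((size / (1024 ** 3)) / 8) * 8) * (1024 ** 3)
    assert expected == 8 * (1024 ** 3)
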
@@ -38,7 +38,8 @@ from marvin.lib.base import (Account,
                              )
 from marvin.lib.common import (get_domain,
                                get_zone,
-                               get_template)
+                               get_template,
+                               list_storage_pools)
 from marvin.codes import FAILED
 
 from marvin.lib.decoratorGenerators import skipTestIf
@@ -258,15 +259,23 @@ class TestSharedFSLifecycle(cloudstackTestCase):
     def test_resize_shared_fs(self):
         """Resize the shared filesystem by changing the disk offering and validate
         """
+        sharedfs_pool_response = list_storage_pools(self.apiclient, id=self.sharedfs.storageid)
+        sharedfs_pool = sharedfs_pool_response[0]
+
         self.mountSharedFSOnVM(self.vm1_ssh_client, self.sharedfs)
         result = self.vm1_ssh_client.execute("df -Th /mnt/fs1 | grep nfs")[0]
         self.debug(result)
         size = result.split()[-5]
         self.debug("Size of the filesystem is " + size)
+        if sharedfs_pool.type.lower() == "powerflex":
+            self.assertEqual(size, "8.0G", "SharedFS size should be 8.0G")
+            new_size = 9
+        else:
             self.assertEqual(size, "2.0G", "SharedFS size should be 2.0G")
+            new_size = 3
 
         response = SharedFS.stop(self.sharedfs, self.apiclient)
-        response = SharedFS.changediskoffering(self.sharedfs, self.apiclient, self.disk_offering.id, 3)
+        response = SharedFS.changediskoffering(self.sharedfs, self.apiclient, self.disk_offering.id, new_size)
         self.debug(response)
         response = SharedFS.start(self.sharedfs, self.apiclient)
         time.sleep(10)
@@ -274,4 +283,7 @@ class TestSharedFSLifecycle(cloudstackTestCase):
         result = self.vm1_ssh_client.execute("df -Th /mnt/fs1 | grep nfs")[0]
         size = result.split()[-5]
         self.debug("Size of the filesystem is " + size)
+        if sharedfs_pool.type.lower() == "powerflex":
+            self.assertEqual(size, "16G", "SharedFS size should be 16G")
+        else:
             self.assertEqual(size, "3.0G", "SharedFS size should be 3.0G")
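
Note: the same 8 GiB granularity explains the PowerFlex branches above: a 2 GiB shared filesystem shows up as 8.0G, and resizing to 9 GiB lands on 16G:

    import math
    assert int(math.ceil(2 / 8) * 8) == 8
    assert int(math.ceil(9 / 8) * 8) == 16
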
@@ -18,8 +18,10 @@
 from marvin.codes import FAILED
 from nose.plugins.attrib import attr
 from marvin.cloudstackTestCase import cloudstackTestCase
+from marvin.cloudstackException import CloudstackAPIException
 from marvin.lib.utils import (cleanup_resources,
                               is_snapshot_on_nfs,
+                              is_snapshot_on_powerflex,
                               validateList)
 from marvin.lib.base import (VirtualMachine,
                              Account,
@@ -146,10 +148,16 @@ class TestSnapshotRootDisk(cloudstackTestCase):
             type='ROOT',
             listall=True
         )
+        volume = volumes[0]
+        volume_pool_response = list_storage_pools(
+            self.apiclient,
+            id=volume.storageid
+        )
+        volume_pool = volume_pool_response[0]
 
         snapshot = Snapshot.create(
             self.apiclient,
-            volumes[0].id,
+            volume.id,
             account=self.account.name,
             domainid=self.account.domainid
         )
@@ -209,6 +217,11 @@ class TestSnapshotRootDisk(cloudstackTestCase):
             "Check if backup_snap_id is not null"
         )
 
+        if volume_pool.type.lower() == "powerflex":
+            self.assertTrue(is_snapshot_on_powerflex(
+                self.apiclient, self.dbclient, self.config, self.zone.id, snapshot.id))
+            return
+
         self.assertTrue(is_snapshot_on_nfs(
             self.apiclient, self.dbclient, self.config, self.zone.id, snapshot.id))
         return
@@ -246,6 +259,11 @@ class TestSnapshotRootDisk(cloudstackTestCase):
             PASS,
             "Invalid response returned for list volumes")
         vol_uuid = vol_res[0].id
+        volume_pool_response = list_storage_pools(self.apiclient,
+                                                  id=vol_res[0].storageid)
+        volume_pool = volume_pool_response[0]
+        if volume_pool.type.lower() != 'networkfilesystem':
+            self.skipTest("This test is not supported for volume created on storage pool type %s" % volume_pool.type)
         clusters = list_clusters(
             self.apiclient,
             zoneid=self.zone.id
@@ -443,9 +461,10 @@ class TestSnapshotStandaloneBackup(cloudstackTestCase):
             type='ROOT',
             listall=True
         )
+        cls.volume = volumes[0]
         cls.snapshot = Snapshot.create(
             cls.userapiclient,
-            volumes[0].id,
+            cls.volume.id,
             account=cls.account.name,
             domainid=cls.account.domainid
         )
@@ -475,6 +494,12 @@ class TestSnapshotStandaloneBackup(cloudstackTestCase):
         """Test creating volume from snapshot
         """
         self.services['volume_from_snapshot']['zoneid'] = self.zone.id
+        snapshot_volume_pool_response = list_storage_pools(
+            self.apiclient,
+            id=self.volume.storageid
+        )
+        snapshot_volume_pool = snapshot_volume_pool_response[0]
+        try:
             self.volume_from_snap = Volume.create_from_snapshot(
                 self.userapiclient,
                 snapshot_id=self.snapshot.id,
@@ -482,6 +507,15 @@ class TestSnapshotStandaloneBackup(cloudstackTestCase):
                 account=self.account.name,
                 domainid=self.account.domainid
             )
+        except CloudstackAPIException as cs:
+            self.debug(cs.errorMsg)
+            if snapshot_volume_pool.type.lower() == "powerflex":
+                self.assertTrue(
+                    cs.errorMsg.find("Create volume from snapshot is not supported for PowerFlex volume snapshots") > 0,
+                    msg="Other than unsupported error while creating volume from snapshot for volume on PowerFlex pool")
+                return
+            self.fail("Failed to create volume from snapshot: %s" % cs)
 
         self.cleanup.append(self.volume_from_snap)
 
         self.assertEqual(
@@ -40,6 +40,7 @@ from marvin.lib.base import (Account,
 from marvin.lib.common import (get_zone,
                                get_domain,
                                get_suitable_test_template,
+                               list_storage_pools,
                                find_storage_pool_type)
 
 
@@ -611,17 +612,17 @@ class TestVolumeUsage(cloudstackTestCase):
         except Exception as e:
             self.fail("Failed to stop instance: %s" % e)
 
-        volume_response = Volume.list(
+        data_volume_response = Volume.list(
             self.apiclient,
             virtualmachineid=self.virtual_machine.id,
             type='DATADISK',
             listall=True)
         self.assertEqual(
-            isinstance(volume_response, list),
+            isinstance(data_volume_response, list),
             True,
             "Check for valid list volumes response"
         )
-        data_volume = volume_response[0]
+        data_volume = data_volume_response[0]
 
         # Detach data Disk
         self.debug("Detaching volume ID: %s VM with ID: %s" % (
@@ -769,7 +770,25 @@ class TestVolumeUsage(cloudstackTestCase):
             "Running",
             "VM state should be running after deployment"
         )
+        root_volume_response = Volume.list(
+            self.apiclient,
+            virtualmachineid=self.virtual_machine.id,
+            type='ROOT',
+            listall=True)
+        root_volume = root_volume_response[0]
+        rool_volume_pool_response = list_storage_pools(
+            self.apiclient,
+            id=root_volume.storageid
+        )
+        rool_volume_pool = rool_volume_pool_response[0]
+        try:
             self.virtual_machine.attach_volume(self.apiclient,volume_uploaded)
+        except Exception as e:
+            self.debug("Exception %s: " % e)
+            if rool_volume_pool.type.lower() == "powerflex" and "this operation is unsupported on storage pool type PowerFlex" in str(e):
+                return
+            self.fail(e)
+
         self.debug("select type from usage_event where offering_id = 6 and volume_id = '%s';"
                    % volume_id)
 
@@ -22,6 +22,7 @@ Tests of VM Autoscaling
 import logging
 import time
 import datetime
+import math
 
 from nose.plugins.attrib import attr
 from marvin.cloudstackTestCase import cloudstackTestCase
@@ -53,7 +54,8 @@ from marvin.lib.base import (Account,
 
 from marvin.lib.common import (get_domain,
                                get_zone,
-                               get_template)
+                               get_template,
+                               list_storage_pools)
 from marvin.lib.utils import wait_until
 
 MIN_MEMBER = 1
@@ -466,8 +468,10 @@ class TestVmAutoScaling(cloudstackTestCase):
     def verifyVmProfile(self, vm, autoscalevmprofileid, networkid=None, projectid=None):
        self.message("Verifying profiles of new VM %s (%s)" % (vm.name, vm.id))
        datadisksizeInBytes = None
+       datadiskpoolid = None
        diskofferingid = None
        rootdisksizeInBytes = None
+       rootdiskpoolid = None
        sshkeypairs = None
 
        affinitygroupIdsArray = []
@@ -496,10 +500,24 @@ class TestVmAutoScaling(cloudstackTestCase):
         for volume in volumes:
             if volume.type == 'ROOT':
                 rootdisksizeInBytes = volume.size
+                rootdiskpoolid = volume.storageid
             elif volume.type == 'DATADISK':
                 datadisksizeInBytes = volume.size
+                datadiskpoolid = volume.storageid
                 diskofferingid = volume.diskofferingid
 
+        rootdisk_pool_response = list_storage_pools(
+            self.apiclient,
+            id=rootdiskpoolid
+        )
+        rootdisk_pool = rootdisk_pool_response[0]
+
+        datadisk_pool_response = list_storage_pools(
+            self.apiclient,
+            id=datadiskpoolid
+        )
+        datadisk_pool = datadisk_pool_response[0]
+
         vmprofiles_list = AutoScaleVmProfile.list(
             self.regular_user_apiclient,
             listall=True,
@@ -522,18 +540,26 @@ class TestVmAutoScaling(cloudstackTestCase):
         self.assertEquals(templateid, vmprofile.templateid)
         self.assertEquals(serviceofferingid, vmprofile.serviceofferingid)
 
+        rootdisksize = None
         if vmprofile_otherdeployparams.rootdisksize:
-            self.assertEquals(int(rootdisksizeInBytes), int(vmprofile_otherdeployparams.rootdisksize) * (1024 ** 3))
+            rootdisksize = int(vmprofile_otherdeployparams.rootdisksize)
         elif vmprofile_otherdeployparams.overridediskofferingid:
             self.assertEquals(vmprofile_otherdeployparams.overridediskofferingid, self.disk_offering_override.id)
-            self.assertEquals(int(rootdisksizeInBytes), int(self.disk_offering_override.disksize) * (1024 ** 3))
+            rootdisksize = int(self.disk_offering_override.disksize)
         else:
-            self.assertEquals(int(rootdisksizeInBytes), int(self.templatesize) * (1024 ** 3))
+            rootdisksize = int(self.templatesize)
 
+        if rootdisk_pool.type.lower() == "powerflex":
+            rootdisksize = (int(math.ceil(rootdisksize / 8) * 8))
+        self.assertEquals(int(rootdisksizeInBytes), rootdisksize * (1024 ** 3))
+
         if vmprofile_otherdeployparams.diskofferingid:
             self.assertEquals(diskofferingid, vmprofile_otherdeployparams.diskofferingid)
         if vmprofile_otherdeployparams.disksize:
-            self.assertEquals(int(datadisksizeInBytes), int(vmprofile_otherdeployparams.disksize) * (1024 ** 3))
+            datadisksize = int(vmprofile_otherdeployparams.disksize)
+            if datadisk_pool.type.lower() == "powerflex":
+                datadisksize = (int(math.ceil(datadisksize / 8) * 8))
+            self.assertEquals(int(datadisksizeInBytes), datadisksize * (1024 ** 3))
 
         if vmprofile_otherdeployparams.keypairs:
             self.assertEquals(sshkeypairs, vmprofile_otherdeployparams.keypairs)
@@ -1710,8 +1710,8 @@ class TestKVMLiveMigration(cloudstackTestCase):
     def get_target_pool(self, volid):
         target_pools = StoragePool.listForMigration(self.apiclient, id=volid)
 
-        if len(target_pools) < 1:
-            self.skipTest("Not enough storage pools found")
+        if target_pools is None or len(target_pools) == 0:
+            self.skipTest("Not enough storage pools found for migration")
 
         return target_pools[0]
 
@@ -77,6 +77,18 @@ class TestVmSnapshot(cloudstackTestCase):
             Configurations.update(cls.apiclient,
                 name = "kvm.vmstoragesnapshot.enabled",
                 value = "true")
+
+        cls.services["domainid"] = cls.domain.id
+        cls.services["small"]["zoneid"] = cls.zone.id
+        cls.services["zoneid"] = cls.zone.id
+
+        cls.account = Account.create(
+            cls.apiclient,
+            cls.services["account"],
+            domainid=cls.domain.id
+        )
+        cls._cleanup.append(cls.account)
+
         #The version of CentOS has to be supported
         templ = {
             "name": "CentOS8",
@@ -91,24 +103,21 @@ class TestVmSnapshot(cloudstackTestCase):
             "directdownload": True,
         }
 
-        template = Template.register(cls.apiclient, templ, zoneid=cls.zone.id, hypervisor=cls.hypervisor)
+        template = Template.register(
+            cls.apiclient,
+            templ,
+            zoneid=cls.zone.id,
+            account=cls.account.name,
+            domainid=cls.account.domainid,
+            hypervisor=cls.hypervisor
+        )
         if template == FAILED:
             assert False, "get_template() failed to return template\
                 with description %s" % cls.services["ostype"]
 
-        cls.services["domainid"] = cls.domain.id
-        cls.services["small"]["zoneid"] = cls.zone.id
         cls.services["templates"]["ostypeid"] = template.ostypeid
-        cls.services["zoneid"] = cls.zone.id
 
-        cls.account = Account.create(
-            cls.apiclient,
-            cls.services["account"],
-            domainid=cls.domain.id
-        )
-        cls._cleanup.append(cls.account)
-
-        service_offerings_nfs = {
+        service_offering_nfs = {
             "name": "nfs",
             "displaytext": "nfs",
             "cpunumber": 1,
@@ -120,7 +129,7 @@ class TestVmSnapshot(cloudstackTestCase):
 
         cls.service_offering = ServiceOffering.create(
             cls.apiclient,
-            service_offerings_nfs,
+            service_offering_nfs,
         )
 
         cls._cleanup.append(cls.service_offering)
@@ -138,7 +147,7 @@ class TestVmSnapshot(cloudstackTestCase):
             rootdisksize=20,
         )
         cls.random_data_0 = random_gen(size=100)
-        cls.test_dir = "/tmp"
+        cls.test_dir = "$HOME"
         cls.random_data = "random.data"
         return
 
@@ -201,8 +210,8 @@ class TestVmSnapshot(cloudstackTestCase):
             self.apiclient,
             self.virtual_machine.id,
             MemorySnapshot,
-            "TestSnapshot",
-            "Display Text"
+            "TestVmSnapshot",
+            "Test VM Snapshot"
         )
         self.assertEqual(
             vm_snapshot.state,
@@ -269,6 +278,8 @@ class TestVmSnapshot(cloudstackTestCase):
 
         self.virtual_machine.start(self.apiclient)
 
+        time.sleep(30)
+
         try:
             ssh_client = self.virtual_machine.get_ssh_client(reconnect=True)
 
@@ -288,7 +299,7 @@ class TestVmSnapshot(cloudstackTestCase):
         self.assertEqual(
             self.random_data_0,
             result[0],
-            "Check the random data is equal with the ramdom file!"
+            "Check the random data is equal with the random file!"
        )
 
@@ -320,7 +331,7 @@ class TestVmSnapshot(cloudstackTestCase):
         list_snapshot_response = VmSnapshot.list(
             self.apiclient,
             virtualmachineid=self.virtual_machine.id,
-            listall=False)
+            listall=True)
         self.debug('list_snapshot_response -------------------- %s' % list_snapshot_response)
 
         self.assertIsNone(list_snapshot_response, "snapshot is already deleted")
@@ -27,7 +27,9 @@ from marvin.lib.base import (Account,
 from marvin.lib.common import (get_zone,
                                get_domain,
                                get_suitable_test_template,
+                               list_volumes,
                                list_snapshots,
+                               list_storage_pools,
                                list_virtual_machines)
 import time
 
@@ -87,6 +89,18 @@ class TestVmSnapshot(cloudstackTestCase):
             serviceofferingid=cls.service_offering.id,
             mode=cls.zone.networktype
         )
+        volumes = list_volumes(
+            cls.apiclient,
+            virtualmachineid=cls.virtual_machine.id,
+            type='ROOT',
+            listall=True
+        )
+        volume = volumes[0]
+        volume_pool_response = list_storage_pools(
+            cls.apiclient,
+            id=volume.storageid
+        )
+        cls.volume_pool = volume_pool_response[0]
         cls.random_data_0 = random_gen(size=100)
         cls.test_dir = "$HOME"
         cls.random_data = "random.data"
@@ -146,15 +160,15 @@ class TestVmSnapshot(cloudstackTestCase):
 
         #KVM VM Snapshot needs to set snapshot with memory
         MemorySnapshot = False
-        if self.hypervisor.lower() in (KVM.lower()):
+        if self.hypervisor.lower() in (KVM.lower()) and self.volume_pool.type.lower() != "powerflex":
             MemorySnapshot = True
 
         vm_snapshot = VmSnapshot.create(
             self.apiclient,
             self.virtual_machine.id,
             MemorySnapshot,
-            "TestSnapshot",
-            "Display Text"
+            "TestVmSnapshot",
+            "Test VM Snapshot"
         )
         self.assertEqual(
             vm_snapshot.state,
@@ -214,7 +228,7 @@ class TestVmSnapshot(cloudstackTestCase):
         )
 
         #We don't need to stop the VM when taking a VM Snapshot on KVM
-        if self.hypervisor.lower() in (KVM.lower()):
+        if self.hypervisor.lower() in (KVM.lower()) and self.volume_pool.type.lower() != "powerflex":
             pass
         else:
             self.virtual_machine.stop(self.apiclient)
@@ -224,7 +238,7 @@ class TestVmSnapshot(cloudstackTestCase):
             list_snapshot_response[0].id)
 
         #We don't need to start the VM when taking a VM Snapshot on KVM
-        if self.hypervisor.lower() in (KVM.lower()):
+        if self.hypervisor.lower() in (KVM.lower()) and self.volume_pool.type.lower() != "powerflex":
             pass
         else:
             self.virtual_machine.start(self.apiclient)
|||||||
@@ -19,6 +19,7 @@
 import os
 import tempfile
 import time
+import math
 import unittest
 import urllib.error
 import urllib.parse

@@ -42,6 +43,7 @@ from marvin.lib.common import (get_domain,
                                get_zone,
                                find_storage_pool_type,
                                get_pod,
+                               list_storage_pools,
                                list_disk_offering)
 from marvin.lib.utils import (cleanup_resources, checkVolumeSize)
 from marvin.lib.utils import (format_volume_to_ext3,
@@ -235,7 +237,6 @@ class TestCreateVolume(cloudstackTestCase):
                 "Failed to start VM (ID: %s) " % vm.id)
             timeout = timeout - 1
 
-        vol_sz = str(list_volume_response[0].size)
         ssh = self.virtual_machine.get_ssh_client(
             reconnect=True
         )

@@ -243,6 +244,7 @@ class TestCreateVolume(cloudstackTestCase):
         list_volume_response = Volume.list(
             self.apiClient,
             id=volume.id)
+        vol_sz = str(list_volume_response[0].size)
         if list_volume_response[0].hypervisor.lower() == XEN_SERVER.lower():
             volume_name = "/dev/xvd" + chr(ord('a') + int(list_volume_response[0].deviceid))
             self.debug(" Using XenServer volume_name: %s" % (volume_name))
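
The vol_sz read moves below the fresh Volume.list call, so the later size assertion compares against the same, up-to-date listVolumes response rather than a value captured before the VM finished starting (relevant on PowerFlex, where the allocated size can differ from the requested one). A hypothetical helper capturing the idea, not part of the patch:

    def current_size_bytes(apiclient, volume_id):
        # Re-list the volume immediately before reading its size; the value
        # can change between calls (resize, pool-side rounding).
        return int(Volume.list(apiclient, id=volume_id)[0].size)
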
@@ -533,6 +535,17 @@ class TestVolumes(cloudstackTestCase):
         # Sleep to ensure the current state will reflected in other calls
         time.sleep(self.services["sleep"])
 
+        list_volume_response = Volume.list(
+            self.apiClient,
+            id=self.volume.id
+        )
+        volume = list_volume_response[0]
+
+        list_volume_pool_response = list_storage_pools(self.apiClient, id=volume.storageid)
+        volume_pool = list_volume_pool_response[0]
+        if volume_pool.type.lower() == "powerflex":
+            self.skipTest("Extract volume operation is unsupported for volumes on storage pool type %s" % volume_pool.type)
+
         cmd = extractVolume.extractVolumeCmd()
         cmd.id = self.volume.id
         cmd.mode = "HTTP_DOWNLOAD"
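
The guard above resolves the storage pool backing the volume and skips the extract test on PowerFlex, where the operation is unsupported. A hypothetical helper, not part of the patch, that factors the pattern out for reuse:

    def skip_if_on_powerflex(testcase, apiclient, volume, operation):
        # Look up the primary storage pool backing the volume, then skip
        # the test when it is a PowerFlex (ScaleIO) pool.
        pool = list_storage_pools(apiclient, id=volume.storageid)[0]
        if pool.type.lower() == "powerflex":
            testcase.skipTest("%s is unsupported on storage pool type %s" % (operation, pool.type))
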
@@ -658,7 +671,15 @@ class TestVolumes(cloudstackTestCase):
                 type='DATADISK'
             )
             for vol in list_volume_response:
-                if vol.id == self.volume.id and int(vol.size) == (int(disk_offering_20_GB.disksize) * (1024 ** 3)) and vol.state == 'Ready':
+                list_volume_pool_response = list_storage_pools(
+                    self.apiClient,
+                    id=vol.storageid
+                )
+                volume_pool = list_volume_pool_response[0]
+                disksize = (int(disk_offering_20_GB.disksize))
+                if volume_pool.type.lower() == "powerflex":
+                    disksize = (int(math.ceil(disksize / 8) * 8))
+                if vol.id == self.volume.id and int(vol.size) == disksize * (1024 ** 3) and vol.state == 'Ready':
                     success = True
             if success:
                 break
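
The adjusted expectation reflects PowerFlex's allocation granularity: capacity is provisioned in 8 GiB increments, so a 20 GB disk offering materializes as 24 GiB. A worked sketch of the rounding (Python 3's true division keeps math.ceil meaningful here; integer // would round down instead):

    import math

    def expected_powerflex_size_gb(requested_gb):
        # Round the requested size up to the next multiple of 8 GiB,
        # matching how PowerFlex carves out volumes.
        return int(math.ceil(requested_gb / 8) * 8)

    assert expected_powerflex_size_gb(20) == 24  # the disk offering case above
    assert expected_powerflex_size_gb(16) == 16  # aligned sizes are unchanged
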
@@ -925,7 +946,15 @@ class TestVolumes(cloudstackTestCase):
                 type='DATADISK'
             )
             for vol in list_volume_response:
-                if vol.id == self.volume.id and int(vol.size) == (20 * (1024 ** 3)) and vol.state == 'Ready':
+                list_volume_pool_response = list_storage_pools(
+                    self.apiClient,
+                    id=vol.storageid
+                )
+                volume_pool = list_volume_pool_response[0]
+                disksize = 20
+                if volume_pool.type.lower() == "powerflex":
+                    disksize = (int(math.ceil(disksize / 8) * 8))
+                if vol.id == self.volume.id and int(vol.size) == disksize * (1024 ** 3) and vol.state == 'Ready':
                     success = True
             if success:
                 break
@@ -1283,7 +1312,6 @@ class TestVolumeEncryption(cloudstackTestCase):
                 "Failed to start VM (ID: %s) " % vm.id)
             timeout = timeout - 1
 
-        vol_sz = str(list_volume_response[0].size)
         ssh = virtual_machine.get_ssh_client(
             reconnect=True
         )

@@ -1292,6 +1320,7 @@ class TestVolumeEncryption(cloudstackTestCase):
         list_volume_response = Volume.list(
             self.apiclient,
             id=volume.id)
+        vol_sz = str(list_volume_response[0].size)
 
         volume_name = "/dev/vd" + chr(ord('a') + int(list_volume_response[0].deviceid))
         self.debug(" Using KVM volume_name: %s" % (volume_name))
@@ -1410,7 +1439,6 @@ class TestVolumeEncryption(cloudstackTestCase):
                 "Failed to start VM (ID: %s) " % vm.id)
             timeout = timeout - 1
 
-        vol_sz = str(list_volume_response[0].size)
         ssh = virtual_machine.get_ssh_client(
             reconnect=True
         )

@@ -1419,6 +1447,12 @@ class TestVolumeEncryption(cloudstackTestCase):
         list_volume_response = Volume.list(
             self.apiclient,
             id=volume.id)
+        vol_sz = str(list_volume_response[0].size)
+        list_volume_pool_response = list_storage_pools(self.apiclient, id=list_volume_response[0].storageid)
+        volume_pool = list_volume_pool_response[0]
+        if volume_pool.type.lower() == "powerflex":
+            vol_sz = int(vol_sz)
+            vol_sz = str(vol_sz - (128 << 20) - ((vol_sz >> 30) * 200704))
 
         volume_name = "/dev/vd" + chr(ord('a') + int(list_volume_response[0].deviceid))
         self.debug(" Using KVM volume_name: %s" % (volume_name))
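
On PowerFlex the encryption tests now shrink the expected guest-visible size before asserting: a fixed 128 MiB plus 200704 bytes (196 KiB) for every full GiB of raw capacity are subtracted. The constants come from the diff itself and appear to account for the encryption header and per-GiB metadata; restated as a standalone sketch:

    def usable_encrypted_size(raw_bytes):
        # 128 MiB fixed overhead, plus 196 KiB per full GiB of raw size.
        return raw_bytes - (128 << 20) - ((raw_bytes >> 30) * 200704)
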
@@ -1543,7 +1577,6 @@ class TestVolumeEncryption(cloudstackTestCase):
                 "Failed to start VM (ID: %s) " % vm.id)
             timeout = timeout - 1
 
-        vol_sz = str(list_volume_response[0].size)
         ssh = virtual_machine.get_ssh_client(
             reconnect=True
         )

@@ -1552,6 +1585,12 @@ class TestVolumeEncryption(cloudstackTestCase):
         list_volume_response = Volume.list(
             self.apiclient,
             id=volume.id)
+        vol_sz = str(list_volume_response[0].size)
+        list_volume_pool_response = list_storage_pools(self.apiclient, id=list_volume_response[0].storageid)
+        volume_pool = list_volume_pool_response[0]
+        if volume_pool.type.lower() == "powerflex":
+            vol_sz = int(vol_sz)
+            vol_sz = str(vol_sz - (128 << 20) - ((vol_sz >> 30) * 200704))
 
         volume_name = "/dev/vd" + chr(ord('a') + int(list_volume_response[0].deviceid))
         self.debug(" Using KVM volume_name: %s" % (volume_name))

@@ -300,12 +300,63 @@ def get_hypervisor_version(apiclient):
     assert hosts_list_validation_result[0] == PASS, "host list validation failed"
     return hosts_list_validation_result[1].hypervisorversion
 
+def is_snapshot_on_powerflex(apiclient, dbconn, config, zoneid, snapshotid):
+    """
+    Checks whether the snapshot with uuid `snapshotid` is present on the powerflex primary storage
+
+    @param apiclient: api client connection
+    @param dbconn: connection to the cloudstack db
+    @param config: marvin configuration file
+    @param zoneid: uuid of the zone of the primary storage pool
+    @param snapshotid: uuid of the snapshot
+    @return: True if snapshot is found, False otherwise
+    """
+
+    qresultset = dbconn.execute(
+        "SELECT id FROM snapshots WHERE uuid = '%s';" \
+        % str(snapshotid)
+    )
+    if len(qresultset) == 0:
+        raise Exception(
+            "No snapshot found in cloudstack with id %s" % snapshotid)
+
+
+    snapshotid = qresultset[0][0]
+    qresultset = dbconn.execute(
+        "SELECT install_path, store_id FROM snapshot_store_ref WHERE snapshot_id='%s' AND store_role='Primary';" % snapshotid
+    )
+
+    assert isinstance(qresultset, list), "Invalid db query response for snapshot %s" % snapshotid
+
+    if len(qresultset) == 0:
+        #Snapshot does not exist
+        return False
+
+    from .base import StoragePool
+    #pass store_id to get the exact storage pool where snapshot is stored
+    primaryStores = StoragePool.list(apiclient, zoneid=zoneid, id=int(qresultset[0][1]))
+
+    assert isinstance(primaryStores, list), "Not a valid response for listStoragePools"
+    assert len(primaryStores) != 0, "No storage pools found in zone %s" % zoneid
+
+    primaryStore = primaryStores[0]
+
+    if str(primaryStore.provider).lower() != "powerflex":
+        raise Exception(
+            "is_snapshot_on_powerflex works only against powerflex storage pool. found %s" % str(primaryStore.provider))
+
+    snapshotPath = str(qresultset[0][0])
+    if not snapshotPath:
+        return False
+
+    return True
+
 def is_snapshot_on_nfs(apiclient, dbconn, config, zoneid, snapshotid):
     """
     Checks whether a snapshot with id (not UUID) `snapshotid` is present on the nfs storage
 
     @param apiclient: api client connection
-    @param @dbconn: connection to the cloudstack db
+    @param dbconn: connection to the cloudstack db
     @param config: marvin configuration file
     @param zoneid: uuid of the zone on which the secondary nfs storage pool is mounted
     @param snapshotid: uuid of the snapshot

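is_snapshot_on_powerflex mirrors the existing is_snapshot_on_nfs helper: it resolves the snapshot's internal id, reads snapshot_store_ref rows with store_role='Primary', and verifies the referenced pool's provider before trusting the install_path. A hypothetical call site inside a Marvin test (self.dbclient and self.config are the standard cloudstackTestCase attributes):

    self.assertTrue(
        is_snapshot_on_powerflex(
            self.apiclient,
            self.dbclient,
            self.config,
            self.zone.id,
            snapshot.id
        ),
        "snapshot was not found on the PowerFlex primary storage")
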
@@ -33,17 +33,17 @@
         :is="tabs[0].component"
         :resource="resource"
         :loading="loading"
-        :tab="tabs[0].name" />
+        :tab="tabName(tabs[0])" />
     </keep-alive>
     <a-tabs
       v-else
       style="width: 100%; margin-top: -12px"
       :animated="false"
-      :activeKey="activeTab || tabs[0].name"
+      :activeKey="activeTab || tabName(tabs[0])"
       @change="onTabChange" >
-      <template v-for="tab in tabs" :key="tab.name">
+      <template v-for="tab in tabs" :key="tabName(tab)">
        <a-tab-pane
-         :key="tab.name"
+         :key="tabName(tab)"
          :tab="$t('label.' + tabName(tab))"
          v-if="showTab(tab)">
          <keep-alive>
@@ -183,12 +183,12 @@ export default {
         return
       }
       if (!this.historyTab || !this.$route.meta.tabs || this.$route.meta.tabs.length === 0) {
-        this.activeTab = this.tabs[0].name
+        this.activeTab = this.tabName(this.tabs[0])
         return
       }
-      const tabIdx = this.$route.meta.tabs.findIndex(tab => tab.name === this.historyTab)
+      const tabIdx = this.$route.meta.tabs.findIndex(tab => this.tabName(tab) === this.historyTab)
       if (tabIdx === -1) {
-        this.activeTab = this.tabs[0].name
+        this.activeTab = this.tabName(this.tabs[0])
       } else {
         this.activeTab = this.historyTab
       }

@@ -142,7 +142,7 @@ export default {
          }
        },
        show: (record) => { return record.state === 'Active' },
-       groupMap: (selection) => { return selection.map(x => { return { id: x } }) }
+       groupMap: (selection) => { return selection.map(x => { return { id: x, state: 'Inactive' } }) }
      }]
    },
    {

@@ -224,7 +224,7 @@ export default {
          }
        },
        show: (record) => { return record.state === 'Active' },
-       groupMap: (selection) => { return selection.map(x => { return { id: x } }) }
+       groupMap: (selection) => { return selection.map(x => { return { id: x, state: 'Inactive' } }) }
      }]
    },
    {

@@ -331,7 +331,7 @@ export default {
          }
        },
        show: (record) => { return record.state === 'Active' },
-       groupMap: (selection) => { return selection.map(x => { return { id: x } }) }
+       groupMap: (selection) => { return selection.map(x => { return { id: x, state: 'Inactive' } }) }
      }]
    },
    {

@@ -1986,9 +1986,8 @@ export default {
     },
     onSearch (opts) {
       const query = Object.assign({}, this.$route.query)
-      for (const key in this.searchParams) {
-        delete query[key]
-      }
+      const searchFilters = this.$route?.meta?.searchFilters || []
+      searchFilters.forEach(key => delete query[key])
       delete query.name
       delete query.templatetype
       delete query.keyword