Adding support for cross-cluster storage migration for managed storage when using XenServer

parent 1d9735c346
commit b508fb8692
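
Note: the central API change in this commit is that MigrateWithStorageReceiveCommand now carries, per volume, a plain storage identifier string (the existing pool path for regular storage, or the name of the SR created on the destination cluster for managed storage) instead of a StorageFilerTO. A minimal illustrative sketch of how a caller builds the command after this change; the variable names here are placeholders, and the real wiring is in XenServerStorageMotionStrategy below.

    // Illustrative sketch only; VolumeTO, StoragePool, Pair and the command class are the
    // CloudStack types touched by this commit, and destSrName stands in for the SR name
    // returned by handleManagedVolumePreMigration().
    List<Pair<VolumeTO, String>> volumeToStorageUuid = new ArrayList<>();

    // Unmanaged storage keeps using the pool path; managed storage passes the new SR's name (its IQN).
    String storageUuid = storagePool.isManaged() ? destSrName : storagePool.getPath();

    volumeToStorageUuid.add(new Pair<>(volumeTo, storageUuid));

    MigrateWithStorageReceiveCommand receiveCmd = new MigrateWithStorageReceiveCommand(vmTo, volumeToStorageUuid);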
@@ -104,4 +104,6 @@ public interface StoragePool extends Identity, InternalIdentity {
     boolean isInMaintenance();
 
     Hypervisor.HypervisorType getHypervisor();
+
+    boolean isManaged();
 }
@@ -21,26 +21,25 @@ package com.cloud.agent.api;
 
 import java.util.List;
 
-import com.cloud.agent.api.to.StorageFilerTO;
 import com.cloud.agent.api.to.VirtualMachineTO;
 import com.cloud.agent.api.to.VolumeTO;
 import com.cloud.utils.Pair;
 
 public class MigrateWithStorageReceiveCommand extends Command {
     VirtualMachineTO vm;
-    List<Pair<VolumeTO, StorageFilerTO>> volumeToFiler;
+    List<Pair<VolumeTO, String>> volumeToStorageUuid;
 
-    public MigrateWithStorageReceiveCommand(VirtualMachineTO vm, List<Pair<VolumeTO, StorageFilerTO>> volumeToFiler) {
+    public MigrateWithStorageReceiveCommand(VirtualMachineTO vm, List<Pair<VolumeTO, String>> volumeToStorageUuid) {
         this.vm = vm;
-        this.volumeToFiler = volumeToFiler;
+        this.volumeToStorageUuid = volumeToStorageUuid;
     }
 
     public VirtualMachineTO getVirtualMachine() {
         return vm;
     }
 
-    public List<Pair<VolumeTO, StorageFilerTO>> getVolumeToFiler() {
-        return volumeToFiler;
+    public List<Pair<VolumeTO, String>> getVolumeToStorageUuid() {
+        return volumeToStorageUuid;
     }
 
     @Override
@@ -134,6 +134,9 @@ public class BackupSnapshotCommandTest {
                 return 25;
             };
 
+            @Override
+            public boolean isManaged() { return false; }
+
             @Override
             public Long getPodId() {
                 return 0L;
@@ -173,6 +173,9 @@ public class CheckNetworkAnswerTest {
                 return 25;
             };
 
+            @Override
+            public boolean isManaged() { return false; }
+
             @Override
             public Long getPodId() {
                 return 0L;
@@ -135,6 +135,9 @@ public class SnapshotCommandTest {
                 return 25;
             };
 
+            @Override
+            public boolean isManaged() { return false; }
+
             @Override
             public Long getPodId() {
                 return 0L;
@@ -25,6 +25,13 @@ import com.cloud.host.Host;
 import com.cloud.storage.StoragePool;
 
 public interface PrimaryDataStoreDriver extends DataStoreDriver {
+    String BASIC_CREATE = "basicCreate";
+    String BASIC_DELETE = "basicDelete";
+    String BASIC_DELETE_FAILURE = "basicDeleteFailure";
+    String BASIC_GRANT_ACCESS = "basicGrantAccess";
+    String BASIC_REVOKE_ACCESS = "basicRevokeAccess";
+    String BASIC_IQN = "basicIqn";
+
     ChapInfo getChapInfo(DataObject dataObject);
 
     boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore);
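
Note: these new BASIC_* constants are keys stored in the volume details table. The orchestration code persists them to ask the storage plug-in for a "basic" operation on the backend volume (create, delete, grant/revoke access) without the usual updates to cloud.volumes. A hedged sketch of the calling pattern, mirroring handleManagedVolumePreMigration() later in this commit; volumeInfo, volumeDetailsDao and the driver reference are assumed to be in scope.

    PrimaryDataStoreDriver driver = (PrimaryDataStoreDriver)volumeInfo.getDataStore().getDriver();

    // Persisting this detail tells the plug-in that the next createAsync() call is a "basic" create.
    VolumeDetailVO detail = new VolumeDetailVO(volumeInfo.getId(), PrimaryDataStoreDriver.BASIC_CREATE, Boolean.TRUE.toString(), false);

    volumeDetailsDao.persist(detail);

    driver.createAsync(volumeInfo.getDataStore(), volumeInfo, null);

    // The plug-in records the new volume's IQN under BASIC_IQN for the caller to read back;
    // BASIC_GRANT_ACCESS and BASIC_REVOKE_ACCESS gate grantAccess()/revokeAccess() the same way.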
@@ -2045,62 +2045,74 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
 
     private Map<Volume, StoragePool> getPoolListForVolumesForMigration(final VirtualMachineProfile profile, final Host host, final Map<Long, Long> volumeToPool) {
         final List<VolumeVO> allVolumes = _volsDao.findUsableVolumesForInstance(profile.getId());
-        final Map<Volume, StoragePool> volumeToPoolObjectMap = new HashMap<Volume, StoragePool> ();
+        final Map<Volume, StoragePool> volumeToPoolObjectMap = new HashMap<>();
 
         for (final VolumeVO volume : allVolumes) {
             final Long poolId = volumeToPool.get(Long.valueOf(volume.getId()));
-            final StoragePoolVO pool = _storagePoolDao.findById(poolId);
+            final StoragePoolVO destPool = _storagePoolDao.findById(poolId);
             final StoragePoolVO currentPool = _storagePoolDao.findById(volume.getPoolId());
             final DiskOfferingVO diskOffering = _diskOfferingDao.findById(volume.getDiskOfferingId());
-            if (pool != null) {
+
+            if (destPool != null) {
                 // Check if pool is accessible from the destination host and disk offering with which the volume was
                 // created is compliant with the pool type.
-                if (_poolHostDao.findByPoolHost(pool.getId(), host.getId()) == null || pool.isLocal() != diskOffering.getUseLocalStorage()) {
+                if (_poolHostDao.findByPoolHost(destPool.getId(), host.getId()) == null || destPool.isLocal() != diskOffering.getUseLocalStorage()) {
                     // Cannot find a pool for the volume. Throw an exception.
-                    throw new CloudRuntimeException("Cannot migrate volume " + volume + " to storage pool " + pool + " while migrating vm to host " + host +
+                    throw new CloudRuntimeException("Cannot migrate volume " + volume + " to storage pool " + destPool + " while migrating vm to host " + host +
                             ". Either the pool is not accessible from the host or because of the offering with which the volume is created it cannot be placed on " +
                             "the given pool.");
-                } else if (pool.getId() == currentPool.getId()) {
-                    // If the pool to migrate too is the same as current pool, the volume doesn't need to be migrated.
+                } else if (destPool.getId() == currentPool.getId()) {
+                    // If the pool to migrate to is the same as current pool, the volume doesn't need to be migrated.
                 } else {
-                    volumeToPoolObjectMap.put(volume, pool);
+                    volumeToPoolObjectMap.put(volume, destPool);
                 }
             } else {
-                // Find a suitable pool for the volume. Call the storage pool allocator to find the list of pools.
-                final DiskProfile diskProfile = new DiskProfile(volume, diskOffering, profile.getHypervisorType());
-                final DataCenterDeployment plan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), host.getClusterId(), host.getId(), null, null);
-                final ExcludeList avoid = new ExcludeList();
-                boolean currentPoolAvailable = false;
-
-                final List<StoragePool> poolList = new ArrayList<StoragePool>();
-                for (final StoragePoolAllocator allocator : _storagePoolAllocators) {
-                    final List<StoragePool> poolListFromAllocator = allocator.allocateToPool(diskProfile, profile, plan, avoid, StoragePoolAllocator.RETURN_UPTO_ALL);
-                    if (poolListFromAllocator != null && !poolListFromAllocator.isEmpty()) {
-                        poolList.addAll(poolListFromAllocator);
-                    }
-                }
-
-                if (poolList != null && !poolList.isEmpty()) {
-                    // Volume needs to be migrated. Pick the first pool from the list. Add a mapping to migrate the
-                    // volume to a pool only if it is required; that is the current pool on which the volume resides
-                    // is not available on the destination host.
-                    final Iterator<StoragePool> iter = poolList.iterator();
-                    while (iter.hasNext()) {
-                        if (currentPool.getId() == iter.next().getId()) {
-                            currentPoolAvailable = true;
-                            break;
-                        }
-                    }
-
-                    if (!currentPoolAvailable) {
-                        volumeToPoolObjectMap.put(volume, _storagePoolDao.findByUuid(poolList.get(0).getUuid()));
+                if (currentPool.isManaged()) {
+                    volumeToPoolObjectMap.put(volume, currentPool);
+                } else {
+                    // Find a suitable pool for the volume. Call the storage pool allocator to find the list of pools.
+
+                    final DiskProfile diskProfile = new DiskProfile(volume, diskOffering, profile.getHypervisorType());
+                    final DataCenterDeployment plan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), host.getClusterId(), host.getId(), null, null);
+
+                    final List<StoragePool> poolList = new ArrayList<>();
+                    final ExcludeList avoid = new ExcludeList();
+
+                    for (final StoragePoolAllocator allocator : _storagePoolAllocators) {
+                        final List<StoragePool> poolListFromAllocator = allocator.allocateToPool(diskProfile, profile, plan, avoid, StoragePoolAllocator.RETURN_UPTO_ALL);
+
+                        if (poolListFromAllocator != null && !poolListFromAllocator.isEmpty()) {
+                            poolList.addAll(poolListFromAllocator);
+                        }
+                    }
+
+                    boolean currentPoolAvailable = false;
+
+                    if (poolList != null && !poolList.isEmpty()) {
+                        // Volume needs to be migrated. Pick the first pool from the list. Add a mapping to migrate the
+                        // volume to a pool only if it is required; that is the current pool on which the volume resides
+                        // is not available on the destination host.
+
+                        final Iterator<StoragePool> iter = poolList.iterator();
+
+                        while (iter.hasNext()) {
+                            if (currentPool.getId() == iter.next().getId()) {
+                                currentPoolAvailable = true;
+
+                                break;
+                            }
+                        }
+
+                        if (!currentPoolAvailable) {
+                            volumeToPoolObjectMap.put(volume, _storagePoolDao.findByUuid(poolList.get(0).getUuid()));
+                        }
+                    }
+
+                    if (!currentPoolAvailable && !volumeToPoolObjectMap.containsKey(volume)) {
+                        // Cannot find a pool for the volume. Throw an exception.
+                        throw new CloudRuntimeException("Cannot find a storage pool which is available for volume " + volume + " while migrating virtual machine " +
+                                profile.getVirtualMachine() + " to host " + host);
                     }
                 }
-
-                if (!currentPoolAvailable && !volumeToPoolObjectMap.containsKey(volume)) {
-                    // Cannot find a pool for the volume. Throw an exception.
-                    throw new CloudRuntimeException("Cannot find a storage pool which is available for volume " + volume + " while migrating virtual machine " +
-                            profile.getVirtualMachine() + " to host " + host);
-                }
             }
         }
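
Note: the functional change in the hunk above is the managed-storage branch. When no explicit target pool was requested and the volume's current pool is managed, the volume is mapped to its current pool so the storage follows the VM to the destination cluster instead of being handed to the storage pool allocators. A condensed restatement of that decision, not a separate implementation:

    if (destPool == null) {
        if (currentPool.isManaged()) {
            // Managed storage: keep the volume on its current pool; the hypervisor-side code
            // creates and attaches an SR on the destination cluster during migration.
            volumeToPoolObjectMap.put(volume, currentPool);
        } else {
            // Regular storage: fall back to the storage pool allocators, as before.
        }
    }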
@@ -231,6 +231,7 @@ public class StoragePoolVO implements StoragePool {
         this.managed = managed;
     }
 
+    @Override
     public boolean isManaged() {
         return managed;
     }
@@ -31,7 +31,6 @@ import com.cloud.agent.api.Answer;
 import com.cloud.agent.api.MigrateWithStorageReceiveAnswer;
 import com.cloud.agent.api.MigrateWithStorageReceiveCommand;
 import com.cloud.agent.api.to.NicTO;
-import com.cloud.agent.api.to.StorageFilerTO;
 import com.cloud.agent.api.to.VirtualMachineTO;
 import com.cloud.agent.api.to.VolumeTO;
 import com.cloud.hypervisor.xenserver.resource.XenServer610Resource;
@@ -56,7 +55,7 @@ public final class XenServer610MigrateWithStorageReceiveCommandWrapper extends C
     public Answer execute(final MigrateWithStorageReceiveCommand command, final XenServer610Resource xenServer610Resource) {
         final Connection connection = xenServer610Resource.getConnection();
         final VirtualMachineTO vmSpec = command.getVirtualMachine();
-        final List<Pair<VolumeTO, StorageFilerTO>> volumeToFiler = command.getVolumeToFiler();
+        final List<Pair<VolumeTO, String>> volumeToStorageUuid = command.getVolumeToStorageUuid();
 
         try {
             // In a cluster management server setup, the migrate with storage receive and send
@@ -69,10 +68,12 @@
             // storage send command execution.
             Gson gson = new Gson();
             // Get a map of all the SRs to which the vdis will be migrated.
-            final List<Pair<VolumeTO, Object>> volumeToSr = new ArrayList<Pair<VolumeTO, Object>>();
-            for (final Pair<VolumeTO, StorageFilerTO> entry : volumeToFiler) {
-                final StorageFilerTO storageFiler = entry.second();
-                final SR sr = xenServer610Resource.getStorageRepository(connection, storageFiler.getUuid());
+            final List<Pair<VolumeTO, Object>> volumeToSr = new ArrayList<>();
+
+            for (final Pair<VolumeTO, String> entry : volumeToStorageUuid) {
+                final String storageUuid = entry.second();
+
+                final SR sr = xenServer610Resource.getStorageRepository(connection, storageUuid);
+
                 volumeToSr.add(new Pair<VolumeTO, Object>(entry.first(), sr));
             }
 
@@ -19,6 +19,8 @@
 
 package com.cloud.hypervisor.xenserver.resource.wrapper.xenbase;
 
+import java.util.Map;
+
 import org.apache.log4j.Logger;
 
 import com.cloud.agent.api.Answer;
@@ -39,20 +41,35 @@ public final class CitrixCreateStoragePoolCommandWrapper extends CommandWrapper<
     public Answer execute(final CreateStoragePoolCommand command, final CitrixResourceBase citrixResourceBase) {
         final Connection conn = citrixResourceBase.getConnection();
         final StorageFilerTO pool = command.getPool();
 
         try {
-            if (pool.getType() == StoragePoolType.NetworkFilesystem) {
-                citrixResourceBase.getNfsSR(conn, Long.toString(pool.getId()), pool.getUuid(), pool.getHost(), pool.getPath(), pool.toString());
-            } else if (pool.getType() == StoragePoolType.IscsiLUN) {
-                citrixResourceBase.getIscsiSR(conn, pool.getUuid(), pool.getHost(), pool.getPath(), null, null, false);
-            } else if (pool.getType() == StoragePoolType.PreSetup) {
-            } else {
-                return new Answer(command, false, "The pool type: " + pool.getType().name() + " is not supported.");
+            if (command.getCreateDatastore()) {
+                Map<String, String> details = command.getDetails();
+
+                String srNameLabel = details.get(CreateStoragePoolCommand.DATASTORE_NAME);
+                String storageHost = details.get(CreateStoragePoolCommand.STORAGE_HOST);
+                String iqn = details.get(CreateStoragePoolCommand.IQN);
+
+                citrixResourceBase.getIscsiSR(conn, srNameLabel, storageHost, iqn, null, null, false);
             }
+            else {
+                if (pool.getType() == StoragePoolType.NetworkFilesystem) {
+                    citrixResourceBase.getNfsSR(conn, Long.toString(pool.getId()), pool.getUuid(), pool.getHost(), pool.getPath(), pool.toString());
+                } else if (pool.getType() == StoragePoolType.IscsiLUN) {
+                    citrixResourceBase.getIscsiSR(conn, pool.getUuid(), pool.getHost(), pool.getPath(), null, null, false);
+                } else if (pool.getType() == StoragePoolType.PreSetup) {
+                } else {
+                    return new Answer(command, false, "The pool type: " + pool.getType().name() + " is not supported.");
+                }
+            }
 
             return new Answer(command, true, "success");
         } catch (final Exception e) {
             final String msg = "Catch Exception " + e.getClass().getName() + ", create StoragePool failed due to " + e.toString() + " on host:"
                     + citrixResourceBase.getHost().getUuid() + " pool: " + pool.getHost() + pool.getPath();
 
             s_logger.warn(msg, e);
 
             return new Answer(command, false, msg);
         }
     }
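
Note: the managed-storage branch above is driven from the management server, which fills in the command details before sending them to the destination host. A sketch based on handleManagedVolumePreMigration() later in this commit; iqn, storagePool, agentMgr and destHostId are placeholders or fields assumed to be in scope.

    Map<String, String> details = new HashMap<>();

    details.put(CreateStoragePoolCommand.DATASTORE_NAME, iqn);
    details.put(CreateStoragePoolCommand.IQN, iqn);
    details.put(CreateStoragePoolCommand.STORAGE_HOST, storagePool.getHostAddress());
    details.put(CreateStoragePoolCommand.STORAGE_PORT, String.valueOf(storagePool.getPort()));

    CreateStoragePoolCommand cmd = new CreateStoragePoolCommand(true, storagePool);

    cmd.setDetails(details);
    cmd.setCreateDatastore(true); // routes execution into the managed-storage branch above

    Answer answer = agentMgr.easySend(destHostId, cmd);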
@@ -19,6 +19,8 @@
 
 package com.cloud.hypervisor.xenserver.resource.wrapper.xenbase;
 
+import java.util.Map;
+
 import org.apache.log4j.Logger;
 
 import com.cloud.agent.api.Answer;
@@ -32,22 +34,40 @@ import com.xensource.xenapi.SR;
 
 @ResourceWrapper(handles = DeleteStoragePoolCommand.class)
 public final class CitrixDeleteStoragePoolCommandWrapper extends CommandWrapper<DeleteStoragePoolCommand, Answer, CitrixResourceBase> {
 
     private static final Logger s_logger = Logger.getLogger(CitrixDeleteStoragePoolCommandWrapper.class);
 
     @Override
     public Answer execute(final DeleteStoragePoolCommand command, final CitrixResourceBase citrixResourceBase) {
         final Connection conn = citrixResourceBase.getConnection();
         final StorageFilerTO poolTO = command.getPool();
 
         try {
-            final SR sr = citrixResourceBase.getStorageRepository(conn, poolTO.getUuid());
+            final SR sr;
+
+            // getRemoveDatastore being true indicates we are using managed storage and need to pull the SR name out of a Map
+            // instead of pulling it out using getUuid of the StorageFilerTO instance.
+            if (command.getRemoveDatastore()) {
+                Map<String, String> details = command.getDetails();
+
+                String srNameLabel = details.get(DeleteStoragePoolCommand.DATASTORE_NAME);
+
+                sr = citrixResourceBase.getStorageRepository(conn, srNameLabel);
+            }
+            else {
+                sr = citrixResourceBase.getStorageRepository(conn, poolTO.getUuid());
+            }
+
             citrixResourceBase.removeSR(conn, sr);
 
             final Answer answer = new Answer(command, true, "success");
 
             return answer;
         } catch (final Exception e) {
-            final String msg = "DeleteStoragePoolCommand XenAPIException:" + e.getMessage() + " host:" + citrixResourceBase.getHost().getUuid() + " pool: " + poolTO.getHost()
-                    + poolTO.getPath();
-            s_logger.warn(msg, e);
+            final String msg = "DeleteStoragePoolCommand XenAPIException:" + e.getMessage() + " host:" + citrixResourceBase.getHost().getUuid() +
+                    " pool: " + poolTO.getHost() + poolTO.getPath();
+
+            s_logger.error(msg, e);
 
             return new Answer(command, false, msg);
         }
     }
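
Note: the delete side is symmetric. After a successful migration (or when cleaning up after a failed one) the management server asks the source or destination host to detach the SR by name rather than by pool UUID. A sketch mirroring handleManagedVolumePostMigration() in this commit; volumeInfo, agentMgr and srcHostId are assumed to be in scope.

    Map<String, String> details = new HashMap<>();

    details.put(DeleteStoragePoolCommand.DATASTORE_NAME, volumeInfo.get_iScsiName());

    DeleteStoragePoolCommand cmd = new DeleteStoragePoolCommand();

    cmd.setDetails(details);
    cmd.setRemoveDatastore(true); // tells the wrapper to look the SR up by name instead of by pool UUID

    Answer answer = agentMgr.easySend(srcHostId, cmd);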
@@ -19,6 +19,7 @@
 package org.apache.cloudstack.storage.motion;
 
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
@@ -28,6 +29,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataMotionStrategy;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
 import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority;
 import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory;
 import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
@@ -39,6 +41,8 @@ import org.springframework.stereotype.Component;
 
 import com.cloud.agent.AgentManager;
 import com.cloud.agent.api.Answer;
+import com.cloud.agent.api.CreateStoragePoolCommand;
+import com.cloud.agent.api.DeleteStoragePoolCommand;
 import com.cloud.agent.api.MigrateWithStorageAnswer;
 import com.cloud.agent.api.MigrateWithStorageCommand;
 import com.cloud.agent.api.MigrateWithStorageCompleteAnswer;
@@ -56,9 +60,12 @@ import com.cloud.host.Host;
 import com.cloud.hypervisor.Hypervisor.HypervisorType;
 import com.cloud.storage.StoragePool;
 import com.cloud.storage.VolumeVO;
+import com.cloud.storage.VolumeDetailVO;
 import com.cloud.storage.dao.VolumeDao;
+import com.cloud.storage.dao.VolumeDetailsDao;
 import com.cloud.utils.exception.CloudRuntimeException;
 import com.cloud.utils.Pair;
+import com.cloud.utils.StringUtils;
 import com.cloud.vm.VMInstanceVO;
 import com.cloud.vm.dao.VMInstanceDao;
 
@@ -74,6 +81,8 @@ public class XenServerStorageMotionStrategy implements DataMotionStrategy {
     @Inject
     PrimaryDataStoreDao storagePoolDao;
     @Inject
+    private VolumeDetailsDao volumeDetailsDao;
+    @Inject
     VMInstanceDao instanceDao;
 
     @Override
@@ -120,25 +129,175 @@ public class XenServerStorageMotionStrategy implements DataMotionStrategy {
         callback.complete(result);
     }
 
+    private String getBasicIqn(long volumeId) {
+        VolumeDetailVO volumeDetail = volumeDetailsDao.findDetail(volumeId, PrimaryDataStoreDriver.BASIC_IQN);
+
+        return volumeDetail.getValue();
+    }
+
+    /**
+     * Tell the underlying storage plug-in to create a new volume, put it in the VAG of the destination cluster, and
+     * send a command to the destination cluster to create an SR and to attach to the SR from all hosts in the cluster.
+     */
+    private String handleManagedVolumePreMigration(VolumeInfo volumeInfo, StoragePool storagePool, Host destHost) {
+        final PrimaryDataStoreDriver pdsd = (PrimaryDataStoreDriver)volumeInfo.getDataStore().getDriver();
+
+        VolumeDetailVO volumeDetailVo = new VolumeDetailVO(volumeInfo.getId(), PrimaryDataStoreDriver.BASIC_CREATE, Boolean.TRUE.toString(), false);
+
+        volumeDetailsDao.persist(volumeDetailVo);
+
+        pdsd.createAsync(volumeInfo.getDataStore(), volumeInfo, null);
+
+        volumeDetailVo = new VolumeDetailVO(volumeInfo.getId(), PrimaryDataStoreDriver.BASIC_GRANT_ACCESS, Boolean.TRUE.toString(), false);
+
+        volumeDetailsDao.persist(volumeDetailVo);
+
+        pdsd.grantAccess(volumeInfo, destHost, volumeInfo.getDataStore());
+
+        final Map<String, String> details = new HashMap<>();
+
+        final String iqn = getBasicIqn(volumeInfo.getId());
+
+        details.put(CreateStoragePoolCommand.DATASTORE_NAME, iqn);
+
+        details.put(CreateStoragePoolCommand.IQN, iqn);
+
+        details.put(CreateStoragePoolCommand.STORAGE_HOST, storagePool.getHostAddress());
+
+        details.put(CreateStoragePoolCommand.STORAGE_PORT, String.valueOf(storagePool.getPort()));
+
+        final CreateStoragePoolCommand cmd = new CreateStoragePoolCommand(true, storagePool);
+
+        cmd.setDetails(details);
+        cmd.setCreateDatastore(true);
+
+        final Answer answer = agentMgr.easySend(destHost.getId(), cmd);
+
+        if (answer == null || !answer.getResult()) {
+            String errMsg = "Error interacting with host (related to CreateStoragePoolCommand)" +
+                    (StringUtils.isNotBlank(answer.getDetails()) ? ": " + answer.getDetails() : "");
+
+            s_logger.error(errMsg);
+
+            throw new CloudRuntimeException(errMsg);
+        }
+
+        return iqn;
+    }
+
+    private void handleManagedVolumePostMigration(VolumeInfo volumeInfo, Host srcHost, VolumeObjectTO volumeTO) {
+        final Map<String, String> details = new HashMap<>();
+
+        details.put(DeleteStoragePoolCommand.DATASTORE_NAME, volumeInfo.get_iScsiName());
+
+        final DeleteStoragePoolCommand cmd = new DeleteStoragePoolCommand();
+
+        cmd.setDetails(details);
+        cmd.setRemoveDatastore(true);
+
+        final Answer answer = agentMgr.easySend(srcHost.getId(), cmd);
+
+        if (answer == null || !answer.getResult()) {
+            String errMsg = "Error interacting with host (related to DeleteStoragePoolCommand)" +
+                    (StringUtils.isNotBlank(answer.getDetails()) ? ": " + answer.getDetails() : "");
+
+            s_logger.error(errMsg);
+
+            throw new CloudRuntimeException(errMsg);
+        }
+
+        final PrimaryDataStoreDriver pdsd = (PrimaryDataStoreDriver)volumeInfo.getDataStore().getDriver();
+
+        pdsd.revokeAccess(volumeInfo, srcHost, volumeInfo.getDataStore());
+
+        VolumeDetailVO volumeDetailVo = new VolumeDetailVO(volumeInfo.getId(), PrimaryDataStoreDriver.BASIC_DELETE, Boolean.TRUE.toString(), false);
+
+        volumeDetailsDao.persist(volumeDetailVo);
+
+        pdsd.deleteAsync(volumeInfo.getDataStore(), volumeInfo, null);
+
+        VolumeVO volumeVO = volDao.findById(volumeInfo.getId());
+
+        volumeVO.setPath(volumeTO.getPath());
+
+        volDao.update(volumeVO.getId(), volumeVO);
+    }
+
+    private void handleManagedVolumesAfterFailedMigration(Map<VolumeInfo, DataStore> volumeToPool, Host destHost) {
+        for (Map.Entry<VolumeInfo, DataStore> entry : volumeToPool.entrySet()) {
+            VolumeInfo volumeInfo = entry.getKey();
+            StoragePool storagePool = storagePoolDao.findById(volumeInfo.getPoolId());
+
+            if (storagePool.isManaged()) {
+                final Map<String, String> details = new HashMap<>();
+
+                details.put(DeleteStoragePoolCommand.DATASTORE_NAME, getBasicIqn(volumeInfo.getId()));
+
+                final DeleteStoragePoolCommand cmd = new DeleteStoragePoolCommand();
+
+                cmd.setDetails(details);
+                cmd.setRemoveDatastore(true);
+
+                final Answer answer = agentMgr.easySend(destHost.getId(), cmd);
+
+                if (answer == null || !answer.getResult()) {
+                    String errMsg = "Error interacting with host (related to handleManagedVolumesAfterFailedMigration)" +
+                            (StringUtils.isNotBlank(answer.getDetails()) ? ": " + answer.getDetails() : "");
+
+                    s_logger.error(errMsg);
+
+                    // no need to throw an exception here as the calling code is responsible for doing so
+                    // regardless of the success or lack thereof concerning this method
+                    return;
+                }
+
+                final PrimaryDataStoreDriver pdsd = (PrimaryDataStoreDriver)volumeInfo.getDataStore().getDriver();
+
+                VolumeDetailVO volumeDetailVo = new VolumeDetailVO(volumeInfo.getId(), PrimaryDataStoreDriver.BASIC_REVOKE_ACCESS, Boolean.TRUE.toString(), false);
+
+                volumeDetailsDao.persist(volumeDetailVo);
+
+                pdsd.revokeAccess(volumeInfo, destHost, volumeInfo.getDataStore());
+
+                volumeDetailVo = new VolumeDetailVO(volumeInfo.getId(), PrimaryDataStoreDriver.BASIC_DELETE_FAILURE, Boolean.TRUE.toString(), false);
+
+                volumeDetailsDao.persist(volumeDetailVo);
+
+                pdsd.deleteAsync(volumeInfo.getDataStore(), volumeInfo, null);
+            }
+        }
+    }
+
     private Answer migrateVmWithVolumesAcrossCluster(VMInstanceVO vm, VirtualMachineTO to, Host srcHost, Host destHost, Map<VolumeInfo, DataStore> volumeToPool)
             throws AgentUnavailableException {
+        // Initiate migration of a virtual machine with its volumes.
 
-        // Initiate migration of a virtual machine with it's volumes.
         try {
-            List<Pair<VolumeTO, StorageFilerTO>> volumeToFilerto = new ArrayList<Pair<VolumeTO, StorageFilerTO>>();
+            List<Pair<VolumeTO, String>> volumeToStorageUuid = new ArrayList<>();
+
             for (Map.Entry<VolumeInfo, DataStore> entry : volumeToPool.entrySet()) {
-                VolumeInfo volume = entry.getKey();
-                VolumeTO volumeTo = new VolumeTO(volume, storagePoolDao.findById(volume.getPoolId()));
-                StorageFilerTO filerTo = new StorageFilerTO((StoragePool)entry.getValue());
-                volumeToFilerto.add(new Pair<VolumeTO, StorageFilerTO>(volumeTo, filerTo));
+                VolumeInfo volumeInfo = entry.getKey();
+                StoragePool storagePool = storagePoolDao.findById(volumeInfo.getPoolId());
+                VolumeTO volumeTo = new VolumeTO(volumeInfo, storagePool);
+
+                if (storagePool.isManaged()) {
+                    String iqn = handleManagedVolumePreMigration(volumeInfo, storagePool, destHost);
+
+                    volumeToStorageUuid.add(new Pair<>(volumeTo, iqn));
+                }
+                else {
+                    volumeToStorageUuid.add(new Pair<>(volumeTo, ((StoragePool)entry.getValue()).getPath()));
+                }
             }
 
             // Migration across cluster needs to be done in three phases.
             // 1. Send a migrate receive command to the destination host so that it is ready to receive a vm.
             // 2. Send a migrate send command to the source host. This actually migrates the vm to the destination.
             // 3. Complete the process. Update the volume details.
-            MigrateWithStorageReceiveCommand receiveCmd = new MigrateWithStorageReceiveCommand(to, volumeToFilerto);
+
+            MigrateWithStorageReceiveCommand receiveCmd = new MigrateWithStorageReceiveCommand(to, volumeToStorageUuid);
             MigrateWithStorageReceiveAnswer receiveAnswer = (MigrateWithStorageReceiveAnswer)agentMgr.send(destHost.getId(), receiveCmd);
+
             if (receiveAnswer == null) {
                 s_logger.error("Migration with storage of vm " + vm + " to host " + destHost + " failed.");
                 throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost);
@@ -150,16 +309,22 @@ public class XenServerStorageMotionStrategy implements DataMotionStrategy {
             MigrateWithStorageSendCommand sendCmd =
                     new MigrateWithStorageSendCommand(to, receiveAnswer.getVolumeToSr(), receiveAnswer.getNicToNetwork(), receiveAnswer.getToken());
             MigrateWithStorageSendAnswer sendAnswer = (MigrateWithStorageSendAnswer)agentMgr.send(srcHost.getId(), sendCmd);
+
             if (sendAnswer == null) {
+                handleManagedVolumesAfterFailedMigration(volumeToPool, destHost);
+
                 s_logger.error("Migration with storage of vm " + vm + " to host " + destHost + " failed.");
                 throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost);
             } else if (!sendAnswer.getResult()) {
+                handleManagedVolumesAfterFailedMigration(volumeToPool, destHost);
+
                 s_logger.error("Migration with storage of vm " + vm + " failed. Details: " + sendAnswer.getDetails());
                 throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost);
             }
 
             MigrateWithStorageCompleteCommand command = new MigrateWithStorageCompleteCommand(to);
             MigrateWithStorageCompleteAnswer answer = (MigrateWithStorageCompleteAnswer)agentMgr.send(destHost.getId(), command);
+
             if (answer == null) {
                 s_logger.error("Migration with storage of vm " + vm + " failed.");
                 throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost);
@@ -168,7 +333,7 @@ public class XenServerStorageMotionStrategy implements DataMotionStrategy {
                 throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost);
             } else {
                 // Update the volume details after migration.
-                updateVolumePathsAfterMigration(volumeToPool, answer.getVolumeTos());
+                updateVolumePathsAfterMigration(volumeToPool, answer.getVolumeTos(), srcHost);
             }
 
             return answer;
@@ -181,7 +346,7 @@ public class XenServerStorageMotionStrategy implements DataMotionStrategy {
     private Answer migrateVmWithVolumesWithinCluster(VMInstanceVO vm, VirtualMachineTO to, Host srcHost, Host destHost, Map<VolumeInfo, DataStore> volumeToPool)
             throws AgentUnavailableException {
 
-        // Initiate migration of a virtual machine with it's volumes.
+        // Initiate migration of a virtual machine with its volumes.
         try {
             List<Pair<VolumeTO, StorageFilerTO>> volumeToFilerto = new ArrayList<Pair<VolumeTO, StorageFilerTO>>();
             for (Map.Entry<VolumeInfo, DataStore> entry : volumeToPool.entrySet()) {
@@ -201,7 +366,7 @@ public class XenServerStorageMotionStrategy implements DataMotionStrategy {
                 throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost + ". " + answer.getDetails());
             } else {
                 // Update the volume details after migration.
-                updateVolumePathsAfterMigration(volumeToPool, answer.getVolumeTos());
+                updateVolumePathsAfterMigration(volumeToPool, answer.getVolumeTos(), srcHost);
             }
 
             return answer;
@@ -211,28 +376,39 @@ public class XenServerStorageMotionStrategy implements DataMotionStrategy {
         }
     }
 
-    private void updateVolumePathsAfterMigration(Map<VolumeInfo, DataStore> volumeToPool, List<VolumeObjectTO> volumeTos) {
+    private void updateVolumePathsAfterMigration(Map<VolumeInfo, DataStore> volumeToPool, List<VolumeObjectTO> volumeTos, Host srcHost) {
         for (Map.Entry<VolumeInfo, DataStore> entry : volumeToPool.entrySet()) {
+            VolumeInfo volumeInfo = entry.getKey();
+            StoragePool storagePool = (StoragePool)entry.getValue();
+
             boolean updated = false;
-            VolumeInfo volume = entry.getKey();
-            StoragePool pool = (StoragePool)entry.getValue();
+
             for (VolumeObjectTO volumeTo : volumeTos) {
-                if (volume.getId() == volumeTo.getId()) {
-                    VolumeVO volumeVO = volDao.findById(volume.getId());
-                    Long oldPoolId = volumeVO.getPoolId();
-                    volumeVO.setPath(volumeTo.getPath());
-                    volumeVO.setFolder(pool.getPath());
-                    volumeVO.setPodId(pool.getPodId());
-                    volumeVO.setPoolId(pool.getId());
-                    volumeVO.setLastPoolId(oldPoolId);
-                    volDao.update(volume.getId(), volumeVO);
+                if (volumeInfo.getId() == volumeTo.getId()) {
+                    if (storagePool.isManaged()) {
+                        handleManagedVolumePostMigration(volumeInfo, srcHost, volumeTo);
+                    }
+                    else {
+                        VolumeVO volumeVO = volDao.findById(volumeInfo.getId());
+                        Long oldPoolId = volumeVO.getPoolId();
+
+                        volumeVO.setPath(volumeTo.getPath());
+                        volumeVO.setFolder(storagePool.getPath());
+                        volumeVO.setPodId(storagePool.getPodId());
+                        volumeVO.setPoolId(storagePool.getId());
+                        volumeVO.setLastPoolId(oldPoolId);
+
+                        volDao.update(volumeInfo.getId(), volumeVO);
+                    }
+
                     updated = true;
+
                     break;
                 }
             }
 
             if (!updated) {
-                s_logger.error("Volume path wasn't updated for volume " + volume + " after it was migrated.");
+                s_logger.error("The volume path wasn't updated for volume '" + volumeInfo + "' after it was migrated.");
             }
         }
     }
@@ -204,9 +204,9 @@ public class XenServer610WrapperTest {
         final StorageFilerTO storage1 = Mockito.mock(StorageFilerTO.class);
         final StorageFilerTO storage2 = Mockito.mock(StorageFilerTO.class);
 
-        final List<Pair<VolumeTO, StorageFilerTO>> volumeToFiler = new ArrayList<Pair<VolumeTO, StorageFilerTO>>();
-        volumeToFiler.add(new Pair<VolumeTO, StorageFilerTO>(vol1, storage1));
-        volumeToFiler.add(new Pair<VolumeTO, StorageFilerTO>(vol2, storage2));
+        final List<Pair<VolumeTO, String>> volumeToFiler = new ArrayList<>();
+        volumeToFiler.add(new Pair<>(vol1, storage1.getPath()));
+        volumeToFiler.add(new Pair<>(vol2, storage2.getPath()));
 
         final NicTO nicTO1 = Mockito.mock(NicTO.class);
         final NicTO nicTO2 = Mockito.mock(NicTO.class);
@@ -94,6 +94,8 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
     private static final long MIN_IOPS_FOR_SNAPSHOT_VOLUME = 100L;
     private static final long MAX_IOPS_FOR_SNAPSHOT_VOLUME = 20000L;
 
+    private static final String BASIC_SF_ID = "basicSfId";
+
     @Inject private AccountDao accountDao;
     @Inject private AccountDetailsDao accountDetailsDao;
     @Inject private ClusterDao clusterDao;
@@ -153,7 +155,7 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
         Preconditions.checkArgument(host != null, "'host' should not be 'null'");
         Preconditions.checkArgument(dataStore != null, "'dataStore' should not be 'null'");
 
-        long sfVolumeId = getSolidFireVolumeId(dataObject);
+        long sfVolumeId = getSolidFireVolumeId(dataObject, true);
         long clusterId = host.getClusterId();
         long storagePoolId = dataStore.getId();
 
@@ -215,7 +217,7 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
             return;
         }
 
-        long sfVolumeId = getSolidFireVolumeId(dataObject);
+        long sfVolumeId = getSolidFireVolumeId(dataObject, false);
         long clusterId = host.getClusterId();
         long storagePoolId = dataStore.getId();
 
@@ -252,9 +254,31 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
         }
     }
 
-    private long getSolidFireVolumeId(DataObject dataObject) {
+    private long getSolidFireVolumeId(DataObject dataObject, boolean grantAccess) {
         if (dataObject.getType() == DataObjectType.VOLUME) {
-            return Long.parseLong(((VolumeInfo)dataObject).getFolder());
+            final VolumeInfo volumeInfo = (VolumeInfo)dataObject;
+            final long volumeId = volumeInfo.getId();
+
+            if (grantAccess && isBasicGrantAccess(volumeId)) {
+                volumeDetailsDao.removeDetail(volumeInfo.getId(), BASIC_GRANT_ACCESS);
+
+                final Long sfVolumeId = getBasicSfVolumeId(volumeId);
+
+                Preconditions.checkNotNull(sfVolumeId, "'sfVolumeId' should not be 'null' (basic grant access).");
+
+                return sfVolumeId;
+            }
+            else if (!grantAccess && isBasicRevokeAccess(volumeId)) {
+                volumeDetailsDao.removeDetail(volumeInfo.getId(), BASIC_REVOKE_ACCESS);
+
+                final Long sfVolumeId = getBasicSfVolumeId(volumeId);
+
+                Preconditions.checkNotNull(sfVolumeId, "'sfVolumeId' should not be 'null' (basic revoke access).");
+
+                return sfVolumeId;
+            }
+
+            return Long.parseLong(volumeInfo.getFolder());
         }
 
         if (dataObject.getType() == DataObjectType.SNAPSHOT) {
@@ -271,7 +295,7 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
             return getVolumeIdFrom_iScsiPath(((TemplateInfo)dataObject).getInstallPath());
         }
 
-        throw new CloudRuntimeException("Invalid DataObjectType (" + dataObject.getType() + ") passed to getSolidFireVolumeId(DataObject)");
+        throw new CloudRuntimeException("Invalid DataObjectType (" + dataObject.getType() + ") passed to getSolidFireVolumeId(DataObject, boolean)");
     }
 
     private long getVolumeIdFrom_iScsiPath(String iScsiPath) {
@@ -313,10 +337,11 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
 
     private SolidFireUtil.SolidFireVolume createSolidFireVolume(SolidFireUtil.SolidFireConnection sfConnection, DataObject dataObject, long sfAccountId) {
         long storagePoolId = dataObject.getDataStore().getId();
-        Long minIops = null;
-        Long maxIops = null;
-        Long volumeSize = dataObject.getSize();
-        String volumeName = null;
+
+        final Long minIops;
+        final Long maxIops;
+        final Long volumeSize;
+        final String volumeName;
 
         final Map<String, String> mapAttributes;
 
@@ -647,6 +672,58 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
         snapshotDetailsDao.remove(snapshotDetails.getId());
     }
 
+    private Long getBasicSfVolumeId(long volumeId) {
+        VolumeDetailVO volumeDetail = volumeDetailsDao.findDetail(volumeId, BASIC_SF_ID);
+
+        if (volumeDetail != null && volumeDetail.getValue() != null) {
+            return new Long(volumeDetail.getValue());
+        }
+
+        return null;
+    }
+
+    private String getBasicIqn(long volumeId) {
+        VolumeDetailVO volumeDetail = volumeDetailsDao.findDetail(volumeId, BASIC_IQN);
+
+        if (volumeDetail != null && volumeDetail.getValue() != null) {
+            return volumeDetail.getValue();
+        }
+
+        return null;
+    }
+
+    // If isBasicCreate returns true, this means the calling code simply wants us to create a SolidFire volume with specified
+    // characteristics. We do not update the cloud.volumes table with this info.
+    private boolean isBasicCreate(long volumeId) {
+        return getBooleanValueFromVolumeDetails(volumeId, BASIC_CREATE);
+    }
+
+    private boolean isBasicDelete(long volumeId) {
+        return getBooleanValueFromVolumeDetails(volumeId, BASIC_DELETE);
+    }
+
+    private boolean isBasicDeleteFailure(long volumeId) {
+        return getBooleanValueFromVolumeDetails(volumeId, BASIC_DELETE_FAILURE);
+    }
+
+    private boolean isBasicGrantAccess(long volumeId) {
+        return getBooleanValueFromVolumeDetails(volumeId, BASIC_GRANT_ACCESS);
+    }
+
+    private boolean isBasicRevokeAccess(long volumeId) {
+        return getBooleanValueFromVolumeDetails(volumeId, BASIC_REVOKE_ACCESS);
+    }
+
+    private boolean getBooleanValueFromVolumeDetails(long volumeId, String name) {
+        VolumeDetailVO volumeDetail = volumeDetailsDao.findDetail(volumeId, name);
+
+        if (volumeDetail != null && volumeDetail.getValue() != null) {
+            return Boolean.parseBoolean(volumeDetail.getValue());
+        }
+
+        return false;
+    }
+
     private long getCsIdForCloning(long volumeId, String cloneOf) {
         VolumeDetailVO volumeDetail = volumeDetailsDao.findDetail(volumeId, cloneOf);
 
@@ -788,11 +865,13 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
             LOGGER.error(errMsg);
         }
 
-        CommandResult result = new CommandResult();
+        if (callback != null) {
+            CommandResult result = new CommandResult();
 
-        result.setResult(errMsg);
+            result.setResult(errMsg);
 
-        callback.complete(result);
+            callback.complete(result);
+        }
     }
 
     @Override
@@ -950,19 +1029,43 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
         snapshotDetailsDao.persist(snapshotDetail);
     }
 
+    private void addBasicCreateInfoToVolumeDetails(long volumeId, SolidFireUtil.SolidFireVolume sfVolume) {
+        VolumeDetailVO volumeDetailVo = new VolumeDetailVO(volumeId, BASIC_SF_ID, String.valueOf(sfVolume.getId()), false);
+
+        volumeDetailsDao.persist(volumeDetailVo);
+
+        volumeDetailVo = new VolumeDetailVO(volumeId, BASIC_IQN, sfVolume.getIqn(), false);
+
+        volumeDetailsDao.persist(volumeDetailVo);
+    }
+
     private String createVolume(VolumeInfo volumeInfo, long storagePoolId) {
-        verifySufficientBytesForStoragePool(volumeInfo, storagePoolId);
-        verifySufficientIopsForStoragePool(volumeInfo.getMinIops() != null ? volumeInfo.getMinIops() : getDefaultMinIops(storagePoolId), storagePoolId);
+        boolean isBasicCreate = isBasicCreate(volumeInfo.getId());
+
+        if (!isBasicCreate) {
+            verifySufficientBytesForStoragePool(volumeInfo, storagePoolId);
+            verifySufficientIopsForStoragePool(volumeInfo.getMinIops() != null ? volumeInfo.getMinIops() : getDefaultMinIops(storagePoolId), storagePoolId);
+        }
 
         SolidFireUtil.SolidFireConnection sfConnection = SolidFireUtil.getSolidFireConnection(storagePoolId, storagePoolDetailsDao);
 
         long sfAccountId = getCreateSolidFireAccountId(sfConnection, volumeInfo.getAccountId(), storagePoolId);
 
+        SolidFireUtil.SolidFireVolume sfVolume;
+
+        if (isBasicCreate) {
+            sfVolume = createSolidFireVolume(sfConnection, volumeInfo, sfAccountId);
+
+            volumeDetailsDao.removeDetail(volumeInfo.getId(), BASIC_CREATE);
+
+            addBasicCreateInfoToVolumeDetails(volumeInfo.getId(), sfVolume);
+
+            return sfVolume.getIqn();
+        }
+
         long csSnapshotId = getCsIdForCloning(volumeInfo.getId(), "cloneOfSnapshot");
         long csTemplateId = getCsIdForCloning(volumeInfo.getId(), "cloneOfTemplate");
 
-        SolidFireUtil.SolidFireVolume sfVolume;
-
         if (csSnapshotId > 0) {
             // We are supposed to create a clone of the underlying volume or snapshot that supports the CloudStack snapshot.
             sfVolume = createClone(sfConnection, csSnapshotId, volumeInfo, sfAccountId, storagePoolId, DataObjectType.SNAPSHOT);
@ -1083,23 +1186,66 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
        return iqn;
    }

    private void performBasicDelete(SolidFireUtil.SolidFireConnection sfConnection, long volumeId) {
        Long sfVolumeId = getBasicSfVolumeId(volumeId);

        Preconditions.checkNotNull(sfVolumeId, "'sfVolumeId' should not be 'null'.");

        String iqn = getBasicIqn(volumeId);

        Preconditions.checkNotNull(iqn, "'iqn' should not be 'null'.");

        VolumeVO volumeVO = volumeDao.findById(volumeId);

        SolidFireUtil.deleteSolidFireVolume(sfConnection, Long.parseLong(volumeVO.getFolder()));

        volumeVO.setFolder(String.valueOf(sfVolumeId));
        volumeVO.set_iScsiName(iqn);

        volumeDao.update(volumeId, volumeVO);

        volumeDetailsDao.removeDetail(volumeId, BASIC_SF_ID);
        volumeDetailsDao.removeDetail(volumeId, BASIC_IQN);
        volumeDetailsDao.removeDetail(volumeId, BASIC_DELETE);
    }

    private void performBasicDeleteFailure(SolidFireUtil.SolidFireConnection sfConnection, long volumeId) {
        Long sfVolumeId = getBasicSfVolumeId(volumeId);

        Preconditions.checkNotNull(sfVolumeId, "'sfVolumeId' should not be 'null'.");

        SolidFireUtil.deleteSolidFireVolume(sfConnection, sfVolumeId);

        volumeDetailsDao.removeDetail(volumeId, BASIC_SF_ID);
        volumeDetailsDao.removeDetail(volumeId, BASIC_IQN);
        volumeDetailsDao.removeDetail(volumeId, BASIC_DELETE_FAILURE);
    }

    private void deleteVolume(VolumeInfo volumeInfo, long storagePoolId) {
        try {
            long volumeId = volumeInfo.getId();

            SolidFireUtil.SolidFireConnection sfConnection = SolidFireUtil.getSolidFireConnection(storagePoolId, storagePoolDetailsDao);

            if (isBasicDelete(volumeId)) {
                performBasicDelete(sfConnection, volumeId);
            }
            else if (isBasicDeleteFailure(volumeId)) {
                performBasicDeleteFailure(sfConnection, volumeId);
            }
            else {
                deleteSolidFireVolume(sfConnection, volumeInfo);

                volumeDetailsDao.removeDetails(volumeId);

                StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId);

                long usedBytes = getUsedBytes(storagePool, volumeId);

                storagePool.setUsedBytes(usedBytes < 0 ? 0 : usedBytes);

                storagePoolDao.update(storagePoolId, storagePool);
            }
        }
        catch (Exception ex) {
            LOGGER.debug(SolidFireUtil.LOG_PREFIX + "Failed to delete SolidFire volume. CloudStack volume ID: " + volumeInfo.getId(), ex);
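
The isBasicDelete, isBasicDeleteFailure, getBasicSfVolumeId, and getBasicIqn helpers used by deleteVolume() above are not shown in this hunk. A plausible sketch is given below, assuming they are thin wrappers over volumeDetailsDao and the BASIC_* detail keys introduced here; treat the bodies as assumptions, not the committed implementations.

    // Sketch of the helper lookups referenced by deleteVolume() above (assumed, not
    // shown in this hunk). They simply test for / read the BASIC_* volume details.
    private boolean isBasicDelete(long volumeId) {
        return volumeDetailsDao.findDetail(volumeId, BASIC_DELETE) != null;
    }

    private boolean isBasicDeleteFailure(long volumeId) {
        return volumeDetailsDao.findDetail(volumeId, BASIC_DELETE_FAILURE) != null;
    }

    private Long getBasicSfVolumeId(long volumeId) {
        VolumeDetailVO detail = volumeDetailsDao.findDetail(volumeId, BASIC_SF_ID);

        return detail != null ? Long.valueOf(detail.getValue()) : null;
    }

    private String getBasicIqn(long volumeId) {
        VolumeDetailVO detail = volumeDetailsDao.findDetail(volumeId, BASIC_IQN);

        return detail != null ? detail.getValue() : null;
    }
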
@ -1205,12 +1205,15 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
                    srcHost.getHypervisorType(), srcHost.getHypervisorVersion());
            allHosts = allHostsPair.first();
            allHosts.remove(srcHost);

            for (final VolumeVO volume : volumes) {
                final StoragePool storagePool = _poolDao.findById(volume.getPoolId());
                final Long volClusterId = storagePool.getClusterId();

                for (final Iterator<HostVO> iterator = allHosts.iterator(); iterator.hasNext();) {
                    final Host host = iterator.next();

                    if (volClusterId != null) {
                        if (!host.getClusterId().equals(volClusterId) || usesLocal) {
                            if (hasSuitablePoolsForVolume(volume, host, vmProfile)) {
                                requiresStorageMotion.put(host, true);
@ -1219,8 +1222,16 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
                            }
                        }
                    }
                    else {
                        if (storagePool.isManaged()) {
                            if (srcHost.getClusterId() != host.getClusterId()) {
                                requiresStorageMotion.put(host, true);
                            }
                        }
                    }
                }
            }

            plan = new DataCenterDeployment(srcHost.getDataCenterId(), null, null, null, null, null);
        } else {
            final Long cluster = srcHost.getClusterId();
@ -1249,7 +1260,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
        }

        for (final HostAllocator allocator : hostAllocators) {
            if (canMigrateWithStorage) {
                suitableHosts = allocator.allocateTo(vmProfile, plan, Host.Type.Routing, excludes, allHosts, HostAllocator.RETURN_UPTO_ALL, false);
            } else {
                suitableHosts = allocator.allocateTo(vmProfile, plan, Host.Type.Routing, excludes, HostAllocator.RETURN_UPTO_ALL, false);
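
The behavioral change in this hunk: volumes on zone-wide pools (clusterId == null) are no longer ignored when deciding whether a destination host needs storage motion; for managed storage, any migration to a host in another cluster now requires it. The helper below is an illustrative restatement of that decision, not a method in the commit, and it leaves aside the hasSuitablePoolsForVolume() check used for cluster-scoped pools.

    // Illustrative restatement of the loop above (not part of ManagementServerImpl).
    // Cluster-scoped pools require storage motion when the target host is outside the
    // volume's cluster (or local storage is used); zone-wide managed pools require it
    // whenever the migration crosses cluster boundaries.
    private static boolean volumeRequiresStorageMotion(StoragePool pool, Host srcHost, Host destHost, boolean usesLocal) {
        Long volClusterId = pool.getClusterId();

        if (volClusterId != null) {
            // Cluster-scoped primary storage.
            return !destHost.getClusterId().equals(volClusterId) || usesLocal;
        }

        // Zone-wide primary storage: only managed storage (e.g. SolidFire) needs storage
        // motion, and only when source and destination hosts are in different clusters.
        return pool.isManaged() && !srcHost.getClusterId().equals(destHost.getClusterId());
    }
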
@ -21,6 +21,8 @@ import SignedAPICall
import time
import XenAPI

from util import sf_util

# All tests inherit from cloudstackTestCase
from marvin.cloudstackTestCase import cloudstackTestCase

@ -37,6 +39,15 @@ from marvin.lib.utils import cleanup_resources

from solidfire import solidfire_element_api as sf_api

# Prerequisites:
# Only one zone
# Only one pod
# Only one cluster (two hosts with another added/removed during the tests)
#
# Running the tests:
# Set a breakpoint on each test after the first one. When the breakpoint is hit, reset the third
# host to a snapshot state and re-start it. Once it's up and running, run the test code.


class TestData:
    account = "account"
@ -238,7 +249,7 @@ class TestAddRemoveHosts(cloudstackTestCase):
        try:
            cleanup_resources(cls.apiClient, cls._cleanup)

            cls._purge_solidfire_volumes()
            sf_util.purge_solidfire_volumes(cls.sf_client)
        except Exception as e:
            logging.debug("Exception in tearDownClass(cls): %s" % e)

@ -286,7 +297,7 @@ class TestAddRemoveHosts(cloudstackTestCase):

        root_volume = self._get_root_volume(self.virtual_machine)

        sf_iscsi_name = self._get_iqn(root_volume)
        sf_iscsi_name = sf_util.get_iqn(self.cs_api, root_volume, self)

        self._perform_add_remove_host(primary_storage.id, sf_iscsi_name)

@ -342,7 +353,7 @@ class TestAddRemoveHosts(cloudstackTestCase):

        root_volume = self._get_root_volume(self.virtual_machine)

        sf_iscsi_name = self._get_iqn(root_volume)
        sf_iscsi_name = sf_util.get_iqn(self.cs_api, root_volume, self)

        primarystorage2 = self.testdata[TestData.primaryStorage2]

@ -596,19 +607,6 @@ class TestAddRemoveHosts(cloudstackTestCase):

        self.assert_(False, "Unable to locate the ROOT volume of the VM with the following ID: " + str(vm.id))

    def _get_iqn(self, volume):
        # Get volume IQN
        sf_iscsi_name_request = {'volumeid': volume.id}
        # put this commented line back once PR 1403 is in
        # sf_iscsi_name_result = self.cs_api.getVolumeiScsiName(sf_iscsi_name_request)
        sf_iscsi_name_result = self.cs_api.getSolidFireVolumeIscsiName(sf_iscsi_name_request)
        # sf_iscsi_name = sf_iscsi_name_result['apivolumeiscsiname']['volumeiScsiName']
        sf_iscsi_name = sf_iscsi_name_result['apisolidfirevolumeiscsiname']['solidFireVolumeIscsiName']

        self._check_iscsi_name(sf_iscsi_name)

        return sf_iscsi_name

    def _get_iqn_2(self, primary_storage):
        sql_query = "Select path From storage_pool Where uuid = '" + str(primary_storage.id) + "'"

@ -617,13 +615,6 @@ class TestAddRemoveHosts(cloudstackTestCase):

        return sql_result[0][0]

    def _check_iscsi_name(self, sf_iscsi_name):
        self.assertEqual(
            sf_iscsi_name[0],
            "/",
            "The iSCSI name needs to start with a forward slash."
        )

    def _get_host_iscsi_iqns(self):
        hosts = self.xen_session.xenapi.host.get_all()

@ -687,24 +678,3 @@ class TestAddRemoveHosts(cloudstackTestCase):
        for host_iscsi_iqn in host_iscsi_iqns:
            # an error should occur if host_iscsi_iqn is not in sf_vag_initiators
            sf_vag_initiators.index(host_iscsi_iqn)

    def _check_list(self, in_list, expected_size_of_list, err_msg):
        self.assertEqual(
            isinstance(in_list, list),
            True,
            "'in_list' is not a list."
        )

        self.assertEqual(
            len(in_list),
            expected_size_of_list,
            err_msg
        )

    @classmethod
    def _purge_solidfire_volumes(cls):
        deleted_volumes = cls.sf_client.list_deleted_volumes()

        for deleted_volume in deleted_volumes:
            cls.sf_client.purge_deleted_volume(deleted_volume['volumeID'])
||||||
|
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
697
test/integration/plugins/solidfire/TestVMMigrationWithStorage.py
Normal file
697
test/integration/plugins/solidfire/TestVMMigrationWithStorage.py
Normal file
@ -0,0 +1,697 @@
|
|||||||
|
# Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
# or more contributor license agreements. See the NOTICE file
|
||||||
|
# distributed with this work for additional information
|
||||||
|
# regarding copyright ownership. The ASF licenses this file
|
||||||
|
# to you under the Apache License, Version 2.0 (the
|
||||||
|
# "License"); you may not use this file except in compliance
|
||||||
|
# with the License. You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing,
|
||||||
|
# software distributed under the License is distributed on an
|
||||||
|
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||||
|
# KIND, either express or implied. See the License for the
|
||||||
|
# specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
import logging
|
||||||
|
import random
|
||||||
|
import SignedAPICall
|
||||||
|
import XenAPI
|
||||||
|
|
||||||
|
from util import sf_util
|
||||||
|
|
||||||
|
# All tests inherit from cloudstackTestCase
|
||||||
|
from marvin.cloudstackTestCase import cloudstackTestCase
|
||||||
|
|
||||||
|
# base - contains all resources as entities and defines create, delete, list operations on them
|
||||||
|
from marvin.lib.base import Account, DiskOffering, ServiceOffering, StoragePool, User, VirtualMachine, Volume
|
||||||
|
|
||||||
|
# common - commonly used methods for all tests are listed here
|
||||||
|
from marvin.lib.common import get_domain, get_template, get_zone, list_clusters, list_hosts, list_volumes
|
||||||
|
|
||||||
|
# utils - utility classes for common cleanup, external library wrappers, etc.
|
||||||
|
from marvin.lib.utils import cleanup_resources
|
||||||
|
|
||||||
|
from solidfire import solidfire_element_api as sf_api
|
||||||
|
|
||||||
|
# Prerequisites:
|
||||||
|
# Only one zone
|
||||||
|
# Only one pod
|
||||||
|
# Two clusters
|
||||||
|
|
||||||
|
|
||||||
|
class TestData():
|
||||||
|
account = "account"
|
||||||
|
capacityBytes = "capacitybytes"
|
||||||
|
capacityIops = "capacityiops"
|
||||||
|
clusterId1 = "clusterId1"
|
||||||
|
clusterId2 = "clusterId2"
|
||||||
|
computeOffering1 = "computeoffering1"
|
||||||
|
computeOffering2 = "computeoffering2"
|
||||||
|
computeOffering3 = "computeoffering3"
|
||||||
|
diskName = "diskname"
|
||||||
|
diskOffering1 = "diskoffering1"
|
||||||
|
diskOffering2 = "diskoffering2"
|
||||||
|
domainId = "domainid"
|
||||||
|
hypervisor = "hypervisor"
|
||||||
|
login = "login"
|
||||||
|
mvip = "mvip"
|
||||||
|
name = "name"
|
||||||
|
password = "password"
|
||||||
|
podId = "podid"
|
||||||
|
port = "port"
|
||||||
|
primaryStorage = "primarystorage"
|
||||||
|
primaryStorage2 = "primarystorage2"
|
||||||
|
provider = "provider"
|
||||||
|
scope = "scope"
|
||||||
|
solidFire = "solidfire"
|
||||||
|
storageTag = "SolidFire_SAN_1"
|
||||||
|
storageTag2 = "SolidFire_Volume_1"
|
||||||
|
tags = "tags"
|
||||||
|
templateCacheName = "centos56-x86-64-xen"
|
||||||
|
templateName = "templatename"
|
||||||
|
testAccount = "testaccount"
|
||||||
|
url = "url"
|
||||||
|
user = "user"
|
||||||
|
username = "username"
|
||||||
|
virtualMachine = "virtualmachine"
|
||||||
|
virtualMachine2 = "virtualmachine2"
|
||||||
|
volume_1 = "volume_1"
|
||||||
|
xenServer = "xenserver"
|
||||||
|
zoneId = "zoneid"
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
self.testdata = {
|
||||||
|
TestData.solidFire: {
|
||||||
|
TestData.mvip: "192.168.139.112",
|
||||||
|
TestData.login: "admin",
|
||||||
|
TestData.password: "admin",
|
||||||
|
TestData.port: 443,
|
||||||
|
TestData.url: "https://192.168.139.112:443"
|
||||||
|
},
|
||||||
|
TestData.xenServer: {
|
||||||
|
TestData.username: "root",
|
||||||
|
TestData.password: "solidfire"
|
||||||
|
},
|
||||||
|
TestData.account: {
|
||||||
|
"email": "test@test.com",
|
||||||
|
"firstname": "John",
|
||||||
|
"lastname": "Doe",
|
||||||
|
"username": "test",
|
||||||
|
"password": "test"
|
||||||
|
},
|
||||||
|
TestData.testAccount: {
|
||||||
|
"email": "test2@test2.com",
|
||||||
|
"firstname": "Jane",
|
||||||
|
"lastname": "Doe",
|
||||||
|
"username": "test2",
|
||||||
|
"password": "test"
|
||||||
|
},
|
||||||
|
TestData.user: {
|
||||||
|
"email": "user@test.com",
|
||||||
|
"firstname": "Jane",
|
||||||
|
"lastname": "Doe",
|
||||||
|
"username": "testuser",
|
||||||
|
"password": "password"
|
||||||
|
},
|
||||||
|
TestData.primaryStorage: {
|
||||||
|
TestData.name: "SolidFire-%d" % random.randint(0, 100),
|
||||||
|
TestData.scope: "ZONE",
|
||||||
|
TestData.url: "MVIP=192.168.139.112;SVIP=10.10.8.112;" +
|
||||||
|
"clusterAdminUsername=admin;clusterAdminPassword=admin;" +
|
||||||
|
"clusterDefaultMinIops=10000;clusterDefaultMaxIops=15000;" +
|
||||||
|
"clusterDefaultBurstIopsPercentOfMaxIops=1.5;",
|
||||||
|
TestData.provider: "SolidFire",
|
||||||
|
TestData.tags: TestData.storageTag,
|
||||||
|
TestData.capacityIops: 4500000,
|
||||||
|
TestData.capacityBytes: 2251799813685248,
|
||||||
|
TestData.hypervisor: "Any",
|
||||||
|
TestData.zoneId: 1
|
||||||
|
},
|
||||||
|
TestData.primaryStorage2: {
|
||||||
|
TestData.name: "SolidFireShared-%d" % random.randint(0, 100),
|
||||||
|
TestData.scope: "CLUSTER",
|
||||||
|
TestData.url: "MVIP=192.168.139.112;SVIP=10.10.8.112;" +
|
||||||
|
"clusterAdminUsername=admin;clusterAdminPassword=admin;" +
|
||||||
|
"minIops=5000;maxIops=50000;burstIops=75000",
|
||||||
|
TestData.provider: "SolidFireShared",
|
||||||
|
TestData.tags: TestData.storageTag2,
|
||||||
|
TestData.capacityIops: 5000,
|
||||||
|
TestData.capacityBytes: 1099511627776,
|
||||||
|
TestData.hypervisor: "XenServer",
|
||||||
|
TestData.podId: 1,
|
||||||
|
TestData.zoneId: 1
|
||||||
|
},
|
||||||
|
TestData.virtualMachine: {
|
||||||
|
"name": "TestVM",
|
||||||
|
"displayname": "Test VM"
|
||||||
|
},
|
||||||
|
TestData.computeOffering1: {
|
||||||
|
"name": "SF_CO_1",
|
||||||
|
"displaytext": "SF_CO_1 (Min IOPS = 1,000; Max IOPS = 2,000)",
|
||||||
|
"cpunumber": 1,
|
||||||
|
"cpuspeed": 100,
|
||||||
|
"memory": 128,
|
||||||
|
"storagetype": "shared",
|
||||||
|
"customizediops": False,
|
||||||
|
"miniops": 1000,
|
||||||
|
"maxiops": 2000,
|
||||||
|
"hypervisorsnapshotreserve": 125,
|
||||||
|
TestData.tags: TestData.storageTag,
|
||||||
|
},
|
||||||
|
TestData.computeOffering2: {
|
||||||
|
"name": "SF_CO_2",
|
||||||
|
"displaytext": "SF_CO_2 (Min IOPS = 1,000; Max IOPS = 2,000)",
|
||||||
|
"cpunumber": 1,
|
||||||
|
"cpuspeed": 100,
|
||||||
|
"memory": 128,
|
||||||
|
"storagetype": "shared",
|
||||||
|
"customizediops": False,
|
||||||
|
"miniops": 1000,
|
||||||
|
"maxiops": 2000,
|
||||||
|
"hypervisorsnapshotreserve": 100,
|
||||||
|
TestData.tags: TestData.storageTag,
|
||||||
|
},
|
||||||
|
TestData.computeOffering3: {
|
||||||
|
"name": "SF_CO_3",
|
||||||
|
"displaytext": "SF_CO_3 Desc",
|
||||||
|
"cpunumber": 1,
|
||||||
|
"cpuspeed": 100,
|
||||||
|
"memory": 128,
|
||||||
|
"storagetype": "shared",
|
||||||
|
TestData.tags: TestData.storageTag2,
|
||||||
|
},
|
||||||
|
TestData.diskOffering1: {
|
||||||
|
"name": "SF_DO_1",
|
||||||
|
"displaytext": "SF_DO_1 (Min IOPS = 3,000; Max IOPS = 6,000)",
|
||||||
|
"disksize": 100,
|
||||||
|
"customizediops": False,
|
||||||
|
"miniops": 3000,
|
||||||
|
"maxiops": 6000,
|
||||||
|
"hypervisorsnapshotreserve": 125,
|
||||||
|
TestData.tags: TestData.storageTag,
|
||||||
|
"storagetype": "shared"
|
||||||
|
},
|
||||||
|
TestData.diskOffering2: {
|
||||||
|
"name": "SF_DO_2",
|
||||||
|
"displaytext": "SF_DO_2 (Min IOPS = 3,000; Max IOPS = 6,000)",
|
||||||
|
"disksize": 100,
|
||||||
|
"customizediops": False,
|
||||||
|
"miniops": 3000,
|
||||||
|
"maxiops": 6000,
|
||||||
|
"hypervisorsnapshotreserve": 100,
|
||||||
|
TestData.tags: TestData.storageTag,
|
||||||
|
"storagetype": "shared"
|
||||||
|
},
|
||||||
|
TestData.volume_1: {
|
||||||
|
TestData.diskName: "test-volume",
|
||||||
|
},
|
||||||
|
TestData.templateName: "CentOS 5.6(64-bit) no GUI (XenServer)",
|
||||||
|
TestData.zoneId: 1,
|
||||||
|
TestData.clusterId1: 1,
|
||||||
|
TestData.clusterId2: 2,
|
||||||
|
TestData.domainId: 1,
|
||||||
|
TestData.url: "192.168.129.50"
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
class TestVMMigrationWithStorage(cloudstackTestCase):
|
||||||
|
_sf_account_id_should_be_non_zero_int_err_msg = "The SolidFire account ID should be a non-zero integer."
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def setUpClass(cls):
|
||||||
|
# Set up API client
|
||||||
|
testclient = super(TestVMMigrationWithStorage, cls).getClsTestClient()
|
||||||
|
cls.apiClient = testclient.getApiClient()
|
||||||
|
cls.dbConnection = testclient.getDbConnection()
|
||||||
|
|
||||||
|
cls.testdata = TestData().testdata
|
||||||
|
|
||||||
|
xenserver = cls.testdata[TestData.xenServer]
|
||||||
|
|
||||||
|
# Set up xenAPI connection
|
||||||
|
host_ip = "https://" + \
|
||||||
|
list_hosts(cls.apiClient, clusterid=cls.testdata[TestData.clusterId1], name="XenServer-6.5-1")[0].ipaddress
|
||||||
|
|
||||||
|
# Set up XenAPI connection
|
||||||
|
cls.xen_session_1 = XenAPI.Session(host_ip)
|
||||||
|
|
||||||
|
cls.xen_session_1.xenapi.login_with_password(xenserver[TestData.username], xenserver[TestData.password])
|
||||||
|
|
||||||
|
# Set up xenAPI connection
|
||||||
|
host_ip = "https://" + \
|
||||||
|
list_hosts(cls.apiClient, clusterid=cls.testdata[TestData.clusterId2], name="XenServer-6.5-3")[0].ipaddress
|
||||||
|
|
||||||
|
# Set up XenAPI connection
|
||||||
|
cls.xen_session_2 = XenAPI.Session(host_ip)
|
||||||
|
|
||||||
|
cls.xen_session_2.xenapi.login_with_password(xenserver[TestData.username], xenserver[TestData.password])
|
||||||
|
|
||||||
|
# Set up SolidFire connection
|
||||||
|
cls.sf_client = sf_api.SolidFireAPI(endpoint_dict=cls.testdata[TestData.solidFire])
|
||||||
|
|
||||||
|
# Get Resources from Cloud Infrastructure
|
||||||
|
cls.zone = get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId])
|
||||||
|
cls.cluster_1 = list_clusters(cls.apiClient, id=cls.testdata[TestData.clusterId1])[0]
|
||||||
|
cls.cluster_2 = list_clusters(cls.apiClient, id=cls.testdata[TestData.clusterId2])[0]
|
||||||
|
cls.template = get_template(cls.apiClient, cls.zone.id, template_name=cls.testdata[TestData.templateName])
|
||||||
|
cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId])
|
||||||
|
|
||||||
|
# Create test account
|
||||||
|
cls.account = Account.create(
|
||||||
|
cls.apiClient,
|
||||||
|
cls.testdata["account"],
|
||||||
|
admin=1
|
||||||
|
)
|
||||||
|
|
||||||
|
# Set up connection to make customized API calls
|
||||||
|
cls.user = User.create(
|
||||||
|
cls.apiClient,
|
||||||
|
cls.testdata["user"],
|
||||||
|
account=cls.account.name,
|
||||||
|
domainid=cls.domain.id
|
||||||
|
)
|
||||||
|
|
||||||
|
url = cls.testdata[TestData.url]
|
||||||
|
|
||||||
|
api_url = "http://" + url + ":8080/client/api"
|
||||||
|
userkeys = User.registerUserKeys(cls.apiClient, cls.user.id)
|
||||||
|
|
||||||
|
cls.cs_api = SignedAPICall.CloudStack(api_url, userkeys.apikey, userkeys.secretkey)
|
||||||
|
|
||||||
|
primarystorage = cls.testdata[TestData.primaryStorage]
|
||||||
|
|
||||||
|
cls.primary_storage = StoragePool.create(
|
||||||
|
cls.apiClient,
|
||||||
|
primarystorage
|
||||||
|
)
|
||||||
|
|
||||||
|
cls.compute_offering_1 = ServiceOffering.create(
|
||||||
|
cls.apiClient,
|
||||||
|
cls.testdata[TestData.computeOffering1]
|
||||||
|
)
|
||||||
|
|
||||||
|
cls.compute_offering_2 = ServiceOffering.create(
|
||||||
|
cls.apiClient,
|
||||||
|
cls.testdata[TestData.computeOffering2]
|
||||||
|
)
|
||||||
|
|
||||||
|
cls.compute_offering_3 = ServiceOffering.create(
|
||||||
|
cls.apiClient,
|
||||||
|
cls.testdata[TestData.computeOffering3]
|
||||||
|
)
|
||||||
|
|
||||||
|
cls.disk_offering_1 = DiskOffering.create(
|
||||||
|
cls.apiClient,
|
||||||
|
cls.testdata[TestData.diskOffering1]
|
||||||
|
)
|
||||||
|
|
||||||
|
cls.disk_offering_2 = DiskOffering.create(
|
||||||
|
cls.apiClient,
|
||||||
|
cls.testdata[TestData.diskOffering2]
|
||||||
|
)
|
||||||
|
|
||||||
|
# Resources that are to be destroyed
|
||||||
|
cls._cleanup = [
|
||||||
|
cls.compute_offering_1,
|
||||||
|
cls.compute_offering_2,
|
||||||
|
cls.compute_offering_3,
|
||||||
|
cls.disk_offering_1,
|
||||||
|
cls.disk_offering_2,
|
||||||
|
cls.user,
|
||||||
|
cls.account
|
||||||
|
]
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def tearDownClass(cls):
|
||||||
|
try:
|
||||||
|
cleanup_resources(cls.apiClient, cls._cleanup)
|
||||||
|
|
||||||
|
cls.primary_storage.delete(cls.apiClient)
|
||||||
|
except Exception as e:
|
||||||
|
logging.debug("Exception in tearDownClass(cls): %s" % e)
|
||||||
|
|
||||||
|
def setUp(self):
|
||||||
|
self.cleanup = []
|
||||||
|
|
||||||
|
def tearDown(self):
|
||||||
|
try:
|
||||||
|
cleanup_resources(self.apiClient, self.cleanup)
|
||||||
|
|
||||||
|
sf_util.purge_solidfire_volumes(self.sf_client)
|
||||||
|
except Exception as e:
|
||||||
|
logging.debug("Exception in tearDownClass(self): %s" % e)
|
||||||
|
|
||||||
|
def test_01_storage_migrate_root_and_data_disks(self):
|
||||||
|
src_host, dest_host = self._get_source_and_dest_hosts()
|
||||||
|
|
||||||
|
virtual_machine = VirtualMachine.create(
|
||||||
|
self.apiClient,
|
||||||
|
self.testdata[TestData.virtualMachine],
|
||||||
|
accountid=self.account.name,
|
||||||
|
zoneid=self.zone.id,
|
||||||
|
serviceofferingid=self.compute_offering_1.id,
|
||||||
|
templateid=self.template.id,
|
||||||
|
domainid=self.domain.id,
|
||||||
|
hostid=src_host.id,
|
||||||
|
startvm=True
|
||||||
|
)
|
||||||
|
|
||||||
|
self.cleanup.append(virtual_machine)
|
||||||
|
|
||||||
|
cs_root_volume = list_volumes(self.apiClient, listall=True, virtualmachineid=virtual_machine.id)[0]
|
||||||
|
|
||||||
|
sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self,
|
||||||
|
TestVMMigrationWithStorage._sf_account_id_should_be_non_zero_int_err_msg)
|
||||||
|
|
||||||
|
sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
|
||||||
|
|
||||||
|
sf_root_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_root_volume.name, self)
|
||||||
|
|
||||||
|
cs_data_volume = Volume.create(
|
||||||
|
self.apiClient,
|
||||||
|
self.testdata[TestData.volume_1],
|
||||||
|
account=self.account.name,
|
||||||
|
domainid=self.domain.id,
|
||||||
|
zoneid=self.zone.id,
|
||||||
|
diskofferingid=self.disk_offering_1.id
|
||||||
|
)
|
||||||
|
|
||||||
|
self.cleanup.append(cs_data_volume)
|
||||||
|
|
||||||
|
cs_data_volume = virtual_machine.attach_volume(
|
||||||
|
self.apiClient,
|
||||||
|
cs_data_volume
|
||||||
|
)
|
||||||
|
|
||||||
|
sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
|
||||||
|
|
||||||
|
sf_data_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_data_volume.name, self)
|
||||||
|
|
||||||
|
sf_root_volume, sf_data_volume = self._migrate_and_verify(virtual_machine, dest_host, cs_root_volume, cs_data_volume, sf_account_id,
|
||||||
|
sf_root_volume, sf_data_volume, self.xen_session_1, self.xen_session_2)
|
||||||
|
|
||||||
|
src_host, dest_host = dest_host, src_host
|
||||||
|
|
||||||
|
self._migrate_and_verify(virtual_machine, dest_host, cs_root_volume, cs_data_volume, sf_account_id, sf_root_volume, sf_data_volume,
|
||||||
|
self.xen_session_2, self.xen_session_1)
|
||||||
|
|
||||||
|
def test_02_storage_migrate_root_and_data_disks(self):
|
||||||
|
primarystorage2 = self.testdata[TestData.primaryStorage2]
|
||||||
|
|
||||||
|
primary_storage_2 = StoragePool.create(
|
||||||
|
self.apiClient,
|
||||||
|
primarystorage2,
|
||||||
|
clusterid=self.cluster_1.id
|
||||||
|
)
|
||||||
|
|
||||||
|
primary_storage_3 = StoragePool.create(
|
||||||
|
self.apiClient,
|
||||||
|
primarystorage2,
|
||||||
|
clusterid=self.cluster_2.id
|
||||||
|
)
|
||||||
|
|
||||||
|
src_host, dest_host = self._get_source_and_dest_hosts()
|
||||||
|
|
||||||
|
virtual_machine = VirtualMachine.create(
|
||||||
|
self.apiClient,
|
||||||
|
self.testdata[TestData.virtualMachine],
|
||||||
|
accountid=self.account.name,
|
||||||
|
zoneid=self.zone.id,
|
||||||
|
serviceofferingid=self.compute_offering_3.id,
|
||||||
|
templateid=self.template.id,
|
||||||
|
domainid=self.domain.id,
|
||||||
|
hostid=src_host.id,
|
||||||
|
startvm=True
|
||||||
|
)
|
||||||
|
|
||||||
|
cs_data_volume = Volume.create(
|
||||||
|
self.apiClient,
|
||||||
|
self.testdata[TestData.volume_1],
|
||||||
|
account=self.account.name,
|
||||||
|
domainid=self.domain.id,
|
||||||
|
zoneid=self.zone.id,
|
||||||
|
diskofferingid=self.disk_offering_1.id
|
||||||
|
)
|
||||||
|
|
||||||
|
self.cleanup = [
|
||||||
|
virtual_machine,
|
||||||
|
cs_data_volume,
|
||||||
|
primary_storage_2,
|
||||||
|
primary_storage_3
|
||||||
|
]
|
||||||
|
|
||||||
|
cs_data_volume = virtual_machine.attach_volume(
|
||||||
|
self.apiClient,
|
||||||
|
cs_data_volume
|
||||||
|
)
|
||||||
|
|
||||||
|
sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self,
|
||||||
|
TestVMMigrationWithStorage._sf_account_id_should_be_non_zero_int_err_msg)
|
||||||
|
|
||||||
|
sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
|
||||||
|
|
||||||
|
sf_data_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_data_volume.name, self)
|
||||||
|
|
||||||
|
sf_data_volume = self._migrate_and_verify_one_disk_only(virtual_machine, dest_host, cs_data_volume, sf_account_id,
|
||||||
|
sf_data_volume, self.xen_session_1, self.xen_session_2)
|
||||||
|
|
||||||
|
src_host, dest_host = dest_host, src_host
|
||||||
|
|
||||||
|
self._migrate_and_verify_one_disk_only(virtual_machine, dest_host, cs_data_volume, sf_account_id, sf_data_volume,
|
||||||
|
self.xen_session_2, self.xen_session_1)
|
||||||
|
|
||||||
|
# The hypervisor snapshot reserve isn't large enough for either the compute or disk offering.
|
||||||
|
def test_03_storage_migrate_root_and_data_disks_fail(self):
|
||||||
|
self._execute_migration_failure(self.compute_offering_2.id, self.disk_offering_2.id)
|
||||||
|
|
||||||
|
# The hypervisor snapshot reserve isn't large enough for the compute offering.
|
||||||
|
def test_04_storage_migrate_root_disk_fails(self):
|
||||||
|
self._execute_migration_failure(self.compute_offering_2.id, self.disk_offering_1.id)
|
||||||
|
|
||||||
|
# The hypervisor snapshot reserve isn't large enough for the disk offering.
|
||||||
|
def test_05_storage_migrate_data_disk_fails(self):
|
||||||
|
self._execute_migration_failure(self.compute_offering_1.id, self.disk_offering_2.id)
|
||||||
|
|
||||||
|
def _execute_migration_failure(self, compute_offering_id, disk_offering_id):
|
||||||
|
src_host, dest_host = self._get_source_and_dest_hosts()
|
||||||
|
|
||||||
|
virtual_machine = VirtualMachine.create(
|
||||||
|
self.apiClient,
|
||||||
|
self.testdata[TestData.virtualMachine],
|
||||||
|
accountid=self.account.name,
|
||||||
|
zoneid=self.zone.id,
|
||||||
|
serviceofferingid=compute_offering_id,
|
||||||
|
templateid=self.template.id,
|
||||||
|
domainid=self.domain.id,
|
||||||
|
hostid=src_host.id,
|
||||||
|
startvm=True
|
||||||
|
)
|
||||||
|
|
||||||
|
self.cleanup.append(virtual_machine)
|
||||||
|
|
||||||
|
cs_root_volume = list_volumes(self.apiClient, listall=True, virtualmachineid=virtual_machine.id)[0]
|
||||||
|
|
||||||
|
sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self,
|
||||||
|
TestVMMigrationWithStorage._sf_account_id_should_be_non_zero_int_err_msg)
|
||||||
|
|
||||||
|
sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
|
||||||
|
|
||||||
|
sf_root_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_root_volume.name, self)
|
||||||
|
|
||||||
|
cs_data_volume = Volume.create(
|
||||||
|
self.apiClient,
|
||||||
|
self.testdata[TestData.volume_1],
|
||||||
|
account=self.account.name,
|
||||||
|
domainid=self.domain.id,
|
||||||
|
zoneid=self.zone.id,
|
||||||
|
diskofferingid=disk_offering_id
|
||||||
|
)
|
||||||
|
|
||||||
|
self.cleanup.append(cs_data_volume)
|
||||||
|
|
||||||
|
cs_data_volume = virtual_machine.attach_volume(
|
||||||
|
self.apiClient,
|
||||||
|
cs_data_volume
|
||||||
|
)
|
||||||
|
|
||||||
|
sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
|
||||||
|
|
||||||
|
sf_data_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_data_volume.name, self)
|
||||||
|
|
||||||
|
self._fail_migrate_and_verify(virtual_machine, dest_host, cs_root_volume, cs_data_volume, sf_account_id,
|
||||||
|
sf_root_volume, sf_data_volume, self.xen_session_1, self.xen_session_2)
|
||||||
|
|
||||||
|
def _get_source_and_dest_hosts(self):
|
||||||
|
hosts = list_hosts(self.apiClient)
|
||||||
|
|
||||||
|
for host in hosts:
|
||||||
|
if host.name == "XenServer-6.5-1":
|
||||||
|
src_host = host
|
||||||
|
elif host.name == "XenServer-6.5-3":
|
||||||
|
dest_host = host
|
||||||
|
|
||||||
|
self.assertIsNotNone(src_host, "Could not locate the source host")
|
||||||
|
|
||||||
|
self.assertIsNotNone(dest_host, "Could not locate the destination host")
|
||||||
|
|
||||||
|
return src_host, dest_host
|
||||||
|
|
||||||
|
def _migrate_and_verify(self, virtual_machine, dest_host, cs_root_volume, cs_data_volume, sf_account_id, src_sf_root_volume, src_sf_data_volume,
|
||||||
|
src_xen_session, dest_xen_session):
|
||||||
|
self._verifyFields(cs_root_volume, src_sf_root_volume)
|
||||||
|
self._verifyFields(cs_data_volume, src_sf_data_volume)
|
||||||
|
|
||||||
|
virtual_machine.migrate_vm_with_volume(self.apiClient, dest_host.id)
|
||||||
|
|
||||||
|
cs_root_volume = self._get_updated_cs_volume(cs_root_volume.id)
|
||||||
|
cs_data_volume = self._get_updated_cs_volume(cs_data_volume.id)
|
||||||
|
|
||||||
|
sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
|
||||||
|
|
||||||
|
dest_sf_root_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_root_volume.name, self)
|
||||||
|
dest_sf_data_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_data_volume.name, self)
|
||||||
|
|
||||||
|
self._verifyFields(cs_root_volume, dest_sf_root_volume)
|
||||||
|
self._verifyFields(cs_data_volume, dest_sf_data_volume)
|
||||||
|
|
||||||
|
self._verify_no_basic_volume_details()
|
||||||
|
|
||||||
|
self._verify_different_volume_access_groups(src_sf_root_volume, dest_sf_root_volume)
|
||||||
|
self._verify_different_volume_access_groups(src_sf_data_volume, dest_sf_data_volume)
|
||||||
|
|
||||||
|
self._verify_same_account(src_sf_root_volume, dest_sf_root_volume)
|
||||||
|
self._verify_same_account(src_sf_data_volume, dest_sf_data_volume)
|
||||||
|
|
||||||
|
self._verifySfVolumeIds(src_sf_root_volume, dest_sf_root_volume)
|
||||||
|
self._verifySfVolumeIds(src_sf_data_volume, dest_sf_data_volume)
|
||||||
|
|
||||||
|
self._verify_xenserver_state(src_xen_session, src_sf_root_volume, dest_xen_session, dest_sf_root_volume)
|
||||||
|
self._verify_xenserver_state(src_xen_session, src_sf_data_volume, dest_xen_session, dest_sf_data_volume)
|
||||||
|
|
||||||
|
return dest_sf_root_volume, dest_sf_data_volume
|
||||||
|
|
||||||
|
def _migrate_and_verify_one_disk_only(self, virtual_machine, dest_host, cs_volume, sf_account_id, src_sf_volume, src_xen_session, dest_xen_session):
|
||||||
|
self._verifyFields(cs_volume, src_sf_volume)
|
||||||
|
|
||||||
|
virtual_machine.migrate_vm_with_volume(self.apiClient, dest_host.id)
|
||||||
|
|
||||||
|
cs_volume = self._get_updated_cs_volume(cs_volume.id)
|
||||||
|
|
||||||
|
sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
|
||||||
|
|
||||||
|
dest_sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_volume.name, self)
|
||||||
|
|
||||||
|
self._verifyFields(cs_volume, dest_sf_volume)
|
||||||
|
|
||||||
|
self._verify_no_basic_volume_details()
|
||||||
|
|
||||||
|
self._verify_different_volume_access_groups(src_sf_volume, dest_sf_volume)
|
||||||
|
|
||||||
|
self._verify_same_account(src_sf_volume, dest_sf_volume)
|
||||||
|
|
||||||
|
self._verifySfVolumeIds(src_sf_volume, dest_sf_volume)
|
||||||
|
|
||||||
|
self._verify_xenserver_state(src_xen_session, src_sf_volume, dest_xen_session, dest_sf_volume)
|
||||||
|
|
||||||
|
return dest_sf_volume
|
||||||
|
|
||||||
|
def _fail_migrate_and_verify(self, virtual_machine, dest_host, cs_root_volume, cs_data_volume, sf_account_id, src_sf_root_volume, src_sf_data_volume,
|
||||||
|
src_xen_session, dest_xen_session):
|
||||||
|
self._verifyFields(cs_root_volume, src_sf_root_volume)
|
||||||
|
self._verifyFields(cs_data_volume, src_sf_data_volume)
|
||||||
|
|
||||||
|
class MigrationException(Exception):
|
||||||
|
def __init__(self, *args, **kwargs):
|
||||||
|
Exception.__init__(self, *args, **kwargs)
|
||||||
|
|
||||||
|
try:
|
||||||
|
virtual_machine.migrate_vm_with_volume(self.apiClient, dest_host.id)
|
||||||
|
|
||||||
|
raise MigrationException("The migration did not fail (as expected).")
|
||||||
|
except MigrationException:
|
||||||
|
raise
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
|
||||||
|
self._verify_no_basic_volume_details()
|
||||||
|
|
||||||
|
cs_root_volume_refreshed = self._get_updated_cs_volume(cs_root_volume.id)
|
||||||
|
cs_data_volume_refreshed = self._get_updated_cs_volume(cs_data_volume.id)
|
||||||
|
|
||||||
|
self._verifyFields(cs_root_volume_refreshed, src_sf_root_volume)
|
||||||
|
self._verifyFields(cs_data_volume_refreshed, src_sf_data_volume)
|
||||||
|
|
||||||
|
sf_volumes = sf_util.get_not_active_sf_volumes(self.sf_client, sf_account_id)
|
||||||
|
|
||||||
|
dest_sf_root_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_root_volume.name, self)
|
||||||
|
dest_sf_data_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_data_volume.name, self)
|
||||||
|
|
||||||
|
self._verify_xenserver_state(dest_xen_session, dest_sf_root_volume, src_xen_session, src_sf_root_volume)
|
||||||
|
self._verify_xenserver_state(dest_xen_session, dest_sf_data_volume, src_xen_session, src_sf_data_volume)
|
||||||
|
|
||||||
|
def _verify_different_volume_access_groups(self, src_sf_volume, dest_sf_volume):
|
||||||
|
src_vags = src_sf_volume['volumeAccessGroups']
|
||||||
|
|
||||||
|
sf_util.check_list(src_vags, 1, self, "'src_vags' should be a list with only one element in it.")
|
||||||
|
|
||||||
|
dest_vags = dest_sf_volume['volumeAccessGroups']
|
||||||
|
|
||||||
|
sf_util.check_list(dest_vags, 1, self, "'dest_vags' should be a list with only one element in it.")
|
||||||
|
|
||||||
|
self.assertNotEqual(src_vags[0], dest_vags[0], "The source and destination volumes should not be in the same volume access group.")
|
||||||
|
|
||||||
|
def _get_updated_cs_volume(self, cs_volume_id):
|
||||||
|
return list_volumes(self.apiClient, listall=True, id=cs_volume_id)[0]
|
||||||
|
|
||||||
|
def _verify_same_account(self, src_sf_volume, dest_sf_volume):
|
||||||
|
self.assertEqual(src_sf_volume['accountID'], dest_sf_volume['accountID'], "The source and destination volumes should be in the same SolidFire account.")
|
||||||
|
|
||||||
|
def _verifySfVolumeIds(self, src_sf_volume, dest_sf_volume):
|
||||||
|
self.assert_(src_sf_volume['volumeID'] < dest_sf_volume['volumeID'],
|
||||||
|
"The destination SolidFire root volume's ID should be greater than the id of the source one.")
|
||||||
|
|
||||||
|
# verify the name, folder, and iscsi_name
|
||||||
|
def _verifyFields(self, cs_volume, sf_volume):
|
||||||
|
self.assert_(cs_volume.name == sf_volume['name'], "The CloudStack volume name does not match the SolidFire volume name.")
|
||||||
|
|
||||||
|
cs_volume_folder = self._get_cs_volume_folder(cs_volume.id)
|
||||||
|
|
||||||
|
self.assert_(int(cs_volume_folder) == sf_volume['volumeID'], "The CloudStack folder name does not match the SolidFire volume ID.")
|
||||||
|
|
||||||
|
cs_volume_iscsi_name = self._get_cs_volume_iscsi_name(cs_volume.id)
|
||||||
|
|
||||||
|
self.assert_(cs_volume_iscsi_name == sf_util.format_iqn(sf_volume['iqn']), "The CloudStack volume iscsi_name does not match the SolidFire volume IQN.")
|
||||||
|
|
||||||
|
def _get_cs_volume_property(self, cs_volume_id, volume_property):
|
||||||
|
sql_query = "Select " + volume_property + " From volumes Where uuid = '" + cs_volume_id + "'"
|
||||||
|
|
||||||
|
# make sure you can connect to MySQL: https://teamtreehouse.com/community/cant-connect-remotely-to-mysql-server-with-mysql-workbench
|
||||||
|
sql_result = self.dbConnection.execute(sql_query)
|
||||||
|
|
||||||
|
return sql_result[0][0]
|
||||||
|
|
||||||
|
def _get_cs_volume_folder(self, cs_volume_id):
|
||||||
|
return self._get_cs_volume_property(cs_volume_id, "folder")
|
||||||
|
|
||||||
|
def _get_cs_volume_iscsi_name(self, cs_volume_id):
|
||||||
|
return self._get_cs_volume_property(cs_volume_id, "iscsi_name")
|
||||||
|
|
||||||
|
def _verify_no_basic_volume_details(self):
|
||||||
|
sql_query = "Select id From volume_details Where name like 'basic_'"
|
||||||
|
|
||||||
|
# make sure you can connect to MySQL: https://teamtreehouse.com/community/cant-connect-remotely-to-mysql-server-with-mysql-workbench
|
||||||
|
sql_result = self.dbConnection.execute(sql_query)
|
||||||
|
|
||||||
|
sf_util.check_list(sql_result, 0, self, "The cloud.volume_details table should not have any name fields that start with 'basic_'.")
|
||||||
|
|
||||||
|
def _verify_xenserver_state(self, xen_session_1, sf_volume_1, xen_session_2, sf_volume_2):
|
||||||
|
sr_name = sf_util.format_iqn(sf_volume_1["iqn"])
|
||||||
|
|
||||||
|
sf_util.check_xen_sr(sr_name, xen_session_1, self, False)
|
||||||
|
|
||||||
|
sr_name = sf_util.format_iqn(sf_volume_2["iqn"])
|
||||||
|
|
||||||
|
sf_util.check_xen_sr(sr_name, xen_session_2, self)
|
||||||
@ -20,6 +20,8 @@ import random
|
|||||||
import SignedAPICall
|
import SignedAPICall
|
||||||
import XenAPI
|
import XenAPI
|
||||||
|
|
||||||
|
from util import sf_util
|
||||||
|
|
||||||
# All tests inherit from cloudstackTestCase
|
# All tests inherit from cloudstackTestCase
|
||||||
from marvin.cloudstackTestCase import cloudstackTestCase
|
from marvin.cloudstackTestCase import cloudstackTestCase
|
||||||
|
|
||||||
@ -36,8 +38,10 @@ from marvin.lib.utils import cleanup_resources
|
|||||||
|
|
||||||
from solidfire import solidfire_element_api as sf_api
|
from solidfire import solidfire_element_api as sf_api
|
||||||
|
|
||||||
# on April 15, 2016: Ran 2 tests in 800.299s with three hosts
|
# Prerequisites:
|
||||||
# on May 2, 2016: Ran 2 tests in 789.729s with two hosts
|
# Only one zone
|
||||||
|
# Only one pod
|
||||||
|
# Only one cluster
|
||||||
|
|
||||||
|
|
||||||
class TestData:
|
class TestData:
|
||||||
@ -328,7 +332,7 @@ class TestVMSnapshots(cloudstackTestCase):
|
|||||||
|
|
||||||
cls.primary_storage.delete(cls.apiClient)
|
cls.primary_storage.delete(cls.apiClient)
|
||||||
|
|
||||||
cls._purge_solidfire_volumes()
|
sf_util.purge_solidfire_volumes(cls.sf_client)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logging.debug("Exception in tearDownClass(cls): %s" % e)
|
logging.debug("Exception in tearDownClass(cls): %s" % e)
|
||||||
|
|
||||||
@ -346,7 +350,7 @@ class TestVMSnapshots(cloudstackTestCase):
|
|||||||
|
|
||||||
root_volumes = list_volumes(self.apiClient, type="ROOT", listAll="true")
|
root_volumes = list_volumes(self.apiClient, type="ROOT", listAll="true")
|
||||||
|
|
||||||
self._check_list(root_volumes, 1, TestVMSnapshots._should_only_be_one_root_volume_err_msg)
|
sf_util.check_list(root_volumes, 1, self, TestVMSnapshots._should_only_be_one_root_volume_err_msg)
|
||||||
|
|
||||||
root_volume = root_volumes[0]
|
root_volume = root_volumes[0]
|
||||||
|
|
||||||
@ -355,7 +359,7 @@ class TestVMSnapshots(cloudstackTestCase):
|
|||||||
sf_iscsi_name_result = self.cs_api.getVolumeiScsiName(volume_id)
|
sf_iscsi_name_result = self.cs_api.getVolumeiScsiName(volume_id)
|
||||||
sf_iscsi_name = sf_iscsi_name_result['apivolumeiscsiname']['volumeiScsiName']
|
sf_iscsi_name = sf_iscsi_name_result['apivolumeiscsiname']['volumeiScsiName']
|
||||||
|
|
||||||
self._check_iscsi_name(sf_iscsi_name)
|
sf_util.check_iscsi_name(sf_iscsi_name, self)
|
||||||
|
|
||||||
root_volume_path_1 = self._get_path(volume_id)
|
root_volume_path_1 = self._get_path(volume_id)
|
||||||
|
|
||||||
@ -388,7 +392,7 @@ class TestVMSnapshots(cloudstackTestCase):
|
|||||||
|
|
||||||
xen_vdis = self.xen_session.xenapi.SR.get_VDIs(xen_sr)
|
xen_vdis = self.xen_session.xenapi.SR.get_VDIs(xen_sr)
|
||||||
|
|
||||||
self._check_list(xen_vdis, 3, TestVMSnapshots._should_be_three_vdis_err_msg)
|
sf_util.check_list(xen_vdis, 3, self, TestVMSnapshots._should_be_three_vdis_err_msg)
|
||||||
|
|
||||||
vdis_after_create = self._get_vdis(xen_vdis)
|
vdis_after_create = self._get_vdis(xen_vdis)
|
||||||
|
|
||||||
@ -411,7 +415,7 @@ class TestVMSnapshots(cloudstackTestCase):
|
|||||||
|
|
||||||
list_vm_snapshots = VmSnapshot.list(self.apiClient, listAll="true")
|
list_vm_snapshots = VmSnapshot.list(self.apiClient, listAll="true")
|
||||||
|
|
||||||
self._check_list(list_vm_snapshots, 1, TestVMSnapshots._should_only_be_one_vm_snapshot_err_msg)
|
sf_util.check_list(list_vm_snapshots, 1, self, TestVMSnapshots._should_only_be_one_vm_snapshot_err_msg)
|
||||||
|
|
||||||
root_volume_path_3 = self._get_path(volume_id)
|
root_volume_path_3 = self._get_path(volume_id)
|
||||||
|
|
||||||
@ -423,7 +427,7 @@ class TestVMSnapshots(cloudstackTestCase):
|
|||||||
|
|
||||||
xen_vdis = self.xen_session.xenapi.SR.get_VDIs(xen_sr)
|
xen_vdis = self.xen_session.xenapi.SR.get_VDIs(xen_sr)
|
||||||
|
|
||||||
self._check_list(xen_vdis, 3, TestVMSnapshots._should_be_three_vdis_err_msg)
|
sf_util.check_list(xen_vdis, 3, self, TestVMSnapshots._should_be_three_vdis_err_msg)
|
||||||
|
|
||||||
vdis_after_revert = self._get_vdis(xen_vdis)
|
vdis_after_revert = self._get_vdis(xen_vdis)
|
||||||
|
|
||||||
@ -470,7 +474,7 @@ class TestVMSnapshots(cloudstackTestCase):
|
|||||||
|
|
||||||
xen_vdis = self.xen_session.xenapi.SR.get_VDIs(xen_sr)
|
xen_vdis = self.xen_session.xenapi.SR.get_VDIs(xen_sr)
|
||||||
|
|
||||||
self._check_list(xen_vdis, 1, TestVMSnapshots._should_only_be_one_vdi_err_msg)
|
sf_util.check_list(xen_vdis, 1, self, TestVMSnapshots._should_only_be_one_vdi_err_msg)
|
||||||
|
|
||||||
vdis_after_delete = self._get_vdis(xen_vdis, True)
|
vdis_after_delete = self._get_vdis(xen_vdis, True)
|
||||||
|
|
||||||
@ -505,7 +509,7 @@ class TestVMSnapshots(cloudstackTestCase):
|
|||||||
|
|
||||||
root_volumes = list_volumes(self.apiClient, type="ROOT", listAll="true")
|
root_volumes = list_volumes(self.apiClient, type="ROOT", listAll="true")
|
||||||
|
|
||||||
self._check_list(root_volumes, 1, TestVMSnapshots._should_only_be_one_root_volume_err_msg)
|
sf_util.check_list(root_volumes, 1, self, TestVMSnapshots._should_only_be_one_root_volume_err_msg)
|
||||||
|
|
||||||
root_volume = root_volumes[0]
|
root_volume = root_volumes[0]
|
||||||
|
|
||||||
@ -514,13 +518,13 @@ class TestVMSnapshots(cloudstackTestCase):
|
|||||||
sf_iscsi_name_result = self.cs_api.getVolumeiScsiName(root_volume_id)
|
sf_iscsi_name_result = self.cs_api.getVolumeiScsiName(root_volume_id)
|
||||||
sf_iscsi_root_volume_name = sf_iscsi_name_result['apivolumeiscsiname']['volumeiScsiName']
|
sf_iscsi_root_volume_name = sf_iscsi_name_result['apivolumeiscsiname']['volumeiScsiName']
|
||||||
|
|
||||||
self._check_iscsi_name(sf_iscsi_root_volume_name)
|
sf_util.check_iscsi_name(sf_iscsi_root_volume_name, self)
|
||||||
|
|
||||||
root_volume_path_1 = self._get_path(root_volume_id)
|
root_volume_path_1 = self._get_path(root_volume_id)
|
||||||
|
|
||||||
data_volumes = list_volumes(self.apiClient, type="DATADISK", listAll="true")
|
data_volumes = list_volumes(self.apiClient, type="DATADISK", listAll="true")
|
||||||
|
|
||||||
self._check_list(data_volumes, 1, "There should only be one data volume.")
|
sf_util.check_list(data_volumes, 1, self, "There should only be one data volume.")
|
||||||
|
|
||||||
data_volume = data_volumes[0]
|
data_volume = data_volumes[0]
|
||||||
|
|
||||||
@ -529,7 +533,7 @@ class TestVMSnapshots(cloudstackTestCase):
|
|||||||
sf_iscsi_name_result = self.cs_api.getVolumeiScsiName(data_volume_id)
|
sf_iscsi_name_result = self.cs_api.getVolumeiScsiName(data_volume_id)
|
||||||
sf_iscsi_data_volume_name = sf_iscsi_name_result['apivolumeiscsiname']['volumeiScsiName']
|
sf_iscsi_data_volume_name = sf_iscsi_name_result['apivolumeiscsiname']['volumeiScsiName']
|
||||||
|
|
||||||
self._check_iscsi_name(sf_iscsi_data_volume_name)
|
sf_util.check_iscsi_name(sf_iscsi_data_volume_name, self)
|
||||||
|
|
||||||
data_volume_path_1 = self._get_path(data_volume_id)
|
data_volume_path_1 = self._get_path(data_volume_id)
|
||||||
|
|
||||||
@ -570,7 +574,7 @@ class TestVMSnapshots(cloudstackTestCase):
|
|||||||
|
|
||||||
root_volume_xen_vdis = self.xen_session.xenapi.SR.get_VDIs(root_volume_xen_sr)
|
root_volume_xen_vdis = self.xen_session.xenapi.SR.get_VDIs(root_volume_xen_sr)
|
||||||
|
|
||||||
self._check_list(root_volume_xen_vdis, 3, TestVMSnapshots._should_be_three_vdis_err_msg)
|
sf_util.check_list(root_volume_xen_vdis, 3, self, TestVMSnapshots._should_be_three_vdis_err_msg)
|
||||||
|
|
||||||
root_volume_vdis_after_create = self._get_vdis(root_volume_xen_vdis)
|
root_volume_vdis_after_create = self._get_vdis(root_volume_xen_vdis)
|
||||||
|
|
||||||
@ -586,7 +590,7 @@ class TestVMSnapshots(cloudstackTestCase):
|
|||||||
|
|
||||||
data_volume_xen_vdis = self.xen_session.xenapi.SR.get_VDIs(data_volume_xen_sr)
|
data_volume_xen_vdis = self.xen_session.xenapi.SR.get_VDIs(data_volume_xen_sr)
|
||||||
|
|
||||||
self._check_list(data_volume_xen_vdis, 3, TestVMSnapshots._should_be_three_vdis_err_msg)
|
sf_util.check_list(data_volume_xen_vdis, 3, self, TestVMSnapshots._should_be_three_vdis_err_msg)
|
||||||
|
|
||||||
data_volume_vdis_after_create = self._get_vdis(data_volume_xen_vdis)
|
data_volume_vdis_after_create = self._get_vdis(data_volume_xen_vdis)
|
||||||
|
|
||||||
@ -609,7 +613,7 @@ class TestVMSnapshots(cloudstackTestCase):
|
|||||||
|
|
||||||
list_vm_snapshots = VmSnapshot.list(self.apiClient, listAll="true")
|
list_vm_snapshots = VmSnapshot.list(self.apiClient, listAll="true")
|
||||||
|
|
||||||
self._check_list(list_vm_snapshots, 1, TestVMSnapshots._should_only_be_one_vm_snapshot_err_msg)
|
sf_util.check_list(list_vm_snapshots, 1, self, TestVMSnapshots._should_only_be_one_vm_snapshot_err_msg)
|
||||||
|
|
||||||
root_volume_path_3 = self._get_path(root_volume_id)
|
root_volume_path_3 = self._get_path(root_volume_id)
|
||||||
|
|
||||||
@ -621,7 +625,7 @@ class TestVMSnapshots(cloudstackTestCase):
|
|||||||
|
|
||||||
root_volume_xen_vdis = self.xen_session.xenapi.SR.get_VDIs(root_volume_xen_sr)
|
root_volume_xen_vdis = self.xen_session.xenapi.SR.get_VDIs(root_volume_xen_sr)
|
||||||
|
|
||||||
self._check_list(root_volume_xen_vdis, 3, TestVMSnapshots._should_be_three_vdis_err_msg)
|
sf_util.check_list(root_volume_xen_vdis, 3, self, TestVMSnapshots._should_be_three_vdis_err_msg)
|
||||||
|
|
||||||
root_volume_vdis_after_revert = self._get_vdis(root_volume_xen_vdis)
|
root_volume_vdis_after_revert = self._get_vdis(root_volume_xen_vdis)
|
||||||
|
|
||||||
@ -653,7 +657,7 @@ class TestVMSnapshots(cloudstackTestCase):
|
|||||||
|
|
||||||
data_volume_xen_vdis = self.xen_session.xenapi.SR.get_VDIs(data_volume_xen_sr)
|
data_volume_xen_vdis = self.xen_session.xenapi.SR.get_VDIs(data_volume_xen_sr)
|
||||||
|
|
||||||
self._check_list(data_volume_xen_vdis, 3, TestVMSnapshots._should_be_three_vdis_err_msg)
|
sf_util.check_list(data_volume_xen_vdis, 3, self, TestVMSnapshots._should_be_three_vdis_err_msg)
|
||||||
|
|
||||||
data_volume_vdis_after_revert = self._get_vdis(data_volume_xen_vdis)
|
data_volume_vdis_after_revert = self._get_vdis(data_volume_xen_vdis)
|
||||||
|
|
||||||
@ -700,7 +704,7 @@ class TestVMSnapshots(cloudstackTestCase):
|
|||||||
|
|
||||||
root_volume_xen_vdis = self.xen_session.xenapi.SR.get_VDIs(root_volume_xen_sr)
|
root_volume_xen_vdis = self.xen_session.xenapi.SR.get_VDIs(root_volume_xen_sr)
|
||||||
|
|
||||||
self._check_list(root_volume_xen_vdis, 1, TestVMSnapshots._should_only_be_one_vdi_err_msg)
|
sf_util.check_list(root_volume_xen_vdis, 1, self, TestVMSnapshots._should_only_be_one_vdi_err_msg)
|
||||||
|
|
||||||
root_volume_vdis_after_delete = self._get_vdis(root_volume_xen_vdis, True)
|
root_volume_vdis_after_delete = self._get_vdis(root_volume_xen_vdis, True)
|
||||||
|
|
||||||
@ -720,7 +724,7 @@ class TestVMSnapshots(cloudstackTestCase):
|
|||||||
|
|
||||||
data_volume_xen_vdis = self.xen_session.xenapi.SR.get_VDIs(data_volume_xen_sr)
|
data_volume_xen_vdis = self.xen_session.xenapi.SR.get_VDIs(data_volume_xen_sr)
|
||||||
|
|
||||||
self._check_list(data_volume_xen_vdis, 1, TestVMSnapshots._should_only_be_one_vdi_err_msg)
|
sf_util.check_list(data_volume_xen_vdis, 1, self, TestVMSnapshots._should_only_be_one_vdi_err_msg)
|
||||||
|
|
||||||
data_volume_vdis_after_delete = self._get_vdis(data_volume_xen_vdis, True)
|
data_volume_vdis_after_delete = self._get_vdis(data_volume_xen_vdis, True)
|
||||||
|
|
||||||
@ -745,7 +749,7 @@ class TestVMSnapshots(cloudstackTestCase):
|
|||||||
return path_result['apipathforvolume']['path']
|
return path_result['apipathforvolume']['path']
|
||||||
|
|
||||||
def _verify_vm_snapshot(self, list_vm_snapshots, vm_snapshot):
|
def _verify_vm_snapshot(self, list_vm_snapshots, vm_snapshot):
|
||||||
self._check_list(list_vm_snapshots, 1, TestVMSnapshots._should_only_be_one_vm_snapshot_err_msg)
|
sf_util.check_list(list_vm_snapshots, 1, self, TestVMSnapshots._should_only_be_one_vm_snapshot_err_msg)
|
||||||
|
|
||||||
vm_snapshot_from_list = list_vm_snapshots[0]
|
vm_snapshot_from_list = list_vm_snapshots[0]
|
||||||
|
|
||||||
@ -767,26 +771,6 @@ class TestVMSnapshots(cloudstackTestCase):
|
|||||||
"The snapshot is not in the 'Ready' state."
|
"The snapshot is not in the 'Ready' state."
|
||||||
)
|
)
|
||||||
|
|
||||||
def _check_iscsi_name(self, sf_iscsi_name):
|
|
||||||
self.assertEqual(
|
|
||||||
sf_iscsi_name[0],
|
|
||||||
"/",
|
|
||||||
"The iSCSI name needs to start with a forward slash."
|
|
||||||
)
|
|
||||||
|
|
||||||
def _check_list(self, in_list, expected_size_of_list, err_msg):
|
|
||||||
self.assertEqual(
|
|
||||||
isinstance(in_list, list),
|
|
||||||
True,
|
|
||||||
"'in_list' is not a list."
|
|
||||||
)
|
|
||||||
|
|
||||||
self.assertEqual(
|
|
||||||
len(in_list),
|
|
||||||
expected_size_of_list,
|
|
||||||
err_msg
|
|
||||||
)
|
|
||||||
|
|
||||||
def _get_vdis(self, xen_vdis, only_active_expected=False):
|
def _get_vdis(self, xen_vdis, only_active_expected=False):
|
||||||
expected_number_of_vdis = 1 if only_active_expected else 3
|
expected_number_of_vdis = 1 if only_active_expected else 3
|
||||||
|
|
||||||
@ -852,11 +836,3 @@ class TestVMSnapshots(cloudstackTestCase):
|
|||||||
vdis.base_vdi = base_vdi
|
vdis.base_vdi = base_vdi
|
||||||
|
|
||||||
return vdis
|
return vdis
|
||||||
|
|
||||||
@classmethod
|
|
||||||
def _purge_solidfire_volumes(cls):
|
|
||||||
deleted_volumes = cls.sf_client.list_deleted_volumes()
|
|
||||||
|
|
||||||
for deleted_volume in deleted_volumes:
|
|
||||||
cls.sf_client.purge_deleted_volume(deleted_volume['volumeID'])
|
|
||||||
|
|
||||||
|
|||||||
@ -20,6 +20,8 @@ import random
|
|||||||
import SignedAPICall
|
import SignedAPICall
|
||||||
import XenAPI
|
import XenAPI
|
||||||
|
|
||||||
|
from util import sf_util
|
||||||
|
|
||||||
# All tests inherit from cloudstackTestCase
|
# All tests inherit from cloudstackTestCase
|
||||||
from marvin.cloudstackTestCase import cloudstackTestCase
|
from marvin.cloudstackTestCase import cloudstackTestCase
|
||||||
|
|
||||||
@ -39,11 +41,13 @@ from marvin.lib.utils import cleanup_resources
|
|||||||
|
|
||||||
from solidfire import solidfire_element_api as sf_api
|
from solidfire import solidfire_element_api as sf_api
|
||||||
|
|
||||||
# on April 14, 2016: Ran 11 tests in 2494.043s with three hosts (resign = True)
|
# Prerequisites:
|
||||||
# on April 14, 2016: Ran 11 tests in 2033.516s with three hosts (resign = False)
|
# Only one zone
|
||||||
|
# Only one pod
|
||||||
# on May 2, 2016: Ran 11 tests in 2352.461s with two hosts (resign = True)
|
# Only one cluster
|
||||||
# on May 2, 2016: Ran 11 tests in 1982.066s with two hosts (resign = False)
|
#
|
||||||
|
# Running the tests:
|
||||||
|
# Change the "supports_resign" variable to True or False as desired.
|
||||||
|
|
||||||
|
|
||||||
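For clarity, a minimal sketch of what the "supports_resign" switch mentioned in the comments above controls (illustrative only, not part of this commit; the real call appears in setUpClass further down in this diff, and db_connection stands in for the Marvin database handle):

    # Toggling resign support before a run writes the flag into the
    # 'supportsResign' rows of both host_details and cluster_details
    # via the new sf_util helper.
    supports_resign = True  # set to False to exercise the non-resign path

    sf_util.set_supports_resign(supports_resign, db_connection)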
class TestData():
|
class TestData():
|
||||||
@ -145,7 +149,7 @@ class TestData():
|
|||||||
"miniops": "10000",
|
"miniops": "10000",
|
||||||
"maxiops": "15000",
|
"maxiops": "15000",
|
||||||
"hypervisorsnapshotreserve": 200,
|
"hypervisorsnapshotreserve": 200,
|
||||||
"tags": "SolidFire_SAN_1"
|
TestData.tags: TestData.storageTag
|
||||||
},
|
},
|
||||||
TestData.diskOffering: {
|
TestData.diskOffering: {
|
||||||
"name": "SF_DO_1",
|
"name": "SF_DO_1",
|
||||||
@ -158,71 +162,6 @@ class TestData():
|
|||||||
TestData.tags: TestData.storageTag,
|
TestData.tags: TestData.storageTag,
|
||||||
"storagetype": "shared"
|
"storagetype": "shared"
|
||||||
},
|
},
|
||||||
"testdiskofferings": {
|
|
||||||
"customiopsdo": {
|
|
||||||
"name": "SF_Custom_Iops_DO",
|
|
||||||
"displaytext": "Customized Iops DO",
|
|
||||||
"disksize": 128,
|
|
||||||
"customizediops": True,
|
|
||||||
"miniops": 500,
|
|
||||||
"maxiops": 1000,
|
|
||||||
"hypervisorsnapshotreserve": 200,
|
|
||||||
TestData.tags: TestData.storageTag,
|
|
||||||
"storagetype": "shared"
|
|
||||||
},
|
|
||||||
"customsizedo": {
|
|
||||||
"name": "SF_Custom_Size_DO",
|
|
||||||
"displaytext": "Customized Size DO",
|
|
||||||
"disksize": 175,
|
|
||||||
"customizediops": False,
|
|
||||||
"miniops": 500,
|
|
||||||
"maxiops": 1000,
|
|
||||||
"hypervisorsnapshotreserve": 200,
|
|
||||||
TestData.tags: TestData.storageTag,
|
|
||||||
"storagetype": "shared"
|
|
||||||
},
|
|
||||||
"customsizeandiopsdo": {
|
|
||||||
"name": "SF_Custom_Iops_Size_DO",
|
|
||||||
"displaytext": "Customized Size and Iops DO",
|
|
||||||
"disksize": 200,
|
|
||||||
"customizediops": True,
|
|
||||||
"miniops": 400,
|
|
||||||
"maxiops": 800,
|
|
||||||
"hypervisorsnapshotreserve": 200,
|
|
||||||
TestData.tags: TestData.storageTag,
|
|
||||||
"storagetype": "shared"
|
|
||||||
},
|
|
||||||
"newiopsdo": {
|
|
||||||
"name": "SF_New_Iops_DO",
|
|
||||||
"displaytext": "New Iops (min=350, max = 700)",
|
|
||||||
"disksize": 128,
|
|
||||||
"miniops": 350,
|
|
||||||
"maxiops": 700,
|
|
||||||
"hypervisorsnapshotreserve": 200,
|
|
||||||
TestData.tags: TestData.storageTag,
|
|
||||||
"storagetype": "shared"
|
|
||||||
},
|
|
||||||
"newsizedo": {
|
|
||||||
"name": "SF_New_Size_DO",
|
|
||||||
"displaytext": "New Size: 175",
|
|
||||||
"disksize": 175,
|
|
||||||
"miniops": 400,
|
|
||||||
"maxiops": 800,
|
|
||||||
"hypervisorsnapshotreserve": 200,
|
|
||||||
TestData.tags: TestData.storageTag,
|
|
||||||
"storagetype": "shared"
|
|
||||||
},
|
|
||||||
"newsizeandiopsdo": {
|
|
||||||
"name": "SF_New_Size_Iops_DO",
|
|
||||||
"displaytext": "New Size and Iops",
|
|
||||||
"disksize": 200,
|
|
||||||
"miniops": 200,
|
|
||||||
"maxiops": 400,
|
|
||||||
"hypervisorsnapshotreserve": 200,
|
|
||||||
TestData.tags: TestData.storageTag,
|
|
||||||
"storagetype": "shared"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
TestData.volume_1: {
|
TestData.volume_1: {
|
||||||
TestData.diskName: "test-volume",
|
TestData.diskName: "test-volume",
|
||||||
},
|
},
|
||||||
@ -241,14 +180,11 @@ class TestVolumes(cloudstackTestCase):
|
|||||||
_should_only_be_one_vm_in_list_err_msg = "There should only be one VM in this list."
|
_should_only_be_one_vm_in_list_err_msg = "There should only be one VM in this list."
|
||||||
_should_only_be_one_volume_in_list_err_msg = "There should only be one volume in this list."
|
_should_only_be_one_volume_in_list_err_msg = "There should only be one volume in this list."
|
||||||
_sf_account_id_should_be_non_zero_int_err_msg = "The SolidFire account ID should be a non-zero integer."
|
_sf_account_id_should_be_non_zero_int_err_msg = "The SolidFire account ID should be a non-zero integer."
|
||||||
_vag_id_should_be_non_zero_int_err_msg = "The SolidFire VAG ID should be a non-zero integer."
|
|
||||||
_volume_size_should_be_non_zero_int_err_msg = "The SolidFire volume size should be a non-zero integer."
|
_volume_size_should_be_non_zero_int_err_msg = "The SolidFire volume size should be a non-zero integer."
|
||||||
_volume_vm_id_and_vm_id_do_not_match_err_msg = "The volume's VM ID and the VM's ID do not match."
|
_volume_vm_id_and_vm_id_do_not_match_err_msg = "The volume's VM ID and the VM's ID do not match."
|
||||||
_vm_not_in_running_state_err_msg = "The VM is not in the 'Running' state."
|
_vm_not_in_running_state_err_msg = "The VM is not in the 'Running' state."
|
||||||
_vm_not_in_stopped_state_err_msg = "The VM is not in the 'Stopped' state."
|
_vm_not_in_stopped_state_err_msg = "The VM is not in the 'Stopped' state."
|
||||||
_sr_not_shared_err_msg = "The SR is not shared."
|
|
||||||
_volume_response_should_not_be_zero_err_msg = "The length of the response for the SolidFire-volume query should not be zero."
|
_volume_response_should_not_be_zero_err_msg = "The length of the response for the SolidFire-volume query should not be zero."
|
||||||
_list_should_be_empty = "The list should be empty."
|
|
||||||
_volume_should_not_be_in_a_vag = "The volume should not be in a volume access group."
|
_volume_should_not_be_in_a_vag = "The volume should not be in a volume access group."
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
@ -262,7 +198,7 @@ class TestVolumes(cloudstackTestCase):
|
|||||||
|
|
||||||
cls.supports_resign = True
|
cls.supports_resign = True
|
||||||
|
|
||||||
cls._set_supports_resign()
|
sf_util.set_supports_resign(cls.supports_resign, cls.dbConnection)
|
||||||
|
|
||||||
# Set up xenAPI connection
|
# Set up xenAPI connection
|
||||||
host_ip = "https://" + \
|
host_ip = "https://" + \
|
||||||
@ -368,7 +304,7 @@ class TestVolumes(cloudstackTestCase):
|
|||||||
|
|
||||||
cls.primary_storage.delete(cls.apiClient)
|
cls.primary_storage.delete(cls.apiClient)
|
||||||
|
|
||||||
cls._purge_solidfire_volumes()
|
sf_util.purge_solidfire_volumes(cls.sf_client)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logging.debug("Exception in tearDownClass(cls): %s" % e)
|
logging.debug("Exception in tearDownClass(cls): %s" % e)
|
||||||
|
|
||||||
@ -387,9 +323,9 @@ class TestVolumes(cloudstackTestCase):
|
|||||||
if self.supports_resign == False:
|
if self.supports_resign == False:
|
||||||
return
|
return
|
||||||
|
|
||||||
sf_volumes = self._get_sf_volumes()
|
sf_volumes = self._get_active_sf_volumes()
|
||||||
|
|
||||||
sf_volume = self._check_and_get_sf_volume(sf_volumes, TestData.templateCacheName)
|
sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, TestData.templateCacheName, self)
|
||||||
|
|
||||||
self.assertEqual(
|
self.assertEqual(
|
||||||
len(sf_volume['volumeAccessGroups']),
|
len(sf_volume['volumeAccessGroups']),
|
||||||
@ -451,21 +387,23 @@ class TestVolumes(cloudstackTestCase):
|
|||||||
TestVolumes._vm_not_in_running_state_err_msg
|
TestVolumes._vm_not_in_running_state_err_msg
|
||||||
)
|
)
|
||||||
|
|
||||||
sf_account_id = self._get_sf_account_id(self.primary_storage.id, self.account.id)
|
sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestVolumes._sf_account_id_should_be_non_zero_int_err_msg)
|
||||||
|
|
||||||
sf_volume_size = self._get_volume_size_with_hsr(new_volume)
|
sf_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, new_volume, self)
|
||||||
|
|
||||||
sf_vag_id = self._get_vag_id()
|
self._verify_hsr(self.disk_offering.disksize, self.disk_offering.hypervisorsnapshotreserve, sf_volume_size)
|
||||||
|
|
||||||
sf_iscsi_name = self._get_iqn(new_volume)
|
sf_vag_id = sf_util.get_vag_id(self.cs_api, self.cluster.id, self.primary_storage.id, self)
|
||||||
|
|
||||||
sf_volumes = self._get_sf_volumes(sf_account_id)
|
sf_iscsi_name = sf_util.get_iqn(self.cs_api, new_volume, self)
|
||||||
|
|
||||||
sf_volume = self._check_and_get_sf_volume(sf_volumes, newvolume.name)
|
sf_volumes = self._get_active_sf_volumes(sf_account_id)
|
||||||
|
|
||||||
self._check_size_and_iops(sf_volume, newvolume, sf_volume_size)
|
sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, newvolume.name, self)
|
||||||
|
|
||||||
self._check_vag(sf_volume, sf_vag_id)
|
sf_util.check_size_and_iops(sf_volume, newvolume, sf_volume_size, self)
|
||||||
|
|
||||||
|
sf_util.check_vag(sf_volume, sf_vag_id, self)
|
||||||
|
|
||||||
self._check_xen_sr(sf_iscsi_name)
|
self._check_xen_sr(sf_iscsi_name)
|
||||||
|
|
||||||
@ -481,9 +419,9 @@ class TestVolumes(cloudstackTestCase):
|
|||||||
|
|
||||||
self.virtual_machine.start(self.apiClient)
|
self.virtual_machine.start(self.apiClient)
|
||||||
|
|
||||||
sf_account_id = self._get_sf_account_id(self.primary_storage.id, self.account.id)
|
sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestVolumes._sf_account_id_should_be_non_zero_int_err_msg)
|
||||||
|
|
||||||
sf_vag_id = self._get_vag_id()
|
sf_vag_id = sf_util.get_vag_id(self.cs_api, self.cluster.id, self.primary_storage.id, self)
|
||||||
|
|
||||||
self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName])
|
self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName])
|
||||||
|
|
||||||
@ -516,17 +454,19 @@ class TestVolumes(cloudstackTestCase):
|
|||||||
TestVolumes._vm_not_in_running_state_err_msg
|
TestVolumes._vm_not_in_running_state_err_msg
|
||||||
)
|
)
|
||||||
|
|
||||||
sf_iscsi_name = self._get_iqn(self.volume)
|
sf_iscsi_name = sf_util.get_iqn(self.cs_api, self.volume, self)
|
||||||
|
|
||||||
sf_volume_size = self._get_volume_size_with_hsr(self.volume)
|
sf_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, self.volume, self)
|
||||||
|
|
||||||
sf_volumes = self._get_sf_volumes(sf_account_id)
|
self._verify_hsr(self.disk_offering.disksize, self.disk_offering.hypervisorsnapshotreserve, sf_volume_size)
|
||||||
|
|
||||||
sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name)
|
sf_volumes = self._get_active_sf_volumes(sf_account_id)
|
||||||
|
|
||||||
self._check_size_and_iops(sf_volume, vol, sf_volume_size)
|
sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
|
||||||
|
|
||||||
self._check_vag(sf_volume, sf_vag_id)
|
sf_util.check_size_and_iops(sf_volume, vol, sf_volume_size, self)
|
||||||
|
|
||||||
|
sf_util.check_vag(sf_volume, sf_vag_id, self)
|
||||||
|
|
||||||
self._check_xen_sr(sf_iscsi_name)
|
self._check_xen_sr(sf_iscsi_name)
|
||||||
|
|
||||||
@ -559,9 +499,9 @@ class TestVolumes(cloudstackTestCase):
|
|||||||
str(vm.state)
|
str(vm.state)
|
||||||
)
|
)
|
||||||
|
|
||||||
sf_volumes = self._get_sf_volumes(sf_account_id)
|
sf_volumes = self._get_active_sf_volumes(sf_account_id)
|
||||||
|
|
||||||
sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name)
|
sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
|
||||||
|
|
||||||
self.assertEqual(
|
self.assertEqual(
|
||||||
len(sf_volume['volumeAccessGroups']),
|
len(sf_volume['volumeAccessGroups']),
|
||||||
@ -600,11 +540,11 @@ class TestVolumes(cloudstackTestCase):
|
|||||||
TestVolumes._vm_not_in_running_state_err_msg
|
TestVolumes._vm_not_in_running_state_err_msg
|
||||||
)
|
)
|
||||||
|
|
||||||
sf_volumes = self._get_sf_volumes(sf_account_id)
|
sf_volumes = self._get_active_sf_volumes(sf_account_id)
|
||||||
|
|
||||||
sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name)
|
sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
|
||||||
|
|
||||||
self._check_vag(sf_volume, sf_vag_id)
|
sf_util.check_vag(sf_volume, sf_vag_id, self)
|
||||||
|
|
||||||
self._check_xen_sr(sf_iscsi_name)
|
self._check_xen_sr(sf_iscsi_name)
|
||||||
|
|
||||||
@ -614,9 +554,9 @@ class TestVolumes(cloudstackTestCase):
|
|||||||
|
|
||||||
self.virtual_machine.start(self.apiClient)
|
self.virtual_machine.start(self.apiClient)
|
||||||
|
|
||||||
sf_account_id = self._get_sf_account_id(self.primary_storage.id, self.account.id)
|
sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestVolumes._sf_account_id_should_be_non_zero_int_err_msg)
|
||||||
|
|
||||||
sf_vag_id = self._get_vag_id()
|
sf_vag_id = sf_util.get_vag_id(self.cs_api, self.cluster.id, self.primary_storage.id, self)
|
||||||
|
|
||||||
self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName])
|
self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName])
|
||||||
|
|
||||||
@ -649,17 +589,19 @@ class TestVolumes(cloudstackTestCase):
|
|||||||
TestVolumes._vm_not_in_running_state_err_msg
|
TestVolumes._vm_not_in_running_state_err_msg
|
||||||
)
|
)
|
||||||
|
|
||||||
sf_iscsi_name = self._get_iqn(self.volume)
|
sf_iscsi_name = sf_util.get_iqn(self.cs_api, self.volume, self)
|
||||||
|
|
||||||
sf_volume_size = self._get_volume_size_with_hsr(self.volume)
|
sf_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, self.volume, self)
|
||||||
|
|
||||||
sf_volumes = self._get_sf_volumes(sf_account_id)
|
self._verify_hsr(self.disk_offering.disksize, self.disk_offering.hypervisorsnapshotreserve, sf_volume_size)
|
||||||
|
|
||||||
sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name)
|
sf_volumes = self._get_active_sf_volumes(sf_account_id)
|
||||||
|
|
||||||
self._check_size_and_iops(sf_volume, vol, sf_volume_size)
|
sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
|
||||||
|
|
||||||
self._check_vag(sf_volume, sf_vag_id)
|
sf_util.check_size_and_iops(sf_volume, vol, sf_volume_size, self)
|
||||||
|
|
||||||
|
sf_util.check_vag(sf_volume, sf_vag_id, self)
|
||||||
|
|
||||||
self._check_xen_sr(sf_iscsi_name)
|
self._check_xen_sr(sf_iscsi_name)
|
||||||
|
|
||||||
@ -674,17 +616,19 @@ class TestVolumes(cloudstackTestCase):
|
|||||||
|
|
||||||
vm = self._get_vm(self.virtual_machine.id)
|
vm = self._get_vm(self.virtual_machine.id)
|
||||||
|
|
||||||
sf_iscsi_name = self._get_iqn(self.volume)
|
sf_iscsi_name = sf_util.get_iqn(self.cs_api, self.volume, self)
|
||||||
|
|
||||||
sf_volume_size = self._get_volume_size_with_hsr(self.volume)
|
sf_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, self.volume, self)
|
||||||
|
|
||||||
sf_volumes = self._get_sf_volumes(sf_account_id)
|
self._verify_hsr(self.disk_offering.disksize, self.disk_offering.hypervisorsnapshotreserve, sf_volume_size)
|
||||||
|
|
||||||
sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name)
|
sf_volumes = self._get_active_sf_volumes(sf_account_id)
|
||||||
|
|
||||||
self._check_size_and_iops(sf_volume, vol, sf_volume_size)
|
sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
|
||||||
|
|
||||||
self._check_vag(sf_volume, sf_vag_id)
|
sf_util.check_size_and_iops(sf_volume, vol, sf_volume_size, self)
|
||||||
|
|
||||||
|
sf_util.check_vag(sf_volume, sf_vag_id, self)
|
||||||
|
|
||||||
self._check_xen_sr(sf_iscsi_name)
|
self._check_xen_sr(sf_iscsi_name)
|
||||||
|
|
||||||
@ -694,9 +638,9 @@ class TestVolumes(cloudstackTestCase):
|
|||||||
|
|
||||||
self.virtual_machine.start(self.apiClient)
|
self.virtual_machine.start(self.apiClient)
|
||||||
|
|
||||||
sf_account_id = self._get_sf_account_id(self.primary_storage.id, self.account.id)
|
sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestVolumes._sf_account_id_should_be_non_zero_int_err_msg)
|
||||||
|
|
||||||
sf_vag_id = self._get_vag_id()
|
sf_vag_id = sf_util.get_vag_id(self.cs_api, self.cluster.id, self.primary_storage.id, self)
|
||||||
|
|
||||||
self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName])
|
self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName])
|
||||||
|
|
||||||
@ -729,17 +673,19 @@ class TestVolumes(cloudstackTestCase):
|
|||||||
TestVolumes._vm_not_in_running_state_err_msg
|
TestVolumes._vm_not_in_running_state_err_msg
|
||||||
)
|
)
|
||||||
|
|
||||||
sf_iscsi_name = self._get_iqn(self.volume)
|
sf_iscsi_name = sf_util.get_iqn(self.cs_api, self.volume, self)
|
||||||
|
|
||||||
sf_volume_size = self._get_volume_size_with_hsr(self.volume)
|
sf_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, self.volume, self)
|
||||||
|
|
||||||
sf_volumes = self._get_sf_volumes(sf_account_id)
|
self._verify_hsr(self.disk_offering.disksize, self.disk_offering.hypervisorsnapshotreserve, sf_volume_size)
|
||||||
|
|
||||||
sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name)
|
sf_volumes = self._get_active_sf_volumes(sf_account_id)
|
||||||
|
|
||||||
self._check_size_and_iops(sf_volume, vol, sf_volume_size)
|
sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
|
||||||
|
|
||||||
self._check_vag(sf_volume, sf_vag_id)
|
sf_util.check_size_and_iops(sf_volume, vol, sf_volume_size, self)
|
||||||
|
|
||||||
|
sf_util.check_vag(sf_volume, sf_vag_id, self)
|
||||||
|
|
||||||
self._check_xen_sr(sf_iscsi_name)
|
self._check_xen_sr(sf_iscsi_name)
|
||||||
|
|
||||||
@ -772,9 +718,9 @@ class TestVolumes(cloudstackTestCase):
|
|||||||
TestVolumes._vm_not_in_running_state_err_msg
|
TestVolumes._vm_not_in_running_state_err_msg
|
||||||
)
|
)
|
||||||
|
|
||||||
sf_volumes = self._get_sf_volumes(sf_account_id)
|
sf_volumes = self._get_active_sf_volumes(sf_account_id)
|
||||||
|
|
||||||
sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name)
|
sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
|
||||||
|
|
||||||
self.assertEqual(
|
self.assertEqual(
|
||||||
len(sf_volume['volumeAccessGroups']),
|
len(sf_volume['volumeAccessGroups']),
|
||||||
@ -796,9 +742,9 @@ class TestVolumes(cloudstackTestCase):
|
|||||||
|
|
||||||
vm = self._get_vm(self.virtual_machine.id)
|
vm = self._get_vm(self.virtual_machine.id)
|
||||||
|
|
||||||
sf_volumes = self._get_sf_volumes(sf_account_id)
|
sf_volumes = self._get_active_sf_volumes(sf_account_id)
|
||||||
|
|
||||||
sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name)
|
sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
|
||||||
|
|
||||||
self.assertEqual(
|
self.assertEqual(
|
||||||
len(sf_volume['volumeAccessGroups']),
|
len(sf_volume['volumeAccessGroups']),
|
||||||
@ -814,9 +760,9 @@ class TestVolumes(cloudstackTestCase):
|
|||||||
|
|
||||||
self.virtual_machine.start(self.apiClient)
|
self.virtual_machine.start(self.apiClient)
|
||||||
|
|
||||||
sf_account_id = self._get_sf_account_id(self.primary_storage.id, self.account.id)
|
sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestVolumes._sf_account_id_should_be_non_zero_int_err_msg)
|
||||||
|
|
||||||
sf_vag_id = self._get_vag_id()
|
sf_vag_id = sf_util.get_vag_id(self.cs_api, self.cluster.id, self.primary_storage.id, self)
|
||||||
|
|
||||||
self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName])
|
self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName])
|
||||||
|
|
||||||
@ -849,17 +795,19 @@ class TestVolumes(cloudstackTestCase):
|
|||||||
TestVolumes._vm_not_in_running_state_err_msg
|
TestVolumes._vm_not_in_running_state_err_msg
|
||||||
)
|
)
|
||||||
|
|
||||||
sf_iscsi_name = self._get_iqn(self.volume)
|
sf_iscsi_name = sf_util.get_iqn(self.cs_api, self.volume, self)
|
||||||
|
|
||||||
sf_volume_size = self._get_volume_size_with_hsr(self.volume)
|
sf_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, self.volume, self)
|
||||||
|
|
||||||
sf_volumes = self._get_sf_volumes(sf_account_id)
|
self._verify_hsr(self.disk_offering.disksize, self.disk_offering.hypervisorsnapshotreserve, sf_volume_size)
|
||||||
|
|
||||||
sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name)
|
sf_volumes = self._get_active_sf_volumes(sf_account_id)
|
||||||
|
|
||||||
self._check_size_and_iops(sf_volume, vol, sf_volume_size)
|
sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
|
||||||
|
|
||||||
self._check_vag(sf_volume, sf_vag_id)
|
sf_util.check_size_and_iops(sf_volume, vol, sf_volume_size, self)
|
||||||
|
|
||||||
|
sf_util.check_vag(sf_volume, sf_vag_id, self)
|
||||||
|
|
||||||
self._check_xen_sr(sf_iscsi_name)
|
self._check_xen_sr(sf_iscsi_name)
|
||||||
|
|
||||||
@ -894,9 +842,9 @@ class TestVolumes(cloudstackTestCase):
|
|||||||
TestVolumes._vm_not_in_stopped_state_err_msg
|
TestVolumes._vm_not_in_stopped_state_err_msg
|
||||||
)
|
)
|
||||||
|
|
||||||
sf_volumes = self._get_sf_volumes(sf_account_id)
|
sf_volumes = self._get_active_sf_volumes(sf_account_id)
|
||||||
|
|
||||||
sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name)
|
sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
|
||||||
|
|
||||||
self.assertEqual(
|
self.assertEqual(
|
||||||
len(sf_volume['volumeAccessGroups']),
|
len(sf_volume['volumeAccessGroups']),
|
||||||
@ -918,9 +866,9 @@ class TestVolumes(cloudstackTestCase):
|
|||||||
|
|
||||||
vm = self._get_vm(self.virtual_machine.id)
|
vm = self._get_vm(self.virtual_machine.id)
|
||||||
|
|
||||||
sf_volumes = self._get_sf_volumes(sf_account_id)
|
sf_volumes = self._get_active_sf_volumes(sf_account_id)
|
||||||
|
|
||||||
sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name)
|
sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
|
||||||
|
|
||||||
self.assertEqual(
|
self.assertEqual(
|
||||||
len(sf_volume['volumeAccessGroups']),
|
len(sf_volume['volumeAccessGroups']),
|
||||||
@ -936,9 +884,9 @@ class TestVolumes(cloudstackTestCase):
|
|||||||
|
|
||||||
self.virtual_machine.stop(self.apiClient)
|
self.virtual_machine.stop(self.apiClient)
|
||||||
|
|
||||||
sf_account_id = self._get_sf_account_id(self.primary_storage.id, self.account.id)
|
sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestVolumes._sf_account_id_should_be_non_zero_int_err_msg)
|
||||||
|
|
||||||
sf_vag_id = self._get_vag_id()
|
sf_vag_id = sf_util.get_vag_id(self.cs_api, self.cluster.id, self.primary_storage.id, self)
|
||||||
|
|
||||||
self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName])
|
self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName])
|
||||||
|
|
||||||
@ -971,17 +919,19 @@ class TestVolumes(cloudstackTestCase):
|
|||||||
TestVolumes._vm_not_in_stopped_state_err_msg
|
TestVolumes._vm_not_in_stopped_state_err_msg
|
||||||
)
|
)
|
||||||
|
|
||||||
sf_iscsi_name = self._get_iqn(self.volume)
|
sf_iscsi_name = sf_util.get_iqn(self.cs_api, self.volume, self)
|
||||||
|
|
||||||
sf_volume_size = self._get_volume_size_with_hsr(self.volume)
|
sf_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, self.volume, self)
|
||||||
|
|
||||||
sf_volumes = self._get_sf_volumes(sf_account_id)
|
self._verify_hsr(self.disk_offering.disksize, self.disk_offering.hypervisorsnapshotreserve, sf_volume_size)
|
||||||
|
|
||||||
sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name)
|
sf_volumes = self._get_active_sf_volumes(sf_account_id)
|
||||||
|
|
||||||
self._check_size_and_iops(sf_volume, vol, sf_volume_size)
|
sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
|
||||||
|
|
||||||
self._check_vag(sf_volume, sf_vag_id)
|
sf_util.check_size_and_iops(sf_volume, vol, sf_volume_size, self)
|
||||||
|
|
||||||
|
sf_util.check_vag(sf_volume, sf_vag_id, self)
|
||||||
|
|
||||||
self._check_xen_sr(sf_iscsi_name)
|
self._check_xen_sr(sf_iscsi_name)
|
||||||
|
|
||||||
@ -1003,17 +953,19 @@ class TestVolumes(cloudstackTestCase):
|
|||||||
TestVolumes._vm_not_in_running_state_err_msg
|
TestVolumes._vm_not_in_running_state_err_msg
|
||||||
)
|
)
|
||||||
|
|
||||||
sf_iscsi_name = self._get_iqn(self.volume)
|
sf_iscsi_name = sf_util.get_iqn(self.cs_api, self.volume, self)
|
||||||
|
|
||||||
sf_volume_size = self._get_volume_size_with_hsr(self.volume)
|
sf_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, self.volume, self)
|
||||||
|
|
||||||
sf_volumes = self._get_sf_volumes(sf_account_id)
|
self._verify_hsr(self.disk_offering.disksize, self.disk_offering.hypervisorsnapshotreserve, sf_volume_size)
|
||||||
|
|
||||||
sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name)
|
sf_volumes = self._get_active_sf_volumes(sf_account_id)
|
||||||
|
|
||||||
self._check_size_and_iops(sf_volume, vol, sf_volume_size)
|
sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
|
||||||
|
|
||||||
self._check_vag(sf_volume, sf_vag_id)
|
sf_util.check_size_and_iops(sf_volume, vol, sf_volume_size, self)
|
||||||
|
|
||||||
|
sf_util.check_vag(sf_volume, sf_vag_id, self)
|
||||||
|
|
||||||
self._check_xen_sr(sf_iscsi_name)
|
self._check_xen_sr(sf_iscsi_name)
|
||||||
|
|
||||||
@ -1061,21 +1013,23 @@ class TestVolumes(cloudstackTestCase):
|
|||||||
TestVolumes._vm_not_in_running_state_err_msg
|
TestVolumes._vm_not_in_running_state_err_msg
|
||||||
)
|
)
|
||||||
|
|
||||||
sf_account_id = self._get_sf_account_id(self.primary_storage.id, self.account.id)
|
sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestVolumes._sf_account_id_should_be_non_zero_int_err_msg)
|
||||||
|
|
||||||
sf_volume_size = self._get_volume_size_with_hsr(self.volume)
|
sf_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, self.volume, self)
|
||||||
|
|
||||||
sf_vag_id = self._get_vag_id()
|
self._verify_hsr(self.disk_offering.disksize, self.disk_offering.hypervisorsnapshotreserve, sf_volume_size)
|
||||||
|
|
||||||
sf_iscsi_name = self._get_iqn(self.volume)
|
sf_vag_id = sf_util.get_vag_id(self.cs_api, self.cluster.id, self.primary_storage.id, self)
|
||||||
|
|
||||||
sf_volumes = self._get_sf_volumes(sf_account_id)
|
sf_iscsi_name = sf_util.get_iqn(self.cs_api, self.volume, self)
|
||||||
|
|
||||||
sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name)
|
sf_volumes = self._get_active_sf_volumes(sf_account_id)
|
||||||
|
|
||||||
self._check_size_and_iops(sf_volume, vol, sf_volume_size)
|
sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
|
||||||
|
|
||||||
self._check_vag(sf_volume, sf_vag_id)
|
sf_util.check_size_and_iops(sf_volume, vol, sf_volume_size, self)
|
||||||
|
|
||||||
|
sf_util.check_vag(sf_volume, sf_vag_id, self)
|
||||||
|
|
||||||
self._check_xen_sr(sf_iscsi_name)
|
self._check_xen_sr(sf_iscsi_name)
|
||||||
|
|
||||||
@ -1114,11 +1068,11 @@ class TestVolumes(cloudstackTestCase):
|
|||||||
"Check if VM was actually expunged"
|
"Check if VM was actually expunged"
|
||||||
)
|
)
|
||||||
|
|
||||||
sf_volumes = self._get_sf_volumes(sf_account_id)
|
sf_volumes = self._get_active_sf_volumes(sf_account_id)
|
||||||
|
|
||||||
sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name)
|
sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
|
||||||
|
|
||||||
self._check_size_and_iops(sf_volume, vol, sf_volume_size)
|
sf_util.check_size_and_iops(sf_volume, vol, sf_volume_size, self)
|
||||||
|
|
||||||
self.assertEqual(
|
self.assertEqual(
|
||||||
len(sf_volume['volumeAccessGroups']),
|
len(sf_volume['volumeAccessGroups']),
|
||||||
@ -1174,21 +1128,23 @@ class TestVolumes(cloudstackTestCase):
|
|||||||
str(vm.state)
|
str(vm.state)
|
||||||
)
|
)
|
||||||
|
|
||||||
sf_account_id = self._get_sf_account_id(self.primary_storage.id, self.account.id)
|
sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestVolumes._sf_account_id_should_be_non_zero_int_err_msg)
|
||||||
|
|
||||||
sf_volume_size = self._get_volume_size_with_hsr(new_volume)
|
sf_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, new_volume, self)
|
||||||
|
|
||||||
sf_vag_id = self._get_vag_id()
|
self._verify_hsr(self.disk_offering.disksize, self.disk_offering.hypervisorsnapshotreserve, sf_volume_size)
|
||||||
|
|
||||||
sf_iscsi_name = self._get_iqn(new_volume)
|
sf_vag_id = sf_util.get_vag_id(self.cs_api, self.cluster.id, self.primary_storage.id, self)
|
||||||
|
|
||||||
sf_volumes = self._get_sf_volumes(sf_account_id)
|
sf_iscsi_name = sf_util.get_iqn(self.cs_api, new_volume, self)
|
||||||
|
|
||||||
sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name)
|
sf_volumes = self._get_active_sf_volumes(sf_account_id)
|
||||||
|
|
||||||
self._check_size_and_iops(sf_volume, vol, sf_volume_size)
|
sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
|
||||||
|
|
||||||
self._check_vag(sf_volume, sf_vag_id)
|
sf_util.check_size_and_iops(sf_volume, vol, sf_volume_size, self)
|
||||||
|
|
||||||
|
sf_util.check_vag(sf_volume, sf_vag_id, self)
|
||||||
|
|
||||||
self._check_xen_sr(sf_iscsi_name)
|
self._check_xen_sr(sf_iscsi_name)
|
||||||
|
|
||||||
@ -1219,11 +1175,11 @@ class TestVolumes(cloudstackTestCase):
|
|||||||
str(vm.state)
|
str(vm.state)
|
||||||
)
|
)
|
||||||
|
|
||||||
sf_volumes = self._get_sf_volumes(sf_account_id)
|
sf_volumes = self._get_active_sf_volumes(sf_account_id)
|
||||||
|
|
||||||
sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name)
|
sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
|
||||||
|
|
||||||
self._check_size_and_iops(sf_volume, vol, sf_volume_size)
|
sf_util.check_size_and_iops(sf_volume, vol, sf_volume_size, self)
|
||||||
|
|
||||||
self.assertEqual(
|
self.assertEqual(
|
||||||
len(sf_volume['volumeAccessGroups']),
|
len(sf_volume['volumeAccessGroups']),
|
||||||
@ -1246,9 +1202,9 @@ class TestVolumes(cloudstackTestCase):
|
|||||||
"Check volume was deleted"
|
"Check volume was deleted"
|
||||||
)
|
)
|
||||||
|
|
||||||
sf_volumes = self._get_sf_volumes(sf_account_id)
|
sf_volumes = self._get_active_sf_volumes(sf_account_id)
|
||||||
|
|
||||||
self._check_and_get_sf_volume(sf_volumes, vol.name, False)
|
sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self, False)
|
||||||
|
|
||||||
@attr(hypervisor='XenServer')
|
@attr(hypervisor='XenServer')
|
||||||
def test_09_attach_volumes_multiple_accounts(self):
|
def test_09_attach_volumes_multiple_accounts(self):
|
||||||
@ -1342,39 +1298,43 @@ class TestVolumes(cloudstackTestCase):
|
|||||||
str(test_vm.state)
|
str(test_vm.state)
|
||||||
)
|
)
|
||||||
|
|
||||||
sf_vag_id = self._get_vag_id()
|
sf_vag_id = sf_util.get_vag_id(self.cs_api, self.cluster.id, self.primary_storage.id, self)
|
||||||
|
|
||||||
sf_account_id = self._get_sf_account_id(self.primary_storage.id, self.account.id)
|
sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestVolumes._sf_account_id_should_be_non_zero_int_err_msg)
|
||||||
|
|
||||||
sf_volumes = self._get_sf_volumes(sf_account_id)
|
sf_volumes = self._get_active_sf_volumes(sf_account_id)
|
||||||
|
|
||||||
sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name)
|
sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
|
||||||
|
|
||||||
sf_volume_size = self._get_volume_size_with_hsr(vol)
|
sf_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, vol, self)
|
||||||
|
|
||||||
self._check_size_and_iops(sf_volume, vol, sf_volume_size)
|
self._verify_hsr(self.disk_offering.disksize, self.disk_offering.hypervisorsnapshotreserve, sf_volume_size)
|
||||||
|
|
||||||
sf_iscsi_name = self._get_iqn(self.volume)
|
sf_util.check_size_and_iops(sf_volume, vol, sf_volume_size, self)
|
||||||
|
|
||||||
|
sf_iscsi_name = sf_util.get_iqn(self.cs_api, self.volume, self)
|
||||||
|
|
||||||
self._check_xen_sr(sf_iscsi_name)
|
self._check_xen_sr(sf_iscsi_name)
|
||||||
|
|
||||||
self._check_vag(sf_volume, sf_vag_id)
|
sf_util.check_vag(sf_volume, sf_vag_id, self)
|
||||||
|
|
||||||
sf_test_account_id = self._get_sf_account_id(self.primary_storage.id, test_account.id)
|
sf_test_account_id = sf_util.get_sf_account_id(self.cs_api, test_account.id, self.primary_storage.id, self, TestVolumes._sf_account_id_should_be_non_zero_int_err_msg)
|
||||||
|
|
||||||
sf_test_volumes = self._get_sf_volumes(sf_test_account_id)
|
sf_test_volumes = self._get_active_sf_volumes(sf_test_account_id)
|
||||||
|
|
||||||
sf_test_volume = self._check_and_get_sf_volume(sf_test_volumes, test_vol.name)
|
sf_test_volume = sf_util.check_and_get_sf_volume(sf_test_volumes, test_vol.name, self)
|
||||||
|
|
||||||
sf_test_volume_size = self._get_volume_size_with_hsr(test_vol)
|
sf_test_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, test_vol, self)
|
||||||
|
|
||||||
self._check_size_and_iops(sf_test_volume, test_vol, sf_test_volume_size)
|
self._verify_hsr(self.disk_offering.disksize, self.disk_offering.hypervisorsnapshotreserve, sf_test_volume_size)
|
||||||
|
|
||||||
sf_test_iscsi_name = self._get_iqn(test_volume)
|
sf_util.check_size_and_iops(sf_test_volume, test_vol, sf_test_volume_size, self)
|
||||||
|
|
||||||
|
sf_test_iscsi_name = sf_util.get_iqn(self.cs_api, test_volume, self)
|
||||||
|
|
||||||
self._check_xen_sr(sf_test_iscsi_name)
|
self._check_xen_sr(sf_test_iscsi_name)
|
||||||
|
|
||||||
self._check_vag(sf_test_volume, sf_vag_id)
|
sf_util.check_vag(sf_test_volume, sf_vag_id, self)
|
||||||
|
|
||||||
@attr(hypervisor='XenServer')
|
@attr(hypervisor='XenServer')
|
||||||
def test_10_attach_more_than_one_disk_to_VM(self):
|
def test_10_attach_more_than_one_disk_to_VM(self):
|
||||||
@ -1417,66 +1377,50 @@ class TestVolumes(cloudstackTestCase):
|
|||||||
|
|
||||||
vol_2 = self._check_and_get_cs_volume(volume_2.id, self.testdata[TestData.volume_2][TestData.diskName])
|
vol_2 = self._check_and_get_cs_volume(volume_2.id, self.testdata[TestData.volume_2][TestData.diskName])
|
||||||
|
|
||||||
sf_account_id = self._get_sf_account_id(self.primary_storage.id, self.account.id)
|
sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestVolumes._sf_account_id_should_be_non_zero_int_err_msg)
|
||||||
|
|
||||||
sf_volume_size = self._get_volume_size_with_hsr(self.volume)
|
sf_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, self.volume, self)
|
||||||
|
|
||||||
sf_volume_2_size = self._get_volume_size_with_hsr(volume_2)
|
self._verify_hsr(self.disk_offering.disksize, self.disk_offering.hypervisorsnapshotreserve, sf_volume_size)
|
||||||
|
|
||||||
sf_vag_id = self._get_vag_id()
|
sf_volume_2_size = sf_util.get_volume_size_with_hsr(self.cs_api, volume_2, self)
|
||||||
|
|
||||||
sf_volumes = self._get_sf_volumes(sf_account_id)
|
self._verify_hsr(self.disk_offering.disksize, self.disk_offering.hypervisorsnapshotreserve, sf_volume_2_size)
|
||||||
|
|
||||||
sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name)
|
sf_vag_id = sf_util.get_vag_id(self.cs_api, self.cluster.id, self.primary_storage.id, self)
|
||||||
|
|
||||||
self._check_size_and_iops(sf_volume, vol, sf_volume_size)
|
sf_volumes = self._get_active_sf_volumes(sf_account_id)
|
||||||
|
|
||||||
sf_iscsi_name = self._get_iqn(self.volume)
|
sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
|
||||||
|
|
||||||
|
sf_util.check_size_and_iops(sf_volume, vol, sf_volume_size, self)
|
||||||
|
|
||||||
|
sf_iscsi_name = sf_util.get_iqn(self.cs_api, self.volume, self)
|
||||||
|
|
||||||
self._check_xen_sr(sf_iscsi_name)
|
self._check_xen_sr(sf_iscsi_name)
|
||||||
|
|
||||||
self._check_vag(sf_volume, sf_vag_id)
|
sf_util.check_vag(sf_volume, sf_vag_id, self)
|
||||||
|
|
||||||
sf_volume_2 = self._check_and_get_sf_volume(sf_volumes, vol_2.name)
|
sf_volume_2 = sf_util.check_and_get_sf_volume(sf_volumes, vol_2.name, self)
|
||||||
|
|
||||||
self._check_size_and_iops(sf_volume_2, vol_2, sf_volume_2_size)
|
sf_util.check_size_and_iops(sf_volume_2, vol_2, sf_volume_2_size, self)
|
||||||
|
|
||||||
sf_iscsi_name_2 = self._get_iqn(volume_2)
|
sf_iscsi_name_2 = sf_util.get_iqn(self.cs_api, volume_2, self)
|
||||||
|
|
||||||
self._check_xen_sr(sf_iscsi_name_2)
|
self._check_xen_sr(sf_iscsi_name_2)
|
||||||
|
|
||||||
self._check_vag(sf_volume_2, sf_vag_id)
|
sf_util.check_vag(sf_volume_2, sf_vag_id, self)
|
||||||
|
|
||||||
self.virtual_machine.detach_volume(self.apiClient, volume_2)
|
self.virtual_machine.detach_volume(self.apiClient, volume_2)
|
||||||
|
|
||||||
'''
|
'''
|
||||||
@attr(hypervisor = 'XenServer')
|
@attr(hypervisor = 'XenServer')
|
||||||
def _test_11_attach_disk_to_running_vm_change_iops(self):
|
def test_11_attach_disk_to_running_vm_change_iops(self):
|
||||||
Attach a disk to a running VM, then change iops
|
Attach a disk to a running VM, then change iops
|
||||||
self.custom_iops_disk_offering = DiskOffering.create(
|
self.custom_iops_disk_offering = DiskOffering.create(
|
||||||
|
|
||||||
)'''
|
)'''
|
||||||
|
|
||||||
def _check_list(self, in_list, expected_size_of_list, err_msg):
|
|
||||||
self.assertEqual(
|
|
||||||
isinstance(in_list, list),
|
|
||||||
True,
|
|
||||||
"'in_list' is not a list."
|
|
||||||
)
|
|
||||||
|
|
||||||
self.assertEqual(
|
|
||||||
len(in_list),
|
|
||||||
expected_size_of_list,
|
|
||||||
err_msg
|
|
||||||
)
|
|
||||||
|
|
||||||
def _check_iscsi_name(self, sf_iscsi_name):
|
|
||||||
self.assertEqual(
|
|
||||||
sf_iscsi_name[0],
|
|
||||||
"/",
|
|
||||||
"The iSCSI name needs to start with a forward slash."
|
|
||||||
)
|
|
||||||
|
|
||||||
def _check_volume(self, volume, volume_name):
|
def _check_volume(self, volume, volume_name):
|
||||||
self.assertTrue(
|
self.assertTrue(
|
||||||
volume.name.startswith(volume_name),
|
volume.name.startswith(volume_name),
|
||||||
@ -1501,45 +1445,13 @@ class TestVolumes(cloudstackTestCase):
|
|||||||
"The storage type is incorrect."
|
"The storage type is incorrect."
|
||||||
)
|
)
|
||||||
|
|
||||||
def _check_size_and_iops(self, sf_volume, volume, size):
|
|
||||||
self.assertEqual(
|
|
||||||
sf_volume['qos']['minIOPS'],
|
|
||||||
volume.miniops,
|
|
||||||
"Check QOS - Min IOPS: " + str(sf_volume['qos']['minIOPS'])
|
|
||||||
)
|
|
||||||
|
|
||||||
self.assertEqual(
|
|
||||||
sf_volume['qos']['maxIOPS'],
|
|
||||||
volume.maxiops,
|
|
||||||
"Check QOS - Max IOPS: " + str(sf_volume['qos']['maxIOPS'])
|
|
||||||
)
|
|
||||||
|
|
||||||
self.assertEqual(
|
|
||||||
sf_volume['totalSize'],
|
|
||||||
size,
|
|
||||||
"Check SF volume size: " + str(sf_volume['totalSize'])
|
|
||||||
)
|
|
||||||
|
|
||||||
def _check_vag(self, sf_volume, sf_vag_id):
|
|
||||||
self.assertEqual(
|
|
||||||
len(sf_volume['volumeAccessGroups']),
|
|
||||||
1,
|
|
||||||
"The volume should only be in one VAG."
|
|
||||||
)
|
|
||||||
|
|
||||||
self.assertEqual(
|
|
||||||
sf_volume['volumeAccessGroups'][0],
|
|
||||||
sf_vag_id,
|
|
||||||
"The volume is not in the VAG with the following ID: " + str(sf_vag_id) + "."
|
|
||||||
)
|
|
||||||
|
|
||||||
def _check_and_get_cs_volume(self, volume_id, volume_name):
|
def _check_and_get_cs_volume(self, volume_id, volume_name):
|
||||||
list_volumes_response = list_volumes(
|
list_volumes_response = list_volumes(
|
||||||
self.apiClient,
|
self.apiClient,
|
||||||
id=volume_id
|
id=volume_id
|
||||||
)
|
)
|
||||||
|
|
||||||
self._check_list(list_volumes_response, 1, TestVolumes._should_only_be_one_volume_in_list_err_msg)
|
sf_util.check_list(list_volumes_response, 1, self, TestVolumes._should_only_be_one_volume_in_list_err_msg)
|
||||||
|
|
||||||
cs_volume = list_volumes_response[0]
|
cs_volume = list_volumes_response[0]
|
||||||
|
|
||||||
@ -1547,108 +1459,37 @@ class TestVolumes(cloudstackTestCase):
|
|||||||
|
|
||||||
return cs_volume
|
return cs_volume
|
||||||
|
|
||||||
def _get_sf_account_id(self, primary_storage_id, account_id):
|
def _verify_hsr(self, cs_volume_size_in_gb, hsr, sf_volume_size_in_bytes):
|
||||||
sf_account_id_request = {'storageid': primary_storage_id, 'accountid': account_id}
|
cs_volume_size_including_hsr_in_bytes = self._get_cs_volume_size_including_hsr_in_bytes(cs_volume_size_in_gb, hsr)
|
||||||
sf_account_id_result = self.cs_api.getSolidFireAccountId(sf_account_id_request)
|
|
||||||
sf_account_id = sf_account_id_result['apisolidfireaccountid']['solidFireAccountId']
|
|
||||||
|
|
||||||
self.assertEqual(
|
self.assertTrue(
|
||||||
isinstance(sf_account_id, int),
|
cs_volume_size_including_hsr_in_bytes == sf_volume_size_in_bytes,
|
||||||
True,
|
"HSR does not add up correctly."
|
||||||
TestVolumes._sf_account_id_should_be_non_zero_int_err_msg
|
);
|
||||||
)
|
|
||||||
|
|
||||||
return sf_account_id
|
def _get_cs_volume_size_including_hsr_in_bytes(self, cs_volume_size_in_gb, hsr):
|
||||||
|
lowest_hsr = 10
|
||||||
|
|
||||||
def _get_volume_size_with_hsr(self, cs_volume):
|
if hsr < lowest_hsr:
|
||||||
# Get underlying SF volume size with hypervisor snapshot reserve
|
hsr = lowest_hsr;
|
||||||
sf_volume_size_request = {'volumeid': cs_volume.id}
|
|
||||||
sf_volume_size_result = self.cs_api.getSolidFireVolumeSize(sf_volume_size_request)
|
|
||||||
sf_volume_size = sf_volume_size_result['apisolidfirevolumesize']['solidFireVolumeSize']
|
|
||||||
|
|
||||||
self.assertEqual(
|
return self._get_bytes_from_gb(cs_volume_size_in_gb + (cs_volume_size_in_gb * (hsr / 100)))
|
||||||
isinstance(sf_volume_size, int),
|
|
||||||
True,
|
|
||||||
"The SolidFire volume size should be a non-zero integer."
|
|
||||||
)
|
|
||||||
|
|
||||||
return sf_volume_size
|
def _get_bytes_from_gb(self, number_in_gb):
|
||||||
|
return number_in_gb * 1024 * 1024 * 1024
|
||||||
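A quick worked example of the hypervisor-snapshot-reserve (HSR) arithmetic above (illustrative numbers only, not taken from the disk offerings in this file): a reserve below 10% is first bumped up to 10%, and the expected SolidFire size is the CloudStack size plus size * (hsr / 100), converted to bytes.

    # Sketch: a 128 GB CloudStack volume with a 200% hypervisor snapshot reserve
    # should back onto a 128 + 128 * (200 / 100) = 384 GB SolidFire volume.
    cs_volume_size_in_gb = 128
    hsr = 200

    expected_sf_size_in_bytes = (cs_volume_size_in_gb + cs_volume_size_in_gb * (hsr / 100)) * 1024 * 1024 * 1024

    assert expected_sf_size_in_bytes == 412316860416  # 384 GiB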
def _get_vag_id(self):
|
|
||||||
# Get SF Volume Access Group ID
|
|
||||||
sf_vag_id_request = {'clusterid': self.cluster.id, 'storageid': self.primary_storage.id}
|
|
||||||
sf_vag_id_result = self.cs_api.getSolidFireVolumeAccessGroupId(sf_vag_id_request)
|
|
||||||
sf_vag_id = sf_vag_id_result['apisolidfirevolumeaccessgroupid']['solidFireVolumeAccessGroupId']
|
|
||||||
|
|
||||||
self.assertEqual(
|
|
||||||
isinstance(sf_vag_id, int),
|
|
||||||
True,
|
|
||||||
TestVolumes._vag_id_should_be_non_zero_int_err_msg
|
|
||||||
)
|
|
||||||
|
|
||||||
return sf_vag_id
|
|
||||||
|
|
||||||
def _get_iqn(self, volume):
|
|
||||||
# Get volume IQN
|
|
||||||
sf_iscsi_name_request = {'volumeid': volume.id}
|
|
||||||
sf_iscsi_name_result = self.cs_api.getVolumeiScsiName(sf_iscsi_name_request)
|
|
||||||
sf_iscsi_name = sf_iscsi_name_result['apivolumeiscsiname']['volumeiScsiName']
|
|
||||||
|
|
||||||
self._check_iscsi_name(sf_iscsi_name)
|
|
||||||
|
|
||||||
return sf_iscsi_name
|
|
||||||
|
|
||||||
def _get_vm(self, vm_id):
|
def _get_vm(self, vm_id):
|
||||||
list_vms_response = list_virtual_machines(self.apiClient, id=vm_id)
|
list_vms_response = list_virtual_machines(self.apiClient, id=vm_id)
|
||||||
|
|
||||||
self._check_list(list_vms_response, 1, TestVolumes._should_only_be_one_vm_in_list_err_msg)
|
sf_util.check_list(list_vms_response, 1, self, TestVolumes._should_only_be_one_vm_in_list_err_msg)
|
||||||
|
|
||||||
return list_vms_response[0]
|
return list_vms_response[0]
|
||||||
|
|
||||||
def _check_and_get_sf_volume(self, sf_volumes, sf_volume_name, should_exist=True):
|
|
||||||
sf_volume = None
|
|
||||||
|
|
||||||
for volume in sf_volumes:
|
|
||||||
if volume['name'] == sf_volume_name:
|
|
||||||
sf_volume = volume
|
|
||||||
break
|
|
||||||
|
|
||||||
if should_exist:
|
|
||||||
self.assertNotEqual(
|
|
||||||
sf_volume,
|
|
||||||
None,
|
|
||||||
"Check if SF volume was created in correct account: " + str(sf_volumes)
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
self.assertEqual(
|
|
||||||
sf_volume,
|
|
||||||
None,
|
|
||||||
"Check if SF volume was deleted: " + str(sf_volumes)
|
|
||||||
)
|
|
||||||
|
|
||||||
return sf_volume
|
|
||||||
|
|
||||||
def _check_xen_sr(self, xen_sr_name, should_exist=True):
|
def _check_xen_sr(self, xen_sr_name, should_exist=True):
|
||||||
if should_exist:
|
sf_util.check_xen_sr(xen_sr_name, self.xen_session, self, should_exist)
|
||||||
xen_sr = self.xen_session.xenapi.SR.get_by_name_label(xen_sr_name)[0]
|
|
||||||
|
|
||||||
self.sr_shared = self.xen_session.xenapi.SR.get_shared(xen_sr)
|
def _get_active_sf_volumes(self, sf_account_id=None):
|
||||||
|
sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
|
||||||
self.assertEqual(
|
|
||||||
self.sr_shared,
|
|
||||||
True,
|
|
||||||
TestVolumes._sr_not_shared_err_msg
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
xen_sr = self.xen_session.xenapi.SR.get_by_name_label(xen_sr_name)
|
|
||||||
|
|
||||||
self._check_list(xen_sr, 0, TestVolumes._list_should_be_empty)
|
|
||||||
|
|
||||||
def _get_sf_volumes(self, sf_account_id=None):
|
|
||||||
if sf_account_id is not None:
|
|
||||||
sf_volumes = self.sf_client.list_volumes_for_account(sf_account_id)
|
|
||||||
else:
|
|
||||||
sf_volumes = self.sf_client.list_active_volumes()
|
|
||||||
|
|
||||||
self.assertNotEqual(
|
self.assertNotEqual(
|
||||||
len(sf_volumes),
|
len(sf_volumes),
|
||||||
@ -1657,20 +1498,3 @@ class TestVolumes(cloudstackTestCase):
|
|||||||
)
|
)
|
||||||
|
|
||||||
return sf_volumes
|
return sf_volumes
|
||||||
|
|
||||||
@classmethod
|
|
||||||
def _set_supports_resign(cls):
|
|
||||||
supports_resign = str(cls.supports_resign)
|
|
||||||
|
|
||||||
sql_query = "Update host_details Set value = '" + supports_resign + "' Where name = 'supportsResign'"
|
|
||||||
|
|
||||||
# make sure you can connect to MySQL: https://teamtreehouse.com/community/cant-connect-remotely-to-mysql-server-with-mysql-workbench
|
|
||||||
cls.dbConnection.execute(sql_query)
|
|
||||||
|
|
||||||
@classmethod
|
|
||||||
def _purge_solidfire_volumes(cls):
|
|
||||||
deleted_volumes = cls.sf_client.list_deleted_volumes()
|
|
||||||
|
|
||||||
for deleted_volume in deleted_volumes:
|
|
||||||
cls.sf_client.purge_deleted_volume(deleted_volume['volumeID'])
|
|
||||||
|
|
||||||
|
|||||||
test/integration/plugins/solidfire/util/sf_util.py (new file, 217 lines)
@ -0,0 +1,217 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

def check_list(in_list, expected_size_of_list, obj_assert, err_msg):
    obj_assert.assertEqual(
        isinstance(in_list, list),
        True,
        "'in_list' is not a list."
    )

    obj_assert.assertEqual(
        len(in_list),
        expected_size_of_list,
        err_msg
    )

def get_sf_account_id(cs_api, cs_account_id, primary_storage_id, obj_assert, err_msg):
    sf_account_id_request = {'accountid': cs_account_id, 'storageid': primary_storage_id}
    sf_account_id_result = cs_api.getSolidFireAccountId(sf_account_id_request)
    sf_account_id = sf_account_id_result['apisolidfireaccountid']['solidFireAccountId']

    obj_assert.assertEqual(
        isinstance(sf_account_id, int),
        True,
        err_msg
    )

    return sf_account_id

def get_iqn(cs_api, volume, obj_assert):
    # Get volume IQN
    sf_iscsi_name_request = {'volumeid': volume.id}
    sf_iscsi_name_result = cs_api.getVolumeiScsiName(sf_iscsi_name_request)
    sf_iscsi_name = sf_iscsi_name_result['apivolumeiscsiname']['volumeiScsiName']

    check_iscsi_name(sf_iscsi_name, obj_assert)

    return sf_iscsi_name

def check_iscsi_name(sf_iscsi_name, obj_assert):
    obj_assert.assertEqual(
        sf_iscsi_name[0],
        "/",
        "The iSCSI name needs to start with a forward slash."
    )

def set_supports_resign(supports_resign, db_connection):
    _set_supports_resign_for_table(supports_resign, db_connection, "host_details")
    _set_supports_resign_for_table(supports_resign, db_connection, "cluster_details")

def _set_supports_resign_for_table(supports_resign, db_connection, table):
    sql_query = "Update " + str(table) + " Set value = '" + str(supports_resign) + "' Where name = 'supportsResign'"

    # make sure you can connect to MySQL: https://teamtreehouse.com/community/cant-connect-remotely-to-mysql-server-with-mysql-workbench
    db_connection.execute(sql_query)

def purge_solidfire_volumes(sf_client):
    deleted_volumes = sf_client.list_deleted_volumes()

    for deleted_volume in deleted_volumes:
        sf_client.purge_deleted_volume(deleted_volume['volumeID'])

def get_not_active_sf_volumes(sf_client, sf_account_id=None):
    if sf_account_id is not None:
        sf_volumes = sf_client.list_volumes_for_account(sf_account_id)

        if sf_volumes is not None and len(sf_volumes) > 0:
            sf_volumes = _get_not_active_sf_volumes_only(sf_volumes)
    else:
        sf_volumes = sf_client.list_deleted_volumes()

    return sf_volumes

def _get_not_active_sf_volumes_only(sf_volumes):
    not_active_sf_volumes_only = []

    for sf_volume in sf_volumes:
        if sf_volume["status"] != "active":
            not_active_sf_volumes_only.append(sf_volume)

    return not_active_sf_volumes_only

def get_active_sf_volumes(sf_client, sf_account_id=None):
    if sf_account_id is not None:
        sf_volumes = sf_client.list_volumes_for_account(sf_account_id)

        if sf_volumes is not None and len(sf_volumes) > 0:
            sf_volumes = _get_active_sf_volumes_only(sf_volumes)
    else:
        sf_volumes = sf_client.list_active_volumes()

    return sf_volumes

def _get_active_sf_volumes_only(sf_volumes):
    active_sf_volumes_only = []

    for sf_volume in sf_volumes:
        if sf_volume["status"] == "active":
            active_sf_volumes_only.append(sf_volume)

    return active_sf_volumes_only

def check_and_get_sf_volume(sf_volumes, sf_volume_name, obj_assert, should_exist=True):
    sf_volume = None

    for volume in sf_volumes:
        if volume['name'] == sf_volume_name:
            sf_volume = volume
            break

    if should_exist:
        obj_assert.assertNotEqual(
            sf_volume,
            None,
            "Check if SF volume was created in correct account: " + str(sf_volumes)
        )
    else:
        obj_assert.assertEqual(
            sf_volume,
            None,
            "Check if SF volume was deleted: " + str(sf_volumes)
        )

    return sf_volume

def check_xen_sr(xen_sr_name, xen_session, obj_assert, should_exist=True):
    xen_sr = xen_session.xenapi.SR.get_by_name_label(xen_sr_name)

    if should_exist:
        check_list(xen_sr, 1, obj_assert, "SR " + xen_sr_name + " doesn't exist, but should.")

        sr_shared = xen_session.xenapi.SR.get_shared(xen_sr[0])

        obj_assert.assertEqual(
            sr_shared,
            True,
            "SR " + xen_sr_name + " is not shared, but should be."
        )
    else:
        check_list(xen_sr, 0, obj_assert, "SR " + xen_sr_name + " exists, but shouldn't.")

def check_vag(sf_volume, sf_vag_id, obj_assert):
    obj_assert.assertEqual(
        len(sf_volume['volumeAccessGroups']),
        1,
        "The volume should only be in one VAG."
    )

    obj_assert.assertEqual(
        sf_volume['volumeAccessGroups'][0],
        sf_vag_id,
        "The volume is not in the VAG with the following ID: " + str(sf_vag_id) + "."
    )

def get_vag_id(cs_api, cluster_id, primary_storage_id, obj_assert):
    # Get SF Volume Access Group ID
    sf_vag_id_request = {'clusterid': cluster_id, 'storageid': primary_storage_id}
    sf_vag_id_result = cs_api.getSolidFireVolumeAccessGroupId(sf_vag_id_request)
    sf_vag_id = sf_vag_id_result['apisolidfirevolumeaccessgroupid']['solidFireVolumeAccessGroupId']

    obj_assert.assertEqual(
        isinstance(sf_vag_id, int),
        True,
        "The SolidFire VAG ID should be a non-zero integer."
    )

    return sf_vag_id

def format_iqn(iqn):
    return "/" + iqn + "/0"

def check_size_and_iops(sf_volume, cs_volume, size, obj_assert):
    obj_assert.assertEqual(
        sf_volume['qos']['minIOPS'],
        cs_volume.miniops,
        "Check QoS - Min IOPS: " + str(sf_volume['qos']['minIOPS'])
    )

    obj_assert.assertEqual(
        sf_volume['qos']['maxIOPS'],
        cs_volume.maxiops,
        "Check QoS - Max IOPS: " + str(sf_volume['qos']['maxIOPS'])
    )

    obj_assert.assertEqual(
        sf_volume['totalSize'],
        size,
        "Check SolidFire volume size: " + str(sf_volume['totalSize'])
    )

def get_volume_size_with_hsr(cs_api, cs_volume, obj_assert):
    # Get underlying SF volume size with hypervisor snapshot reserve
    sf_volume_size_request = {'volumeid': cs_volume.id}
    sf_volume_size_result = cs_api.getSolidFireVolumeSize(sf_volume_size_request)
    sf_volume_size = sf_volume_size_result['apisolidfirevolumesize']['solidFireVolumeSize']

    obj_assert.assertEqual(
        isinstance(sf_volume_size, int),
        True,
        "The SolidFire volume size should be a non-zero integer."
    )

    return sf_volume_size
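For reference, a condensed sketch of how the refactored tests above consume these helpers (the call pattern is lifted from the TestVolumes hunks in this diff; self, vol, and the other fixtures are assumed to come from the surrounding test class):

    from util import sf_util

    # Inside a TestVolumes test method: resolve the SolidFire account and VAG
    # backing the primary storage, then validate the volume CloudStack created.
    sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id,
                                              self, TestVolumes._sf_account_id_should_be_non_zero_int_err_msg)
    sf_vag_id = sf_util.get_vag_id(self.cs_api, self.cluster.id, self.primary_storage.id, self)

    sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
    sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)

    sf_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, vol, self)

    sf_util.check_size_and_iops(sf_volume, vol, sf_volume_size, self)
    sf_util.check_vag(sf_volume, sf_vag_id, self)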