Merge pull request #1749 from mike-tutkowski/archived_snapshots
CLOUDSTACK-9619: Updates for SAN-assisted snapshots

This PR addresses a few issues in #1600 (which was recently merged to master for 4.10).

In StorageSystemDataMotionStrategy.performCopyOfVdi we call getSnapshotDetails. In one such scenario, the source snapshot in question comes from secondary storage (when we are creating a new volume on managed storage from a snapshot of ours that's on secondary storage). This usually worked in the regression tests due to a bit of "luck": we retrieve the ID of the snapshot (which is on secondary storage) and then try to pull out its StoragePoolVO object (which is for primary storage). If you happen to have a primary storage whose ID matches (the ID of the secondary storage), then getSnapshotDetails populates its Map<String, String> with inapplicable data (that is later ignored) and you don't easily see a problem. However, if you don't have a primary storage that matches that ID (which I didn't today because I had removed that primary storage), then a NullPointerException is thrown. I have fixed that issue by skipping getSnapshotDetails if the source is coming from secondary storage.

While fixing that, I noticed a couple more problems:

1) We can invoke grantAccess on a snapshot that's actually on secondary storage (this doesn't amount to much because the VolumeServiceImpl ignores the call when it's not for a primary-storage driver).

2) We can invoke revokeAccess on a snapshot that's actually on secondary storage (again, this doesn't amount to much because the VolumeServiceImpl ignores the call when it's not for a primary-storage driver).

I have corrected those issues as well.

I then came across one more problem: when using a SAN snapshot and copying it to secondary storage, or creating a new managed-storage volume from a snapshot of ours on secondary storage, we attach to the SR in the XenServer code but detach from it in the StorageSystemDataMotionStrategy code (by sending a message to the XenServer code to perform an SR detach). Since we know to detach from the SR after the copy is done, we should detach from the SR in the XenServer code, without that code having to be explicitly called from outside of the XenServer logic. I went ahead and changed that as well.

JIRA Ticket: https://issues.apache.org/jira/browse/CLOUDSTACK-9619

* pr/1749:
CLOUDSTACK-9619: Updates for SAN-assisted snapshots

Signed-off-by: Rajani Karuturi <rajani.karuturi@accelerite.com>
commit 4721c53ea0
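The common thread in the StorageSystemDataMotionStrategy fixes is a location-type guard: the SAN-specific calls (getSnapshotDetails, grantAccess, revokeAccess) must run only when the snapshot actually resides on primary storage. The following is a minimal, self-contained sketch of that pattern, not the actual CloudStack code; the class and the println placeholders are illustrative only, and the real change is in performCopyOfVdi in the diff below.

// SnapshotGuardSketch.java -- a hedged sketch of the guard pattern this PR introduces.
public class SnapshotGuardSketch {

    enum LocationType { PRIMARY, SECONDARY }

    static void copySnapshot(LocationType snapshotLocation) {
        boolean onPrimary = snapshotLocation == LocationType.PRIMARY;

        try {
            if (onPrimary) {
                // SAN-specific setup: grant host access and attach SAN details to the copy command.
                // Doing this for a secondary-storage snapshot is what previously led to the NPE
                // (looking up a primary StoragePoolVO by a secondary-storage ID).
                System.out.println("grantAccess + getSnapshotDetails (primary/SAN path only)");
            }

            System.out.println("send copy command to the hypervisor");
        } finally {
            if (onPrimary) {
                // Teardown mirrors setup: revoke access only if we granted it.
                System.out.println("revokeAccess (primary/SAN path only)");
            }
        }
    }

    public static void main(String[] args) {
        copySnapshot(LocationType.SECONDARY); // skips the SAN-only calls
        copySnapshot(LocationType.PRIMARY);   // performs grant/revoke around the copy
    }
}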
@@ -19,7 +19,6 @@
package org.apache.cloudstack.storage.motion;

import com.cloud.agent.AgentManager;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.to.DataStoreTO;
import com.cloud.agent.api.to.DataTO;
import com.cloud.agent.api.to.DiskTO;
@@ -37,9 +36,9 @@ import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.server.ManagementService;
import com.cloud.storage.DataStoreRole;
import com.cloud.storage.DiskOfferingVO;
import com.cloud.storage.Snapshot;
import com.cloud.storage.SnapshotVO;
import com.cloud.storage.Storage.ImageFormat;
import com.cloud.storage.Volume;
import com.cloud.storage.VolumeDetailVO;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.DiskOfferingDao;
@@ -79,15 +78,11 @@ import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
import org.apache.cloudstack.storage.command.CopyCmdAnswer;
import org.apache.cloudstack.storage.command.CopyCommand;
import org.apache.cloudstack.storage.command.DeleteCommand;
import org.apache.cloudstack.storage.command.DettachAnswer;
import org.apache.cloudstack.storage.command.DettachCommand;
import org.apache.cloudstack.storage.command.ResignatureAnswer;
import org.apache.cloudstack.storage.command.ResignatureCommand;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.to.PrimaryDataStoreTO;
import org.apache.cloudstack.storage.to.SnapshotObjectTO;
import org.apache.cloudstack.storage.to.VolumeObjectTO;
import org.apache.commons.lang.StringUtils;
import org.apache.log4j.Logger;
@@ -225,6 +220,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {

if (canHandleDest) {
handleCreateVolumeFromSnapshotOnSecondaryStorage(snapshotInfo, volumeInfo, callback);

return;
}

@@ -415,8 +411,14 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {

copyCmdAnswer = (CopyCmdAnswer)_agentMgr.send(hostVO.getId(), copyCommand);

if (needCache) {
if (!copyCmdAnswer.getResult()) {
// We were not able to copy. Handle it.
errMsg = copyCmdAnswer.getDetails();

throw new CloudRuntimeException(errMsg);
}

if (needCache) {
// If cached storage was needed (in case of object store as secondary
// storage), at this point, the data has been copied from the primary
// to the NFS cache by the hypervisor. We now invoke another copy
@@ -430,34 +432,34 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {

if (ep == null) {
errMsg = "No remote endpoint to send command, check if host or ssvm is down?";

LOGGER.error(errMsg);

copyCmdAnswer = new CopyCmdAnswer(errMsg);
} else {
copyCmdAnswer = (CopyCmdAnswer) ep.sendMessage(cmd);
copyCmdAnswer = (CopyCmdAnswer)ep.sendMessage(cmd);
}

// clean up snapshot copied to staging
performCleanupCacheStorage(destOnStore);
cacheMgr.deleteCacheObject(destOnStore);
}


} catch (CloudRuntimeException | AgentUnavailableException | OperationTimedoutException ex) {
String msg = "Failed to create template from snapshot (Snapshot ID = " + snapshotInfo.getId() + ") : ";

LOGGER.warn(msg, ex);

throw new CloudRuntimeException(msg + ex.getMessage());

} finally {

// detach and remove access after the volume is created
SnapshotObjectTO snapshotObjectTO = (SnapshotObjectTO) snapshotInfo.getTO();
DiskTO disk = new DiskTO(snapshotObjectTO, null, snapshotInfo.getPath(), Volume.Type.UNKNOWN);
detachManagedStoreVolume(snapshotInfo, hostVO, getProperty(snapshotInfo.getId(), DiskTO.IQN), srcDataStore.getId(), disk);
_volumeService.revokeAccess(snapshotInfo, hostVO, srcDataStore);

if (copyCmdAnswer == null || !copyCmdAnswer.getResult()) {
if (copyCmdAnswer != null && !StringUtils.isEmpty(copyCmdAnswer.getDetails())) {
errMsg = copyCmdAnswer.getDetails();

if (needCache) {
cacheMgr.deleteCacheObject(destOnStore);
}
}
else {
errMsg = "Unable to create template from snapshot";
@@ -497,11 +499,9 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
* @param callback for async
*/
private void handleCreateVolumeFromSnapshotOnSecondaryStorage(SnapshotInfo snapshotInfo, VolumeInfo volumeInfo, AsyncCompletionCallback<CopyCommandResult> callback) {

// at this point, the snapshotInfo and volumeInfo should have the same disk offering ID (so either one should be OK to get a DiskOfferingVO instance)
DiskOfferingVO diskOffering = _diskOfferingDao.findByIdIncludingRemoved(volumeInfo.getDiskOfferingId());
SnapshotVO snapshot = _snapshotDao.findById(snapshotInfo.getId());
DataStore destDataStore = volumeInfo.getDataStore();

// update the volume's hv_ss_reserve (hypervisor snapshot reserve) from a disk offering (used for managed storage)
_volumeService.updateHypervisorSnapshotReserveForVolume(diskOffering, volumeInfo.getId(), snapshot.getHypervisorType());
@@ -510,9 +510,9 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
String errMsg = null;

HostVO hostVO = null;
try {

//create a volume on the storage
try {
// create a volume on the storage
AsyncCallFuture<VolumeApiResult> future = _volumeService.createVolumeAsync(volumeInfo, volumeInfo.getDataStore());
VolumeApiResult result = future.get();

@@ -529,7 +529,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {

hostVO = getHost(snapshotInfo.getDataCenterId(), false);

//copy the volume from secondary via the hypervisor
// copy the volume from secondary via the hypervisor
copyCmdAnswer = performCopyOfVdi(volumeInfo, snapshotInfo, hostVO);

if (copyCmdAnswer == null || !copyCmdAnswer.getResult()) {
@@ -543,12 +543,6 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
}
catch (Exception ex) {
errMsg = ex.getMessage() != null ? ex.getMessage() : "Copy operation failed in 'StorageSystemDataMotionStrategy.handleCreateVolumeFromSnapshotBothOnStorageSystem'";
} finally {


DiskTO disk = new DiskTO(volumeInfo.getTO(), volumeInfo.getDeviceId(), volumeInfo.getPath(),volumeInfo.getVolumeType());
long storagePoolId = volumeInfo.getPoolId();
detachManagedStoreVolume(volumeInfo, hostVO, volumeInfo.get_iScsiName(), storagePoolId, disk);
}

CopyCommandResult result = new CopyCommandResult(null, copyCmdAnswer);
@@ -556,73 +550,6 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
result.setResult(errMsg);

callback.complete(result);


}

/**
* Detaches a managed volume from a host
* @param dataObject Volume to be detached
* @param hostVO Host where the volume is currently attached
* @param storagePoolId Storage where volume resides
* @param disk Object which stores disk attributes to send to the agents
*/
private void detachManagedStoreVolume(DataObject dataObject, HostVO hostVO, String iqn, long storagePoolId, DiskTO disk) {

DataStore destDataStore = dataObject.getDataStore();
DettachCommand cmd = new DettachCommand(disk, null);
StoragePoolVO storagePool = _storagePoolDao.findById(storagePoolId);

cmd.setManaged(true);
cmd.setStorageHost(storagePool.getHostAddress());
cmd.setStoragePort(storagePool.getPort());
cmd.set_iScsiName(iqn);

try {
DettachAnswer dettachAnswer = (DettachAnswer) _agentMgr.send(hostVO.getId(), cmd);

if (!dettachAnswer.getResult()) {
LOGGER.warn("Error detaching DataObject:" + dettachAnswer.getDetails());
}

} catch (Exception e) {
LOGGER.warn("Error detaching DataObject " + dataObject.getId() + " Error: " + e.getMessage());
}


try {
_volumeService.revokeAccess(dataObject, hostVO, destDataStore);
} catch (Exception e) {
LOGGER.warn("Error revoking access to DataObject (DataObject ID = " + dataObject.getId() + "): " + e.getMessage(), e);
}
}

private void performCleanupCacheStorage(DataObject destOnStore) {
destOnStore.processEvent(Event.DestroyRequested);

DeleteCommand deleteCommand = new DeleteCommand(destOnStore.getTO());
EndPoint ep = selector.select(destOnStore);
try {
if (ep == null) {
LOGGER.warn("Unable to cleanup staging NFS because no endpoint was found " +
"Object: " + destOnStore.getType() + " ID: " + destOnStore.getId());

destOnStore.processEvent(Event.OperationFailed);
} else {
Answer deleteAnswer = ep.sendMessage(deleteCommand);
if (deleteAnswer != null && deleteAnswer.getResult()) {
LOGGER.warn("Unable to cleanup staging NFS " + deleteAnswer.getDetails() +
"Object: " + destOnStore.getType() + " ID: " + destOnStore.getId());
destOnStore.processEvent(Event.OperationFailed);
}
}

destOnStore.processEvent(Event.OperationSuccessed);
} catch (Exception e) {
destOnStore.processEvent(Event.OperationFailed);
LOGGER.warn("Unable to clean up staging cache Exception " + e.getMessage() +
"Object: " + destOnStore.getType() + " ID: " + destOnStore.getId());
}
}

/**
@@ -899,7 +826,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
}

private Map<String, String> getSnapshotDetails(SnapshotInfo snapshotInfo) {
Map<String, String> snapshotDetails = new HashMap<String, String>();
Map<String, String> snapshotDetails = new HashMap<>();

long storagePoolId = snapshotInfo.getDataStore().getId();
StoragePoolVO storagePoolVO = _storagePoolDao.findById(storagePoolId);
@@ -1066,30 +993,34 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
* @return result of the copy
*/
private CopyCmdAnswer performCopyOfVdi(VolumeInfo volumeInfo, SnapshotInfo snapshotInfo, HostVO hostVO) {
Snapshot.LocationType locationType = snapshotInfo.getLocationType();

String value = _configDao.getValue(Config.PrimaryStorageDownloadWait.toString());
int primaryStorageDownloadWait = NumbersUtil.parseInt(value, Integer.parseInt(Config.PrimaryStorageDownloadWait.getDefaultValue()));

DataObject srcData = snapshotInfo;
CopyCmdAnswer copyCmdAnswer = null;
DataObject cacheData = null;

boolean needCacheStorage = needCacheStorage(snapshotInfo, volumeInfo);

if (needCacheStorage) {
cacheData = cacheSnapshotChain(snapshotInfo, new ZoneScope(volumeInfo.getDataCenterId()));
srcData = cacheData;

}

CopyCommand copyCommand = new CopyCommand(srcData.getTO(), volumeInfo.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value());

try {

if (Snapshot.LocationType.PRIMARY.equals(locationType)) {
_volumeService.grantAccess(snapshotInfo, hostVO, snapshotInfo.getDataStore());
_volumeService.grantAccess(volumeInfo, hostVO, volumeInfo.getDataStore());

Map<String, String> srcDetails = getSnapshotDetails(snapshotInfo);

copyCommand.setOptions(srcDetails);
}

_volumeService.grantAccess(volumeInfo, hostVO, volumeInfo.getDataStore());

Map<String, String> destDetails = getVolumeDetails(volumeInfo);

@@ -1105,11 +1036,14 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
throw new CloudRuntimeException(msg + ex.getMessage());
}
finally {
if (Snapshot.LocationType.PRIMARY.equals(locationType)) {
_volumeService.revokeAccess(snapshotInfo, hostVO, snapshotInfo.getDataStore());
}

_volumeService.revokeAccess(volumeInfo, hostVO, volumeInfo.getDataStore());

if (needCacheStorage && copyCmdAnswer != null && copyCmdAnswer.getResult()) {
performCleanupCacheStorage(cacheData);
cacheMgr.deleteCacheObject(cacheData);
}
}


@@ -90,18 +90,17 @@ public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase {

@Override
public SnapshotInfo backupSnapshot(SnapshotInfo snapshotInfo) {

Preconditions.checkArgument(snapshotInfo != null, "backupSnapshot expects a valid snapshot");

if (snapshotInfo.getLocationType() != Snapshot.LocationType.SECONDARY) {

markAsBackedUp((SnapshotObject)snapshotInfo);

return snapshotInfo;
}

// At this point the snapshot is either taken as a native
// At this point, the snapshot is either taken as a native
// snapshot on the storage or exists as a volume on the storage (clone).
// If archive flag is passed, we will copy this snapshot to secondary
// If archive flag is passed in, we should copy this snapshot to secondary
// storage and delete it from the primary storage.

HostVO host = getHost(snapshotInfo.getVolumeId());
@@ -109,15 +108,14 @@ public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase {
boolean computeClusterSupportsResign = clusterDao.getSupportsResigning(host.getClusterId());

if (!canStorageSystemCreateVolumeFromSnapshot || !computeClusterSupportsResign) {
String mesg = "Cannot archive snapshot: Either canStorageSystemCreateVolumeFromSnapshot or " +
"computeClusterSupportsResign were false. ";
String msg = "Cannot archive snapshot: canStorageSystemCreateVolumeFromSnapshot and/or computeClusterSupportsResign were false.";

s_logger.warn(mesg);
throw new CloudRuntimeException(mesg);
s_logger.warn(msg);

throw new CloudRuntimeException(msg);
}

return snapshotSvr.backupSnapshot(snapshotInfo);

}

@Override
@@ -255,14 +253,13 @@ public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase {
backedUpSnapshot = backupSnapshot(snapshotOnPrimary);

updateLocationTypeInDb(backedUpSnapshot);

}
finally {
if (result != null && result.isSuccess()) {
volumeInfo.stateTransit(Volume.Event.OperationSucceeded);

if (snapshotOnPrimary != null && snapshotInfo.getLocationType() == Snapshot.LocationType.SECONDARY) {
// cleanup the snapshot on primary
// remove the snapshot on primary storage
try {
snapshotSvr.deleteSnapshot(snapshotOnPrimary);
} catch (Exception e) {
@@ -276,6 +273,7 @@ public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase {
}

snapshotDao.releaseFromLockTable(snapshotInfo.getId());

return backedUpSnapshot;
}

@@ -586,7 +584,7 @@ public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase {

Snapshot.LocationType locationType = snapshot.getLocationType();

//If the snapshot exisits on Secondary Storage, we can't delete it.
// If the snapshot exists on Secondary Storage, we can't delete it.
if (SnapshotOperation.DELETE.equals(op) && Snapshot.LocationType.SECONDARY.equals(locationType)) {
return StrategyPriority.CANT_HANDLE;
}

@@ -28,14 +28,6 @@ import java.util.Random;

import javax.inject.Inject;

import com.cloud.dc.dao.ClusterDao;
import com.cloud.offering.DiskOffering;
import com.cloud.org.Cluster;
import com.cloud.org.Grouping.AllocationState;
import com.cloud.resource.ResourceState;
import com.cloud.server.ManagementService;
import com.cloud.storage.RegisterVolumePayload;
import com.cloud.utils.Pair;
import org.apache.cloudstack.engine.cloud.entity.api.VolumeEntity;
import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
@@ -85,6 +77,7 @@ import com.cloud.agent.api.to.VirtualMachineTO;
import com.cloud.alert.AlertManager;
import com.cloud.configuration.Config;
import com.cloud.configuration.Resource.ResourceType;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.event.EventTypes;
import com.cloud.event.UsageEventUtils;
import com.cloud.exception.ConcurrentOperationException;
@@ -94,7 +87,13 @@ import com.cloud.host.HostVO;
import com.cloud.host.dao.HostDao;
import com.cloud.host.dao.HostDetailsDao;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.offering.DiskOffering;
import com.cloud.org.Cluster;
import com.cloud.org.Grouping.AllocationState;
import com.cloud.resource.ResourceState;
import com.cloud.server.ManagementService;
import com.cloud.storage.DataStoreRole;
import com.cloud.storage.RegisterVolumePayload;
import com.cloud.storage.ScopeType;
import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.storage.StoragePool;
@@ -111,6 +110,7 @@ import com.cloud.storage.template.TemplateProp;
import com.cloud.user.AccountManager;
import com.cloud.user.ResourceLimitService;
import com.cloud.utils.NumbersUtil;
import com.cloud.utils.Pair;
import com.cloud.utils.db.DB;
import com.cloud.utils.db.GlobalLock;
import com.cloud.utils.exception.CloudRuntimeException;
@@ -317,9 +317,7 @@ public class VolumeServiceImpl implements VolumeService {

VolumeVO vol = volDao.findById(volume.getId());

String volumePath = vol.getPath();
Long poolId = vol.getPoolId();
if (poolId == null || volumePath == null || volumePath.trim().isEmpty()) {
if (!volumeExistsOnPrimary(vol)) {
// not created on primary store
if (volumeStore == null) {
// also not created on secondary store
@@ -348,6 +346,32 @@ public class VolumeServiceImpl implements VolumeService {
return future;
}

private boolean volumeExistsOnPrimary(VolumeVO vol) {
Long poolId = vol.getPoolId();

if (poolId == null) {
return false;
}

PrimaryDataStore primaryStore = dataStoreMgr.getPrimaryDataStore(poolId);

if (primaryStore == null) {
return false;
}

if (primaryStore.isManaged()) {
return true;
}

String volumePath = vol.getPath();

if (volumePath == null || volumePath.trim().isEmpty()) {
return false;
}

return true;
}

public Void deleteVolumeCallback(AsyncCallbackDispatcher<VolumeServiceImpl, CommandResult> callback, DeleteVolumeContext<VolumeApiResult> context) {
CommandResult result = callback.getResult();
VolumeObject vo = context.getVolume();

@@ -532,6 +532,10 @@ public class Xenserver625StorageProcessor extends XenServerStorageProcessor {
if (snapshotSr != null) {
hypervisorResource.removeSR(conn, snapshotSr);
}

if (primaryStore.isManaged()) {
hypervisorResource.removeSR(conn, primaryStorageSR);
}
}
} else {
final String primaryStorageSRUuid = primaryStorageSR.getUuid(conn);
@@ -553,9 +557,11 @@ public class Xenserver625StorageProcessor extends XenServerStorageProcessor {
physicalSize = Long.parseLong(tmp[1]);
finalPath = folder + File.separator + snapshotBackupUuid + ".vhd";
}
}

final String volumeUuid = snapshotTO.getVolume().getPath();

destroySnapshotOnPrimaryStorageExceptThis(conn, volumeUuid, snapshotUuid);
}

final SnapshotObjectTO newSnapshot = new SnapshotObjectTO();
newSnapshot.setPath(finalPath);
@@ -708,9 +714,10 @@ public class Xenserver625StorageProcessor extends XenServerStorageProcessor {
}
SR srcSr = null;
VDI destVdi = null;
try {
SR primaryStorageSR;

SR primaryStorageSR = null;

try {
if (pool.isManaged()) {
Map<String,String> destDetails = cmd.getOptions2();

@@ -771,6 +778,7 @@ public class Xenserver625StorageProcessor extends XenServerStorageProcessor {
final VolumeObjectTO newVol = new VolumeObjectTO();
newVol.setPath(volumeUUID);
newVol.setSize(vdir.virtualSize);

return new CopyCmdAnswer(newVol);
} catch (final Types.XenAPIException e) {
details += " due to " + e.toString();
@@ -782,6 +790,11 @@ public class Xenserver625StorageProcessor extends XenServerStorageProcessor {
if (srcSr != null) {
hypervisorResource.removeSR(conn, srcSr);
}

if (pool.isManaged()) {
hypervisorResource.removeSR(conn, primaryStorageSR);
}

if (!result && destVdi != null) {
try {
destVdi.destroy(conn);

@@ -403,9 +403,10 @@ public class SolidFireUtil {
SolidFireUtil.getIqnsFromHosts(hosts), new long[] { sfVolumeId });
}
catch (Exception ex) {
String iqnInVagAlready = "Exceeded maximum number of Volume Access Groups per initiator";
String iqnInVagAlready1 = "Exceeded maximum number of Volume Access Groups per initiator";
String iqnInVagAlready2 = "Exceeded maximum number of VolumeAccessGroups per Initiator";

if (!ex.getMessage().contains(iqnInVagAlready)) {
if (!ex.getMessage().contains(iqnInVagAlready1) && !ex.getMessage().contains(iqnInVagAlready2)) {
throw new CloudRuntimeException(ex.getMessage());
}

@@ -488,7 +488,9 @@ class TestManagedSystemVMs(cloudstackTestCase):
type="Routing"
)

sf_util.check_kvm_access_to_volume(sf_root_volume.iqn, list_hosts_response, self.testdata[TestData.kvm], self, False)
kvm_login = self.testdata[TestData.kvm]

sf_util.check_kvm_access_to_volume(sf_root_volume.iqn, list_hosts_response, kvm_login[TestData.username], kvm_login[TestData.password], self, False)
else:
self.assertTrue(False, "Invalid hypervisor type")

@@ -544,7 +546,9 @@ class TestManagedSystemVMs(cloudstackTestCase):
type="Routing"
)

sf_util.check_kvm_access_to_volume(sf_root_volume.iqn, list_hosts_response, self.testdata[TestData.kvm], self)
kvm_login = self.testdata[TestData.kvm]

sf_util.check_kvm_access_to_volume(sf_root_volume.iqn, list_hosts_response, kvm_login[TestData.username], kvm_login[TestData.password], self)
else:
self.assertTrue(False, "Invalid hypervisor type")


@@ -39,7 +39,7 @@ from marvin.lib.utils import cleanup_resources
# Prerequisites:
# Only one zone
# Only one pod
# Two clusters
# Two clusters (have system VMs (including the VR) running on local or NFS storage)


class TestData():

@@ -139,77 +139,9 @@ class TestData:
TestData.tags: TestData.storageTag,
"storagetype": "shared"
},
"testdiskofferings": {
"customiopsdo": {
"name": "SF_Custom_IOPS_DO",
"displaytext": "Customized IOPS DO (Size = 128 GB; Min IOPS = 500; Max IOPS = 1000)",
"disksize": 128,
"customizediops": True,
"miniops": 500,
"maxiops": 1000,
"hypervisorsnapshotreserve": 200,
TestData.tags: TestData.storageTag,
"storagetype": "shared"
},
"customsizedo": {
"name": "SF_Custom_Size_DO",
"displaytext": "Customized IOPS DO (Min IOPS = 500; Max IOPS = 1000)",
"disksize": 175,
"customizediops": False,
"miniops": 500,
"maxiops": 1000,
"hypervisorsnapshotreserve": 200,
TestData.tags: TestData.storageTag,
"storagetype": "shared"
},
"customsizeandiopsdo": {
"name": "SF_Custom_Size_IOPS_DO",
"displaytext": "Customized Size and IOPS DO",
"disksize": 200,
"customizediops": True,
"miniops": 400,
"maxiops": 800,
"hypervisorsnapshotreserve": 200,
TestData.tags: TestData.storageTag,
"storagetype": "shared"
},
"newiopsdo": {
"name": "SF_New_IOPS_DO",
"displaytext": "New IOPS (Size = 128 GB; Min IOPS = 350, Max IOPS = 700)",
"disksize": 128,
"miniops": 350,
"maxiops": 700,
"hypervisorsnapshotreserve": 200,
TestData.tags: TestData.storageTag,
"storagetype": "shared"
},
"newsizedo": {
"name": "SF_New_Size_DO",
"displaytext": "New Size: 175",
"disksize": 175,
"miniops": 400,
"maxiops": 800,
"hypervisorsnapshotreserve": 200,
TestData.tags: TestData.storageTag,
"storagetype": "shared"
},
"newsizeandiopsdo": {
"name": "SF_New_Size_IOPS_DO",
"displaytext": "New Size and IOPS",
"disksize": 200,
"miniops": 200,
"maxiops": 400,
"hypervisorsnapshotreserve": 200,
TestData.tags: TestData.storageTag,
"storagetype": "shared"
}
},
TestData.volume_1: {
"diskname": "testvolume",
},
"volume2": {
"diskname": "testvolume2",
},
TestData.zoneId: 1,
TestData.clusterId: 1,
TestData.domainId: 1,

@@ -155,11 +155,11 @@ def check_xen_sr(xen_sr_name, xen_session, obj_assert, should_exist=True):
else:
check_list(xen_sr, 0, obj_assert, "SR " + xen_sr_name + " exists, but shouldn't.")

def check_kvm_access_to_volume(iscsi_name, kvm_hosts, kvm_login, obj_assert, should_exist=True):
def check_kvm_access_to_volume(iscsi_name, kvm_hosts, username, password, obj_assert, should_exist=True):
count = 0

for kvm_host in kvm_hosts:
ssh_connection = sf_util.get_ssh_connection(kvm_host.ipaddress, kvm_login[TestData.username], kvm_login[TestData.password])
ssh_connection = get_ssh_connection(kvm_host.ipaddress, username, password)

stdin, stdout, stderr = ssh_connection.exec_command("ls /dev/disk/by-path | grep " + iscsi_name)
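The XenServer-side half of the change follows the same ownership rule: the code path that attaches an SR is also responsible for detaching it, instead of StorageSystemDataMotionStrategy sending a separate detach message afterwards. A small, self-contained sketch of that try/finally shape follows; the class, method, and println placeholders are illustrative only, and the real change is the pool.isManaged() removeSR calls added to Xenserver625StorageProcessor above.

// SrCleanupSketch.java -- a hedged sketch of keeping SR attach and detach in one place.
public class SrCleanupSketch {

    static void copyFromManagedSr(boolean poolIsManaged) {
        Object sr = null; // stands in for the XenServer SR handle

        try {
            if (poolIsManaged) {
                sr = new Object(); // stands in for attaching the SR on the XenServer host
                System.out.println("attach SR");
            }

            System.out.println("perform copy");
        } finally {
            // The code that attached the SR now also detaches it, so no external
            // caller has to remember to send an SR-detach command afterwards.
            if (poolIsManaged && sr != null) {
                System.out.println("removeSR");
            }
        }
    }

    public static void main(String[] args) {
        copyFromManagedSr(true);  // attach, copy, detach
        copyFromManagedSr(false); // copy only; no SR handling needed
    }
}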