Updates to HPE-Primera and Pure FlashArray Drivers to use Host-based VLUN Assignments (#8889)

* Updates to change Pure and Primera to host-centric vlun assignments; various small bug fixes

* update to add timestamp when deleting pure volumes to avoid future conflicts

* update to migrate to properly check disk offering is valid for the target storage pool

* Updates to change Pure and Primera to host-centric vlun assignments; various small bug fixes

* update to add timestamp when deleting pure volumes to avoid future conflicts

* update to migrate to properly check disk offering is valid for the target storage pool

* improve error handling when copying volumes to add precision to which step failed

* rename pure volume before delete to avoid conflicts if the same name is used before it's expunged on the array

* remove dead code in AdaptiveDataStoreLifeCycleImpl.java

* Fix issues found in PR checks

* fix session refresh TTL logic

* updates from PR comments

* logic to delete by path ONLY on supported OUI

* fix to StorageSystemDataMotionStrategy compile error

* change noisy debug message to trace message

* fix double callback call in handleVolumeMigrationFromNonManagedStorageToManagedStorage

* fix for flash array delete error

* fix typo in StorageSystemDataMotionStrategy

* change copyVolume to use writeback to speed up copy ops

* remove returning PrimaryStorageDownloadAnswer when connectPhysicalDisk returns false during KVMStorageProcessor template copy

* remove change to only set UUID on snapshot if it is a vmSnapshot

* reverting change to UserVmManagerImpl.configureCustomRootDiskSize

* add error checking/simplification per comments from @slavkap

* Update engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java

Co-authored-by: Suresh Kumar Anaparti <sureshkumar.anaparti@gmail.com>

* address PR comments from @sureshanaparti

---------

Co-authored-by: GLOVER RENE <rg9975@cs419-mgmtserver.rg9975nprd.app.ecp.att.com>
Co-authored-by: Suresh Kumar Anaparti <sureshkumar.anaparti@gmail.com>
This commit is contained in:
Rene Glover 2024-06-25 00:05:39 -05:00 committed by GitHub
parent 42e71175d7
commit 6ee6603359
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
36 changed files with 1170 additions and 662 deletions

View File

@ -1504,18 +1504,17 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
for (VolumeVO vol : vols) { for (VolumeVO vol : vols) {
VolumeInfo volumeInfo = volFactory.getVolume(vol.getId()); VolumeInfo volumeInfo = volFactory.getVolume(vol.getId());
DataTO volTO = volumeInfo.getTO();
DiskTO disk = storageMgr.getDiskWithThrottling(volTO, vol.getVolumeType(), vol.getDeviceId(), vol.getPath(), vm.getServiceOfferingId(), vol.getDiskOfferingId());
DataStore dataStore = dataStoreMgr.getDataStore(vol.getPoolId(), DataStoreRole.Primary); DataStore dataStore = dataStoreMgr.getDataStore(vol.getPoolId(), DataStoreRole.Primary);
disk.setDetails(getDetails(volumeInfo, dataStore));
PrimaryDataStore primaryDataStore = (PrimaryDataStore)dataStore; PrimaryDataStore primaryDataStore = (PrimaryDataStore)dataStore;
// This might impact other managed storages, enable requires access for migration in relevant datastore driver (currently enabled for PowerFlex storage pool only) // This might impact other managed storages, enable requires access for migration in relevant datastore driver (currently enabled for PowerFlex storage pool only)
if (primaryDataStore.isManaged() && volService.requiresAccessForMigration(volumeInfo, dataStore)) { if (primaryDataStore.isManaged() && volService.requiresAccessForMigration(volumeInfo, dataStore)) {
volService.grantAccess(volFactory.getVolume(vol.getId()), dest.getHost(), dataStore); volService.grantAccess(volFactory.getVolume(vol.getId()), dest.getHost(), dataStore);
} }
// make sure this is done AFTER grantAccess, as grantAccess may change the volume's state
DataTO volTO = volumeInfo.getTO();
DiskTO disk = storageMgr.getDiskWithThrottling(volTO, vol.getVolumeType(), vol.getDeviceId(), vol.getPath(), vm.getServiceOfferingId(), vol.getDiskOfferingId());
disk.setDetails(getDetails(volumeInfo, dataStore));
vm.addDisk(disk); vm.addDisk(disk);
} }

View File

@ -45,6 +45,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint;
import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector; import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector;
import org.apache.cloudstack.engine.subsystem.api.storage.HostScope; import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
import org.apache.cloudstack.engine.subsystem.api.storage.Scope; import org.apache.cloudstack.engine.subsystem.api.storage.Scope;
@ -148,6 +149,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
private static final int LOCK_TIME_IN_SECONDS = 300; private static final int LOCK_TIME_IN_SECONDS = 300;
private static final String OPERATION_NOT_SUPPORTED = "This operation is not supported."; private static final String OPERATION_NOT_SUPPORTED = "This operation is not supported.";
@Inject @Inject
protected AgentManager agentManager; protected AgentManager agentManager;
@Inject @Inject
@ -685,8 +687,6 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
private void handleVolumeMigrationFromNonManagedStorageToManagedStorage(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo, private void handleVolumeMigrationFromNonManagedStorageToManagedStorage(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo,
AsyncCompletionCallback<CopyCommandResult> callback) { AsyncCompletionCallback<CopyCommandResult> callback) {
String errMsg = null;
try { try {
HypervisorType hypervisorType = srcVolumeInfo.getHypervisorType(); HypervisorType hypervisorType = srcVolumeInfo.getHypervisorType();
@ -697,37 +697,21 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
if (HypervisorType.XenServer.equals(hypervisorType)) { if (HypervisorType.XenServer.equals(hypervisorType)) {
handleVolumeMigrationForXenServer(srcVolumeInfo, destVolumeInfo); handleVolumeMigrationForXenServer(srcVolumeInfo, destVolumeInfo);
} destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId(), destVolumeInfo.getDataStore());
else { DataTO dataTO = destVolumeInfo.getTO();
CopyCmdAnswer copyCmdAnswer = new CopyCmdAnswer(dataTO);
CopyCommandResult result = new CopyCommandResult(null, copyCmdAnswer);
callback.complete(result);
} else {
handleVolumeMigrationForKVM(srcVolumeInfo, destVolumeInfo, callback); handleVolumeMigrationForKVM(srcVolumeInfo, destVolumeInfo, callback);
} }
} }
catch (Exception ex) { catch (Exception ex) {
errMsg = "Migration operation failed in 'StorageSystemDataMotionStrategy.handleVolumeMigrationFromNonManagedStorageToManagedStorage': " + String errMsg = "Migration operation failed in 'StorageSystemDataMotionStrategy.handleVolumeMigrationFromNonManagedStorageToManagedStorage': " +
ex.getMessage(); ex.getMessage();
throw new CloudRuntimeException(errMsg, ex); throw new CloudRuntimeException(errMsg, ex);
} }
finally {
CopyCmdAnswer copyCmdAnswer;
if (errMsg != null) {
copyCmdAnswer = new CopyCmdAnswer(errMsg);
}
else {
destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId(), destVolumeInfo.getDataStore());
DataTO dataTO = destVolumeInfo.getTO();
copyCmdAnswer = new CopyCmdAnswer(dataTO);
}
CopyCommandResult result = new CopyCommandResult(null, copyCmdAnswer);
result.setResult(errMsg);
callback.complete(result);
}
} }
private void handleVolumeMigrationForXenServer(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo) { private void handleVolumeMigrationForXenServer(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo) {
@ -846,12 +830,25 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
checkAvailableForMigration(vm); checkAvailableForMigration(vm);
String errMsg = null; String errMsg = null;
HostVO hostVO = null;
try { try {
destVolumeInfo.getDataStore().getDriver().createAsync(destVolumeInfo.getDataStore(), destVolumeInfo, null); destVolumeInfo.getDataStore().getDriver().createAsync(destVolumeInfo.getDataStore(), destVolumeInfo, null);
VolumeVO volumeVO = _volumeDao.findById(destVolumeInfo.getId()); VolumeVO volumeVO = _volumeDao.findById(destVolumeInfo.getId());
updatePathFromScsiName(volumeVO); updatePathFromScsiName(volumeVO);
destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId(), destVolumeInfo.getDataStore()); destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId(), destVolumeInfo.getDataStore());
HostVO hostVO = getHostOnWhichToExecuteMigrationCommand(srcVolumeInfo, destVolumeInfo); hostVO = getHostOnWhichToExecuteMigrationCommand(srcVolumeInfo, destVolumeInfo);
// if managed we need to grant access
PrimaryDataStore pds = (PrimaryDataStore)this.dataStoreMgr.getPrimaryDataStore(destVolumeInfo.getDataStore().getUuid());
if (pds == null) {
throw new CloudRuntimeException("Unable to find primary data store driver for this volume");
}
// grant access (for managed volumes)
_volumeService.grantAccess(destVolumeInfo, hostVO, destVolumeInfo.getDataStore());
// re-retrieve volume to get any updated information from grant
destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId(), destVolumeInfo.getDataStore());
// migrate the volume via the hypervisor // migrate the volume via the hypervisor
String path = migrateVolumeForKVM(srcVolumeInfo, destVolumeInfo, hostVO, "Unable to migrate the volume from non-managed storage to managed storage"); String path = migrateVolumeForKVM(srcVolumeInfo, destVolumeInfo, hostVO, "Unable to migrate the volume from non-managed storage to managed storage");
@ -872,6 +869,18 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
throw new CloudRuntimeException(errMsg, ex); throw new CloudRuntimeException(errMsg, ex);
} }
} finally { } finally {
// revoke access (for managed volumes)
if (hostVO != null) {
try {
_volumeService.revokeAccess(destVolumeInfo, hostVO, destVolumeInfo.getDataStore());
} catch (Exception e) {
LOGGER.warn(String.format("Failed to revoke access for volume 'name=%s,uuid=%s' after a migration attempt", destVolumeInfo.getVolume(), destVolumeInfo.getUuid()), e);
}
}
// re-retrieve volume to get any updated information from grant
destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId(), destVolumeInfo.getDataStore());
CopyCmdAnswer copyCmdAnswer; CopyCmdAnswer copyCmdAnswer;
if (errMsg != null) { if (errMsg != null) {
copyCmdAnswer = new CopyCmdAnswer(errMsg); copyCmdAnswer = new CopyCmdAnswer(errMsg);
@ -922,6 +931,125 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
return hostVO; return hostVO;
} }
private VolumeInfo createTemporaryVolumeCopyOfSnapshotAdaptive(SnapshotInfo snapshotInfo) {
VolumeInfo tempVolumeInfo = null;
VolumeVO tempVolumeVO = null;
try {
tempVolumeVO = new VolumeVO(Volume.Type.DATADISK, snapshotInfo.getName() + "_" + System.currentTimeMillis() + ".TMP",
snapshotInfo.getDataCenterId(), snapshotInfo.getDomainId(), snapshotInfo.getAccountId(), 0, ProvisioningType.THIN, snapshotInfo.getSize(), 0L, 0L, "");
tempVolumeVO.setPoolId(snapshotInfo.getDataStore().getId());
_volumeDao.persist(tempVolumeVO);
tempVolumeInfo = this._volFactory.getVolume(tempVolumeVO.getId());
if (snapshotInfo.getDataStore().getDriver().canCopy(snapshotInfo, tempVolumeInfo)) {
snapshotInfo.getDataStore().getDriver().copyAsync(snapshotInfo, tempVolumeInfo, null, null);
// refresh volume info as data could have changed
tempVolumeInfo = this._volFactory.getVolume(tempVolumeVO.getId());
} else {
throw new CloudRuntimeException("Storage driver indicated it could create a volume from the snapshot but rejected the subsequent request to do so");
}
return tempVolumeInfo;
} catch (Throwable e) {
try {
if (tempVolumeInfo != null) {
tempVolumeInfo.getDataStore().getDriver().deleteAsync(tempVolumeInfo.getDataStore(), tempVolumeInfo, null);
}
// cleanup temporary volume
if (tempVolumeVO != null) {
_volumeDao.remove(tempVolumeVO.getId());
}
} catch (Throwable e2) {
LOGGER.warn("Failed to delete temporary volume created for copy", e2);
}
throw e;
}
}
/**
 * Simpler logic for copy from snapshot for adaptive driver only.
* @param snapshotInfo
* @param destData
* @param callback
*/
private void handleCopyAsyncToSecondaryStorageAdaptive(SnapshotInfo snapshotInfo, DataObject destData, AsyncCompletionCallback<CopyCommandResult> callback) {
CopyCmdAnswer copyCmdAnswer = null;
DataObject srcFinal = null;
HostVO hostVO = null;
DataStore srcDataStore = null;
boolean tempRequired = false;
try {
snapshotInfo.processEvent(Event.CopyingRequested);
hostVO = getHost(snapshotInfo);
DataObject destOnStore = destData;
srcDataStore = snapshotInfo.getDataStore();
int primaryStorageDownloadWait = StorageManager.PRIMARY_STORAGE_DOWNLOAD_WAIT.value();
CopyCommand copyCommand = null;
if (!Boolean.parseBoolean(srcDataStore.getDriver().getCapabilities().get("CAN_DIRECT_ATTACH_SNAPSHOT"))) {
srcFinal = createTemporaryVolumeCopyOfSnapshotAdaptive(snapshotInfo);
tempRequired = true;
} else {
srcFinal = snapshotInfo;
}
_volumeService.grantAccess(srcFinal, hostVO, srcDataStore);
DataTO srcTo = srcFinal.getTO();
// have to set PATH as extraOptions due to logic in KVM hypervisor processor
HashMap<String,String> extraDetails = new HashMap<>();
extraDetails.put(DiskTO.PATH, srcTo.getPath());
copyCommand = new CopyCommand(srcFinal.getTO(), destOnStore.getTO(), primaryStorageDownloadWait,
VirtualMachineManager.ExecuteInSequence.value());
copyCommand.setOptions(extraDetails);
copyCmdAnswer = (CopyCmdAnswer)agentManager.send(hostVO.getId(), copyCommand);
} catch (Exception ex) {
String msg = "Failed to create template from snapshot (Snapshot ID = " + snapshotInfo.getId() + ") : ";
LOGGER.warn(msg, ex);
throw new CloudRuntimeException(msg + ex.getMessage(), ex);
}
finally {
// remove access to the volume that was used
if (srcFinal != null && hostVO != null && srcDataStore != null) {
_volumeService.revokeAccess(srcFinal, hostVO, srcDataStore);
}
// delete the temporary volume if it was needed
if (srcFinal != null && tempRequired) {
try {
srcFinal.getDataStore().getDriver().deleteAsync(srcFinal.getDataStore(), srcFinal, null);
} catch (Throwable e) {
LOGGER.warn("Failed to delete temporary volume created for copy", e);
}
}
// check we have a reasonable result
String errMsg = null;
if (copyCmdAnswer == null || (!copyCmdAnswer.getResult() && copyCmdAnswer.getDetails() == null)) {
errMsg = "Unable to create template from snapshot";
copyCmdAnswer = new CopyCmdAnswer(errMsg);
} else if (!copyCmdAnswer.getResult() && StringUtils.isEmpty(copyCmdAnswer.getDetails())) {
errMsg = "Unable to create template from snapshot";
} else if (!copyCmdAnswer.getResult()) {
errMsg = copyCmdAnswer.getDetails();
}
//submit processEvent
if (StringUtils.isEmpty(errMsg)) {
snapshotInfo.processEvent(Event.OperationSuccessed);
} else {
snapshotInfo.processEvent(Event.OperationFailed);
}
CopyCommandResult result = new CopyCommandResult(null, copyCmdAnswer);
result.setResult(copyCmdAnswer.getDetails());
callback.complete(result);
}
}
/** /**
* This function is responsible for copying a snapshot from managed storage to secondary storage. This is used in the following two cases: * This function is responsible for copying a snapshot from managed storage to secondary storage. This is used in the following two cases:
* 1) When creating a template from a snapshot * 1) When creating a template from a snapshot
@ -932,6 +1060,13 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
* @param callback callback for async * @param callback callback for async
*/ */
private void handleCopyAsyncToSecondaryStorage(SnapshotInfo snapshotInfo, DataObject destData, AsyncCompletionCallback<CopyCommandResult> callback) { private void handleCopyAsyncToSecondaryStorage(SnapshotInfo snapshotInfo, DataObject destData, AsyncCompletionCallback<CopyCommandResult> callback) {
// if this flag is set (true or false), we will fall out to use simpler logic for the Adaptive handler
if (snapshotInfo.getDataStore().getDriver().getCapabilities().get("CAN_DIRECT_ATTACH_SNAPSHOT") != null) {
handleCopyAsyncToSecondaryStorageAdaptive(snapshotInfo, destData, callback);
return;
}
String errMsg = null; String errMsg = null;
CopyCmdAnswer copyCmdAnswer = null; CopyCmdAnswer copyCmdAnswer = null;
boolean usingBackendSnapshot = false; boolean usingBackendSnapshot = false;
@ -1698,14 +1833,13 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
private CopyCmdAnswer copyImageToVolume(DataObject srcDataObject, VolumeInfo destVolumeInfo, HostVO hostVO) { private CopyCmdAnswer copyImageToVolume(DataObject srcDataObject, VolumeInfo destVolumeInfo, HostVO hostVO) {
int primaryStorageDownloadWait = StorageManager.PRIMARY_STORAGE_DOWNLOAD_WAIT.value(); int primaryStorageDownloadWait = StorageManager.PRIMARY_STORAGE_DOWNLOAD_WAIT.value();
CopyCommand copyCommand = new CopyCommand(srcDataObject.getTO(), destVolumeInfo.getTO(), primaryStorageDownloadWait,
VirtualMachineManager.ExecuteInSequence.value());
CopyCmdAnswer copyCmdAnswer; CopyCmdAnswer copyCmdAnswer;
try { try {
_volumeService.grantAccess(destVolumeInfo, hostVO, destVolumeInfo.getDataStore()); _volumeService.grantAccess(destVolumeInfo, hostVO, destVolumeInfo.getDataStore());
CopyCommand copyCommand = new CopyCommand(srcDataObject.getTO(), destVolumeInfo.getTO(), primaryStorageDownloadWait,
VirtualMachineManager.ExecuteInSequence.value());
Map<String, String> destDetails = getVolumeDetails(destVolumeInfo); Map<String, String> destDetails = getVolumeDetails(destVolumeInfo);
copyCommand.setOptions2(destDetails); copyCommand.setOptions2(destDetails);
@ -1730,42 +1864,6 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
return copyCmdAnswer; return copyCmdAnswer;
} }
/**
* Use normal volume semantics (create a volume known to cloudstack, ask the storage driver to create it as a copy of the snapshot)
* @param volumeVO
* @param snapshotInfo
*/
public void prepTempVolumeForCopyFromSnapshot(SnapshotInfo snapshotInfo) {
VolumeVO volumeVO = null;
try {
volumeVO = new VolumeVO(Volume.Type.DATADISK, snapshotInfo.getName() + "_" + System.currentTimeMillis() + ".TMP",
snapshotInfo.getDataCenterId(), snapshotInfo.getDomainId(), snapshotInfo.getAccountId(), 0, ProvisioningType.THIN, snapshotInfo.getSize(), 0L, 0L, "");
volumeVO.setPoolId(snapshotInfo.getDataStore().getId());
_volumeDao.persist(volumeVO);
VolumeInfo tempVolumeInfo = this._volFactory.getVolume(volumeVO.getId());
if (snapshotInfo.getDataStore().getDriver().canCopy(snapshotInfo, tempVolumeInfo)) {
snapshotInfo.getDataStore().getDriver().copyAsync(snapshotInfo, tempVolumeInfo, null, null);
// refresh volume info as data could have changed
tempVolumeInfo = this._volFactory.getVolume(volumeVO.getId());
// save the "temp" volume info into the snapshot details (we need this to clean up at the end)
_snapshotDetailsDao.addDetail(snapshotInfo.getId(), "TemporaryVolumeCopyUUID", tempVolumeInfo.getUuid(), true);
_snapshotDetailsDao.addDetail(snapshotInfo.getId(), "TemporaryVolumeCopyPath", tempVolumeInfo.getPath(), true);
// NOTE: for this to work, the Driver must return a custom SnapshotObjectTO object from getTO()
// whenever the TemporaryVolumeCopyPath is set.
} else {
throw new CloudRuntimeException("Storage driver indicated it could create a volume from the snapshot but rejected the subsequent request to do so");
}
} catch (Throwable e) {
// cleanup temporary volume
if (volumeVO != null) {
_volumeDao.remove(volumeVO.getId());
}
throw e;
}
}
/** /**
* If the underlying storage system is making use of read-only snapshots, this gives the storage system the opportunity to * If the underlying storage system is making use of read-only snapshots, this gives the storage system the opportunity to
* create a volume from the snapshot so that we can copy the VHD file that should be inside of the snapshot to secondary storage. * create a volume from the snapshot so that we can copy the VHD file that should be inside of the snapshot to secondary storage.
@ -1777,13 +1875,8 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
* resign the SR and the VDI that should be inside of the snapshot before copying the VHD file to secondary storage. * resign the SR and the VDI that should be inside of the snapshot before copying the VHD file to secondary storage.
*/ */
private void createVolumeFromSnapshot(SnapshotInfo snapshotInfo) { private void createVolumeFromSnapshot(SnapshotInfo snapshotInfo) {
if ("true".equalsIgnoreCase(snapshotInfo.getDataStore().getDriver().getCapabilities().get("CAN_CREATE_TEMP_VOLUME_FROM_SNAPSHOT"))) {
prepTempVolumeForCopyFromSnapshot(snapshotInfo);
return;
}
SnapshotDetailsVO snapshotDetails = handleSnapshotDetails(snapshotInfo.getId(), "create"); SnapshotDetailsVO snapshotDetails = handleSnapshotDetails(snapshotInfo.getId(), "create");
try { try {
snapshotInfo.getDataStore().getDriver().createAsync(snapshotInfo.getDataStore(), snapshotInfo, null); snapshotInfo.getDataStore().getDriver().createAsync(snapshotInfo.getDataStore(), snapshotInfo, null);
} }
@ -1798,23 +1891,8 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
* invocation of createVolumeFromSnapshot(SnapshotInfo). * invocation of createVolumeFromSnapshot(SnapshotInfo).
*/ */
private void deleteVolumeFromSnapshot(SnapshotInfo snapshotInfo) { private void deleteVolumeFromSnapshot(SnapshotInfo snapshotInfo) {
VolumeVO volumeVO = null; try {
// cleanup any temporary volume previously created for copy from a snapshot LOGGER.debug("Cleaning up temporary volume created for copy from a snapshot");
if ("true".equalsIgnoreCase(snapshotInfo.getDataStore().getDriver().getCapabilities().get("CAN_CREATE_TEMP_VOLUME_FROM_SNAPSHOT"))) {
SnapshotDetailsVO tempUuid = null;
tempUuid = _snapshotDetailsDao.findDetail(snapshotInfo.getId(), "TemporaryVolumeCopyUUID");
if (tempUuid == null || tempUuid.getValue() == null) {
return;
}
volumeVO = _volumeDao.findByUuid(tempUuid.getValue());
if (volumeVO != null) {
_volumeDao.remove(volumeVO.getId());
}
_snapshotDetailsDao.remove(tempUuid.getId());
_snapshotDetailsDao.removeDetail(snapshotInfo.getId(), "TemporaryVolumeCopyUUID");
return;
}
SnapshotDetailsVO snapshotDetails = handleSnapshotDetails(snapshotInfo.getId(), "delete"); SnapshotDetailsVO snapshotDetails = handleSnapshotDetails(snapshotInfo.getId(), "delete");
@ -1824,6 +1902,10 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
finally { finally {
_snapshotDetailsDao.remove(snapshotDetails.getId()); _snapshotDetailsDao.remove(snapshotDetails.getId());
} }
} catch (Throwable e) {
LOGGER.warn("Failed to clean up temporary volume created for copy from a snapshot, transction will not be failed but an adminstrator should clean this up: " + snapshotInfo.getUuid() + " - " + snapshotInfo.getPath(), e);
}
} }
private void handleQualityOfServiceForVolumeMigration(VolumeInfo volumeInfo, PrimaryDataStoreDriver.QualityOfServiceState qualityOfServiceState) { private void handleQualityOfServiceForVolumeMigration(VolumeInfo volumeInfo, PrimaryDataStoreDriver.QualityOfServiceState qualityOfServiceState) {
@ -2459,15 +2541,15 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
int primaryStorageDownloadWait = StorageManager.PRIMARY_STORAGE_DOWNLOAD_WAIT.value(); int primaryStorageDownloadWait = StorageManager.PRIMARY_STORAGE_DOWNLOAD_WAIT.value();
CopyCommand copyCommand = new CopyCommand(volumeInfo.getTO(), templateInfo.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value());
try { try {
handleQualityOfServiceForVolumeMigration(volumeInfo, PrimaryDataStoreDriver.QualityOfServiceState.MIGRATION); handleQualityOfServiceForVolumeMigration(volumeInfo, PrimaryDataStoreDriver.QualityOfServiceState.MIGRATION);
if (srcVolumeDetached || StoragePoolType.PowerFlex == storagePoolVO.getPoolType()) { if (srcVolumeDetached || StoragePoolType.PowerFlex == storagePoolVO.getPoolType() || StoragePoolType.FiberChannel == storagePoolVO.getPoolType()) {
_volumeService.grantAccess(volumeInfo, hostVO, srcDataStore); _volumeService.grantAccess(volumeInfo, hostVO, srcDataStore);
} }
CopyCommand copyCommand = new CopyCommand(volumeInfo.getTO(), templateInfo.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value());
Map<String, String> srcDetails = getVolumeDetails(volumeInfo); Map<String, String> srcDetails = getVolumeDetails(volumeInfo);
copyCommand.setOptions(srcDetails); copyCommand.setOptions(srcDetails);
@ -2496,7 +2578,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
throw new CloudRuntimeException(msg + ex.getMessage(), ex); throw new CloudRuntimeException(msg + ex.getMessage(), ex);
} }
finally { finally {
if (srcVolumeDetached || StoragePoolType.PowerFlex == storagePoolVO.getPoolType()) { if (srcVolumeDetached || StoragePoolType.PowerFlex == storagePoolVO.getPoolType() || StoragePoolType.FiberChannel == storagePoolVO.getPoolType()) {
try { try {
_volumeService.revokeAccess(volumeInfo, hostVO, srcDataStore); _volumeService.revokeAccess(volumeInfo, hostVO, srcDataStore);
} }
@ -2591,13 +2673,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
long snapshotId = snapshotInfo.getId(); long snapshotId = snapshotInfo.getId();
// if the snapshot required a temporary volume be created check if the UUID is set so we can if (storagePoolVO.getPoolType() == StoragePoolType.PowerFlex || storagePoolVO.getPoolType() == StoragePoolType.FiberChannel) {
// retrieve the temporary volume's path to use during remote copy
List<SnapshotDetailsVO> storedDetails = _snapshotDetailsDao.findDetails(snapshotInfo.getId(), "TemporaryVolumeCopyPath");
if (storedDetails != null && storedDetails.size() > 0) {
String value = storedDetails.get(0).getValue();
snapshotDetails.put(DiskTO.PATH, value);
} else if (storagePoolVO.getPoolType() == StoragePoolType.PowerFlex || storagePoolVO.getPoolType() == StoragePoolType.FiberChannel) {
snapshotDetails.put(DiskTO.IQN, snapshotInfo.getPath()); snapshotDetails.put(DiskTO.IQN, snapshotInfo.getPath());
} else { } else {
snapshotDetails.put(DiskTO.IQN, getSnapshotProperty(snapshotId, DiskTO.IQN)); snapshotDetails.put(DiskTO.IQN, getSnapshotProperty(snapshotId, DiskTO.IQN));
@ -2813,6 +2889,8 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
Map<String, String> srcDetails = getVolumeDetails(srcVolumeInfo); Map<String, String> srcDetails = getVolumeDetails(srcVolumeInfo);
Map<String, String> destDetails = getVolumeDetails(destVolumeInfo); Map<String, String> destDetails = getVolumeDetails(destVolumeInfo);
_volumeService.grantAccess(srcVolumeInfo, hostVO, srcVolumeInfo.getDataStore());
MigrateVolumeCommand migrateVolumeCommand = new MigrateVolumeCommand(srcVolumeInfo.getTO(), destVolumeInfo.getTO(), MigrateVolumeCommand migrateVolumeCommand = new MigrateVolumeCommand(srcVolumeInfo.getTO(), destVolumeInfo.getTO(),
srcDetails, destDetails, StorageManager.KvmStorageOfflineMigrationWait.value()); srcDetails, destDetails, StorageManager.KvmStorageOfflineMigrationWait.value());
@ -2855,18 +2933,18 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
StoragePoolVO storagePoolVO = _storagePoolDao.findById(srcVolumeInfo.getPoolId()); StoragePoolVO storagePoolVO = _storagePoolDao.findById(srcVolumeInfo.getPoolId());
Map<String, String> srcDetails = getVolumeDetails(srcVolumeInfo); Map<String, String> srcDetails = getVolumeDetails(srcVolumeInfo);
CopyVolumeCommand copyVolumeCommand = new CopyVolumeCommand(srcVolumeInfo.getId(), destVolumeInfo.getPath(), storagePoolVO,
destVolumeInfo.getDataStore().getUri(), true, StorageManager.KvmStorageOfflineMigrationWait.value(), true);
copyVolumeCommand.setSrcData(srcVolumeInfo.getTO());
copyVolumeCommand.setSrcDetails(srcDetails);
handleQualityOfServiceForVolumeMigration(srcVolumeInfo, PrimaryDataStoreDriver.QualityOfServiceState.MIGRATION); handleQualityOfServiceForVolumeMigration(srcVolumeInfo, PrimaryDataStoreDriver.QualityOfServiceState.MIGRATION);
if (srcVolumeDetached) { if (srcVolumeDetached) {
_volumeService.grantAccess(srcVolumeInfo, hostVO, srcVolumeInfo.getDataStore()); _volumeService.grantAccess(srcVolumeInfo, hostVO, srcVolumeInfo.getDataStore());
} }
CopyVolumeCommand copyVolumeCommand = new CopyVolumeCommand(srcVolumeInfo.getId(), destVolumeInfo.getPath(), storagePoolVO,
destVolumeInfo.getDataStore().getUri(), true, StorageManager.KvmStorageOfflineMigrationWait.value(), true);
copyVolumeCommand.setSrcData(srcVolumeInfo.getTO());
copyVolumeCommand.setSrcDetails(srcDetails);
CopyVolumeAnswer copyVolumeAnswer = (CopyVolumeAnswer)agentManager.send(hostVO.getId(), copyVolumeCommand); CopyVolumeAnswer copyVolumeAnswer = (CopyVolumeAnswer)agentManager.send(hostVO.getId(), copyVolumeCommand);
if (copyVolumeAnswer == null || !copyVolumeAnswer.getResult()) { if (copyVolumeAnswer == null || !copyVolumeAnswer.getResult()) {
@ -2938,18 +3016,19 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
srcData = cacheData; srcData = cacheData;
} }
CopyCommand copyCommand = new CopyCommand(srcData.getTO(), volumeInfo.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value());
try { try {
CopyCommand copyCommand = null;
if (Snapshot.LocationType.PRIMARY.equals(locationType)) { if (Snapshot.LocationType.PRIMARY.equals(locationType)) {
_volumeService.grantAccess(snapshotInfo, hostVO, snapshotInfo.getDataStore()); _volumeService.grantAccess(snapshotInfo, hostVO, snapshotInfo.getDataStore());
Map<String, String> srcDetails = getSnapshotDetails(snapshotInfo); Map<String, String> srcDetails = getSnapshotDetails(snapshotInfo);
copyCommand = new CopyCommand(srcData.getTO(), volumeInfo.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value());
copyCommand.setOptions(srcDetails); copyCommand.setOptions(srcDetails);
} } else {
_volumeService.grantAccess(volumeInfo, hostVO, volumeInfo.getDataStore()); _volumeService.grantAccess(volumeInfo, hostVO, volumeInfo.getDataStore());
copyCommand = new CopyCommand(srcData.getTO(), volumeInfo.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value());
}
Map<String, String> destDetails = getVolumeDetails(volumeInfo); Map<String, String> destDetails = getVolumeDetails(volumeInfo);

View File

@ -101,7 +101,9 @@ public class DefaultModuleDefinitionSet implements ModuleDefinitionSet {
log.debug(String.format("Trying to obtain module [%s] context.", moduleDefinitionName)); log.debug(String.format("Trying to obtain module [%s] context.", moduleDefinitionName));
ApplicationContext context = getApplicationContext(moduleDefinitionName); ApplicationContext context = getApplicationContext(moduleDefinitionName);
try { try {
if (context.containsBean("moduleStartup")) { if (context == null) {
log.warn(String.format("Application context not found for module definition [%s]", moduleDefinitionName));
} else if (context.containsBean("moduleStartup")) {
Runnable runnable = context.getBean("moduleStartup", Runnable.class); Runnable runnable = context.getBean("moduleStartup", Runnable.class);
log.info(String.format("Starting module [%s].", moduleDefinitionName)); log.info(String.format("Starting module [%s].", moduleDefinitionName));
runnable.run(); runnable.run();

View File

@ -122,7 +122,9 @@ public class DynamicRoleBasedAPIAccessChecker extends AdapterBase implements API
} }
if (accountRole.getRoleType() == RoleType.Admin && accountRole.getId() == RoleType.Admin.getId()) { if (accountRole.getRoleType() == RoleType.Admin && accountRole.getId() == RoleType.Admin.getId()) {
LOGGER.info(String.format("Account [%s] is Root Admin or Domain Admin, all APIs are allowed.", account)); if (LOGGER.isTraceEnabled()) {
LOGGER.trace(String.format("Account [%s] is Root Admin or Domain Admin, all APIs are allowed.", account));
}
return true; return true;
} }

View File

@ -76,7 +76,9 @@ public class ProjectRoleBasedApiAccessChecker extends AdapterBase implements AP
Project project = CallContext.current().getProject(); Project project = CallContext.current().getProject();
if (project == null) { if (project == null) {
LOGGER.warn(String.format("Project is null, ProjectRoleBasedApiAccessChecker only applies to projects, returning APIs [%s] for user [%s] as allowed.", apiNames, user)); if (LOGGER.isTraceEnabled()) {
LOGGER.trace(String.format("Project is null, ProjectRoleBasedApiAccessChecker only applies to projects, returning APIs [%s] for user [%s] as allowed.", apiNames, user));
}
return apiNames; return apiNames;
} }
@ -114,8 +116,10 @@ public class ProjectRoleBasedApiAccessChecker extends AdapterBase implements AP
Project project = CallContext.current().getProject(); Project project = CallContext.current().getProject();
if (project == null) { if (project == null) {
LOGGER.warn(String.format("Project is null, ProjectRoleBasedApiAccessChecker only applies to projects, returning API [%s] for user [%s] as allowed.", apiCommandName, if (LOGGER.isTraceEnabled()) {
LOGGER.trace(String.format("Project is null, ProjectRoleBasedApiAccessChecker only applies to projects, returning API [%s] for user [%s] as allowed.", apiCommandName,
user)); user));
}
return true; return true;
} }

View File

@ -302,15 +302,27 @@ public class LibvirtMigrateVolumeCommandWrapper extends CommandWrapper<MigrateVo
(destVolumeObjectTO.getPath() != null ? destVolumeObjectTO.getPath() : UUID.randomUUID().toString()); (destVolumeObjectTO.getPath() != null ? destVolumeObjectTO.getPath() : UUID.randomUUID().toString());
try { try {
storagePoolManager.connectPhysicalDisk(srcPrimaryDataStore.getPoolType(), srcPrimaryDataStore.getUuid(), srcPath, srcDetails); KVMStoragePool sourceStoragePool = storagePoolManager.getStoragePool(srcPrimaryDataStore.getPoolType(), srcPrimaryDataStore.getUuid());
if (!sourceStoragePool.connectPhysicalDisk(srcPath, srcDetails)) {
return new MigrateVolumeAnswer(command, false, "Unable to connect source volume on hypervisor", srcPath);
}
KVMPhysicalDisk srcPhysicalDisk = storagePoolManager.getPhysicalDisk(srcPrimaryDataStore.getPoolType(), srcPrimaryDataStore.getUuid(), srcPath); KVMPhysicalDisk srcPhysicalDisk = storagePoolManager.getPhysicalDisk(srcPrimaryDataStore.getPoolType(), srcPrimaryDataStore.getUuid(), srcPath);
if (srcPhysicalDisk == null) {
return new MigrateVolumeAnswer(command, false, "Unable to get handle to source volume on hypervisor", srcPath);
}
KVMStoragePool destPrimaryStorage = storagePoolManager.getStoragePool(destPrimaryDataStore.getPoolType(), destPrimaryDataStore.getUuid()); KVMStoragePool destPrimaryStorage = storagePoolManager.getStoragePool(destPrimaryDataStore.getPoolType(), destPrimaryDataStore.getUuid());
storagePoolManager.connectPhysicalDisk(destPrimaryDataStore.getPoolType(), destPrimaryDataStore.getUuid(), destPath, destDetails); if (!destPrimaryStorage.connectPhysicalDisk(destPath, destDetails)) {
return new MigrateVolumeAnswer(command, false, "Unable to connect destination volume on hypervisor", srcPath);
}
storagePoolManager.copyPhysicalDisk(srcPhysicalDisk, destPath, destPrimaryStorage, command.getWaitInMillSeconds()); KVMPhysicalDisk newDiskCopy = storagePoolManager.copyPhysicalDisk(srcPhysicalDisk, destPath, destPrimaryStorage, command.getWaitInMillSeconds());
if (newDiskCopy == null) {
return new MigrateVolumeAnswer(command, false, "Copy command failed to return handle to copied physical disk", destPath);
}
} }
catch (Exception ex) { catch (Exception ex) {
return new MigrateVolumeAnswer(command, false, ex.getMessage(), null); return new MigrateVolumeAnswer(command, false, ex.getMessage(), null);

View File

@ -16,13 +16,36 @@
// under the License. // under the License.
package com.cloud.hypervisor.kvm.storage; package com.cloud.hypervisor.kvm.storage;
import java.net.InetAddress;
import java.net.UnknownHostException;
import org.apache.log4j.Logger;
import com.cloud.storage.Storage; import com.cloud.storage.Storage;
import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.exception.CloudRuntimeException;
@StorageAdaptorInfo(storagePoolType=Storage.StoragePoolType.FiberChannel) @StorageAdaptorInfo(storagePoolType=Storage.StoragePoolType.FiberChannel)
public class FiberChannelAdapter extends MultipathSCSIAdapterBase { public class FiberChannelAdapter extends MultipathSCSIAdapterBase {
private Logger LOGGER = Logger.getLogger(getClass());
private String hostname = null;
private String hostnameFq = null;
public FiberChannelAdapter() { public FiberChannelAdapter() {
LOGGER.info("Loaded FiberChannelAdapter for StorageLayer"); LOGGER.info("Loaded FiberChannelAdapter for StorageLayer");
// get the hostname - we need this to compare to connid values
try {
InetAddress inetAddress = InetAddress.getLocalHost();
hostname = inetAddress.getHostName(); // basic hostname
if (hostname.indexOf(".") > 0) {
hostname = hostname.substring(0, hostname.indexOf(".")); // strip off domain
}
hostnameFq = inetAddress.getCanonicalHostName(); // fully qualified hostname
LOGGER.info("Loaded FiberChannelAdapter for StorageLayer on host [" + hostname + "]");
} catch (UnknownHostException e) {
LOGGER.error("Error getting hostname", e);
}
} }
@Override @Override
@ -72,6 +95,11 @@ public class FiberChannelAdapter extends MultipathSCSIAdapterBase {
address = value; address = value;
} else if (key.equals("connid")) { } else if (key.equals("connid")) {
connectionId = value; connectionId = value;
} else if (key.startsWith("connid.")) {
String inHostname = key.substring(7);
if (inHostname != null && (inHostname.equals(this.hostname) || inHostname.equals(this.hostnameFq))) {
connectionId = value;
}
} }
} }
} }

View File

@ -268,7 +268,7 @@ public class KVMStorageProcessor implements StorageProcessor {
Map<String, String> details = primaryStore.getDetails(); Map<String, String> details = primaryStore.getDetails();
String path = details != null ? details.get("managedStoreTarget") : null; String path = derivePath(primaryStore, destData, details);
if (!storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path, details)) { if (!storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path, details)) {
s_logger.warn("Failed to connect physical disk at path: " + path + ", in storage pool id: " + primaryStore.getUuid()); s_logger.warn("Failed to connect physical disk at path: " + path + ", in storage pool id: " + primaryStore.getUuid());
@ -328,6 +328,16 @@ public class KVMStorageProcessor implements StorageProcessor {
} }
} }
private String derivePath(PrimaryDataStoreTO primaryStore, DataTO destData, Map<String, String> details) {
String path = null;
if (primaryStore.getPoolType() == StoragePoolType.FiberChannel) {
path = destData.getPath();
} else {
path = details != null ? details.get("managedStoreTarget") : null;
}
return path;
}
// this is much like PrimaryStorageDownloadCommand, but keeping it separate. copies template direct to root disk // this is much like PrimaryStorageDownloadCommand, but keeping it separate. copies template direct to root disk
private KVMPhysicalDisk templateToPrimaryDownload(final String templateUrl, final KVMStoragePool primaryPool, final String volUuid, final Long size, final int timeout) { private KVMPhysicalDisk templateToPrimaryDownload(final String templateUrl, final KVMStoragePool primaryPool, final String volUuid, final Long size, final int timeout) {
final int index = templateUrl.lastIndexOf("/"); final int index = templateUrl.lastIndexOf("/");
@ -407,7 +417,7 @@ public class KVMStorageProcessor implements StorageProcessor {
vol = templateToPrimaryDownload(templatePath, primaryPool, volume.getUuid(), volume.getSize(), cmd.getWaitInMillSeconds()); vol = templateToPrimaryDownload(templatePath, primaryPool, volume.getUuid(), volume.getSize(), cmd.getWaitInMillSeconds());
} if (primaryPool.getType() == StoragePoolType.PowerFlex) { } if (primaryPool.getType() == StoragePoolType.PowerFlex) {
Map<String, String> details = primaryStore.getDetails(); Map<String, String> details = primaryStore.getDetails();
String path = details != null ? details.get("managedStoreTarget") : null; String path = derivePath(primaryStore, destData, details);
if (!storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), templatePath, details)) { if (!storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), templatePath, details)) {
s_logger.warn("Failed to connect base template volume at path: " + templatePath + ", in storage pool id: " + primaryStore.getUuid()); s_logger.warn("Failed to connect base template volume at path: " + templatePath + ", in storage pool id: " + primaryStore.getUuid());
@ -1048,7 +1058,7 @@ public class KVMStorageProcessor implements StorageProcessor {
srcVolume.clearPassphrase(); srcVolume.clearPassphrase();
if (isCreatedFromVmSnapshot) { if (isCreatedFromVmSnapshot) {
s_logger.debug("Ignoring removal of vm snapshot on primary as this snapshot is created from vm snapshot"); s_logger.debug("Ignoring removal of vm snapshot on primary as this snapshot is created from vm snapshot");
} else if (primaryPool.getType() != StoragePoolType.RBD) { } else if (primaryPool != null && primaryPool.getType() != StoragePoolType.RBD) {
deleteSnapshotOnPrimary(cmd, snapshot, primaryPool); deleteSnapshotOnPrimary(cmd, snapshot, primaryPool);
} }
@ -2482,8 +2492,7 @@ public class KVMStorageProcessor implements StorageProcessor {
if (!storagePoolMgr.connectPhysicalDisk(destPrimaryStore.getPoolType(), destPrimaryStore.getUuid(), destVolumePath, destPrimaryStore.getDetails())) { if (!storagePoolMgr.connectPhysicalDisk(destPrimaryStore.getPoolType(), destPrimaryStore.getUuid(), destVolumePath, destPrimaryStore.getDetails())) {
s_logger.warn("Failed to connect dest volume at path: " + destVolumePath + ", in storage pool id: " + destPrimaryStore.getUuid()); s_logger.warn("Failed to connect dest volume at path: " + destVolumePath + ", in storage pool id: " + destPrimaryStore.getUuid());
} }
String managedStoreTarget = destPrimaryStore.getDetails() != null ? destPrimaryStore.getDetails().get("managedStoreTarget") : null; destVolumeName = derivePath(destPrimaryStore, destData, destPrimaryStore.getDetails());
destVolumeName = managedStoreTarget != null ? managedStoreTarget : destVolumePath;
} else { } else {
final String volumeName = UUID.randomUUID().toString(); final String volumeName = UUID.randomUUID().toString();
destVolumeName = volumeName + "." + destFormat.getFileExtension(); destVolumeName = volumeName + "." + destFormat.getFileExtension();

View File

@ -21,20 +21,17 @@ import java.io.BufferedReader;
import java.io.File; import java.io.File;
import java.io.IOException; import java.io.IOException;
import java.io.InputStreamReader; import java.io.InputStreamReader;
import java.util.Arrays;
import java.util.HashMap; import java.util.HashMap;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.Timer; import java.util.Timer;
import java.util.TimerTask; import java.util.TimerTask;
import java.util.UUID;
import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeUnit;
import org.apache.cloudstack.utils.qemu.QemuImg; import org.apache.cloudstack.utils.qemu.QemuImg;
import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat;
import org.apache.cloudstack.utils.qemu.QemuImgException;
import org.apache.cloudstack.utils.qemu.QemuImgFile; import org.apache.cloudstack.utils.qemu.QemuImgFile;
import org.apache.log4j.Logger; import org.joda.time.Duration;
import com.cloud.storage.Storage; import com.cloud.storage.Storage;
import com.cloud.storage.StorageManager; import com.cloud.storage.StorageManager;
@ -43,8 +40,7 @@ import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.script.OutputInterpreter; import com.cloud.utils.script.OutputInterpreter;
import com.cloud.utils.script.Script; import com.cloud.utils.script.Script;
import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.StringUtils;
import org.libvirt.LibvirtException; import org.apache.log4j.Logger;
import org.joda.time.Duration;
public abstract class MultipathSCSIAdapterBase implements StorageAdaptor { public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
static final Logger LOGGER = Logger.getLogger(MultipathSCSIAdapterBase.class); static final Logger LOGGER = Logger.getLogger(MultipathSCSIAdapterBase.class);
@ -55,6 +51,14 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
*/ */
static byte[] CLEANUP_LOCK = new byte[0]; static byte[] CLEANUP_LOCK = new byte[0];
/**
* List of supported OUI's (needed for path-based cleanup logic on disconnects after live migrations)
*/
static String[] SUPPORTED_OUI_LIST = {
"0002ac", // HPE Primera 3PAR
"24a937" // Pure Flasharray
};
/** /**
* Property keys and defaults * Property keys and defaults
*/ */
@ -82,6 +86,7 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
* Initialize static program-wide configurations and background jobs * Initialize static program-wide configurations and background jobs
*/ */
static { static {
long cleanupFrequency = CLEANUP_FREQUENCY_SECS.getFinalValue() * 1000; long cleanupFrequency = CLEANUP_FREQUENCY_SECS.getFinalValue() * 1000;
boolean cleanupEnabled = CLEANUP_ENABLED.getFinalValue(); boolean cleanupEnabled = CLEANUP_ENABLED.getFinalValue();
@ -96,16 +101,13 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
throw new Error("Unable to find the disconnectVolume.sh script"); throw new Error("Unable to find the disconnectVolume.sh script");
} }
resizeScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), resizeScript);
if (resizeScript == null) {
throw new Error("Unable to find the resizeVolume.sh script");
}
copyScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), copyScript); copyScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), copyScript);
if (copyScript == null) { if (copyScript == null) {
throw new Error("Unable to find the copyVolume.sh script"); throw new Error("Unable to find the copyVolume.sh script");
} }
resizeScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), resizeScript);
if (cleanupEnabled) { if (cleanupEnabled) {
cleanupScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), cleanupScript); cleanupScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), cleanupScript);
if (cleanupScript == null) { if (cleanupScript == null) {
@ -137,9 +139,6 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
public abstract boolean isStoragePoolTypeSupported(Storage.StoragePoolType type); public abstract boolean isStoragePoolTypeSupported(Storage.StoragePoolType type);
/**
* We expect WWN values in the volumePath so need to convert it to an actual physical path
*/
public abstract AddressInfo parseAndValidatePath(String path); public abstract AddressInfo parseAndValidatePath(String path);
@Override @Override
@ -151,6 +150,7 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
return null; return null;
} }
// we expect WWN values in the volumePath so need to convert it to an actual physical path
AddressInfo address = parseAndValidatePath(volumePath); AddressInfo address = parseAndValidatePath(volumePath);
return getPhysicalDisk(address, pool); return getPhysicalDisk(address, pool);
} }
@ -186,15 +186,23 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
if (StringUtils.isEmpty(volumePath)) { if (StringUtils.isEmpty(volumePath)) {
LOGGER.error("Unable to connect physical disk due to insufficient data - volume path is undefined"); LOGGER.error("Unable to connect physical disk due to insufficient data - volume path is undefined");
throw new CloudRuntimeException("Unable to connect physical disk due to insufficient data - volume path is underfined"); return false;
} }
if (pool == null) { if (pool == null) {
LOGGER.error("Unable to connect physical disk due to insufficient data - pool is not set"); LOGGER.error("Unable to connect physical disk due to insufficient data - pool is not set");
throw new CloudRuntimeException("Unable to connect physical disk due to insufficient data - pool is not set"); return false;
} }
// we expect WWN values in the volumePath so need to convert it to an actual physical path
AddressInfo address = this.parseAndValidatePath(volumePath); AddressInfo address = this.parseAndValidatePath(volumePath);
// validate we have a connection id - we can't proceed without that
if (address.getConnectionId() == null) {
LOGGER.error("Unable to connect volume with address [" + address.getPath() + "] of the storage pool: " + pool.getUuid() + " - connection id is not set in provided path");
return false;
}
int waitTimeInSec = diskWaitTimeSecs; int waitTimeInSec = diskWaitTimeSecs;
if (details != null && details.containsKey(StorageManager.STORAGE_POOL_DISK_WAIT.toString())) { if (details != null && details.containsKey(StorageManager.STORAGE_POOL_DISK_WAIT.toString())) {
String waitTime = details.get(StorageManager.STORAGE_POOL_DISK_WAIT.toString()); String waitTime = details.get(StorageManager.STORAGE_POOL_DISK_WAIT.toString());
@ -207,31 +215,62 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
@Override @Override
public boolean disconnectPhysicalDisk(String volumePath, KVMStoragePool pool) { public boolean disconnectPhysicalDisk(String volumePath, KVMStoragePool pool) {
LOGGER.debug(String.format("disconnectPhysicalDiskByPath(volumePath,pool) called with args (%s, %s) START", volumePath, pool.getUuid())); if (LOGGER.isDebugEnabled()) LOGGER.debug(String.format("disconnectPhysicalDisk(volumePath,pool) called with args (%s, %s) START", volumePath, pool.getUuid()));
AddressInfo address = this.parseAndValidatePath(volumePath); AddressInfo address = this.parseAndValidatePath(volumePath);
if (address.getAddress() == null) {
if (LOGGER.isDebugEnabled()) LOGGER.debug(String.format("disconnectPhysicalDisk(volumePath,pool) returning FALSE, volume path has no address field", volumePath, pool.getUuid()));
return false;
}
ScriptResult result = runScript(disconnectScript, 60000L, address.getAddress().toLowerCase()); ScriptResult result = runScript(disconnectScript, 60000L, address.getAddress().toLowerCase());
if (LOGGER.isDebugEnabled()) LOGGER.debug("multipath flush output: " + result.getResult());
LOGGER.debug(String.format("disconnectPhysicalDiskByPath(volumePath,pool) called with args (%s, %s) COMPLETE [rc=%s]", volumePath, pool.getUuid(), result.getResult())); return true; if (result.getExitCode() != 0) {
LOGGER.warn(String.format("Disconnect failed for path [%s] with return code [%s]", address.getAddress().toLowerCase(), result.getExitCode()));
}
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("multipath flush output: " + result.getResult());
LOGGER.debug(String.format("disconnectPhysicalDisk(volumePath,pool) called with args (%s, %s) COMPLETE [rc=%s]", volumePath, pool.getUuid(), result.getResult()));
}
return (result.getExitCode() == 0);
} }
@Override @Override
public boolean disconnectPhysicalDisk(Map<String, String> volumeToDisconnect) { public boolean disconnectPhysicalDisk(Map<String, String> volumeToDisconnect) {
LOGGER.debug(String.format("disconnectPhysicalDiskByPath(volumeToDisconnect) called with arg bag [not implemented]:") + " " + volumeToDisconnect); LOGGER.debug(String.format("disconnectPhysicalDisk(volumeToDisconnect) called with arg bag [not implemented]:") + " " + volumeToDisconnect);
return false; return false;
} }
@Override @Override
public boolean disconnectPhysicalDiskByPath(String localPath) { public boolean disconnectPhysicalDiskByPath(String localPath) {
LOGGER.debug(String.format("disconnectPhysicalDiskByPath(localPath) called with args (%s) STARTED", localPath)); if (localPath == null) {
ScriptResult result = runScript(disconnectScript, 60000L, localPath.replace("/dev/mapper/3", "")); return false;
if (LOGGER.isDebugEnabled()) LOGGER.debug("multipath flush output: " + result.getResult()); }
LOGGER.debug(String.format("disconnectPhysicalDiskByPath(localPath) called with args (%s) COMPLETE [rc=%s]", localPath, result.getExitCode())); return true; if (LOGGER.isDebugEnabled()) LOGGER.debug(String.format("disconnectPhysicalDiskByPath(localPath) called with args (%s) START", localPath));
if (localPath.startsWith("/dev/mapper/")) {
String multipathName = localPath.replace("/dev/mapper/3", "");
// this ensures we only disconnect multipath devices supported by this driver
for (String oui: SUPPORTED_OUI_LIST) {
if (multipathName.length() > 1 && multipathName.substring(2).startsWith(oui)) {
ScriptResult result = runScript(disconnectScript, 60000L, multipathName);
if (result.getExitCode() != 0) {
LOGGER.warn(String.format("Disconnect failed for path [%s] with return code [%s]", multipathName, result.getExitCode()));
}
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("multipath flush output: " + result.getResult());
LOGGER.debug(String.format("disconnectPhysicalDiskByPath(localPath) called with args (%s) COMPLETE [rc=%s]", localPath, result.getExitCode()));
}
return (result.getExitCode() == 0);
}
}
}
if (LOGGER.isDebugEnabled()) LOGGER.debug(String.format("disconnectPhysicalDiskByPath(localPath) returning FALSE, volume path is not a multipath volume: %s", localPath));
return false;
} }
@Override @Override
public boolean deletePhysicalDisk(String uuid, KVMStoragePool pool, Storage.ImageFormat format) { public boolean deletePhysicalDisk(String uuid, KVMStoragePool pool, Storage.ImageFormat format) {
LOGGER.info(String.format("deletePhysicalDisk(uuid,pool,format) called with args (%s, %s, %s) [not implemented]", uuid, pool.getUuid(), format.toString())); return false;
return true;
} }
@Override @Override
@ -275,15 +314,9 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
return true; return true;
} }
/**
* Validate inputs and return the source file for a template copy @Override
* @param templateFilePath public KVMPhysicalDisk createTemplateFromDirectDownloadFile(String templateFilePath, String destTemplatePath, KVMStoragePool destPool, Storage.ImageFormat format, int timeout) {
* @param destTemplatePath
* @param destPool
* @param format
* @return
*/
File createTemplateFromDirectDownloadFileValidate(String templateFilePath, String destTemplatePath, KVMStoragePool destPool, Storage.ImageFormat format) {
if (StringUtils.isAnyEmpty(templateFilePath, destTemplatePath) || destPool == null) { if (StringUtils.isAnyEmpty(templateFilePath, destTemplatePath) || destPool == null) {
LOGGER.error("Unable to create template from direct download template file due to insufficient data"); LOGGER.error("Unable to create template from direct download template file due to insufficient data");
throw new CloudRuntimeException("Unable to create template from direct download template file due to insufficient data"); throw new CloudRuntimeException("Unable to create template from direct download template file due to insufficient data");
@ -296,57 +329,18 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
throw new CloudRuntimeException("Direct download template file " + templateFilePath + " does not exist on this host"); throw new CloudRuntimeException("Direct download template file " + templateFilePath + " does not exist on this host");
} }
if (destTemplatePath == null || destTemplatePath.isEmpty()) { KVMPhysicalDisk sourceDisk = destPool.getPhysicalDisk(templateFilePath);
LOGGER.error("Failed to create template, target template disk path not provided");
throw new CloudRuntimeException("Target template disk path not provided");
}
if (this.isStoragePoolTypeSupported(destPool.getType())) {
throw new CloudRuntimeException("Unsupported storage pool type: " + destPool.getType().toString());
}
if (Storage.ImageFormat.RAW.equals(format) && Storage.ImageFormat.QCOW2.equals(format)) {
LOGGER.error("Failed to create template, unsupported template format: " + format.toString());
throw new CloudRuntimeException("Unsupported template format: " + format.toString());
}
return sourceFile;
}
String extractSourceTemplateIfNeeded(File sourceFile, String templateFilePath) {
String srcTemplateFilePath = templateFilePath;
if (isTemplateExtractable(templateFilePath)) {
srcTemplateFilePath = sourceFile.getParent() + "/" + UUID.randomUUID().toString();
LOGGER.debug("Extract the downloaded template " + templateFilePath + " to " + srcTemplateFilePath);
String extractCommand = getExtractCommandForDownloadedFile(templateFilePath, srcTemplateFilePath);
Script.runSimpleBashScript(extractCommand);
Script.runSimpleBashScript("rm -f " + templateFilePath);
}
return srcTemplateFilePath;
}
QemuImg.PhysicalDiskFormat deriveImgFileFormat(Storage.ImageFormat format) {
if (format == Storage.ImageFormat.RAW) {
return QemuImg.PhysicalDiskFormat.RAW;
} else if (format == Storage.ImageFormat.QCOW2) {
return QemuImg.PhysicalDiskFormat.QCOW2;
} else {
return QemuImg.PhysicalDiskFormat.RAW;
}
}
@Override
public KVMPhysicalDisk createTemplateFromDirectDownloadFile(String templateFilePath, String destTemplatePath, KVMStoragePool destPool, Storage.ImageFormat format, int timeout) {
File sourceFile = createTemplateFromDirectDownloadFileValidate(templateFilePath, destTemplatePath, destPool, format);
LOGGER.debug("Create template from direct download template - file path: " + templateFilePath + ", dest path: " + destTemplatePath + ", format: " + format.toString());
KVMPhysicalDisk sourceDisk = destPool.getPhysicalDisk(sourceFile.getAbsolutePath());
return copyPhysicalDisk(sourceDisk, destTemplatePath, destPool, timeout, null, null, Storage.ProvisioningType.THIN); return copyPhysicalDisk(sourceDisk, destTemplatePath, destPool, timeout, null, null, Storage.ProvisioningType.THIN);
} }
@Override @Override
public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMStoragePool destPool, int timeout, public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMStoragePool destPool, int timeout,
byte[] srcPassphrase, byte[] dstPassphrase, Storage.ProvisioningType provisioningType) { byte[] srcPassphrase, byte[] dstPassphrase, Storage.ProvisioningType provisioningType) {
if (StringUtils.isEmpty(name) || disk == null || destPool == null) {
LOGGER.error("Unable to copy physical disk due to insufficient data");
throw new CloudRuntimeException("Unable to copy physical disk due to insufficient data");
}
validateForDiskCopy(disk, name, destPool);
LOGGER.info("Copying FROM source physical disk " + disk.getPath() + ", size: " + disk.getSize() + ", virtualsize: " + disk.getVirtualSize()+ ", format: " + disk.getFormat()); LOGGER.info("Copying FROM source physical disk " + disk.getPath() + ", size: " + disk.getSize() + ", virtualsize: " + disk.getVirtualSize()+ ", format: " + disk.getFormat());
KVMPhysicalDisk destDisk = destPool.getPhysicalDisk(name); KVMPhysicalDisk destDisk = destPool.getPhysicalDisk(name);
@ -366,60 +360,34 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
LOGGER.info("Copying TO destination physical disk " + destDisk.getPath() + ", size: " + destDisk.getSize() + ", virtualsize: " + destDisk.getVirtualSize()+ ", format: " + destDisk.getFormat()); LOGGER.info("Copying TO destination physical disk " + destDisk.getPath() + ", size: " + destDisk.getSize() + ", virtualsize: " + destDisk.getVirtualSize()+ ", format: " + destDisk.getFormat());
QemuImgFile srcFile = new QemuImgFile(disk.getPath(), disk.getFormat()); QemuImgFile srcFile = new QemuImgFile(disk.getPath(), disk.getFormat());
QemuImgFile destFile = new QemuImgFile(destDisk.getPath(), destDisk.getFormat()); QemuImgFile destFile = new QemuImgFile(destDisk.getPath(), destDisk.getFormat());
LOGGER.debug("Starting COPY from source downloaded template " + srcFile.getFileName() + " to Primera volume: " + destDisk.getPath());
LOGGER.debug("Starting COPY from source path " + srcFile.getFileName() + " to target volume path: " + destDisk.getPath());
ScriptResult result = runScript(copyScript, timeout, destDisk.getFormat().toString().toLowerCase(), srcFile.getFileName(), destFile.getFileName()); ScriptResult result = runScript(copyScript, timeout, destDisk.getFormat().toString().toLowerCase(), srcFile.getFileName(), destFile.getFileName());
int rc = result.getExitCode(); int rc = result.getExitCode();
if (rc != 0) { if (rc != 0) {
throw new CloudRuntimeException("Failed to convert from " + srcFile.getFileName() + " to " + destFile.getFileName() + " the error was: " + rc + " - " + result.getResult()); throw new CloudRuntimeException("Failed to convert from " + srcFile.getFileName() + " to " + destFile.getFileName() + " the error was: " + rc + " - " + result.getResult());
} }
LOGGER.debug("Successfully converted source downloaded template " + srcFile.getFileName() + " to Primera volume: " + destDisk.getPath() + " " + result.getResult()); LOGGER.debug("Successfully converted source volume at " + srcFile.getFileName() + " to destination volume: " + destDisk.getPath() + " " + result.getResult());
return destDisk; return destDisk;
} }
void validateForDiskCopy(KVMPhysicalDisk disk, String name, KVMStoragePool destPool) { private static final ScriptResult runScript(String script, long timeout, String...args) {
if (StringUtils.isEmpty(name) || disk == null || destPool == null) { ScriptResult result = new ScriptResult();
LOGGER.error("Unable to copy physical disk due to insufficient data"); Script cmd = new Script(script, Duration.millis(timeout), LOGGER);
throw new CloudRuntimeException("Unable to copy physical disk due to insufficient data"); cmd.add(args);
} OutputInterpreter.OneLineParser parser = new OutputInterpreter.OneLineParser();
} String output = cmd.execute(parser);
// its possible the process never launches which causes an NPE on getExitValue below
/** if (output != null && output.contains("Unable to execute the command")) {
* Copy a disk path to another disk path using QemuImg command result.setResult(output);
* @param disk result.setExitCode(-1);
* @param destDisk return result;
* @param name
* @param timeout
*/
void qemuCopy(KVMPhysicalDisk disk, KVMPhysicalDisk destDisk, String name, int timeout) {
QemuImg qemu;
try {
qemu = new QemuImg(timeout);
} catch (LibvirtException | QemuImgException e) {
throw new CloudRuntimeException (e);
}
QemuImgFile srcFile = null;
QemuImgFile destFile = null;
try {
srcFile = new QemuImgFile(disk.getPath(), disk.getFormat());
destFile = new QemuImgFile(destDisk.getPath(), destDisk.getFormat());
LOGGER.debug("Starting copy from source disk image " + srcFile.getFileName() + " to volume: " + destDisk.getPath());
qemu.convert(srcFile, destFile, true);
LOGGER.debug("Successfully converted source disk image " + srcFile.getFileName() + " to volume: " + destDisk.getPath());
} catch (QemuImgException | LibvirtException e) {
try {
Map<String, String> srcInfo = qemu.info(srcFile);
LOGGER.debug("Source disk info: " + Arrays.asList(srcInfo));
} catch (Exception ignored) {
LOGGER.warn("Unable to get info from source disk: " + disk.getName());
}
String errMsg = String.format("Unable to convert/copy from %s to %s, due to: %s", disk.getName(), name, ((StringUtils.isEmpty(e.getMessage())) ? "an unknown error" : e.getMessage()));
LOGGER.error(errMsg);
throw new CloudRuntimeException(errMsg, e);
} }
result.setResult(output);
result.setExitCode(cmd.getExitValue());
return result;
} }
@Override @Override
@ -460,25 +428,9 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
} }
} }
/**
 * Execute the given script with the provided arguments, capturing the first
 * line of output and the exit code into a ScriptResult.
 * @param script path of the script to run
 * @param timeout maximum run time in milliseconds
 * @param args arguments passed to the script
 * @return a ScriptResult holding the output line and exit code (-1 when the
 *         process could not be launched)
 */
private static final ScriptResult runScript(String script, long timeout, String...args) {
    Script cmd = new Script(script, Duration.millis(timeout), LOGGER);
    cmd.add(args);
    OutputInterpreter.OneLineParser parser = new OutputInterpreter.OneLineParser();
    String output = cmd.execute(parser);
    ScriptResult result = new ScriptResult();
    result.setResult(output);
    // the process may never have launched; calling getExitValue() then would NPE,
    // so detect the launch failure from the output text and report -1 instead
    if (output != null && output.contains("Unable to execute the command")) {
        result.setExitCode(-1);
    } else {
        result.setExitCode(cmd.getExitValue());
    }
    return result;
}
boolean waitForDiskToBecomeAvailable(AddressInfo address, KVMStoragePool pool, long waitTimeInSec) { boolean waitForDiskToBecomeAvailable(AddressInfo address, KVMStoragePool pool, long waitTimeInSec) {
LOGGER.debug("Waiting for the volume with id: " + address.getPath() + " of the storage pool: " + pool.getUuid() + " to become available for " + waitTimeInSec + " secs"); LOGGER.debug("Waiting for the volume with id: " + address.getPath() + " of the storage pool: " + pool.getUuid() + " to become available for " + waitTimeInSec + " secs");
long scriptTimeoutSecs = 30; // how long to wait for each script execution to run long scriptTimeoutSecs = 30; // how long to wait for each script execution to run
long maxTries = 10; // how many max retries to attempt the script long maxTries = 10; // how many max retries to attempt the script
long waitTimeInMillis = waitTimeInSec * 1000; // how long overall to wait long waitTimeInMillis = waitTimeInSec * 1000; // how long overall to wait
@ -556,40 +508,6 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
return false; return false;
} }
/**
 * Run the configured connect script to discover/attach the given LUN at the
 * given address, logging script output. Stream readers are closed via
 * try-with-resources (the originals leaked), and thread interruption is
 * restored instead of being swallowed.
 * @param lun the LUN id to pass to the connect script
 * @param address the address info whose address string is passed to the script
 * @throws CloudRuntimeException if the script cannot be run or is interrupted
 */
void runConnectScript(String lun, AddressInfo address) {
    try {
        ProcessBuilder builder = new ProcessBuilder(connectScript, lun, address.getAddress());
        Process p = builder.start();
        int rc = p.waitFor();
        if (rc == 0) {
            // drain stdout so nothing is left buffered; surface it at trace level
            try (BufferedReader input = new BufferedReader(new InputStreamReader(p.getInputStream()))) {
                StringBuilder output = new StringBuilder();
                String line = null;
                while ((line = input.readLine()) != null) {
                    output.append(line);
                    output.append(" ");
                }
                if (LOGGER.isTraceEnabled()) {
                    LOGGER.trace("Output from " + connectScript + ": " + output);
                }
            }
        } else {
            LOGGER.warn("Failure discovering LUN via " + connectScript);
            try (BufferedReader error = new BufferedReader(new InputStreamReader(p.getErrorStream()))) {
                String line = null;
                while ((line = error.readLine()) != null) {
                    LOGGER.warn("error --> " + line);
                }
            }
        }
    } catch (InterruptedException e) {
        // restore the interrupt flag so callers can observe the interruption
        Thread.currentThread().interrupt();
        throw new CloudRuntimeException("Problem performing scan on SCSI hosts", e);
    } catch (IOException e) {
        throw new CloudRuntimeException("Problem performing scan on SCSI hosts", e);
    }
}
/**
 * Pause the current thread for the given number of milliseconds.
 * A zero or negative value is a no-op. If the sleep is interrupted, the
 * thread's interrupt flag is restored (the original swallowed it) so callers
 * can still observe the interruption.
 * @param sleepTimeMs milliseconds to sleep
 */
void sleep(long sleepTimeMs) {
    if (sleepTimeMs <= 0) {
        return;
    }
    try {
        Thread.sleep(sleepTimeMs);
    } catch (InterruptedException ex) {
        // deliberately best-effort: don't propagate, but keep the interrupt visible
        Thread.currentThread().interrupt();
    }
}
long getPhysicalDiskSize(String diskPath) { long getPhysicalDiskSize(String diskPath) {
if (StringUtils.isEmpty(diskPath)) { if (StringUtils.isEmpty(diskPath)) {
return 0; return 0;

View File

@ -56,3 +56,44 @@ This provides instructions of which provider implementation class to load when t
## Build and Deploy the Jar ## Build and Deploy the Jar
Once you build the new jar, start Cloudstack Management Server or, if a standalone jar, add it to the classpath before start. You should now have a new storage provider of the designated name once Cloudstack finishes loading Once you build the new jar, start Cloudstack Management Server or, if a standalone jar, add it to the classpath before start. You should now have a new storage provider of the designated name once Cloudstack finishes loading
all configured modules. all configured modules.
### Test Cases
The following test cases should be run against configured installations of each storage array in a working Cloudstack installation.
1. Create New Primera Storage Pool for Zone
2. Create New Primera Storage Pool for Cluster
3. Update Primera Storage Pool for Zone
4. Update Primera Storage Pool for Cluster
5. Create VM with Root Disk using Primera pool
6. Create VM with Root and Data Disk using Primera pool
7. Create VM with Root Disk using NFS and Data Disk on Primera pool
8. Create VM with Root Disk on Primera Pool and Data Disk on NFS
9. Snapshot root disk with VM using Primera Pool for root disk
10. Snapshot data disk with VM using Primera Pool for data disk
11. Snapshot VM (non-memory) with root and data disk using Primera pool
12. Snapshot VM (non-memory) with root disk using Primera pool and data disk using NFS
13. Snapshot VM (non-memory) with root disk using NFS pool and data disk using Primera pool
14. Create new template from previous snapshot root disk on Primera pool
15. Create new volume from previous snapshot root disk on Primera pool
16. Create new volume from previous snapshot data disk on Primera pool
17. Create new VM using template created from Primera root snapshot and using Primera as root volume pool
18. Create new VM using template created from Primera root snapshot and using NFS as root volume pool
19. Delete previously created Primera snapshot
20. Delete previously created Primera volume attached to a VM that is running (should fail)
21. Delete previously created Primera volume attached to a VM that is not running (should fail)
22. Detach a Primera volume from a non-running VM (should work)
23. Attach a Primera volume to a running VM (should work)
24. Attach a Primera volume to a non-running VM (should work)
25. Create a 'thin' Disk Offering tagged for Primera pool and provision and attach a data volume to a VM using this offering (ttpv=true, reduce=false)
26. Create a 'sparse' Disk Offering tagged for Primera pool and provision and attach a data volume to a VM using this offering (ttpv=false, reduce=true)
27. Create a 'fat' Disk Offering and tagged for Primera pool and provision and attach a data volume to a VM using this offering (should fail as 'fat' not supported)
28. Perform volume migration of root volume from Primera pool to NFS pool on stopped VM
29. Perform volume migration of root volume from NFS pool to Primera pool on stopped VM
30. Perform volume migration of data volume from Primera pool to NFS pool on stopped VM
31. Perform volume migration of data volume from NFS pool to Primera pool on stopped VM
32. Perform VM data migration for a VM with 1 or more data volumes from all volumes on Primera pool to all volumes on NFS pool
33. Perform VM data migration for a VM with 1 or more data volumes from all volumes on NFS pool to all volumes on Primera pool
34. Perform live migration of a VM with a Primera root disk
35. Perform live migration of a VM with a Primera data disk and NFS root disk
36. Perform live migration of a VM with a Primera root disk and NFS data disk
37. Perform volume migration between 2 Primera pools on the same backend Primera IP address
38. Perform volume migration between 2 Primera pools on different Primera IP address

View File

@ -69,14 +69,14 @@ public interface ProviderAdapter {
* @param request * @param request
* @return * @return
*/ */
public String attach(ProviderAdapterContext context, ProviderAdapterDataObject request); public String attach(ProviderAdapterContext context, ProviderAdapterDataObject request, String hostname);
/** /**
* Detach the host from the storage context * Detach the host from the storage context
* @param context * @param context
* @param request * @param request
*/ */
public void detach(ProviderAdapterContext context, ProviderAdapterDataObject request); public void detach(ProviderAdapterContext context, ProviderAdapterDataObject request, String hostname);
/** /**
* Delete the provided volume/object * Delete the provided volume/object
@ -154,4 +154,22 @@ public interface ProviderAdapter {
* @return * @return
*/ */
public boolean canAccessHost(ProviderAdapterContext context, String hostname); public boolean canAccessHost(ProviderAdapterContext context, String hostname);
/**
 * Returns true if the provider allows direct attach/connection of snapshots to a host.
 * @return true when a snapshot can be connected to a host directly, without first
 *         copying it to a temporary volume
 */
public boolean canDirectAttachSnapshot();
/**
 * Given a ProviderAdapterDataObject, return a map of connection IDs to connection values. Generally
 * this would be used to return a map of hostnames and the VLUN ID for the attachment associated with
 * that hostname. If the provider is using a hostgroup/hostset model where the ID is assigned in common
 * across all hosts in the group, then the map MUST contain a single entry with host key set as a wildcard
 * character (exactly '*').
 * @param dataIn the object identifying the volume/snapshot/template on the provider
 * @return map of hostname (or the '*' wildcard) to connection/VLUN id
 */
public Map<String, String> getConnectionIdMap(ProviderAdapterDataObject dataIn);
} }

View File

@ -19,6 +19,10 @@ package org.apache.cloudstack.storage.datastore.adapter;
import java.util.Map; import java.util.Map;
public interface ProviderAdapterFactory { public interface ProviderAdapterFactory {
/** Name of the provider */
public String getProviderName(); public String getProviderName();
/** create a new instance of a provider adapter */
public ProviderAdapter create(String url, Map<String, String> details); public ProviderAdapter create(String url, Map<String, String> details);
/**
 * Returns true if this type of adapter can directly attach snapshots to hosts.
 * NOTE(review): declared to return Object although callers treat the result as a
 * Boolean (e.g. calling toString() on it for a capability flag); consider declaring
 * Boolean for type safety — confirm against existing factory implementations first.
 */
public Object canDirectAttachSnapshot();
} }

View File

@ -21,7 +21,6 @@ public class ProviderVolumeNamer {
private static final String SNAPSHOT_PREFIX = "snap"; private static final String SNAPSHOT_PREFIX = "snap";
private static final String VOLUME_PREFIX = "vol"; private static final String VOLUME_PREFIX = "vol";
private static final String TEMPLATE_PREFIX = "tpl"; private static final String TEMPLATE_PREFIX = "tpl";
/** Simple method to allow sharing storage setup, primarily in lab/testing environment */
private static final String ENV_PREFIX = System.getProperty("adaptive.storage.provider.envIdentifier"); private static final String ENV_PREFIX = System.getProperty("adaptive.storage.provider.envIdentifier");
public static String generateObjectName(ProviderAdapterContext context, ProviderAdapterDataObject obj) { public static String generateObjectName(ProviderAdapterContext context, ProviderAdapterDataObject obj) {

View File

@ -32,6 +32,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreCapabilities;
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService;
import org.apache.cloudstack.framework.async.AsyncCompletionCallback; import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
@ -43,6 +44,7 @@ import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterConstants;
import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterContext; import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterContext;
import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterDataObject; import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterDataObject;
import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterDiskOffering; import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterDiskOffering;
import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterFactory;
import org.apache.cloudstack.storage.datastore.adapter.ProviderSnapshot; import org.apache.cloudstack.storage.datastore.adapter.ProviderSnapshot;
import org.apache.cloudstack.storage.datastore.adapter.ProviderVolume; import org.apache.cloudstack.storage.datastore.adapter.ProviderVolume;
import org.apache.cloudstack.storage.datastore.adapter.ProviderVolumeStats; import org.apache.cloudstack.storage.datastore.adapter.ProviderVolumeStats;
@ -53,10 +55,12 @@ import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.datastore.provider.AdaptivePrimaryDatastoreAdapterFactoryMap; import org.apache.cloudstack.storage.datastore.provider.AdaptivePrimaryDatastoreAdapterFactoryMap;
import org.apache.cloudstack.storage.image.store.TemplateObject;
import org.apache.cloudstack.storage.snapshot.SnapshotObject;
import org.apache.cloudstack.storage.to.SnapshotObjectTO; import org.apache.cloudstack.storage.to.SnapshotObjectTO;
import org.apache.cloudstack.storage.to.TemplateObjectTO;
import org.apache.cloudstack.storage.to.VolumeObjectTO; import org.apache.cloudstack.storage.to.VolumeObjectTO;
import org.apache.cloudstack.storage.volume.VolumeObject; import org.apache.cloudstack.storage.volume.VolumeObject;
import org.apache.cloudstack.storage.snapshot.SnapshotObject;
import com.cloud.agent.api.Answer; import com.cloud.agent.api.Answer;
import com.cloud.agent.api.to.DataObjectType; import com.cloud.agent.api.to.DataObjectType;
@ -73,7 +77,6 @@ import com.cloud.storage.DiskOfferingVO;
import com.cloud.storage.ResizeVolumePayload; import com.cloud.storage.ResizeVolumePayload;
import com.cloud.storage.SnapshotVO; import com.cloud.storage.SnapshotVO;
import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.Storage.ImageFormat;
import com.cloud.storage.StoragePool; import com.cloud.storage.StoragePool;
import com.cloud.storage.VMTemplateStoragePoolVO; import com.cloud.storage.VMTemplateStoragePoolVO;
import com.cloud.storage.VMTemplateVO; import com.cloud.storage.VMTemplateVO;
@ -133,6 +136,8 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive
DomainDao _domainDao; DomainDao _domainDao;
@Inject @Inject
VolumeService _volumeService; VolumeService _volumeService;
@Inject
VolumeDataFactory volumeDataFactory;
private AdaptivePrimaryDatastoreAdapterFactoryMap _adapterFactoryMap = null; private AdaptivePrimaryDatastoreAdapterFactoryMap _adapterFactoryMap = null;
@ -142,9 +147,54 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive
@Override @Override
public DataTO getTO(DataObject data) { public DataTO getTO(DataObject data) {
// we need to get the connectionId and the VLUN ID for currently attached hosts to add to the DataTO object
DataTO to = null;
if (data.getType() == DataObjectType.VOLUME) {
VolumeObjectTO vto = new VolumeObjectTO((VolumeObject)data);
vto.setPath(getPath(data));
to = vto;
} else if (data.getType() == DataObjectType.TEMPLATE) {
TemplateObjectTO tto = new TemplateObjectTO((TemplateObject)data);
tto.setPath(getPath(data));
to = tto;
} else if (data.getType() == DataObjectType.SNAPSHOT) {
SnapshotObjectTO sto = new SnapshotObjectTO((SnapshotObject)data);
sto.setPath(getPath(data));
to = sto;
} else {
to = super.getTO(data);
}
return to;
}
/*
 * For the given data object, return the path with current connection info. If a snapshot
 * object is passed, we will determine if a temporary volume is available for that
 * snapshot object and return that connection info instead.
 */
String getPath(DataObject data) {
StoragePoolVO storagePool = _storagePoolDao.findById(data.getDataStore().getId());
Map<String, String> details = _storagePoolDao.getDetails(storagePool.getId());
ProviderAdapter api = getAPI(storagePool, details);
ProviderAdapterDataObject dataIn = newManagedDataObject(data, storagePool);
/** This means the object is not yet associated with the external provider so path is null */
if (dataIn.getExternalName() == null) {
return null; return null;
} }
ProviderAdapterContext context = newManagedVolumeContext(data);
Map<String,String> connIdMap = api.getConnectionIdMap(dataIn);
ProviderVolume volume = api.getVolume(context, dataIn);
// if this is an existing object, generate the path for it.
String finalPath = null;
if (volume != null) {
finalPath = generatePathInfo(volume, connIdMap);
}
return finalPath;
}
@Override @Override
public DataStoreTO getStoreTO(DataStore store) { public DataStoreTO getStoreTO(DataStore store) {
return null; return null;
@ -217,11 +267,8 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive
dataIn.setExternalName(volume.getExternalName()); dataIn.setExternalName(volume.getExternalName());
dataIn.setExternalUuid(volume.getExternalUuid()); dataIn.setExternalUuid(volume.getExternalUuid());
// add the volume to the host set
String connectionId = api.attach(context, dataIn);
// update the cloudstack metadata about the volume // update the cloudstack metadata about the volume
persistVolumeOrTemplateData(storagePool, details, dataObject, volume, connectionId); persistVolumeOrTemplateData(storagePool, details, dataObject, volume, null);
result = new CreateCmdResult(dataObject.getUuid(), new Answer(null)); result = new CreateCmdResult(dataObject.getUuid(), new Answer(null));
result.setSuccess(true); result.setSuccess(true);
@ -288,6 +335,7 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive
ProviderAdapterContext context = newManagedVolumeContext(destdata); ProviderAdapterContext context = newManagedVolumeContext(destdata);
ProviderAdapterDataObject sourceIn = newManagedDataObject(srcdata, storagePool); ProviderAdapterDataObject sourceIn = newManagedDataObject(srcdata, storagePool);
ProviderAdapterDataObject destIn = newManagedDataObject(destdata, storagePool); ProviderAdapterDataObject destIn = newManagedDataObject(destdata, storagePool);
outVolume = api.copy(context, sourceIn, destIn); outVolume = api.copy(context, sourceIn, destIn);
// populate this data - it may be needed later // populate this data - it may be needed later
@ -302,17 +350,9 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive
api.resize(context, destIn, destdata.getSize()); api.resize(context, destIn, destdata.getSize());
} }
String connectionId = api.attach(context, destIn); // initial volume info does not have connection map yet. That is added when grantAccess is called later.
String finalPath = generatePathInfo(outVolume, null);
String finalPath; persistVolumeData(storagePool, details, destdata, outVolume, null);
// format: type=fiberwwn; address=<address>; connid=<connid>
if (connectionId != null) {
finalPath = String.format("type=%s; address=%s; connid=%s", outVolume.getAddressType().toString(), outVolume.getAddress().toLowerCase(), connectionId);
} else {
finalPath = String.format("type=%s; address=%s;", outVolume.getAddressType().toString(), outVolume.getAddress().toLowerCase());
}
persistVolumeData(storagePool, details, destdata, outVolume, connectionId);
s_logger.info("Copy completed from [" + srcdata.getUuid() + "] to [" + destdata.getUuid() + "]"); s_logger.info("Copy completed from [" + srcdata.getUuid() + "] to [" + destdata.getUuid() + "]");
VolumeObjectTO voto = new VolumeObjectTO(); VolumeObjectTO voto = new VolumeObjectTO();
@ -442,6 +482,66 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive
} }
/**
 * Grant the given host access to the object on the storage array by attaching it
 * (host-based VLUN assignment), then refresh the persisted path metadata with the
 * current connection-id map.
 * @param dataObject the volume/template being granted
 * @param host the host gaining access
 * @param dataStore the datastore the object resides on (unused here; pool is
 *        resolved from the data object)
 * @return true on success
 * @throws CloudRuntimeException wrapping any failure from the provider API
 */
public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore) {
    s_logger.debug("Granting host " + host.getName() + " access to volume " + dataObject.getUuid());
    try {
        StoragePoolVO storagePool = _storagePoolDao.findById(dataObject.getDataStore().getId());
        Map<String, String> details = _storagePoolDao.getDetails(storagePool.getId());
        ProviderAdapter api = getAPI(storagePool, details);
        ProviderAdapterContext context = newManagedVolumeContext(dataObject);
        // one data object is enough for attach, lookup and connection-map queries
        // (the original built a second identical instance)
        ProviderAdapterDataObject dataIn = newManagedDataObject(dataObject, storagePool);
        api.attach(context, dataIn, host.getName());
        // rewrite the volume metadata so the path reflects the current host connection IDs
        ProviderVolume vol = api.getVolume(context, dataIn);
        Map<String, String> connIdMap = api.getConnectionIdMap(dataIn);
        persistVolumeOrTemplateData(storagePool, details, dataObject, vol, connIdMap);
        s_logger.info("Granted host " + host.getName() + " access to volume " + dataObject.getUuid());
        return true;
    } catch (Throwable e) {
        String msg = "Error granting host " + host.getName() + " access to volume " + dataObject.getUuid() + ":" + e.getMessage();
        // log the full stack trace, not just the message, before rethrowing
        s_logger.error(msg, e);
        throw new CloudRuntimeException(msg, e);
    }
}
/**
 * Revoke the given host's access to the object by detaching it from the host on
 * the storage array, then refresh the persisted path metadata with the remaining
 * connection-id map. A null object, host, or datastore is a no-op.
 * @param dataObject the volume/template being revoked
 * @param host the host losing access
 * @param dataStore the datastore the object resides on
 * @throws CloudRuntimeException wrapping any failure from the provider API
 */
public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) {
    // nothing to do if any required input is missing
    if (dataObject == null || host == null || dataStore == null) {
        return;
    }
    s_logger.debug("Revoking access for host " + host.getName() + " to volume " + dataObject.getUuid());
    try {
        StoragePoolVO storagePool = _storagePoolDao.findById(dataObject.getDataStore().getId());
        Map<String, String> details = _storagePoolDao.getDetails(storagePool.getId());
        ProviderAdapter api = getAPI(storagePool, details);
        ProviderAdapterContext context = newManagedVolumeContext(dataObject);
        // one data object is enough for detach, lookup and connection-map queries
        // (the original built a second identical instance)
        ProviderAdapterDataObject dataIn = newManagedDataObject(dataObject, storagePool);
        api.detach(context, dataIn, host.getName());
        // rewrite the volume metadata so the path reflects the remaining host connection IDs
        ProviderVolume vol = api.getVolume(context, dataIn);
        Map<String, String> connIdMap = api.getConnectionIdMap(dataIn);
        persistVolumeOrTemplateData(storagePool, details, dataObject, vol, connIdMap);
        s_logger.info("Revoked access for host " + host.getName() + " to volume " + dataObject.getUuid());
    } catch (Throwable e) {
        String msg = "Error revoking access for host " + host.getName() + " to volume " + dataObject.getUuid() + ":" + e.getMessage();
        // log the full stack trace, not just the message, before rethrowing
        s_logger.error(msg, e);
        throw new CloudRuntimeException(msg, e);
    }
}
@Override @Override
public void handleQualityOfServiceForVolumeMigration(VolumeInfo volumeInfo, public void handleQualityOfServiceForVolumeMigration(VolumeInfo volumeInfo,
QualityOfServiceState qualityOfServiceState) { QualityOfServiceState qualityOfServiceState) {
@ -492,15 +592,7 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive
// add the snapshot to the host group (needed for copying to non-provider storage // add the snapshot to the host group (needed for copying to non-provider storage
// to create templates, etc) // to create templates, etc)
String connectionId = null;
String finalAddress = outSnapshot.getAddress(); String finalAddress = outSnapshot.getAddress();
if (outSnapshot.canAttachDirectly()) {
connectionId = api.attach(context, inSnapshotDO);
if (connectionId != null) {
finalAddress = finalAddress + "::" + connectionId;
}
}
snapshotTO.setPath(finalAddress); snapshotTO.setPath(finalAddress);
snapshotTO.setName(outSnapshot.getName()); snapshotTO.setName(outSnapshot.getName());
snapshotTO.setHypervisorType(HypervisorType.KVM); snapshotTO.setHypervisorType(HypervisorType.KVM);
@ -631,10 +723,12 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive
mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_SNAPSHOT.toString(), Boolean.TRUE.toString()); mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_SNAPSHOT.toString(), Boolean.TRUE.toString());
mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_VOLUME.toString(), Boolean.TRUE.toString()); // set to false because it causes weird behavior when copying templates to root volumes mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_VOLUME.toString(), Boolean.TRUE.toString()); // set to false because it causes weird behavior when copying templates to root volumes
mapCapabilities.put(DataStoreCapabilities.CAN_REVERT_VOLUME_TO_SNAPSHOT.toString(), Boolean.TRUE.toString()); mapCapabilities.put(DataStoreCapabilities.CAN_REVERT_VOLUME_TO_SNAPSHOT.toString(), Boolean.TRUE.toString());
// indicates the datastore can create temporary volumes for use when copying ProviderAdapterFactory factory = _adapterFactoryMap.getFactory(this.getProviderName());
// data from a snapshot if (factory != null) {
mapCapabilities.put("CAN_CREATE_TEMP_VOLUME_FROM_SNAPSHOT", Boolean.TRUE.toString()); mapCapabilities.put("CAN_DIRECT_ATTACH_SNAPSHOT", factory.canDirectAttachSnapshot().toString());
} else {
mapCapabilities.put("CAN_DIRECT_ATTACH_SNAPSHOT", Boolean.FALSE.toString());
}
return mapCapabilities; return mapCapabilities;
} }
@ -667,6 +761,11 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive
return true; return true;
} }
@Override
public boolean requiresAccessForMigration(DataObject dataObject) {
    // always true for this driver: the target host must be granted access
    // (attached on the array) before a migration can use the volume
    return true;
}
public String getProviderName() { public String getProviderName() {
return providerName; return providerName;
} }
@ -715,8 +814,13 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive
object.setType(ProviderAdapterDataObject.Type.VOLUME); object.setType(ProviderAdapterDataObject.Type.VOLUME);
ProviderVolumeStats stats = api.getVolumeStats(context, object); ProviderVolumeStats stats = api.getVolumeStats(context, object);
Long provisionedSizeInBytes = stats.getActualUsedInBytes(); Long provisionedSizeInBytes = null;
Long allocatedSizeInBytes = stats.getAllocatedInBytes(); Long allocatedSizeInBytes = null;
if (stats != null) {
provisionedSizeInBytes = stats.getActualUsedInBytes();
allocatedSizeInBytes = stats.getAllocatedInBytes();
}
if (provisionedSizeInBytes == null || allocatedSizeInBytes == null) { if (provisionedSizeInBytes == null || allocatedSizeInBytes == null) {
return null; return null;
} }
@ -734,31 +838,19 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive
} }
void persistVolumeOrTemplateData(StoragePoolVO storagePool, Map<String, String> storagePoolDetails, void persistVolumeOrTemplateData(StoragePoolVO storagePool, Map<String, String> storagePoolDetails,
DataObject dataObject, ProviderVolume volume, String connectionId) { DataObject dataObject, ProviderVolume volume, Map<String,String> connIdMap) {
if (dataObject.getType() == DataObjectType.VOLUME) { if (dataObject.getType() == DataObjectType.VOLUME) {
persistVolumeData(storagePool, storagePoolDetails, dataObject, volume, connectionId); persistVolumeData(storagePool, storagePoolDetails, dataObject, volume, connIdMap);
} else if (dataObject.getType() == DataObjectType.TEMPLATE) { } else if (dataObject.getType() == DataObjectType.TEMPLATE) {
persistTemplateData(storagePool, storagePoolDetails, dataObject, volume, connectionId); persistTemplateData(storagePool, storagePoolDetails, dataObject, volume, connIdMap);
} }
} }
void persistVolumeData(StoragePoolVO storagePool, Map<String, String> details, DataObject dataObject, void persistVolumeData(StoragePoolVO storagePool, Map<String, String> details, DataObject dataObject,
ProviderVolume managedVolume, String connectionId) { ProviderVolume managedVolume, Map<String,String> connIdMap) {
VolumeVO volumeVO = _volumeDao.findById(dataObject.getId()); VolumeVO volumeVO = _volumeDao.findById(dataObject.getId());
// if its null check if the storage provider returned one that is already set String finalPath = generatePathInfo(managedVolume, connIdMap);
if (connectionId == null) {
connectionId = managedVolume.getExternalConnectionId();
}
String finalPath;
// format: type=fiberwwn; address=<address>; connid=<connid>
if (connectionId != null) {
finalPath = String.format("type=%s; address=%s; connid=%s", managedVolume.getAddressType().toString(), managedVolume.getAddress().toLowerCase(), connectionId);
} else {
finalPath = String.format("type=%s; address=%s;", managedVolume.getAddressType().toString(), managedVolume.getAddress().toLowerCase());
}
volumeVO.setPath(finalPath); volumeVO.setPath(finalPath);
volumeVO.setFormat(ImageFormat.RAW); volumeVO.setFormat(ImageFormat.RAW);
volumeVO.setPoolId(storagePool.getId()); volumeVO.setPoolId(storagePool.getId());
@ -783,25 +875,31 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive
} }
void persistTemplateData(StoragePoolVO storagePool, Map<String, String> details, DataObject dataObject, void persistTemplateData(StoragePoolVO storagePool, Map<String, String> details, DataObject dataObject,
ProviderVolume volume, String connectionId) { ProviderVolume volume, Map<String,String> connIdMap) {
TemplateInfo templateInfo = (TemplateInfo) dataObject; TemplateInfo templateInfo = (TemplateInfo) dataObject;
VMTemplateStoragePoolVO templatePoolRef = _vmTemplatePoolDao.findByPoolTemplate(storagePool.getId(), VMTemplateStoragePoolVO templatePoolRef = _vmTemplatePoolDao.findByPoolTemplate(storagePool.getId(),
templateInfo.getId(), null); templateInfo.getId(), null);
// template pool ref doesn't have a details object so we'll save:
// 1. external name ==> installPath templatePoolRef.setInstallPath(generatePathInfo(volume, connIdMap));
// 2. address ==> local download path
if (connectionId == null) {
templatePoolRef.setInstallPath(String.format("type=%s; address=%s", volume.getAddressType().toString(),
volume.getAddress().toLowerCase()));
} else {
templatePoolRef.setInstallPath(String.format("type=%s; address=%s; connid=%s", volume.getAddressType().toString(),
volume.getAddress().toLowerCase(), connectionId));
}
templatePoolRef.setLocalDownloadPath(volume.getExternalName()); templatePoolRef.setLocalDownloadPath(volume.getExternalName());
templatePoolRef.setTemplateSize(volume.getAllocatedSizeInBytes()); templatePoolRef.setTemplateSize(volume.getAllocatedSizeInBytes());
_vmTemplatePoolDao.update(templatePoolRef.getId(), templatePoolRef); _vmTemplatePoolDao.update(templatePoolRef.getId(), templatePoolRef);
} }
/**
 * Build the path metadata string for a provider volume, of the form:
 * {@code type=<addrType>; address=<addr>; providerName=<extName>; providerID=<extUuid>; [connid.<host>=<id>; ...]}
 * Uses a StringBuilder and entrySet iteration instead of the original
 * string-concatenation-in-loop over keySet with per-key lookups.
 * @param volume the provider volume supplying address/type/name/uuid
 * @param connIdMap optional map of hostname (or '*' for hostset) to vlun/connection id
 * @return the formatted path string
 */
String generatePathInfo(ProviderVolume volume, Map<String,String> connIdMap) {
    StringBuilder finalPath = new StringBuilder(String.format("type=%s; address=%s; providerName=%s; providerID=%s;",
            volume.getAddressType().toString(), volume.getAddress().toLowerCase(), volume.getExternalName(), volume.getExternalUuid()));
    // if a map was provided, add the connection IDs to the path info. the map is all the possible vlun id's used
    // across each host or the hostset (represented with host name key as "*");
    if (connIdMap != null && !connIdMap.isEmpty()) {
        for (Map.Entry<String, String> entry : connIdMap.entrySet()) {
            finalPath.append(String.format(" connid.%s=%s;", entry.getKey(), entry.getValue()));
        }
    }
    return finalPath.toString();
}
ProviderAdapterContext newManagedVolumeContext(DataObject obj) { ProviderAdapterContext newManagedVolumeContext(DataObject obj) {
ProviderAdapterContext ctx = new ProviderAdapterContext(); ProviderAdapterContext ctx = new ProviderAdapterContext();
if (obj instanceof VolumeInfo) { if (obj instanceof VolumeInfo) {
@ -898,4 +996,8 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive
dataIn.setType(ProviderAdapterDataObject.Type.valueOf(data.getType().toString())); dataIn.setType(ProviderAdapterDataObject.Type.valueOf(data.getType().toString()));
return dataIn; return dataIn;
} }
/**
 * Always true for this driver: volumes use host-based VLUN assignments, so a
 * host must be explicitly granted access (attached) each time a volume is used.
 */
public boolean volumesRequireGrantAccessWhenUsed() {
    return true;
}
} }

View File

@ -189,7 +189,6 @@ public class AdaptiveDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle
parameters.setName(dsName); parameters.setName(dsName);
parameters.setProviderName(providerName); parameters.setProviderName(providerName);
parameters.setManaged(true); parameters.setManaged(true);
parameters.setCapacityBytes(capacityBytes);
parameters.setUsedBytes(0); parameters.setUsedBytes(0);
parameters.setCapacityIops(capacityIops); parameters.setCapacityIops(capacityIops);
parameters.setHypervisorType(HypervisorType.KVM); parameters.setHypervisorType(HypervisorType.KVM);
@ -223,7 +222,7 @@ public class AdaptiveDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle
// if we have user-provided capacity bytes, validate they do not exceed the manaaged storage capacity bytes // if we have user-provided capacity bytes, validate they do not exceed the manaaged storage capacity bytes
ProviderVolumeStorageStats stats = api.getManagedStorageStats(); ProviderVolumeStorageStats stats = api.getManagedStorageStats();
if (capacityBytes != null && capacityBytes != 0) { if (capacityBytes != null && capacityBytes != 0 && stats != null) {
if (stats.getCapacityInBytes() > 0) { if (stats.getCapacityInBytes() > 0) {
if (stats.getCapacityInBytes() < capacityBytes) { if (stats.getCapacityInBytes() < capacityBytes) {
throw new InvalidParameterValueException("Capacity bytes provided exceeds the capacity of the storage endpoint: provided by user: " + capacityBytes + ", storage capacity from storage provider: " + stats.getCapacityInBytes()); throw new InvalidParameterValueException("Capacity bytes provided exceeds the capacity of the storage endpoint: provided by user: " + capacityBytes + ", storage capacity from storage provider: " + stats.getCapacityInBytes());
@ -233,8 +232,8 @@ public class AdaptiveDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle
} }
// if we have no user-provided capacity bytes, use the ones provided by storage // if we have no user-provided capacity bytes, use the ones provided by storage
else { else {
if (stats.getCapacityInBytes() <= 0) { if (stats == null || stats.getCapacityInBytes() <= 0) {
throw new InvalidParameterValueException("Capacity bytes note available from the storage provider, user provided capacity bytes must be specified"); throw new InvalidParameterValueException("Capacity bytes not available from the storage provider, user provided capacity bytes must be specified");
} }
parameters.setCapacityBytes(stats.getCapacityInBytes()); parameters.setCapacityBytes(stats.getCapacityInBytes());
} }
@ -383,8 +382,8 @@ public class AdaptiveDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle
* Update the storage pool configuration * Update the storage pool configuration
*/ */
@Override @Override
public void updateStoragePool(StoragePool storagePool, Map<String, String> details) { public void updateStoragePool(StoragePool storagePool, Map<String, String> newDetails) {
_adapterFactoryMap.updateAPI(storagePool.getUuid(), storagePool.getStorageProviderName(), details); _adapterFactoryMap.updateAPI(storagePool.getUuid(), storagePool.getStorageProviderName(), newDetails);
} }
/** /**

View File

@ -131,4 +131,8 @@ public class AdaptivePrimaryDatastoreAdapterFactoryMap {
logger.debug("Creating new ProviderAdapter object for endpoint: " + providerName + "@" + url); logger.debug("Creating new ProviderAdapter object for endpoint: " + providerName + "@" + url);
return api; return api;
} }
public ProviderAdapterFactory getFactory(String providerName) {
return this.factoryMap.get(providerName);
}
} }

View File

@ -54,6 +54,8 @@ public class AdaptivePrimaryHostListener implements HypervisorHostListener {
if (storagePoolHost == null) { if (storagePoolHost == null) {
storagePoolHost = new StoragePoolHostVO(poolId, hostId, ""); storagePoolHost = new StoragePoolHostVO(poolId, hostId, "");
storagePoolHostDao.persist(storagePoolHost); storagePoolHostDao.persist(storagePoolHost);
} else {
return false;
} }
return true; return true;
} }

View File

@ -23,9 +23,9 @@ import java.net.URL;
import java.security.KeyManagementException; import java.security.KeyManagementException;
import java.security.KeyStoreException; import java.security.KeyStoreException;
import java.security.NoSuchAlgorithmException; import java.security.NoSuchAlgorithmException;
import java.text.SimpleDateFormat;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.HashMap; import java.util.HashMap;
import java.util.List;
import java.util.Map; import java.util.Map;
import javax.net.ssl.HostnameVerifier; import javax.net.ssl.HostnameVerifier;
@ -109,7 +109,8 @@ public class FlashArrayAdapter implements ProviderAdapter {
} }
@Override @Override
public ProviderVolume create(ProviderAdapterContext context, ProviderAdapterDataObject dataObject, ProviderAdapterDiskOffering offering, long size) { public ProviderVolume create(ProviderAdapterContext context, ProviderAdapterDataObject dataObject,
ProviderAdapterDiskOffering offering, long size) {
FlashArrayVolume request = new FlashArrayVolume(); FlashArrayVolume request = new FlashArrayVolume();
request.setExternalName( request.setExternalName(
pod + "::" + ProviderVolumeNamer.generateObjectName(context, dataObject)); pod + "::" + ProviderVolumeNamer.generateObjectName(context, dataObject));
@ -128,30 +129,50 @@ public class FlashArrayAdapter implements ProviderAdapter {
* cluster (depending on Cloudstack Storage Pool configuration) * cluster (depending on Cloudstack Storage Pool configuration)
*/ */
@Override @Override
public String attach(ProviderAdapterContext context, ProviderAdapterDataObject dataObject) { public String attach(ProviderAdapterContext context, ProviderAdapterDataObject dataObject, String hostname) {
// should not happen but double check for sanity
if (dataObject.getType() == ProviderAdapterDataObject.Type.SNAPSHOT) {
throw new RuntimeException("This storage provider does not support direct attachments of snapshots to hosts");
}
String volumeName = normalizeName(pod, dataObject.getExternalName()); String volumeName = normalizeName(pod, dataObject.getExternalName());
try { try {
FlashArrayList<FlashArrayConnection> list = POST("/connections?host_group_names=" + hostgroup + "&volume_names=" + volumeName, null, new TypeReference<FlashArrayList<FlashArrayConnection>> () { }); FlashArrayList<FlashArrayConnection> list = null;
FlashArrayHost host = getHost(hostname);
if (host != null) {
list = POST("/connections?host_names=" + host.getName() + "&volume_names=" + volumeName, null,
new TypeReference<FlashArrayList<FlashArrayConnection>>() {
});
}
if (list == null || list.getItems() == null || list.getItems().size() == 0) { if (list == null || list.getItems() == null || list.getItems().size() == 0) {
throw new RuntimeException("Volume attach did not return lun information"); throw new RuntimeException("Volume attach did not return lun information");
} }
FlashArrayConnection connection = (FlashArrayConnection)this.getFlashArrayItem(list); FlashArrayConnection connection = (FlashArrayConnection) this.getFlashArrayItem(list);
if (connection.getLun() == null) { if (connection.getLun() == null) {
throw new RuntimeException("Volume attach missing lun field"); throw new RuntimeException("Volume attach missing lun field");
} }
return ""+connection.getLun(); return "" + connection.getLun();
} catch (Throwable e) { } catch (Throwable e) {
// the volume is already attached. happens in some scenarios where orchestration creates the volume before copying to it // the volume is already attached. happens in some scenarios where orchestration
// creates the volume before copying to it
if (e.toString().contains("Connection already exists")) { if (e.toString().contains("Connection already exists")) {
FlashArrayList<FlashArrayConnection> list = GET("/connections?volume_names=" + volumeName, FlashArrayList<FlashArrayConnection> list = GET("/connections?volume_names=" + volumeName,
new TypeReference<FlashArrayList<FlashArrayConnection>>() { new TypeReference<FlashArrayList<FlashArrayConnection>>() {
}); });
if (list != null && list.getItems() != null) { if (list != null && list.getItems() != null) {
return ""+list.getItems().get(0).getLun(); for (FlashArrayConnection conn : list.getItems()) {
if (conn.getHost() != null && conn.getHost().getName() != null &&
(conn.getHost().getName().equals(hostname) || conn.getHost().getName().equals(hostname.substring(0, hostname.indexOf('.')))) &&
conn.getLun() != null) {
return "" + conn.getLun();
}
}
throw new RuntimeException("Volume lun is not found in existing connection");
} else { } else {
throw new RuntimeException("Volume lun is not found in existing connection"); throw new RuntimeException("Volume lun is not found in existing connection");
} }
@ -162,23 +183,42 @@ public class FlashArrayAdapter implements ProviderAdapter {
} }
@Override @Override
public void detach(ProviderAdapterContext context, ProviderAdapterDataObject dataObject) { public void detach(ProviderAdapterContext context, ProviderAdapterDataObject dataObject, String hostname) {
String volumeName = normalizeName(pod, dataObject.getExternalName()); String volumeName = normalizeName(pod, dataObject.getExternalName());
// hostname is always provided by cloudstack, but we will detach from hostgroup
// if this pool is configured to use hostgroup for attachments
if (hostgroup != null) {
DELETE("/connections?host_group_names=" + hostgroup + "&volume_names=" + volumeName); DELETE("/connections?host_group_names=" + hostgroup + "&volume_names=" + volumeName);
} }
FlashArrayHost host = getHost(hostname);
if (host != null) {
DELETE("/connections?host_names=" + host.getName() + "&volume_names=" + volumeName);
}
}
@Override @Override
public void delete(ProviderAdapterContext context, ProviderAdapterDataObject dataObject) { public void delete(ProviderAdapterContext context, ProviderAdapterDataObject dataObject) {
// public void deleteVolume(String volumeNamespace, String volumeName) {
// first make sure we are disconnected // first make sure we are disconnected
removeVlunsAll(context, pod, dataObject.getExternalName()); removeVlunsAll(context, pod, dataObject.getExternalName());
String fullName = normalizeName(pod, dataObject.getExternalName()); String fullName = normalizeName(pod, dataObject.getExternalName());
FlashArrayVolume volume = new FlashArrayVolume(); FlashArrayVolume volume = new FlashArrayVolume();
volume.setDestroyed(true);
// rename as we delete so it doesn't conflict if the template or volume is ever recreated
// pure keeps the volume(s) around in a Destroyed bucket for a period of time post delete
String timestamp = new SimpleDateFormat("yyyyMMddHHmmss").format(new java.util.Date());
volume.setExternalName(fullName + "-" + timestamp);
try { try {
PATCH("/volumes?names=" + fullName, volume, new TypeReference<FlashArrayList<FlashArrayVolume>>() { PATCH("/volumes?names=" + fullName, volume, new TypeReference<FlashArrayList<FlashArrayVolume>>() {
}); });
// now delete it with new name
volume.setDestroyed(true);
PATCH("/volumes?names=" + fullName + "-" + timestamp, volume, new TypeReference<FlashArrayList<FlashArrayVolume>>() {
});
} catch (CloudRuntimeException e) { } catch (CloudRuntimeException e) {
if (e.toString().contains("Volume does not exist")) { if (e.toString().contains("Volume does not exist")) {
return; return;
@ -205,8 +245,6 @@ public class FlashArrayAdapter implements ProviderAdapter {
return null; return null;
} }
populateConnectionId(volume);
return volume; return volume;
} catch (Exception e) { } catch (Exception e) {
// assume any exception is a not found. Flash returns 400's for most errors // assume any exception is a not found. Flash returns 400's for most errors
@ -217,7 +255,7 @@ public class FlashArrayAdapter implements ProviderAdapter {
@Override @Override
public ProviderVolume getVolumeByAddress(ProviderAdapterContext context, AddressType addressType, String address) { public ProviderVolume getVolumeByAddress(ProviderAdapterContext context, AddressType addressType, String address) {
// public FlashArrayVolume getVolumeByWwn(String wwn) { // public FlashArrayVolume getVolumeByWwn(String wwn) {
if (address == null ||addressType == null) { if (address == null || addressType == null) {
throw new RuntimeException("Invalid search criteria provided for getVolumeByAddress"); throw new RuntimeException("Invalid search criteria provided for getVolumeByAddress");
} }
@ -242,13 +280,11 @@ public class FlashArrayAdapter implements ProviderAdapter {
return null; return null;
} }
volume = (FlashArrayVolume)this.getFlashArrayItem(list); volume = (FlashArrayVolume) this.getFlashArrayItem(list);
if (volume != null && volume.getAddress() == null) { if (volume != null && volume.getAddress() == null) {
return null; return null;
} }
populateConnectionId(volume);
return volume; return volume;
} catch (Exception e) { } catch (Exception e) {
// assume any exception is a not found. Flash returns 400's for most errors // assume any exception is a not found. Flash returns 400's for most errors
@ -256,32 +292,6 @@ public class FlashArrayAdapter implements ProviderAdapter {
} }
} }
private void populateConnectionId(FlashArrayVolume volume) {
// we need to see if there is a connection (lun) associated with this volume.
// note we assume 1 lun for the hostgroup associated with this object
FlashArrayList<FlashArrayConnection> list = null;
try {
list = GET("/connections?volume_names=" + volume.getExternalName(),
new TypeReference<FlashArrayList<FlashArrayConnection>>() {
});
} catch (CloudRuntimeException e) {
// this means there is no attachment associated with this volume on the array
if (e.toString().contains("Bad Request")) {
return;
}
}
if (list != null && list.getItems() != null) {
for (FlashArrayConnection conn: list.getItems()) {
if (conn.getHostGroup() != null && conn.getHostGroup().getName().equals(this.hostgroup)) {
volume.setExternalConnectionId(""+conn.getLun());
break;
}
}
}
}
@Override @Override
public void resize(ProviderAdapterContext context, ProviderAdapterDataObject dataObject, long newSizeInBytes) { public void resize(ProviderAdapterContext context, ProviderAdapterDataObject dataObject, long newSizeInBytes) {
// public void resizeVolume(String volumeNamespace, String volumeName, long // public void resizeVolume(String volumeNamespace, String volumeName, long
@ -299,7 +309,8 @@ public class FlashArrayAdapter implements ProviderAdapter {
* @return * @return
*/ */
@Override @Override
public ProviderSnapshot snapshot(ProviderAdapterContext context, ProviderAdapterDataObject sourceDataObject, ProviderAdapterDataObject targetDataObject) { public ProviderSnapshot snapshot(ProviderAdapterContext context, ProviderAdapterDataObject sourceDataObject,
ProviderAdapterDataObject targetDataObject) {
// public FlashArrayVolume snapshotVolume(String volumeNamespace, String // public FlashArrayVolume snapshotVolume(String volumeNamespace, String
// volumeName, String snapshotName) { // volumeName, String snapshotName) {
FlashArrayList<FlashArrayVolume> list = POST( FlashArrayList<FlashArrayVolume> list = POST(
@ -354,11 +365,12 @@ public class FlashArrayAdapter implements ProviderAdapter {
} }
@Override @Override
public ProviderVolume copy(ProviderAdapterContext context, ProviderAdapterDataObject sourceDataObject, ProviderAdapterDataObject destDataObject) { public ProviderVolume copy(ProviderAdapterContext context, ProviderAdapterDataObject sourceDataObject,
ProviderAdapterDataObject destDataObject) {
// private ManagedVolume copy(ManagedVolume sourceVolume, String destNamespace, // private ManagedVolume copy(ManagedVolume sourceVolume, String destNamespace,
// String destName) { // String destName) {
if (sourceDataObject == null || sourceDataObject.getExternalName() == null if (sourceDataObject == null || sourceDataObject.getExternalName() == null
||sourceDataObject.getType() == null) { || sourceDataObject.getType() == null) {
throw new RuntimeException("Provided volume has no external source information"); throw new RuntimeException("Provided volume has no external source information");
} }
@ -424,12 +436,6 @@ public class FlashArrayAdapter implements ProviderAdapter {
@Override @Override
public void validate() { public void validate() {
login(); login();
// check if hostgroup and pod from details really exist - we will
// require a distinct configuration object/connection object for each type
if (this.getHostgroup(hostgroup) == null) {
throw new RuntimeException("Hostgroup [" + hostgroup + "] not found in FlashArray at [" + url
+ "], please validate configuration");
}
if (this.getVolumeNamespace(pod) == null) { if (this.getVolumeNamespace(pod) == null) {
throw new RuntimeException( throw new RuntimeException(
@ -477,40 +483,36 @@ public class FlashArrayAdapter implements ProviderAdapter {
throw new RuntimeException("Unable to validate host access because a hostname was not provided"); throw new RuntimeException("Unable to validate host access because a hostname was not provided");
} }
List<String> members = getHostgroupMembers(hostgroup); FlashArrayHost host = getHost(hostname);
if (host != null) {
// check for fqdn and shortname combinations. this assumes there is at least a shortname match in both the storage array and cloudstack
// hostname configuration
String shortname;
if (hostname.indexOf('.') > 0) {
shortname = hostname.substring(0, (hostname.indexOf('.')));
} else {
shortname = hostname;
}
for (String member : members) {
// exact match (short or long names)
if (member.equals(hostname)) {
return true; return true;
} }
// primera has short name and cloudstack had long name
if (member.equals(shortname)) {
return true;
}
// member has long name but cloudstack had shortname
if (member.indexOf('.') > 0) {
if (member.substring(0, (member.indexOf('.'))).equals(shortname)) {
return true;
}
}
}
return false; return false;
} }
private FlashArrayHost getHost(String hostname) {
FlashArrayList<FlashArrayHost> list = null;
try {
list = GET("/hosts?names=" + hostname,
new TypeReference<FlashArrayList<FlashArrayHost>>() {
});
} catch (Exception e) {
}
if (list == null) {
if (hostname.indexOf('.') > 0) {
list = GET("/hosts?names=" + hostname.substring(0, (hostname.indexOf('.'))),
new TypeReference<FlashArrayList<FlashArrayHost>>() {
});
}
}
return (FlashArrayHost) getFlashArrayItem(list);
}
private String getAccessToken() { private String getAccessToken() {
refreshSession(false);
return accessToken; return accessToken;
} }
@ -527,13 +529,21 @@ public class FlashArrayAdapter implements ProviderAdapter {
} }
} catch (Exception e) { } catch (Exception e) {
// retry frequently but not every request to avoid DDOS on storage API // retry frequently but not every request to avoid DDOS on storage API
logger.warn("Failed to refresh FlashArray API key for " + username + "@" + url + ", will retry in 5 seconds", logger.warn(
"Failed to refresh FlashArray API key for " + username + "@" + url + ", will retry in 5 seconds",
e); e);
keyExpiration = System.currentTimeMillis() + (5 * 1000); keyExpiration = System.currentTimeMillis() + (5 * 1000);
} }
} }
private void validateLoginInfo(String urlStr) { /**
* Login to the array and get an access token
*/
private void login() {
username = connectionDetails.get(ProviderAdapter.API_USERNAME_KEY);
password = connectionDetails.get(ProviderAdapter.API_PASSWORD_KEY);
String urlStr = connectionDetails.get(ProviderAdapter.API_URL_KEY);
URL urlFull; URL urlFull;
try { try {
urlFull = new URL(urlStr); urlFull = new URL(urlStr);
@ -571,15 +581,6 @@ public class FlashArrayAdapter implements ProviderAdapter {
} }
} }
hostgroup = connectionDetails.get(FlashArrayAdapter.HOSTGROUP);
if (hostgroup == null) {
hostgroup = queryParms.get(FlashArrayAdapter.HOSTGROUP);
if (hostgroup == null) {
throw new RuntimeException(
FlashArrayAdapter.STORAGE_POD + " paramater/option required to configure this storage pool");
}
}
apiLoginVersion = connectionDetails.get(FlashArrayAdapter.API_LOGIN_VERSION); apiLoginVersion = connectionDetails.get(FlashArrayAdapter.API_LOGIN_VERSION);
if (apiLoginVersion == null) { if (apiLoginVersion == null) {
apiLoginVersion = queryParms.get(FlashArrayAdapter.API_LOGIN_VERSION); apiLoginVersion = queryParms.get(FlashArrayAdapter.API_LOGIN_VERSION);
@ -596,6 +597,12 @@ public class FlashArrayAdapter implements ProviderAdapter {
} }
} }
// retrieve for legacy purposes. if set, we'll remove any connections to hostgroup we find and use the host
hostgroup = connectionDetails.get(FlashArrayAdapter.HOSTGROUP);
if (hostgroup == null) {
hostgroup = queryParms.get(FlashArrayAdapter.HOSTGROUP);
}
String connTimeoutStr = connectionDetails.get(FlashArrayAdapter.CONNECT_TIMEOUT_MS); String connTimeoutStr = connectionDetails.get(FlashArrayAdapter.CONNECT_TIMEOUT_MS);
if (connTimeoutStr == null) { if (connTimeoutStr == null) {
connTimeoutStr = queryParms.get(FlashArrayAdapter.CONNECT_TIMEOUT_MS); connTimeoutStr = queryParms.get(FlashArrayAdapter.CONNECT_TIMEOUT_MS);
@ -651,16 +658,7 @@ public class FlashArrayAdapter implements ProviderAdapter {
} else { } else {
skipTlsValidation = true; skipTlsValidation = true;
} }
}
/**
* Login to the array and get an access token
*/
private void login() {
username = connectionDetails.get(ProviderAdapter.API_USERNAME_KEY);
password = connectionDetails.get(ProviderAdapter.API_PASSWORD_KEY);
String urlStr = connectionDetails.get(ProviderAdapter.API_URL_KEY);
validateLoginInfo(urlStr);
CloseableHttpResponse response = null; CloseableHttpResponse response = null;
try { try {
HttpPost request = new HttpPost(url + "/" + apiLoginVersion + "/auth/apitoken"); HttpPost request = new HttpPost(url + "/" + apiLoginVersion + "/auth/apitoken");
@ -749,7 +747,13 @@ public class FlashArrayAdapter implements ProviderAdapter {
if (list != null && list.getItems() != null) { if (list != null && list.getItems() != null) {
for (FlashArrayConnection conn : list.getItems()) { for (FlashArrayConnection conn : list.getItems()) {
DELETE("/connections?host_group_names=" + conn.getHostGroup().getName() + "&volume_names=" + volumeName); if (hostgroup != null && conn.getHostGroup() != null && conn.getHostGroup().getName() != null) {
DELETE("/connections?host_group_names=" + conn.getHostGroup().getName() + "&volume_names="
+ volumeName);
break;
} else if (conn.getHost() != null && conn.getHost().getName() != null) {
DELETE("/connections?host_names=" + conn.getHost().getName() + "&volume_names=" + volumeName);
}
} }
} }
} }
@ -762,32 +766,12 @@ public class FlashArrayAdapter implements ProviderAdapter {
} }
private FlashArrayPod getVolumeNamespace(String name) { private FlashArrayPod getVolumeNamespace(String name) {
FlashArrayList<FlashArrayPod> list = GET("/pods?names=" + name, new TypeReference<FlashArrayList<FlashArrayPod>>() { FlashArrayList<FlashArrayPod> list = GET("/pods?names=" + name,
new TypeReference<FlashArrayList<FlashArrayPod>>() {
}); });
return (FlashArrayPod) getFlashArrayItem(list); return (FlashArrayPod) getFlashArrayItem(list);
} }
private FlashArrayHostgroup getHostgroup(String name) {
FlashArrayList<FlashArrayHostgroup> list = GET("/host-groups?name=" + name,
new TypeReference<FlashArrayList<FlashArrayHostgroup>>() {
});
return (FlashArrayHostgroup) getFlashArrayItem(list);
}
private List<String> getHostgroupMembers(String groupname) {
FlashArrayGroupMemberReferenceList list = GET("/hosts/host-groups?group_names=" + groupname,
new TypeReference<FlashArrayGroupMemberReferenceList>() {
});
if (list == null || list.getItems().size() == 0) {
return null;
}
List<String> hostnames = new ArrayList<String>();
for (FlashArrayGroupMemberReference ref : list.getItems()) {
hostnames.add(ref.getMember().getName());
}
return hostnames;
}
private FlashArrayVolume getSnapshot(String snapshotName) { private FlashArrayVolume getSnapshot(String snapshotName) {
FlashArrayList<FlashArrayVolume> list = GET("/volume-snapshots?names=" + snapshotName, FlashArrayList<FlashArrayVolume> list = GET("/volume-snapshots?names=" + snapshotName,
new TypeReference<FlashArrayList<FlashArrayVolume>>() { new TypeReference<FlashArrayList<FlashArrayVolume>>() {
@ -856,7 +840,8 @@ public class FlashArrayAdapter implements ProviderAdapter {
} }
return null; return null;
} catch (UnsupportedOperationException | IOException e) { } catch (UnsupportedOperationException | IOException e) {
throw new CloudRuntimeException("Error processing response from FlashArray [" + url + path + "]", e); throw new CloudRuntimeException("Error processing response from FlashArray [" + url + path + "]",
e);
} }
} else if (statusCode == 400) { } else if (statusCode == 400) {
try { try {
@ -1083,4 +1068,39 @@ public class FlashArrayAdapter implements ProviderAdapter {
} }
return sizeInBytes; return sizeInBytes;
} }
@Override
public Map<String, String> getConnectionIdMap(ProviderAdapterDataObject dataIn) {
Map<String, String> map = new HashMap<String, String>();
// flasharray doesn't let you directly map a snapshot to a host, so we'll just return an empty map
if (dataIn.getType() == ProviderAdapterDataObject.Type.SNAPSHOT) {
return map;
}
try {
FlashArrayList<FlashArrayConnection> list = GET("/connections?volume_names=" + dataIn.getExternalName(),
new TypeReference<FlashArrayList<FlashArrayConnection>>() {
});
if (list != null && list.getItems() != null) {
for (FlashArrayConnection conn : list.getItems()) {
if (conn.getHost() != null) {
map.put(conn.getHost().getName(), "" + conn.getLun());
}
}
}
} catch (Exception e) {
// flasharray returns a 400 if the volume doesn't exist, so we'll just return an empty object.
if (logger.isTraceEnabled()) {
logger.trace("Error getting connection map for volume [" + dataIn.getExternalName() + "]: " + e.toString(), e);
}
}
return map;
}
@Override
public boolean canDirectAttachSnapshot() {
return false;
}
} }

View File

@ -33,4 +33,9 @@ public class FlashArrayAdapterFactory implements ProviderAdapterFactory {
return new FlashArrayAdapter(url, details); return new FlashArrayAdapter(url, details);
} }
@Override
public Object canDirectAttachSnapshot() {
return false;
}
} }

View File

@ -0,0 +1,46 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.flasharray;
import java.util.List;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class FlashArrayHost {
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public List<String> getWwns() {
return wwns;
}
public void setWwns(List<String> wwns) {
this.wwns = wwns;
}
@JsonProperty("name")
private String name;
@JsonProperty("wwns")
private List<String> wwns;
}

View File

@ -83,7 +83,7 @@ public class FlashArrayVolume implements ProviderSnapshot {
@JsonIgnore @JsonIgnore
public String getPodName() { public String getPodName() {
if (pod != null) { if (pod != null) {
return pod.getName(); return pod.name;
} else { } else {
return null; return null;
} }
@ -129,7 +129,7 @@ public class FlashArrayVolume implements ProviderSnapshot {
} }
public void setPodName(String podname) { public void setPodName(String podname) {
FlashArrayVolumePod pod = new FlashArrayVolumePod(); FlashArrayVolumePod pod = new FlashArrayVolumePod();
pod.setName(podname); pod.name = podname;
this.pod = pod; this.pod = pod;
} }
@Override @Override

View File

@ -24,20 +24,7 @@ import com.fasterxml.jackson.annotation.JsonProperty;
@JsonInclude(JsonInclude.Include.NON_NULL) @JsonInclude(JsonInclude.Include.NON_NULL)
public class FlashArrayVolumePod { public class FlashArrayVolumePod {
@JsonProperty("id") @JsonProperty("id")
private String id; public String id;
@JsonProperty("name") @JsonProperty("name")
private String name; public String name;
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
} }

View File

@ -24,7 +24,6 @@ import java.security.KeyManagementException;
import java.security.KeyStoreException; import java.security.KeyStoreException;
import java.security.NoSuchAlgorithmException; import java.security.NoSuchAlgorithmException;
import java.util.HashMap; import java.util.HashMap;
import java.util.List;
import java.util.Map; import java.util.Map;
import javax.net.ssl.HostnameVerifier; import javax.net.ssl.HostnameVerifier;
@ -73,7 +72,7 @@ public class PrimeraAdapter implements ProviderAdapter {
public static final String TASK_WAIT_TIMEOUT_MS = "taskWaitTimeoutMs"; public static final String TASK_WAIT_TIMEOUT_MS = "taskWaitTimeoutMs";
private static final long KEY_TTL_DEFAULT = (1000 * 60 * 14); private static final long KEY_TTL_DEFAULT = (1000 * 60 * 14);
private static final long CONNECT_TIMEOUT_MS_DEFAULT = 600000; private static final long CONNECT_TIMEOUT_MS_DEFAULT = 60 * 1000;
private static final long TASK_WAIT_TIMEOUT_MS_DEFAULT = 10 * 60 * 1000; private static final long TASK_WAIT_TIMEOUT_MS_DEFAULT = 10 * 60 * 1000;
public static final long BYTES_IN_MiB = 1048576; public static final long BYTES_IN_MiB = 1048576;
@ -106,18 +105,11 @@ public class PrimeraAdapter implements ProviderAdapter {
this.refreshSession(true); this.refreshSession(true);
} }
/**
* Validate that the hostgroup and pod from the details data exists. Each
* configuration object/connection needs a distinct set of these 2 things.
*/
@Override @Override
public void validate() { public void validate() {
login(); login();
if (this.getHostset(hostset) == null) { // check if hostgroup and pod from details really exist - we will
throw new RuntimeException("Hostgroup [" + hostset + "] not found in FlashArray at [" + url // require a distinct configuration object/connection object for each type
+ "], please validate configuration");
}
if (this.getCpg(cpg) == null) { if (this.getCpg(cpg) == null) {
throw new RuntimeException( throw new RuntimeException(
"Pod [" + cpg + "] not found in FlashArray at [" + url + "], please validate configuration"); "Pod [" + cpg + "] not found in FlashArray at [" + url + "], please validate configuration");
@ -126,6 +118,15 @@ public class PrimeraAdapter implements ProviderAdapter {
@Override @Override
public void disconnect() { public void disconnect() {
logger.info("PrimeraAdapter:disconnect(): closing session");
try {
_client.close();
} catch (IOException e) {
logger.warn("PrimeraAdapter:refreshSession(): Error closing client connection", e);
} finally {
_client = null;
keyExpiration = -1;
}
return; return;
} }
@ -176,10 +177,15 @@ public class PrimeraAdapter implements ProviderAdapter {
} }
@Override @Override
public String attach(ProviderAdapterContext context, ProviderAdapterDataObject dataIn) { public String attach(ProviderAdapterContext context, ProviderAdapterDataObject dataIn, String hostname) {
assert dataIn.getExternalName() != null : "External name not provided internally on volume attach"; assert dataIn.getExternalName() != null : "External name not provided internally on volume attach";
PrimeraHostset.PrimeraHostsetVLUNRequest request = new PrimeraHostset.PrimeraHostsetVLUNRequest(); PrimeraHostset.PrimeraHostsetVLUNRequest request = new PrimeraHostset.PrimeraHostsetVLUNRequest();
request.setHostname("set:" + hostset); PrimeraHost host = getHost(hostname);
if (host == null) {
throw new RuntimeException("Unable to find host " + hostname + " on storage provider");
}
request.setHostname(host.getName());
request.setVolumeName(dataIn.getExternalName()); request.setVolumeName(dataIn.getExternalName());
request.setAutoLun(true); request.setAutoLun(true);
// auto-lun returned here: Location: /api/v1/vluns/test_vv02,252,mysystem,2:2:4 // auto-lun returned here: Location: /api/v1/vluns/test_vv02,252,mysystem,2:2:4
@ -194,12 +200,36 @@ public class PrimeraAdapter implements ProviderAdapter {
return toks[1]; return toks[1];
} }
@Override /**
* This detaches ALL vlun's for the provided volume name IF they are associated to this hostset
* @param context
* @param request
*/
public void detach(ProviderAdapterContext context, ProviderAdapterDataObject request) { public void detach(ProviderAdapterContext context, ProviderAdapterDataObject request) {
detach(context, request, null);
}
@Override
public void detach(ProviderAdapterContext context, ProviderAdapterDataObject request, String hostname) {
// we expect to only be attaching one hostset to the vluns, so on detach we'll // we expect to only be attaching one hostset to the vluns, so on detach we'll
// remove ALL vluns we find. // remove ALL vluns we find.
assert request.getExternalName() != null : "External name not provided internally on volume detach"; assert request.getExternalName() != null : "External name not provided internally on volume detach";
removeAllVluns(request.getExternalName());
PrimeraVlunList list = getVluns(request.getExternalName());
if (list != null && list.getMembers().size() > 0) {
list.getMembers().forEach(vlun -> {
// remove any hostset from old code if configured
if (hostset != null && vlun.getHostname() != null && vlun.getHostname().equals("set:" + hostset)) {
removeVlun(request.getExternalName(), vlun.getLun(), vlun.getHostname());
}
if (hostname != null) {
if (vlun.getHostname().equals(hostname) || vlun.getHostname().equals(hostname.split("\\.")[0])) {
removeVlun(request.getExternalName(), vlun.getLun(), vlun.getHostname());
}
}
});
}
} }
public void removeVlun(String name, Integer lunid, String hostString) { public void removeVlun(String name, Integer lunid, String hostString) {
@ -208,20 +238,7 @@ public class PrimeraAdapter implements ProviderAdapter {
DELETE("/vluns/" + name + "," + lunid + "," + hostString); DELETE("/vluns/" + name + "," + lunid + "," + hostString);
} }
/** public PrimeraVlunList getVluns(String name) {
* Removes all vluns - this should only be done when you are sure the volume is no longer in use
* @param name
*/
public void removeAllVluns(String name) {
PrimeraVlunList list = getVolumeHostsets(name);
if (list != null && list.getMembers() != null) {
for (PrimeraVlun vlun: list.getMembers()) {
removeVlun(vlun.getVolumeName(), vlun.getLun(), vlun.getHostname());
}
}
}
public PrimeraVlunList getVolumeHostsets(String name) {
String query = "%22volumeName%20EQ%20" + name + "%22"; String query = "%22volumeName%20EQ%20" + name + "%22";
return GET("/vluns?query=" + query, new TypeReference<PrimeraVlunList>() {}); return GET("/vluns?query=" + query, new TypeReference<PrimeraVlunList>() {});
} }
@ -231,7 +248,7 @@ public class PrimeraAdapter implements ProviderAdapter {
assert request.getExternalName() != null : "External name not provided internally on volume delete"; assert request.getExternalName() != null : "External name not provided internally on volume delete";
// first remove vluns (take volumes from vluns) from hostset // first remove vluns (take volumes from vluns) from hostset
removeAllVluns(request.getExternalName()); detach(context, request);
DELETE("/volumes/" + request.getExternalName()); DELETE("/volumes/" + request.getExternalName());
} }
@ -420,6 +437,7 @@ public class PrimeraAdapter implements ProviderAdapter {
if (cpgobj == null || cpgobj.getTotalSpaceMiB() == 0) { if (cpgobj == null || cpgobj.getTotalSpaceMiB() == 0) {
return null; return null;
} }
Long capacityBytes = 0L; Long capacityBytes = 0L;
if (cpgobj.getsDGrowth() != null) { if (cpgobj.getsDGrowth() != null) {
capacityBytes = cpgobj.getsDGrowth().getLimitMiB() * PrimeraAdapter.BYTES_IN_MiB; capacityBytes = cpgobj.getsDGrowth().getLimitMiB() * PrimeraAdapter.BYTES_IN_MiB;
@ -453,73 +471,59 @@ public class PrimeraAdapter implements ProviderAdapter {
@Override @Override
public boolean canAccessHost(ProviderAdapterContext context, String hostname) { public boolean canAccessHost(ProviderAdapterContext context, String hostname) {
PrimeraHostset hostset = getHostset(this.hostset); // check that the array has the host configured
PrimeraHost host = this.getHost(hostname);
List<String> members = hostset.getSetmembers(); if (host != null) {
// if hostset is configured we'll additionally check if the host is in it (legacy/original behavior)
// check for fqdn and shortname combinations. this assumes there is at least a shortname match in both the storage array and cloudstack
// hostname configuration
String shortname;
if (hostname.indexOf('.') > 0) {
shortname = hostname.substring(0, (hostname.indexOf('.')));
} else {
shortname = hostname;
}
for (String member: members) {
// exact match (short or long names)
if (member.equals(hostname)) {
return true; return true;
} }
// primera has short name and cloudstack had long name
if (member.equals(shortname)) {
return true;
}
// member has long name but cloudstack had shortname
int index = member.indexOf(".");
if (index > 0) {
if (member.substring(0, (member.indexOf('.'))).equals(shortname)) {
return true;
}
}
}
return false; return false;
} }
/**
 * Looks up a host object on the array by name. If nothing is found and the
 * name looks like an FQDN, retries once with the short (unqualified) name.
 */
private PrimeraHost getHost(String name) {
    PrimeraHost host = GET("/hosts/" + name, new TypeReference<PrimeraHost>() { });
    int dot = name.indexOf('.');
    if (host == null && dot > 0) {
        host = this.getHost(name.substring(0, dot));
    }
    return host;
}
private PrimeraCpg getCpg(String name) { private PrimeraCpg getCpg(String name) {
return GET("/cpgs/" + name, new TypeReference<PrimeraCpg>() { return GET("/cpgs/" + name, new TypeReference<PrimeraCpg>() {
}); });
} }
private PrimeraHostset getHostset(String name) { private synchronized String refreshSession(boolean force) {
return GET("/hostsets/" + name, new TypeReference<PrimeraHostset>() {
});
}
private String getSessionKey() {
refreshSession(false);
return key;
}
private synchronized void refreshSession(boolean force) {
try { try {
if (force || keyExpiration < System.currentTimeMillis()) { if (force || keyExpiration < (System.currentTimeMillis()-15000)) {
// close client to force connection reset on appliance -- not doing this can result in NotAuthorized error...guessing // close client to force connection reset on appliance -- not doing this can result in NotAuthorized error...guessing
_client.close();; disconnect();
_client = null;
login(); login();
keyExpiration = System.currentTimeMillis() + keyTtl; logger.debug("PrimeraAdapter:refreshSession(): session created or refreshed with key=" + key + ", expiration=" + keyExpiration);
} else {
if (logger.isTraceEnabled()) {
logger.trace("PrimeraAdapter:refreshSession(): using existing session key=" + key + ", expiration=" + keyExpiration);
}
} }
} catch (Exception e) { } catch (Exception e) {
// retry frequently but not every request to avoid DDOS on storage API // retry frequently but not every request to avoid DDOS on storage API
logger.warn("Failed to refresh Primera API key for " + username + "@" + url + ", will retry in 5 seconds", e); logger.warn("Failed to refresh Primera API key for " + username + "@" + url + ", will retry in 5 seconds", e);
keyExpiration = System.currentTimeMillis() + (5*1000); keyExpiration = System.currentTimeMillis() + (5*1000);
} }
return key;
} }
/**
* Login to the array and get an access token
*/
private void login() {
username = connectionDetails.get(ProviderAdapter.API_USERNAME_KEY);
password = connectionDetails.get(ProviderAdapter.API_PASSWORD_KEY);
String urlStr = connectionDetails.get(ProviderAdapter.API_URL_KEY);
private void validateLoginInfo(String urlStr) {
URL urlFull; URL urlFull;
try { try {
urlFull = new URL(urlStr); urlFull = new URL(urlStr);
@ -553,7 +557,7 @@ public class PrimeraAdapter implements ProviderAdapter {
cpg = queryParms.get(PrimeraAdapter.CPG); cpg = queryParms.get(PrimeraAdapter.CPG);
if (cpg == null) { if (cpg == null) {
throw new RuntimeException( throw new RuntimeException(
PrimeraAdapter.CPG + " paramater/option required to configure this storage pool"); PrimeraAdapter.CPG + " parameter/option required to configure this storage pool");
} }
} }
@ -566,13 +570,10 @@ public class PrimeraAdapter implements ProviderAdapter {
} }
} }
// if this is null, we will use direct-to-host vlunids (preferred)
hostset = connectionDetails.get(PrimeraAdapter.HOSTSET); hostset = connectionDetails.get(PrimeraAdapter.HOSTSET);
if (hostset == null) { if (hostset == null) {
hostset = queryParms.get(PrimeraAdapter.HOSTSET); hostset = queryParms.get(PrimeraAdapter.HOSTSET);
if (hostset == null) {
throw new RuntimeException(
PrimeraAdapter.HOSTSET + " paramater/option required to configure this storage pool");
}
} }
String connTimeoutStr = connectionDetails.get(PrimeraAdapter.CONNECT_TIMEOUT_MS); String connTimeoutStr = connectionDetails.get(PrimeraAdapter.CONNECT_TIMEOUT_MS);
@ -629,16 +630,7 @@ public class PrimeraAdapter implements ProviderAdapter {
} else { } else {
skipTlsValidation = true; skipTlsValidation = true;
} }
}
/**
* Login to the array and get an access token
*/
private void login() {
username = connectionDetails.get(ProviderAdapter.API_USERNAME_KEY);
password = connectionDetails.get(ProviderAdapter.API_PASSWORD_KEY);
String urlStr = connectionDetails.get(ProviderAdapter.API_URL_KEY);
validateLoginInfo(urlStr);
CloseableHttpResponse response = null; CloseableHttpResponse response = null;
try { try {
HttpPost request = new HttpPost(url + "/credentials"); HttpPost request = new HttpPost(url + "/credentials");
@ -652,6 +644,9 @@ public class PrimeraAdapter implements ProviderAdapter {
if (statusCode == 200 | statusCode == 201) { if (statusCode == 200 | statusCode == 201) {
PrimeraKey keyobj = mapper.readValue(response.getEntity().getContent(), PrimeraKey.class); PrimeraKey keyobj = mapper.readValue(response.getEntity().getContent(), PrimeraKey.class);
key = keyobj.getKey(); key = keyobj.getKey();
// Set the key expiration to x minutes from now
this.keyExpiration = System.currentTimeMillis() + keyTtl;
logger.info("PrimeraAdapter:login(): successful, new session: New key=" + key + ", expiration=" + this.keyExpiration);
} else if (statusCode == 401 || statusCode == 403) { } else if (statusCode == 401 || statusCode == 403) {
throw new RuntimeException("Authentication or Authorization to Primera [" + url + "] with user [" + username throw new RuntimeException("Authentication or Authorization to Primera [" + url + "] with user [" + username
+ "] failed, unable to retrieve session token"); + "] failed, unable to retrieve session token");
@ -712,15 +707,15 @@ public class PrimeraAdapter implements ProviderAdapter {
private <T> T POST(String path, Object input, final TypeReference<T> type) { private <T> T POST(String path, Object input, final TypeReference<T> type) {
CloseableHttpResponse response = null; CloseableHttpResponse response = null;
try { try {
this.refreshSession(false); String session_key = this.refreshSession(false);
HttpPost request = new HttpPost(url + path); HttpPost request = new HttpPost(url + path);
request.addHeader("Content-Type", "application/json"); request.addHeader("Content-Type", "application/json");
request.addHeader("Accept", "application/json"); request.addHeader("Accept", "application/json");
request.addHeader("X-HP3PAR-WSAPI-SessionKey", getSessionKey()); request.addHeader("X-HP3PAR-WSAPI-SessionKey", session_key);
try { try {
String data = mapper.writeValueAsString(input); String data = mapper.writeValueAsString(input);
request.setEntity(new StringEntity(data)); request.setEntity(new StringEntity(data));
logger.debug("POST data: " + request.getEntity()); if (logger.isTraceEnabled()) logger.trace("POST data: " + request.getEntity());
} catch (UnsupportedEncodingException | JsonProcessingException e) { } catch (UnsupportedEncodingException | JsonProcessingException e) {
throw new RuntimeException( throw new RuntimeException(
"Error processing request payload to [" + url + "] for path [" + path + "]", e); "Error processing request payload to [" + url + "] for path [" + path + "]", e);
@ -797,10 +792,11 @@ public class PrimeraAdapter implements ProviderAdapter {
CloseableHttpResponse response = null; CloseableHttpResponse response = null;
try { try {
this.refreshSession(false); this.refreshSession(false);
String session_key = this.refreshSession(false);
HttpPut request = new HttpPut(url + path); HttpPut request = new HttpPut(url + path);
request.addHeader("Content-Type", "application/json"); request.addHeader("Content-Type", "application/json");
request.addHeader("Accept", "application/json"); request.addHeader("Accept", "application/json");
request.addHeader("X-HP3PAR-WSAPI-SessionKey", getSessionKey()); request.addHeader("X-HP3PAR-WSAPI-SessionKey", session_key);
String data = mapper.writeValueAsString(input); String data = mapper.writeValueAsString(input);
request.setEntity(new StringEntity(data)); request.setEntity(new StringEntity(data));
@ -850,10 +846,11 @@ public class PrimeraAdapter implements ProviderAdapter {
CloseableHttpResponse response = null; CloseableHttpResponse response = null;
try { try {
this.refreshSession(false); this.refreshSession(false);
String session_key = this.refreshSession(false);
HttpGet request = new HttpGet(url + path); HttpGet request = new HttpGet(url + path);
request.addHeader("Content-Type", "application/json"); request.addHeader("Content-Type", "application/json");
request.addHeader("Accept", "application/json"); request.addHeader("Accept", "application/json");
request.addHeader("X-HP3PAR-WSAPI-SessionKey", getSessionKey()); request.addHeader("X-HP3PAR-WSAPI-SessionKey", session_key);
CloseableHttpClient client = getClient(); CloseableHttpClient client = getClient();
response = (CloseableHttpResponse) client.execute(request); response = (CloseableHttpResponse) client.execute(request);
@ -892,10 +889,11 @@ public class PrimeraAdapter implements ProviderAdapter {
CloseableHttpResponse response = null; CloseableHttpResponse response = null;
try { try {
this.refreshSession(false); this.refreshSession(false);
String session_key = this.refreshSession(false);
HttpDelete request = new HttpDelete(url + path); HttpDelete request = new HttpDelete(url + path);
request.addHeader("Content-Type", "application/json"); request.addHeader("Content-Type", "application/json");
request.addHeader("Accept", "application/json"); request.addHeader("Accept", "application/json");
request.addHeader("X-HP3PAR-WSAPI-SessionKey", getSessionKey()); request.addHeader("X-HP3PAR-WSAPI-SessionKey", session_key);
CloseableHttpClient client = getClient(); CloseableHttpClient client = getClient();
response = (CloseableHttpResponse) client.execute(request); response = (CloseableHttpResponse) client.execute(request);
@ -926,5 +924,22 @@ public class PrimeraAdapter implements ProviderAdapter {
} }
} }
/**
 * Builds a map of array host name to assigned LUN id for every VLUN currently
 * exported for the given volume.
 *
 * @param dataIn volume data object; its external name identifies the volume on the array
 * @return map keyed by host name, value is the LUN id rendered as a string
 *         (empty map when the volume has no VLUNs)
 */
@Override
public Map<String, String> getConnectionIdMap(ProviderAdapterDataObject dataIn) {
    Map<String, String> lunsByHost = new HashMap<>();
    PrimeraVlunList vluns = this.getVluns(dataIn.getExternalName());
    if (vluns != null && vluns.getMembers() != null) {
        vluns.getMembers().forEach(vlun -> lunsByHost.put(vlun.getHostname(), String.valueOf(vlun.getLun())));
    }
    return lunsByHost;
}
/**
 * Always true: this adapter reports that snapshots can be attached directly.
 */
@Override
public boolean canDirectAttachSnapshot() {
return true;
}
} }

View File

@ -33,4 +33,9 @@ public class PrimeraAdapterFactory implements ProviderAdapterFactory {
return new PrimeraAdapter(url, details); return new PrimeraAdapter(url, details);
} }
// NOTE(review): declared to return Object while PrimeraAdapter.canDirectAttachSnapshot()
// returns boolean (the literal true is autoboxed to Boolean here). Confirm that the
// ProviderAdapterFactory interface really declares Object; if not, this should be
// boolean for consistency with the adapter.
@Override
public Object canDirectAttachSnapshot() {
return true;
}
} }

View File

@ -0,0 +1,56 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.primera;
import java.util.List;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
/**
 * Host object as returned by the HPE Primera WSAPI {@code /hosts} endpoint.
 * Unknown JSON properties are ignored on deserialization and null fields are
 * omitted on serialization.
 */
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class PrimeraHost {
    private Integer id;                       // array-side numeric identifier of the host
    private String name;                      // host name as configured on the array
    private List<PrimeraPort> fcPaths;        // fibre-channel paths (WWN + port position)
    private PrimeraHostDescriptor descriptors; // extra host metadata (IP address, OS)

    public Integer getId() {
        return id;
    }

    public void setId(Integer id) {
        this.id = id;
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    public List<PrimeraPort> getFcPaths() {
        return fcPaths;
    }

    public void setFcPaths(List<PrimeraPort> fcPaths) {
        this.fcPaths = fcPaths;
    }

    public PrimeraHostDescriptor getDescriptors() {
        return descriptors;
    }

    public void setDescriptors(PrimeraHostDescriptor descriptors) {
        this.descriptors = descriptors;
    }
}

View File

@ -0,0 +1,40 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.primera;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
/**
 * Descriptor metadata nested inside a Primera host object: the management IP
 * address and operating system string reported by the array. Unknown JSON
 * properties are ignored; null fields are omitted on serialization.
 */
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class PrimeraHostDescriptor {
    private String IPAddr = null; // host management IP address
    private String os = null;     // operating system configured for the host

    public String getIPAddr() {
        return IPAddr;
    }

    public void setIPAddr(String iPAddr) {
        this.IPAddr = iPAddr;
    }

    public String getOs() {
        return os;
    }

    public void setOs(String os) {
        this.os = os;
    }
}

View File

@ -34,105 +34,115 @@ public class PrimeraHostset {
private String uuid; private String uuid;
private Map<String, Object> additionalProperties = new LinkedHashMap<String, Object>(); private Map<String, Object> additionalProperties = new LinkedHashMap<String, Object>();
public String getComment() { public String getComment() {
return comment; return comment;
} }
public void setComment(String comment) { public void setComment(String comment) {
this.comment = comment; this.comment = comment;
} }
public Integer getId() { public Integer getId() {
return id; return id;
} }
public void setId(Integer id) { public void setId(Integer id) {
this.id = id; this.id = id;
} }
public String getName() { public String getName() {
return name; return name;
} }
public void setName(String name) { public void setName(String name) {
this.name = name; this.name = name;
} }
public List<String> getSetmembers() { public List<String> getSetmembers() {
return setmembers; return setmembers;
} }
public void setSetmembers(List<String> setmembers) { public void setSetmembers(List<String> setmembers) {
this.setmembers = setmembers; this.setmembers = setmembers;
} }
public String getUuid() { public String getUuid() {
return uuid; return uuid;
} }
public void setUuid(String uuid) { public void setUuid(String uuid) {
this.uuid = uuid; this.uuid = uuid;
} }
public Map<String, Object> getAdditionalProperties() { public Map<String, Object> getAdditionalProperties() {
return additionalProperties; return additionalProperties;
} }
public void setAdditionalProperties(Map<String, Object> additionalProperties) { public void setAdditionalProperties(Map<String, Object> additionalProperties) {
this.additionalProperties = additionalProperties; this.additionalProperties = additionalProperties;
} }
// adds members to a hostset // adds members to a hostset
public static class PrimeraHostsetVLUNRequest { public static class PrimeraHostsetVLUNRequest {
private String volumeName; private String volumeName;
private Boolean autoLun = true; private Boolean autoLun = true;
private Integer lun = 0; private Integer lun = 0;
private Integer maxAutoLun = 0; private Integer maxAutoLun = 0;
/** // hostset format: "set:<hostset>"
* This can be a single hostname OR the set of hosts in the format
* "set:<hostset>".
* For the purposes of this driver, its expected that the predominate usecase is
* to use
* a hostset that is aligned with a CloudStack Cluster.
*/
private String hostname; private String hostname;
public String getVolumeName() { public String getVolumeName() {
return volumeName; return volumeName;
} }
public void setVolumeName(String volumeName) { public void setVolumeName(String volumeName) {
this.volumeName = volumeName; this.volumeName = volumeName;
} }
public Boolean getAutoLun() { public Boolean getAutoLun() {
return autoLun; return autoLun;
} }
public void setAutoLun(Boolean autoLun) { public void setAutoLun(Boolean autoLun) {
this.autoLun = autoLun; this.autoLun = autoLun;
} }
public Integer getLun() { public Integer getLun() {
return lun; return lun;
} }
public void setLun(Integer lun) { public void setLun(Integer lun) {
this.lun = lun; this.lun = lun;
} }
public Integer getMaxAutoLun() { public Integer getMaxAutoLun() {
return maxAutoLun; return maxAutoLun;
} }
public void setMaxAutoLun(Integer maxAutoLun) { public void setMaxAutoLun(Integer maxAutoLun) {
this.maxAutoLun = maxAutoLun; this.maxAutoLun = maxAutoLun;
} }
public String getHostname() { public String getHostname() {
return hostname; return hostname;
} }
public void setHostname(String hostname) { public void setHostname(String hostname) {
this.hostname = hostname; this.hostname = hostname;
} }

View File

@ -0,0 +1,40 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.primera;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
/**
 * A fibre-channel path entry for a Primera host: the world-wide name plus the
 * node/slot/cardPort position on the array. Unknown JSON properties are
 * ignored; null fields are omitted on serialization.
 */
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class PrimeraPort {
    private String wwn;          // world-wide name of the path
    private PrimeraPortPos portPos; // physical port position on the array

    public String getWwn() {
        return wwn;
    }

    public void setWwn(String wwn) {
        this.wwn = wwn;
    }

    public PrimeraPortPos getPortPos() {
        return portPos;
    }

    public void setPortPos(PrimeraPortPos portPos) {
        this.portPos = portPos;
    }
}

View File

@ -0,0 +1,47 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.primera;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
/**
 * Physical position of a port on the Primera array, expressed as
 * node / slot / cardPort coordinates. Unknown JSON properties are ignored;
 * null fields are omitted on serialization.
 */
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class PrimeraPortPos {
    private Integer cardPort; // port number on the card
    private Integer node;     // controller node index
    private Integer slot;     // card slot index

    public Integer getCardPort() {
        return cardPort;
    }

    public void setCardPort(Integer cardPort) {
        this.cardPort = cardPort;
    }

    public Integer getNode() {
        return node;
    }

    public void setNode(Integer node) {
        this.node = node;
    }

    public Integer getSlot() {
        return slot;
    }

    public void setSlot(Integer slot) {
        this.slot = slot;
    }
}

View File

@ -35,7 +35,7 @@ public class PrimeraVolumeCopyRequestParameters {
private String snapCPG = null; private String snapCPG = null;
private Boolean skipZero = null; private Boolean skipZero = null;
private Boolean saveSnapshot = null; private Boolean saveSnapshot = null;
/** 1=HIGH, 2=MED, 3=LOW */ // 1=HIGH, 2=MED, 3=LOW
private Integer priority = null; private Integer priority = null;
public String getDestVolume() { public String getDestVolume() {
return destVolume; return destVolume;

View File

@ -22,10 +22,7 @@ import com.fasterxml.jackson.annotation.JsonInclude;
@JsonIgnoreProperties(ignoreUnknown = true) @JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL) @JsonInclude(JsonInclude.Include.NON_NULL)
public class PrimeraVolumePromoteRequest { public class PrimeraVolumePromoteRequest {
/** private Integer action = 4; // PROMOTE_VIRTUAL_COPY, https://support.hpe.com/hpesc/public/docDisplay?docId=a00114827en_us&page=v25706371.html
* Defines action for the request as described at https://support.hpe.com/hpesc/public/docDisplay?docId=a00114827en_us&page=v25706371.html
*/
private Integer action = 4;
private Boolean online = true; private Boolean online = true;
private Integer priority = 2; // MEDIUM private Integer priority = 2; // MEDIUM
private Boolean allowRemoteCopyParent = true; private Boolean allowRemoteCopyParent = true;

View File

@ -68,6 +68,11 @@ public class OAuth2UserAuthenticator extends AdapterBase implements UserAuthenti
final String[] provider = (String[])requestParameters.get(ApiConstants.PROVIDER); final String[] provider = (String[])requestParameters.get(ApiConstants.PROVIDER);
final String[] emailArray = (String[])requestParameters.get(ApiConstants.EMAIL); final String[] emailArray = (String[])requestParameters.get(ApiConstants.EMAIL);
final String[] secretCodeArray = (String[])requestParameters.get(ApiConstants.SECRET_CODE); final String[] secretCodeArray = (String[])requestParameters.get(ApiConstants.SECRET_CODE);
if (provider == null) {
return new Pair<Boolean, ActionOnFailedAuthentication>(false, null);
}
String oauthProvider = ((provider == null) ? null : provider[0]); String oauthProvider = ((provider == null) ? null : provider[0]);
String email = ((emailArray == null) ? null : emailArray[0]); String email = ((emailArray == null) ? null : emailArray[0]);
String secretCode = ((secretCodeArray == null) ? null : secretCodeArray[0]); String secretCode = ((secretCodeArray == null) ? null : secretCodeArray[0]);

View File

@ -22,7 +22,7 @@ OUTPUT_FILE=${3:?"Output file/path is required"}
# Log the exact command we run; cache mode must match the convert invocation
# below (which was changed to writeback, leaving this message stale at "none")
echo "$(date): qemu-img convert -n -p -W -t writeback -O ${OUTPUT_FORMAT} ${INPUT_FILE} ${OUTPUT_FILE}"
qemu-img convert -n -p -W -t none -O ${OUTPUT_FORMAT} ${INPUT_FILE} ${OUTPUT_FILE} && { qemu-img convert -n -p -W -t writeback -O ${OUTPUT_FORMAT} ${INPUT_FILE} ${OUTPUT_FILE} && {
# if its a block device make sure we flush caches before exiting # if its a block device make sure we flush caches before exiting
lsblk ${OUTPUT_FILE} >/dev/null 2>&1 && { lsblk ${OUTPUT_FILE} >/dev/null 2>&1 && {
blockdev --flushbufs ${OUTPUT_FILE} blockdev --flushbufs ${OUTPUT_FILE}

View File

@ -1057,7 +1057,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
created = false; created = false;
VolumeInfo vol = volFactory.getVolume(cmd.getEntityId()); VolumeInfo vol = volFactory.getVolume(cmd.getEntityId());
vol.stateTransit(Volume.Event.DestroyRequested); vol.stateTransit(Volume.Event.DestroyRequested);
throw new CloudRuntimeException("Failed to create volume: " + volume.getId(), e); throw new CloudRuntimeException("Failed to create volume: " + volume.getUuid(), e);
} finally { } finally {
if (!created) { if (!created) {
s_logger.trace("Decrementing volume resource count for account id=" + volume.getAccountId() + " as volume failed to create on the backend"); s_logger.trace("Decrementing volume resource count for account id=" + volume.getAccountId() + " as volume failed to create on the backend");
@ -3347,6 +3347,13 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
} }
DiskOfferingVO newDiskOffering = retrieveAndValidateNewDiskOffering(cmd); DiskOfferingVO newDiskOffering = retrieveAndValidateNewDiskOffering(cmd);
// if no new disk offering was provided, and match is required, default to the offering of the
// original volume. otherwise it falls through with no check and the target volume may
// not work correctly in some scenarios with the target provider. Administrator
// can disable this flag dynamically for certain bulk migration scenarios if required.
if (newDiskOffering == null && Boolean.TRUE.equals(MatchStoragePoolTagsWithDiskOffering.value())) {
newDiskOffering = diskOffering;
}
validateConditionsToReplaceDiskOfferingOfVolume(vol, newDiskOffering, destPool); validateConditionsToReplaceDiskOfferingOfVolume(vol, newDiskOffering, destPool);
if (vm != null) { if (vm != null) {
@ -3432,14 +3439,12 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
Account caller = CallContext.current().getCallingAccount(); Account caller = CallContext.current().getCallingAccount();
DataCenter zone = null; DataCenter zone = null;
Volume volume = _volsDao.findById(cmd.getId()); Volume volume = _volsDao.findById(cmd.getId());
if (volume != null) { if (volume == null) {
throw new InvalidParameterValueException(String.format("Provided volume id is not valid: %s", cmd.getId()));
}
zone = _dcDao.findById(volume.getDataCenterId()); zone = _dcDao.findById(volume.getDataCenterId());
}
_accountMgr.checkAccess(caller, newDiskOffering, zone); _accountMgr.checkAccess(caller, newDiskOffering, zone);
DiskOfferingVO currentDiskOffering = _diskOfferingDao.findById(volume.getDiskOfferingId());
if (VolumeApiServiceImpl.MatchStoragePoolTagsWithDiskOffering.valueIn(zone.getId()) && !doesNewDiskOfferingHasTagsAsOldDiskOffering(currentDiskOffering, newDiskOffering)) {
throw new InvalidParameterValueException(String.format("Existing disk offering storage tags of the volume %s does not contain in the new disk offering %s ", volume.getUuid(), newDiskOffering.getUuid()));
}
return newDiskOffering; return newDiskOffering;
} }
@ -3524,6 +3529,18 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
return doesTargetStorageSupportDiskOffering(destPool, targetStoreTags); return doesTargetStorageSupportDiskOffering(destPool, targetStoreTags);
} }
/**
 * Checks whether a volume can be moved from one disk offering to another without
 * losing storage-tag placement constraints: every storage tag of the old offering
 * must also be present on the new offering.
 *
 * @param oldDO the disk offering currently associated with the volume
 * @param newDO the disk offering the volume is being changed to
 * @return {@code true} if the old offering has no storage tags, or if the new
 *         offering's tags contain all of the old offering's tags (duplicate
 *         cardinality respected); {@code false} otherwise
 */
public static boolean doesNewDiskOfferingHasTagsAsOldDiskOffering(DiskOfferingVO oldDO, DiskOfferingVO newDO) {
    String[] currentTags = oldDO.getTagsArray();
    // Untagged source offering imposes no constraint; any target offering is acceptable.
    if (currentTags.length == 0) {
        return true;
    }
    String[] targetTags = newDO.getTagsArray();
    // Tagged source but untagged target can never satisfy the subset requirement.
    if (targetTags.length == 0) {
        return false;
    }
    // Multiset-aware subset check: each current tag must appear in the target at
    // least as many times as it appears in the source.
    return CollectionUtils.isSubCollection(Arrays.asList(currentTags), Arrays.asList(targetTags));
}
@Override @Override
public boolean doesTargetStorageSupportDiskOffering(StoragePool destPool, String diskOfferingTags) { public boolean doesTargetStorageSupportDiskOffering(StoragePool destPool, String diskOfferingTags) {
Pair<List<String>, Boolean> storagePoolTags = getStoragePoolTags(destPool); Pair<List<String>, Boolean> storagePoolTags = getStoragePoolTags(destPool);
@ -3553,18 +3570,6 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
return result; return result;
} }
/**
 * Checks whether the storage tags of {@code oldDO} form a (cardinality-respecting)
 * subset of the storage tags of {@code newDO}, i.e. whether changing a volume to
 * the new disk offering preserves all storage-tag constraints of the old one.
 *
 * NOTE(review): an identical definition of this method appears earlier in this
 * chunk — this looks like a relocated method rendered twice by the diff view.
 * Confirm the actual file contains exactly one copy.
 *
 * @param oldDO the disk offering currently associated with the volume
 * @param newDO the disk offering the volume is being changed to
 * @return true if the old offering has no tags, or all of its tags are contained
 *         in the new offering's tags; false otherwise
 */
public static boolean doesNewDiskOfferingHasTagsAsOldDiskOffering(DiskOfferingVO oldDO, DiskOfferingVO newDO) {
String[] oldDOStorageTags = oldDO.getTagsArray();
String[] newDOStorageTags = newDO.getTagsArray();
// No tags on the old offering: nothing to preserve.
if (oldDOStorageTags.length == 0) {
return true;
}
// Old offering is tagged but the new one has no tags at all: cannot contain them.
if (newDOStorageTags.length == 0) {
return false;
}
// Subset test that honors duplicate tag cardinality (Apache Commons Collections).
return CollectionUtils.isSubCollection(Arrays.asList(oldDOStorageTags), Arrays.asList(newDOStorageTags));
}
/** /**
* Returns a {@link Pair}, where the first value is the list of the StoragePool tags, and the second value is whether the returned tags are to be interpreted as a rule, * Returns a {@link Pair}, where the first value is the list of the StoragePool tags, and the second value is whether the returned tags are to be interpreted as a rule,
* or a normal list of tags. * or a normal list of tags.

View File

@ -6400,6 +6400,12 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
+ " hypervisors: [%s].", hypervisorType, HYPERVISORS_THAT_CAN_DO_STORAGE_MIGRATION_ON_NON_USER_VMS)); + " hypervisors: [%s].", hypervisorType, HYPERVISORS_THAT_CAN_DO_STORAGE_MIGRATION_ON_NON_USER_VMS));
} }
List<VolumeVO> vols = _volsDao.findByInstance(vm.getId());
if (vols.size() > 1 &&
!(HypervisorType.VMware.equals(hypervisorType) || HypervisorType.KVM.equals(hypervisorType))) {
throw new InvalidParameterValueException("Data disks attached to the vm, can not migrate. Need to detach data disks first");
}
// Check that Vm does not have VM Snapshots // Check that Vm does not have VM Snapshots
if (_vmSnapshotDao.findByVm(vmId).size() > 0) { if (_vmSnapshotDao.findByVm(vmId).size() > 0) {
throw new InvalidParameterValueException("VM's disk cannot be migrated, please remove all the VM Snapshots for this VM"); throw new InvalidParameterValueException("VM's disk cannot be migrated, please remove all the VM Snapshots for this VM");

View File

@ -92,7 +92,9 @@ public class SnapshotHelper {
*/ */
public void expungeTemporarySnapshot(boolean kvmSnapshotOnlyInPrimaryStorage, SnapshotInfo snapInfo) { public void expungeTemporarySnapshot(boolean kvmSnapshotOnlyInPrimaryStorage, SnapshotInfo snapInfo) {
if (!kvmSnapshotOnlyInPrimaryStorage) { if (!kvmSnapshotOnlyInPrimaryStorage) {
if (snapInfo != null) {
logger.trace(String.format("Snapshot [%s] is not a temporary backup to create a volume from snapshot. Not expunging it.", snapInfo.getId())); logger.trace(String.format("Snapshot [%s] is not a temporary backup to create a volume from snapshot. Not expunging it.", snapInfo.getId()));
}
return; return;
} }