mirror of
https://github.com/apache/cloudstack.git
synced 2025-10-26 08:42:29 +01:00
Updates to HPE-Primera and Pure FlashArray Drivers to use Host-based VLUN Assignments (#8889)
* Updates to change PUre and Primera to host-centric vlun assignments; various small bug fixes * update to add timestamp when deleting pure volumes to avoid future conflicts * update to migrate to properly check disk offering is valid for the target storage pool * Updates to change PUre and Primera to host-centric vlun assignments; various small bug fixes * update to add timestamp when deleting pure volumes to avoid future conflicts * update to migrate to properly check disk offering is valid for the target storage pool * improve error handling when copying volumes to add precision to which step failed * rename pure volume before delete to avoid conflicts if the same name is used before its expunged on the array * remove dead code in AdaptiveDataStoreLifeCycleImpl.java * Fix issues found in PR checks * fix session refresh TTL logic * updates from PR comments * logic to delete by path ONLY on supported OUI * fix to StorageSystemDataMotionStrategy compile error * change noisy debug message to trace message * fix double callback call in handleVolumeMigrationFromNonManagedStorageToManagedStorage * fix for flash array delete error * fix typo in StorageSystemDataMotionStrategy * change copyVolume to use writeback to speed up copy ops * remove returning PrimaryStorageDownloadAnswer when connectPhysicalDisk returns false during KVMStorageProcessor template copy * remove change to only set UUID on snapshot if it is a vmSnapshot * reverting change to UserVmManagerImpl.configureCustomRootDiskSize * add error checking/simplification per comments from @slavkap * Update engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java Co-authored-by: Suresh Kumar Anaparti <sureshkumar.anaparti@gmail.com> * address PR comments from @sureshanaparti --------- Co-authored-by: GLOVER RENE <rg9975@cs419-mgmtserver.rg9975nprd.app.ecp.att.com> Co-authored-by: Suresh Kumar Anaparti <sureshkumar.anaparti@gmail.com>
This commit is contained in:
parent
42e71175d7
commit
6ee6603359
@ -1504,18 +1504,17 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
|
||||
|
||||
for (VolumeVO vol : vols) {
|
||||
VolumeInfo volumeInfo = volFactory.getVolume(vol.getId());
|
||||
DataTO volTO = volumeInfo.getTO();
|
||||
DiskTO disk = storageMgr.getDiskWithThrottling(volTO, vol.getVolumeType(), vol.getDeviceId(), vol.getPath(), vm.getServiceOfferingId(), vol.getDiskOfferingId());
|
||||
DataStore dataStore = dataStoreMgr.getDataStore(vol.getPoolId(), DataStoreRole.Primary);
|
||||
|
||||
disk.setDetails(getDetails(volumeInfo, dataStore));
|
||||
|
||||
PrimaryDataStore primaryDataStore = (PrimaryDataStore)dataStore;
|
||||
// This might impact other managed storages, enable requires access for migration in relevant datastore driver (currently enabled for PowerFlex storage pool only)
|
||||
if (primaryDataStore.isManaged() && volService.requiresAccessForMigration(volumeInfo, dataStore)) {
|
||||
volService.grantAccess(volFactory.getVolume(vol.getId()), dest.getHost(), dataStore);
|
||||
}
|
||||
|
||||
// make sure this is done AFTER grantAccess, as grantAccess may change the volume's state
|
||||
DataTO volTO = volumeInfo.getTO();
|
||||
DiskTO disk = storageMgr.getDiskWithThrottling(volTO, vol.getVolumeType(), vol.getDeviceId(), vol.getPath(), vm.getServiceOfferingId(), vol.getDiskOfferingId());
|
||||
disk.setDetails(getDetails(volumeInfo, dataStore));
|
||||
vm.addDisk(disk);
|
||||
}
|
||||
|
||||
|
||||
@ -45,6 +45,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStore;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.Scope;
|
||||
@ -148,6 +149,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
|
||||
private static final int LOCK_TIME_IN_SECONDS = 300;
|
||||
private static final String OPERATION_NOT_SUPPORTED = "This operation is not supported.";
|
||||
|
||||
|
||||
@Inject
|
||||
protected AgentManager agentManager;
|
||||
@Inject
|
||||
@ -685,8 +687,6 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
|
||||
|
||||
private void handleVolumeMigrationFromNonManagedStorageToManagedStorage(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo,
|
||||
AsyncCompletionCallback<CopyCommandResult> callback) {
|
||||
String errMsg = null;
|
||||
|
||||
try {
|
||||
HypervisorType hypervisorType = srcVolumeInfo.getHypervisorType();
|
||||
|
||||
@ -697,37 +697,21 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
|
||||
|
||||
if (HypervisorType.XenServer.equals(hypervisorType)) {
|
||||
handleVolumeMigrationForXenServer(srcVolumeInfo, destVolumeInfo);
|
||||
}
|
||||
else {
|
||||
destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId(), destVolumeInfo.getDataStore());
|
||||
DataTO dataTO = destVolumeInfo.getTO();
|
||||
CopyCmdAnswer copyCmdAnswer = new CopyCmdAnswer(dataTO);
|
||||
CopyCommandResult result = new CopyCommandResult(null, copyCmdAnswer);
|
||||
callback.complete(result);
|
||||
} else {
|
||||
handleVolumeMigrationForKVM(srcVolumeInfo, destVolumeInfo, callback);
|
||||
}
|
||||
}
|
||||
catch (Exception ex) {
|
||||
errMsg = "Migration operation failed in 'StorageSystemDataMotionStrategy.handleVolumeMigrationFromNonManagedStorageToManagedStorage': " +
|
||||
String errMsg = "Migration operation failed in 'StorageSystemDataMotionStrategy.handleVolumeMigrationFromNonManagedStorageToManagedStorage': " +
|
||||
ex.getMessage();
|
||||
|
||||
throw new CloudRuntimeException(errMsg, ex);
|
||||
}
|
||||
finally {
|
||||
CopyCmdAnswer copyCmdAnswer;
|
||||
|
||||
if (errMsg != null) {
|
||||
copyCmdAnswer = new CopyCmdAnswer(errMsg);
|
||||
}
|
||||
else {
|
||||
destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId(), destVolumeInfo.getDataStore());
|
||||
|
||||
DataTO dataTO = destVolumeInfo.getTO();
|
||||
|
||||
copyCmdAnswer = new CopyCmdAnswer(dataTO);
|
||||
}
|
||||
|
||||
CopyCommandResult result = new CopyCommandResult(null, copyCmdAnswer);
|
||||
|
||||
result.setResult(errMsg);
|
||||
|
||||
callback.complete(result);
|
||||
}
|
||||
}
|
||||
|
||||
private void handleVolumeMigrationForXenServer(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo) {
|
||||
@ -846,12 +830,25 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
|
||||
checkAvailableForMigration(vm);
|
||||
|
||||
String errMsg = null;
|
||||
HostVO hostVO = null;
|
||||
try {
|
||||
destVolumeInfo.getDataStore().getDriver().createAsync(destVolumeInfo.getDataStore(), destVolumeInfo, null);
|
||||
VolumeVO volumeVO = _volumeDao.findById(destVolumeInfo.getId());
|
||||
updatePathFromScsiName(volumeVO);
|
||||
destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId(), destVolumeInfo.getDataStore());
|
||||
HostVO hostVO = getHostOnWhichToExecuteMigrationCommand(srcVolumeInfo, destVolumeInfo);
|
||||
hostVO = getHostOnWhichToExecuteMigrationCommand(srcVolumeInfo, destVolumeInfo);
|
||||
|
||||
// if managed we need to grant access
|
||||
PrimaryDataStore pds = (PrimaryDataStore)this.dataStoreMgr.getPrimaryDataStore(destVolumeInfo.getDataStore().getUuid());
|
||||
if (pds == null) {
|
||||
throw new CloudRuntimeException("Unable to find primary data store driver for this volume");
|
||||
}
|
||||
|
||||
// grant access (for managed volumes)
|
||||
_volumeService.grantAccess(destVolumeInfo, hostVO, destVolumeInfo.getDataStore());
|
||||
|
||||
// re-retrieve volume to get any updated information from grant
|
||||
destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId(), destVolumeInfo.getDataStore());
|
||||
|
||||
// migrate the volume via the hypervisor
|
||||
String path = migrateVolumeForKVM(srcVolumeInfo, destVolumeInfo, hostVO, "Unable to migrate the volume from non-managed storage to managed storage");
|
||||
@ -872,6 +869,18 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
|
||||
throw new CloudRuntimeException(errMsg, ex);
|
||||
}
|
||||
} finally {
|
||||
// revoke access (for managed volumes)
|
||||
if (hostVO != null) {
|
||||
try {
|
||||
_volumeService.revokeAccess(destVolumeInfo, hostVO, destVolumeInfo.getDataStore());
|
||||
} catch (Exception e) {
|
||||
LOGGER.warn(String.format("Failed to revoke access for volume 'name=%s,uuid=%s' after a migration attempt", destVolumeInfo.getVolume(), destVolumeInfo.getUuid()), e);
|
||||
}
|
||||
}
|
||||
|
||||
// re-retrieve volume to get any updated information from grant
|
||||
destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId(), destVolumeInfo.getDataStore());
|
||||
|
||||
CopyCmdAnswer copyCmdAnswer;
|
||||
if (errMsg != null) {
|
||||
copyCmdAnswer = new CopyCmdAnswer(errMsg);
|
||||
@ -922,6 +931,125 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
|
||||
return hostVO;
|
||||
}
|
||||
|
||||
private VolumeInfo createTemporaryVolumeCopyOfSnapshotAdaptive(SnapshotInfo snapshotInfo) {
|
||||
VolumeInfo tempVolumeInfo = null;
|
||||
VolumeVO tempVolumeVO = null;
|
||||
try {
|
||||
tempVolumeVO = new VolumeVO(Volume.Type.DATADISK, snapshotInfo.getName() + "_" + System.currentTimeMillis() + ".TMP",
|
||||
snapshotInfo.getDataCenterId(), snapshotInfo.getDomainId(), snapshotInfo.getAccountId(), 0, ProvisioningType.THIN, snapshotInfo.getSize(), 0L, 0L, "");
|
||||
tempVolumeVO.setPoolId(snapshotInfo.getDataStore().getId());
|
||||
_volumeDao.persist(tempVolumeVO);
|
||||
tempVolumeInfo = this._volFactory.getVolume(tempVolumeVO.getId());
|
||||
|
||||
if (snapshotInfo.getDataStore().getDriver().canCopy(snapshotInfo, tempVolumeInfo)) {
|
||||
snapshotInfo.getDataStore().getDriver().copyAsync(snapshotInfo, tempVolumeInfo, null, null);
|
||||
// refresh volume info as data could have changed
|
||||
tempVolumeInfo = this._volFactory.getVolume(tempVolumeVO.getId());
|
||||
} else {
|
||||
throw new CloudRuntimeException("Storage driver indicated it could create a volume from the snapshot but rejected the subsequent request to do so");
|
||||
}
|
||||
return tempVolumeInfo;
|
||||
} catch (Throwable e) {
|
||||
try {
|
||||
if (tempVolumeInfo != null) {
|
||||
tempVolumeInfo.getDataStore().getDriver().deleteAsync(tempVolumeInfo.getDataStore(), tempVolumeInfo, null);
|
||||
}
|
||||
|
||||
// cleanup temporary volume
|
||||
if (tempVolumeVO != null) {
|
||||
_volumeDao.remove(tempVolumeVO.getId());
|
||||
}
|
||||
} catch (Throwable e2) {
|
||||
LOGGER.warn("Failed to delete temporary volume created for copy", e2);
|
||||
}
|
||||
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Simplier logic for copy from snapshot for adaptive driver only.
|
||||
* @param snapshotInfo
|
||||
* @param destData
|
||||
* @param callback
|
||||
*/
|
||||
private void handleCopyAsyncToSecondaryStorageAdaptive(SnapshotInfo snapshotInfo, DataObject destData, AsyncCompletionCallback<CopyCommandResult> callback) {
|
||||
CopyCmdAnswer copyCmdAnswer = null;
|
||||
DataObject srcFinal = null;
|
||||
HostVO hostVO = null;
|
||||
DataStore srcDataStore = null;
|
||||
boolean tempRequired = false;
|
||||
|
||||
try {
|
||||
snapshotInfo.processEvent(Event.CopyingRequested);
|
||||
hostVO = getHost(snapshotInfo);
|
||||
DataObject destOnStore = destData;
|
||||
srcDataStore = snapshotInfo.getDataStore();
|
||||
int primaryStorageDownloadWait = StorageManager.PRIMARY_STORAGE_DOWNLOAD_WAIT.value();
|
||||
CopyCommand copyCommand = null;
|
||||
if (!Boolean.parseBoolean(srcDataStore.getDriver().getCapabilities().get("CAN_DIRECT_ATTACH_SNAPSHOT"))) {
|
||||
srcFinal = createTemporaryVolumeCopyOfSnapshotAdaptive(snapshotInfo);
|
||||
tempRequired = true;
|
||||
} else {
|
||||
srcFinal = snapshotInfo;
|
||||
}
|
||||
|
||||
_volumeService.grantAccess(srcFinal, hostVO, srcDataStore);
|
||||
|
||||
DataTO srcTo = srcFinal.getTO();
|
||||
|
||||
// have to set PATH as extraOptions due to logic in KVM hypervisor processor
|
||||
HashMap<String,String> extraDetails = new HashMap<>();
|
||||
extraDetails.put(DiskTO.PATH, srcTo.getPath());
|
||||
|
||||
copyCommand = new CopyCommand(srcFinal.getTO(), destOnStore.getTO(), primaryStorageDownloadWait,
|
||||
VirtualMachineManager.ExecuteInSequence.value());
|
||||
copyCommand.setOptions(extraDetails);
|
||||
copyCmdAnswer = (CopyCmdAnswer)agentManager.send(hostVO.getId(), copyCommand);
|
||||
} catch (Exception ex) {
|
||||
String msg = "Failed to create template from snapshot (Snapshot ID = " + snapshotInfo.getId() + ") : ";
|
||||
LOGGER.warn(msg, ex);
|
||||
throw new CloudRuntimeException(msg + ex.getMessage(), ex);
|
||||
}
|
||||
finally {
|
||||
// remove access tot he volume that was used
|
||||
if (srcFinal != null && hostVO != null && srcDataStore != null) {
|
||||
_volumeService.revokeAccess(srcFinal, hostVO, srcDataStore);
|
||||
}
|
||||
|
||||
// delete the temporary volume if it was needed
|
||||
if (srcFinal != null && tempRequired) {
|
||||
try {
|
||||
srcFinal.getDataStore().getDriver().deleteAsync(srcFinal.getDataStore(), srcFinal, null);
|
||||
} catch (Throwable e) {
|
||||
LOGGER.warn("Failed to delete temporary volume created for copy", e);
|
||||
}
|
||||
}
|
||||
|
||||
// check we have a reasonable result
|
||||
String errMsg = null;
|
||||
if (copyCmdAnswer == null || (!copyCmdAnswer.getResult() && copyCmdAnswer.getDetails() == null)) {
|
||||
errMsg = "Unable to create template from snapshot";
|
||||
copyCmdAnswer = new CopyCmdAnswer(errMsg);
|
||||
} else if (!copyCmdAnswer.getResult() && StringUtils.isEmpty(copyCmdAnswer.getDetails())) {
|
||||
errMsg = "Unable to create template from snapshot";
|
||||
} else if (!copyCmdAnswer.getResult()) {
|
||||
errMsg = copyCmdAnswer.getDetails();
|
||||
}
|
||||
|
||||
//submit processEvent
|
||||
if (StringUtils.isEmpty(errMsg)) {
|
||||
snapshotInfo.processEvent(Event.OperationSuccessed);
|
||||
} else {
|
||||
snapshotInfo.processEvent(Event.OperationFailed);
|
||||
}
|
||||
|
||||
CopyCommandResult result = new CopyCommandResult(null, copyCmdAnswer);
|
||||
result.setResult(copyCmdAnswer.getDetails());
|
||||
callback.complete(result);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* This function is responsible for copying a snapshot from managed storage to secondary storage. This is used in the following two cases:
|
||||
* 1) When creating a template from a snapshot
|
||||
@ -932,6 +1060,13 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
|
||||
* @param callback callback for async
|
||||
*/
|
||||
private void handleCopyAsyncToSecondaryStorage(SnapshotInfo snapshotInfo, DataObject destData, AsyncCompletionCallback<CopyCommandResult> callback) {
|
||||
|
||||
// if this flag is set (true or false), we will fall out to use simplier logic for the Adaptive handler
|
||||
if (snapshotInfo.getDataStore().getDriver().getCapabilities().get("CAN_DIRECT_ATTACH_SNAPSHOT") != null) {
|
||||
handleCopyAsyncToSecondaryStorageAdaptive(snapshotInfo, destData, callback);
|
||||
return;
|
||||
}
|
||||
|
||||
String errMsg = null;
|
||||
CopyCmdAnswer copyCmdAnswer = null;
|
||||
boolean usingBackendSnapshot = false;
|
||||
@ -1698,14 +1833,13 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
|
||||
private CopyCmdAnswer copyImageToVolume(DataObject srcDataObject, VolumeInfo destVolumeInfo, HostVO hostVO) {
|
||||
int primaryStorageDownloadWait = StorageManager.PRIMARY_STORAGE_DOWNLOAD_WAIT.value();
|
||||
|
||||
CopyCommand copyCommand = new CopyCommand(srcDataObject.getTO(), destVolumeInfo.getTO(), primaryStorageDownloadWait,
|
||||
VirtualMachineManager.ExecuteInSequence.value());
|
||||
|
||||
CopyCmdAnswer copyCmdAnswer;
|
||||
|
||||
try {
|
||||
_volumeService.grantAccess(destVolumeInfo, hostVO, destVolumeInfo.getDataStore());
|
||||
|
||||
CopyCommand copyCommand = new CopyCommand(srcDataObject.getTO(), destVolumeInfo.getTO(), primaryStorageDownloadWait,
|
||||
VirtualMachineManager.ExecuteInSequence.value());
|
||||
Map<String, String> destDetails = getVolumeDetails(destVolumeInfo);
|
||||
|
||||
copyCommand.setOptions2(destDetails);
|
||||
@ -1730,42 +1864,6 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
|
||||
return copyCmdAnswer;
|
||||
}
|
||||
|
||||
/**
|
||||
* Use normal volume semantics (create a volume known to cloudstack, ask the storage driver to create it as a copy of the snapshot)
|
||||
|
||||
* @param volumeVO
|
||||
* @param snapshotInfo
|
||||
*/
|
||||
public void prepTempVolumeForCopyFromSnapshot(SnapshotInfo snapshotInfo) {
|
||||
VolumeVO volumeVO = null;
|
||||
try {
|
||||
volumeVO = new VolumeVO(Volume.Type.DATADISK, snapshotInfo.getName() + "_" + System.currentTimeMillis() + ".TMP",
|
||||
snapshotInfo.getDataCenterId(), snapshotInfo.getDomainId(), snapshotInfo.getAccountId(), 0, ProvisioningType.THIN, snapshotInfo.getSize(), 0L, 0L, "");
|
||||
volumeVO.setPoolId(snapshotInfo.getDataStore().getId());
|
||||
_volumeDao.persist(volumeVO);
|
||||
VolumeInfo tempVolumeInfo = this._volFactory.getVolume(volumeVO.getId());
|
||||
|
||||
if (snapshotInfo.getDataStore().getDriver().canCopy(snapshotInfo, tempVolumeInfo)) {
|
||||
snapshotInfo.getDataStore().getDriver().copyAsync(snapshotInfo, tempVolumeInfo, null, null);
|
||||
// refresh volume info as data could have changed
|
||||
tempVolumeInfo = this._volFactory.getVolume(volumeVO.getId());
|
||||
// save the "temp" volume info into the snapshot details (we need this to clean up at the end)
|
||||
_snapshotDetailsDao.addDetail(snapshotInfo.getId(), "TemporaryVolumeCopyUUID", tempVolumeInfo.getUuid(), true);
|
||||
_snapshotDetailsDao.addDetail(snapshotInfo.getId(), "TemporaryVolumeCopyPath", tempVolumeInfo.getPath(), true);
|
||||
// NOTE: for this to work, the Driver must return a custom SnapshotObjectTO object from getTO()
|
||||
// whenever the TemporaryVolumeCopyPath is set.
|
||||
} else {
|
||||
throw new CloudRuntimeException("Storage driver indicated it could create a volume from the snapshot but rejected the subsequent request to do so");
|
||||
}
|
||||
} catch (Throwable e) {
|
||||
// cleanup temporary volume
|
||||
if (volumeVO != null) {
|
||||
_volumeDao.remove(volumeVO.getId());
|
||||
}
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* If the underlying storage system is making use of read-only snapshots, this gives the storage system the opportunity to
|
||||
* create a volume from the snapshot so that we can copy the VHD file that should be inside of the snapshot to secondary storage.
|
||||
@ -1777,13 +1875,8 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
|
||||
* resign the SR and the VDI that should be inside of the snapshot before copying the VHD file to secondary storage.
|
||||
*/
|
||||
private void createVolumeFromSnapshot(SnapshotInfo snapshotInfo) {
|
||||
if ("true".equalsIgnoreCase(snapshotInfo.getDataStore().getDriver().getCapabilities().get("CAN_CREATE_TEMP_VOLUME_FROM_SNAPSHOT"))) {
|
||||
prepTempVolumeForCopyFromSnapshot(snapshotInfo);
|
||||
return;
|
||||
|
||||
}
|
||||
|
||||
SnapshotDetailsVO snapshotDetails = handleSnapshotDetails(snapshotInfo.getId(), "create");
|
||||
|
||||
try {
|
||||
snapshotInfo.getDataStore().getDriver().createAsync(snapshotInfo.getDataStore(), snapshotInfo, null);
|
||||
}
|
||||
@ -1798,31 +1891,20 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
|
||||
* invocation of createVolumeFromSnapshot(SnapshotInfo).
|
||||
*/
|
||||
private void deleteVolumeFromSnapshot(SnapshotInfo snapshotInfo) {
|
||||
VolumeVO volumeVO = null;
|
||||
// cleanup any temporary volume previously created for copy from a snapshot
|
||||
if ("true".equalsIgnoreCase(snapshotInfo.getDataStore().getDriver().getCapabilities().get("CAN_CREATE_TEMP_VOLUME_FROM_SNAPSHOT"))) {
|
||||
SnapshotDetailsVO tempUuid = null;
|
||||
tempUuid = _snapshotDetailsDao.findDetail(snapshotInfo.getId(), "TemporaryVolumeCopyUUID");
|
||||
if (tempUuid == null || tempUuid.getValue() == null) {
|
||||
return;
|
||||
}
|
||||
|
||||
volumeVO = _volumeDao.findByUuid(tempUuid.getValue());
|
||||
if (volumeVO != null) {
|
||||
_volumeDao.remove(volumeVO.getId());
|
||||
}
|
||||
_snapshotDetailsDao.remove(tempUuid.getId());
|
||||
_snapshotDetailsDao.removeDetail(snapshotInfo.getId(), "TemporaryVolumeCopyUUID");
|
||||
return;
|
||||
}
|
||||
|
||||
SnapshotDetailsVO snapshotDetails = handleSnapshotDetails(snapshotInfo.getId(), "delete");
|
||||
|
||||
try {
|
||||
snapshotInfo.getDataStore().getDriver().createAsync(snapshotInfo.getDataStore(), snapshotInfo, null);
|
||||
}
|
||||
finally {
|
||||
_snapshotDetailsDao.remove(snapshotDetails.getId());
|
||||
LOGGER.debug("Cleaning up temporary volume created for copy from a snapshot");
|
||||
|
||||
SnapshotDetailsVO snapshotDetails = handleSnapshotDetails(snapshotInfo.getId(), "delete");
|
||||
|
||||
try {
|
||||
snapshotInfo.getDataStore().getDriver().createAsync(snapshotInfo.getDataStore(), snapshotInfo, null);
|
||||
}
|
||||
finally {
|
||||
_snapshotDetailsDao.remove(snapshotDetails.getId());
|
||||
}
|
||||
|
||||
} catch (Throwable e) {
|
||||
LOGGER.warn("Failed to clean up temporary volume created for copy from a snapshot, transction will not be failed but an adminstrator should clean this up: " + snapshotInfo.getUuid() + " - " + snapshotInfo.getPath(), e);
|
||||
}
|
||||
}
|
||||
|
||||
@ -2459,15 +2541,15 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
|
||||
|
||||
int primaryStorageDownloadWait = StorageManager.PRIMARY_STORAGE_DOWNLOAD_WAIT.value();
|
||||
|
||||
CopyCommand copyCommand = new CopyCommand(volumeInfo.getTO(), templateInfo.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value());
|
||||
|
||||
try {
|
||||
handleQualityOfServiceForVolumeMigration(volumeInfo, PrimaryDataStoreDriver.QualityOfServiceState.MIGRATION);
|
||||
|
||||
if (srcVolumeDetached || StoragePoolType.PowerFlex == storagePoolVO.getPoolType()) {
|
||||
if (srcVolumeDetached || StoragePoolType.PowerFlex == storagePoolVO.getPoolType() || StoragePoolType.FiberChannel == storagePoolVO.getPoolType()) {
|
||||
_volumeService.grantAccess(volumeInfo, hostVO, srcDataStore);
|
||||
}
|
||||
|
||||
CopyCommand copyCommand = new CopyCommand(volumeInfo.getTO(), templateInfo.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value());
|
||||
|
||||
Map<String, String> srcDetails = getVolumeDetails(volumeInfo);
|
||||
|
||||
copyCommand.setOptions(srcDetails);
|
||||
@ -2496,7 +2578,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
|
||||
throw new CloudRuntimeException(msg + ex.getMessage(), ex);
|
||||
}
|
||||
finally {
|
||||
if (srcVolumeDetached || StoragePoolType.PowerFlex == storagePoolVO.getPoolType()) {
|
||||
if (srcVolumeDetached || StoragePoolType.PowerFlex == storagePoolVO.getPoolType() || StoragePoolType.FiberChannel == storagePoolVO.getPoolType()) {
|
||||
try {
|
||||
_volumeService.revokeAccess(volumeInfo, hostVO, srcDataStore);
|
||||
}
|
||||
@ -2591,13 +2673,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
|
||||
|
||||
long snapshotId = snapshotInfo.getId();
|
||||
|
||||
// if the snapshot required a temporary volume be created check if the UUID is set so we can
|
||||
// retrieve the temporary volume's path to use during remote copy
|
||||
List<SnapshotDetailsVO> storedDetails = _snapshotDetailsDao.findDetails(snapshotInfo.getId(), "TemporaryVolumeCopyPath");
|
||||
if (storedDetails != null && storedDetails.size() > 0) {
|
||||
String value = storedDetails.get(0).getValue();
|
||||
snapshotDetails.put(DiskTO.PATH, value);
|
||||
} else if (storagePoolVO.getPoolType() == StoragePoolType.PowerFlex || storagePoolVO.getPoolType() == StoragePoolType.FiberChannel) {
|
||||
if (storagePoolVO.getPoolType() == StoragePoolType.PowerFlex || storagePoolVO.getPoolType() == StoragePoolType.FiberChannel) {
|
||||
snapshotDetails.put(DiskTO.IQN, snapshotInfo.getPath());
|
||||
} else {
|
||||
snapshotDetails.put(DiskTO.IQN, getSnapshotProperty(snapshotId, DiskTO.IQN));
|
||||
@ -2813,6 +2889,8 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
|
||||
Map<String, String> srcDetails = getVolumeDetails(srcVolumeInfo);
|
||||
Map<String, String> destDetails = getVolumeDetails(destVolumeInfo);
|
||||
|
||||
_volumeService.grantAccess(srcVolumeInfo, hostVO, srcVolumeInfo.getDataStore());
|
||||
|
||||
MigrateVolumeCommand migrateVolumeCommand = new MigrateVolumeCommand(srcVolumeInfo.getTO(), destVolumeInfo.getTO(),
|
||||
srcDetails, destDetails, StorageManager.KvmStorageOfflineMigrationWait.value());
|
||||
|
||||
@ -2855,18 +2933,18 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
|
||||
StoragePoolVO storagePoolVO = _storagePoolDao.findById(srcVolumeInfo.getPoolId());
|
||||
Map<String, String> srcDetails = getVolumeDetails(srcVolumeInfo);
|
||||
|
||||
CopyVolumeCommand copyVolumeCommand = new CopyVolumeCommand(srcVolumeInfo.getId(), destVolumeInfo.getPath(), storagePoolVO,
|
||||
destVolumeInfo.getDataStore().getUri(), true, StorageManager.KvmStorageOfflineMigrationWait.value(), true);
|
||||
|
||||
copyVolumeCommand.setSrcData(srcVolumeInfo.getTO());
|
||||
copyVolumeCommand.setSrcDetails(srcDetails);
|
||||
|
||||
handleQualityOfServiceForVolumeMigration(srcVolumeInfo, PrimaryDataStoreDriver.QualityOfServiceState.MIGRATION);
|
||||
|
||||
if (srcVolumeDetached) {
|
||||
_volumeService.grantAccess(srcVolumeInfo, hostVO, srcVolumeInfo.getDataStore());
|
||||
}
|
||||
|
||||
CopyVolumeCommand copyVolumeCommand = new CopyVolumeCommand(srcVolumeInfo.getId(), destVolumeInfo.getPath(), storagePoolVO,
|
||||
destVolumeInfo.getDataStore().getUri(), true, StorageManager.KvmStorageOfflineMigrationWait.value(), true);
|
||||
|
||||
copyVolumeCommand.setSrcData(srcVolumeInfo.getTO());
|
||||
copyVolumeCommand.setSrcDetails(srcDetails);
|
||||
|
||||
CopyVolumeAnswer copyVolumeAnswer = (CopyVolumeAnswer)agentManager.send(hostVO.getId(), copyVolumeCommand);
|
||||
|
||||
if (copyVolumeAnswer == null || !copyVolumeAnswer.getResult()) {
|
||||
@ -2938,19 +3016,20 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
|
||||
srcData = cacheData;
|
||||
}
|
||||
|
||||
CopyCommand copyCommand = new CopyCommand(srcData.getTO(), volumeInfo.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value());
|
||||
|
||||
try {
|
||||
CopyCommand copyCommand = null;
|
||||
if (Snapshot.LocationType.PRIMARY.equals(locationType)) {
|
||||
_volumeService.grantAccess(snapshotInfo, hostVO, snapshotInfo.getDataStore());
|
||||
|
||||
Map<String, String> srcDetails = getSnapshotDetails(snapshotInfo);
|
||||
|
||||
copyCommand = new CopyCommand(srcData.getTO(), volumeInfo.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value());
|
||||
copyCommand.setOptions(srcDetails);
|
||||
} else {
|
||||
_volumeService.grantAccess(volumeInfo, hostVO, volumeInfo.getDataStore());
|
||||
copyCommand = new CopyCommand(srcData.getTO(), volumeInfo.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value());
|
||||
}
|
||||
|
||||
_volumeService.grantAccess(volumeInfo, hostVO, volumeInfo.getDataStore());
|
||||
|
||||
Map<String, String> destDetails = getVolumeDetails(volumeInfo);
|
||||
|
||||
copyCommand.setOptions2(destDetails);
|
||||
|
||||
@ -101,7 +101,9 @@ public class DefaultModuleDefinitionSet implements ModuleDefinitionSet {
|
||||
log.debug(String.format("Trying to obtain module [%s] context.", moduleDefinitionName));
|
||||
ApplicationContext context = getApplicationContext(moduleDefinitionName);
|
||||
try {
|
||||
if (context.containsBean("moduleStartup")) {
|
||||
if (context == null) {
|
||||
log.warn(String.format("Application context not found for module definition [%s]", moduleDefinitionName));
|
||||
} else if (context.containsBean("moduleStartup")) {
|
||||
Runnable runnable = context.getBean("moduleStartup", Runnable.class);
|
||||
log.info(String.format("Starting module [%s].", moduleDefinitionName));
|
||||
runnable.run();
|
||||
|
||||
@ -122,7 +122,9 @@ public class DynamicRoleBasedAPIAccessChecker extends AdapterBase implements API
|
||||
}
|
||||
|
||||
if (accountRole.getRoleType() == RoleType.Admin && accountRole.getId() == RoleType.Admin.getId()) {
|
||||
LOGGER.info(String.format("Account [%s] is Root Admin or Domain Admin, all APIs are allowed.", account));
|
||||
if (LOGGER.isTraceEnabled()) {
|
||||
LOGGER.trace(String.format("Account [%s] is Root Admin or Domain Admin, all APIs are allowed.", account));
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
@ -76,7 +76,9 @@ public class ProjectRoleBasedApiAccessChecker extends AdapterBase implements AP
|
||||
|
||||
Project project = CallContext.current().getProject();
|
||||
if (project == null) {
|
||||
LOGGER.warn(String.format("Project is null, ProjectRoleBasedApiAccessChecker only applies to projects, returning APIs [%s] for user [%s] as allowed.", apiNames, user));
|
||||
if (LOGGER.isTraceEnabled()) {
|
||||
LOGGER.trace(String.format("Project is null, ProjectRoleBasedApiAccessChecker only applies to projects, returning APIs [%s] for user [%s] as allowed.", apiNames, user));
|
||||
}
|
||||
return apiNames;
|
||||
}
|
||||
|
||||
@ -114,8 +116,10 @@ public class ProjectRoleBasedApiAccessChecker extends AdapterBase implements AP
|
||||
|
||||
Project project = CallContext.current().getProject();
|
||||
if (project == null) {
|
||||
LOGGER.warn(String.format("Project is null, ProjectRoleBasedApiAccessChecker only applies to projects, returning API [%s] for user [%s] as allowed.", apiCommandName,
|
||||
if (LOGGER.isTraceEnabled()) {
|
||||
LOGGER.trace(String.format("Project is null, ProjectRoleBasedApiAccessChecker only applies to projects, returning API [%s] for user [%s] as allowed.", apiCommandName,
|
||||
user));
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
@ -302,15 +302,27 @@ public class LibvirtMigrateVolumeCommandWrapper extends CommandWrapper<MigrateVo
|
||||
(destVolumeObjectTO.getPath() != null ? destVolumeObjectTO.getPath() : UUID.randomUUID().toString());
|
||||
|
||||
try {
|
||||
storagePoolManager.connectPhysicalDisk(srcPrimaryDataStore.getPoolType(), srcPrimaryDataStore.getUuid(), srcPath, srcDetails);
|
||||
KVMStoragePool sourceStoragePool = storagePoolManager.getStoragePool(srcPrimaryDataStore.getPoolType(), srcPrimaryDataStore.getUuid());
|
||||
|
||||
if (!sourceStoragePool.connectPhysicalDisk(srcPath, srcDetails)) {
|
||||
return new MigrateVolumeAnswer(command, false, "Unable to connect source volume on hypervisor", srcPath);
|
||||
}
|
||||
|
||||
KVMPhysicalDisk srcPhysicalDisk = storagePoolManager.getPhysicalDisk(srcPrimaryDataStore.getPoolType(), srcPrimaryDataStore.getUuid(), srcPath);
|
||||
if (srcPhysicalDisk == null) {
|
||||
return new MigrateVolumeAnswer(command, false, "Unable to get handle to source volume on hypervisor", srcPath);
|
||||
}
|
||||
|
||||
KVMStoragePool destPrimaryStorage = storagePoolManager.getStoragePool(destPrimaryDataStore.getPoolType(), destPrimaryDataStore.getUuid());
|
||||
|
||||
storagePoolManager.connectPhysicalDisk(destPrimaryDataStore.getPoolType(), destPrimaryDataStore.getUuid(), destPath, destDetails);
|
||||
if (!destPrimaryStorage.connectPhysicalDisk(destPath, destDetails)) {
|
||||
return new MigrateVolumeAnswer(command, false, "Unable to connect destination volume on hypervisor", srcPath);
|
||||
}
|
||||
|
||||
storagePoolManager.copyPhysicalDisk(srcPhysicalDisk, destPath, destPrimaryStorage, command.getWaitInMillSeconds());
|
||||
KVMPhysicalDisk newDiskCopy = storagePoolManager.copyPhysicalDisk(srcPhysicalDisk, destPath, destPrimaryStorage, command.getWaitInMillSeconds());
|
||||
if (newDiskCopy == null) {
|
||||
return new MigrateVolumeAnswer(command, false, "Copy command failed to return handle to copied physical disk", destPath);
|
||||
}
|
||||
}
|
||||
catch (Exception ex) {
|
||||
return new MigrateVolumeAnswer(command, false, ex.getMessage(), null);
|
||||
|
||||
@ -16,13 +16,36 @@
|
||||
// under the License.
|
||||
package com.cloud.hypervisor.kvm.storage;
|
||||
|
||||
import java.net.InetAddress;
|
||||
import java.net.UnknownHostException;
|
||||
|
||||
import org.apache.log4j.Logger;
|
||||
|
||||
import com.cloud.storage.Storage;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
|
||||
@StorageAdaptorInfo(storagePoolType=Storage.StoragePoolType.FiberChannel)
|
||||
public class FiberChannelAdapter extends MultipathSCSIAdapterBase {
|
||||
|
||||
private Logger LOGGER = Logger.getLogger(getClass());
|
||||
|
||||
private String hostname = null;
|
||||
private String hostnameFq = null;
|
||||
|
||||
public FiberChannelAdapter() {
|
||||
LOGGER.info("Loaded FiberChannelAdapter for StorageLayer");
|
||||
// get the hostname - we need this to compare to connid values
|
||||
try {
|
||||
InetAddress inetAddress = InetAddress.getLocalHost();
|
||||
hostname = inetAddress.getHostName(); // basic hostname
|
||||
if (hostname.indexOf(".") > 0) {
|
||||
hostname = hostname.substring(0, hostname.indexOf(".")); // strip off domain
|
||||
}
|
||||
hostnameFq = inetAddress.getCanonicalHostName(); // fully qualified hostname
|
||||
LOGGER.info("Loaded FiberChannelAdapter for StorageLayer on host [" + hostname + "]");
|
||||
} catch (UnknownHostException e) {
|
||||
LOGGER.error("Error getting hostname", e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -72,6 +95,11 @@ public class FiberChannelAdapter extends MultipathSCSIAdapterBase {
|
||||
address = value;
|
||||
} else if (key.equals("connid")) {
|
||||
connectionId = value;
|
||||
} else if (key.startsWith("connid.")) {
|
||||
String inHostname = key.substring(7);
|
||||
if (inHostname != null && (inHostname.equals(this.hostname) || inHostname.equals(this.hostnameFq))) {
|
||||
connectionId = value;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -268,7 +268,7 @@ public class KVMStorageProcessor implements StorageProcessor {
|
||||
|
||||
Map<String, String> details = primaryStore.getDetails();
|
||||
|
||||
String path = details != null ? details.get("managedStoreTarget") : null;
|
||||
String path = derivePath(primaryStore, destData, details);
|
||||
|
||||
if (!storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path, details)) {
|
||||
s_logger.warn("Failed to connect physical disk at path: " + path + ", in storage pool id: " + primaryStore.getUuid());
|
||||
@ -328,6 +328,16 @@ public class KVMStorageProcessor implements StorageProcessor {
|
||||
}
|
||||
}
|
||||
|
||||
private String derivePath(PrimaryDataStoreTO primaryStore, DataTO destData, Map<String, String> details) {
|
||||
String path = null;
|
||||
if (primaryStore.getPoolType() == StoragePoolType.FiberChannel) {
|
||||
path = destData.getPath();
|
||||
} else {
|
||||
path = details != null ? details.get("managedStoreTarget") : null;
|
||||
}
|
||||
return path;
|
||||
}
|
||||
|
||||
// this is much like PrimaryStorageDownloadCommand, but keeping it separate. copies template direct to root disk
|
||||
private KVMPhysicalDisk templateToPrimaryDownload(final String templateUrl, final KVMStoragePool primaryPool, final String volUuid, final Long size, final int timeout) {
|
||||
final int index = templateUrl.lastIndexOf("/");
|
||||
@ -407,7 +417,7 @@ public class KVMStorageProcessor implements StorageProcessor {
|
||||
vol = templateToPrimaryDownload(templatePath, primaryPool, volume.getUuid(), volume.getSize(), cmd.getWaitInMillSeconds());
|
||||
} if (primaryPool.getType() == StoragePoolType.PowerFlex) {
|
||||
Map<String, String> details = primaryStore.getDetails();
|
||||
String path = details != null ? details.get("managedStoreTarget") : null;
|
||||
String path = derivePath(primaryStore, destData, details);
|
||||
|
||||
if (!storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), templatePath, details)) {
|
||||
s_logger.warn("Failed to connect base template volume at path: " + templatePath + ", in storage pool id: " + primaryStore.getUuid());
|
||||
@ -1048,7 +1058,7 @@ public class KVMStorageProcessor implements StorageProcessor {
|
||||
srcVolume.clearPassphrase();
|
||||
if (isCreatedFromVmSnapshot) {
|
||||
s_logger.debug("Ignoring removal of vm snapshot on primary as this snapshot is created from vm snapshot");
|
||||
} else if (primaryPool.getType() != StoragePoolType.RBD) {
|
||||
} else if (primaryPool != null && primaryPool.getType() != StoragePoolType.RBD) {
|
||||
deleteSnapshotOnPrimary(cmd, snapshot, primaryPool);
|
||||
}
|
||||
|
||||
@ -2482,8 +2492,7 @@ public class KVMStorageProcessor implements StorageProcessor {
|
||||
if (!storagePoolMgr.connectPhysicalDisk(destPrimaryStore.getPoolType(), destPrimaryStore.getUuid(), destVolumePath, destPrimaryStore.getDetails())) {
|
||||
s_logger.warn("Failed to connect dest volume at path: " + destVolumePath + ", in storage pool id: " + destPrimaryStore.getUuid());
|
||||
}
|
||||
String managedStoreTarget = destPrimaryStore.getDetails() != null ? destPrimaryStore.getDetails().get("managedStoreTarget") : null;
|
||||
destVolumeName = managedStoreTarget != null ? managedStoreTarget : destVolumePath;
|
||||
destVolumeName = derivePath(destPrimaryStore, destData, destPrimaryStore.getDetails());
|
||||
} else {
|
||||
final String volumeName = UUID.randomUUID().toString();
|
||||
destVolumeName = volumeName + "." + destFormat.getFileExtension();
|
||||
|
||||
@ -21,20 +21,17 @@ import java.io.BufferedReader;
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStreamReader;
|
||||
import java.util.Arrays;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Timer;
|
||||
import java.util.TimerTask;
|
||||
import java.util.UUID;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import org.apache.cloudstack.utils.qemu.QemuImg;
|
||||
import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat;
|
||||
import org.apache.cloudstack.utils.qemu.QemuImgException;
|
||||
import org.apache.cloudstack.utils.qemu.QemuImgFile;
|
||||
import org.apache.log4j.Logger;
|
||||
import org.joda.time.Duration;
|
||||
|
||||
import com.cloud.storage.Storage;
|
||||
import com.cloud.storage.StorageManager;
|
||||
@ -43,8 +40,7 @@ import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import com.cloud.utils.script.OutputInterpreter;
|
||||
import com.cloud.utils.script.Script;
|
||||
import org.apache.commons.lang3.StringUtils;
|
||||
import org.libvirt.LibvirtException;
|
||||
import org.joda.time.Duration;
|
||||
import org.apache.log4j.Logger;
|
||||
|
||||
public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
|
||||
static final Logger LOGGER = Logger.getLogger(MultipathSCSIAdapterBase.class);
|
||||
@ -55,6 +51,14 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
|
||||
*/
|
||||
static byte[] CLEANUP_LOCK = new byte[0];
|
||||
|
||||
/**
|
||||
* List of supported OUI's (needed for path-based cleanup logic on disconnects after live migrations)
|
||||
*/
|
||||
static String[] SUPPORTED_OUI_LIST = {
|
||||
"0002ac", // HPE Primera 3PAR
|
||||
"24a937" // Pure Flasharray
|
||||
};
|
||||
|
||||
/**
|
||||
* Property keys and defaults
|
||||
*/
|
||||
@ -82,6 +86,7 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
|
||||
* Initialize static program-wide configurations and background jobs
|
||||
*/
|
||||
static {
|
||||
|
||||
long cleanupFrequency = CLEANUP_FREQUENCY_SECS.getFinalValue() * 1000;
|
||||
boolean cleanupEnabled = CLEANUP_ENABLED.getFinalValue();
|
||||
|
||||
@ -96,16 +101,13 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
|
||||
throw new Error("Unable to find the disconnectVolume.sh script");
|
||||
}
|
||||
|
||||
resizeScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), resizeScript);
|
||||
if (resizeScript == null) {
|
||||
throw new Error("Unable to find the resizeVolume.sh script");
|
||||
}
|
||||
|
||||
copyScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), copyScript);
|
||||
if (copyScript == null) {
|
||||
throw new Error("Unable to find the copyVolume.sh script");
|
||||
}
|
||||
|
||||
resizeScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), resizeScript);
|
||||
|
||||
if (cleanupEnabled) {
|
||||
cleanupScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), cleanupScript);
|
||||
if (cleanupScript == null) {
|
||||
@ -137,9 +139,6 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
|
||||
|
||||
public abstract boolean isStoragePoolTypeSupported(Storage.StoragePoolType type);
|
||||
|
||||
/**
|
||||
* We expect WWN values in the volumePath so need to convert it to an actual physical path
|
||||
*/
|
||||
public abstract AddressInfo parseAndValidatePath(String path);
|
||||
|
||||
@Override
|
||||
@ -151,6 +150,7 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
|
||||
return null;
|
||||
}
|
||||
|
||||
// we expect WWN values in the volumePath so need to convert it to an actual physical path
|
||||
AddressInfo address = parseAndValidatePath(volumePath);
|
||||
return getPhysicalDisk(address, pool);
|
||||
}
|
||||
@ -186,15 +186,23 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
|
||||
|
||||
if (StringUtils.isEmpty(volumePath)) {
|
||||
LOGGER.error("Unable to connect physical disk due to insufficient data - volume path is undefined");
|
||||
throw new CloudRuntimeException("Unable to connect physical disk due to insufficient data - volume path is underfined");
|
||||
return false;
|
||||
}
|
||||
|
||||
if (pool == null) {
|
||||
LOGGER.error("Unable to connect physical disk due to insufficient data - pool is not set");
|
||||
throw new CloudRuntimeException("Unable to connect physical disk due to insufficient data - pool is not set");
|
||||
return false;
|
||||
}
|
||||
|
||||
// we expect WWN values in the volumePath so need to convert it to an actual physical path
|
||||
AddressInfo address = this.parseAndValidatePath(volumePath);
|
||||
|
||||
// validate we have a connection id - we can't proceed without that
|
||||
if (address.getConnectionId() == null) {
|
||||
LOGGER.error("Unable to connect volume with address [" + address.getPath() + "] of the storage pool: " + pool.getUuid() + " - connection id is not set in provided path");
|
||||
return false;
|
||||
}
|
||||
|
||||
int waitTimeInSec = diskWaitTimeSecs;
|
||||
if (details != null && details.containsKey(StorageManager.STORAGE_POOL_DISK_WAIT.toString())) {
|
||||
String waitTime = details.get(StorageManager.STORAGE_POOL_DISK_WAIT.toString());
|
||||
@ -207,31 +215,62 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
|
||||
|
||||
@Override
|
||||
public boolean disconnectPhysicalDisk(String volumePath, KVMStoragePool pool) {
|
||||
LOGGER.debug(String.format("disconnectPhysicalDiskByPath(volumePath,pool) called with args (%s, %s) START", volumePath, pool.getUuid()));
|
||||
if (LOGGER.isDebugEnabled()) LOGGER.debug(String.format("disconnectPhysicalDisk(volumePath,pool) called with args (%s, %s) START", volumePath, pool.getUuid()));
|
||||
AddressInfo address = this.parseAndValidatePath(volumePath);
|
||||
if (address.getAddress() == null) {
|
||||
if (LOGGER.isDebugEnabled()) LOGGER.debug(String.format("disconnectPhysicalDisk(volumePath,pool) returning FALSE, volume path has no address field", volumePath, pool.getUuid()));
|
||||
return false;
|
||||
}
|
||||
ScriptResult result = runScript(disconnectScript, 60000L, address.getAddress().toLowerCase());
|
||||
if (LOGGER.isDebugEnabled()) LOGGER.debug("multipath flush output: " + result.getResult());
|
||||
LOGGER.debug(String.format("disconnectPhysicalDiskByPath(volumePath,pool) called with args (%s, %s) COMPLETE [rc=%s]", volumePath, pool.getUuid(), result.getResult())); return true;
|
||||
|
||||
if (result.getExitCode() != 0) {
|
||||
LOGGER.warn(String.format("Disconnect failed for path [%s] with return code [%s]", address.getAddress().toLowerCase(), result.getExitCode()));
|
||||
}
|
||||
|
||||
if (LOGGER.isDebugEnabled()) {
|
||||
LOGGER.debug("multipath flush output: " + result.getResult());
|
||||
LOGGER.debug(String.format("disconnectPhysicalDisk(volumePath,pool) called with args (%s, %s) COMPLETE [rc=%s]", volumePath, pool.getUuid(), result.getResult()));
|
||||
}
|
||||
|
||||
return (result.getExitCode() == 0);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean disconnectPhysicalDisk(Map<String, String> volumeToDisconnect) {
|
||||
LOGGER.debug(String.format("disconnectPhysicalDiskByPath(volumeToDisconnect) called with arg bag [not implemented]:") + " " + volumeToDisconnect);
|
||||
LOGGER.debug(String.format("disconnectPhysicalDisk(volumeToDisconnect) called with arg bag [not implemented]:") + " " + volumeToDisconnect);
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean disconnectPhysicalDiskByPath(String localPath) {
|
||||
LOGGER.debug(String.format("disconnectPhysicalDiskByPath(localPath) called with args (%s) STARTED", localPath));
|
||||
ScriptResult result = runScript(disconnectScript, 60000L, localPath.replace("/dev/mapper/3", ""));
|
||||
if (LOGGER.isDebugEnabled()) LOGGER.debug("multipath flush output: " + result.getResult());
|
||||
LOGGER.debug(String.format("disconnectPhysicalDiskByPath(localPath) called with args (%s) COMPLETE [rc=%s]", localPath, result.getExitCode())); return true;
|
||||
if (localPath == null) {
|
||||
return false;
|
||||
}
|
||||
if (LOGGER.isDebugEnabled()) LOGGER.debug(String.format("disconnectPhysicalDiskByPath(localPath) called with args (%s) START", localPath));
|
||||
if (localPath.startsWith("/dev/mapper/")) {
|
||||
String multipathName = localPath.replace("/dev/mapper/3", "");
|
||||
// this ensures we only disconnect multipath devices supported by this driver
|
||||
for (String oui: SUPPORTED_OUI_LIST) {
|
||||
if (multipathName.length() > 1 && multipathName.substring(2).startsWith(oui)) {
|
||||
ScriptResult result = runScript(disconnectScript, 60000L, multipathName);
|
||||
if (result.getExitCode() != 0) {
|
||||
LOGGER.warn(String.format("Disconnect failed for path [%s] with return code [%s]", multipathName, result.getExitCode()));
|
||||
}
|
||||
if (LOGGER.isDebugEnabled()) {
|
||||
LOGGER.debug("multipath flush output: " + result.getResult());
|
||||
LOGGER.debug(String.format("disconnectPhysicalDiskByPath(localPath) called with args (%s) COMPLETE [rc=%s]", localPath, result.getExitCode()));
|
||||
}
|
||||
return (result.getExitCode() == 0);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (LOGGER.isDebugEnabled()) LOGGER.debug(String.format("disconnectPhysicalDiskByPath(localPath) returning FALSE, volume path is not a multipath volume: %s", localPath));
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean deletePhysicalDisk(String uuid, KVMStoragePool pool, Storage.ImageFormat format) {
|
||||
LOGGER.info(String.format("deletePhysicalDisk(uuid,pool,format) called with args (%s, %s, %s) [not implemented]", uuid, pool.getUuid(), format.toString()));
|
||||
return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -275,15 +314,9 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Validate inputs and return the source file for a template copy
|
||||
* @param templateFilePath
|
||||
* @param destTemplatePath
|
||||
* @param destPool
|
||||
* @param format
|
||||
* @return
|
||||
*/
|
||||
File createTemplateFromDirectDownloadFileValidate(String templateFilePath, String destTemplatePath, KVMStoragePool destPool, Storage.ImageFormat format) {
|
||||
|
||||
@Override
|
||||
public KVMPhysicalDisk createTemplateFromDirectDownloadFile(String templateFilePath, String destTemplatePath, KVMStoragePool destPool, Storage.ImageFormat format, int timeout) {
|
||||
if (StringUtils.isAnyEmpty(templateFilePath, destTemplatePath) || destPool == null) {
|
||||
LOGGER.error("Unable to create template from direct download template file due to insufficient data");
|
||||
throw new CloudRuntimeException("Unable to create template from direct download template file due to insufficient data");
|
||||
@ -296,57 +329,18 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
|
||||
throw new CloudRuntimeException("Direct download template file " + templateFilePath + " does not exist on this host");
|
||||
}
|
||||
|
||||
if (destTemplatePath == null || destTemplatePath.isEmpty()) {
|
||||
LOGGER.error("Failed to create template, target template disk path not provided");
|
||||
throw new CloudRuntimeException("Target template disk path not provided");
|
||||
}
|
||||
|
||||
if (this.isStoragePoolTypeSupported(destPool.getType())) {
|
||||
throw new CloudRuntimeException("Unsupported storage pool type: " + destPool.getType().toString());
|
||||
}
|
||||
|
||||
if (Storage.ImageFormat.RAW.equals(format) && Storage.ImageFormat.QCOW2.equals(format)) {
|
||||
LOGGER.error("Failed to create template, unsupported template format: " + format.toString());
|
||||
throw new CloudRuntimeException("Unsupported template format: " + format.toString());
|
||||
}
|
||||
return sourceFile;
|
||||
}
|
||||
|
||||
String extractSourceTemplateIfNeeded(File sourceFile, String templateFilePath) {
|
||||
String srcTemplateFilePath = templateFilePath;
|
||||
if (isTemplateExtractable(templateFilePath)) {
|
||||
srcTemplateFilePath = sourceFile.getParent() + "/" + UUID.randomUUID().toString();
|
||||
LOGGER.debug("Extract the downloaded template " + templateFilePath + " to " + srcTemplateFilePath);
|
||||
String extractCommand = getExtractCommandForDownloadedFile(templateFilePath, srcTemplateFilePath);
|
||||
Script.runSimpleBashScript(extractCommand);
|
||||
Script.runSimpleBashScript("rm -f " + templateFilePath);
|
||||
}
|
||||
return srcTemplateFilePath;
|
||||
}
|
||||
|
||||
QemuImg.PhysicalDiskFormat deriveImgFileFormat(Storage.ImageFormat format) {
|
||||
if (format == Storage.ImageFormat.RAW) {
|
||||
return QemuImg.PhysicalDiskFormat.RAW;
|
||||
} else if (format == Storage.ImageFormat.QCOW2) {
|
||||
return QemuImg.PhysicalDiskFormat.QCOW2;
|
||||
} else {
|
||||
return QemuImg.PhysicalDiskFormat.RAW;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public KVMPhysicalDisk createTemplateFromDirectDownloadFile(String templateFilePath, String destTemplatePath, KVMStoragePool destPool, Storage.ImageFormat format, int timeout) {
|
||||
File sourceFile = createTemplateFromDirectDownloadFileValidate(templateFilePath, destTemplatePath, destPool, format);
|
||||
LOGGER.debug("Create template from direct download template - file path: " + templateFilePath + ", dest path: " + destTemplatePath + ", format: " + format.toString());
|
||||
KVMPhysicalDisk sourceDisk = destPool.getPhysicalDisk(sourceFile.getAbsolutePath());
|
||||
KVMPhysicalDisk sourceDisk = destPool.getPhysicalDisk(templateFilePath);
|
||||
return copyPhysicalDisk(sourceDisk, destTemplatePath, destPool, timeout, null, null, Storage.ProvisioningType.THIN);
|
||||
}
|
||||
|
||||
@Override
|
||||
public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMStoragePool destPool, int timeout,
|
||||
byte[] srcPassphrase, byte[] dstPassphrase, Storage.ProvisioningType provisioningType) {
|
||||
if (StringUtils.isEmpty(name) || disk == null || destPool == null) {
|
||||
LOGGER.error("Unable to copy physical disk due to insufficient data");
|
||||
throw new CloudRuntimeException("Unable to copy physical disk due to insufficient data");
|
||||
}
|
||||
|
||||
validateForDiskCopy(disk, name, destPool);
|
||||
LOGGER.info("Copying FROM source physical disk " + disk.getPath() + ", size: " + disk.getSize() + ", virtualsize: " + disk.getVirtualSize()+ ", format: " + disk.getFormat());
|
||||
|
||||
KVMPhysicalDisk destDisk = destPool.getPhysicalDisk(name);
|
||||
@ -366,60 +360,34 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
|
||||
LOGGER.info("Copying TO destination physical disk " + destDisk.getPath() + ", size: " + destDisk.getSize() + ", virtualsize: " + destDisk.getVirtualSize()+ ", format: " + destDisk.getFormat());
|
||||
QemuImgFile srcFile = new QemuImgFile(disk.getPath(), disk.getFormat());
|
||||
QemuImgFile destFile = new QemuImgFile(destDisk.getPath(), destDisk.getFormat());
|
||||
LOGGER.debug("Starting COPY from source downloaded template " + srcFile.getFileName() + " to Primera volume: " + destDisk.getPath());
|
||||
|
||||
LOGGER.debug("Starting COPY from source path " + srcFile.getFileName() + " to target volume path: " + destDisk.getPath());
|
||||
|
||||
ScriptResult result = runScript(copyScript, timeout, destDisk.getFormat().toString().toLowerCase(), srcFile.getFileName(), destFile.getFileName());
|
||||
int rc = result.getExitCode();
|
||||
if (rc != 0) {
|
||||
throw new CloudRuntimeException("Failed to convert from " + srcFile.getFileName() + " to " + destFile.getFileName() + " the error was: " + rc + " - " + result.getResult());
|
||||
}
|
||||
LOGGER.debug("Successfully converted source downloaded template " + srcFile.getFileName() + " to Primera volume: " + destDisk.getPath() + " " + result.getResult());
|
||||
LOGGER.debug("Successfully converted source volume at " + srcFile.getFileName() + " to destination volume: " + destDisk.getPath() + " " + result.getResult());
|
||||
|
||||
return destDisk;
|
||||
}
|
||||
|
||||
void validateForDiskCopy(KVMPhysicalDisk disk, String name, KVMStoragePool destPool) {
|
||||
if (StringUtils.isEmpty(name) || disk == null || destPool == null) {
|
||||
LOGGER.error("Unable to copy physical disk due to insufficient data");
|
||||
throw new CloudRuntimeException("Unable to copy physical disk due to insufficient data");
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Copy a disk path to another disk path using QemuImg command
|
||||
* @param disk
|
||||
* @param destDisk
|
||||
* @param name
|
||||
* @param timeout
|
||||
*/
|
||||
void qemuCopy(KVMPhysicalDisk disk, KVMPhysicalDisk destDisk, String name, int timeout) {
|
||||
QemuImg qemu;
|
||||
try {
|
||||
qemu = new QemuImg(timeout);
|
||||
} catch (LibvirtException | QemuImgException e) {
|
||||
throw new CloudRuntimeException (e);
|
||||
}
|
||||
QemuImgFile srcFile = null;
|
||||
QemuImgFile destFile = null;
|
||||
|
||||
try {
|
||||
srcFile = new QemuImgFile(disk.getPath(), disk.getFormat());
|
||||
destFile = new QemuImgFile(destDisk.getPath(), destDisk.getFormat());
|
||||
|
||||
LOGGER.debug("Starting copy from source disk image " + srcFile.getFileName() + " to volume: " + destDisk.getPath());
|
||||
qemu.convert(srcFile, destFile, true);
|
||||
LOGGER.debug("Successfully converted source disk image " + srcFile.getFileName() + " to volume: " + destDisk.getPath());
|
||||
} catch (QemuImgException | LibvirtException e) {
|
||||
try {
|
||||
Map<String, String> srcInfo = qemu.info(srcFile);
|
||||
LOGGER.debug("Source disk info: " + Arrays.asList(srcInfo));
|
||||
} catch (Exception ignored) {
|
||||
LOGGER.warn("Unable to get info from source disk: " + disk.getName());
|
||||
}
|
||||
|
||||
String errMsg = String.format("Unable to convert/copy from %s to %s, due to: %s", disk.getName(), name, ((StringUtils.isEmpty(e.getMessage())) ? "an unknown error" : e.getMessage()));
|
||||
LOGGER.error(errMsg);
|
||||
throw new CloudRuntimeException(errMsg, e);
|
||||
private static final ScriptResult runScript(String script, long timeout, String...args) {
|
||||
ScriptResult result = new ScriptResult();
|
||||
Script cmd = new Script(script, Duration.millis(timeout), LOGGER);
|
||||
cmd.add(args);
|
||||
OutputInterpreter.OneLineParser parser = new OutputInterpreter.OneLineParser();
|
||||
String output = cmd.execute(parser);
|
||||
// its possible the process never launches which causes an NPE on getExitValue below
|
||||
if (output != null && output.contains("Unable to execute the command")) {
|
||||
result.setResult(output);
|
||||
result.setExitCode(-1);
|
||||
return result;
|
||||
}
|
||||
result.setResult(output);
|
||||
result.setExitCode(cmd.getExitValue());
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -460,25 +428,9 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
|
||||
}
|
||||
}
|
||||
|
||||
private static final ScriptResult runScript(String script, long timeout, String...args) {
|
||||
ScriptResult result = new ScriptResult();
|
||||
Script cmd = new Script(script, Duration.millis(timeout), LOGGER);
|
||||
cmd.add(args);
|
||||
OutputInterpreter.OneLineParser parser = new OutputInterpreter.OneLineParser();
|
||||
String output = cmd.execute(parser);
|
||||
// its possible the process never launches which causes an NPE on getExitValue below
|
||||
if (output != null && output.contains("Unable to execute the command")) {
|
||||
result.setResult(output);
|
||||
result.setExitCode(-1);
|
||||
return result;
|
||||
}
|
||||
result.setResult(output);
|
||||
result.setExitCode(cmd.getExitValue());
|
||||
return result;
|
||||
}
|
||||
|
||||
boolean waitForDiskToBecomeAvailable(AddressInfo address, KVMStoragePool pool, long waitTimeInSec) {
|
||||
LOGGER.debug("Waiting for the volume with id: " + address.getPath() + " of the storage pool: " + pool.getUuid() + " to become available for " + waitTimeInSec + " secs");
|
||||
|
||||
long scriptTimeoutSecs = 30; // how long to wait for each script execution to run
|
||||
long maxTries = 10; // how many max retries to attempt the script
|
||||
long waitTimeInMillis = waitTimeInSec * 1000; // how long overall to wait
|
||||
@ -556,40 +508,6 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
|
||||
return false;
|
||||
}
|
||||
|
||||
void runConnectScript(String lun, AddressInfo address) {
|
||||
try {
|
||||
ProcessBuilder builder = new ProcessBuilder(connectScript, lun, address.getAddress());
|
||||
Process p = builder.start();
|
||||
int rc = p.waitFor();
|
||||
StringBuffer output = new StringBuffer();
|
||||
if (rc == 0) {
|
||||
BufferedReader input = new BufferedReader(new InputStreamReader(p.getInputStream()));
|
||||
String line = null;
|
||||
while ((line = input.readLine()) != null) {
|
||||
output.append(line);
|
||||
output.append(" ");
|
||||
}
|
||||
} else {
|
||||
LOGGER.warn("Failure discovering LUN via " + connectScript);
|
||||
BufferedReader error = new BufferedReader(new InputStreamReader(p.getErrorStream()));
|
||||
String line = null;
|
||||
while ((line = error.readLine()) != null) {
|
||||
LOGGER.warn("error --> " + line);
|
||||
}
|
||||
}
|
||||
} catch (IOException | InterruptedException e) {
|
||||
throw new CloudRuntimeException("Problem performing scan on SCSI hosts", e);
|
||||
}
|
||||
}
|
||||
|
||||
void sleep(long sleepTimeMs) {
|
||||
try {
|
||||
Thread.sleep(sleepTimeMs);
|
||||
} catch (Exception ex) {
|
||||
// don't do anything
|
||||
}
|
||||
}
|
||||
|
||||
long getPhysicalDiskSize(String diskPath) {
|
||||
if (StringUtils.isEmpty(diskPath)) {
|
||||
return 0;
|
||||
|
||||
@ -56,3 +56,44 @@ This provides instructions of which provider implementation class to load when t
|
||||
## Build and Deploy the Jar
|
||||
Once you build the new jar, start Cloudstack Management Server or, if a standalone jar, add it to the classpath before start. You should now have a new storage provider of the designated name once Cloudstack finishes loading
|
||||
all configured modules.
|
||||
|
||||
### Test Cases
|
||||
The following test cases should be run against configured installations of each storage array in a working Cloudstack installation.
|
||||
1. Create New Primera Storage Pool for Zone
|
||||
2. Create New Primera Storage Pool for Cluster
|
||||
3. Update Primera Storage Pool for Zone
|
||||
4. Update Primera Storage Pool for Cluster
|
||||
5. Create VM with Root Disk using Primera pool
|
||||
6. Create VM with Root and Data Disk using Primera pool
|
||||
7. Create VM with Root Disk using NFS and Data Disk on Primera pool
|
||||
8. Create VM with Root Disk on Primera Pool and Data Disk on NFS
|
||||
9. Snapshot root disk with VM using Primera Pool for root disk
|
||||
10. Snapshot data disk with VM using Primera Pool for data disk
|
||||
11. Snapshot VM (non-memory) with root and data disk using Primera pool
|
||||
12. Snapshot VM (non-memory) with root disk using Primera pool and data disk using NFS
|
||||
13. Snapshot VM (non-memory) with root disk using NFS pool and data disk using Primera pool
|
||||
14. Create new template from previous snapshot root disk on Primera pool
|
||||
15. Create new volume from previous snapshot root disk on Primera pool
|
||||
16. Create new volume from previous snapshot data disk on Primera pool
|
||||
17. Create new VM using template created from Primera root snapshot and using Primera as root volume pool
|
||||
18. Create new VM using template created from Primera root snapshot and using NFS as root volume pool
|
||||
19. Delete previously created Primera snapshot
|
||||
20. Create previously created Primera volume attached to a VM that is running (should fail)
|
||||
21. Create previously created Primera volume attached to a VM that is not running (should fail)
|
||||
22. Detach a Primera volume from a non-running VM (should work)
|
||||
23. Attach a Primera volume to a running VM (should work)
|
||||
24. Attach a Primera volume to a non-running VM (should work)
|
||||
25. Create a 'thin' Disk Offering tagged for Primera pool and provision and attach a data volume to a VM using this offering (ttpv=true, reduce=false)
|
||||
26. Create a 'sparse' Disk Offering tagged for Primera pool and provision and attach a data volume to a VM using this offering (ttpv=false, reduce=true)
|
||||
27. Create a 'fat' Disk Offering and tagged for Primera pool and provision and attach a data volume to a VM using this offering (should fail as 'fat' not supported)
|
||||
28. Perform volume migration of root volume from Primera pool to NFS pool on stopped VM
|
||||
29. Perform volume migration of root volume from NFS pool to Primera pool on stopped VM
|
||||
30. Perform volume migration of data volume from Primera pool to NFS pool on stopped VM
|
||||
31. Perform volume migration of data volume from NFS pool to Primera pool on stopped VM
|
||||
32. Perform VM data migration for a VM with 1 or more data volumes from all volumes on Primera pool to all volumes on NFS pool
|
||||
33. Perform VM data migration for a VM with 1 or more data volumes from all volumes on NFS pool to all volumes on Primera pool
|
||||
34. Perform live migration of a VM with a Primera root disk
|
||||
35. Perform live migration of a VM with a Primera data disk and NFS root disk
|
||||
36. Perform live migration of a VM with a Primera root disk and NFS data disk
|
||||
37. Perform volume migration between 2 Primera pools on the same backend Primera IP address
|
||||
38. Perform volume migration between 2 Primera pools on different Primera IP address
|
||||
|
||||
@ -69,14 +69,14 @@ public interface ProviderAdapter {
|
||||
* @param request
|
||||
* @return
|
||||
*/
|
||||
public String attach(ProviderAdapterContext context, ProviderAdapterDataObject request);
|
||||
public String attach(ProviderAdapterContext context, ProviderAdapterDataObject request, String hostname);
|
||||
|
||||
/**
|
||||
* Detach the host from the storage context
|
||||
* @param context
|
||||
* @param request
|
||||
*/
|
||||
public void detach(ProviderAdapterContext context, ProviderAdapterDataObject request);
|
||||
public void detach(ProviderAdapterContext context, ProviderAdapterDataObject request, String hostname);
|
||||
|
||||
/**
|
||||
* Delete the provided volume/object
|
||||
@ -154,4 +154,22 @@ public interface ProviderAdapter {
|
||||
* @return
|
||||
*/
|
||||
public boolean canAccessHost(ProviderAdapterContext context, String hostname);
|
||||
|
||||
/**
|
||||
* Returns true if the provider allows direct attach/connection of snapshots to a host
|
||||
* @return
|
||||
*/
|
||||
public boolean canDirectAttachSnapshot();
|
||||
|
||||
|
||||
/**
|
||||
* Given a ProviderAdapterDataObject, return a map of connection IDs to connection values. Generally
|
||||
* this would be used to return a map of hostnames and the VLUN ID for the attachment associated with
|
||||
* that hostname. If the provider is using a hostgroup/hostset model where the ID is assigned in common
|
||||
* across all hosts in the group, then the map MUST contain a single entry with host key set as a wildcard
|
||||
* character (exactly '*').
|
||||
* @param dataIn
|
||||
* @return
|
||||
*/
|
||||
public Map<String, String> getConnectionIdMap(ProviderAdapterDataObject dataIn);
|
||||
}
|
||||
|
||||
@ -19,6 +19,10 @@ package org.apache.cloudstack.storage.datastore.adapter;
|
||||
import java.util.Map;
|
||||
|
||||
public interface ProviderAdapterFactory {
|
||||
/** Name of the provider */
|
||||
public String getProviderName();
|
||||
/** create a new instance of a provider adapter */
|
||||
public ProviderAdapter create(String url, Map<String, String> details);
|
||||
/** returns true if this type of adapter can directly attach snapshots to hosts */
|
||||
public Object canDirectAttachSnapshot();
|
||||
}
|
||||
|
||||
@ -21,7 +21,6 @@ public class ProviderVolumeNamer {
|
||||
private static final String SNAPSHOT_PREFIX = "snap";
|
||||
private static final String VOLUME_PREFIX = "vol";
|
||||
private static final String TEMPLATE_PREFIX = "tpl";
|
||||
/** Simple method to allow sharing storage setup, primarily in lab/testing environment */
|
||||
private static final String ENV_PREFIX = System.getProperty("adaptive.storage.provider.envIdentifier");
|
||||
|
||||
public static String generateObjectName(ProviderAdapterContext context, ProviderAdapterDataObject obj) {
|
||||
|
||||
@ -32,6 +32,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreCapabilities;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService;
|
||||
import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
|
||||
@ -43,6 +44,7 @@ import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterConstants;
|
||||
import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterContext;
|
||||
import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterDataObject;
|
||||
import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterDiskOffering;
|
||||
import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterFactory;
|
||||
import org.apache.cloudstack.storage.datastore.adapter.ProviderSnapshot;
|
||||
import org.apache.cloudstack.storage.datastore.adapter.ProviderVolume;
|
||||
import org.apache.cloudstack.storage.datastore.adapter.ProviderVolumeStats;
|
||||
@ -53,10 +55,12 @@ import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO;
|
||||
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
|
||||
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
|
||||
import org.apache.cloudstack.storage.datastore.provider.AdaptivePrimaryDatastoreAdapterFactoryMap;
|
||||
import org.apache.cloudstack.storage.image.store.TemplateObject;
|
||||
import org.apache.cloudstack.storage.snapshot.SnapshotObject;
|
||||
import org.apache.cloudstack.storage.to.SnapshotObjectTO;
|
||||
import org.apache.cloudstack.storage.to.TemplateObjectTO;
|
||||
import org.apache.cloudstack.storage.to.VolumeObjectTO;
|
||||
import org.apache.cloudstack.storage.volume.VolumeObject;
|
||||
import org.apache.cloudstack.storage.snapshot.SnapshotObject;
|
||||
|
||||
import com.cloud.agent.api.Answer;
|
||||
import com.cloud.agent.api.to.DataObjectType;
|
||||
@ -73,7 +77,6 @@ import com.cloud.storage.DiskOfferingVO;
|
||||
import com.cloud.storage.ResizeVolumePayload;
|
||||
import com.cloud.storage.SnapshotVO;
|
||||
import com.cloud.storage.Storage.ImageFormat;
|
||||
|
||||
import com.cloud.storage.StoragePool;
|
||||
import com.cloud.storage.VMTemplateStoragePoolVO;
|
||||
import com.cloud.storage.VMTemplateVO;
|
||||
@ -133,6 +136,8 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive
|
||||
DomainDao _domainDao;
|
||||
@Inject
|
||||
VolumeService _volumeService;
|
||||
@Inject
|
||||
VolumeDataFactory volumeDataFactory;
|
||||
|
||||
private AdaptivePrimaryDatastoreAdapterFactoryMap _adapterFactoryMap = null;
|
||||
|
||||
@ -142,7 +147,52 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive
|
||||
|
||||
@Override
|
||||
public DataTO getTO(DataObject data) {
|
||||
return null;
|
||||
// we need to get connectionId and and the VLUN ID for currently attached hosts to add to the DataTO object
|
||||
DataTO to = null;
|
||||
if (data.getType() == DataObjectType.VOLUME) {
|
||||
VolumeObjectTO vto = new VolumeObjectTO((VolumeObject)data);
|
||||
vto.setPath(getPath(data));
|
||||
to = vto;
|
||||
} else if (data.getType() == DataObjectType.TEMPLATE) {
|
||||
TemplateObjectTO tto = new TemplateObjectTO((TemplateObject)data);
|
||||
tto.setPath(getPath(data));
|
||||
to = tto;
|
||||
} else if (data.getType() == DataObjectType.SNAPSHOT) {
|
||||
SnapshotObjectTO sto = new SnapshotObjectTO((SnapshotObject)data);
|
||||
sto.setPath(getPath(data));
|
||||
to = sto;
|
||||
} else {
|
||||
to = super.getTO(data);
|
||||
}
|
||||
return to;
|
||||
}
|
||||
|
||||
/*
|
||||
* For the given data object, return the path with current connection info. If a snapshot
|
||||
* object is passed, we will determine if a temporary volume is avialable for that
|
||||
* snapshot object and return that conneciton info instead.
|
||||
*/
|
||||
String getPath(DataObject data) {
|
||||
StoragePoolVO storagePool = _storagePoolDao.findById(data.getDataStore().getId());
|
||||
Map<String, String> details = _storagePoolDao.getDetails(storagePool.getId());
|
||||
ProviderAdapter api = getAPI(storagePool, details);
|
||||
|
||||
ProviderAdapterDataObject dataIn = newManagedDataObject(data, storagePool);
|
||||
|
||||
/** This means the object is not yet associated with the external provider so path is null */
|
||||
if (dataIn.getExternalName() == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
ProviderAdapterContext context = newManagedVolumeContext(data);
|
||||
Map<String,String> connIdMap = api.getConnectionIdMap(dataIn);
|
||||
ProviderVolume volume = api.getVolume(context, dataIn);
|
||||
// if this is an existing object, generate the path for it.
|
||||
String finalPath = null;
|
||||
if (volume != null) {
|
||||
finalPath = generatePathInfo(volume, connIdMap);
|
||||
}
|
||||
return finalPath;
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -217,11 +267,8 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive
|
||||
dataIn.setExternalName(volume.getExternalName());
|
||||
dataIn.setExternalUuid(volume.getExternalUuid());
|
||||
|
||||
// add the volume to the host set
|
||||
String connectionId = api.attach(context, dataIn);
|
||||
|
||||
// update the cloudstack metadata about the volume
|
||||
persistVolumeOrTemplateData(storagePool, details, dataObject, volume, connectionId);
|
||||
persistVolumeOrTemplateData(storagePool, details, dataObject, volume, null);
|
||||
|
||||
result = new CreateCmdResult(dataObject.getUuid(), new Answer(null));
|
||||
result.setSuccess(true);
|
||||
@ -288,6 +335,7 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive
|
||||
ProviderAdapterContext context = newManagedVolumeContext(destdata);
|
||||
ProviderAdapterDataObject sourceIn = newManagedDataObject(srcdata, storagePool);
|
||||
ProviderAdapterDataObject destIn = newManagedDataObject(destdata, storagePool);
|
||||
|
||||
outVolume = api.copy(context, sourceIn, destIn);
|
||||
|
||||
// populate this data - it may be needed later
|
||||
@ -302,17 +350,9 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive
|
||||
api.resize(context, destIn, destdata.getSize());
|
||||
}
|
||||
|
||||
String connectionId = api.attach(context, destIn);
|
||||
|
||||
String finalPath;
|
||||
// format: type=fiberwwn; address=<address>; connid=<connid>
|
||||
if (connectionId != null) {
|
||||
finalPath = String.format("type=%s; address=%s; connid=%s", outVolume.getAddressType().toString(), outVolume.getAddress().toLowerCase(), connectionId);
|
||||
} else {
|
||||
finalPath = String.format("type=%s; address=%s;", outVolume.getAddressType().toString(), outVolume.getAddress().toLowerCase());
|
||||
}
|
||||
|
||||
persistVolumeData(storagePool, details, destdata, outVolume, connectionId);
|
||||
// initial volume info does not have connection map yet. That is added when grantAccess is called later.
|
||||
String finalPath = generatePathInfo(outVolume, null);
|
||||
persistVolumeData(storagePool, details, destdata, outVolume, null);
|
||||
s_logger.info("Copy completed from [" + srcdata.getUuid() + "] to [" + destdata.getUuid() + "]");
|
||||
|
||||
VolumeObjectTO voto = new VolumeObjectTO();
|
||||
@ -442,6 +482,66 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive
|
||||
|
||||
}
|
||||
|
||||
public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore) {
|
||||
s_logger.debug("Granting host " + host.getName() + " access to volume " + dataObject.getUuid());
|
||||
|
||||
try {
|
||||
StoragePoolVO storagePool = _storagePoolDao.findById(dataObject.getDataStore().getId());
|
||||
Map<String, String> details = _storagePoolDao.getDetails(storagePool.getId());
|
||||
ProviderAdapter api = getAPI(storagePool, details);
|
||||
|
||||
ProviderAdapterContext context = newManagedVolumeContext(dataObject);
|
||||
ProviderAdapterDataObject sourceIn = newManagedDataObject(dataObject, storagePool);
|
||||
api.attach(context, sourceIn, host.getName());
|
||||
|
||||
// rewrite the volume data, especially the connection string for informational purposes - unless it was turned off above
|
||||
ProviderVolume vol = api.getVolume(context, sourceIn);
|
||||
ProviderAdapterDataObject dataIn = newManagedDataObject(dataObject, storagePool);
|
||||
Map<String,String> connIdMap = api.getConnectionIdMap(dataIn);
|
||||
persistVolumeOrTemplateData(storagePool, details, dataObject, vol, connIdMap);
|
||||
|
||||
|
||||
s_logger.info("Granted host " + host.getName() + " access to volume " + dataObject.getUuid());
|
||||
return true;
|
||||
} catch (Throwable e) {
|
||||
String msg = "Error granting host " + host.getName() + " access to volume " + dataObject.getUuid() + ":" + e.getMessage();
|
||||
s_logger.error(msg);
|
||||
throw new CloudRuntimeException(msg, e);
|
||||
}
|
||||
}
|
||||
|
||||
public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) {
|
||||
// nothing to do if the host is null
|
||||
if (dataObject == null || host == null || dataStore == null) {
|
||||
return;
|
||||
}
|
||||
|
||||
s_logger.debug("Revoking access for host " + host.getName() + " to volume " + dataObject.getUuid());
|
||||
|
||||
try {
|
||||
StoragePoolVO storagePool = _storagePoolDao.findById(dataObject.getDataStore().getId());
|
||||
Map<String, String> details = _storagePoolDao.getDetails(storagePool.getId());
|
||||
ProviderAdapter api = getAPI(storagePool, details);
|
||||
|
||||
ProviderAdapterContext context = newManagedVolumeContext(dataObject);
|
||||
ProviderAdapterDataObject sourceIn = newManagedDataObject(dataObject, storagePool);
|
||||
|
||||
api.detach(context, sourceIn, host.getName());
|
||||
|
||||
// rewrite the volume data, especially the connection string for informational purposes
|
||||
ProviderVolume vol = api.getVolume(context, sourceIn);
|
||||
ProviderAdapterDataObject dataIn = newManagedDataObject(dataObject, storagePool);
|
||||
Map<String,String> connIdMap = api.getConnectionIdMap(dataIn);
|
||||
persistVolumeOrTemplateData(storagePool, details, dataObject, vol, connIdMap);
|
||||
|
||||
s_logger.info("Revoked access for host " + host.getName() + " to volume " + dataObject.getUuid());
|
||||
} catch (Throwable e) {
|
||||
String msg = "Error revoking access for host " + host.getName() + " to volume " + dataObject.getUuid() + ":" + e.getMessage();
|
||||
s_logger.error(msg);
|
||||
throw new CloudRuntimeException(msg, e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void handleQualityOfServiceForVolumeMigration(VolumeInfo volumeInfo,
|
||||
QualityOfServiceState qualityOfServiceState) {
|
||||
@ -492,15 +592,7 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive
|
||||
|
||||
// add the snapshot to the host group (needed for copying to non-provider storage
|
||||
// to create templates, etc)
|
||||
String connectionId = null;
|
||||
String finalAddress = outSnapshot.getAddress();
|
||||
if (outSnapshot.canAttachDirectly()) {
|
||||
connectionId = api.attach(context, inSnapshotDO);
|
||||
if (connectionId != null) {
|
||||
finalAddress = finalAddress + "::" + connectionId;
|
||||
}
|
||||
}
|
||||
|
||||
snapshotTO.setPath(finalAddress);
|
||||
snapshotTO.setName(outSnapshot.getName());
|
||||
snapshotTO.setHypervisorType(HypervisorType.KVM);
|
||||
@ -631,10 +723,12 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive
|
||||
mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_SNAPSHOT.toString(), Boolean.TRUE.toString());
|
||||
mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_VOLUME.toString(), Boolean.TRUE.toString()); // set to false because it causes weird behavior when copying templates to root volumes
|
||||
mapCapabilities.put(DataStoreCapabilities.CAN_REVERT_VOLUME_TO_SNAPSHOT.toString(), Boolean.TRUE.toString());
|
||||
// indicates the datastore can create temporary volumes for use when copying
|
||||
// data from a snapshot
|
||||
mapCapabilities.put("CAN_CREATE_TEMP_VOLUME_FROM_SNAPSHOT", Boolean.TRUE.toString());
|
||||
|
||||
ProviderAdapterFactory factory = _adapterFactoryMap.getFactory(this.getProviderName());
|
||||
if (factory != null) {
|
||||
mapCapabilities.put("CAN_DIRECT_ATTACH_SNAPSHOT", factory.canDirectAttachSnapshot().toString());
|
||||
} else {
|
||||
mapCapabilities.put("CAN_DIRECT_ATTACH_SNAPSHOT", Boolean.FALSE.toString());
|
||||
}
|
||||
return mapCapabilities;
|
||||
}
|
||||
|
||||
@ -667,6 +761,11 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean requiresAccessForMigration(DataObject dataObject) {
|
||||
return true;
|
||||
}
|
||||
|
||||
public String getProviderName() {
|
||||
return providerName;
|
||||
}
|
||||
@ -715,8 +814,13 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive
|
||||
object.setType(ProviderAdapterDataObject.Type.VOLUME);
|
||||
ProviderVolumeStats stats = api.getVolumeStats(context, object);
|
||||
|
||||
Long provisionedSizeInBytes = stats.getActualUsedInBytes();
|
||||
Long allocatedSizeInBytes = stats.getAllocatedInBytes();
|
||||
Long provisionedSizeInBytes = null;
|
||||
Long allocatedSizeInBytes = null;
|
||||
if (stats != null) {
|
||||
provisionedSizeInBytes = stats.getActualUsedInBytes();
|
||||
allocatedSizeInBytes = stats.getAllocatedInBytes();
|
||||
}
|
||||
|
||||
if (provisionedSizeInBytes == null || allocatedSizeInBytes == null) {
|
||||
return null;
|
||||
}
|
||||
@ -734,31 +838,19 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive
|
||||
}
|
||||
|
||||
void persistVolumeOrTemplateData(StoragePoolVO storagePool, Map<String, String> storagePoolDetails,
|
||||
DataObject dataObject, ProviderVolume volume, String connectionId) {
|
||||
DataObject dataObject, ProviderVolume volume, Map<String,String> connIdMap) {
|
||||
if (dataObject.getType() == DataObjectType.VOLUME) {
|
||||
persistVolumeData(storagePool, storagePoolDetails, dataObject, volume, connectionId);
|
||||
persistVolumeData(storagePool, storagePoolDetails, dataObject, volume, connIdMap);
|
||||
} else if (dataObject.getType() == DataObjectType.TEMPLATE) {
|
||||
persistTemplateData(storagePool, storagePoolDetails, dataObject, volume, connectionId);
|
||||
persistTemplateData(storagePool, storagePoolDetails, dataObject, volume, connIdMap);
|
||||
}
|
||||
}
|
||||
|
||||
void persistVolumeData(StoragePoolVO storagePool, Map<String, String> details, DataObject dataObject,
|
||||
ProviderVolume managedVolume, String connectionId) {
|
||||
ProviderVolume managedVolume, Map<String,String> connIdMap) {
|
||||
VolumeVO volumeVO = _volumeDao.findById(dataObject.getId());
|
||||
|
||||
// if its null check if the storage provider returned one that is already set
|
||||
if (connectionId == null) {
|
||||
connectionId = managedVolume.getExternalConnectionId();
|
||||
}
|
||||
|
||||
String finalPath;
|
||||
// format: type=fiberwwn; address=<address>; connid=<connid>
|
||||
if (connectionId != null) {
|
||||
finalPath = String.format("type=%s; address=%s; connid=%s", managedVolume.getAddressType().toString(), managedVolume.getAddress().toLowerCase(), connectionId);
|
||||
} else {
|
||||
finalPath = String.format("type=%s; address=%s;", managedVolume.getAddressType().toString(), managedVolume.getAddress().toLowerCase());
|
||||
}
|
||||
|
||||
String finalPath = generatePathInfo(managedVolume, connIdMap);
|
||||
volumeVO.setPath(finalPath);
|
||||
volumeVO.setFormat(ImageFormat.RAW);
|
||||
volumeVO.setPoolId(storagePool.getId());
|
||||
@ -783,25 +875,31 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive
|
||||
}
|
||||
|
||||
void persistTemplateData(StoragePoolVO storagePool, Map<String, String> details, DataObject dataObject,
|
||||
ProviderVolume volume, String connectionId) {
|
||||
ProviderVolume volume, Map<String,String> connIdMap) {
|
||||
TemplateInfo templateInfo = (TemplateInfo) dataObject;
|
||||
VMTemplateStoragePoolVO templatePoolRef = _vmTemplatePoolDao.findByPoolTemplate(storagePool.getId(),
|
||||
templateInfo.getId(), null);
|
||||
// template pool ref doesn't have a details object so we'll save:
|
||||
// 1. external name ==> installPath
|
||||
// 2. address ==> local download path
|
||||
if (connectionId == null) {
|
||||
templatePoolRef.setInstallPath(String.format("type=%s; address=%s", volume.getAddressType().toString(),
|
||||
volume.getAddress().toLowerCase()));
|
||||
} else {
|
||||
templatePoolRef.setInstallPath(String.format("type=%s; address=%s; connid=%s", volume.getAddressType().toString(),
|
||||
volume.getAddress().toLowerCase(), connectionId));
|
||||
}
|
||||
|
||||
templatePoolRef.setInstallPath(generatePathInfo(volume, connIdMap));
|
||||
templatePoolRef.setLocalDownloadPath(volume.getExternalName());
|
||||
templatePoolRef.setTemplateSize(volume.getAllocatedSizeInBytes());
|
||||
_vmTemplatePoolDao.update(templatePoolRef.getId(), templatePoolRef);
|
||||
}
|
||||
|
||||
String generatePathInfo(ProviderVolume volume, Map<String,String> connIdMap) {
|
||||
String finalPath = String.format("type=%s; address=%s; providerName=%s; providerID=%s;",
|
||||
volume.getAddressType().toString(), volume.getAddress().toLowerCase(), volume.getExternalName(), volume.getExternalUuid());
|
||||
|
||||
// if a map was provided, add the connection IDs to the path info. the map is all the possible vlun id's used
|
||||
// across each host or the hostset (represented with host name key as "*");
|
||||
if (connIdMap != null && connIdMap.size() > 0) {
|
||||
for (String key: connIdMap.keySet()) {
|
||||
finalPath += String.format(" connid.%s=%s;", key, connIdMap.get(key));
|
||||
}
|
||||
}
|
||||
return finalPath;
|
||||
}
|
||||
|
||||
ProviderAdapterContext newManagedVolumeContext(DataObject obj) {
|
||||
ProviderAdapterContext ctx = new ProviderAdapterContext();
|
||||
if (obj instanceof VolumeInfo) {
|
||||
@ -898,4 +996,8 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive
|
||||
dataIn.setType(ProviderAdapterDataObject.Type.valueOf(data.getType().toString()));
|
||||
return dataIn;
|
||||
}
|
||||
|
||||
public boolean volumesRequireGrantAccessWhenUsed() {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
@ -189,7 +189,6 @@ public class AdaptiveDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle
|
||||
parameters.setName(dsName);
|
||||
parameters.setProviderName(providerName);
|
||||
parameters.setManaged(true);
|
||||
parameters.setCapacityBytes(capacityBytes);
|
||||
parameters.setUsedBytes(0);
|
||||
parameters.setCapacityIops(capacityIops);
|
||||
parameters.setHypervisorType(HypervisorType.KVM);
|
||||
@ -223,7 +222,7 @@ public class AdaptiveDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle
|
||||
|
||||
// if we have user-provided capacity bytes, validate they do not exceed the manaaged storage capacity bytes
|
||||
ProviderVolumeStorageStats stats = api.getManagedStorageStats();
|
||||
if (capacityBytes != null && capacityBytes != 0) {
|
||||
if (capacityBytes != null && capacityBytes != 0 && stats != null) {
|
||||
if (stats.getCapacityInBytes() > 0) {
|
||||
if (stats.getCapacityInBytes() < capacityBytes) {
|
||||
throw new InvalidParameterValueException("Capacity bytes provided exceeds the capacity of the storage endpoint: provided by user: " + capacityBytes + ", storage capacity from storage provider: " + stats.getCapacityInBytes());
|
||||
@ -233,8 +232,8 @@ public class AdaptiveDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle
|
||||
}
|
||||
// if we have no user-provided capacity bytes, use the ones provided by storage
|
||||
else {
|
||||
if (stats.getCapacityInBytes() <= 0) {
|
||||
throw new InvalidParameterValueException("Capacity bytes note available from the storage provider, user provided capacity bytes must be specified");
|
||||
if (stats == null || stats.getCapacityInBytes() <= 0) {
|
||||
throw new InvalidParameterValueException("Capacity bytes not available from the storage provider, user provided capacity bytes must be specified");
|
||||
}
|
||||
parameters.setCapacityBytes(stats.getCapacityInBytes());
|
||||
}
|
||||
@ -383,8 +382,8 @@ public class AdaptiveDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle
|
||||
* Update the storage pool configuration
|
||||
*/
|
||||
@Override
|
||||
public void updateStoragePool(StoragePool storagePool, Map<String, String> details) {
|
||||
_adapterFactoryMap.updateAPI(storagePool.getUuid(), storagePool.getStorageProviderName(), details);
|
||||
public void updateStoragePool(StoragePool storagePool, Map<String, String> newDetails) {
|
||||
_adapterFactoryMap.updateAPI(storagePool.getUuid(), storagePool.getStorageProviderName(), newDetails);
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
@ -131,4 +131,8 @@ public class AdaptivePrimaryDatastoreAdapterFactoryMap {
|
||||
logger.debug("Creating new ProviderAdapter object for endpoint: " + providerName + "@" + url);
|
||||
return api;
|
||||
}
|
||||
|
||||
public ProviderAdapterFactory getFactory(String providerName) {
|
||||
return this.factoryMap.get(providerName);
|
||||
}
|
||||
}
|
||||
|
||||
@ -54,6 +54,8 @@ public class AdaptivePrimaryHostListener implements HypervisorHostListener {
|
||||
if (storagePoolHost == null) {
|
||||
storagePoolHost = new StoragePoolHostVO(poolId, hostId, "");
|
||||
storagePoolHostDao.persist(storagePoolHost);
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -23,9 +23,9 @@ import java.net.URL;
|
||||
import java.security.KeyManagementException;
|
||||
import java.security.KeyStoreException;
|
||||
import java.security.NoSuchAlgorithmException;
|
||||
import java.text.SimpleDateFormat;
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import javax.net.ssl.HostnameVerifier;
|
||||
@ -109,7 +109,8 @@ public class FlashArrayAdapter implements ProviderAdapter {
|
||||
}
|
||||
|
||||
@Override
|
||||
public ProviderVolume create(ProviderAdapterContext context, ProviderAdapterDataObject dataObject, ProviderAdapterDiskOffering offering, long size) {
|
||||
public ProviderVolume create(ProviderAdapterContext context, ProviderAdapterDataObject dataObject,
|
||||
ProviderAdapterDiskOffering offering, long size) {
|
||||
FlashArrayVolume request = new FlashArrayVolume();
|
||||
request.setExternalName(
|
||||
pod + "::" + ProviderVolumeNamer.generateObjectName(context, dataObject));
|
||||
@ -128,30 +129,50 @@ public class FlashArrayAdapter implements ProviderAdapter {
|
||||
* cluster (depending on Cloudstack Storage Pool configuration)
|
||||
*/
|
||||
@Override
|
||||
public String attach(ProviderAdapterContext context, ProviderAdapterDataObject dataObject) {
|
||||
public String attach(ProviderAdapterContext context, ProviderAdapterDataObject dataObject, String hostname) {
|
||||
|
||||
// should not happen but double check for sanity
|
||||
if (dataObject.getType() == ProviderAdapterDataObject.Type.SNAPSHOT) {
|
||||
throw new RuntimeException("This storage provider does not support direct attachments of snapshots to hosts");
|
||||
}
|
||||
|
||||
String volumeName = normalizeName(pod, dataObject.getExternalName());
|
||||
try {
|
||||
FlashArrayList<FlashArrayConnection> list = POST("/connections?host_group_names=" + hostgroup + "&volume_names=" + volumeName, null, new TypeReference<FlashArrayList<FlashArrayConnection>> () { });
|
||||
FlashArrayList<FlashArrayConnection> list = null;
|
||||
FlashArrayHost host = getHost(hostname);
|
||||
if (host != null) {
|
||||
list = POST("/connections?host_names=" + host.getName() + "&volume_names=" + volumeName, null,
|
||||
new TypeReference<FlashArrayList<FlashArrayConnection>>() {
|
||||
});
|
||||
}
|
||||
|
||||
if (list == null || list.getItems() == null || list.getItems().size() == 0) {
|
||||
throw new RuntimeException("Volume attach did not return lun information");
|
||||
}
|
||||
|
||||
FlashArrayConnection connection = (FlashArrayConnection)this.getFlashArrayItem(list);
|
||||
FlashArrayConnection connection = (FlashArrayConnection) this.getFlashArrayItem(list);
|
||||
if (connection.getLun() == null) {
|
||||
throw new RuntimeException("Volume attach missing lun field");
|
||||
}
|
||||
|
||||
return ""+connection.getLun();
|
||||
return "" + connection.getLun();
|
||||
|
||||
} catch (Throwable e) {
|
||||
// the volume is already attached. happens in some scenarios where orchestration creates the volume before copying to it
|
||||
// the volume is already attached. happens in some scenarios where orchestration
|
||||
// creates the volume before copying to it
|
||||
if (e.toString().contains("Connection already exists")) {
|
||||
FlashArrayList<FlashArrayConnection> list = GET("/connections?volume_names=" + volumeName,
|
||||
new TypeReference<FlashArrayList<FlashArrayConnection>>() {
|
||||
});
|
||||
new TypeReference<FlashArrayList<FlashArrayConnection>>() {
|
||||
});
|
||||
if (list != null && list.getItems() != null) {
|
||||
return ""+list.getItems().get(0).getLun();
|
||||
for (FlashArrayConnection conn : list.getItems()) {
|
||||
if (conn.getHost() != null && conn.getHost().getName() != null &&
|
||||
(conn.getHost().getName().equals(hostname) || conn.getHost().getName().equals(hostname.substring(0, hostname.indexOf('.')))) &&
|
||||
conn.getLun() != null) {
|
||||
return "" + conn.getLun();
|
||||
}
|
||||
}
|
||||
throw new RuntimeException("Volume lun is not found in existing connection");
|
||||
} else {
|
||||
throw new RuntimeException("Volume lun is not found in existing connection");
|
||||
}
|
||||
@ -162,23 +183,42 @@ public class FlashArrayAdapter implements ProviderAdapter {
|
||||
}
|
||||
|
||||
@Override
|
||||
public void detach(ProviderAdapterContext context, ProviderAdapterDataObject dataObject) {
|
||||
public void detach(ProviderAdapterContext context, ProviderAdapterDataObject dataObject, String hostname) {
|
||||
String volumeName = normalizeName(pod, dataObject.getExternalName());
|
||||
DELETE("/connections?host_group_names=" + hostgroup + "&volume_names=" + volumeName);
|
||||
// hostname is always provided by cloudstack, but we will detach from hostgroup
|
||||
// if this pool is configured to use hostgroup for attachments
|
||||
if (hostgroup != null) {
|
||||
DELETE("/connections?host_group_names=" + hostgroup + "&volume_names=" + volumeName);
|
||||
}
|
||||
|
||||
FlashArrayHost host = getHost(hostname);
|
||||
if (host != null) {
|
||||
DELETE("/connections?host_names=" + host.getName() + "&volume_names=" + volumeName);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void delete(ProviderAdapterContext context, ProviderAdapterDataObject dataObject) {
|
||||
// public void deleteVolume(String volumeNamespace, String volumeName) {
|
||||
// first make sure we are disconnected
|
||||
removeVlunsAll(context, pod, dataObject.getExternalName());
|
||||
String fullName = normalizeName(pod, dataObject.getExternalName());
|
||||
|
||||
FlashArrayVolume volume = new FlashArrayVolume();
|
||||
volume.setDestroyed(true);
|
||||
|
||||
// rename as we delete so it doesn't conflict if the template or volume is ever recreated
|
||||
// pure keeps the volume(s) around in a Destroyed bucket for a period of time post delete
|
||||
String timestamp = new SimpleDateFormat("yyyyMMddHHmmss").format(new java.util.Date());
|
||||
volume.setExternalName(fullName + "-" + timestamp);
|
||||
|
||||
try {
|
||||
PATCH("/volumes?names=" + fullName, volume, new TypeReference<FlashArrayList<FlashArrayVolume>>() {
|
||||
});
|
||||
|
||||
// now delete it with new name
|
||||
volume.setDestroyed(true);
|
||||
|
||||
PATCH("/volumes?names=" + fullName + "-" + timestamp, volume, new TypeReference<FlashArrayList<FlashArrayVolume>>() {
|
||||
});
|
||||
} catch (CloudRuntimeException e) {
|
||||
if (e.toString().contains("Volume does not exist")) {
|
||||
return;
|
||||
@ -205,8 +245,6 @@ public class FlashArrayAdapter implements ProviderAdapter {
|
||||
return null;
|
||||
}
|
||||
|
||||
populateConnectionId(volume);
|
||||
|
||||
return volume;
|
||||
} catch (Exception e) {
|
||||
// assume any exception is a not found. Flash returns 400's for most errors
|
||||
@ -217,7 +255,7 @@ public class FlashArrayAdapter implements ProviderAdapter {
|
||||
@Override
|
||||
public ProviderVolume getVolumeByAddress(ProviderAdapterContext context, AddressType addressType, String address) {
|
||||
// public FlashArrayVolume getVolumeByWwn(String wwn) {
|
||||
if (address == null ||addressType == null) {
|
||||
if (address == null || addressType == null) {
|
||||
throw new RuntimeException("Invalid search criteria provided for getVolumeByAddress");
|
||||
}
|
||||
|
||||
@ -234,21 +272,19 @@ public class FlashArrayAdapter implements ProviderAdapter {
|
||||
FlashArrayVolume volume = null;
|
||||
try {
|
||||
FlashArrayList<FlashArrayVolume> list = GET("/volumes?filter=" + query,
|
||||
new TypeReference<FlashArrayList<FlashArrayVolume>>() {
|
||||
});
|
||||
new TypeReference<FlashArrayList<FlashArrayVolume>>() {
|
||||
});
|
||||
|
||||
// if we didn't get an address back its likely an empty object
|
||||
if (list == null || list.getItems() == null || list.getItems().size() == 0) {
|
||||
return null;
|
||||
}
|
||||
|
||||
volume = (FlashArrayVolume)this.getFlashArrayItem(list);
|
||||
volume = (FlashArrayVolume) this.getFlashArrayItem(list);
|
||||
if (volume != null && volume.getAddress() == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
populateConnectionId(volume);
|
||||
|
||||
return volume;
|
||||
} catch (Exception e) {
|
||||
// assume any exception is a not found. Flash returns 400's for most errors
|
||||
@ -256,32 +292,6 @@ public class FlashArrayAdapter implements ProviderAdapter {
|
||||
}
|
||||
}
|
||||
|
||||
private void populateConnectionId(FlashArrayVolume volume) {
|
||||
// we need to see if there is a connection (lun) associated with this volume.
|
||||
// note we assume 1 lun for the hostgroup associated with this object
|
||||
FlashArrayList<FlashArrayConnection> list = null;
|
||||
try {
|
||||
list = GET("/connections?volume_names=" + volume.getExternalName(),
|
||||
new TypeReference<FlashArrayList<FlashArrayConnection>>() {
|
||||
});
|
||||
} catch (CloudRuntimeException e) {
|
||||
// this means there is no attachment associated with this volume on the array
|
||||
if (e.toString().contains("Bad Request")) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
if (list != null && list.getItems() != null) {
|
||||
for (FlashArrayConnection conn: list.getItems()) {
|
||||
if (conn.getHostGroup() != null && conn.getHostGroup().getName().equals(this.hostgroup)) {
|
||||
volume.setExternalConnectionId(""+conn.getLun());
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void resize(ProviderAdapterContext context, ProviderAdapterDataObject dataObject, long newSizeInBytes) {
|
||||
// public void resizeVolume(String volumeNamespace, String volumeName, long
|
||||
@ -299,7 +309,8 @@ public class FlashArrayAdapter implements ProviderAdapter {
|
||||
* @return
|
||||
*/
|
||||
@Override
|
||||
public ProviderSnapshot snapshot(ProviderAdapterContext context, ProviderAdapterDataObject sourceDataObject, ProviderAdapterDataObject targetDataObject) {
|
||||
public ProviderSnapshot snapshot(ProviderAdapterContext context, ProviderAdapterDataObject sourceDataObject,
|
||||
ProviderAdapterDataObject targetDataObject) {
|
||||
// public FlashArrayVolume snapshotVolume(String volumeNamespace, String
|
||||
// volumeName, String snapshotName) {
|
||||
FlashArrayList<FlashArrayVolume> list = POST(
|
||||
@ -354,11 +365,12 @@ public class FlashArrayAdapter implements ProviderAdapter {
|
||||
}
|
||||
|
||||
@Override
|
||||
public ProviderVolume copy(ProviderAdapterContext context, ProviderAdapterDataObject sourceDataObject, ProviderAdapterDataObject destDataObject) {
|
||||
public ProviderVolume copy(ProviderAdapterContext context, ProviderAdapterDataObject sourceDataObject,
|
||||
ProviderAdapterDataObject destDataObject) {
|
||||
// private ManagedVolume copy(ManagedVolume sourceVolume, String destNamespace,
|
||||
// String destName) {
|
||||
if (sourceDataObject == null || sourceDataObject.getExternalName() == null
|
||||
||sourceDataObject.getType() == null) {
|
||||
|| sourceDataObject.getType() == null) {
|
||||
throw new RuntimeException("Provided volume has no external source information");
|
||||
}
|
||||
|
||||
@ -424,12 +436,6 @@ public class FlashArrayAdapter implements ProviderAdapter {
|
||||
@Override
|
||||
public void validate() {
|
||||
login();
|
||||
// check if hostgroup and pod from details really exist - we will
|
||||
// require a distinct configuration object/connection object for each type
|
||||
if (this.getHostgroup(hostgroup) == null) {
|
||||
throw new RuntimeException("Hostgroup [" + hostgroup + "] not found in FlashArray at [" + url
|
||||
+ "], please validate configuration");
|
||||
}
|
||||
|
||||
if (this.getVolumeNamespace(pod) == null) {
|
||||
throw new RuntimeException(
|
||||
@ -477,40 +483,36 @@ public class FlashArrayAdapter implements ProviderAdapter {
|
||||
throw new RuntimeException("Unable to validate host access because a hostname was not provided");
|
||||
}
|
||||
|
||||
List<String> members = getHostgroupMembers(hostgroup);
|
||||
|
||||
// check for fqdn and shortname combinations. this assumes there is at least a shortname match in both the storage array and cloudstack
|
||||
// hostname configuration
|
||||
String shortname;
|
||||
if (hostname.indexOf('.') > 0) {
|
||||
shortname = hostname.substring(0, (hostname.indexOf('.')));
|
||||
} else {
|
||||
shortname = hostname;
|
||||
FlashArrayHost host = getHost(hostname);
|
||||
if (host != null) {
|
||||
return true;
|
||||
}
|
||||
|
||||
for (String member : members) {
|
||||
// exact match (short or long names)
|
||||
if (member.equals(hostname)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// primera has short name and cloudstack had long name
|
||||
if (member.equals(shortname)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// member has long name but cloudstack had shortname
|
||||
if (member.indexOf('.') > 0) {
|
||||
if (member.substring(0, (member.indexOf('.'))).equals(shortname)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
private FlashArrayHost getHost(String hostname) {
|
||||
FlashArrayList<FlashArrayHost> list = null;
|
||||
|
||||
try {
|
||||
list = GET("/hosts?names=" + hostname,
|
||||
new TypeReference<FlashArrayList<FlashArrayHost>>() {
|
||||
});
|
||||
} catch (Exception e) {
|
||||
|
||||
}
|
||||
|
||||
if (list == null) {
|
||||
if (hostname.indexOf('.') > 0) {
|
||||
list = GET("/hosts?names=" + hostname.substring(0, (hostname.indexOf('.'))),
|
||||
new TypeReference<FlashArrayList<FlashArrayHost>>() {
|
||||
});
|
||||
}
|
||||
}
|
||||
return (FlashArrayHost) getFlashArrayItem(list);
|
||||
}
|
||||
|
||||
private String getAccessToken() {
|
||||
refreshSession(false);
|
||||
return accessToken;
|
||||
}
|
||||
|
||||
@ -527,13 +529,21 @@ public class FlashArrayAdapter implements ProviderAdapter {
|
||||
}
|
||||
} catch (Exception e) {
|
||||
// retry frequently but not every request to avoid DDOS on storage API
|
||||
logger.warn("Failed to refresh FlashArray API key for " + username + "@" + url + ", will retry in 5 seconds",
|
||||
logger.warn(
|
||||
"Failed to refresh FlashArray API key for " + username + "@" + url + ", will retry in 5 seconds",
|
||||
e);
|
||||
keyExpiration = System.currentTimeMillis() + (5 * 1000);
|
||||
}
|
||||
}
|
||||
|
||||
private void validateLoginInfo(String urlStr) {
|
||||
/**
|
||||
* Login to the array and get an access token
|
||||
*/
|
||||
private void login() {
|
||||
username = connectionDetails.get(ProviderAdapter.API_USERNAME_KEY);
|
||||
password = connectionDetails.get(ProviderAdapter.API_PASSWORD_KEY);
|
||||
String urlStr = connectionDetails.get(ProviderAdapter.API_URL_KEY);
|
||||
|
||||
URL urlFull;
|
||||
try {
|
||||
urlFull = new URL(urlStr);
|
||||
@ -571,15 +581,6 @@ public class FlashArrayAdapter implements ProviderAdapter {
|
||||
}
|
||||
}
|
||||
|
||||
hostgroup = connectionDetails.get(FlashArrayAdapter.HOSTGROUP);
|
||||
if (hostgroup == null) {
|
||||
hostgroup = queryParms.get(FlashArrayAdapter.HOSTGROUP);
|
||||
if (hostgroup == null) {
|
||||
throw new RuntimeException(
|
||||
FlashArrayAdapter.STORAGE_POD + " paramater/option required to configure this storage pool");
|
||||
}
|
||||
}
|
||||
|
||||
apiLoginVersion = connectionDetails.get(FlashArrayAdapter.API_LOGIN_VERSION);
|
||||
if (apiLoginVersion == null) {
|
||||
apiLoginVersion = queryParms.get(FlashArrayAdapter.API_LOGIN_VERSION);
|
||||
@ -596,6 +597,12 @@ public class FlashArrayAdapter implements ProviderAdapter {
|
||||
}
|
||||
}
|
||||
|
||||
// retrieve for legacy purposes. if set, we'll remove any connections to hostgroup we find and use the host
|
||||
hostgroup = connectionDetails.get(FlashArrayAdapter.HOSTGROUP);
|
||||
if (hostgroup == null) {
|
||||
hostgroup = queryParms.get(FlashArrayAdapter.HOSTGROUP);
|
||||
}
|
||||
|
||||
String connTimeoutStr = connectionDetails.get(FlashArrayAdapter.CONNECT_TIMEOUT_MS);
|
||||
if (connTimeoutStr == null) {
|
||||
connTimeoutStr = queryParms.get(FlashArrayAdapter.CONNECT_TIMEOUT_MS);
|
||||
@ -651,16 +658,7 @@ public class FlashArrayAdapter implements ProviderAdapter {
|
||||
} else {
|
||||
skipTlsValidation = true;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Login to the array and get an access token
|
||||
*/
|
||||
private void login() {
|
||||
username = connectionDetails.get(ProviderAdapter.API_USERNAME_KEY);
|
||||
password = connectionDetails.get(ProviderAdapter.API_PASSWORD_KEY);
|
||||
String urlStr = connectionDetails.get(ProviderAdapter.API_URL_KEY);
|
||||
validateLoginInfo(urlStr);
|
||||
CloseableHttpResponse response = null;
|
||||
try {
|
||||
HttpPost request = new HttpPost(url + "/" + apiLoginVersion + "/auth/apitoken");
|
||||
@ -749,7 +747,13 @@ public class FlashArrayAdapter implements ProviderAdapter {
|
||||
|
||||
if (list != null && list.getItems() != null) {
|
||||
for (FlashArrayConnection conn : list.getItems()) {
|
||||
DELETE("/connections?host_group_names=" + conn.getHostGroup().getName() + "&volume_names=" + volumeName);
|
||||
if (hostgroup != null && conn.getHostGroup() != null && conn.getHostGroup().getName() != null) {
|
||||
DELETE("/connections?host_group_names=" + conn.getHostGroup().getName() + "&volume_names="
|
||||
+ volumeName);
|
||||
break;
|
||||
} else if (conn.getHost() != null && conn.getHost().getName() != null) {
|
||||
DELETE("/connections?host_names=" + conn.getHost().getName() + "&volume_names=" + volumeName);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -762,32 +766,12 @@ public class FlashArrayAdapter implements ProviderAdapter {
|
||||
}
|
||||
|
||||
private FlashArrayPod getVolumeNamespace(String name) {
|
||||
FlashArrayList<FlashArrayPod> list = GET("/pods?names=" + name, new TypeReference<FlashArrayList<FlashArrayPod>>() {
|
||||
});
|
||||
FlashArrayList<FlashArrayPod> list = GET("/pods?names=" + name,
|
||||
new TypeReference<FlashArrayList<FlashArrayPod>>() {
|
||||
});
|
||||
return (FlashArrayPod) getFlashArrayItem(list);
|
||||
}
|
||||
|
||||
private FlashArrayHostgroup getHostgroup(String name) {
|
||||
FlashArrayList<FlashArrayHostgroup> list = GET("/host-groups?name=" + name,
|
||||
new TypeReference<FlashArrayList<FlashArrayHostgroup>>() {
|
||||
});
|
||||
return (FlashArrayHostgroup) getFlashArrayItem(list);
|
||||
}
|
||||
|
||||
private List<String> getHostgroupMembers(String groupname) {
|
||||
FlashArrayGroupMemberReferenceList list = GET("/hosts/host-groups?group_names=" + groupname,
|
||||
new TypeReference<FlashArrayGroupMemberReferenceList>() {
|
||||
});
|
||||
if (list == null || list.getItems().size() == 0) {
|
||||
return null;
|
||||
}
|
||||
List<String> hostnames = new ArrayList<String>();
|
||||
for (FlashArrayGroupMemberReference ref : list.getItems()) {
|
||||
hostnames.add(ref.getMember().getName());
|
||||
}
|
||||
return hostnames;
|
||||
}
|
||||
|
||||
private FlashArrayVolume getSnapshot(String snapshotName) {
|
||||
FlashArrayList<FlashArrayVolume> list = GET("/volume-snapshots?names=" + snapshotName,
|
||||
new TypeReference<FlashArrayList<FlashArrayVolume>>() {
|
||||
@ -856,7 +840,8 @@ public class FlashArrayAdapter implements ProviderAdapter {
|
||||
}
|
||||
return null;
|
||||
} catch (UnsupportedOperationException | IOException e) {
|
||||
throw new CloudRuntimeException("Error processing response from FlashArray [" + url + path + "]", e);
|
||||
throw new CloudRuntimeException("Error processing response from FlashArray [" + url + path + "]",
|
||||
e);
|
||||
}
|
||||
} else if (statusCode == 400) {
|
||||
try {
|
||||
@ -1083,4 +1068,39 @@ public class FlashArrayAdapter implements ProviderAdapter {
|
||||
}
|
||||
return sizeInBytes;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Map<String, String> getConnectionIdMap(ProviderAdapterDataObject dataIn) {
|
||||
Map<String, String> map = new HashMap<String, String>();
|
||||
|
||||
// flasharray doesn't let you directly map a snapshot to a host, so we'll just return an empty map
|
||||
if (dataIn.getType() == ProviderAdapterDataObject.Type.SNAPSHOT) {
|
||||
return map;
|
||||
}
|
||||
|
||||
try {
|
||||
FlashArrayList<FlashArrayConnection> list = GET("/connections?volume_names=" + dataIn.getExternalName(),
|
||||
new TypeReference<FlashArrayList<FlashArrayConnection>>() {
|
||||
});
|
||||
|
||||
if (list != null && list.getItems() != null) {
|
||||
for (FlashArrayConnection conn : list.getItems()) {
|
||||
if (conn.getHost() != null) {
|
||||
map.put(conn.getHost().getName(), "" + conn.getLun());
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (Exception e) {
|
||||
// flasharray returns a 400 if the volume doesn't exist, so we'll just return an empty object.
|
||||
if (logger.isTraceEnabled()) {
|
||||
logger.trace("Error getting connection map for volume [" + dataIn.getExternalName() + "]: " + e.toString(), e);
|
||||
}
|
||||
}
|
||||
return map;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean canDirectAttachSnapshot() {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
@ -33,4 +33,9 @@ public class FlashArrayAdapterFactory implements ProviderAdapterFactory {
|
||||
return new FlashArrayAdapter(url, details);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Object canDirectAttachSnapshot() {
|
||||
return false;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@ -0,0 +1,46 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package org.apache.cloudstack.storage.datastore.adapter.flasharray;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
|
||||
import com.fasterxml.jackson.annotation.JsonInclude;
|
||||
import com.fasterxml.jackson.annotation.JsonProperty;
|
||||
|
||||
@JsonIgnoreProperties(ignoreUnknown = true)
|
||||
@JsonInclude(JsonInclude.Include.NON_NULL)
|
||||
public class FlashArrayHost {
|
||||
public String getName() {
|
||||
return name;
|
||||
}
|
||||
public void setName(String name) {
|
||||
this.name = name;
|
||||
}
|
||||
public List<String> getWwns() {
|
||||
return wwns;
|
||||
}
|
||||
public void setWwns(List<String> wwns) {
|
||||
this.wwns = wwns;
|
||||
}
|
||||
@JsonProperty("name")
|
||||
private String name;
|
||||
@JsonProperty("wwns")
|
||||
private List<String> wwns;
|
||||
|
||||
}
|
||||
@ -83,7 +83,7 @@ public class FlashArrayVolume implements ProviderSnapshot {
|
||||
@JsonIgnore
|
||||
public String getPodName() {
|
||||
if (pod != null) {
|
||||
return pod.getName();
|
||||
return pod.name;
|
||||
} else {
|
||||
return null;
|
||||
}
|
||||
@ -129,7 +129,7 @@ public class FlashArrayVolume implements ProviderSnapshot {
|
||||
}
|
||||
public void setPodName(String podname) {
|
||||
FlashArrayVolumePod pod = new FlashArrayVolumePod();
|
||||
pod.setName(podname);
|
||||
pod.name = podname;
|
||||
this.pod = pod;
|
||||
}
|
||||
@Override
|
||||
|
||||
@ -24,20 +24,7 @@ import com.fasterxml.jackson.annotation.JsonProperty;
|
||||
@JsonInclude(JsonInclude.Include.NON_NULL)
|
||||
public class FlashArrayVolumePod {
|
||||
@JsonProperty("id")
|
||||
private String id;
|
||||
public String id;
|
||||
@JsonProperty("name")
|
||||
private String name;
|
||||
|
||||
public String getId() {
|
||||
return id;
|
||||
}
|
||||
public void setId(String id) {
|
||||
this.id = id;
|
||||
}
|
||||
public String getName() {
|
||||
return name;
|
||||
}
|
||||
public void setName(String name) {
|
||||
this.name = name;
|
||||
}
|
||||
public String name;
|
||||
}
|
||||
|
||||
@ -24,7 +24,6 @@ import java.security.KeyManagementException;
|
||||
import java.security.KeyStoreException;
|
||||
import java.security.NoSuchAlgorithmException;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import javax.net.ssl.HostnameVerifier;
|
||||
@ -73,7 +72,7 @@ public class PrimeraAdapter implements ProviderAdapter {
|
||||
public static final String TASK_WAIT_TIMEOUT_MS = "taskWaitTimeoutMs";
|
||||
|
||||
private static final long KEY_TTL_DEFAULT = (1000 * 60 * 14);
|
||||
private static final long CONNECT_TIMEOUT_MS_DEFAULT = 600000;
|
||||
private static final long CONNECT_TIMEOUT_MS_DEFAULT = 60 * 1000;
|
||||
private static final long TASK_WAIT_TIMEOUT_MS_DEFAULT = 10 * 60 * 1000;
|
||||
public static final long BYTES_IN_MiB = 1048576;
|
||||
|
||||
@ -106,18 +105,11 @@ public class PrimeraAdapter implements ProviderAdapter {
|
||||
this.refreshSession(true);
|
||||
}
|
||||
|
||||
/**
|
||||
* Validate that the hostgroup and pod from the details data exists. Each
|
||||
* configuration object/connection needs a distinct set of these 2 things.
|
||||
*/
|
||||
@Override
|
||||
public void validate() {
|
||||
login();
|
||||
if (this.getHostset(hostset) == null) {
|
||||
throw new RuntimeException("Hostgroup [" + hostset + "] not found in FlashArray at [" + url
|
||||
+ "], please validate configuration");
|
||||
}
|
||||
|
||||
// check if hostgroup and pod from details really exist - we will
|
||||
// require a distinct configuration object/connection object for each type
|
||||
if (this.getCpg(cpg) == null) {
|
||||
throw new RuntimeException(
|
||||
"Pod [" + cpg + "] not found in FlashArray at [" + url + "], please validate configuration");
|
||||
@ -126,6 +118,15 @@ public class PrimeraAdapter implements ProviderAdapter {
|
||||
|
||||
@Override
|
||||
public void disconnect() {
|
||||
logger.info("PrimeraAdapter:disconnect(): closing session");
|
||||
try {
|
||||
_client.close();
|
||||
} catch (IOException e) {
|
||||
logger.warn("PrimeraAdapter:refreshSession(): Error closing client connection", e);
|
||||
} finally {
|
||||
_client = null;
|
||||
keyExpiration = -1;
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
@ -176,10 +177,15 @@ public class PrimeraAdapter implements ProviderAdapter {
|
||||
}
|
||||
|
||||
@Override
|
||||
public String attach(ProviderAdapterContext context, ProviderAdapterDataObject dataIn) {
|
||||
public String attach(ProviderAdapterContext context, ProviderAdapterDataObject dataIn, String hostname) {
|
||||
assert dataIn.getExternalName() != null : "External name not provided internally on volume attach";
|
||||
PrimeraHostset.PrimeraHostsetVLUNRequest request = new PrimeraHostset.PrimeraHostsetVLUNRequest();
|
||||
request.setHostname("set:" + hostset);
|
||||
PrimeraHost host = getHost(hostname);
|
||||
if (host == null) {
|
||||
throw new RuntimeException("Unable to find host " + hostname + " on storage provider");
|
||||
}
|
||||
request.setHostname(host.getName());
|
||||
|
||||
request.setVolumeName(dataIn.getExternalName());
|
||||
request.setAutoLun(true);
|
||||
// auto-lun returned here: Location: /api/v1/vluns/test_vv02,252,mysystem,2:2:4
|
||||
@ -194,12 +200,36 @@ public class PrimeraAdapter implements ProviderAdapter {
|
||||
return toks[1];
|
||||
}
|
||||
|
||||
@Override
|
||||
/**
|
||||
* This detaches ALL vlun's for the provided volume name IF they are associated to this hostset
|
||||
* @param context
|
||||
* @param request
|
||||
*/
|
||||
public void detach(ProviderAdapterContext context, ProviderAdapterDataObject request) {
|
||||
detach(context, request, null);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void detach(ProviderAdapterContext context, ProviderAdapterDataObject request, String hostname) {
|
||||
// we expect to only be attaching one hostset to the vluns, so on detach we'll
|
||||
// remove ALL vluns we find.
|
||||
assert request.getExternalName() != null : "External name not provided internally on volume detach";
|
||||
removeAllVluns(request.getExternalName());
|
||||
|
||||
PrimeraVlunList list = getVluns(request.getExternalName());
|
||||
if (list != null && list.getMembers().size() > 0) {
|
||||
list.getMembers().forEach(vlun -> {
|
||||
// remove any hostset from old code if configured
|
||||
if (hostset != null && vlun.getHostname() != null && vlun.getHostname().equals("set:" + hostset)) {
|
||||
removeVlun(request.getExternalName(), vlun.getLun(), vlun.getHostname());
|
||||
}
|
||||
|
||||
if (hostname != null) {
|
||||
if (vlun.getHostname().equals(hostname) || vlun.getHostname().equals(hostname.split("\\.")[0])) {
|
||||
removeVlun(request.getExternalName(), vlun.getLun(), vlun.getHostname());
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
public void removeVlun(String name, Integer lunid, String hostString) {
|
||||
@ -208,20 +238,7 @@ public class PrimeraAdapter implements ProviderAdapter {
|
||||
DELETE("/vluns/" + name + "," + lunid + "," + hostString);
|
||||
}
|
||||
|
||||
/**
|
||||
* Removes all vluns - this should only be done when you are sure the volume is no longer in use
|
||||
* @param name
|
||||
*/
|
||||
public void removeAllVluns(String name) {
|
||||
PrimeraVlunList list = getVolumeHostsets(name);
|
||||
if (list != null && list.getMembers() != null) {
|
||||
for (PrimeraVlun vlun: list.getMembers()) {
|
||||
removeVlun(vlun.getVolumeName(), vlun.getLun(), vlun.getHostname());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public PrimeraVlunList getVolumeHostsets(String name) {
|
||||
public PrimeraVlunList getVluns(String name) {
|
||||
String query = "%22volumeName%20EQ%20" + name + "%22";
|
||||
return GET("/vluns?query=" + query, new TypeReference<PrimeraVlunList>() {});
|
||||
}
|
||||
@ -231,7 +248,7 @@ public class PrimeraAdapter implements ProviderAdapter {
|
||||
assert request.getExternalName() != null : "External name not provided internally on volume delete";
|
||||
|
||||
// first remove vluns (take volumes from vluns) from hostset
|
||||
removeAllVluns(request.getExternalName());
|
||||
detach(context, request);
|
||||
DELETE("/volumes/" + request.getExternalName());
|
||||
}
|
||||
|
||||
@ -420,6 +437,7 @@ public class PrimeraAdapter implements ProviderAdapter {
|
||||
if (cpgobj == null || cpgobj.getTotalSpaceMiB() == 0) {
|
||||
return null;
|
||||
}
|
||||
|
||||
Long capacityBytes = 0L;
|
||||
if (cpgobj.getsDGrowth() != null) {
|
||||
capacityBytes = cpgobj.getsDGrowth().getLimitMiB() * PrimeraAdapter.BYTES_IN_MiB;
|
||||
@ -453,73 +471,59 @@ public class PrimeraAdapter implements ProviderAdapter {
|
||||
|
||||
@Override
|
||||
public boolean canAccessHost(ProviderAdapterContext context, String hostname) {
|
||||
PrimeraHostset hostset = getHostset(this.hostset);
|
||||
|
||||
List<String> members = hostset.getSetmembers();
|
||||
|
||||
// check for fqdn and shortname combinations. this assumes there is at least a shortname match in both the storage array and cloudstack
|
||||
// hostname configuration
|
||||
String shortname;
|
||||
if (hostname.indexOf('.') > 0) {
|
||||
shortname = hostname.substring(0, (hostname.indexOf('.')));
|
||||
} else {
|
||||
shortname = hostname;
|
||||
}
|
||||
for (String member: members) {
|
||||
// exact match (short or long names)
|
||||
if (member.equals(hostname)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// primera has short name and cloudstack had long name
|
||||
if (member.equals(shortname)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// member has long name but cloudstack had shortname
|
||||
int index = member.indexOf(".");
|
||||
if (index > 0) {
|
||||
if (member.substring(0, (member.indexOf('.'))).equals(shortname)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
// check that the array has the host configured
|
||||
PrimeraHost host = this.getHost(hostname);
|
||||
if (host != null) {
|
||||
// if hostset is configured we'll additionally check if the host is in it (legacy/original behavior)
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
private PrimeraHost getHost(String name) {
|
||||
PrimeraHost host = GET("/hosts/" + name, new TypeReference<PrimeraHost>() { });
|
||||
if (host == null) {
|
||||
if (name.indexOf('.') > 0) {
|
||||
host = this.getHost(name.substring(0, (name.indexOf('.'))));
|
||||
}
|
||||
}
|
||||
return host;
|
||||
|
||||
}
|
||||
|
||||
private PrimeraCpg getCpg(String name) {
|
||||
return GET("/cpgs/" + name, new TypeReference<PrimeraCpg>() {
|
||||
});
|
||||
}
|
||||
|
||||
private PrimeraHostset getHostset(String name) {
|
||||
return GET("/hostsets/" + name, new TypeReference<PrimeraHostset>() {
|
||||
});
|
||||
}
|
||||
|
||||
private String getSessionKey() {
|
||||
refreshSession(false);
|
||||
return key;
|
||||
}
|
||||
|
||||
private synchronized void refreshSession(boolean force) {
|
||||
private synchronized String refreshSession(boolean force) {
|
||||
try {
|
||||
if (force || keyExpiration < System.currentTimeMillis()) {
|
||||
if (force || keyExpiration < (System.currentTimeMillis()-15000)) {
|
||||
// close client to force connection reset on appliance -- not doing this can result in NotAuthorized error...guessing
|
||||
_client.close();;
|
||||
_client = null;
|
||||
disconnect();
|
||||
login();
|
||||
keyExpiration = System.currentTimeMillis() + keyTtl;
|
||||
logger.debug("PrimeraAdapter:refreshSession(): session created or refreshed with key=" + key + ", expiration=" + keyExpiration);
|
||||
} else {
|
||||
if (logger.isTraceEnabled()) {
|
||||
logger.trace("PrimeraAdapter:refreshSession(): using existing session key=" + key + ", expiration=" + keyExpiration);
|
||||
}
|
||||
}
|
||||
} catch (Exception e) {
|
||||
// retry frequently but not every request to avoid DDOS on storage API
|
||||
logger.warn("Failed to refresh Primera API key for " + username + "@" + url + ", will retry in 5 seconds", e);
|
||||
keyExpiration = System.currentTimeMillis() + (5*1000);
|
||||
}
|
||||
return key;
|
||||
}
|
||||
/**
|
||||
* Login to the array and get an access token
|
||||
*/
|
||||
private void login() {
|
||||
username = connectionDetails.get(ProviderAdapter.API_USERNAME_KEY);
|
||||
password = connectionDetails.get(ProviderAdapter.API_PASSWORD_KEY);
|
||||
String urlStr = connectionDetails.get(ProviderAdapter.API_URL_KEY);
|
||||
|
||||
private void validateLoginInfo(String urlStr) {
|
||||
URL urlFull;
|
||||
try {
|
||||
urlFull = new URL(urlStr);
|
||||
@ -553,7 +557,7 @@ public class PrimeraAdapter implements ProviderAdapter {
|
||||
cpg = queryParms.get(PrimeraAdapter.CPG);
|
||||
if (cpg == null) {
|
||||
throw new RuntimeException(
|
||||
PrimeraAdapter.CPG + " paramater/option required to configure this storage pool");
|
||||
PrimeraAdapter.CPG + " parameter/option required to configure this storage pool");
|
||||
}
|
||||
}
|
||||
|
||||
@ -566,13 +570,10 @@ public class PrimeraAdapter implements ProviderAdapter {
|
||||
}
|
||||
}
|
||||
|
||||
// if this is null, we will use direct-to-host vlunids (preferred)
|
||||
hostset = connectionDetails.get(PrimeraAdapter.HOSTSET);
|
||||
if (hostset == null) {
|
||||
hostset = queryParms.get(PrimeraAdapter.HOSTSET);
|
||||
if (hostset == null) {
|
||||
throw new RuntimeException(
|
||||
PrimeraAdapter.HOSTSET + " paramater/option required to configure this storage pool");
|
||||
}
|
||||
}
|
||||
|
||||
String connTimeoutStr = connectionDetails.get(PrimeraAdapter.CONNECT_TIMEOUT_MS);
|
||||
@ -629,16 +630,7 @@ public class PrimeraAdapter implements ProviderAdapter {
|
||||
} else {
|
||||
skipTlsValidation = true;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Login to the array and get an access token
|
||||
*/
|
||||
private void login() {
|
||||
username = connectionDetails.get(ProviderAdapter.API_USERNAME_KEY);
|
||||
password = connectionDetails.get(ProviderAdapter.API_PASSWORD_KEY);
|
||||
String urlStr = connectionDetails.get(ProviderAdapter.API_URL_KEY);
|
||||
validateLoginInfo(urlStr);
|
||||
CloseableHttpResponse response = null;
|
||||
try {
|
||||
HttpPost request = new HttpPost(url + "/credentials");
|
||||
@ -652,6 +644,9 @@ public class PrimeraAdapter implements ProviderAdapter {
|
||||
if (statusCode == 200 | statusCode == 201) {
|
||||
PrimeraKey keyobj = mapper.readValue(response.getEntity().getContent(), PrimeraKey.class);
|
||||
key = keyobj.getKey();
|
||||
// Set the key expiration to x minutes from now
|
||||
this.keyExpiration = System.currentTimeMillis() + keyTtl;
|
||||
logger.info("PrimeraAdapter:login(): successful, new session: New key=" + key + ", expiration=" + this.keyExpiration);
|
||||
} else if (statusCode == 401 || statusCode == 403) {
|
||||
throw new RuntimeException("Authentication or Authorization to Primera [" + url + "] with user [" + username
|
||||
+ "] failed, unable to retrieve session token");
|
||||
@ -712,15 +707,15 @@ public class PrimeraAdapter implements ProviderAdapter {
|
||||
private <T> T POST(String path, Object input, final TypeReference<T> type) {
|
||||
CloseableHttpResponse response = null;
|
||||
try {
|
||||
this.refreshSession(false);
|
||||
String session_key = this.refreshSession(false);
|
||||
HttpPost request = new HttpPost(url + path);
|
||||
request.addHeader("Content-Type", "application/json");
|
||||
request.addHeader("Accept", "application/json");
|
||||
request.addHeader("X-HP3PAR-WSAPI-SessionKey", getSessionKey());
|
||||
request.addHeader("X-HP3PAR-WSAPI-SessionKey", session_key);
|
||||
try {
|
||||
String data = mapper.writeValueAsString(input);
|
||||
request.setEntity(new StringEntity(data));
|
||||
logger.debug("POST data: " + request.getEntity());
|
||||
if (logger.isTraceEnabled()) logger.trace("POST data: " + request.getEntity());
|
||||
} catch (UnsupportedEncodingException | JsonProcessingException e) {
|
||||
throw new RuntimeException(
|
||||
"Error processing request payload to [" + url + "] for path [" + path + "]", e);
|
||||
@ -797,10 +792,11 @@ public class PrimeraAdapter implements ProviderAdapter {
|
||||
CloseableHttpResponse response = null;
|
||||
try {
|
||||
this.refreshSession(false);
|
||||
String session_key = this.refreshSession(false);
|
||||
HttpPut request = new HttpPut(url + path);
|
||||
request.addHeader("Content-Type", "application/json");
|
||||
request.addHeader("Accept", "application/json");
|
||||
request.addHeader("X-HP3PAR-WSAPI-SessionKey", getSessionKey());
|
||||
request.addHeader("X-HP3PAR-WSAPI-SessionKey", session_key);
|
||||
String data = mapper.writeValueAsString(input);
|
||||
request.setEntity(new StringEntity(data));
|
||||
|
||||
@ -850,10 +846,11 @@ public class PrimeraAdapter implements ProviderAdapter {
|
||||
CloseableHttpResponse response = null;
|
||||
try {
|
||||
this.refreshSession(false);
|
||||
String session_key = this.refreshSession(false);
|
||||
HttpGet request = new HttpGet(url + path);
|
||||
request.addHeader("Content-Type", "application/json");
|
||||
request.addHeader("Accept", "application/json");
|
||||
request.addHeader("X-HP3PAR-WSAPI-SessionKey", getSessionKey());
|
||||
request.addHeader("X-HP3PAR-WSAPI-SessionKey", session_key);
|
||||
|
||||
CloseableHttpClient client = getClient();
|
||||
response = (CloseableHttpResponse) client.execute(request);
|
||||
@ -892,10 +889,11 @@ public class PrimeraAdapter implements ProviderAdapter {
|
||||
CloseableHttpResponse response = null;
|
||||
try {
|
||||
this.refreshSession(false);
|
||||
String session_key = this.refreshSession(false);
|
||||
HttpDelete request = new HttpDelete(url + path);
|
||||
request.addHeader("Content-Type", "application/json");
|
||||
request.addHeader("Accept", "application/json");
|
||||
request.addHeader("X-HP3PAR-WSAPI-SessionKey", getSessionKey());
|
||||
request.addHeader("X-HP3PAR-WSAPI-SessionKey", session_key);
|
||||
|
||||
CloseableHttpClient client = getClient();
|
||||
response = (CloseableHttpResponse) client.execute(request);
|
||||
@ -926,5 +924,22 @@ public class PrimeraAdapter implements ProviderAdapter {
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public Map<String, String> getConnectionIdMap(ProviderAdapterDataObject dataIn) {
|
||||
Map<String,String> connIdMap = new HashMap<String,String>();
|
||||
PrimeraVlunList list = this.getVluns(dataIn.getExternalName());
|
||||
|
||||
if (list != null && list.getMembers() != null && list.getMembers().size() > 0) {
|
||||
for (PrimeraVlun vlun: list.getMembers()) {
|
||||
connIdMap.put(vlun.getHostname(), ""+vlun.getLun());
|
||||
}
|
||||
}
|
||||
|
||||
return connIdMap;
|
||||
}
|
||||
|
||||
/**
 * Indicates that snapshots managed by this adapter can be attached to a host
 * directly, without first being copied to a standalone volume.
 *
 * @return always {@code true} for the Primera adapter
 */
@Override
public boolean canDirectAttachSnapshot() {
    return true;
}
|
||||
}
|
||||
|
||||
@ -33,4 +33,9 @@ public class PrimeraAdapterFactory implements ProviderAdapterFactory {
|
||||
return new PrimeraAdapter(url, details);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Object canDirectAttachSnapshot() {
|
||||
return true;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@ -0,0 +1,56 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
package org.apache.cloudstack.storage.datastore.adapter.primera;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
|
||||
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
|
||||
import com.fasterxml.jackson.annotation.JsonInclude;
|
||||
|
||||
@JsonIgnoreProperties(ignoreUnknown = true)
|
||||
@JsonInclude(JsonInclude.Include.NON_NULL)
|
||||
public class PrimeraHost {
|
||||
private Integer id;
|
||||
private String name;
|
||||
private List<PrimeraPort> fcPaths;
|
||||
private PrimeraHostDescriptor descriptors;
|
||||
public Integer getId() {
|
||||
return id;
|
||||
}
|
||||
public void setId(Integer id) {
|
||||
this.id = id;
|
||||
}
|
||||
public String getName() {
|
||||
return name;
|
||||
}
|
||||
public void setName(String name) {
|
||||
this.name = name;
|
||||
}
|
||||
public List<PrimeraPort> getFcPaths() {
|
||||
return fcPaths;
|
||||
}
|
||||
public void setFcPaths(List<PrimeraPort> fcPaths) {
|
||||
this.fcPaths = fcPaths;
|
||||
}
|
||||
public PrimeraHostDescriptor getDescriptors() {
|
||||
return descriptors;
|
||||
}
|
||||
public void setDescriptors(PrimeraHostDescriptor descriptors) {
|
||||
this.descriptors = descriptors;
|
||||
}
|
||||
}
|
||||
@ -0,0 +1,40 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package org.apache.cloudstack.storage.datastore.adapter.primera;
|
||||
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
|
||||
import com.fasterxml.jackson.annotation.JsonInclude;
|
||||
|
||||
@JsonIgnoreProperties(ignoreUnknown = true)
|
||||
@JsonInclude(JsonInclude.Include.NON_NULL)
|
||||
public class PrimeraHostDescriptor {
|
||||
private String IPAddr = null;
|
||||
private String os = null;
|
||||
public String getIPAddr() {
|
||||
return IPAddr;
|
||||
}
|
||||
public void setIPAddr(String iPAddr) {
|
||||
IPAddr = iPAddr;
|
||||
}
|
||||
public String getOs() {
|
||||
return os;
|
||||
}
|
||||
public void setOs(String os) {
|
||||
this.os = os;
|
||||
}
|
||||
|
||||
}
|
||||
@ -34,105 +34,115 @@ public class PrimeraHostset {
|
||||
    // UUID string identifying this hostset on the array.
    private String uuid;
    // Catch-all for any JSON properties that have no dedicated field above.
    private Map<String, Object> additionalProperties = new LinkedHashMap<String, Object>();

    // Free-form comment stored with the hostset.
    public String getComment() {
        return comment;
    }

    public void setComment(String comment) {
        this.comment = comment;
    }

    // Numeric id of the hostset on the array.
    public Integer getId() {
        return id;
    }

    public void setId(Integer id) {
        this.id = id;
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    // Names of the hosts that are members of this hostset.
    public List<String> getSetmembers() {
        return setmembers;
    }

    public void setSetmembers(List<String> setmembers) {
        this.setmembers = setmembers;
    }

    public String getUuid() {
        return uuid;
    }

    public void setUuid(String uuid) {
        this.uuid = uuid;
    }

    public Map<String, Object> getAdditionalProperties() {
        return additionalProperties;
    }

    public void setAdditionalProperties(Map<String, Object> additionalProperties) {
        this.additionalProperties = additionalProperties;
    }
|
||||
|
||||
|
||||
|
||||
    // Request body used when exporting a volume (creating a VLUN) to a host
    // or hostset on the array.
    public static class PrimeraHostsetVLUNRequest {
        // Name of the volume being exported.
        private String volumeName;
        // When true the array picks the next available LUN id itself.
        private Boolean autoLun = true;
        // Explicit LUN id; presumably ignored when autoLun is true — TODO confirm.
        private Integer lun = 0;
        // Upper bound for auto-assigned LUN ids; meaning of 0 not shown here — TODO confirm.
        private Integer maxAutoLun = 0;
        /**
         * Target of the export. This can be a single hostname OR a set of hosts
         * in the format "set:&lt;hostset&gt;". For the purposes of this driver, the
         * predominant use case is a hostset aligned with a CloudStack cluster.
         */
        private String hostname;

        public String getVolumeName() {
            return volumeName;
        }

        public void setVolumeName(String volumeName) {
            this.volumeName = volumeName;
        }

        public Boolean getAutoLun() {
            return autoLun;
        }

        public void setAutoLun(Boolean autoLun) {
            this.autoLun = autoLun;
        }

        public Integer getLun() {
            return lun;
        }

        public void setLun(Integer lun) {
            this.lun = lun;
        }

        public Integer getMaxAutoLun() {
            return maxAutoLun;
        }

        public void setMaxAutoLun(Integer maxAutoLun) {
            this.maxAutoLun = maxAutoLun;
        }

        public String getHostname() {
            return hostname;
        }

        public void setHostname(String hostname) {
            this.hostname = hostname;
        }
|
||||
|
||||
@ -0,0 +1,40 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package org.apache.cloudstack.storage.datastore.adapter.primera;
|
||||
|
||||
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
|
||||
import com.fasterxml.jackson.annotation.JsonInclude;
|
||||
|
||||
@JsonIgnoreProperties(ignoreUnknown = true)
|
||||
@JsonInclude(JsonInclude.Include.NON_NULL)
|
||||
public class PrimeraPort {
|
||||
private String wwn;
|
||||
private PrimeraPortPos portPos;
|
||||
public String getWwn() {
|
||||
return wwn;
|
||||
}
|
||||
public void setWwn(String wwn) {
|
||||
this.wwn = wwn;
|
||||
}
|
||||
public PrimeraPortPos getPortPos() {
|
||||
return portPos;
|
||||
}
|
||||
public void setPortPos(PrimeraPortPos portPos) {
|
||||
this.portPos = portPos;
|
||||
}
|
||||
}
|
||||
@ -0,0 +1,47 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package org.apache.cloudstack.storage.datastore.adapter.primera;
|
||||
|
||||
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
|
||||
import com.fasterxml.jackson.annotation.JsonInclude;
|
||||
|
||||
@JsonIgnoreProperties(ignoreUnknown = true)
|
||||
@JsonInclude(JsonInclude.Include.NON_NULL)
|
||||
public class PrimeraPortPos {
|
||||
private Integer cardPort;
|
||||
private Integer node;
|
||||
private Integer slot;
|
||||
public Integer getCardPort() {
|
||||
return cardPort;
|
||||
}
|
||||
public void setCardPort(Integer cardPort) {
|
||||
this.cardPort = cardPort;
|
||||
}
|
||||
public Integer getNode() {
|
||||
return node;
|
||||
}
|
||||
public void setNode(Integer node) {
|
||||
this.node = node;
|
||||
}
|
||||
public Integer getSlot() {
|
||||
return slot;
|
||||
}
|
||||
public void setSlot(Integer slot) {
|
||||
this.slot = slot;
|
||||
}
|
||||
}
|
||||
@ -35,7 +35,7 @@ public class PrimeraVolumeCopyRequestParameters {
|
||||
private String snapCPG = null;
|
||||
private Boolean skipZero = null;
|
||||
private Boolean saveSnapshot = null;
|
||||
/** 1=HIGH, 2=MED, 3=LOW */
|
||||
// 1=HIGH, 2=MED, 3=LOW
|
||||
private Integer priority = null;
|
||||
public String getDestVolume() {
|
||||
return destVolume;
|
||||
|
||||
@ -22,10 +22,7 @@ import com.fasterxml.jackson.annotation.JsonInclude;
|
||||
@JsonIgnoreProperties(ignoreUnknown = true)
|
||||
@JsonInclude(JsonInclude.Include.NON_NULL)
|
||||
public class PrimeraVolumePromoteRequest {
|
||||
/**
|
||||
* Defines action for the request as described at https://support.hpe.com/hpesc/public/docDisplay?docId=a00114827en_us&page=v25706371.html
|
||||
*/
|
||||
private Integer action = 4;
|
||||
private Integer action = 4; // PROMOTE_VIRTUAL_COPY, https://support.hpe.com/hpesc/public/docDisplay?docId=a00114827en_us&page=v25706371.html
|
||||
private Boolean online = true;
|
||||
private Integer priority = 2; // MEDIUM
|
||||
private Boolean allowRemoteCopyParent = true;
|
||||
|
||||
@ -68,6 +68,11 @@ public class OAuth2UserAuthenticator extends AdapterBase implements UserAuthenti
|
||||
final String[] provider = (String[])requestParameters.get(ApiConstants.PROVIDER);
|
||||
final String[] emailArray = (String[])requestParameters.get(ApiConstants.EMAIL);
|
||||
final String[] secretCodeArray = (String[])requestParameters.get(ApiConstants.SECRET_CODE);
|
||||
|
||||
if (provider == null) {
|
||||
return new Pair<Boolean, ActionOnFailedAuthentication>(false, null);
|
||||
}
|
||||
|
||||
String oauthProvider = ((provider == null) ? null : provider[0]);
|
||||
String email = ((emailArray == null) ? null : emailArray[0]);
|
||||
String secretCode = ((secretCodeArray == null) ? null : secretCodeArray[0]);
|
||||
|
||||
@ -22,7 +22,7 @@ OUTPUT_FILE=${3:?"Output file/path is required"}
|
||||
|
||||
echo "$(date): qemu-img convert -n -p -W -t none -O ${OUTPUT_FORMAT} ${INPUT_FILE} ${OUTPUT_FILE}"
|
||||
|
||||
qemu-img convert -n -p -W -t none -O ${OUTPUT_FORMAT} ${INPUT_FILE} ${OUTPUT_FILE} && {
|
||||
qemu-img convert -n -p -W -t writeback -O ${OUTPUT_FORMAT} ${INPUT_FILE} ${OUTPUT_FILE} && {
|
||||
# if its a block device make sure we flush caches before exiting
|
||||
lsblk ${OUTPUT_FILE} >/dev/null 2>&1 && {
|
||||
blockdev --flushbufs ${OUTPUT_FILE}
|
||||
|
||||
@ -1057,7 +1057,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
|
||||
created = false;
|
||||
VolumeInfo vol = volFactory.getVolume(cmd.getEntityId());
|
||||
vol.stateTransit(Volume.Event.DestroyRequested);
|
||||
throw new CloudRuntimeException("Failed to create volume: " + volume.getId(), e);
|
||||
throw new CloudRuntimeException("Failed to create volume: " + volume.getUuid(), e);
|
||||
} finally {
|
||||
if (!created) {
|
||||
s_logger.trace("Decrementing volume resource count for account id=" + volume.getAccountId() + " as volume failed to create on the backend");
|
||||
@ -3347,6 +3347,13 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
|
||||
}
|
||||
|
||||
DiskOfferingVO newDiskOffering = retrieveAndValidateNewDiskOffering(cmd);
|
||||
// if no new disk offering was provided, and match is required, default to the offering of the
|
||||
// original volume. otherwise it falls through with no check and the target volume may
|
||||
// not work correctly in some scenarios with the target provider. Adminstrator
|
||||
// can disable this flag dynamically for certain bulk migration scenarios if required.
|
||||
if (newDiskOffering == null && Boolean.TRUE.equals(MatchStoragePoolTagsWithDiskOffering.value())) {
|
||||
newDiskOffering = diskOffering;
|
||||
}
|
||||
validateConditionsToReplaceDiskOfferingOfVolume(vol, newDiskOffering, destPool);
|
||||
|
||||
if (vm != null) {
|
||||
@ -3432,14 +3439,12 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
|
||||
Account caller = CallContext.current().getCallingAccount();
|
||||
DataCenter zone = null;
|
||||
Volume volume = _volsDao.findById(cmd.getId());
|
||||
if (volume != null) {
|
||||
zone = _dcDao.findById(volume.getDataCenterId());
|
||||
if (volume == null) {
|
||||
throw new InvalidParameterValueException(String.format("Provided volume id is not valid: %s", cmd.getId()));
|
||||
}
|
||||
zone = _dcDao.findById(volume.getDataCenterId());
|
||||
|
||||
_accountMgr.checkAccess(caller, newDiskOffering, zone);
|
||||
DiskOfferingVO currentDiskOffering = _diskOfferingDao.findById(volume.getDiskOfferingId());
|
||||
if (VolumeApiServiceImpl.MatchStoragePoolTagsWithDiskOffering.valueIn(zone.getId()) && !doesNewDiskOfferingHasTagsAsOldDiskOffering(currentDiskOffering, newDiskOffering)) {
|
||||
throw new InvalidParameterValueException(String.format("Existing disk offering storage tags of the volume %s does not contain in the new disk offering %s ", volume.getUuid(), newDiskOffering.getUuid()));
|
||||
}
|
||||
return newDiskOffering;
|
||||
}
|
||||
|
||||
@ -3524,6 +3529,18 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
|
||||
return doesTargetStorageSupportDiskOffering(destPool, targetStoreTags);
|
||||
}
|
||||
|
||||
public static boolean doesNewDiskOfferingHasTagsAsOldDiskOffering(DiskOfferingVO oldDO, DiskOfferingVO newDO) {
|
||||
String[] oldDOStorageTags = oldDO.getTagsArray();
|
||||
String[] newDOStorageTags = newDO.getTagsArray();
|
||||
if (oldDOStorageTags.length == 0) {
|
||||
return true;
|
||||
}
|
||||
if (newDOStorageTags.length == 0) {
|
||||
return false;
|
||||
}
|
||||
return CollectionUtils.isSubCollection(Arrays.asList(oldDOStorageTags), Arrays.asList(newDOStorageTags));
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean doesTargetStorageSupportDiskOffering(StoragePool destPool, String diskOfferingTags) {
|
||||
Pair<List<String>, Boolean> storagePoolTags = getStoragePoolTags(destPool);
|
||||
@ -3553,18 +3570,6 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
|
||||
return result;
|
||||
}
|
||||
|
||||
public static boolean doesNewDiskOfferingHasTagsAsOldDiskOffering(DiskOfferingVO oldDO, DiskOfferingVO newDO) {
|
||||
String[] oldDOStorageTags = oldDO.getTagsArray();
|
||||
String[] newDOStorageTags = newDO.getTagsArray();
|
||||
if (oldDOStorageTags.length == 0) {
|
||||
return true;
|
||||
}
|
||||
if (newDOStorageTags.length == 0) {
|
||||
return false;
|
||||
}
|
||||
return CollectionUtils.isSubCollection(Arrays.asList(oldDOStorageTags), Arrays.asList(newDOStorageTags));
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a {@link Pair}, where the first value is the list of the StoragePool tags, and the second value is whether the returned tags are to be interpreted as a rule,
|
||||
* or a normal list of tags.
|
||||
|
||||
@ -6400,6 +6400,12 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
|
||||
+ " hypervisors: [%s].", hypervisorType, HYPERVISORS_THAT_CAN_DO_STORAGE_MIGRATION_ON_NON_USER_VMS));
|
||||
}
|
||||
|
||||
List<VolumeVO> vols = _volsDao.findByInstance(vm.getId());
|
||||
if (vols.size() > 1 &&
|
||||
!(HypervisorType.VMware.equals(hypervisorType) || HypervisorType.KVM.equals(hypervisorType))) {
|
||||
throw new InvalidParameterValueException("Data disks attached to the vm, can not migrate. Need to detach data disks first");
|
||||
}
|
||||
|
||||
// Check that Vm does not have VM Snapshots
|
||||
if (_vmSnapshotDao.findByVm(vmId).size() > 0) {
|
||||
throw new InvalidParameterValueException("VM's disk cannot be migrated, please remove all the VM Snapshots for this VM");
|
||||
|
||||
@ -92,7 +92,9 @@ public class SnapshotHelper {
|
||||
*/
|
||||
public void expungeTemporarySnapshot(boolean kvmSnapshotOnlyInPrimaryStorage, SnapshotInfo snapInfo) {
|
||||
if (!kvmSnapshotOnlyInPrimaryStorage) {
|
||||
logger.trace(String.format("Snapshot [%s] is not a temporary backup to create a volume from snapshot. Not expunging it.", snapInfo.getId()));
|
||||
if (snapInfo != null) {
|
||||
logger.trace(String.format("Snapshot [%s] is not a temporary backup to create a volume from snapshot. Not expunging it.", snapInfo.getId()));
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user