diff --git a/api/src/main/java/com/cloud/storage/Storage.java b/api/src/main/java/com/cloud/storage/Storage.java index 1ee7200a313..8a2ec1a8905 100644 --- a/api/src/main/java/com/cloud/storage/Storage.java +++ b/api/src/main/java/com/cloud/storage/Storage.java @@ -77,13 +77,18 @@ public class Storage { } public static enum Capability { - HARDWARE_ACCELERATION("HARDWARE_ACCELERATION"); + HARDWARE_ACCELERATION("HARDWARE_ACCELERATION"), + ALLOW_MIGRATE_OTHER_POOLS("ALLOW_MIGRATE_OTHER_POOLS"); private final String capability; private Capability(String capability) { this.capability = capability; } + + public String toString() { + return this.capability; + } } public static enum ProvisioningType { @@ -150,7 +155,8 @@ public class Storage { ManagedNFS(true, false, false), Linstor(true, true, false), DatastoreCluster(true, true, false), // for VMware, to abstract pool of clusters - StorPool(true, true, true); + StorPool(true, true, true), + FiberChannel(true, true, false); // Fiber Channel Pool for KVM hypervisors is used to find the volume by WWN value (/dev/disk/by-id/wwn-) private final boolean shared; private final boolean overprovisioning; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateStoragePoolCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateStoragePoolCmd.java index 09ec5394921..7a907e0f76a 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateStoragePoolCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateStoragePoolCmd.java @@ -17,6 +17,7 @@ package org.apache.cloudstack.api.command.admin.storage; import java.util.List; +import java.util.Map; import org.apache.cloudstack.api.ApiCommandResourceType; import org.apache.log4j.Logger; @@ -32,6 +33,7 @@ import org.apache.cloudstack.api.response.StoragePoolResponse; import com.cloud.storage.StoragePool; import com.cloud.user.Account; +@SuppressWarnings("rawtypes") @APICommand(name = 
"updateStoragePool", description = "Updates a storage pool.", responseObject = StoragePoolResponse.class, since = "3.0.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class UpdateStoragePoolCmd extends BaseCmd { @@ -61,6 +63,20 @@ public class UpdateStoragePoolCmd extends BaseCmd { " enable it back.") private Boolean enabled; + @Parameter(name = ApiConstants.DETAILS, + type = CommandType.MAP, + required = false, + description = "the details for the storage pool", + since = "4.19.0") + private Map details; + + @Parameter(name = ApiConstants.URL, + type = CommandType.STRING, + required = false, + description = "the URL of the storage pool", + since = "4.19.0") + private String url; + @Parameter(name = ApiConstants.IS_TAG_A_RULE, type = CommandType.BOOLEAN, description = ApiConstants.PARAMETER_DESCRIPTION_IS_TAG_A_RULE) private Boolean isTagARule; @@ -115,6 +131,22 @@ public class UpdateStoragePoolCmd extends BaseCmd { return ApiCommandResourceType.StoragePool; } + public Map getDetails() { + return details; + } + + public void setDetails(Map details) { + this.details = details; + } + + public String getUrl() { + return url; + } + + public void setUrl(String url) { + this.url = url; + } + @Override public void execute() { StoragePool result = _storageService.updateStoragePool(this); diff --git a/client/pom.xml b/client/pom.xml index 0451e8e09e8..a7665e8e3e8 100644 --- a/client/pom.xml +++ b/client/pom.xml @@ -111,6 +111,16 @@ cloud-plugin-storage-volume-storpool ${project.version} + + org.apache.cloudstack + cloud-plugin-storage-volume-primera + ${project.version} + + + org.apache.cloudstack + cloud-plugin-storage-volume-flasharray + ${project.version} + org.apache.cloudstack cloud-server diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java index b2eaaf7ea6e..3d107278eb7 100755 --- 
a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java @@ -2957,6 +2957,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac *
    *
  • If the current storage pool of the volume is not a managed storage, we do not need to validate anything here. *
  • If the current storage pool is a managed storage and the target storage pool ID is different from the current one, we throw an exception. + *
  • If the current storage pool is a managed storage and explicitly declared it is capable of migration to alternate storage pools + *
*/ protected void executeManagedStorageChecksWhenTargetStoragePoolProvided(StoragePoolVO currentPool, VolumeVO volume, StoragePoolVO targetPool) { @@ -2966,6 +2967,11 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac if (currentPool.getId() == targetPool.getId()) { return; } + + Map details = _storagePoolDao.getDetails(currentPool.getId()); + if (details != null && Boolean.parseBoolean(details.get(Storage.Capability.ALLOW_MIGRATE_OTHER_POOLS.toString()))) { + return; + } throw new CloudRuntimeException(String.format("Currently, a volume on managed storage can only be 'migrated' to itself " + "[volumeId=%s, currentStoragePoolId=%s, targetStoragePoolId=%s].", volume.getUuid(), currentPool.getUuid(), targetPool.getUuid())); } diff --git a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java index e450addb261..370753ed923 100644 --- a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java +++ b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java @@ -193,7 +193,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { destData.getType() == DataObjectType.TEMPLATE)) { // volume transfer from primary to secondary. Volume transfer between primary pools are already handled by copyVolumeBetweenPools // Delete cache in order to certainly transfer a latest image. 
- s_logger.debug("Delete " + cacheType + " cache(id: " + cacheId + + if (s_logger.isDebugEnabled()) s_logger.debug("Delete " + cacheType + " cache(id: " + cacheId + ", uuid: " + cacheUuid + ")"); cacheMgr.deleteCacheObject(srcForCopy); } else { @@ -205,7 +205,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { ", uuid: " + cacheUuid + ")"); cacheMgr.deleteCacheObject(srcForCopy); } else { - s_logger.debug("Decrease reference count of " + cacheType + + if (s_logger.isDebugEnabled()) s_logger.debug("Decrease reference count of " + cacheType + " cache(id: " + cacheId + ", uuid: " + cacheUuid + ")"); cacheMgr.releaseCacheObject(srcForCopy); } @@ -213,7 +213,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { } return answer; } catch (Exception e) { - s_logger.debug("copy object failed: ", e); + if (s_logger.isDebugEnabled()) s_logger.debug("copy object failed: ", e); if (cacheData != null) { cacheMgr.deleteCacheObject(cacheData); } @@ -331,7 +331,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { } return answer; } catch (Exception e) { - s_logger.debug("Failed to send to storage pool", e); + if (s_logger.isDebugEnabled()) s_logger.debug("Failed to send to storage pool", e); throw new CloudRuntimeException("Failed to send to storage pool", e); } } @@ -388,7 +388,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { if (answer == null || !answer.getResult()) { if (answer != null) { - s_logger.debug("copy to image store failed: " + answer.getDetails()); + if (s_logger.isDebugEnabled()) s_logger.debug("copy to image store failed: " + answer.getDetails()); } objOnImageStore.processEvent(Event.OperationFailed); imageStore.delete(objOnImageStore); @@ -411,7 +411,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { if (answer == null || !answer.getResult()) { if (answer != null) { - s_logger.debug("copy to primary store failed: " + answer.getDetails()); + if 
(s_logger.isDebugEnabled()) s_logger.debug("copy to primary store failed: " + answer.getDetails()); } objOnImageStore.processEvent(Event.OperationFailed); imageStore.delete(objOnImageStore); @@ -471,13 +471,17 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { s_logger.error(errMsg); answer = new Answer(command, false, errMsg); } else { + if (s_logger.isDebugEnabled()) s_logger.debug("Sending MIGRATE_COPY request to node " + ep); answer = ep.sendMessage(command); + if (s_logger.isDebugEnabled()) s_logger.debug("Received MIGRATE_COPY response from node with answer: " + answer); } if (answer == null || !answer.getResult()) { throw new CloudRuntimeException("Failed to migrate volume " + volume + " to storage pool " + destPool); } else { // Update the volume details after migration. + if (s_logger.isDebugEnabled()) s_logger.debug("MIGRATE_COPY updating volume"); + VolumeVO volumeVo = volDao.findById(volume.getId()); Long oldPoolId = volume.getPoolId(); volumeVo.setPath(((MigrateVolumeAnswer)answer).getVolumePath()); @@ -496,6 +500,8 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { } volumeVo.setFolder(folder); volDao.update(volume.getId(), volumeVo); + if (s_logger.isDebugEnabled()) s_logger.debug("MIGRATE_COPY update volume data complete"); + } return answer; @@ -507,7 +513,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { Answer answer = null; String errMsg = null; try { - s_logger.debug("copyAsync inspecting src type " + srcData.getType().toString() + " copyAsync inspecting dest type " + destData.getType().toString()); + if (s_logger.isDebugEnabled()) s_logger.debug("copyAsync inspecting src type " + srcData.getType().toString() + " copyAsync inspecting dest type " + destData.getType().toString()); if (srcData.getType() == DataObjectType.SNAPSHOT && destData.getType() == DataObjectType.VOLUME) { answer = copyVolumeFromSnapshot(srcData, destData); } else if (srcData.getType() == 
DataObjectType.SNAPSHOT && destData.getType() == DataObjectType.TEMPLATE) { @@ -516,11 +522,16 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { answer = cloneVolume(srcData, destData); } else if (destData.getType() == DataObjectType.VOLUME && srcData.getType() == DataObjectType.VOLUME && srcData.getDataStore().getRole() == DataStoreRole.Primary && destData.getDataStore().getRole() == DataStoreRole.Primary) { + if (s_logger.isDebugEnabled()) s_logger.debug("About to MIGRATE copy between datasources"); if (srcData.getId() == destData.getId()) { // The volume has to be migrated across storage pools. + if (s_logger.isDebugEnabled()) s_logger.debug("MIGRATE copy using migrateVolumeToPool STARTING"); answer = migrateVolumeToPool(srcData, destData); + if (s_logger.isDebugEnabled()) s_logger.debug("MIGRATE copy using migrateVolumeToPool DONE: " + answer.getResult()); } else { + if (s_logger.isDebugEnabled()) s_logger.debug("MIGRATE copy using copyVolumeBetweenPools STARTING"); answer = copyVolumeBetweenPools(srcData, destData); + if (s_logger.isDebugEnabled()) s_logger.debug("MIGRATE copy using copyVolumeBetweenPools DONE: " + answer.getResult()); } } else if (srcData.getType() == DataObjectType.SNAPSHOT && destData.getType() == DataObjectType.SNAPSHOT) { answer = copySnapshot(srcData, destData); @@ -532,7 +543,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { errMsg = answer.getDetails(); } } catch (Exception e) { - s_logger.debug("copy failed", e); + if (s_logger.isDebugEnabled()) s_logger.debug("copy failed", e); errMsg = e.toString(); } CopyCommandResult result = new CopyCommandResult(null, answer); @@ -627,7 +638,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { } return answer; } catch (Exception e) { - s_logger.debug("copy snasphot failed: ", e); + if (s_logger.isDebugEnabled()) s_logger.debug("copy snasphot failed: ", e); if (cacheData != null) { cacheMgr.deleteCacheObject(cacheData); } 
diff --git a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java index 1419ae36d25..b24452336bd 100644 --- a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java +++ b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java @@ -106,6 +106,7 @@ import com.cloud.storage.Snapshot; import com.cloud.storage.SnapshotVO; import com.cloud.storage.Storage; import com.cloud.storage.Storage.ImageFormat; +import com.cloud.storage.Storage.ProvisioningType; import com.cloud.storage.Storage.StoragePoolType; import com.cloud.storage.StorageManager; import com.cloud.storage.StoragePool; @@ -186,6 +187,8 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { private EndPointSelector selector; @Inject VMTemplatePoolDao templatePoolDao; + @Inject + private VolumeDataFactory _volFactory; @Override public StrategyPriority canHandle(DataObject srcData, DataObject destData) { @@ -400,15 +403,15 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { } else if (!isVolumeOnManagedStorage(destVolumeInfo)) { handleVolumeMigrationFromManagedStorageToNonManagedStorage(srcVolumeInfo, destVolumeInfo, callback); } else { - String errMsg = "The source volume to migrate and the destination volume are both on managed storage. 
" + - "Migration in this case is not yet supported."; - - handleError(errMsg, callback); + handleVolumeMigrationFromManagedStorageToManagedStorage(srcVolumeInfo, destVolumeInfo, callback); } } else if (!isVolumeOnManagedStorage(destVolumeInfo)) { - String errMsg = "The 'StorageSystemDataMotionStrategy' does not support this migration use case."; - - handleError(errMsg, callback); + if (!HypervisorType.KVM.equals(srcVolumeInfo.getHypervisorType())) { + String errMsg = String.format("Currently migrating volumes between managed storage providers is not supported on %s hypervisor", srcVolumeInfo.getHypervisorType().toString()); + handleError(errMsg, callback); + } else { + handleVolumeMigrationForKVM(srcVolumeInfo, destVolumeInfo, callback); + } } else { handleVolumeMigrationFromNonManagedStorageToManagedStorage(srcVolumeInfo, destVolumeInfo, callback); } @@ -453,7 +456,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { String volumePath = null; try { - if (!ImageFormat.QCOW2.equals(srcVolumeInfo.getFormat())) { + if (!HypervisorType.KVM.equals(srcVolumeInfo.getHypervisorType())) { throw new CloudRuntimeException("Currently, only the KVM hypervisor type is supported for the migration of a volume " + "from managed storage to non-managed storage."); } @@ -485,7 +488,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { errMsg = "Migration operation failed in 'StorageSystemDataMotionStrategy.handleVolumeCopyFromManagedStorageToSecondaryStorage': " + ex.getMessage(); - throw new CloudRuntimeException(errMsg); + throw new CloudRuntimeException(errMsg, ex); } finally { CopyCmdAnswer copyCmdAnswer; @@ -512,12 +515,22 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { } } + private void handleVolumeMigrationFromManagedStorageToManagedStorage(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo, + AsyncCompletionCallback callback) { + if 
(!HypervisorType.KVM.equals(srcVolumeInfo.getHypervisorType())) { + String errMsg = String.format("Currently migrating volumes between managed storage providers is not supported on %s hypervisor", srcVolumeInfo.getHypervisorType().toString()); + handleError(errMsg, callback); + } else { + handleVolumeMigrationForKVM(srcVolumeInfo, destVolumeInfo, callback); + } + } + private void handleVolumeMigrationFromManagedStorageToNonManagedStorage(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo, AsyncCompletionCallback callback) { String errMsg = null; try { - if (!ImageFormat.QCOW2.equals(srcVolumeInfo.getFormat())) { + if (!HypervisorType.KVM.equals(srcVolumeInfo.getHypervisorType())) { throw new CloudRuntimeException("Currently, only the KVM hypervisor type is supported for the migration of a volume " + "from managed storage to non-managed storage."); } @@ -525,10 +538,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { HypervisorType hypervisorType = HypervisorType.KVM; VirtualMachine vm = srcVolumeInfo.getAttachedVM(); - if (vm != null && vm.getState() != VirtualMachine.State.Stopped) { - throw new CloudRuntimeException("Currently, if a volume to migrate from managed storage to non-managed storage is attached to " + - "a VM, the VM must be in the Stopped state."); - } + checkAvailableForMigration(vm); long destStoragePoolId = destVolumeInfo.getPoolId(); StoragePoolVO destStoragePoolVO = _storagePoolDao.findById(destStoragePoolId); @@ -553,7 +563,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { errMsg = "Migration operation failed in 'StorageSystemDataMotionStrategy.handleVolumeMigrationFromManagedStorageToNonManagedStorage': " + ex.getMessage(); - throw new CloudRuntimeException(errMsg); + throw new CloudRuntimeException(errMsg, ex); } finally { CopyCmdAnswer copyCmdAnswer; @@ -579,9 +589,10 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { private void 
verifyFormatWithPoolType(ImageFormat imageFormat, StoragePoolType poolType) { if (imageFormat != ImageFormat.VHD && imageFormat != ImageFormat.OVA && imageFormat != ImageFormat.QCOW2 && - !(imageFormat == ImageFormat.RAW && StoragePoolType.PowerFlex == poolType)) { - throw new CloudRuntimeException("Only the following image types are currently supported: " + - ImageFormat.VHD.toString() + ", " + ImageFormat.OVA.toString() + ", " + ImageFormat.QCOW2.toString() + ", and " + ImageFormat.RAW.toString() + "(for PowerFlex)"); + !(imageFormat == ImageFormat.RAW && (StoragePoolType.PowerFlex == poolType || + StoragePoolType.FiberChannel == poolType))) { + throw new CloudRuntimeException(String.format("Only the following image types are currently supported: %s, %s, %s, %s (for PowerFlex and FiberChannel)", + ImageFormat.VHD.toString(), ImageFormat.OVA.toString(), ImageFormat.QCOW2.toString(), ImageFormat.RAW.toString())); } } @@ -685,14 +696,14 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { handleVolumeMigrationForXenServer(srcVolumeInfo, destVolumeInfo); } else { - handleVolumeMigrationForKVM(srcVolumeInfo, destVolumeInfo); + handleVolumeMigrationForKVM(srcVolumeInfo, destVolumeInfo, callback); } } catch (Exception ex) { errMsg = "Migration operation failed in 'StorageSystemDataMotionStrategy.handleVolumeMigrationFromNonManagedStorageToManagedStorage': " + ex.getMessage(); - throw new CloudRuntimeException(errMsg); + throw new CloudRuntimeException(errMsg, ex); } finally { CopyCmdAnswer copyCmdAnswer; @@ -826,24 +837,73 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { _volumeDao.update(srcVolumeInfo.getId(), volumeVO); } - private void handleVolumeMigrationForKVM(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo) { + private void handleVolumeMigrationForKVM(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo, AsyncCompletionCallback callback) { VirtualMachine vm = srcVolumeInfo.getAttachedVM(); - if (vm != 
null && vm.getState() != VirtualMachine.State.Stopped) { - throw new CloudRuntimeException("Currently, if a volume to migrate from non-managed storage to managed storage on KVM is attached to " + - "a VM, the VM must be in the Stopped state."); + checkAvailableForMigration(vm); + + String errMsg = null; + try { + destVolumeInfo.getDataStore().getDriver().createAsync(destVolumeInfo.getDataStore(), destVolumeInfo, null); + VolumeVO volumeVO = _volumeDao.findById(destVolumeInfo.getId()); + updatePathFromScsiName(volumeVO); + destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId(), destVolumeInfo.getDataStore()); + HostVO hostVO = getHostOnWhichToExecuteMigrationCommand(srcVolumeInfo, destVolumeInfo); + + // migrate the volume via the hypervisor + String path = migrateVolumeForKVM(srcVolumeInfo, destVolumeInfo, hostVO, "Unable to migrate the volume from non-managed storage to managed storage"); + + updateVolumePath(destVolumeInfo.getId(), path); + volumeVO = _volumeDao.findById(destVolumeInfo.getId()); + // only set this if it was not set. 
default to QCOW2 for KVM + if (volumeVO.getFormat() == null) { + volumeVO.setFormat(ImageFormat.QCOW2); + _volumeDao.update(volumeVO.getId(), volumeVO); + } + } catch (Exception ex) { + errMsg = "Primary storage migration failed due to an unexpected error: " + + ex.getMessage(); + if (ex instanceof CloudRuntimeException) { + throw ex; + } else { + throw new CloudRuntimeException(errMsg, ex); + } + } finally { + CopyCmdAnswer copyCmdAnswer; + if (errMsg != null) { + copyCmdAnswer = new CopyCmdAnswer(errMsg); + } + else { + destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId(), destVolumeInfo.getDataStore()); + DataTO dataTO = destVolumeInfo.getTO(); + copyCmdAnswer = new CopyCmdAnswer(dataTO); + } + + CopyCommandResult result = new CopyCommandResult(null, copyCmdAnswer); + result.setResult(errMsg); + callback.complete(result); } + } - destVolumeInfo.getDataStore().getDriver().createAsync(destVolumeInfo.getDataStore(), destVolumeInfo, null); + private void checkAvailableForMigration(VirtualMachine vm) { + if (vm != null && (vm.getState() != VirtualMachine.State.Stopped && vm.getState() != VirtualMachine.State.Migrating)) { + throw new CloudRuntimeException("Currently, if a volume to migrate from non-managed storage to managed storage on KVM is attached to " + + "a VM, the VM must be in the Stopped or Migrating state."); + } + } - VolumeVO volumeVO = _volumeDao.findById(destVolumeInfo.getId()); - - volumeVO.setPath(volumeVO.get_iScsiName()); - - _volumeDao.update(volumeVO.getId(), volumeVO); - - destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId(), destVolumeInfo.getDataStore()); + /** + * Only update the path from the iscsiName if the iscsiName is set. Otherwise take no action to avoid nullifying the path + * with a previously set path value. 
+ */ + private void updatePathFromScsiName(VolumeVO volumeVO) { + if (volumeVO.get_iScsiName() != null) { + volumeVO.setPath(volumeVO.get_iScsiName()); + _volumeDao.update(volumeVO.getId(), volumeVO); + } + } + private HostVO getHostOnWhichToExecuteMigrationCommand(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo) { long srcStoragePoolId = srcVolumeInfo.getPoolId(); StoragePoolVO srcStoragePoolVO = _storagePoolDao.findById(srcStoragePoolId); @@ -856,14 +916,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { hostVO = getHost(destVolumeInfo.getDataCenterId(), HypervisorType.KVM, false); } - // migrate the volume via the hypervisor - migrateVolumeForKVM(srcVolumeInfo, destVolumeInfo, hostVO, "Unable to migrate the volume from non-managed storage to managed storage"); - - volumeVO = _volumeDao.findById(destVolumeInfo.getId()); - - volumeVO.setFormat(ImageFormat.QCOW2); - - _volumeDao.update(volumeVO.getId(), volumeVO); + return hostVO; } /** @@ -1075,7 +1128,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { catch (Exception ex) { errMsg = ex.getMessage(); - throw new CloudRuntimeException(errMsg); + throw new CloudRuntimeException(errMsg, ex); } finally { if (usingBackendSnapshot) { @@ -1293,7 +1346,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { catch (Exception ex) { errMsg = "Copy operation failed in 'StorageSystemDataMotionStrategy.handleCreateManagedVolumeFromNonManagedSnapshot': " + ex.getMessage(); - throw new CloudRuntimeException(errMsg); + throw new CloudRuntimeException(errMsg, ex); } finally { handleQualityOfServiceForVolumeMigration(volumeInfo, PrimaryDataStoreDriver.QualityOfServiceState.NO_MIGRATION); @@ -1674,6 +1727,42 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { return copyCmdAnswer; } + /** + * Use normal volume semantics (create a volume known to cloudstack, ask the storage driver to create it as a copy of the 
snapshot) + + * @param volumeVO + * @param snapshotInfo + */ + public void prepTempVolumeForCopyFromSnapshot(SnapshotInfo snapshotInfo) { + VolumeVO volumeVO = null; + try { + volumeVO = new VolumeVO(Volume.Type.DATADISK, snapshotInfo.getName() + "_" + System.currentTimeMillis() + ".TMP", + snapshotInfo.getDataCenterId(), snapshotInfo.getDomainId(), snapshotInfo.getAccountId(), 0, ProvisioningType.THIN, snapshotInfo.getSize(), 0L, 0L, ""); + volumeVO.setPoolId(snapshotInfo.getDataStore().getId()); + _volumeDao.persist(volumeVO); + VolumeInfo tempVolumeInfo = this._volFactory.getVolume(volumeVO.getId()); + + if (snapshotInfo.getDataStore().getDriver().canCopy(snapshotInfo, tempVolumeInfo)) { + snapshotInfo.getDataStore().getDriver().copyAsync(snapshotInfo, tempVolumeInfo, null, null); + // refresh volume info as data could have changed + tempVolumeInfo = this._volFactory.getVolume(volumeVO.getId()); + // save the "temp" volume info into the snapshot details (we need this to clean up at the end) + _snapshotDetailsDao.addDetail(snapshotInfo.getId(), "TemporaryVolumeCopyUUID", tempVolumeInfo.getUuid(), true); + _snapshotDetailsDao.addDetail(snapshotInfo.getId(), "TemporaryVolumeCopyPath", tempVolumeInfo.getPath(), true); + // NOTE: for this to work, the Driver must return a custom SnapshotObjectTO object from getTO() + // whenever the TemporaryVolumeCopyPath is set. + } else { + throw new CloudRuntimeException("Storage driver indicated it could create a volume from the snapshot but rejected the subsequent request to do so"); + } + } catch (Throwable e) { + // cleanup temporary volume + if (volumeVO != null) { + _volumeDao.remove(volumeVO.getId()); + } + throw e; + } + } + /** * If the underlying storage system is making use of read-only snapshots, this gives the storage system the opportunity to * create a volume from the snapshot so that we can copy the VHD file that should be inside of the snapshot to secondary storage. 
@@ -1685,8 +1774,13 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { * resign the SR and the VDI that should be inside of the snapshot before copying the VHD file to secondary storage. */ private void createVolumeFromSnapshot(SnapshotInfo snapshotInfo) { - SnapshotDetailsVO snapshotDetails = handleSnapshotDetails(snapshotInfo.getId(), "create"); + if ("true".equalsIgnoreCase(snapshotInfo.getDataStore().getDriver().getCapabilities().get("CAN_CREATE_TEMP_VOLUME_FROM_SNAPSHOT"))) { + prepTempVolumeForCopyFromSnapshot(snapshotInfo); + return; + } + + SnapshotDetailsVO snapshotDetails = handleSnapshotDetails(snapshotInfo.getId(), "create"); try { snapshotInfo.getDataStore().getDriver().createAsync(snapshotInfo.getDataStore(), snapshotInfo, null); } @@ -1701,6 +1795,24 @@ * invocation of createVolumeFromSnapshot(SnapshotInfo). */ private void deleteVolumeFromSnapshot(SnapshotInfo snapshotInfo) { + VolumeVO volumeVO = null; + // cleanup any temporary volume previously created for copy from a snapshot + if ("true".equalsIgnoreCase(snapshotInfo.getDataStore().getDriver().getCapabilities().get("CAN_CREATE_TEMP_VOLUME_FROM_SNAPSHOT"))) { + SnapshotDetailsVO tempUuid = null; + tempUuid = _snapshotDetailsDao.findDetail(snapshotInfo.getId(), "TemporaryVolumeCopyUUID"); + if (tempUuid == null || tempUuid.getValue() == null) { + return; + } + + volumeVO = _volumeDao.findByUuid(tempUuid.getValue()); + if (volumeVO != null) { + _volumeDao.remove(volumeVO.getId()); + } + // remove BOTH copy-tracking details (removeDetail covers the UUID row; the path detail must also go, otherwise a stale "TemporaryVolumeCopyPath" would be picked up by later template copies) + _snapshotDetailsDao.removeDetail(snapshotInfo.getId(), "TemporaryVolumeCopyUUID"); + _snapshotDetailsDao.removeDetail(snapshotInfo.getId(), "TemporaryVolumeCopyPath"); + return; + } + SnapshotDetailsVO snapshotDetails = handleSnapshotDetails(snapshotInfo.getId(), "delete"); try { @@ -2363,7 +2475,10 @@ try { StoragePoolVO storagePoolVO = 
_storagePoolDao.findById(volumeInfo.getPoolId()); - if (!ImageFormat.QCOW2.equals(volumeInfo.getFormat()) && !(ImageFormat.RAW.equals(volumeInfo.getFormat()) && StoragePoolType.PowerFlex == storagePoolVO.getPoolType())) { + if (!ImageFormat.QCOW2.equals(volumeInfo.getFormat()) && + !(ImageFormat.RAW.equals(volumeInfo.getFormat()) && ( + StoragePoolType.PowerFlex == storagePoolVO.getPoolType() || + StoragePoolType.FiberChannel == storagePoolVO.getPoolType()))) { throw new CloudRuntimeException("When using managed storage, you can only create a template from a volume on KVM currently."); } @@ -2506,7 +2621,13 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { long snapshotId = snapshotInfo.getId(); - if (storagePoolVO.getPoolType() == StoragePoolType.PowerFlex) { + // if the snapshot required a temporary volume be created check if the UUID is set so we can + // retrieve the temporary volume's path to use during remote copy + List storedDetails = _snapshotDetailsDao.findDetails(snapshotInfo.getId(), "TemporaryVolumeCopyPath"); + if (storedDetails != null && storedDetails.size() > 0) { + String value = storedDetails.get(0).getValue(); + snapshotDetails.put(DiskTO.PATH, value); + } else if (storagePoolVO.getPoolType() == StoragePoolType.PowerFlex || storagePoolVO.getPoolType() == StoragePoolType.FiberChannel) { snapshotDetails.put(DiskTO.IQN, snapshotInfo.getPath()); } else { snapshotDetails.put(DiskTO.IQN, getSnapshotProperty(snapshotId, DiskTO.IQN)); @@ -2718,8 +2839,6 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { } private String migrateVolumeForKVM(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo, HostVO hostVO, String errMsg) { - boolean srcVolumeDetached = srcVolumeInfo.getAttachedVM() == null; - try { Map srcDetails = getVolumeDetails(srcVolumeInfo); Map destDetails = getVolumeDetails(destVolumeInfo); @@ -2727,16 +2846,11 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy 
{ MigrateVolumeCommand migrateVolumeCommand = new MigrateVolumeCommand(srcVolumeInfo.getTO(), destVolumeInfo.getTO(), srcDetails, destDetails, StorageManager.KvmStorageOfflineMigrationWait.value()); - if (srcVolumeDetached) { - _volumeService.grantAccess(srcVolumeInfo, hostVO, srcVolumeInfo.getDataStore()); - } - + _volumeService.grantAccess(srcVolumeInfo, hostVO, srcVolumeInfo.getDataStore()); handleQualityOfServiceForVolumeMigration(destVolumeInfo, PrimaryDataStoreDriver.QualityOfServiceState.MIGRATION); - _volumeService.grantAccess(destVolumeInfo, hostVO, destVolumeInfo.getDataStore()); MigrateVolumeAnswer migrateVolumeAnswer = (MigrateVolumeAnswer)agentManager.send(hostVO.getId(), migrateVolumeCommand); - if (migrateVolumeAnswer == null || !migrateVolumeAnswer.getResult()) { if (migrateVolumeAnswer != null && StringUtils.isNotEmpty(migrateVolumeAnswer.getDetails())) { throw new CloudRuntimeException(migrateVolumeAnswer.getDetails()); @@ -2745,42 +2859,22 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { throw new CloudRuntimeException(errMsg); } } - - if (srcVolumeDetached) { - _volumeService.revokeAccess(destVolumeInfo, hostVO, destVolumeInfo.getDataStore()); - } - - try { - _volumeService.revokeAccess(srcVolumeInfo, hostVO, srcVolumeInfo.getDataStore()); - } - catch (Exception e) { - // This volume should be deleted soon, so just log a warning here. - LOGGER.warn(e.getMessage(), e); - } - return migrateVolumeAnswer.getVolumePath(); - } - catch (Exception ex) { + } catch (CloudRuntimeException ex) { + throw ex; + } catch (Exception ex) { + throw new CloudRuntimeException("Unexpected error during volume migration: " + ex.getMessage(), ex); + } finally { try { - _volumeService.revokeAccess(destVolumeInfo, hostVO, destVolumeInfo.getDataStore()); - } - catch (Exception e) { - // This volume should be deleted soon, so just log a warning here. 
- LOGGER.warn(e.getMessage(), e); - } - - if (srcVolumeDetached) { _volumeService.revokeAccess(srcVolumeInfo, hostVO, srcVolumeInfo.getDataStore()); + _volumeService.revokeAccess(destVolumeInfo, hostVO, destVolumeInfo.getDataStore()); + handleQualityOfServiceForVolumeMigration(destVolumeInfo, PrimaryDataStoreDriver.QualityOfServiceState.NO_MIGRATION); + } catch (Throwable e) { + LOGGER.warn("During cleanup post-migration an exception occurred: " + e); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Exception during post-migration cleanup.", e); + } } - - String msg = "Failed to perform volume migration : "; - - LOGGER.warn(msg, ex); - - throw new CloudRuntimeException(msg + ex.getMessage(), ex); - } - finally { - handleQualityOfServiceForVolumeMigration(destVolumeInfo, PrimaryDataStoreDriver.QualityOfServiceState.NO_MIGRATION); } } diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java index 47577cc52b2..c0ef227251c 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java @@ -882,9 +882,7 @@ public class VolumeServiceImpl implements VolumeService { */ private TemplateInfo createManagedTemplateVolume(TemplateInfo srcTemplateInfo, PrimaryDataStore destPrimaryDataStore) { // create a template volume on primary storage - AsyncCallFuture createTemplateFuture = new AsyncCallFuture<>(); TemplateInfo templateOnPrimary = (TemplateInfo)destPrimaryDataStore.create(srcTemplateInfo, srcTemplateInfo.getDeployAsIsConfiguration()); - VMTemplateStoragePoolVO templatePoolRef = _tmpltPoolDao.findByPoolTemplate(destPrimaryDataStore.getId(), templateOnPrimary.getId(), srcTemplateInfo.getDeployAsIsConfiguration()); if (templatePoolRef == null) {
VolumeServiceImpl implements VolumeService { // At this point, we have an entry in the DB that points to our cached template. // We need to lock it as there may be other VMs that may get started using the same template. // We want to avoid having to create multiple cache copies of the same template. - int storagePoolMaxWaitSeconds = NumbersUtil.parseInt(configDao.getValue(Config.StoragePoolMaxWaitSeconds.key()), 3600); long templatePoolRefId = templatePoolRef.getId(); @@ -909,28 +906,27 @@ public class VolumeServiceImpl implements VolumeService { try { // create a cache volume on the back-end - templateOnPrimary.processEvent(Event.CreateOnlyRequested); + CreateAsyncCompleteCallback callback = new CreateAsyncCompleteCallback(); - CreateVolumeContext createContext = new CreateVolumeContext<>(null, templateOnPrimary, createTemplateFuture); - AsyncCallbackDispatcher createCaller = AsyncCallbackDispatcher.create(this); - - createCaller.setCallback(createCaller.getTarget().createManagedTemplateImageCallback(null, null)).setContext(createContext); - - destPrimaryDataStore.getDriver().createAsync(destPrimaryDataStore, templateOnPrimary, createCaller); - - VolumeApiResult result = createTemplateFuture.get(); - - if (result.isFailed()) { - String errMesg = result.getResult(); - + destPrimaryDataStore.getDriver().createAsync(destPrimaryDataStore, templateOnPrimary, callback); + // validate we got a good result back + if (callback.result == null || callback.result.isFailed()) { + String errMesg; + if (callback.result == null) { + errMesg = "Unknown/unable to determine result"; + } else { + errMesg = callback.result.getResult(); + } + templateOnPrimary.processEvent(Event.OperationFailed); throw new CloudRuntimeException("Unable to create template " + templateOnPrimary.getId() + " on primary storage " + destPrimaryDataStore.getId() + ":" + errMesg); } + + templateOnPrimary.processEvent(Event.OperationSuccessed); + } catch (Throwable e) { s_logger.debug("Failed to create template 
volume on storage", e); - templateOnPrimary.processEvent(Event.OperationFailed); - throw new CloudRuntimeException(e.getMessage()); + // mark the template failed and re-throw: only logging here would swallow the failure (including the CloudRuntimeException thrown above) and return the template as if creation succeeded + templateOnPrimary.processEvent(Event.OperationFailed); + throw new CloudRuntimeException(e.getMessage(), e); } finally { _tmpltPoolDao.releaseFromLockTable(templatePoolRefId); @@ -939,6 +935,17 @@ public class VolumeServiceImpl implements VolumeService { return templateOnPrimary; } + private static class CreateAsyncCompleteCallback implements AsyncCompletionCallback { + + public CreateCmdResult result; + + @Override + public void complete(CreateCmdResult result) { + this.result = result; + } + + } + /** * This function copies a template from secondary storage to a template volume * created on managed storage. This template volume will be used as a cache. @@ -1464,6 +1471,16 @@ public class VolumeServiceImpl implements VolumeService { if (templatePoolRef.getDownloadState() == Status.NOT_DOWNLOADED) { copyTemplateToManagedTemplateVolume(srcTemplateInfo, templateOnPrimary, templatePoolRef, destPrimaryDataStore, destHost); } + } catch (Exception e) { + if (templateOnPrimary != null) { + templateOnPrimary.processEvent(Event.OperationFailed); + } + VolumeApiResult result = new VolumeApiResult(volumeInfo); + result.setResult(e.getLocalizedMessage()); + result.setSuccess(false); + future.complete(result); + s_logger.warn("Failed to create template on primary storage", e); + return future; } finally { if (lock != null) { lock.unlock(); diff --git a/plugins/acl/project-role-based/src/main/java/org/apache/cloudstack/acl/ProjectRoleBasedApiAccessChecker.java b/plugins/acl/project-role-based/src/main/java/org/apache/cloudstack/acl/ProjectRoleBasedApiAccessChecker.java index 9363ebd2379..0306a062df9 100644 --- a/plugins/acl/project-role-based/src/main/java/org/apache/cloudstack/acl/ProjectRoleBasedApiAccessChecker.java +++ b/plugins/acl/project-role-based/src/main/java/org/apache/cloudstack/acl/ProjectRoleBasedApiAccessChecker.java @@ -61,7 +61,9 @@ public class ProjectRoleBasedApiAccessChecker extends AdapterBase implements AP @Override public boolean 
isEnabled() { if (!roleService.isEnabled()) { - LOGGER.trace("RoleService is disabled. We will not use ProjectRoleBasedApiAccessChecker."); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("RoleService is disabled. We will not use ProjectRoleBasedApiAccessChecker."); + } } return roleService.isEnabled(); } @@ -119,7 +121,9 @@ public class ProjectRoleBasedApiAccessChecker extends AdapterBase implements AP Account userAccount = accountService.getAccount(user.getAccountId()); if (accountService.isRootAdmin(userAccount.getId()) || accountService.isDomainAdmin(userAccount.getAccountId())) { - LOGGER.info(String.format("Account [%s] is Root Admin or Domain Admin, all APIs are allowed.", userAccount.getAccountName())); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace(String.format("Account [%s] is Root Admin or Domain Admin, all APIs are allowed.", userAccount.getAccountName())); + } return true; } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateVolumeCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateVolumeCommandWrapper.java index 5c893e5d12f..2a09c340891 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateVolumeCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateVolumeCommandWrapper.java @@ -279,6 +279,10 @@ public class LibvirtMigrateVolumeCommandWrapper extends CommandWrapper srcDetails = command.getSrcDetails(); String srcPath = srcDetails != null ? srcDetails.get(DiskTO.IQN) : srcVolumeObjectTO.getPath(); + // its possible a volume has details but is not using IQN addressing... 
+ if (srcPath == null) { + srcPath = srcVolumeObjectTO.getPath(); + } VolumeObjectTO destVolumeObjectTO = (VolumeObjectTO)command.getDestData(); PrimaryDataStoreTO destPrimaryDataStore = (PrimaryDataStoreTO)destVolumeObjectTO.getDataStore(); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtResizeVolumeCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtResizeVolumeCommandWrapper.java index 36ff69d83af..4f1ad728b5d 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtResizeVolumeCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtResizeVolumeCommandWrapper.java @@ -50,6 +50,7 @@ import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk; import com.cloud.hypervisor.kvm.storage.KVMStoragePool; import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager; +import com.cloud.hypervisor.kvm.storage.MultipathSCSIPool; import com.cloud.resource.CommandWrapper; import com.cloud.resource.ResourceWrapper; import com.cloud.storage.Storage.StoragePoolType; @@ -84,6 +85,10 @@ public final class LibvirtResizeVolumeCommandWrapper extends CommandWrapper; connid= + String type = null; + String address = null; + String connectionId = null; + String path = null; + String[] parts = inPath.split(";"); + // handle initial code of wwn only + if (parts.length == 1) { + type = "FIBERWWN"; + address = parts[0]; + } else { + for (String part: parts) { + String[] pair = part.split("="); + if (pair.length == 2) { + String key = pair[0].trim(); + String value = pair[1].trim(); + if (key.equals("type")) { + type = value.toUpperCase(); + } else if (key.equals("address")) { + address = value; + } else if (key.equals("connid")) { + connectionId = value; + } + } + } + } + + if ("FIBERWWN".equals(type)) { + path = 
"/dev/mapper/3" + address; + } else { + throw new CloudRuntimeException("Invalid address type provided for target disk: " + type); + } + + return new AddressInfo(type, address, connectionId, path); + } +} diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java index dd31025d35f..1be4a8b6185 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java @@ -290,9 +290,12 @@ public class KVMStorageProcessor implements StorageProcessor { final TemplateObjectTO newTemplate = new TemplateObjectTO(); newTemplate.setPath(primaryVol.getName()); newTemplate.setSize(primaryVol.getSize()); - if (primaryPool.getType() == StoragePoolType.RBD || - primaryPool.getType() == StoragePoolType.PowerFlex || - primaryPool.getType() == StoragePoolType.Linstor) { + + if(List.of( + StoragePoolType.RBD, + StoragePoolType.PowerFlex, + StoragePoolType.Linstor, + StoragePoolType.FiberChannel).contains(primaryPool.getType())) { newTemplate.setFormat(ImageFormat.RAW); } else { newTemplate.setFormat(ImageFormat.QCOW2); @@ -584,7 +587,9 @@ public class KVMStorageProcessor implements StorageProcessor { public Answer createTemplateFromVolume(final CopyCommand cmd) { Map details = cmd.getOptions(); - if (details != null && details.get(DiskTO.IQN) != null) { + // handle cases where the managed storage driver had to make a temporary volume from + // the snapshot in order to support the copy + if (details != null && (details.get(DiskTO.IQN) != null || details.get(DiskTO.PATH) != null)) { // use the managed-storage approach return createTemplateFromVolumeOrSnapshot(cmd); } @@ -712,7 +717,7 @@ public class KVMStorageProcessor implements StorageProcessor { public Answer createTemplateFromSnapshot(CopyCommand cmd) 
{ Map details = cmd.getOptions(); - if (details != null && details.get(DiskTO.IQN) != null) { + if (details != null && (details.get(DiskTO.IQN) != null || details.get(DiskTO.PATH) != null)) { // use the managed-storage approach return createTemplateFromVolumeOrSnapshot(cmd); } @@ -750,12 +755,15 @@ public class KVMStorageProcessor implements StorageProcessor { KVMStoragePool secondaryStorage = null; try { + // look for options indicating an overridden path or IQN. Used when snapshots have to be + // temporarily copied on the managed storage device before the actual copy to target object Map details = cmd.getOptions(); - - String path = details != null ? details.get(DiskTO.IQN) : null; - + String path = details != null ? details.get(DiskTO.PATH) : null; if (path == null) { - new CloudRuntimeException("The 'path' field must be specified."); + path = details != null ? details.get(DiskTO.IQN) : null; + if (path == null) { + throw new CloudRuntimeException("The 'path' or 'iqn' field must be specified."); + } } storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path, details); @@ -2188,7 +2196,16 @@ public class KVMStorageProcessor implements StorageProcessor { Map details = cmd.getOptions2(); - String path = details != null ? details.get(DiskTO.IQN) : null; + String path = cmd.getDestTO().getPath(); + if (path == null) { + path = details != null ? details.get(DiskTO.PATH) : null; + if (path == null) { + path = details != null ? 
details.get(DiskTO.IQN) : null; + if (path == null) { + throw new CloudRuntimeException("The 'path' or 'iqn' field must be specified."); + } + } + } storagePoolMgr.connectPhysicalDisk(pool.getPoolType(), pool.getUuid(), path, details); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java new file mode 100644 index 00000000000..06dea46a98d --- /dev/null +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java @@ -0,0 +1,758 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package com.cloud.hypervisor.kvm.storage; + +import java.io.BufferedReader; +import java.io.File; +import java.io.IOException; +import java.io.InputStreamReader; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Timer; +import java.util.TimerTask; +import java.util.UUID; +import java.util.concurrent.TimeUnit; + +import org.apache.cloudstack.utils.qemu.QemuImg; +import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; +import org.apache.cloudstack.utils.qemu.QemuImgException; +import org.apache.cloudstack.utils.qemu.QemuImgFile; +import org.apache.log4j.Logger; + +import com.cloud.storage.Storage; +import com.cloud.storage.StorageManager; +import com.cloud.utils.PropertiesUtil; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.script.OutputInterpreter; +import com.cloud.utils.script.Script; +import org.apache.commons.lang3.StringUtils; +import org.libvirt.LibvirtException; +import org.joda.time.Duration; + +public abstract class MultipathSCSIAdapterBase implements StorageAdaptor { + static final Logger LOGGER = Logger.getLogger(MultipathSCSIAdapterBase.class); + static final Map MapStorageUuidToStoragePool = new HashMap<>(); + + /** + * A lock to avoid any possibility of multiple requests for a scan + */ + static byte[] CLEANUP_LOCK = new byte[0]; + + /** + * Property keys and defaults + */ + static final Property CLEANUP_FREQUENCY_SECS = new Property("multimap.cleanup.frequency.secs", 60); + static final Property CLEANUP_TIMEOUT_SECS = new Property("multimap.cleanup.timeout.secs", 4); + static final Property CLEANUP_ENABLED = new Property("multimap.cleanup.enabled", true); + static final Property CLEANUP_SCRIPT = new Property("multimap.cleanup.script", "cleanStaleMaps.sh"); + static final Property CONNECT_SCRIPT = new Property("multimap.connect.script", "connectVolume.sh"); + static final Property COPY_SCRIPT = new Property("multimap.copy.script", 
"copyVolume.sh"); + static final Property DISCONNECT_SCRIPT = new Property("multimap.disconnect.script", "disconnectVolume.sh"); + static final Property RESIZE_SCRIPT = new Property("multimap.resize.script", "resizeVolume.sh"); + static final Property DISK_WAIT_SECS = new Property("multimap.disk.wait.secs", 240); + static final Property STORAGE_SCRIPTS_DIR = new Property("multimap.storage.scripts.dir", "scripts/storage/multipath"); + + static Timer cleanupTimer = new Timer(); + private static int cleanupTimeoutSecs = CLEANUP_TIMEOUT_SECS.getFinalValue(); + private static String connectScript = CONNECT_SCRIPT.getFinalValue(); + private static String disconnectScript = DISCONNECT_SCRIPT.getFinalValue(); + private static String cleanupScript = CLEANUP_SCRIPT.getFinalValue(); + private static String resizeScript = RESIZE_SCRIPT.getFinalValue(); + private static String copyScript = COPY_SCRIPT.getFinalValue(); + private static int diskWaitTimeSecs = DISK_WAIT_SECS.getFinalValue(); + + /** + * Initialize static program-wide configurations and background jobs + */ + static { + long cleanupFrequency = CLEANUP_FREQUENCY_SECS.getFinalValue() * 1000; + boolean cleanupEnabled = CLEANUP_ENABLED.getFinalValue(); + + + connectScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), connectScript); + if (connectScript == null) { + throw new Error("Unable to find the connectVolume.sh script"); + } + + disconnectScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), disconnectScript); + if (disconnectScript == null) { + throw new Error("Unable to find the disconnectVolume.sh script"); + } + + resizeScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), resizeScript); + if (resizeScript == null) { + throw new Error("Unable to find the resizeVolume.sh script"); + } + + copyScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), copyScript); + if (copyScript == null) { + throw new Error("Unable to find the copyVolume.sh script"); + } + + if 
(cleanupEnabled) { + cleanupScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), cleanupScript); + if (cleanupScript == null) { + throw new Error("Unable to find the cleanStaleMaps.sh script and " + CLEANUP_ENABLED.getName() + " is true"); + } + + TimerTask task = new TimerTask() { + @Override + public void run() { + try { + MultipathSCSIAdapterBase.cleanupStaleMaps(); + } catch (Throwable e) { + LOGGER.warn("Error running stale multipath map cleanup", e); + } + } + }; + + cleanupTimer = new Timer("MultipathMapCleanupJob"); + cleanupTimer.scheduleAtFixedRate(task, 0, cleanupFrequency); + } + } + + @Override + public KVMStoragePool getStoragePool(String uuid, boolean refreshInfo) { + return getStoragePool(uuid); + } + + public abstract String getName(); + + public abstract boolean isStoragePoolTypeSupported(Storage.StoragePoolType type); + + /** + * We expect WWN values in the volumePath so need to convert it to an actual physical path + */ + public abstract AddressInfo parseAndValidatePath(String path); + + @Override + public KVMPhysicalDisk getPhysicalDisk(String volumePath, KVMStoragePool pool) { + LOGGER.debug(String.format("getPhysicalDisk(volumePath,pool) called with args (%s,%s)", volumePath, pool)); + + if (StringUtils.isEmpty(volumePath) || pool == null) { + LOGGER.error("Unable to get physical disk, volume path or pool not specified"); + return null; + } + + AddressInfo address = parseAndValidatePath(volumePath); + return getPhysicalDisk(address, pool); + } + + private KVMPhysicalDisk getPhysicalDisk(AddressInfo address, KVMStoragePool pool) { + LOGGER.debug(String.format("getPhysicalDisk(addressInfo,pool) called with args (%s,%s)", address.getPath(), pool)); + KVMPhysicalDisk disk = new KVMPhysicalDisk(address.getPath(), address.toString(), pool); + disk.setFormat(QemuImg.PhysicalDiskFormat.RAW); + + long diskSize = getPhysicalDiskSize(address.getPath()); + disk.setSize(diskSize); + disk.setVirtualSize(diskSize); + LOGGER.debug("Physical disk " 
+ disk.getPath() + " with format " + disk.getFormat() + " and size " + disk.getSize() + " provided"); + return disk; + } + + @Override + public KVMStoragePool createStoragePool(String uuid, String host, int port, String path, String userInfo, Storage.StoragePoolType type, Map details) { + LOGGER.info(String.format("createStoragePool(uuid,host,port,path,type) called with args (%s, %s, %s, %s, %s)", uuid, host, ""+port, path, type)); + MultipathSCSIPool storagePool = new MultipathSCSIPool(uuid, host, port, path, type, details, this); + MapStorageUuidToStoragePool.put(uuid, storagePool); + return storagePool; + } + + @Override + public boolean deleteStoragePool(String uuid) { + return MapStorageUuidToStoragePool.remove(uuid) != null; + } + + @Override + public boolean connectPhysicalDisk(String volumePath, KVMStoragePool pool, Map details) { + LOGGER.info("connectPhysicalDisk called for [" + volumePath + "]"); + + if (StringUtils.isEmpty(volumePath)) { + LOGGER.error("Unable to connect physical disk due to insufficient data - volume path is undefined"); + throw new CloudRuntimeException("Unable to connect physical disk due to insufficient data - volume path is underfined"); + } + + if (pool == null) { + LOGGER.error("Unable to connect physical disk due to insufficient data - pool is not set"); + throw new CloudRuntimeException("Unable to connect physical disk due to insufficient data - pool is not set"); + } + + AddressInfo address = this.parseAndValidatePath(volumePath); + int waitTimeInSec = diskWaitTimeSecs; + if (details != null && details.containsKey(StorageManager.STORAGE_POOL_DISK_WAIT.toString())) { + String waitTime = details.get(StorageManager.STORAGE_POOL_DISK_WAIT.toString()); + if (StringUtils.isNotEmpty(waitTime)) { + waitTimeInSec = Integer.valueOf(waitTime).intValue(); + } + } + return waitForDiskToBecomeAvailable(address, pool, waitTimeInSec); + } + + @Override + public boolean disconnectPhysicalDisk(String volumePath, KVMStoragePool pool) { + 
LOGGER.debug(String.format("disconnectPhysicalDiskByPath(volumePath,pool) called with args (%s, %s) START", volumePath, pool.getUuid())); + AddressInfo address = this.parseAndValidatePath(volumePath); + ScriptResult result = runScript(disconnectScript, 60000L, address.getAddress().toLowerCase()); + if (LOGGER.isDebugEnabled()) LOGGER.debug("multipath flush output: " + result.getResult()); + LOGGER.debug(String.format("disconnectPhysicalDiskByPath(volumePath,pool) called with args (%s, %s) COMPLETE [rc=%s]", volumePath, pool.getUuid(), result.getResult())); return true; + } + + @Override + public boolean disconnectPhysicalDisk(Map volumeToDisconnect) { + LOGGER.debug(String.format("disconnectPhysicalDiskByPath(volumeToDisconnect) called with arg bag [not implemented]:") + " " + volumeToDisconnect); + return false; + } + + @Override + public boolean disconnectPhysicalDiskByPath(String localPath) { + LOGGER.debug(String.format("disconnectPhysicalDiskByPath(localPath) called with args (%s) STARTED", localPath)); + ScriptResult result = runScript(disconnectScript, 60000L, localPath.replace("/dev/mapper/3", "")); + if (LOGGER.isDebugEnabled()) LOGGER.debug("multipath flush output: " + result.getResult()); + LOGGER.debug(String.format("disconnectPhysicalDiskByPath(localPath) called with args (%s) COMPLETE [rc=%s]", localPath, result.getExitCode())); return true; + } + + @Override + public boolean deletePhysicalDisk(String uuid, KVMStoragePool pool, Storage.ImageFormat format) { + LOGGER.info(String.format("deletePhysicalDisk(uuid,pool,format) called with args (%s, %s, %s) [not implemented]", uuid, pool.getUuid(), format.toString())); + return true; + } + + @Override + public KVMPhysicalDisk createTemplateFromDisk(KVMPhysicalDisk disk, String name, QemuImg.PhysicalDiskFormat format, long size, KVMStoragePool destPool) { + LOGGER.info(String.format("createTemplateFromDisk(disk,name,format,size,destPool) called with args (%s, %s, %s, %s, %s) [not implemented]", 
disk.getPath(), name, format.toString(), ""+size, destPool.getUuid())); + return null; + } + + @Override + public List listPhysicalDisks(String storagePoolUuid, KVMStoragePool pool) { + LOGGER.info(String.format("listPhysicalDisks(uuid,pool) called with args (%s, %s) [not implemented]", storagePoolUuid, pool.getUuid())); + return null; + } + + @Override + public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMStoragePool destPool, int timeout) { + return copyPhysicalDisk(disk, name, destPool, timeout, null, null, null); + } + + @Override + public boolean refresh(KVMStoragePool pool) { + LOGGER.info(String.format("refresh(pool) called with args (%s)", pool.getUuid())); + return true; + } + + @Override + public boolean deleteStoragePool(KVMStoragePool pool) { + LOGGER.info(String.format("deleteStroagePool(pool) called with args (%s)", pool.getUuid())); + return deleteStoragePool(pool.getUuid()); + } + + @Override + public boolean createFolder(String uuid, String path) { + LOGGER.info(String.format("createFolder(uuid,path) called with args (%s, %s) [not implemented]", uuid, path)); + return createFolder(uuid, path, null); + } + + @Override + public boolean createFolder(String uuid, String path, String localPath) { + LOGGER.info(String.format("createFolder(uuid,path,localPath) called with args (%s, %s, %s) [not implemented]", uuid, path, localPath)); + return true; + } + + /** + * Validate inputs and return the source file for a template copy + * @param templateFilePath + * @param destTemplatePath + * @param destPool + * @param format + * @return + */ + File createTemplateFromDirectDownloadFileValidate(String templateFilePath, String destTemplatePath, KVMStoragePool destPool, Storage.ImageFormat format) { + if (StringUtils.isAnyEmpty(templateFilePath, destTemplatePath) || destPool == null) { + LOGGER.error("Unable to create template from direct download template file due to insufficient data"); + throw new CloudRuntimeException("Unable to create 
template from direct download template file due to insufficient data"); + } + + LOGGER.debug("Create template from direct download template - file path: " + templateFilePath + ", dest path: " + destTemplatePath + ", format: " + format.toString()); + + File sourceFile = new File(templateFilePath); + if (!sourceFile.exists()) { + throw new CloudRuntimeException("Direct download template file " + templateFilePath + " does not exist on this host"); + } + + if (destTemplatePath == null || destTemplatePath.isEmpty()) { + LOGGER.error("Failed to create template, target template disk path not provided"); + throw new CloudRuntimeException("Target template disk path not provided"); + } + + if (this.isStoragePoolTypeSupported(destPool.getType())) { + throw new CloudRuntimeException("Unsupported storage pool type: " + destPool.getType().toString()); + } + + if (Storage.ImageFormat.RAW.equals(format) && Storage.ImageFormat.QCOW2.equals(format)) { + LOGGER.error("Failed to create template, unsupported template format: " + format.toString()); + throw new CloudRuntimeException("Unsupported template format: " + format.toString()); + } + return sourceFile; + } + + String extractSourceTemplateIfNeeded(File sourceFile, String templateFilePath) { + String srcTemplateFilePath = templateFilePath; + if (isTemplateExtractable(templateFilePath)) { + srcTemplateFilePath = sourceFile.getParent() + "/" + UUID.randomUUID().toString(); + LOGGER.debug("Extract the downloaded template " + templateFilePath + " to " + srcTemplateFilePath); + String extractCommand = getExtractCommandForDownloadedFile(templateFilePath, srcTemplateFilePath); + Script.runSimpleBashScript(extractCommand); + Script.runSimpleBashScript("rm -f " + templateFilePath); + } + return srcTemplateFilePath; + } + + QemuImg.PhysicalDiskFormat deriveImgFileFormat(Storage.ImageFormat format) { + if (format == Storage.ImageFormat.RAW) { + return QemuImg.PhysicalDiskFormat.RAW; + } else if (format == Storage.ImageFormat.QCOW2) { + 
return QemuImg.PhysicalDiskFormat.QCOW2; + } else { + return QemuImg.PhysicalDiskFormat.RAW; + } + } + + @Override + public KVMPhysicalDisk createTemplateFromDirectDownloadFile(String templateFilePath, String destTemplatePath, KVMStoragePool destPool, Storage.ImageFormat format, int timeout) { + File sourceFile = createTemplateFromDirectDownloadFileValidate(templateFilePath, destTemplatePath, destPool, format); + LOGGER.debug("Create template from direct download template - file path: " + templateFilePath + ", dest path: " + destTemplatePath + ", format: " + format.toString()); + KVMPhysicalDisk sourceDisk = destPool.getPhysicalDisk(sourceFile.getAbsolutePath()); + return copyPhysicalDisk(sourceDisk, destTemplatePath, destPool, timeout, null, null, Storage.ProvisioningType.THIN); + } + + @Override + public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMStoragePool destPool, int timeout, + byte[] srcPassphrase, byte[] dstPassphrase, Storage.ProvisioningType provisioningType) { + + validateForDiskCopy(disk, name, destPool); + LOGGER.info("Copying FROM source physical disk " + disk.getPath() + ", size: " + disk.getSize() + ", virtualsize: " + disk.getVirtualSize()+ ", format: " + disk.getFormat()); + + KVMPhysicalDisk destDisk = destPool.getPhysicalDisk(name); + if (destDisk == null) { + LOGGER.error("Failed to find the disk: " + name + " of the storage pool: " + destPool.getUuid()); + throw new CloudRuntimeException("Failed to find the disk: " + name + " of the storage pool: " + destPool.getUuid()); + } + + if (srcPassphrase != null || dstPassphrase != null) { + throw new CloudRuntimeException("Storage provider does not support user-space encrypted source or destination volumes"); + } + + destDisk.setFormat(QemuImg.PhysicalDiskFormat.RAW); + destDisk.setVirtualSize(disk.getVirtualSize()); + destDisk.setSize(disk.getSize()); + + LOGGER.info("Copying TO destination physical disk " + destDisk.getPath() + ", size: " + destDisk.getSize() + ", 
virtualsize: " + destDisk.getVirtualSize()+ ", format: " + destDisk.getFormat()); + QemuImgFile srcFile = new QemuImgFile(disk.getPath(), disk.getFormat()); + QemuImgFile destFile = new QemuImgFile(destDisk.getPath(), destDisk.getFormat()); + LOGGER.debug("Starting COPY from source downloaded template " + srcFile.getFileName() + " to Primera volume: " + destDisk.getPath()); + ScriptResult result = runScript(copyScript, timeout, destDisk.getFormat().toString().toLowerCase(), srcFile.getFileName(), destFile.getFileName()); + int rc = result.getExitCode(); + if (rc != 0) { + throw new CloudRuntimeException("Failed to convert from " + srcFile.getFileName() + " to " + destFile.getFileName() + " the error was: " + rc + " - " + result.getResult()); + } + LOGGER.debug("Successfully converted source downloaded template " + srcFile.getFileName() + " to Primera volume: " + destDisk.getPath() + " " + result.getResult()); + + return destDisk; + } + + void validateForDiskCopy(KVMPhysicalDisk disk, String name, KVMStoragePool destPool) { + if (StringUtils.isEmpty(name) || disk == null || destPool == null) { + LOGGER.error("Unable to copy physical disk due to insufficient data"); + throw new CloudRuntimeException("Unable to copy physical disk due to insufficient data"); + } + } + + /** + * Copy a disk path to another disk path using QemuImg command + * @param disk + * @param destDisk + * @param name + * @param timeout + */ + void qemuCopy(KVMPhysicalDisk disk, KVMPhysicalDisk destDisk, String name, int timeout) { + QemuImg qemu; + try { + qemu = new QemuImg(timeout); + } catch (LibvirtException | QemuImgException e) { + throw new CloudRuntimeException (e); + } + QemuImgFile srcFile = null; + QemuImgFile destFile = null; + + try { + srcFile = new QemuImgFile(disk.getPath(), disk.getFormat()); + destFile = new QemuImgFile(destDisk.getPath(), destDisk.getFormat()); + + LOGGER.debug("Starting copy from source disk image " + srcFile.getFileName() + " to volume: " + destDisk.getPath()); 
+ qemu.convert(srcFile, destFile, true); + LOGGER.debug("Successfully converted source disk image " + srcFile.getFileName() + " to volume: " + destDisk.getPath()); + } catch (QemuImgException | LibvirtException e) { + try { + Map srcInfo = qemu.info(srcFile); + LOGGER.debug("Source disk info: " + Arrays.asList(srcInfo)); + } catch (Exception ignored) { + LOGGER.warn("Unable to get info from source disk: " + disk.getName()); + } + + String errMsg = String.format("Unable to convert/copy from %s to %s, due to: %s", disk.getName(), name, ((StringUtils.isEmpty(e.getMessage())) ? "an unknown error" : e.getMessage())); + LOGGER.error(errMsg); + throw new CloudRuntimeException(errMsg, e); + } + } + + @Override + public KVMPhysicalDisk createDiskFromTemplate(KVMPhysicalDisk template, + String name, PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size, + KVMStoragePool destPool, int timeout, byte[] passphrase) { + throw new UnsupportedOperationException("Unimplemented method 'createDiskFromTemplate'"); + } + + @Override + public KVMPhysicalDisk createDiskFromTemplateBacking(KVMPhysicalDisk template, + String name, PhysicalDiskFormat format, long size, + KVMStoragePool destPool, int timeout, byte[] passphrase) { + throw new UnsupportedOperationException("Unimplemented method 'createDiskFromTemplateBacking'"); + } + + @Override + public KVMPhysicalDisk createPhysicalDisk(String name, KVMStoragePool pool, + PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size, byte[] passphrase) { + throw new UnsupportedOperationException("Unimplemented method 'createPhysicalDisk'"); + } + + boolean isTemplateExtractable(String templatePath) { + ScriptResult result = runScript("file", 5000L, templatePath, "| awk -F' ' '{print $2}'"); + String type = result.getResult(); + return type.equalsIgnoreCase("bzip2") || type.equalsIgnoreCase("gzip") || type.equalsIgnoreCase("zip"); + } + + String getExtractCommandForDownloadedFile(String 
downloadedTemplateFile, String templateFile) { + if (downloadedTemplateFile.endsWith(".zip")) { + return "unzip -p " + downloadedTemplateFile + " | cat > " + templateFile; + } else if (downloadedTemplateFile.endsWith(".bz2")) { + return "bunzip2 -c " + downloadedTemplateFile + " > " + templateFile; + } else if (downloadedTemplateFile.endsWith(".gz")) { + return "gunzip -c " + downloadedTemplateFile + " > " + templateFile; + } else { + throw new CloudRuntimeException("Unable to extract template " + downloadedTemplateFile); + } + } + + private static final ScriptResult runScript(String script, long timeout, String...args) { + ScriptResult result = new ScriptResult(); + Script cmd = new Script(script, Duration.millis(timeout), LOGGER); + cmd.add(args); + OutputInterpreter.OneLineParser parser = new OutputInterpreter.OneLineParser(); + String output = cmd.execute(parser); + // its possible the process never launches which causes an NPE on getExitValue below + if (output != null && output.contains("Unable to execute the command")) { + result.setResult(output); + result.setExitCode(-1); + return result; + } + result.setResult(output); + result.setExitCode(cmd.getExitValue()); + return result; + } + + boolean waitForDiskToBecomeAvailable(AddressInfo address, KVMStoragePool pool, long waitTimeInSec) { + LOGGER.debug("Waiting for the volume with id: " + address.getPath() + " of the storage pool: " + pool.getUuid() + " to become available for " + waitTimeInSec + " secs"); + long scriptTimeoutSecs = 30; // how long to wait for each script execution to run + long maxTries = 10; // how many max retries to attempt the script + long waitTimeInMillis = waitTimeInSec * 1000; // how long overall to wait + int timeBetweenTries = 1000; // how long to sleep between tries + // wait at least 60 seconds even if input was lower + if (waitTimeInSec < 60) { + waitTimeInSec = 60; + } + KVMPhysicalDisk physicalDisk = null; + + // Rescan before checking for the physical disk + int tries = 0; + 
while (waitTimeInMillis > 0 && tries < maxTries) { + tries++; + long start = System.currentTimeMillis(); + String lun; + if (address.getConnectionId() == null) { + lun = "-"; + } else { + lun = address.getConnectionId(); + } + + Process p = null; + try { + ProcessBuilder builder = new ProcessBuilder(connectScript, lun, address.getAddress()); + p = builder.start(); + if (p.waitFor(scriptTimeoutSecs, TimeUnit.SECONDS)) { + int rc = p.exitValue(); + StringBuffer output = new StringBuffer(); + if (rc == 0) { + BufferedReader input = new BufferedReader(new InputStreamReader(p.getInputStream())); + String line = null; + while ((line = input.readLine()) != null) { + output.append(line); + output.append(" "); + } + + physicalDisk = getPhysicalDisk(address, pool); + if (physicalDisk != null && physicalDisk.getSize() > 0) { + LOGGER.debug("Found the volume using id: " + address.getPath() + " of the storage pool: " + pool.getUuid()); + return true; + } + + break; + } else { + LOGGER.warn("Failure discovering LUN via " + connectScript); + BufferedReader error = new BufferedReader(new InputStreamReader(p.getErrorStream())); + String line = null; + while ((line = error.readLine()) != null) { + LOGGER.warn("error --> " + line); + } + } + } else { + LOGGER.debug("Timeout waiting for " + connectScript + " to complete - try " + tries); + } + } catch (IOException | InterruptedException | IllegalThreadStateException e) { + LOGGER.warn("Problem performing scan on SCSI hosts - try " + tries, e); + } finally { + if (p != null && p.isAlive()) { + p.destroyForcibly(); + } + } + + long elapsed = System.currentTimeMillis() - start; + waitTimeInMillis = waitTimeInMillis - elapsed; + + try { + Thread.sleep(timeBetweenTries); + } catch (Exception ex) { + // don't do anything + } + } + + LOGGER.debug("Unable to find the volume with id: " + address.getPath() + " of the storage pool: " + pool.getUuid()); + return false; + } + + void runConnectScript(String lun, AddressInfo address) { + try { + 
ProcessBuilder builder = new ProcessBuilder(connectScript, lun, address.getAddress()); + Process p = builder.start(); + int rc = p.waitFor(); + StringBuffer output = new StringBuffer(); + if (rc == 0) { + BufferedReader input = new BufferedReader(new InputStreamReader(p.getInputStream())); + String line = null; + while ((line = input.readLine()) != null) { + output.append(line); + output.append(" "); + } + } else { + LOGGER.warn("Failure discovering LUN via " + connectScript); + BufferedReader error = new BufferedReader(new InputStreamReader(p.getErrorStream())); + String line = null; + while ((line = error.readLine()) != null) { + LOGGER.warn("error --> " + line); + } + } + } catch (IOException | InterruptedException e) { + throw new CloudRuntimeException("Problem performing scan on SCSI hosts", e); + } + } + + void sleep(long sleepTimeMs) { + try { + Thread.sleep(sleepTimeMs); + } catch (Exception ex) { + // don't do anything + } + } + + long getPhysicalDiskSize(String diskPath) { + if (StringUtils.isEmpty(diskPath)) { + return 0; + } + + Script diskCmd = new Script("blockdev", LOGGER); + diskCmd.add("--getsize64", diskPath); + + OutputInterpreter.OneLineParser parser = new OutputInterpreter.OneLineParser(); + String result = diskCmd.execute(parser); + + if (result != null) { + LOGGER.debug("Unable to get the disk size at path: " + diskPath); + return 0; + } + + Long size = Long.parseLong(parser.getLine()); + + if (size <= 0) { + // its possible the path can't be seen on the host yet, lets rescan + // now rerun the command + parser = new OutputInterpreter.OneLineParser(); + result = diskCmd.execute(parser); + + if (result != null) { + LOGGER.debug("Unable to get the disk size at path: " + diskPath); + return 0; + } + + size = Long.parseLong(parser.getLine()); + } + + return size; + } + + public void resize(String path, String vmName, long newSize) { + if (LOGGER.isDebugEnabled()) LOGGER.debug("Executing resize of " + path + " to " + newSize + " bytes for VM " + 
vmName); + + // extract wwid + AddressInfo address = parseAndValidatePath(path); + if (address == null || address.getAddress() == null) { + LOGGER.error("Unable to resize volume, address value is not valid"); + throw new CloudRuntimeException("Unable to resize volume, address value is not valid"); + } + + if (LOGGER.isDebugEnabled()) LOGGER.debug(String.format("Running %s %s %s %s", resizeScript, address.getAddress(), vmName, newSize)); + + // call resizeVolume.sh + ScriptResult result = runScript(resizeScript, 60000L, address.getAddress(), vmName, ""+newSize); + + if (result.getExitCode() != 0) { + throw new CloudRuntimeException("Failed to resize volume at address " + address.getAddress() + " to " + newSize + " bytes for VM " + vmName + ": " + result.getResult()); + } + + LOGGER.info("Resize of volume at address " + address.getAddress() + " completed successfully: " + result.getResult()); + } + + static void cleanupStaleMaps() { + synchronized(CLEANUP_LOCK) { + long start = System.currentTimeMillis(); + ScriptResult result = runScript(cleanupScript, cleanupTimeoutSecs * 1000); + LOGGER.debug("Multipath Cleanup Job elapsed time (ms): "+ (System.currentTimeMillis() - start) + "; result: " + result.getExitCode(), null); + } + } + + public static final class AddressInfo { + String type; + String address; + String connectionId; + String path; + + public AddressInfo(String type, String address, String connectionId, String path) { + this.type = type; + this.address = address; + this.connectionId = connectionId; + this.path = path; + } + + public String getType() { + return type; + } + + public String getAddress() { + return address; + } + + public String getConnectionId() { + return connectionId; + } + + public String getPath() { + return path; + } + + public String toString() { + return String.format("type=%s; address=%s; connid=%s", getType(), getAddress(), getConnectionId()); + } + } + + public static class Property { + private String name; + private T defaultValue; 
Property(String name, T value) {
            this.name = name;
            this.defaultValue = value;
        }

        /** @return the agent.properties key this property reads */
        public String getName() {
            return this.name;
        }

        /** @return the compiled-in default used when agent.properties is absent or unparseable */
        public T getDefaultValue() {
            return this.defaultValue;
        }

        /**
         * Resolve the property value from agent.properties, falling back to the default when
         * the file is missing, the key is blank, the file cannot be read, or the configured
         * value cannot be parsed as the default's type.
         */
        public T getFinalValue() {
            File agentPropertiesFile = PropertiesUtil.findConfigFile("agent.properties");
            if (agentPropertiesFile == null) {
                LOGGER.debug(String.format("File [%s] was not found, we will use default defined values. Property [%s]: [%s].", "agent.properties", name, defaultValue));
                return defaultValue;
            } else {
                try {
                    String configValue = PropertiesUtil.loadFromFile(agentPropertiesFile).getProperty(name);
                    if (StringUtils.isBlank(configValue)) {
                        LOGGER.debug(String.format("Property [%s] has empty or null value. Using default value [%s].", name, defaultValue));
                        return defaultValue;
                    } else {
                        // BUGFIX: Integer.getInteger()/Long.getLong() look up a JVM *system property*
                        // named by their argument - they never parse the string - so numeric agent
                        // properties always resolved to null. Parse the configured value instead.
                        if (defaultValue instanceof Integer) {
                            return (T)Integer.valueOf(configValue);
                        } else if (defaultValue instanceof Long) {
                            return (T)Long.valueOf(configValue);
                        } else if (defaultValue instanceof String) {
                            return (T)configValue;
                        } else if (defaultValue instanceof Boolean) {
                            return (T)Boolean.valueOf(configValue);
                        } else {
                            return null;
                        }
                    }
                } catch (NumberFormatException nfe) {
                    LOGGER.debug(String.format("Property [%s] is not a valid number. Using default value [%s].", name, defaultValue), nfe);
                    return defaultValue;
                } catch (IOException var5) {
                    LOGGER.debug(String.format("Failed to get property [%s]. Using default value [%s].", name, defaultValue), var5);
                    return defaultValue;
                }
            }
        }
    }

    /** Simple holder for a script's exit code and captured one-line output. */
    public static class ScriptResult {
        private int exitCode = -1;
        private String result = null;
        public int getExitCode() {
            return exitCode;
        }
        public void setExitCode(int exitCode) {
            this.exitCode = exitCode;
        }
        public String getResult() {
            return result;
        }
        public void setResult(String result) {
            this.result = result;
        }
    }

}
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.
+ +package com.cloud.hypervisor.kvm.storage; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.cloudstack.utils.qemu.QemuImg; +import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; +import org.joda.time.Duration; + +import com.cloud.agent.api.to.HostTO; +import com.cloud.hypervisor.kvm.resource.KVMHABase.HAStoragePool; +import com.cloud.storage.Storage; +import com.cloud.storage.Storage.ProvisioningType; + +public class MultipathSCSIPool implements KVMStoragePool { + private String uuid; + private String sourceHost; + private int sourcePort; + private String sourceDir; + private Storage.StoragePoolType storagePoolType; + private StorageAdaptor storageAdaptor; + private long capacity; + private long used; + private long available; + private Map details; + + public MultipathSCSIPool(String uuid, String host, int port, String path, Storage.StoragePoolType poolType, Map poolDetails, StorageAdaptor adaptor) { + this.uuid = uuid; + sourceHost = host; + sourcePort = port; + sourceDir = path; + storagePoolType = poolType; + storageAdaptor = adaptor; + capacity = 0; + used = 0; + available = 0; + details = poolDetails; + } + + public MultipathSCSIPool(String uuid, StorageAdaptor adapter) { + this.uuid = uuid; + sourceHost = null; + sourcePort = -1; + sourceDir = null; + storagePoolType = Storage.StoragePoolType.FiberChannel; + details = new HashMap(); + this.storageAdaptor = adapter; + } + + @Override + public KVMPhysicalDisk createPhysicalDisk(String arg0, ProvisioningType arg1, long arg2, byte[] arg3) { + return null; + } + + @Override + public KVMPhysicalDisk createPhysicalDisk(String arg0, PhysicalDiskFormat arg1, ProvisioningType arg2, long arg3, + byte[] arg4) { + return null; + } + + @Override + public boolean connectPhysicalDisk(String volumeUuid, Map details) { + return storageAdaptor.connectPhysicalDisk(volumeUuid, this, details); + } + + @Override + public KVMPhysicalDisk getPhysicalDisk(String 
volumeId) { + return storageAdaptor.getPhysicalDisk(volumeId, this); + } + + @Override + public boolean disconnectPhysicalDisk(String volumeUuid) { + return storageAdaptor.disconnectPhysicalDisk(volumeUuid, this); + } + + @Override + public boolean deletePhysicalDisk(String volumeUuid, Storage.ImageFormat format) { + return true; + } + + @Override + public List listPhysicalDisks() { + return null; + } + + @Override + public String getUuid() { + return uuid; + } + + public void setCapacity(long capacity) { + this.capacity = capacity; + } + + @Override + public long getCapacity() { + return this.capacity; + } + + public void setUsed(long used) { + this.used = used; + } + + @Override + public long getUsed() { + return this.used; + } + + public void setAvailable(long available) { + this.available = available; + } + + @Override + public long getAvailable() { + return this.available; + } + + @Override + public boolean refresh() { + return false; + } + + @Override + public boolean isExternalSnapshot() { + return true; + } + + @Override + public String getLocalPath() { + return null; + } + + @Override + public String getSourceHost() { + return this.sourceHost; + } + + @Override + public String getSourceDir() { + return this.sourceDir; + } + + @Override + public int getSourcePort() { + return this.sourcePort; + } + + @Override + public String getAuthUserName() { + return null; + } + + @Override + public String getAuthSecret() { + return null; + } + + @Override + public Storage.StoragePoolType getType() { + return storagePoolType; + } + + @Override + public boolean delete() { + return false; + } + + @Override + public QemuImg.PhysicalDiskFormat getDefaultFormat() { + return QemuImg.PhysicalDiskFormat.RAW; + } + + @Override + public boolean createFolder(String path) { + return false; + } + + @Override + public boolean supportsConfigDriveIso() { + return false; + } + + @Override + public Map getDetails() { + return this.details; + } + + @Override + public boolean 
isPoolSupportHA() { + return false; + } + + @Override + public String getHearthBeatPath() { + return null; + } + + @Override + public String createHeartBeatCommand(HAStoragePool primaryStoragePool, String hostPrivateIp, + boolean hostValidation) { + return null; + } + + @Override + public String getStorageNodeId() { + return null; + } + + @Override + public Boolean checkingHeartBeat(HAStoragePool pool, HostTO host) { + return null; + } + + @Override + public Boolean vmActivityCheck(HAStoragePool pool, HostTO host, Duration activityScriptTimeout, + String volumeUUIDListString, String vmActivityCheckPath, long duration) { + return null; + } + + public void resize(String path, String vmName, long newSize) { + ((MultipathSCSIAdapterBase)storageAdaptor).resize(path, vmName, newSize); + } +} diff --git a/plugins/pom.xml b/plugins/pom.xml index 6c4d561f896..2edbbd5ee1d 100755 --- a/plugins/pom.xml +++ b/plugins/pom.xml @@ -133,6 +133,9 @@ storage/volume/scaleio storage/volume/linstor storage/volume/storpool + storage/volume/adaptive + storage/volume/flasharray + storage/volume/primera storage/object/minio storage/object/simulator diff --git a/plugins/storage/volume/adaptive/README.md b/plugins/storage/volume/adaptive/README.md new file mode 100644 index 00000000000..041f1f1a128 --- /dev/null +++ b/plugins/storage/volume/adaptive/README.md @@ -0,0 +1,58 @@ +# CloudStack Volume Provider Adaptive Plugin Base + +The Adaptive Plugin Base is an abstract volume storage provider that +provides a generic implementation for managing volumes that are exposed +to hosts through FiberChannel and similar methods but managed independently +through a storage API or interface. The ProviderAdapter, and associated +classes, provide a decoupled interface from the rest of +Cloudstack that covers the exact actions needed +to interface with a storage provider. 
Each storage provider can extend
+and implement the ProviderAdapter without needing to understand the internal
+logic of volume management, database structure, etc.
+
+## Implement the Provider Interface
+To implement a provider, create another module -- or a standalone project --
+and implement the following interfaces from the **org.apache.cloudstack.storage.datastore.adapter** package:
+
+1. **ProviderAdapter** - this is the primary interface used to communicate with the storage provider when volume management actions are required.
+2. **ProviderAdapterFactory** - the implementation of this class creates the correct ProviderAdapter when needed.
+
+Follow the Javadoc for each class for further instructions on implementing each function.
+
+## Implement the Primary Datastore Provider Plugin
+Once the provider interface is implemented, you will need to extend the **org.apache.cloudstack.storage.datastore.provider.AdaptiveProviderDatastoreProviderImpl** class. When extending it, you simply need to implement a default
+constructor that creates an instance of the ProviderAdapterFactory implementation created in #2 above. Once created, you need to call the parent constructor and pass the factory object.
+
+## Provide the Configuration for the Provider Plugin
+Lastly, you need to include a module file and Spring configuration for your Primary Datastore Provider Plugin class so CloudStack will load it during startup.
+
+### Module Properties
+This provides the hint to CloudStack to load this as a module during startup.
+```
+#resources/META-INF/cloudstack/storage-volume-/module.properties
+name=storage-volume-
+parent=storage
+```
+### Spring Bean Context Configuration
+This provides instructions on which provider implementation class to load when Spring bean initialization is running.
+```
+
+
+
+
+
+
+```
+## Build and Deploy the Jar
+Once you build the new jar, start the CloudStack Management Server or, if a standalone jar, add it to the classpath before start.
You should now have a new storage provider of the designated name once Cloudstack finishes loading +all configured modules. diff --git a/plugins/storage/volume/adaptive/pom.xml b/plugins/storage/volume/adaptive/pom.xml new file mode 100644 index 00000000000..a8ef6337a0c --- /dev/null +++ b/plugins/storage/volume/adaptive/pom.xml @@ -0,0 +1,62 @@ + + + 4.0.0 + cloud-plugin-storage-volume-adaptive + Apache CloudStack Plugin - Storage Volume Adaptive Base Provider + + org.apache.cloudstack + cloudstack-plugins + 4.19.0.0-SNAPSHOT + ../../../pom.xml + + + + org.apache.cloudstack + cloud-engine-storage-volume + ${project.version} + + + org.apache.cloudstack + cloud-engine-storage-snapshot + ${project.version} + + + org.apache.cloudstack + cloud-plugin-storage-volume-default + ${project.version} + + + + + + maven-surefire-plugin + + + integration-test + + test + + + + + + + diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapter.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapter.java new file mode 100644 index 00000000000..0cd44cd04c2 --- /dev/null +++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapter.java @@ -0,0 +1,157 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.datastore.adapter; + +import java.util.Map; + +/** + * A simple DataStore adaptive interface. This interface allows the ManagedVolumeDataStoreDriverImpl + * to interact with the external provider without the provider needing to interface with any CloudStack + * objects, factories or database tables, simplifying the implementation and maintenance of the provider + * interface. + */ +public interface ProviderAdapter { + // some common keys across providers. Provider code determines what to do with it + public static final String API_USERNAME_KEY = "api_username"; + public static final String API_PASSWORD_KEY = "api_password"; + public static final String API_TOKEN_KEY = "api_token"; + public static final String API_PRIVATE_KEY = "api_privatekey"; + public static final String API_URL_KEY = "api_url"; + public static final String API_SKIP_TLS_VALIDATION_KEY = "api_skiptlsvalidation"; + // one of: basicauth (default), apitoken, privatekey + public static final String API_AUTHENTICATION_TYPE_KEY = "api_authn_type"; + + /** + * Refresh the connector with the provided details + * @param details + */ + public void refresh(Map details); + + /** + * Return if currently connected/configured properly, otherwise throws a RuntimeException + * with information about what is misconfigured + * @return + */ + public void validate(); + + /** + * Forcefully remove/disconnect + */ + public void disconnect(); + + /** + * Create a new volume on the storage provider + * @param context + * @param volume + * @param diskOffering + * 
@param sizeInBytes + * @return + */ + public ProviderVolume create(ProviderAdapterContext context, ProviderAdapterDataObject volume, ProviderAdapterDiskOffering diskOffering, long sizeInBytes); + + /** + * Attach the volume to the target object for the provided context. Returns the scope-specific connection value (for example, the LUN) + * @param context + * @param request + * @return + */ + public String attach(ProviderAdapterContext context, ProviderAdapterDataObject request); + + /** + * Detach the host from the storage context + * @param context + * @param request + */ + public void detach(ProviderAdapterContext context, ProviderAdapterDataObject request); + + /** + * Delete the provided volume/object + * @param context + * @param request + */ + public void delete(ProviderAdapterContext context, ProviderAdapterDataObject request); + + /** + * Copy a source object to a destination volume. The source object can be a Volume, Snapshot, or Template + */ + public ProviderVolume copy(ProviderAdapterContext context, ProviderAdapterDataObject sourceVolume, ProviderAdapterDataObject targetVolume); + + /** + * Make a device-specific snapshot of the provided volume + */ + public ProviderSnapshot snapshot(ProviderAdapterContext context, ProviderAdapterDataObject sourceVolume, ProviderAdapterDataObject targetSnapshot); + + /** + * Revert the snapshot to its base volume. Replaces the base volume with the snapshot point on the storage array + * @param context + * @param request + * @return + */ + public ProviderVolume revert(ProviderAdapterContext context, ProviderAdapterDataObject request); + + /** + * Resize a volume + * @param context + * @param request + * @param totalNewSizeInBytes + */ + public void resize(ProviderAdapterContext context, ProviderAdapterDataObject request, long totalNewSizeInBytes); + + /** + * Return the managed volume info from storage system. 
+ * @param context + * @param request + * @return ProviderVolume object or null if the object was not found but no errors were encountered. + */ + public ProviderVolume getVolume(ProviderAdapterContext context, ProviderAdapterDataObject request); + + /** + * Return the managed snapshot info from storage system + * @param context + * @param request + * @return ProviderSnapshot object or null if the object was not found but no errors were encountered. + */ + public ProviderSnapshot getSnapshot(ProviderAdapterContext context, ProviderAdapterDataObject request); + + /** + * Given an array-specific address, find the matching volume information from the array + * @param addressType + * @param address + * @return + */ + public ProviderVolume getVolumeByAddress(ProviderAdapterContext context, ProviderVolume.AddressType addressType, String address); + + /** + * Returns stats about the managed storage where the volumes and snapshots are created/managed + * @return + */ + public ProviderVolumeStorageStats getManagedStorageStats(); + + /** + * Returns stats about a specific volume + * @return + */ + public ProviderVolumeStats getVolumeStats(ProviderAdapterContext context, ProviderAdapterDataObject request); + + /** + * Returns true if the given hostname is accessible to the storage provider. 
+ * @param context + * @param request + * @return + */ + public boolean canAccessHost(ProviderAdapterContext context, String hostname); +} diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapterConstants.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapterConstants.java new file mode 100644 index 00000000000..e5e9f77d15b --- /dev/null +++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapterConstants.java @@ -0,0 +1,22 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.storage.datastore.adapter; + +public class ProviderAdapterConstants { + public static final String EXTERNAL_UUID = "external_uuid"; + public static final String EXTERNAL_NAME = "external_name"; +} diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapterContext.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapterContext.java new file mode 100644 index 00000000000..c726fd6ca63 --- /dev/null +++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapterContext.java @@ -0,0 +1,83 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.storage.datastore.adapter; + +public class ProviderAdapterContext { + private String domainUuid; + private String domainName; + private Long domainId; + private String zoneUuid; + private String zoneName; + private Long zoneId; + private String accountUuid; + private String accountName; + private Long accountId; + public String getDomainUuid() { + return domainUuid; + } + public void setDomainUuid(String domainUuid) { + this.domainUuid = domainUuid; + } + public String getDomainName() { + return domainName; + } + public void setDomainName(String domainName) { + this.domainName = domainName; + } + public Long getDomainId() { + return domainId; + } + public void setDomainId(Long domainId) { + this.domainId = domainId; + } + public String getZoneUuid() { + return zoneUuid; + } + public void setZoneUuid(String zoneUuid) { + this.zoneUuid = zoneUuid; + } + public String getZoneName() { + return zoneName; + } + public void setZoneName(String zoneName) { + this.zoneName = zoneName; + } + public Long getZoneId() { + return zoneId; + } + public void setZoneId(Long zoneId) { + this.zoneId = zoneId; + } + public String getAccountUuid() { + return accountUuid; + } + public void setAccountUuid(String accountUuid) { + this.accountUuid = accountUuid; + } + public String getAccountName() { + return accountName; + } + public void setAccountName(String accountName) { + this.accountName = accountName; + } + public Long getAccountId() { + return accountId; + } + public void setAccountId(Long accountId) { + this.accountId = accountId; + } +} diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapterDataObject.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapterDataObject.java new file mode 100644 index 00000000000..16e0170cc60 --- /dev/null +++ 
b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapterDataObject.java @@ -0,0 +1,159 @@ + +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.datastore.adapter; + +/** + * Represents a translation object for transmitting meta-data about a volume, + * snapshot or template between cloudstack and the storage provider + */ +public class ProviderAdapterDataObject { + public enum Type { + VOLUME(), + SNAPSHOT(), + TEMPLATE(), + ARCHIVE() + } + /** + * The cloudstack UUID of the object + */ + private String uuid; + /** + * The cloudstack name of the object (generated or user provided) + */ + private String name; + /** + * The type of the object + */ + private Type type; + /** + * The internal local ID of the object (not globally unique) + */ + private Long id; + /** + * The external name assigned on the storage array. It may be dynamically + * generated or derived from cloudstack data + */ + private String externalName; + + /** + * The external UUID of the object on the storage array. This may be different + * or the same as the cloudstack UUID depending on implementation.
+ */ + private String externalUuid; + + /** + * The internal (non-global) ID of the datastore this object is defined in + */ + private Long dataStoreId; + + /** + * The global ID of the datastore this object is defined in + */ + private String dataStoreUuid; + + /** + * The name of the data store this object is defined in + */ + private String dataStoreName; + + /** + * Represents the device connection id, typically a LUN, used to find the volume in conjunction with Address and AddressType. + */ + private String externalConnectionId; + + public String getUuid() { + return uuid; + } + + public void setUuid(String uuid) { + this.uuid = uuid; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public Type getType() { + return type; + } + + public void setType(Type type) { + this.type = type; + } + + public String getExternalName() { + return externalName; + } + + public void setExternalName(String externalName) { + this.externalName = externalName; + } + + public String getExternalUuid() { + return externalUuid; + } + + public void setExternalUuid(String externalUuid) { + this.externalUuid = externalUuid; + } + + public Long getId() { + return id; + } + + public void setId(Long id) { + this.id = id; + } + + public Long getDataStoreId() { + return dataStoreId; + } + + public void setDataStoreId(Long dataStoreId) { + this.dataStoreId = dataStoreId; + } + + public String getDataStoreUuid() { + return dataStoreUuid; + } + + public void setDataStoreUuid(String dataStoreUuid) { + this.dataStoreUuid = dataStoreUuid; + } + + public String getDataStoreName() { + return dataStoreName; + } + + public void setDataStoreName(String dataStoreName) { + this.dataStoreName = dataStoreName; + } + + public String getExternalConnectionId() { + return externalConnectionId; + } + + public void setExternalConnectionId(String externalConnectionId) { + this.externalConnectionId = externalConnectionId; + } + +} diff --git 
a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapterDiskOffering.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapterDiskOffering.java new file mode 100644 index 00000000000..1db5efbb8ec --- /dev/null +++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapterDiskOffering.java @@ -0,0 +1,194 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.storage.datastore.adapter; + +import java.util.Date; +import org.apache.commons.lang.NotImplementedException; +import com.cloud.offering.DiskOffering; + +/** + * Wrapper Disk Offering that masks the cloudstack-dependent classes from the storage provider code + */ +public class ProviderAdapterDiskOffering { + private ProvisioningType type; + private DiskCacheMode diskCacheMode; + private DiskOffering hiddenDiskOffering; + private State state; + public ProviderAdapterDiskOffering(DiskOffering hiddenDiskOffering) { + this.hiddenDiskOffering = hiddenDiskOffering; + if (hiddenDiskOffering.getProvisioningType() != null) { + this.type = ProvisioningType.getProvisioningType(hiddenDiskOffering.getProvisioningType().toString()); + } + if (hiddenDiskOffering.getCacheMode() != null) { + this.diskCacheMode = DiskCacheMode.getDiskCasehMode(hiddenDiskOffering.getCacheMode().toString()); + } + if (hiddenDiskOffering.getState() != null) { + this.state = State.valueOf(hiddenDiskOffering.getState().toString()); + } + } + public Long getBytesReadRate() { + return hiddenDiskOffering.getBytesReadRate(); + } + public Long getBytesReadRateMax() { + return hiddenDiskOffering.getBytesReadRateMax(); + } + public Long getBytesReadRateMaxLength() { + return hiddenDiskOffering.getBytesReadRateMaxLength(); + } + public Long getBytesWriteRate() { + return hiddenDiskOffering.getBytesWriteRate(); + } + public Long getBytesWriteRateMax() { + return hiddenDiskOffering.getBytesWriteRateMax(); + } + public Long getBytesWriteRateMaxLength() { + return hiddenDiskOffering.getBytesWriteRateMaxLength(); + } + public DiskCacheMode getCacheMode() { + return diskCacheMode; + } + public Date getCreated() { + return hiddenDiskOffering.getCreated(); + } + public long getDiskSize() { + return hiddenDiskOffering.getDiskSize(); + } + public boolean getDiskSizeStrictness() { + return hiddenDiskOffering.getDiskSizeStrictness(); + } + public String getDisplayText() { + return 
hiddenDiskOffering.getDisplayText(); + } + public boolean getEncrypt() { + return hiddenDiskOffering.getEncrypt(); + } + public Integer getHypervisorSnapshotReserve() { + return hiddenDiskOffering.getHypervisorSnapshotReserve(); + } + public long getId() { + return hiddenDiskOffering.getId(); + } + public Long getIopsReadRate() { + return hiddenDiskOffering.getIopsReadRate(); + } + public Long getIopsReadRateMax() { + return hiddenDiskOffering.getIopsReadRateMax(); + } + public Long getIopsReadRateMaxLength() { + return hiddenDiskOffering.getIopsReadRateMaxLength(); + } + public Long getIopsWriteRate() { + return hiddenDiskOffering.getIopsWriteRate(); + } + public Long getIopsWriteRateMax() { + return hiddenDiskOffering.getIopsWriteRateMax(); + } + public Long getIopsWriteRateMaxLength() { + return hiddenDiskOffering.getIopsWriteRateMaxLength(); + } + public Long getMaxIops() { + return hiddenDiskOffering.getMaxIops(); + } + public Long getMinIops() { + return hiddenDiskOffering.getMinIops(); + } + public String getName() { + return hiddenDiskOffering.getName(); + } + public State getState() { + return state; + } + public String getTags() { + return hiddenDiskOffering.getTags(); + } + public String[] getTagsArray() { + return hiddenDiskOffering.getTagsArray(); + } + public String getUniqueName() { + return hiddenDiskOffering.getUniqueName(); + } + public String getUuid() { + return hiddenDiskOffering.getUuid(); + } + public ProvisioningType getType() { + return type; + } + public void setType(ProvisioningType type) { + this.type = type; + } + + public static enum ProvisioningType { + THIN("thin"), + SPARSE("sparse"), + FAT("fat"); + + private final String provisionType; + + private ProvisioningType(String provisionType){ + this.provisionType = provisionType; + } + + public String toString(){ + return this.provisionType; + } + + public static ProvisioningType getProvisioningType(String provisioningType){ + + if(provisioningType.equals(THIN.provisionType)){ + return 
ProvisioningType.THIN; + } else if(provisioningType.equals(SPARSE.provisionType)){ + return ProvisioningType.SPARSE; + } else if (provisioningType.equals(FAT.provisionType)){ + return ProvisioningType.FAT; + } else { + throw new NotImplementedException("Invalid provisioning type specified: " + provisioningType); + } + } + } + + + enum State { + Inactive, Active, + } + + enum DiskCacheMode { + NONE("none"), WRITEBACK("writeback"), WRITETHROUGH("writethrough"); + + private final String _diskCacheMode; + + DiskCacheMode(String cacheMode) { + _diskCacheMode = cacheMode; + } + + @Override + public String toString() { + return _diskCacheMode; + } + + public static DiskCacheMode getDiskCasehMode(String cacheMode) { + if (cacheMode.equals(NONE._diskCacheMode)) { + return NONE; + } else if (cacheMode.equals(WRITEBACK._diskCacheMode)) { + return WRITEBACK; + } else if (cacheMode.equals(WRITETHROUGH._diskCacheMode)) { + return WRITETHROUGH; + } else { + throw new NotImplementedException("Invalid cache mode specified: " + cacheMode); + } + } + }; +} diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapterFactory.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapterFactory.java new file mode 100644 index 00000000000..13a843d4763 --- /dev/null +++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapterFactory.java @@ -0,0 +1,24 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.datastore.adapter; + +import java.util.Map; + +public interface ProviderAdapterFactory { + public String getProviderName(); + public ProviderAdapter create(String url, Map details); +} diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderSnapshot.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderSnapshot.java new file mode 100644 index 00000000000..50262ae6f2b --- /dev/null +++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderSnapshot.java @@ -0,0 +1,28 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.storage.datastore.adapter; + +public interface ProviderSnapshot extends ProviderVolume { + /** + * Returns true if the provider supports directly attaching the snapshot. + * If false is returned, it indicates that cloudstack needs to perform + * a temporary volume copy prior to copying the snapshot to a new + * volume on another provider + * @return + */ + public Boolean canAttachDirectly(); +} diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderVolume.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderVolume.java new file mode 100644 index 00000000000..25577903e3d --- /dev/null +++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderVolume.java @@ -0,0 +1,40 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.storage.datastore.adapter; + +public interface ProviderVolume { + + public Boolean isDestroyed(); + public String getId(); + public void setId(String id); + public String getName(); + public void setName(String name); + public Integer getPriority(); + public void setPriority(Integer priority); + public String getState(); + public AddressType getAddressType(); + public void setAddressType(AddressType addressType); + public String getAddress(); + public Long getAllocatedSizeInBytes(); + public Long getUsedBytes(); + public String getExternalUuid(); + public String getExternalName(); + public String getExternalConnectionId(); + public enum AddressType { + FIBERWWN + } +} diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderVolumeNamer.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderVolumeNamer.java new file mode 100644 index 00000000000..5a72871e9c0 --- /dev/null +++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderVolumeNamer.java @@ -0,0 +1,58 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.storage.datastore.adapter; + +public class ProviderVolumeNamer { + + private static final String SNAPSHOT_PREFIX = "snap"; + private static final String VOLUME_PREFIX = "vol"; + private static final String TEMPLATE_PREFIX = "tpl"; + /** Simple method to allow sharing storage setup, primarily in lab/testing environment */ + private static final String ENV_PREFIX = System.getProperty("adaptive.storage.provider.envIdentifier"); + + public static String generateObjectName(ProviderAdapterContext context, ProviderAdapterDataObject obj) { + ProviderAdapterDataObject.Type objType = obj.getType(); + String prefix = null; + if (objType == ProviderAdapterDataObject.Type.SNAPSHOT) { + prefix = SNAPSHOT_PREFIX; + } else if (objType == ProviderAdapterDataObject.Type.VOLUME) { + prefix = VOLUME_PREFIX; + } else if (objType == ProviderAdapterDataObject.Type.TEMPLATE) { + prefix = TEMPLATE_PREFIX; + } else { + throw new RuntimeException("Unknown ManagedDataObject type provided: " + obj.getType()); + } + + if (ENV_PREFIX != null) { + prefix = ENV_PREFIX + "-" + prefix; + } + + return prefix + "-" + obj.getDataStoreId() + "-" + context.getDomainId() + "-" + context.getAccountId() + "-" + obj.getId(); + } + + + public static String generateObjectComment(ProviderAdapterContext context, ProviderAdapterDataObject obj) { + return "CSInfo [Account=" + context.getAccountName() + + "; Domain=" + context.getDomainName() + + "; DomainUUID=" + context.getDomainUuid() + + "; Account=" + context.getAccountName() + + "; AccountUUID=" + context.getAccountUuid() + + "; ObjectEndUserName=" + obj.getName() + + "; ObjectUUID=" + obj.getUuid() + "]"; + } + +} diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderVolumeStats.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderVolumeStats.java new file mode 100644 index 00000000000..33638e1f9ea --- /dev/null 
+++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderVolumeStats.java @@ -0,0 +1,55 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.datastore.adapter; + +public class ProviderVolumeStats { + private Long allocatedInBytes; + private Long virtualUsedInBytes; + private Long actualUsedInBytes; + private Long iops; + private Long throughput; + public Long getAllocatedInBytes() { + return allocatedInBytes; + } + public void setAllocatedInBytes(Long allocatedInBytes) { + this.allocatedInBytes = allocatedInBytes; + } + public Long getVirtualUsedInBytes() { + return virtualUsedInBytes; + } + public void setVirtualUsedInBytes(Long virtualUsedInBytes) { + this.virtualUsedInBytes = virtualUsedInBytes; + } + public Long getActualUsedInBytes() { + return actualUsedInBytes; + } + public void setActualUsedInBytes(Long actualUsedInBytes) { + this.actualUsedInBytes = actualUsedInBytes; + } + public Long getIops() { + return iops; + } + public void setIops(Long iops) { + this.iops = iops; + } + public Long getThroughput() { + return throughput; + } + public void setThroughput(Long throughput) { + this.throughput = throughput; + } +} diff 
--git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderVolumeStorageStats.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderVolumeStorageStats.java new file mode 100644 index 00000000000..0624ef2db12 --- /dev/null +++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderVolumeStorageStats.java @@ -0,0 +1,71 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.datastore.adapter; + +public class ProviderVolumeStorageStats { + /** + * Total capacity in bytes currently physically used on the storage system within the scope of given API configuration + */ + private long capacityInBytes; + /** + * Virtual amount of bytes allocated for use. Typically what the users of the volume think they have before + * any compression, deduplication, or thin-provisioning semantics are accounted for. 
+ */ + private Long virtualUsedInBytes; + /** + * Actual physical bytes used on the storage system within the scope of the given API configuration + */ + private Long actualUsedInBytes; + /** + * Current IOPS + */ + private Long iops; + /** + * Current raw throughput + */ + private Long throughput; + public Long getVirtualUsedInBytes() { + return virtualUsedInBytes; + } + public void setVirtualUsedInBytes(Long virtualUsedInBytes) { + this.virtualUsedInBytes = virtualUsedInBytes; + } + public Long getActualUsedInBytes() { + return actualUsedInBytes; + } + public void setActualUsedInBytes(Long actualUsedInBytes) { + this.actualUsedInBytes = actualUsedInBytes; + } + public Long getIops() { + return iops; + } + public void setIops(Long iops) { + this.iops = iops; + } + public Long getThroughput() { + return throughput; + } + public void setThroughput(Long throughput) { + this.throughput = throughput; + } + public Long getCapacityInBytes() { + return capacityInBytes; + } + public void setCapacityInBytes(Long capacityInBytes) { + this.capacityInBytes = capacityInBytes; + } +} diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/driver/AdaptiveDataStoreDriverImpl.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/driver/AdaptiveDataStoreDriverImpl.java new file mode 100644 index 00000000000..d908d48c7da --- /dev/null +++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/driver/AdaptiveDataStoreDriverImpl.java @@ -0,0 +1,901 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.datastore.driver; + +import java.util.Map; +import javax.inject.Inject; +import org.apache.log4j.Logger; + +import java.util.HashMap; +import java.util.List; + +import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; +import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreCapabilities; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService; +import org.apache.cloudstack.framework.async.AsyncCompletionCallback; +import org.apache.cloudstack.storage.command.CommandResult; +import org.apache.cloudstack.storage.command.CopyCmdAnswer; +import org.apache.cloudstack.storage.command.CreateObjectAnswer; +import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapter; +import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterConstants; +import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterContext; +import 
org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterDataObject; +import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterDiskOffering; +import org.apache.cloudstack.storage.datastore.adapter.ProviderSnapshot; +import org.apache.cloudstack.storage.datastore.adapter.ProviderVolume; +import org.apache.cloudstack.storage.datastore.adapter.ProviderVolumeStats; +import org.apache.cloudstack.storage.datastore.adapter.ProviderVolumeStorageStats; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.storage.datastore.provider.AdaptivePrimaryDatastoreAdapterFactoryMap; +import org.apache.cloudstack.storage.to.SnapshotObjectTO; +import org.apache.cloudstack.storage.to.VolumeObjectTO; +import org.apache.cloudstack.storage.volume.VolumeObject; +import org.apache.cloudstack.storage.snapshot.SnapshotObject; + +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.to.DataObjectType; +import com.cloud.agent.api.to.DataStoreTO; +import com.cloud.agent.api.to.DataTO; +import com.cloud.agent.api.to.DiskTO; +import com.cloud.dc.dao.DataCenterDao; +import com.cloud.domain.DomainVO; +import com.cloud.domain.dao.DomainDao; +import com.cloud.host.Host; +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.projects.dao.ProjectDao; +import com.cloud.storage.DiskOfferingVO; +import com.cloud.storage.ResizeVolumePayload; +import com.cloud.storage.SnapshotVO; +import com.cloud.storage.Storage.ImageFormat; + +import com.cloud.storage.StoragePool; +import com.cloud.storage.VMTemplateStoragePoolVO; +import com.cloud.storage.VMTemplateVO; +import com.cloud.storage.Volume; +import com.cloud.storage.VolumeDetailVO; 
+import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.DiskOfferingDao; +import com.cloud.storage.dao.SnapshotDao; +import com.cloud.storage.dao.SnapshotDetailsDao; +import com.cloud.storage.dao.SnapshotDetailsVO; +import com.cloud.storage.dao.VMTemplateDao; +import com.cloud.storage.dao.VMTemplatePoolDao; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.storage.dao.VolumeDetailsDao; +import com.cloud.user.AccountManager; +import com.cloud.user.AccountVO; +import com.cloud.user.dao.AccountDao; +import com.cloud.utils.Pair; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.vm.VirtualMachine; + +public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDriverImpl { + + static final Logger s_logger = Logger.getLogger(AdaptiveDataStoreDriverImpl.class); + + private String providerName = null; + + @Inject + AccountManager _accountMgr; + @Inject + DiskOfferingDao _diskOfferingDao; + @Inject + VolumeDao _volumeDao; + @Inject + PrimaryDataStoreDao _storagePoolDao; + @Inject + ProjectDao _projectDao; + @Inject + SnapshotDataStoreDao _snapshotDataStoreDao; + @Inject + SnapshotDetailsDao _snapshotDetailsDao; + @Inject + VolumeDetailsDao _volumeDetailsDao; + @Inject + VMTemplatePoolDao _vmTemplatePoolDao; + @Inject + AccountDao _accountDao; + @Inject + StoragePoolDetailsDao _storagePoolDetailsDao; + @Inject + SnapshotDao _snapshotDao; + @Inject + VMTemplateDao _vmTemplateDao; + @Inject + DataCenterDao _datacenterDao; + @Inject + DomainDao _domainDao; + @Inject + VolumeService _volumeService; + + private AdaptivePrimaryDatastoreAdapterFactoryMap _adapterFactoryMap = null; + + public AdaptiveDataStoreDriverImpl(AdaptivePrimaryDatastoreAdapterFactoryMap factoryMap) { + this._adapterFactoryMap = factoryMap; + } + + @Override + public DataTO getTO(DataObject data) { + return null; + } + + @Override + public DataStoreTO getStoreTO(DataStore store) { + return null; + } + + public ProviderAdapter getAPI(StoragePool pool, 
Map details) { + return _adapterFactoryMap.getAPI(pool.getUuid(), pool.getStorageProviderName(), details); + } + + @Override + public void createAsync(DataStore dataStore, DataObject dataObject, + AsyncCompletionCallback callback) { + CreateCmdResult result = null; + try { + s_logger.info("Volume creation starting for data store [" + dataStore.getName() + + "] and data object [" + dataObject.getUuid() + "] of type [" + dataObject.getType() + "]"); + + // quota size of the cloudbyte volume will be increased with the given + // HypervisorSnapshotReserve + Long volumeSizeBytes = dataObject.getSize(); + // cloudstack talks bytes, primera talks MiB + StoragePoolVO storagePool = _storagePoolDao.findById(dataStore.getId()); + Map details = _storagePoolDao.getDetails(storagePool.getId()); + + ProviderAdapter api = getAPI(storagePool, details); + ProviderAdapterContext context = newManagedVolumeContext(dataObject); + ProviderAdapterDataObject dataIn = newManagedDataObject(dataObject, storagePool); + ProviderAdapterDiskOffering inDiskOffering = null; + // only get the offering if its a volume type. If its a template type we skip this. + if (DataObjectType.VOLUME.equals(dataObject.getType())) { + // get the disk offering as provider may need to see details of this to + // provision the correct type of volume + VolumeVO volumeVO = _volumeDao.findById(dataObject.getId()); + DiskOfferingVO diskOffering = _diskOfferingDao.findById(volumeVO.getDiskOfferingId()); + if (diskOffering.isUseLocalStorage()) { + throw new CloudRuntimeException( + "Disk offering requires local storage but this storage provider does not suppport local storage. 
Please contact the cloud adminstrator to have the disk offering configuration updated to avoid this conflict."); + } + inDiskOffering = new ProviderAdapterDiskOffering(diskOffering); + } + + // if its a template and it already exist, just return the info -- may mean a previous attempt to + // copy this template failed after volume creation and its state has not advanced yet. + ProviderVolume volume = null; + if (DataObjectType.TEMPLATE.equals(dataObject.getType())) { + volume = api.getVolume(context, dataIn); + if (volume != null) { + s_logger.info("Template volume already exists [" + dataObject.getUuid() + "]"); + } + } + + // create the volume if it didn't already exist + if (volume == null) { + // klunky - if this fails AND this detail property is set, it means upstream may have already created it + // in VolumeService and DataMotionStrategy tries to do it again before copying... + try { + volume = api.create(context, dataIn, inDiskOffering, volumeSizeBytes); + } catch (Exception e) { + VolumeDetailVO csId = _volumeDetailsDao.findDetail(dataObject.getId(), "cloneOfTemplate"); + if (csId != null && csId.getId() > 0) { + volume = api.getVolume(context, dataIn); + } else { + throw e; + } + } + s_logger.info("New volume created on remote storage for [" + dataObject.getUuid() + "]"); + } + + // set these from the discovered or created volume before proceeding + dataIn.setExternalName(volume.getExternalName()); + dataIn.setExternalUuid(volume.getExternalUuid()); + + // add the volume to the host set + String connectionId = api.attach(context, dataIn); + + // update the cloudstack metadata about the volume + persistVolumeOrTemplateData(storagePool, details, dataObject, volume, connectionId); + + result = new CreateCmdResult(dataObject.getUuid(), new Answer(null)); + result.setSuccess(true); + s_logger.info("Volume creation complete for [" + dataObject.getUuid() + "]"); + } catch (Throwable e) { + s_logger.error("Volume creation failed for dataObject [" + 
dataObject.getUuid() + "]: " + e.toString(), e); + result = new CreateCmdResult(null, new Answer(null)); + result.setResult(e.toString()); + result.setSuccess(false); + throw new CloudRuntimeException(e.getMessage()); + } finally { + if (callback != null) + callback.complete(result); + } + } + + @Override + public void deleteAsync(DataStore dataStore, DataObject dataObject, + AsyncCompletionCallback callback) { + s_logger.debug("Delete volume started"); + CommandResult result = new CommandResult(); + try { + StoragePoolVO storagePool = _storagePoolDao.findById(dataStore.getId()); + Map details = _storagePoolDao.getDetails(storagePool.getId()); + ProviderAdapter api = getAPI(storagePool, details); + ProviderAdapterContext context = newManagedVolumeContext(dataObject); + ProviderAdapterDataObject inData = newManagedDataObject(dataObject, storagePool); + // skip adapter delete if neither external identifier is set. Probably means the volume + // create failed before this chould be set + if (!(inData.getExternalName() == null && inData.getExternalUuid() == null)) { + api.delete(context, inData); + } + result.setResult("Successfully deleted volume"); + result.setSuccess(true); + } catch (Throwable e) { + s_logger.error("Result to volume delete failed with exception", e); + result.setResult(e.toString()); + } finally { + if (callback != null) + callback.complete(result); + } + } + + @Override + public void copyAsync(DataObject srcdata, DataObject destdata, + AsyncCompletionCallback callback) { + CopyCommandResult result = null; + try { + s_logger.info("Copying volume " + srcdata.getUuid() + " to " + destdata.getUuid() + "]"); + + if (!canCopy(srcdata, destdata)) { + throw new CloudRuntimeException( + "The data store provider is unable to perform copy operations because the source or destination object is not the correct type of volume"); + } + + try { + StoragePoolVO storagePool = _storagePoolDao.findById(srcdata.getDataStore().getId()); + Map details = 
_storagePoolDao.getDetails(storagePool.getId()); + ProviderAdapter api = getAPI(storagePool, details); + + s_logger.info("Copy volume " + srcdata.getUuid() + " to " + destdata.getUuid()); + + ProviderVolume outVolume; + ProviderAdapterContext context = newManagedVolumeContext(destdata); + ProviderAdapterDataObject sourceIn = newManagedDataObject(srcdata, storagePool); + ProviderAdapterDataObject destIn = newManagedDataObject(destdata, storagePool); + outVolume = api.copy(context, sourceIn, destIn); + + // populate this data - it may be needed later + destIn.setExternalName(outVolume.getExternalName()); + destIn.setExternalConnectionId(outVolume.getExternalConnectionId()); + destIn.setExternalUuid(outVolume.getExternalUuid()); + + // if we copied from one volume to another, the target volume's disk offering or user input may be of a larger size + // we won't, however, shrink a volume if its smaller. + if (outVolume.getAllocatedSizeInBytes() < destdata.getSize()) { + s_logger.info("Resizing volume " + destdata.getUuid() + " to requested target volume size of " + destdata.getSize()); + api.resize(context, destIn, destdata.getSize()); + } + + String connectionId = api.attach(context, destIn); + + String finalPath; + // format: type=fiberwwn; address=
; connid= + if (connectionId != null) { + finalPath = String.format("type=%s; address=%s; connid=%s", outVolume.getAddressType().toString(), outVolume.getAddress().toLowerCase(), connectionId); + } else { + finalPath = String.format("type=%s; address=%s;", outVolume.getAddressType().toString(), outVolume.getAddress().toLowerCase()); + } + + persistVolumeData(storagePool, details, destdata, outVolume, connectionId); + s_logger.info("Copy completed from [" + srcdata.getUuid() + "] to [" + destdata.getUuid() + "]"); + + VolumeObjectTO voto = new VolumeObjectTO(); + voto.setPath(finalPath); + + result = new CopyCommandResult(finalPath, new CopyCmdAnswer(voto)); + result.setSuccess(true); + } catch (Throwable e) { + s_logger.error("Result to volume copy failed with exception", e); + result = new CopyCommandResult(null, null); + result.setSuccess(false); + result.setResult(e.toString()); + } + } finally { + if (callback != null) + callback.complete(result); + } + } + + @Override + public void copyAsync(DataObject srcData, DataObject destData, Host destHost, + AsyncCompletionCallback callback) { + copyAsync(srcData, destData, callback); + } + + @Override + public boolean canCopy(DataObject srcData, DataObject destData) { + s_logger.debug("canCopy: Checking srcData [" + srcData.getUuid() + ":" + srcData.getType() + ":" + + srcData.getDataStore().getId() + " AND destData [" + + destData.getUuid() + ":" + destData.getType() + ":" + destData.getDataStore().getId() + "]"); + try { + if (!isSameProvider(srcData)) { + s_logger.debug("canCopy: No we can't -- the source provider is NOT the correct type for this driver!"); + return false; + } + + if (!isSameProvider(destData)) { + s_logger.debug("canCopy: No we can't -- the destination provider is NOT the correct type for this driver!"); + return false; + } + s_logger.debug( + "canCopy: Source and destination are the same so we can copy via storage endpoint, checking that the source actually exists"); + StoragePoolVO poolVO = 
_storagePoolDao.findById(srcData.getDataStore().getId()); + Map details = _storagePoolDao.getDetails(srcData.getDataStore().getId()); + ProviderAdapter api = getAPI(poolVO, details); + + /** + * The storage provider generates its own names for snapshots which we store and + * retrieve when needed + */ + ProviderAdapterContext context = newManagedVolumeContext(srcData); + ProviderAdapterDataObject srcDataObject = newManagedDataObject(srcData, poolVO); + if (srcData instanceof SnapshotObject) { + ProviderSnapshot snapshot = api.getSnapshot(context, srcDataObject); + if (snapshot == null) { + return false; + } else { + return true; + } + } else { + ProviderVolume vol = api.getVolume(context, srcDataObject); + if (vol == null) { + return false; + } else { + return true; + } + } + } catch (Throwable e) { + s_logger.warn("Problem checking if we canCopy", e); + return false; + } + } + + @Override + public void resize(DataObject data, AsyncCompletionCallback callback) { + s_logger.debug("Resize volume started"); + CreateCmdResult result = null; + try { + + // Boolean status = false; + VolumeObject vol = (VolumeObject) data; + StoragePool pool = (StoragePool) data.getDataStore(); + + ResizeVolumePayload resizeParameter = (ResizeVolumePayload) vol.getpayload(); + + StoragePoolVO poolVO = _storagePoolDao.findById(pool.getId()); + + if (!(poolVO.isManaged())) { + super.resize(data, callback); + return; + } + + try { + Map details = _storagePoolDao.getDetails(pool.getId()); + ProviderAdapter api = getAPI(pool, details); + + // doesn't support shrink (maybe can truncate but separate API calls to + // investigate) + if (vol.getSize() > resizeParameter.newSize) { + throw new CloudRuntimeException("Storage provider does not support shrinking an existing volume"); + } + + ProviderAdapterContext context = newManagedVolumeContext(data); + ProviderAdapterDataObject dataIn = newManagedDataObject(data, poolVO); + if (s_logger.isDebugEnabled()) s_logger.debug("Calling provider API to 
resize volume " + data.getUuid() + " to " + resizeParameter.newSize); + api.resize(context, dataIn, resizeParameter.newSize); + + if (vol.isAttachedVM()) { + if (VirtualMachine.State.Running.equals(vol.getAttachedVM().getState())) { + if (s_logger.isDebugEnabled()) s_logger.debug("Notify currently attached VM of volume resize for " + data.getUuid() + " to " + resizeParameter.newSize); + _volumeService.resizeVolumeOnHypervisor(vol.getId(), resizeParameter.newSize, vol.getAttachedVM().getHostId(), vol.getAttachedVM().getInstanceName()); + } + } + + result = new CreateCmdResult(data.getUuid(), new Answer(null)); + result.setSuccess(true); + } catch (Throwable e) { + s_logger.error("Resize volume failed, please contact cloud support.", e); + result = new CreateCmdResult(null, new Answer(null)); + result.setResult(e.toString()); + result.setSuccess(false); + } + } finally { + if (callback != null) + callback.complete(result); + } + + } + + @Override + public void handleQualityOfServiceForVolumeMigration(VolumeInfo volumeInfo, + QualityOfServiceState qualityOfServiceState) { + s_logger.info("handleQualityOfServiceVolumeMigration: " + volumeInfo.getUuid() + " " + + volumeInfo.getPath() + ": " + qualityOfServiceState.toString()); + } + + @Override + public long getDataObjectSizeIncludingHypervisorSnapshotReserve(DataObject dataObject, StoragePool pool) { + VolumeInfo volume = (VolumeInfo) dataObject; + long volumeSize = volume.getSize(); + Integer hypervisorSnapshotReserve = volume.getHypervisorSnapshotReserve(); + + if (hypervisorSnapshotReserve != null) { + if (hypervisorSnapshotReserve < 25) { + hypervisorSnapshotReserve = 25; + } + + volumeSize += volumeSize * (hypervisorSnapshotReserve / 100f); + } + + return volumeSize; + } + + @Override + public ChapInfo getChapInfo(DataObject dataObject) { + return null; + } + + @Override + public void takeSnapshot(SnapshotInfo snapshot, AsyncCompletionCallback callback) { + CreateCmdResult result = null; + try { + 
s_logger.debug("taking volume snapshot"); + SnapshotObjectTO snapshotTO = (SnapshotObjectTO) snapshot.getTO(); + + VolumeInfo baseVolume = snapshot.getBaseVolume(); + DataStore ds = baseVolume.getDataStore(); + StoragePoolVO storagePool = _storagePoolDao.findById(ds.getId()); + + Map details = _storagePoolDao.getDetails(ds.getId()); + ProviderAdapter api = getAPI(storagePool, details); + + ProviderAdapterContext context = newManagedVolumeContext(snapshot); + ProviderAdapterDataObject inVolumeDO = newManagedDataObject(baseVolume, storagePool); + ProviderAdapterDataObject inSnapshotDO = newManagedDataObject(snapshot, storagePool); + ProviderSnapshot outSnapshot = api.snapshot(context, inVolumeDO, inSnapshotDO); + + // add the snapshot to the host group (needed for copying to non-provider storage + // to create templates, etc) + String connectionId = null; + String finalAddress = outSnapshot.getAddress(); + if (outSnapshot.canAttachDirectly()) { + connectionId = api.attach(context, inSnapshotDO); + if (connectionId != null) { + finalAddress = finalAddress + "::" + connectionId; + } + } + + snapshotTO.setPath(finalAddress); + snapshotTO.setName(outSnapshot.getName()); + snapshotTO.setHypervisorType(HypervisorType.KVM); + + // unclear why this is needed vs snapshotTO.setPath, but without it the path on + // the target snapshot object isn't set + // so a volume created from it also is not set and can't be attached to a VM + SnapshotDetailsVO snapshotDetail = new SnapshotDetailsVO(snapshot.getId(), + DiskTO.PATH, finalAddress, true); + _snapshotDetailsDao.persist(snapshotDetail); + + // save the name (reuse on revert) + snapshotDetail = new SnapshotDetailsVO(snapshot.getId(), + ProviderAdapterConstants.EXTERNAL_NAME, outSnapshot.getExternalName(), true); + _snapshotDetailsDao.persist(snapshotDetail); + + // save the uuid (reuse on revert) + snapshotDetail = new SnapshotDetailsVO(snapshot.getId(), + ProviderAdapterConstants.EXTERNAL_UUID, outSnapshot.getExternalUuid(), 
true); + _snapshotDetailsDao.persist(snapshotDetail); + + result = new CreateCmdResult(finalAddress, new CreateObjectAnswer(snapshotTO)); + result.setResult("Snapshot completed with new WWN " + finalAddress); + result.setSuccess(true); + } catch (Throwable e) { + s_logger.debug("Failed to take snapshot: " + e.getMessage()); + result = new CreateCmdResult(null, null); + result.setResult(e.toString()); + } finally { + if (callback != null) + callback.complete(result); + } + } + + @Override + public void revertSnapshot(SnapshotInfo snapshot, SnapshotInfo snapshotOnPrimaryStore, + AsyncCompletionCallback callback) { + + CommandResult result = new CommandResult(); + ProviderAdapter api = null; + try { + DataStore ds = snapshotOnPrimaryStore.getDataStore(); + StoragePoolVO storagePool = _storagePoolDao.findById(ds.getId()); + Map details = _storagePoolDao.getDetails(ds.getId()); + api = getAPI(storagePool, details); + + String externalName = null; + String externalUuid = null; + List list = _snapshotDetailsDao.findDetails(snapshot.getId(), + ProviderAdapterConstants.EXTERNAL_NAME); + if (list != null && list.size() > 0) { + externalName = list.get(0).getValue(); + } + + list = _snapshotDetailsDao.findDetails(snapshot.getId(), ProviderAdapterConstants.EXTERNAL_UUID); + if (list != null && list.size() > 0) { + externalUuid = list.get(0).getValue(); + } + + ProviderAdapterContext context = newManagedVolumeContext(snapshot); + ProviderAdapterDataObject inSnapshotDO = newManagedDataObject(snapshot, storagePool); + inSnapshotDO.setExternalName(externalName); + inSnapshotDO.setExternalUuid(externalUuid); + + // perform promote (async, wait for job to finish) + api.revert(context, inSnapshotDO); + + // set command as success + result.setSuccess(true); + } catch (Throwable e) { + s_logger.warn("revertSnapshot failed", e); + result.setResult(e.toString()); + result.setSuccess(false); + } finally { + if (callback != null) + callback.complete(result); + } + } + + @Override + public 
long getUsedBytes(StoragePool storagePool) { + long usedSpaceBytes = 0; + // Volumes + List volumes = _volumeDao.findByPoolIdAndState(storagePool.getId(), Volume.State.Ready); + if (volumes != null) { + for (VolumeVO volume : volumes) { + usedSpaceBytes += volume.getSize(); + + long vmSnapshotChainSize = volume.getVmSnapshotChainSize() == null ? 0 + : volume.getVmSnapshotChainSize(); + usedSpaceBytes += vmSnapshotChainSize; + } + } + + // Snapshots + List snapshots = _snapshotDataStoreDao.listByStoreIdAndState(storagePool.getId(), + ObjectInDataStoreStateMachine.State.Ready); + if (snapshots != null) { + for (SnapshotDataStoreVO snapshot : snapshots) { + usedSpaceBytes += snapshot.getSize(); + } + } + + // Templates + List templates = _vmTemplatePoolDao.listByPoolIdAndState(storagePool.getId(), + ObjectInDataStoreStateMachine.State.Ready); + if (templates != null) { + for (VMTemplateStoragePoolVO template : templates) { + usedSpaceBytes += template.getTemplateSize(); + } + } + + s_logger.debug("Used/Allocated storage space (in bytes): " + String.valueOf(usedSpaceBytes)); + + return usedSpaceBytes; + } + + @Override + public long getUsedIops(StoragePool storagePool) { + return super.getUsedIops(storagePool); + } + + @Override + public Map getCapabilities() { + Map mapCapabilities = new HashMap(); + + mapCapabilities.put(DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT.toString(), Boolean.TRUE.toString()); + mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_SNAPSHOT.toString(), Boolean.TRUE.toString()); + mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_VOLUME.toString(), Boolean.TRUE.toString()); // set to false because it causes weird behavior when copying templates to root volumes + mapCapabilities.put(DataStoreCapabilities.CAN_REVERT_VOLUME_TO_SNAPSHOT.toString(), Boolean.TRUE.toString()); + // indicates the datastore can create temporary volumes for use when copying + // data from a snapshot + 
mapCapabilities.put("CAN_CREATE_TEMP_VOLUME_FROM_SNAPSHOT", Boolean.TRUE.toString()); + + return mapCapabilities; + } + + @Override + public boolean canProvideStorageStats() { + return true; + } + + @Override + public Pair getStorageStats(StoragePool storagePool) { + Map details = _storagePoolDao.getDetails(storagePool.getId()); + String capacityBytesStr = details.get("capacityBytes"); + Long capacityBytes = null; + if (capacityBytesStr == null) { + ProviderAdapter api = getAPI(storagePool, details); + ProviderVolumeStorageStats stats = api.getManagedStorageStats(); + if (stats == null) { + return null; + } + capacityBytes = stats.getCapacityInBytes(); + } else { + capacityBytes = Long.parseLong(capacityBytesStr); + } + Long usedBytes = this.getUsedBytes(storagePool); + return new Pair(capacityBytes, usedBytes); + } + + @Override + public boolean canProvideVolumeStats() { + return true; + } + + public String getProviderName() { + return providerName; + } + + public void setProviderName(String providerName) { + this.providerName = providerName; + } + + @Override + public Pair getVolumeStats(StoragePool storagePool, String volumePath) { + Map details = _storagePoolDao.getDetails(storagePool.getId()); + ProviderAdapter api = getAPI(storagePool, details); + ProviderVolume.AddressType addressType = null; + if (volumePath.indexOf(";") > 1) { + String[] fields = volumePath.split(";"); + if (fields.length > 0) { + for (String field: fields) { + if (field.trim().startsWith("address=")) { + String[] toks = field.split("="); + if (toks.length > 1) { + volumePath = toks[1]; + } + } else if (field.trim().startsWith("type=")) { + String[] toks = field.split("="); + if (toks.length > 1) { + addressType = ProviderVolume.AddressType.valueOf(toks[1]); + } + } + } + } + } else { + addressType = ProviderVolume.AddressType.FIBERWWN; + } + // limited context since this is not at an account level + ProviderAdapterContext context = new ProviderAdapterContext(); + 
context.setZoneId(storagePool.getDataCenterId()); + ProviderVolume volume = api.getVolumeByAddress(context, addressType, volumePath); + + if (volume == null) { + return null; + } + + ProviderAdapterDataObject object = new ProviderAdapterDataObject(); + object.setExternalUuid(volume.getExternalUuid()); + object.setExternalName(volume.getExternalName()); + object.setType(ProviderAdapterDataObject.Type.VOLUME); + ProviderVolumeStats stats = api.getVolumeStats(context, object); + + Long provisionedSizeInBytes = stats.getActualUsedInBytes(); + Long allocatedSizeInBytes = stats.getAllocatedInBytes(); + if (provisionedSizeInBytes == null || allocatedSizeInBytes == null) { + return null; + } + return new Pair(provisionedSizeInBytes, allocatedSizeInBytes); + } + + @Override + public boolean canHostAccessStoragePool(Host host, StoragePool pool) { + Map details = _storagePoolDao.getDetails(pool.getId()); + ProviderAdapter api = getAPI(pool, details); + + ProviderAdapterContext context = new ProviderAdapterContext(); + context.setZoneId(host.getDataCenterId()); + return api.canAccessHost(context, host.getName()); + } + + void persistVolumeOrTemplateData(StoragePoolVO storagePool, Map storagePoolDetails, + DataObject dataObject, ProviderVolume volume, String connectionId) { + if (dataObject.getType() == DataObjectType.VOLUME) { + persistVolumeData(storagePool, storagePoolDetails, dataObject, volume, connectionId); + } else if (dataObject.getType() == DataObjectType.TEMPLATE) { + persistTemplateData(storagePool, storagePoolDetails, dataObject, volume, connectionId); + } + } + + void persistVolumeData(StoragePoolVO storagePool, Map details, DataObject dataObject, + ProviderVolume managedVolume, String connectionId) { + VolumeVO volumeVO = _volumeDao.findById(dataObject.getId()); + + // if its null check if the storage provider returned one that is already set + if (connectionId == null) { + connectionId = managedVolume.getExternalConnectionId(); + } + + String finalPath; + // 
format: type=fiberwwn; address=
; connid= + if (connectionId != null) { + finalPath = String.format("type=%s; address=%s; connid=%s", managedVolume.getAddressType().toString(), managedVolume.getAddress().toLowerCase(), connectionId); + } else { + finalPath = String.format("type=%s; address=%s;", managedVolume.getAddressType().toString(), managedVolume.getAddress().toLowerCase()); + } + + volumeVO.setPath(finalPath); + volumeVO.setFormat(ImageFormat.RAW); + volumeVO.setPoolId(storagePool.getId()); + volumeVO.setExternalUuid(managedVolume.getExternalUuid()); + volumeVO.setDisplay(true); + volumeVO.setDisplayVolume(true); + _volumeDao.update(volumeVO.getId(), volumeVO); + + volumeVO = _volumeDao.findById(volumeVO.getId()); + + VolumeDetailVO volumeDetailVO = new VolumeDetailVO(volumeVO.getId(), + DiskTO.PATH, finalPath, true); + _volumeDetailsDao.persist(volumeDetailVO); + + volumeDetailVO = new VolumeDetailVO(volumeVO.getId(), + ProviderAdapterConstants.EXTERNAL_NAME, managedVolume.getExternalName(), true); + _volumeDetailsDao.persist(volumeDetailVO); + + volumeDetailVO = new VolumeDetailVO(volumeVO.getId(), + ProviderAdapterConstants.EXTERNAL_UUID, managedVolume.getExternalUuid(), true); + _volumeDetailsDao.persist(volumeDetailVO); + } + + void persistTemplateData(StoragePoolVO storagePool, Map details, DataObject dataObject, + ProviderVolume volume, String connectionId) { + TemplateInfo templateInfo = (TemplateInfo) dataObject; + VMTemplateStoragePoolVO templatePoolRef = _vmTemplatePoolDao.findByPoolTemplate(storagePool.getId(), + templateInfo.getId(), null); + // template pool ref doesn't have a details object so we'll save: + // 1. external name ==> installPath + // 2. 
address ==> local download path + if (connectionId == null) { + templatePoolRef.setInstallPath(String.format("type=%s; address=%s", volume.getAddressType().toString(), + volume.getAddress().toLowerCase())); + } else { + templatePoolRef.setInstallPath(String.format("type=%s; address=%s; connid=%s", volume.getAddressType().toString(), + volume.getAddress().toLowerCase(), connectionId)); + } + templatePoolRef.setLocalDownloadPath(volume.getExternalName()); + templatePoolRef.setTemplateSize(volume.getAllocatedSizeInBytes()); + _vmTemplatePoolDao.update(templatePoolRef.getId(), templatePoolRef); + } + + ProviderAdapterContext newManagedVolumeContext(DataObject obj) { + ProviderAdapterContext ctx = new ProviderAdapterContext(); + if (obj instanceof VolumeInfo) { + VolumeVO vol = _volumeDao.findById(obj.getId()); + ctx.setAccountId(vol.getAccountId()); + ctx.setDomainId(vol.getDomainId()); + } else if (obj instanceof SnapshotInfo) { + SnapshotVO snap = _snapshotDao.findById(obj.getId()); + ctx.setAccountId(snap.getAccountId()); + ctx.setDomainId(snap.getDomainId()); + } else if (obj instanceof TemplateInfo) { + VMTemplateVO template = _vmTemplateDao.findById(obj.getId()); + ctx.setAccountId(template.getAccountId()); + // templates don't have a domain ID so always set to 0 + ctx.setDomainId(0L); + } + + if (ctx.getAccountId() != null) { + AccountVO acct = _accountDao.findById(ctx.getAccountId()); + if (acct != null) { + ctx.setAccountUuid(acct.getUuid()); + ctx.setAccountName(acct.getName()); + } + } + + if (ctx.getDomainId() != null) { + DomainVO domain = _domainDao.findById(ctx.getDomainId()); + if (domain != null) { + ctx.setDomainUuid(domain.getUuid()); + ctx.setDomainName(domain.getName()); + } + } + + return ctx; + } + + boolean isSameProvider(DataObject obj) { + StoragePoolVO storagePool = this._storagePoolDao.findById(obj.getDataStore().getId()); + if (storagePool != null && storagePool.getStorageProviderName().equals(this.getProviderName())) { + return true; + } 
else { + return false; + } + } + + ProviderAdapterDataObject newManagedDataObject(DataObject data, StoragePool storagePool) { + ProviderAdapterDataObject dataIn = new ProviderAdapterDataObject(); + if (data instanceof VolumeInfo) { + List list = _volumeDetailsDao.findDetails(data.getId(), + ProviderAdapterConstants.EXTERNAL_NAME); + String externalName = null; + if (list != null && list.size() > 0) { + externalName = list.get(0).getValue(); + } + + list = _volumeDetailsDao.findDetails(data.getId(), ProviderAdapterConstants.EXTERNAL_UUID); + String externalUuid = null; + if (list != null && list.size() > 0) { + externalUuid = list.get(0).getValue(); + } + + dataIn.setName(((VolumeInfo) data).getName()); + dataIn.setExternalName(externalName); + dataIn.setExternalUuid(externalUuid); + } else if (data instanceof SnapshotInfo) { + List list = _snapshotDetailsDao.findDetails(data.getId(), + ProviderAdapterConstants.EXTERNAL_NAME); + String externalName = null; + if (list != null && list.size() > 0) { + externalName = list.get(0).getValue(); + } + + list = _snapshotDetailsDao.findDetails(data.getId(), ProviderAdapterConstants.EXTERNAL_UUID); + String externalUuid = null; + if (list != null && list.size() > 0) { + externalUuid = list.get(0).getValue(); + } + + dataIn = new ProviderAdapterDataObject(); + dataIn.setName(((SnapshotInfo) data).getName()); + dataIn.setExternalName(externalName); + dataIn.setExternalUuid(externalUuid); + } else if (data instanceof TemplateInfo) { + TemplateInfo ti = (TemplateInfo)data; + dataIn.setName(ti.getName()); + VMTemplateStoragePoolVO templatePoolRef = _vmTemplatePoolDao.findByPoolTemplate(storagePool.getId(), ti.getId(), null); + dataIn.setExternalName(templatePoolRef.getLocalDownloadPath()); + } + dataIn.setId(data.getId()); + dataIn.setDataStoreId(data.getDataStore().getId()); + dataIn.setDataStoreUuid(data.getDataStore().getUuid()); + dataIn.setDataStoreName(data.getDataStore().getName()); + dataIn.setUuid(data.getUuid()); + 
dataIn.setType(ProviderAdapterDataObject.Type.valueOf(data.getType().toString())); + return dataIn; + } +} diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/AdaptiveDataStoreLifeCycleImpl.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/AdaptiveDataStoreLifeCycleImpl.java new file mode 100644 index 00000000000..56d9a25f34f --- /dev/null +++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/AdaptiveDataStoreLifeCycleImpl.java @@ -0,0 +1,407 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.storage.datastore.lifecycle; + +import java.io.UnsupportedEncodingException; +import java.net.URL; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.UUID; + +import javax.inject.Inject; + +import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.HostScope; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters; +import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; +import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapter; +import org.apache.cloudstack.storage.datastore.adapter.ProviderVolumeStorageStats; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.storage.datastore.provider.AdaptivePrimaryDatastoreAdapterFactoryMap; +import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper; +import org.apache.commons.collections.CollectionUtils; +import org.apache.log4j.Logger; + +import com.cloud.agent.api.StoragePoolInfo; +import com.cloud.dc.ClusterVO; +import com.cloud.dc.dao.ClusterDao; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.host.HostVO; +import com.cloud.hypervisor.Hypervisor; +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.resource.ResourceManager; +import com.cloud.storage.Storage; +import com.cloud.storage.StorageManager; +import com.cloud.storage.StoragePool; +import com.cloud.storage.StoragePoolAutomation; +import com.cloud.storage.Storage.StoragePoolType; +import 
com.cloud.utils.crypt.DBEncryptionUtil; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.host.Host; + +/** + * Manages the lifecycle of a Managed Data Store in CloudStack + */ +public class AdaptiveDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle { + @Inject + private PrimaryDataStoreDao _storagePoolDao; + private static final Logger s_logger = Logger.getLogger(AdaptiveDataStoreLifeCycleImpl.class); + + @Inject + PrimaryDataStoreHelper _dataStoreHelper; + @Inject + protected ResourceManager _resourceMgr; + @Inject + private StoragePoolAutomation _storagePoolAutomation; + @Inject + private PrimaryDataStoreDao _primaryDataStoreDao; + @Inject + private StorageManager _storageMgr; + @Inject + private ClusterDao _clusterDao; + AdaptivePrimaryDatastoreAdapterFactoryMap _adapterFactoryMap; + + public AdaptiveDataStoreLifeCycleImpl(AdaptivePrimaryDatastoreAdapterFactoryMap factoryMap) { + _adapterFactoryMap = factoryMap; + } + + /** + * Initialize the storage pool + * https://hostname:port?cpg=&snapcpg=&hostset=&disabletlsvalidation=true& + */ + @Override + public DataStore initialize(Map dsInfos) { + // https://hostname:443/cpgname/hostsetname.
hostset should map to the cluster or zone (all nodes in the cluster or zone MUST be in the hostset and be configured outside cloudstack for now) + String url = (String) dsInfos.get("url"); + Long zoneId = (Long) dsInfos.get("zoneId"); + Long podId = (Long)dsInfos.get("podId"); + Long clusterId = (Long)dsInfos.get("clusterId"); + String dsName = (String) dsInfos.get("name"); + String providerName = (String) dsInfos.get("providerName"); + Long capacityBytes = (Long) dsInfos.get("capacityBytes"); + Long capacityIops = (Long)dsInfos.get("capacityIops"); + String tags = (String)dsInfos.get("tags"); + @SuppressWarnings("unchecked") + Map details = (Map) dsInfos.get("details"); + + // validate inputs are valid/provided as required + if (zoneId == null) throw new CloudRuntimeException("Zone Id must be specified."); + + URL uri = null; + try { + uri = new URL(url); + } catch (Exception ignored) { + throw new CloudRuntimeException(url + " is not a valid uri"); + } + + String username = null; + String password = null; + String token = null; + String userInfo = uri.getUserInfo(); + if (userInfo == null || userInfo.split(":").length < 2) { + // check if it was passed in the details object + username = details.get(ProviderAdapter.API_USERNAME_KEY); + if (username != null) { + password = details.get(ProviderAdapter.API_PASSWORD_KEY); + userInfo = username + ":" + password; + } else { + token = details.get(ProviderAdapter.API_TOKEN_KEY); + } + } else { + try { + userInfo = java.net.URLDecoder.decode(userInfo, StandardCharsets.UTF_8.toString()); + } catch (UnsupportedEncodingException e) { + throw new CloudRuntimeException("Unexpected error parsing the provided user info; check that it does not include any invalid characters"); + } + + username = userInfo.split(":")[0]; + password = userInfo.split(":")[1]; + } + + s_logger.info("Registering block storage provider with user=" + username); + + + if (clusterId != null) { + Hypervisor.HypervisorType hypervisorType = 
getHypervisorTypeForCluster(clusterId); + + if (!hypervisorType.equals(HypervisorType.KVM)) { + throw new CloudRuntimeException("Unsupported hypervisor type for provided cluster: " + hypervisorType.toString()); + } + + // Primary datastore is cluster-wide, check and set the podId and clusterId parameters + if (podId == null) { + throw new CloudRuntimeException("Pod Id must also be specified when the Cluster Id is specified for Cluster-wide primary storage."); + } + + s_logger.info("Registering with clusterid=" + clusterId + " which is confirmed to be a KVM host"); + + } else if (podId != null) { + throw new CloudRuntimeException("Cluster Id must also be specified when the Pod Id is specified for Cluster-wide primary storage."); + } + + // validate we don't have any duplication going on + List storagePoolVO = _primaryDataStoreDao.findPoolsByProvider(providerName); + if (CollectionUtils.isNotEmpty(storagePoolVO)) { + for (StoragePoolVO poolVO : storagePoolVO) { + Map poolDetails = _primaryDataStoreDao.getDetails(poolVO.getId()); + String otherPoolUrl = poolDetails.get(ProviderAdapter.API_URL_KEY); + if (dsName.equals(poolVO.getName())) { + throw new InvalidParameterValueException("A pool with the name [" + dsName + "] already exists, choose another name"); + } + + if (uri.toString().equals(otherPoolUrl)) { + throw new IllegalArgumentException("Provider URL [" + otherPoolUrl + "] is already in use by another storage pool named [" + poolVO.getName() + "], please validate you have correct API and CPG"); + } + } + } + + s_logger.info("Validated no other pool exists with this name: " + dsName); + + try { + PrimaryDataStoreParameters parameters = new PrimaryDataStoreParameters(); + parameters.setHost(uri.getHost()); + parameters.setPort(uri.getPort()); + parameters.setPath(uri.getPath() + "?" 
+ uri.getQuery()); + parameters.setType(StoragePoolType.FiberChannel); + parameters.setZoneId(zoneId); + parameters.setPodId(podId); + parameters.setClusterId(clusterId); + parameters.setName(dsName); + parameters.setProviderName(providerName); + parameters.setManaged(true); + parameters.setCapacityBytes(capacityBytes); + parameters.setUsedBytes(0); + parameters.setCapacityIops(capacityIops); + parameters.setHypervisorType(HypervisorType.KVM); + parameters.setTags(tags); + parameters.setUserInfo(userInfo); + parameters.setUuid(UUID.randomUUID().toString()); + + details.put(ProviderAdapter.API_URL_KEY, uri.toString()); + if (username != null) { + details.put(ProviderAdapter.API_USERNAME_KEY, username); + } + + if (password != null) { + details.put(ProviderAdapter.API_PASSWORD_KEY, DBEncryptionUtil.encrypt(password)); + } + + if (token != null) { + details.put(ProviderAdapter.API_TOKEN_KEY, DBEncryptionUtil.encrypt(details.get(ProviderAdapter.API_TOKEN_KEY))); + } + // this appears to control placing the storage pool above network file system based storage pools in priority + details.put(Storage.Capability.HARDWARE_ACCELERATION.toString(), "true"); + // this new capability indicates the storage pool allows volumes to migrate to/from other pools (i.e.
to/from NFS pools) + details.put(Storage.Capability.ALLOW_MIGRATE_OTHER_POOLS.toString(), "true"); + parameters.setDetails(details); + + // make sure the storage array is connectable and the pod and hostgroup objects exist + ProviderAdapter api = _adapterFactoryMap.getAPI(parameters.getUuid(), providerName, details); + + // validate the provided details are correct/valid for the provider + api.validate(); + + // if we have user-provided capacity bytes, validate they do not exceed the managed storage capacity bytes + ProviderVolumeStorageStats stats = api.getManagedStorageStats(); + if (capacityBytes != null && capacityBytes != 0) { + if (stats.getCapacityInBytes() > 0) { + if (stats.getCapacityInBytes() < capacityBytes) { + throw new InvalidParameterValueException("Capacity bytes provided exceeds the capacity of the storage endpoint: provided by user: " + capacityBytes + ", storage capacity from storage provider: " + stats.getCapacityInBytes()); + } + } + parameters.setCapacityBytes(capacityBytes); + } + // if we have no user-provided capacity bytes, use the ones provided by storage + else { + if (stats.getCapacityInBytes() <= 0) { + throw new InvalidParameterValueException("Capacity bytes not available from the storage provider, user provided capacity bytes must be specified"); + } + parameters.setCapacityBytes(stats.getCapacityInBytes()); + } + + s_logger.info("Persisting [" + dsName + "] storage pool metadata to database"); + return _dataStoreHelper.createPrimaryDataStore(parameters); + } catch (Throwable e) { + s_logger.error("Problem persisting storage pool", e); + throw new CloudRuntimeException(e); + } + } + + /** + * Get the type of Hypervisor from the cluster id + * @param clusterId + * @return + */ + private Hypervisor.HypervisorType getHypervisorTypeForCluster(long clusterId) { + ClusterVO cluster = _clusterDao.findById(clusterId); + if (cluster == null) { + throw new CloudRuntimeException("Unable to locate the specified cluster: " + clusterId); + } + 
+ return cluster.getHypervisorType(); + } + + /** + * Attach the pool to a cluster (all hosts in a single cluster) + */ + @Override + public boolean attachCluster(DataStore store, ClusterScope scope) { + s_logger.info("Attaching storage pool [" + store.getName() + "] to cluster [" + scope.getScopeId() + "]"); + _dataStoreHelper.attachCluster(store); + + StoragePoolVO dataStoreVO = _storagePoolDao.findById(store.getId()); + + PrimaryDataStoreInfo primarystore = (PrimaryDataStoreInfo) store; + // Check if there is host up in this cluster + List allHosts = _resourceMgr.listAllUpHosts(Host.Type.Routing, primarystore.getClusterId(), primarystore.getPodId(), primarystore.getDataCenterId()); + if (allHosts.isEmpty()) { + _primaryDataStoreDao.expunge(primarystore.getId()); + throw new CloudRuntimeException("No host up to associate a storage pool with in cluster " + primarystore.getClusterId()); + } + + if (dataStoreVO.isManaged()) { + //boolean success = false; + for (HostVO h : allHosts) { + s_logger.debug("adding host " + h.getName() + " to storage pool " + store.getName()); + } + } + + s_logger.debug("In createPool Adding the pool to each of the hosts"); + List poolHosts = new ArrayList(); + for (HostVO h : allHosts) { + try { + _storageMgr.connectHostToSharedPool(h.getId(), primarystore.getId()); + poolHosts.add(h); + } catch (Exception e) { + s_logger.warn("Unable to establish a connection between " + h + " and " + primarystore, e); + } + } + + if (poolHosts.isEmpty()) { + s_logger.warn("No host can access storage pool " + primarystore + " on cluster " + primarystore.getClusterId()); + _primaryDataStoreDao.expunge(primarystore.getId()); + throw new CloudRuntimeException("Failed to access storage pool"); + } + + return true; + } + + @Override + public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo existingInfo) { + s_logger.info("Attaching storage pool [" + store.getName() + "] to host [" + scope.getScopeId() + "]"); + 
_dataStoreHelper.attachHost(store, scope, existingInfo); + return true; + } + + @Override + public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType hypervisorType) { + s_logger.info("Attaching storage pool [" + dataStore.getName() + "] to zone [" + scope.getScopeId() + "]"); + List hosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(hypervisorType, scope.getScopeId()); + List poolHosts = new ArrayList(); + for (HostVO host : hosts) { + try { + _storageMgr.connectHostToSharedPool(host.getId(), dataStore.getId()); + poolHosts.add(host); + } catch (Exception e) { + s_logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e); + } + } + if (poolHosts.isEmpty()) { + s_logger.warn("No host can access storage pool " + dataStore + " in this zone."); + _primaryDataStoreDao.expunge(dataStore.getId()); + throw new CloudRuntimeException("Failed to create storage pool as it is not accessible to hosts."); + } + _dataStoreHelper.attachZone(dataStore, hypervisorType); + return true; + } + + /** + * Put the storage pool in maintenance mode + */ + @Override + public boolean maintain(DataStore store) { + s_logger.info("Placing storage pool [" + store.getName() + "] in maintainence mode"); + if (_storagePoolAutomation.maintain(store)) { + return _dataStoreHelper.maintain(store); + } else { + return false; + } + } + + /** + * Cancel maintenance mode + */ + @Override + public boolean cancelMaintain(DataStore store) { + s_logger.info("Canceling storage pool maintainence for [" + store.getName() + "]"); + if (_dataStoreHelper.cancelMaintain(store)) { + return _storagePoolAutomation.cancelMaintain(store); + } else { + return false; + } + } + + /** + * Delete the data store + */ + @Override + public boolean deleteDataStore(DataStore store) { + s_logger.info("Delete datastore called for [" + store.getName() + "]"); + return _dataStoreHelper.deletePrimaryDataStore(store); + } + + /** + * Migrate objects in this store to 
another store + */ + @Override + public boolean migrateToObjectStore(DataStore store) { + s_logger.info("Migrate datastore called for [" + store.getName() + "]. This is not currently implemented for this provider at this time"); + return false; + } + + /** + * Update the storage pool configuration + */ + @Override + public void updateStoragePool(StoragePool storagePool, Map details) { + _adapterFactoryMap.updateAPI(storagePool.getUuid(), storagePool.getStorageProviderName(), details); + } + + /** + * Enable the storage pool (allows volumes from this pool) + */ + @Override + public void enableStoragePool(DataStore store) { + s_logger.info("Enabling storage pool [" + store.getName() + "]"); + _dataStoreHelper.enable(store); + } + + /** + * Disable storage pool (stops new volume provisioning from pool) + */ + @Override + public void disableStoragePool(DataStore store) { + s_logger.info("Disabling storage pool [" + store.getName() + "]"); + _dataStoreHelper.disable(store); + } +} diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/provider/AdaptivePrimaryDatastoreAdapterFactoryMap.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/provider/AdaptivePrimaryDatastoreAdapterFactoryMap.java new file mode 100644 index 00000000000..ee5caa7178e --- /dev/null +++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/provider/AdaptivePrimaryDatastoreAdapterFactoryMap.java @@ -0,0 +1,134 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.datastore.provider; + +import java.util.HashMap; +import java.util.Map; + +import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapter; +import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterFactory; +import org.apache.log4j.Logger; + +import com.cloud.utils.crypt.DBEncryptionUtil; +import com.cloud.utils.exception.CloudRuntimeException; + +public class AdaptivePrimaryDatastoreAdapterFactoryMap { + private final Logger logger = Logger.getLogger(ProviderAdapter.class); + private Map factoryMap = new HashMap(); + private Map apiMap = new HashMap(); + + public AdaptivePrimaryDatastoreAdapterFactoryMap() { + + } + + /** + * Given a storage pool return current client. Reconfigure if changes are + * discovered + */ + public final ProviderAdapter getAPI(String uuid, String providerName, Map details) { + ProviderAdapter api = apiMap.get(uuid); + if (api == null) { + synchronized (this) { + api = apiMap.get(uuid); + if (api == null) { + api = createNewAdapter(uuid, providerName, details); + apiMap.put(uuid, api); + logger.debug("Cached the new ProviderAdapter for storage pool " + uuid); + } + } + } + return api; + } + + /** + * Update the API with the given UUID. 
allows for URL changes and authentication updates + * @param uuid + * @param providerName + * @param details + */ + public final void updateAPI(String uuid, String providerName, Map details) { + // attempt to create (which validates) the new info before updating the cache + ProviderAdapter adapter = createNewAdapter(uuid, providerName, details); + + // if it's null it's likely because no action has occurred yet to trigger the API object to be loaded + if (adapter == null) { + throw new CloudRuntimeException("Adapter configuration failed for an unknown reason"); + } + + ProviderAdapter oldAdapter = apiMap.get(uuid); + apiMap.put(uuid, adapter); + try { + if (oldAdapter != null) oldAdapter.disconnect(); + } catch (Throwable e) { + logger.debug("Failure closing the old ProviderAdapter during an update of the cached data after validation of the new adapter configuration, likely the configuration is no longer valid", e); + } + } + + public void register(ProviderAdapterFactory factory) { + factoryMap.put(factory.getProviderName(), factory); + } + + protected ProviderAdapter createNewAdapter(String uuid, String providerName, Map details) { + String authnType = details.get(ProviderAdapter.API_AUTHENTICATION_TYPE_KEY); + if (authnType == null) authnType = "basicauth"; + String lookupKey = null; + if (authnType.equals("basicauth")) { + lookupKey = details.get(ProviderAdapter.API_USERNAME_KEY); + if (lookupKey == null) { + throw new RuntimeException("Storage provider configuration property [" + ProviderAdapter.API_USERNAME_KEY + "] is required when using authentication type [" + authnType + "]"); + } + } else if (authnType.equals("apitoken")) { + lookupKey = details.get(ProviderAdapter.API_TOKEN_KEY); + if (lookupKey == null) { + throw new RuntimeException("Storage provider configuration property [" + ProviderAdapter.API_TOKEN_KEY + "] is required when using authentication type [" + authnType + "]"); + } + } else { + throw new RuntimeException("Storage provider configuration 
property [" + ProviderAdapter.API_AUTHENTICATION_TYPE_KEY + "] not set to valid value"); + } + + String url = details.get(ProviderAdapter.API_URL_KEY); + if (url == null) { + throw new RuntimeException("URL required when configuring a Managed Block API storage provider"); + } + + logger.debug("Looking for Provider [" + providerName + "] at [" + url + "]"); + ProviderAdapterFactory factory = factoryMap.get(providerName); + if (factory == null) { + throw new RuntimeException("Unable to find a storage provider API factory for provider: " + providerName); + } + + // decrypt password or token before sending to provider + if (authnType.equals("basicauth")) { + try { + details.put(ProviderAdapter.API_PASSWORD_KEY, DBEncryptionUtil.decrypt(details.get(ProviderAdapter.API_PASSWORD_KEY))); + } catch (Exception e) { + logger.warn("Failed to decrypt managed block API property: [" + ProviderAdapter.API_PASSWORD_KEY + "], trying to use as-is"); + } + } else if (authnType.equals("apitoken")) { + try { + details.put(ProviderAdapter.API_TOKEN_KEY, DBEncryptionUtil.decrypt(details.get(ProviderAdapter.API_TOKEN_KEY))); + } catch (Exception e) { + logger.warn("Failed to decrypt managed block API property: [" + ProviderAdapter.API_TOKEN_KEY + "], trying to use as-is"); + } + } + + ProviderAdapter api = factory.create(url, details); + api.validate(); + logger.debug("Creating new ProviderAdapter object for endpoint: " + providerName + "@" + url); + return api; + } +} diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/provider/AdaptivePrimaryDatastoreProviderImpl.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/provider/AdaptivePrimaryDatastoreProviderImpl.java new file mode 100644 index 00000000000..200844702b2 --- /dev/null +++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/provider/AdaptivePrimaryDatastoreProviderImpl.java @@ -0,0 +1,86 @@ +// Licensed to 
the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.datastore.provider; + +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle; +import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider; +import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; + +import com.cloud.utils.component.ComponentContext; + +import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterFactory; +import org.apache.cloudstack.storage.datastore.driver.AdaptiveDataStoreDriverImpl; +import org.apache.cloudstack.storage.datastore.lifecycle.AdaptiveDataStoreLifeCycleImpl; + +@Component +public abstract class AdaptivePrimaryDatastoreProviderImpl implements PrimaryDataStoreProvider { + static final Logger s_logger = Logger.getLogger(AdaptivePrimaryDatastoreProviderImpl.class); + + AdaptiveDataStoreDriverImpl driver; + + HypervisorHostListener listener; + + AdaptivePrimaryDatastoreAdapterFactoryMap factoryMap = 
new AdaptivePrimaryDatastoreAdapterFactoryMap(); + + DataStoreLifeCycle lifecycle; + + AdaptivePrimaryDatastoreProviderImpl(ProviderAdapterFactory f) { + s_logger.info("Creating " + f.getProviderName()); + factoryMap.register(f); + } + + @Override + public DataStoreLifeCycle getDataStoreLifeCycle() { + return this.lifecycle; + } + + @Override + public boolean configure(Map params) { + s_logger.info("Configuring " + getName()); + driver = new AdaptiveDataStoreDriverImpl(factoryMap); + driver.setProviderName(getName()); + lifecycle = ComponentContext.inject(new AdaptiveDataStoreLifeCycleImpl(factoryMap)); + driver = ComponentContext.inject(driver); + listener = ComponentContext.inject(new AdaptivePrimaryHostListener(factoryMap)); + return true; + } + + @Override + public PrimaryDataStoreDriver getDataStoreDriver() { + return this.driver; + } + + @Override + public HypervisorHostListener getHostListener() { + return this.listener; + } + + @Override + public Set getTypes() { + Set types = new HashSet(); + types.add(DataStoreProviderType.PRIMARY); + return types; + } + +} diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/provider/AdaptivePrimaryHostListener.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/provider/AdaptivePrimaryHostListener.java new file mode 100644 index 00000000000..68dd4a15c62 --- /dev/null +++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/provider/AdaptivePrimaryHostListener.java @@ -0,0 +1,83 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.datastore.provider; + +import javax.inject.Inject; + +import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; +import org.apache.log4j.Logger; + +import com.cloud.exception.StorageConflictException; +import com.cloud.storage.StoragePoolHostVO; +import com.cloud.storage.dao.StoragePoolHostDao; + +public class AdaptivePrimaryHostListener implements HypervisorHostListener { + static final Logger s_logger = Logger.getLogger(AdaptivePrimaryHostListener.class); + + @Inject + StoragePoolHostDao storagePoolHostDao; + + public AdaptivePrimaryHostListener(AdaptivePrimaryDatastoreAdapterFactoryMap factoryMap) { + + } + + @Override + public boolean hostAboutToBeRemoved(long hostId) { + s_logger.debug("hostAboutToBeRemoved called"); + return true; + } + + @Override + public boolean hostAdded(long hostId) { + s_logger.debug("hostAdded called"); + return true; + } + + @Override + public boolean hostConnect(long hostId, long poolId) throws StorageConflictException { + s_logger.debug("hostConnect called for hostid [" + hostId + "], poolId [" + poolId + "]"); + StoragePoolHostVO storagePoolHost = storagePoolHostDao.findByPoolHost(poolId, hostId); + if (storagePoolHost == null) { + storagePoolHost = new StoragePoolHostVO(poolId, hostId, ""); + storagePoolHostDao.persist(storagePoolHost); + } + return true; + } + + @Override + public boolean hostDisconnected(long hostId, long poolId) { + s_logger.debug("hostDisconnected called for hostid [" + hostId + "], poolId [" + poolId + "]"); + StoragePoolHostVO 
storagePoolHost = storagePoolHostDao.findByPoolHost(poolId, hostId); + + if (storagePoolHost != null) { + storagePoolHostDao.deleteStoragePoolHostDetails(hostId, poolId); + } + return true; + } + + @Override + public boolean hostEnabled(long hostId) { + s_logger.debug("hostEnabled called"); + return true; + } + + @Override + public boolean hostRemoved(long hostId, long clusterId) { + s_logger.debug("hostRemoved called"); + return true; + } +} diff --git a/plugins/storage/volume/flasharray/pom.xml b/plugins/storage/volume/flasharray/pom.xml new file mode 100644 index 00000000000..267595b58e9 --- /dev/null +++ b/plugins/storage/volume/flasharray/pom.xml @@ -0,0 +1,52 @@ + + + 4.0.0 + cloud-plugin-storage-volume-flasharray + Apache CloudStack Plugin - Storage Volume - Pure Flash Array + + org.apache.cloudstack + cloudstack-plugins + 4.19.0.0-SNAPSHOT + ../../../pom.xml + + + + org.apache.cloudstack + cloud-plugin-storage-volume-adaptive + ${project.version} + + + + + + maven-surefire-plugin + + + integration-test + + test + + + + + + + diff --git a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapter.java b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapter.java new file mode 100644 index 00000000000..3082a19c732 --- /dev/null +++ b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapter.java @@ -0,0 +1,1086 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.datastore.adapter.flasharray; + +import java.io.IOException; +import java.io.UnsupportedEncodingException; +import java.net.MalformedURLException; +import java.net.URL; +import java.security.KeyManagementException; +import java.security.KeyStoreException; +import java.security.NoSuchAlgorithmException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import javax.net.ssl.HostnameVerifier; +import javax.net.ssl.SSLContext; + +import org.apache.http.Header; +import org.apache.http.NameValuePair; +import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapter; +import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterContext; +import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterDataObject; +import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterDiskOffering; +import org.apache.cloudstack.storage.datastore.adapter.ProviderSnapshot; +import org.apache.cloudstack.storage.datastore.adapter.ProviderVolume; +import org.apache.cloudstack.storage.datastore.adapter.ProviderVolumeNamer; +import org.apache.cloudstack.storage.datastore.adapter.ProviderVolumeStats; +import org.apache.cloudstack.storage.datastore.adapter.ProviderVolumeStorageStats; +import org.apache.cloudstack.storage.datastore.adapter.ProviderVolume.AddressType; +import org.apache.http.client.config.RequestConfig; +import org.apache.http.client.entity.UrlEncodedFormEntity; +import org.apache.http.client.methods.CloseableHttpResponse; +import 
org.apache.http.client.methods.HttpDelete; +import org.apache.http.client.methods.HttpGet; +import org.apache.http.client.methods.HttpPatch; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.conn.ssl.NoopHostnameVerifier; +import org.apache.http.conn.ssl.TrustAllStrategy; +import org.apache.http.entity.StringEntity; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.impl.client.HttpClients; +import org.apache.http.message.BasicNameValuePair; +import org.apache.http.ssl.SSLContextBuilder; +import org.apache.log4j.Logger; + +import com.cloud.utils.exception.CloudRuntimeException; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.ObjectMapper; + +/** + * Array API + */ +public class FlashArrayAdapter implements ProviderAdapter { + private Logger logger = Logger.getLogger(FlashArrayAdapter.class); + + public static final String HOSTGROUP = "hostgroup"; + public static final String STORAGE_POD = "pod"; + public static final String KEY_TTL = "keyttl"; + public static final String CONNECT_TIMEOUT_MS = "connectTimeoutMs"; + public static final String POST_COPY_WAIT_MS = "postCopyWaitMs"; + public static final String API_LOGIN_VERSION = "apiLoginVersion"; + public static final String API_VERSION = "apiVersion"; + + private static final long KEY_TTL_DEFAULT = (1000 * 60 * 14); + private static final long CONNECT_TIMEOUT_MS_DEFAULT = 600000; + private static final long POST_COPY_WAIT_MS_DEFAULT = 5000; + private static final String API_LOGIN_VERSION_DEFAULT = "1.19"; + private static final String API_VERSION_DEFAULT = "2.23"; + + static final ObjectMapper mapper = new ObjectMapper(); + public String pod = null; + public String hostgroup = null; + private String username; + private String password; + private String accessToken; + private String url; + private long keyExpiration = -1; + private long keyTtl = 
KEY_TTL_DEFAULT; + private long connTimeout = CONNECT_TIMEOUT_MS_DEFAULT; + private long postCopyWait = POST_COPY_WAIT_MS_DEFAULT; + private CloseableHttpClient _client = null; + private boolean skipTlsValidation; + private String apiLoginVersion = API_LOGIN_VERSION_DEFAULT; + private String apiVersion = API_VERSION_DEFAULT; + + private Map connectionDetails = null; + + protected FlashArrayAdapter(String url, Map details) { + this.url = url; + this.connectionDetails = details; + login(); + } + + @Override + public ProviderVolume create(ProviderAdapterContext context, ProviderAdapterDataObject dataObject, ProviderAdapterDiskOffering offering, long size) { + FlashArrayVolume request = new FlashArrayVolume(); + request.setExternalName( + pod + "::" + ProviderVolumeNamer.generateObjectName(context, dataObject)); + request.setPodName(pod); + request.setAllocatedSizeBytes(roundUp512Boundary(size)); + FlashArrayList list = POST("/volumes?names=" + request.getExternalName() + "&overwrite=false", + request, new TypeReference>() { + }); + + return (ProviderVolume) getFlashArrayItem(list); + } + + /** + * Volumes must be added to a host set to be visable to the hosts. 
+ * the Hostset should contain all the hosts that are membrers of the zone or + * cluster (depending on Cloudstack Storage Pool configuration) + */ + @Override + public String attach(ProviderAdapterContext context, ProviderAdapterDataObject dataObject) { + String volumeName = normalizeName(pod, dataObject.getExternalName()); + try { + FlashArrayList list = POST("/connections?host_group_names=" + hostgroup + "&volume_names=" + volumeName, null, new TypeReference> () { }); + + if (list == null || list.getItems() == null || list.getItems().size() == 0) { + throw new RuntimeException("Volume attach did not return lun information"); + } + + FlashArrayConnection connection = (FlashArrayConnection)this.getFlashArrayItem(list); + if (connection.getLun() == null) { + throw new RuntimeException("Volume attach missing lun field"); + } + + return ""+connection.getLun(); + + } catch (Throwable e) { + // the volume is already attached. happens in some scenarios where orchestration creates the volume before copying to it + if (e.toString().contains("Connection already exists")) { + FlashArrayList list = GET("/connections?volume_names=" + volumeName, + new TypeReference>() { + }); + if (list != null && list.getItems() != null) { + return ""+list.getItems().get(0).getLun(); + } else { + throw new RuntimeException("Volume lun is not found in existing connection"); + } + } else { + throw e; + } + } + } + + @Override + public void detach(ProviderAdapterContext context, ProviderAdapterDataObject dataObject) { + String volumeName = normalizeName(pod, dataObject.getExternalName()); + DELETE("/connections?host_group_names=" + hostgroup + "&volume_names=" + volumeName); + } + + @Override + public void delete(ProviderAdapterContext context, ProviderAdapterDataObject dataObject) { + // public void deleteVolume(String volumeNamespace, String volumeName) { + // first make sure we are disconnected + removeVlunsAll(context, pod, dataObject.getExternalName()); + String fullName = 
normalizeName(pod, dataObject.getExternalName()); + + FlashArrayVolume volume = new FlashArrayVolume(); + volume.setDestroyed(true); + try { + PATCH("/volumes?names=" + fullName, volume, new TypeReference>() { + }); + } catch (CloudRuntimeException e) { + if (e.toString().contains("Volume does not exist")) { + return; + } else { + throw e; + } + } + } + + @Override + public ProviderVolume getVolume(ProviderAdapterContext context, ProviderAdapterDataObject dataObject) { + String externalName = dataObject.getExternalName(); + // if its not set, look for the generated name for some edge cases + if (externalName == null) { + externalName = pod + "::" + ProviderVolumeNamer.generateObjectName(context, dataObject); + } + FlashArrayVolume volume = null; + try { + volume = getVolume(externalName); + // if we didn't get an address back its likely an empty object + if (volume != null && volume.getAddress() == null) { + return null; + } else if (volume == null) { + return null; + } + + populateConnectionId(volume); + + return volume; + } catch (Exception e) { + // assume any exception is a not found. Flash returns 400's for most errors + return null; + } + } + + @Override + public ProviderVolume getVolumeByAddress(ProviderAdapterContext context, AddressType addressType, String address) { + // public FlashArrayVolume getVolumeByWwn(String wwn) { + if (address == null ||addressType == null) { + throw new RuntimeException("Invalid search criteria provided for getVolumeByAddress"); + } + + // only support WWN type addresses at this time. + if (!ProviderVolume.AddressType.FIBERWWN.equals(addressType)) { + throw new RuntimeException( + "Invalid volume address type [" + addressType + "] requested for volume search"); + } + + // convert WWN to serial to search on. 
strip out WWN type # + Flash OUI value + String serial = address.substring(FlashArrayVolume.PURE_OUI.length() + 1).toUpperCase(); + String query = "serial='" + serial + "'"; + + FlashArrayVolume volume = null; + try { + FlashArrayList list = GET("/volumes?filter=" + query, + new TypeReference>() { + }); + + // if we didn't get an address back its likely an empty object + if (list == null || list.getItems() == null || list.getItems().size() == 0) { + return null; + } + + volume = (FlashArrayVolume)this.getFlashArrayItem(list); + if (volume != null && volume.getAddress() == null) { + return null; + } + + populateConnectionId(volume); + + return volume; + } catch (Exception e) { + // assume any exception is a not found. Flash returns 400's for most errors + return null; + } + } + + private void populateConnectionId(FlashArrayVolume volume) { + // we need to see if there is a connection (lun) associated with this volume. + // note we assume 1 lun for the hostgroup associated with this object + FlashArrayList list = null; + try { + list = GET("/connections?volume_names=" + volume.getExternalName(), + new TypeReference>() { + }); + } catch (CloudRuntimeException e) { + // this means there is no attachment associated with this volume on the array + if (e.toString().contains("Bad Request")) { + return; + } + } + + if (list != null && list.getItems() != null) { + for (FlashArrayConnection conn: list.getItems()) { + if (conn.getHostGroup() != null && conn.getHostGroup().getName().equals(this.hostgroup)) { + volume.setExternalConnectionId(""+conn.getLun()); + break; + } + } + + } + } + + @Override + public void resize(ProviderAdapterContext context, ProviderAdapterDataObject dataObject, long newSizeInBytes) { + // public void resizeVolume(String volumeNamespace, String volumeName, long + // newSizeInBytes) { + FlashArrayVolume volume = new FlashArrayVolume(); + volume.setAllocatedSizeBytes(roundUp512Boundary(newSizeInBytes)); + PATCH("/volumes?names=" + 
dataObject.getExternalName(), volume, null); + } + + /** + * Take a snapshot and return a Volume representing that snapshot + * + * @param volumeName + * @param snapshotName + * @return + */ + @Override + public ProviderSnapshot snapshot(ProviderAdapterContext context, ProviderAdapterDataObject sourceDataObject, ProviderAdapterDataObject targetDataObject) { + // public FlashArrayVolume snapshotVolume(String volumeNamespace, String + // volumeName, String snapshotName) { + FlashArrayList list = POST( + "/volume-snapshots?source_names=" + sourceDataObject.getExternalName(), null, + new TypeReference>() { + }); + + return (FlashArrayVolume) getFlashArrayItem(list); + } + + /** + * Replaces the base volume with the given snapshot. Note this can only be done + * when the snapshot and volume + * are + * + * @param name + * @return + */ + @Override + public ProviderVolume revert(ProviderAdapterContext context, ProviderAdapterDataObject snapshotDataObject) { + // public void promoteSnapshot(String namespace, String snapshotName) { + if (snapshotDataObject == null || snapshotDataObject.getExternalName() == null) { + throw new RuntimeException("Snapshot revert not possible as an external snapshot name was not provided"); + } + + FlashArrayVolume snapshot = this.getSnapshot(snapshotDataObject.getExternalName()); + if (snapshot.getSource() == null) { + throw new CloudRuntimeException("Snapshot source was not available from the storage array"); + } + + String origVolumeName = snapshot.getSource().getName(); + + // now "create" a new volume with the snapshot volume as its source (basically a + // Flash array copy) + // and overwrite to true (volume already exists, we are recreating it) + FlashArrayVolume input = new FlashArrayVolume(); + input.setExternalName(origVolumeName); + input.setAllocatedSizeBytes(roundUp512Boundary(snapshot.getAllocatedSizeInBytes())); + input.setSource(new FlashArrayVolumeSource(snapshot.getExternalName())); + POST("/volumes?names=" + origVolumeName + 
"&overwrite=true", input, null); + + return this.getVolume(origVolumeName); + } + + @Override + public ProviderSnapshot getSnapshot(ProviderAdapterContext context, ProviderAdapterDataObject dataObject) { + FlashArrayList list = GET( + "/volume-snapshots?names=" + dataObject.getExternalName(), + new TypeReference>() { + }); + return (FlashArrayVolume) getFlashArrayItem(list); + } + + @Override + public ProviderVolume copy(ProviderAdapterContext context, ProviderAdapterDataObject sourceDataObject, ProviderAdapterDataObject destDataObject) { + // private ManagedVolume copy(ManagedVolume sourceVolume, String destNamespace, + // String destName) { + if (sourceDataObject == null || sourceDataObject.getExternalName() == null + ||sourceDataObject.getType() == null) { + throw new RuntimeException("Provided volume has no external source information"); + } + + if (destDataObject == null) { + throw new RuntimeException("Provided volume target information was not provided"); + } + + if (destDataObject.getExternalName() == null) { + // this means its a new volume? 
so our external name will be the Cloudstack UUID + destDataObject + .setExternalName(ProviderVolumeNamer.generateObjectName(context, destDataObject)); + } + + FlashArrayVolume currentVol; + if (sourceDataObject.getType().equals(ProviderAdapterDataObject.Type.SNAPSHOT)) { + currentVol = getSnapshot(sourceDataObject.getExternalName()); + } else { + currentVol = (FlashArrayVolume) this + .getFlashArrayItem(GET("/volumes?names=" + sourceDataObject.getExternalName(), + new TypeReference>() { + })); + } + + if (currentVol == null) { + throw new RuntimeException("Unable to find current volume to copy from"); + } + + // now "create" a new volume with the snapshot volume as its source (basically a + // Flash array copy) + // and overwrite to true (volume already exists, we are recreating it) + FlashArrayVolume payload = new FlashArrayVolume(); + payload.setExternalName(normalizeName(pod, destDataObject.getExternalName())); + payload.setPodName(pod); + payload.setAllocatedSizeBytes(roundUp512Boundary(currentVol.getAllocatedSizeInBytes())); + payload.setSource(new FlashArrayVolumeSource(sourceDataObject.getExternalName())); + FlashArrayList list = POST( + "/volumes?names=" + payload.getExternalName() + "&overwrite=true", payload, + new TypeReference>() { + }); + FlashArrayVolume outVolume = (FlashArrayVolume) getFlashArrayItem(list); + pause(postCopyWait); + return outVolume; + } + + private void pause(long period) { + try { + Thread.sleep(period); + } catch (InterruptedException e) { + + } + } + + public boolean supportsSnapshotConnection() { + return false; + } + + @Override + public void refresh(Map details) { + this.connectionDetails = details; + this.refreshSession(true); + } + + @Override + public void validate() { + login(); + // check if hostgroup and pod from details really exist - we will + // require a distinct configuration object/connection object for each type + if (this.getHostgroup(hostgroup) == null) { + throw new RuntimeException("Hostgroup [" + hostgroup + 
"] not found in FlashArray at [" + url + + "], please validate configuration"); + } + + if (this.getVolumeNamespace(pod) == null) { + throw new RuntimeException( + "Pod [" + pod + "] not found in FlashArray at [" + url + "], please validate configuration"); + } + } + + @Override + public void disconnect() { + return; + } + + @Override + public ProviderVolumeStorageStats getManagedStorageStats() { + FlashArrayPod pod = getVolumeNamespace(this.pod); + // just in case + if (pod == null || pod.getFootprint() == 0) { + return null; + } + Long capacityBytes = pod.getQuotaLimit(); + Long usedBytes = pod.getQuotaLimit() - (pod.getQuotaLimit() - pod.getFootprint()); + ProviderVolumeStorageStats stats = new ProviderVolumeStorageStats(); + stats.setCapacityInBytes(capacityBytes); + stats.setActualUsedInBytes(usedBytes); + return stats; + } + + @Override + public ProviderVolumeStats getVolumeStats(ProviderAdapterContext context, ProviderAdapterDataObject dataObject) { + ProviderVolume vol = getVolume(dataObject.getExternalName()); + Long usedBytes = vol.getUsedBytes(); + Long allocatedSizeInBytes = vol.getAllocatedSizeInBytes(); + if (usedBytes == null || allocatedSizeInBytes == null) { + return null; + } + ProviderVolumeStats stats = new ProviderVolumeStats(); + stats.setAllocatedInBytes(allocatedSizeInBytes); + stats.setActualUsedInBytes(usedBytes); + return stats; + } + + @Override + public boolean canAccessHost(ProviderAdapterContext context, String hostname) { + if (hostname == null) { + throw new RuntimeException("Unable to validate host access because a hostname was not provided"); + } + + List members = getHostgroupMembers(hostgroup); + + // check for fqdn and shortname combinations. 
this assumes there is at least a shortname match in both the storage array and cloudstack + // hostname configuration + String shortname; + if (hostname.indexOf('.') > 0) { + shortname = hostname.substring(0, (hostname.indexOf('.'))); + } else { + shortname = hostname; + } + + for (String member : members) { + // exact match (short or long names) + if (member.equals(hostname)) { + return true; + } + + // primera has short name and cloudstack had long name + if (member.equals(shortname)) { + return true; + } + + // member has long name but cloudstack had shortname + if (member.indexOf('.') > 0) { + if (member.substring(0, (member.indexOf('.'))).equals(shortname)) { + return true; + } + } + } + return false; + } + + private String getAccessToken() { + refreshSession(false); + return accessToken; + } + + private synchronized void refreshSession(boolean force) { + try { + if (force || keyExpiration < System.currentTimeMillis()) { + // close client to force connection reset on appliance -- not doing this can + // result in NotAuthorized error...guessing + _client.close(); + ; + _client = null; + login(); + keyExpiration = System.currentTimeMillis() + keyTtl; + } + } catch (Exception e) { + // retry frequently but not every request to avoid DDOS on storage API + logger.warn("Failed to refresh FlashArray API key for " + username + "@" + url + ", will retry in 5 seconds", + e); + keyExpiration = System.currentTimeMillis() + (5 * 1000); + } + } + + private void validateLoginInfo(String urlStr) { + URL urlFull; + try { + urlFull = new URL(urlStr); + } catch (MalformedURLException e) { + throw new RuntimeException("Invalid URL format: " + urlStr, e); + } + ; + + int port = urlFull.getPort(); + if (port <= 0) { + port = 443; + } + this.url = urlFull.getProtocol() + "://" + urlFull.getHost() + ":" + port + urlFull.getPath(); + + Map queryParms = new HashMap(); + if (urlFull.getQuery() != null) { + String[] queryToks = urlFull.getQuery().split("&"); + for (String tok : 
queryToks) { + if (tok.endsWith("=")) { + continue; + } + int i = tok.indexOf("="); + if (i > 0) { + queryParms.put(tok.substring(0, i), tok.substring(i + 1)); + } + } + } + + pod = connectionDetails.get(FlashArrayAdapter.STORAGE_POD); + if (pod == null) { + pod = queryParms.get(FlashArrayAdapter.STORAGE_POD); + if (pod == null) { + throw new RuntimeException( + FlashArrayAdapter.STORAGE_POD + " parameter/option required to configure this storage pool"); + } + } + + hostgroup = connectionDetails.get(FlashArrayAdapter.HOSTGROUP); + if (hostgroup == null) { + hostgroup = queryParms.get(FlashArrayAdapter.HOSTGROUP); + if (hostgroup == null) { + throw new RuntimeException( + FlashArrayAdapter.HOSTGROUP + " parameter/option required to configure this storage pool"); + } + } + + apiLoginVersion = connectionDetails.get(FlashArrayAdapter.API_LOGIN_VERSION); + if (apiLoginVersion == null) { + apiLoginVersion = queryParms.get(FlashArrayAdapter.API_LOGIN_VERSION); + if (apiLoginVersion == null) { + apiLoginVersion = API_LOGIN_VERSION_DEFAULT; + } + } + + apiVersion = connectionDetails.get(FlashArrayAdapter.API_VERSION); + if (apiVersion == null) { + apiVersion = queryParms.get(FlashArrayAdapter.API_VERSION); + if (apiVersion == null) { + apiVersion = API_VERSION_DEFAULT; + } + } + + String connTimeoutStr = connectionDetails.get(FlashArrayAdapter.CONNECT_TIMEOUT_MS); + if (connTimeoutStr == null) { + connTimeoutStr = queryParms.get(FlashArrayAdapter.CONNECT_TIMEOUT_MS); + } + if (connTimeoutStr == null) { + connTimeout = CONNECT_TIMEOUT_MS_DEFAULT; + } else { + try { + connTimeout = Integer.parseInt(connTimeoutStr); + } catch (NumberFormatException e) { + logger.warn("Connection timeout not formatted correctly, using default", e); + connTimeout = CONNECT_TIMEOUT_MS_DEFAULT; + } + } + + String keyTtlString = connectionDetails.get(FlashArrayAdapter.KEY_TTL); + if (keyTtlString == null) { + keyTtlString = queryParms.get(FlashArrayAdapter.KEY_TTL); + } + if (keyTtlString ==
null) { + keyTtl = KEY_TTL_DEFAULT; + } else { + try { + keyTtl = Integer.parseInt(keyTtlString); + } catch (NumberFormatException e) { + logger.warn("Key TTL not formatted correctly, using default", e); + keyTtl = KEY_TTL_DEFAULT; + } + } + + String copyWaitStr = connectionDetails.get(FlashArrayAdapter.POST_COPY_WAIT_MS); + if (copyWaitStr == null) { + copyWaitStr = queryParms.get(FlashArrayAdapter.POST_COPY_WAIT_MS); + } + if (copyWaitStr == null) { + postCopyWait = POST_COPY_WAIT_MS_DEFAULT; + } else { + try { + postCopyWait = Integer.parseInt(copyWaitStr); + } catch (NumberFormatException e) { + logger.warn("Post copy wait not formatted correctly, using default", e); + postCopyWait = POST_COPY_WAIT_MS_DEFAULT; + } + } + + String skipTlsValidationStr = connectionDetails.get(ProviderAdapter.API_SKIP_TLS_VALIDATION_KEY); + if (skipTlsValidationStr == null) { + skipTlsValidationStr = queryParms.get(ProviderAdapter.API_SKIP_TLS_VALIDATION_KEY); + } + + if (skipTlsValidationStr != null) { + skipTlsValidation = Boolean.parseBoolean(skipTlsValidationStr); + } else { + skipTlsValidation = true; + } + } + + /** + * Login to the array and get an access token + */ + private void login() { + username = connectionDetails.get(ProviderAdapter.API_USERNAME_KEY); + password = connectionDetails.get(ProviderAdapter.API_PASSWORD_KEY); + String urlStr = connectionDetails.get(ProviderAdapter.API_URL_KEY); + validateLoginInfo(urlStr); + CloseableHttpResponse response = null; + try { + HttpPost request = new HttpPost(url + "/" + apiLoginVersion + "/auth/apitoken"); + // request.addHeader("Content-Type", "application/json"); + // request.addHeader("Accept", "application/json"); + ArrayList postParms = new ArrayList(); + postParms.add(new BasicNameValuePair("username", username)); + postParms.add(new BasicNameValuePair("password", password)); + request.setEntity(new UrlEncodedFormEntity(postParms, "UTF-8")); + CloseableHttpClient client = getClient(); + response = (CloseableHttpResponse)
client.execute(request); + + int statusCode = response.getStatusLine().getStatusCode(); + FlashArrayApiToken apitoken = null; + if (statusCode == 200 | statusCode == 201) { + apitoken = mapper.readValue(response.getEntity().getContent(), FlashArrayApiToken.class); + if (apitoken == null) { + throw new CloudRuntimeException( + "Authentication responded successfully but no api token was returned"); + } + } else if (statusCode == 401 || statusCode == 403) { + throw new CloudRuntimeException( + "Authentication or Authorization to FlashArray [" + url + "] with user [" + username + + "] failed, unable to retrieve session token"); + } else { + throw new CloudRuntimeException( + "Unexpected HTTP response code from FlashArray [" + url + "] - [" + statusCode + + "] - " + response.getStatusLine().getReasonPhrase()); + } + + // now we need to get the access token + request = new HttpPost(url + "/" + apiVersion + "/login"); + request.addHeader("api-token", apitoken.getApiToken()); + response = (CloseableHttpResponse) client.execute(request); + + statusCode = response.getStatusLine().getStatusCode(); + if (statusCode == 200 | statusCode == 201) { + Header[] headers = response.getHeaders("x-auth-token"); + if (headers == null || headers.length == 0) { + throw new CloudRuntimeException( + "Getting access token responded successfully but access token was not available"); + } + accessToken = headers[0].getValue(); + } else if (statusCode == 401 || statusCode == 403) { + throw new CloudRuntimeException( + "Authentication or Authorization to FlashArray [" + url + "] with user [" + username + + "] failed, unable to retrieve session token"); + } else { + throw new CloudRuntimeException( + "Unexpected HTTP response code from FlashArray [" + url + "] - [" + statusCode + + "] - " + response.getStatusLine().getReasonPhrase()); + } + + } catch (UnsupportedEncodingException e) { + throw new CloudRuntimeException("Error creating input for login, check username/password encoding"); + } catch 
(UnsupportedOperationException e) { + throw new CloudRuntimeException("Error processing login response from FlashArray [" + url + "]", e); + } catch (IOException e) { + throw new CloudRuntimeException("Error sending login request to FlashArray [" + url + "]", e); + } finally { + try { + if (response != null) { + response.close(); + } + } catch (IOException e) { + logger.debug("Error closing response from login attempt to FlashArray", e); + } + } + } + + private void removeVlunsAll(ProviderAdapterContext context, String volumeNamespace, String volumeName) { + volumeName = normalizeName(volumeNamespace, volumeName); + FlashArrayList list = null; + + try { + list = GET("/connections?volume_names=" + volumeName, + new TypeReference>() { + }); + } catch (CloudRuntimeException e) { + // this means the volume being deleted no longer exists so no connections can be + // searched + if (e.toString().contains("Bad Request")) { + return; + } + } + + if (list != null && list.getItems() != null) { + for (FlashArrayConnection conn : list.getItems()) { + DELETE("/connections?host_group_names=" + conn.getHostGroup().getName() + "&volume_names=" + volumeName); + } + } + } + + private FlashArrayVolume getVolume(String volumeName) { + FlashArrayList list = GET("/volumes?names=" + volumeName, + new TypeReference>() { + }); + return (FlashArrayVolume) getFlashArrayItem(list); + } + + private FlashArrayPod getVolumeNamespace(String name) { + FlashArrayList list = GET("/pods?names=" + name, new TypeReference>() { + }); + return (FlashArrayPod) getFlashArrayItem(list); + } + + private FlashArrayHostgroup getHostgroup(String name) { + FlashArrayList list = GET("/host-groups?name=" + name, + new TypeReference>() { + }); + return (FlashArrayHostgroup) getFlashArrayItem(list); + } + + private List getHostgroupMembers(String groupname) { + FlashArrayGroupMemberReferenceList list = GET("/hosts/host-groups?group_names=" + groupname, + new TypeReference() { + }); + if (list == null || 
list.getItems().size() == 0) { + return null; + } + List hostnames = new ArrayList(); + for (FlashArrayGroupMemberReference ref : list.getItems()) { + hostnames.add(ref.getMember().getName()); + } + return hostnames; + } + + private FlashArrayVolume getSnapshot(String snapshotName) { + FlashArrayList list = GET("/volume-snapshots?names=" + snapshotName, + new TypeReference>() { + }); + return (FlashArrayVolume) getFlashArrayItem(list); + } + + private Object getFlashArrayItem(FlashArrayList list) { + if (list.getItems() != null && list.getItems().size() > 0) { + return list.getItems().get(0); + } else { + return null; + } + } + + private String normalizeName(String volumeNamespace, String volumeName) { + if (!volumeName.contains("::")) { + if (volumeNamespace != null) { + volumeName = volumeNamespace + "::" + volumeName; + } + } + return volumeName; + } + + @SuppressWarnings("unchecked") + private T POST(String path, Object input, final TypeReference type) { + CloseableHttpResponse response = null; + try { + this.refreshSession(false); + HttpPost request = new HttpPost(url + "/" + apiVersion + path); + request.addHeader("Content-Type", "application/json"); + request.addHeader("Accept", "application/json"); + request.addHeader("X-auth-token", getAccessToken()); + if (input != null) { + try { + String data = mapper.writeValueAsString(input); + request.setEntity(new StringEntity(data)); + } catch (UnsupportedEncodingException | JsonProcessingException e) { + throw new CloudRuntimeException( + "Error processing request payload to [" + url + "] for path [" + path + "]", e); + } + } + + CloseableHttpClient client = getClient(); + try { + response = (CloseableHttpResponse) client + .execute(request); + } catch (IOException e) { + throw new CloudRuntimeException("Error sending request to FlashArray [" + url + path + "]", e); + } + + final int statusCode = response.getStatusLine().getStatusCode(); + if (statusCode == 200 || statusCode == 201) { + try { + if (type != null) { 
+ Header header = response.getFirstHeader("Location"); + if (type.getType().getTypeName().equals(String.class.getName())) { + if (header != null) { + return (T) header.getValue(); + } else { + return null; + } + } else { + return mapper.readValue(response.getEntity().getContent(), type); + } + } + return null; + } catch (UnsupportedOperationException | IOException e) { + throw new CloudRuntimeException("Error processing response from FlashArray [" + url + path + "]", e); + } + } else if (statusCode == 400) { + try { + Map payload = mapper.readValue(response.getEntity().getContent(), + new TypeReference>() { + }); + throw new CloudRuntimeException("Invalid request error 400: " + payload); + } catch (UnsupportedOperationException | IOException e) { + throw new CloudRuntimeException( + "Error processing bad request response from FlashArray [" + url + path + "]", e); + } + } else if (statusCode == 401 || statusCode == 403) { + throw new CloudRuntimeException( + "Authentication or Authorization to FlashArray [" + url + "] with user [" + username + + "] failed, unable to retrieve session token"); + } else { + try { + Map payload = mapper.readValue(response.getEntity().getContent(), + new TypeReference>() { + }); + throw new CloudRuntimeException("Invalid request error " + statusCode + ": " + payload); + } catch (UnsupportedOperationException | IOException e) { + throw new CloudRuntimeException( + "Unexpected HTTP response code from FlashArray on POST [" + url + path + "] - [" + + statusCode + "] - " + response.getStatusLine().getReasonPhrase()); + } + } + } finally { + if (response != null) { + try { + response.close(); + } catch (IOException e) { + logger.debug("Unexpected failure closing response to FlashArray API", e); + } + } + } + } + + private T PATCH(String path, Object input, final TypeReference type) { + CloseableHttpResponse response = null; + try { + this.refreshSession(false); + HttpPatch request = new HttpPatch(url + "/" + apiVersion + path); + 
request.addHeader("Content-Type", "application/json"); + request.addHeader("Accept", "application/json"); + request.addHeader("X-auth-token", getAccessToken()); + String data = mapper.writeValueAsString(input); + request.setEntity(new StringEntity(data)); + + CloseableHttpClient client = getClient(); + response = (CloseableHttpResponse) client.execute(request); + + final int statusCode = response.getStatusLine().getStatusCode(); + if (statusCode == 200 || statusCode == 201) { + if (type != null) + return mapper.readValue(response.getEntity().getContent(), type); + return null; + } else if (statusCode == 400) { + Map payload = mapper.readValue(response.getEntity().getContent(), + new TypeReference>() { + }); + throw new CloudRuntimeException("Invalid request error 400: " + payload); + } else if (statusCode == 401 || statusCode == 403) { + throw new CloudRuntimeException( + "Authentication or Authorization to FlashArray [" + url + "] with user [" + username + + "] failed, unable to retrieve session token"); + } else { + Map payload = mapper.readValue(response.getEntity().getContent(), + new TypeReference>() { + }); + throw new CloudRuntimeException( + "Invalid request error from FlashArray on PUT [" + url + path + "]" + statusCode + ": " + + response.getStatusLine().getReasonPhrase() + " - " + payload); + } + } catch (UnsupportedEncodingException | JsonProcessingException e) { + throw new CloudRuntimeException( + "Error processing request payload to [" + url + "] for path [" + path + "]", e); + } catch (UnsupportedOperationException e) { + throw new CloudRuntimeException("Error processing bad request response from FlashArray [" + url + "]", + e); + } catch (IOException e) { + throw new CloudRuntimeException("Error sending request to FlashArray [" + url + "]", e); + + } finally { + if (response != null) { + try { + response.close(); + } catch (IOException e) { + logger.debug("Unexpected failure closing response to FlashArray API", e); + } + } + } + + } + + private T 
GET(String path, final TypeReference type) { + CloseableHttpResponse response = null; + try { + this.refreshSession(false); + HttpGet request = new HttpGet(url + "/" + apiVersion + path); + request.addHeader("Content-Type", "application/json"); + request.addHeader("Accept", "application/json"); + request.addHeader("X-auth-token", getAccessToken()); + + CloseableHttpClient client = getClient(); + response = (CloseableHttpResponse) client.execute(request); + final int statusCode = response.getStatusLine().getStatusCode(); + if (statusCode == 200) { + try { + return mapper.readValue(response.getEntity().getContent(), type); + } catch (UnsupportedOperationException | IOException e) { + throw new CloudRuntimeException("Error processing response from FlashArray [" + url + "]", e); + } + } else if (statusCode == 401 || statusCode == 403) { + throw new CloudRuntimeException( + "Authentication or Authorization to FlashArray [" + url + "] with user [" + username + + "] failed, unable to retrieve session token"); + } else { + throw new CloudRuntimeException( + "Unexpected HTTP response code from FlashArray on GET [" + request.getURI() + "] - [" + + statusCode + "] - " + response.getStatusLine().getReasonPhrase()); + } + } catch (IOException e) { + throw new CloudRuntimeException("Error sending request to FlashArray [" + url + "]", e); + } catch (UnsupportedOperationException e) { + throw new CloudRuntimeException("Error processing response from FlashArray [" + url + "]", e); + } finally { + if (response != null) { + try { + response.close(); + } catch (IOException e) { + logger.debug("Unexpected failure closing response to FlashArray API", e); + } + } + } + } + + private void DELETE(String path) { + CloseableHttpResponse response = null; + try { + this.refreshSession(false); + HttpDelete request = new HttpDelete(url + "/" + apiVersion + path); + request.addHeader("Content-Type", "application/json"); + request.addHeader("Accept", "application/json"); + 
request.addHeader("X-auth-token", getAccessToken()); + + CloseableHttpClient client = getClient(); + response = (CloseableHttpResponse) client.execute(request); + final int statusCode = response.getStatusLine().getStatusCode(); + if (statusCode == 200 || statusCode == 404 || statusCode == 400) { + // this means the volume was deleted successfully, or doesn't exist (effective + // delete), or + // the volume name is malformed or too long - meaning it never got created to + // begin with (effective delete) + return; + } else if (statusCode == 401 || statusCode == 403) { + throw new CloudRuntimeException( + "Authentication or Authorization to FlashArray [" + url + "] with user [" + username + + "] failed, unable to retrieve session token"); + } else if (statusCode == 409) { + throw new CloudRuntimeException( + "The volume cannot be deleted at this time due to existing dependencies. Validate that all snapshots associated with this volume have been deleted and try again."); + } else { + throw new CloudRuntimeException( + "Unexpected HTTP response code from FlashArray on DELETE [" + url + path + "] - [" + + statusCode + "] - " + response.getStatusLine().getReasonPhrase()); + } + } catch (IOException e) { + throw new CloudRuntimeException("Error sending request to FlashArray [" + url + "]", e); + } finally { + if (response != null) { + try { + response.close(); + } catch (IOException e) { + logger.debug("Unexpected failure closing response to FlashArray API", e); + } + } + } + } + + private CloseableHttpClient getClient() { + if (_client == null) { + RequestConfig config = RequestConfig.custom() + .setConnectTimeout((int) connTimeout) + .setConnectionRequestTimeout((int) connTimeout) + .setSocketTimeout((int) connTimeout).build(); + + HostnameVerifier verifier = null; + SSLContext sslContext = null; + + if (this.skipTlsValidation) { + try { + verifier = NoopHostnameVerifier.INSTANCE; + sslContext = new SSLContextBuilder().loadTrustMaterial(null, 
TrustAllStrategy.INSTANCE).build(); + } catch (KeyManagementException e) { + throw new CloudRuntimeException(e); + } catch (NoSuchAlgorithmException e) { + throw new CloudRuntimeException(e); + } catch (KeyStoreException e) { + throw new CloudRuntimeException(e); + } + } + + _client = HttpClients.custom() + .setDefaultRequestConfig(config) + .setSSLHostnameVerifier(verifier) + .setSSLContext(sslContext) + .build(); + } + return _client; + } + + /** + * pure array requires volume sizes in multiples of 512...we'll just round up to + * next 512 boundary + * + * @param sizeInBytes + * @return + */ + private Long roundUp512Boundary(Long sizeInBytes) { + Long remainder = sizeInBytes % 512; + if (remainder > 0) { + sizeInBytes = sizeInBytes + (512 - remainder); + } + return sizeInBytes; + } +} diff --git a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapterFactory.java b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapterFactory.java new file mode 100644 index 00000000000..d1c3cee8fa8 --- /dev/null +++ b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapterFactory.java @@ -0,0 +1,36 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.datastore.adapter.flasharray; + +import java.util.Map; + +import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapter; +import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterFactory; + +public class FlashArrayAdapterFactory implements ProviderAdapterFactory { + + @Override + public String getProviderName() { + return "Flash Array"; + } + + @Override + public ProviderAdapter create(String url, Map details) { + return new FlashArrayAdapter(url, details); + } + +} diff --git a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayApiToken.java b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayApiToken.java new file mode 100644 index 00000000000..0f1e133cb5b --- /dev/null +++ b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayApiToken.java @@ -0,0 +1,34 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.datastore.adapter.flasharray; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class FlashArrayApiToken { + @JsonProperty("api_token") + private String apiToken; + public void setApiToken(String apiToken) { + this.apiToken = apiToken; + } + public String getApiToken() { + return apiToken; + } +} diff --git a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayConnection.java b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayConnection.java new file mode 100644 index 00000000000..76cec9f70c4 --- /dev/null +++ b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayConnection.java @@ -0,0 +1,68 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.flasharray;

import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;

/**
 * REST representation of a FlashArray volume connection: the volume attached
 * to a host or host group at a given LUN.
 */
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class FlashArrayConnection {

    /** Serialized as "host_group"; set when the connection targets a host group. */
    @JsonProperty("host_group")
    private FlashArrayConnectionHostgroup hostGroup;

    /** Set when the connection targets a single host. */
    @JsonProperty("host")
    private FlashArrayConnectionHost host;

    /** The volume being connected. */
    @JsonProperty("volume")
    private FlashArrayVolume volume;

    /** LUN number the volume is exposed at. */
    @JsonProperty("lun")
    private Integer lun;

    /** @return the target host group, or null when a single host is used */
    public FlashArrayConnectionHostgroup getHostGroup() {
        return hostGroup;
    }

    public void setHostGroup(FlashArrayConnectionHostgroup hostGroup) {
        this.hostGroup = hostGroup;
    }

    /** @return the target host, or null when a host group is used */
    public FlashArrayConnectionHost getHost() {
        return host;
    }

    public void setHost(FlashArrayConnectionHost host) {
        this.host = host;
    }

    /** @return the connected volume */
    public FlashArrayVolume getVolume() {
        return volume;
    }

    public void setVolume(FlashArrayVolume volume) {
        this.volume = volume;
    }

    /** @return the LUN number of the connection */
    public Integer getLun() {
        return lun;
    }

    public void setLun(Integer lun) {
        this.lun = lun;
    }
}
diff --git a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayConnectionHost.java b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayConnectionHost.java
new file mode 100644
index 00000000000..27dcf08ab21
--- /dev/null
+++ b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayConnectionHost.java
@@ -0,0 +1,39 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.flasharray;

import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;

/** Reference to a single FlashArray host inside a connection payload. */
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class FlashArrayConnectionHost {

    /** Host name as known to the array. */
    @JsonProperty("name")
    private String name;

    /** No-arg constructor required by Jackson. */
    public FlashArrayConnectionHost() {
    }

    /** Convenience constructor. @param name host name on the array */
    public FlashArrayConnectionHost(String name) {
        this.name = name;
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }
}
diff --git a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayConnectionHostgroup.java b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayConnectionHostgroup.java
new file mode 100644
index 00000000000..27a0f60cbae
--- /dev/null
+++
b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayConnectionHostgroup.java
@@ -0,0 +1,40 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.flasharray;

import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;

/** Reference to a FlashArray host group inside a connection payload. */
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class FlashArrayConnectionHostgroup {

    /** Host-group name as known to the array. */
    @JsonProperty("name")
    private String name;

    /** No-arg constructor required by Jackson. */
    public FlashArrayConnectionHostgroup() {
    }

    /** Convenience constructor. @param name host-group name on the array */
    public FlashArrayConnectionHostgroup(String name) {
        this.name = name;
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }
}
diff --git a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayGroupMemberReference.java b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayGroupMemberReference.java
new file mode 100644
index 00000000000..f0f6d9e57fb
---
/dev/null +++ b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayGroupMemberReference.java @@ -0,0 +1,72 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.datastore.adapter.flasharray; +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class FlashArrayGroupMemberReference { + @JsonProperty("group") + private FlashArrayGroupNameWrapper group; + @JsonProperty("member") + private FlashArrayGroupMemberNameWrapper member; + + public static class FlashArrayGroupNameWrapper { + @JsonProperty("name") + private String name; + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + } + + public static class FlashArrayGroupMemberNameWrapper { + @JsonProperty("name") + private String name; + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + } + + public FlashArrayGroupNameWrapper getGroup() 
{ + return group; + } + + public void setGroup(FlashArrayGroupNameWrapper group) { + this.group = group; + } + + public FlashArrayGroupMemberNameWrapper getMember() { + return member; + } + + public void setMember(FlashArrayGroupMemberNameWrapper member) { + this.member = member; + } + +} diff --git a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayGroupMemberReferenceList.java b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayGroupMemberReferenceList.java new file mode 100644 index 00000000000..b17c8a5b1f9 --- /dev/null +++ b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayGroupMemberReferenceList.java @@ -0,0 +1,38 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.storage.datastore.adapter.flasharray; +import java.util.ArrayList; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class FlashArrayGroupMemberReferenceList { + @JsonProperty("items") + private ArrayList items; + + public ArrayList getItems() { + return items; + } + + public void setItems(ArrayList items) { + this.items = items; + } + +} diff --git a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayHostgroup.java b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayHostgroup.java new file mode 100644 index 00000000000..1a2e3911e24 --- /dev/null +++ b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayHostgroup.java @@ -0,0 +1,58 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.storage.datastore.adapter.flasharray; +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class FlashArrayHostgroup { + @JsonProperty("name") + private String name; + @JsonProperty("connection_count") + private Long connectionCount; + @JsonProperty("host_count") + private Long hostCount; + @JsonProperty("is_local") + private Boolean local; + public String getName() { + return name; + } + public void setName(String name) { + this.name = name; + } + public Long getConnectionCount() { + return connectionCount; + } + public void setConnectionCount(Long connectionCount) { + this.connectionCount = connectionCount; + } + public Long getHostCount() { + return hostCount; + } + public void setHostCount(Long hostCount) { + this.hostCount = hostCount; + } + public Boolean getLocal() { + return local; + } + public void setLocal(Boolean local) { + this.local = local; + } + +} diff --git a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayList.java b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayList.java new file mode 100644 index 00000000000..992c3fc8b67 --- /dev/null +++ b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayList.java @@ -0,0 +1,60 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.flasharray;

import java.util.List;

import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;

/**
 * Generic paged list envelope returned by the FlashArray REST API:
 * an "items" array plus pagination metadata.
 */
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class FlashArrayList {

    /** Serialized as "more_items_remaining": true when another page exists. */
    @JsonProperty("more_items_remaining")
    private Boolean moreItemsRemaining;

    /** Serialized as "total_item_count". */
    @JsonProperty("total_item_count")
    private Integer totalItemCount;

    /** Serialized as "continuation_token": opaque cursor for the next page. */
    @JsonProperty("continuation_token")
    private String continuationToken;

    // NOTE(review): generic type parameter appears lost in extraction — TODO confirm element type.
    @JsonProperty("items")
    private List items;

    public Boolean getMoreItemsRemaining() {
        return moreItemsRemaining;
    }

    public void setMoreItemsRemaining(Boolean moreItemsRemaining) {
        this.moreItemsRemaining = moreItemsRemaining;
    }

    public Integer getTotalItemCount() {
        return totalItemCount;
    }

    public void setTotalItemCount(Integer totalItemCount) {
        this.totalItemCount = totalItemCount;
    }

    public String getContinuationToken() {
        return continuationToken;
    }

    public void setContinuationToken(String continuationToken) {
        this.continuationToken = continuationToken;
    }

    public List getItems() {
        return items;
    }

    public void setItems(List items) {
        this.items = items;
    }
}
diff --git a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayPod.java
b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayPod.java new file mode 100644 index 00000000000..ddbfc298df4 --- /dev/null +++ b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayPod.java @@ -0,0 +1,66 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.storage.datastore.adapter.flasharray; +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class FlashArrayPod { + @JsonProperty("name") + private String name; + @JsonProperty("id") + private String id; + @JsonProperty("destroyed") + private Boolean destroyed; + @JsonProperty("footprint") + private Long footprint; + @JsonProperty("quota_limit") + private Long quotaLimit; + public String getName() { + return name; + } + public void setName(String name) { + this.name = name; + } + public String getId() { + return id; + } + public void setId(String id) { + this.id = id; + } + public Boolean getDestroyed() { + return destroyed; + } + public void setDestroyed(Boolean destroyed) { + this.destroyed = destroyed; + } + public Long getFootprint() { + return footprint; + } + public void setFootprint(Long footprint) { + this.footprint = footprint; + } + public Long getQuotaLimit() { + return quotaLimit; + } + public void setQuotaLimit(Long quotaLimit) { + this.quotaLimit = quotaLimit; + } + +} diff --git a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayTag.java b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayTag.java new file mode 100644 index 00000000000..685d4e1f1cf --- /dev/null +++ b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayTag.java @@ -0,0 +1,77 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.flasharray;

import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;

/** REST representation of a namespaced key/value tag on a FlashArray object. */
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class FlashArrayTag {

    /** Whether the tag is copied along when the tagged object is copied. */
    @JsonProperty("copyable")
    private Boolean copyable;

    /** Tag key. */
    @JsonProperty("key")
    private String key;

    /** Namespace the key lives in. */
    @JsonProperty("namespace")
    private String namespace;

    /** Tag value. */
    @JsonProperty("value")
    private String value;

    /** No-arg constructor required by Jackson. */
    public FlashArrayTag() {
    }

    /**
     * Convenience constructor.
     *
     * @param namespace tag namespace
     * @param key       tag key
     * @param value     tag value
     */
    public FlashArrayTag(String namespace, String key, String value) {
        this.namespace = namespace;
        this.key = key;
        this.value = value;
    }

    public Boolean getCopyable() {
        return copyable;
    }

    public void setCopyable(Boolean copyable) {
        this.copyable = copyable;
    }

    public String getKey() {
        return key;
    }

    public void setKey(String key) {
        this.key = key;
    }

    public String getNamespace() {
        return namespace;
    }

    public void setNamespace(String namespace) {
        this.namespace = namespace;
    }

    public String getValue() {
        return value;
    }

    public void setValue(String value) {
        this.value = value;
    }
}
diff --git a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayTagList.java
b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayTagList.java new file mode 100644 index 00000000000..7a23343a647 --- /dev/null +++ b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayTagList.java @@ -0,0 +1,39 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.storage.datastore.adapter.flasharray; + +import java.util.List; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class FlashArrayTagList { + @JsonProperty("tags") + public List tags; + + public List getTags() { + return tags; + } + + public void setTags(List tags) { + this.tags = tags; + } + +} diff --git a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayVolume.java b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayVolume.java new file mode 100644 index 00000000000..f939d70a77f --- /dev/null +++ b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayVolume.java @@ -0,0 +1,253 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.storage.datastore.adapter.flasharray; + +import org.apache.cloudstack.storage.datastore.adapter.ProviderSnapshot; + +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class FlashArrayVolume implements ProviderSnapshot { + public static final String PURE_OUI = "24a9370"; + + @JsonProperty("destroyed") + private Boolean destroyed; + /** The virtual size requested for this volume */ + @JsonProperty("provisioned") + private Long allocatedSizeBytes; + @JsonIgnore + private String id; + @JsonIgnore // we don't use the Cloudstack user name at all + private String name; + @JsonIgnore + private String shortExternalName; + @JsonProperty("pod") + private FlashArrayVolumePod pod; + @JsonProperty("priority") + private Integer priority; + @JsonProperty("promotion_status") + private String promotionStatus; + @JsonProperty("subtype") + private String subtype; + @JsonProperty("space") + private FlashArrayVolumeSpace space; + @JsonProperty("source") + private FlashArrayVolumeSource source; + @JsonProperty("serial") + private String serial; + @JsonProperty("name") + private String externalName; + @JsonProperty("id") + private String externalUuid; + @JsonIgnore + private AddressType addressType; + @JsonIgnore + private String connectionId; + + public FlashArrayVolume() { + this.addressType = AddressType.FIBERWWN; + } + + @Override + public Boolean isDestroyed() { + return destroyed; + } + @Override + @JsonIgnore + public String getId() { + return id; + } + @Override + @JsonIgnore + public String getName() { + return name; + } + @JsonIgnore + public String getPodName() { + if (pod != null) { + return pod.getName(); + } else { + return null; + } + } + @Override + @JsonIgnore + public 
Integer getPriority() { + return priority; + } + @Override + @JsonIgnore + public String getState() { + return null; + } + @Override + @JsonIgnore + public AddressType getAddressType() { + return addressType; + } + @Override + @JsonIgnore + public String getAddress() { + if (serial == null) return null; + return ("6" + PURE_OUI + serial).toLowerCase(); + } + @Override + public String getExternalConnectionId() { + return connectionId; + } + + @JsonIgnore + public void setExternalConnectionId(String externalConnectionId) { + this.connectionId = externalConnectionId; + } + + @Override + public void setId(String id) { + this.id = id; + } + @Override + public void setName(String name) { + this.name = name; + } + public void setPodName(String podname) { + FlashArrayVolumePod pod = new FlashArrayVolumePod(); + pod.setName(podname); + this.pod = pod; + } + @Override + public void setPriority(Integer priority) { + this.priority = priority; + } + @Override + public void setAddressType(AddressType addressType) { + this.addressType = addressType; + } + @Override + @JsonIgnore + public Long getAllocatedSizeInBytes() { + return this.allocatedSizeBytes; + } + public void setAllocatedSizeBytes(Long size) { + this.allocatedSizeBytes = size; + } + @Override + @JsonIgnore + public Long getUsedBytes() { + if (space != null) { + return space.getVirtual(); + } else { + return null; + } + } + + public void setDestroyed(Boolean destroyed) { + this.destroyed = destroyed; + } + public FlashArrayVolumeSource getSource() { + return source; + } + public void setSource(FlashArrayVolumeSource source) { + this.source = source; + } + @Override + public String getExternalUuid() { + return externalUuid; + } + @Override + public String getExternalName() { + return externalName; + } + + public void setExternalUuid(String uuid) { + this.externalUuid = uuid; + } + + public void setExternalName(String name) { + this.externalName = name; + } + @Override + public Boolean canAttachDirectly() { + return 
false; + } + public String getConnectionId() { + return connectionId; + } + public void setConnectionId(String connectionId) { + this.connectionId = connectionId; + } + + public Boolean getDestroyed() { + return destroyed; + } + + public Long getAllocatedSizeBytes() { + return allocatedSizeBytes; + } + + public String getShortExternalName() { + return shortExternalName; + } + + public void setShortExternalName(String shortExternalName) { + this.shortExternalName = shortExternalName; + } + + public FlashArrayVolumePod getPod() { + return pod; + } + + public void setPod(FlashArrayVolumePod pod) { + this.pod = pod; + } + + public String getPromotionStatus() { + return promotionStatus; + } + + public void setPromotionStatus(String promotionStatus) { + this.promotionStatus = promotionStatus; + } + + public String getSubtype() { + return subtype; + } + + public void setSubtype(String subtype) { + this.subtype = subtype; + } + + public FlashArrayVolumeSpace getSpace() { + return space; + } + + public void setSpace(FlashArrayVolumeSpace space) { + this.space = space; + } + + public String getSerial() { + return serial; + } + + public void setSerial(String serial) { + this.serial = serial; + } + +} diff --git a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayVolumePod.java b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayVolumePod.java new file mode 100644 index 00000000000..1e46441e7d1 --- /dev/null +++ b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayVolumePod.java @@ -0,0 +1,43 @@ + +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.datastore.adapter.flasharray; +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class FlashArrayVolumePod { + @JsonProperty("id") + private String id; + @JsonProperty("name") + private String name; + + public String getId() { + return id; + } + public void setId(String id) { + this.id = id; + } + public String getName() { + return name; + } + public void setName(String name) { + this.name = name; + } +} diff --git a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayVolumeSource.java b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayVolumeSource.java new file mode 100644 index 00000000000..9bc8dec0016 --- /dev/null +++ b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayVolumeSource.java @@ -0,0 +1,47 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.datastore.adapter.flasharray; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class FlashArrayVolumeSource { + @JsonProperty("id") + private String id; + @JsonProperty("name") + private String name; + public FlashArrayVolumeSource() { } + public FlashArrayVolumeSource(String sourceName) { + this.name = sourceName; + } + public String getId() { + return id; + } + public void setId(String id) { + this.id = id; + } + public String getName() { + return name; + } + public void setName(String name) { + this.name = name; + } + +} diff --git a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayVolumeSpace.java b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayVolumeSpace.java new file mode 100644 index 00000000000..95e148ce89f --- /dev/null +++ b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayVolumeSpace.java @@ -0,0 +1,122 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.flasharray;

import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;

/**
 * Space-accounting block of a FlashArray volume payload ("space" member).
 * Sizes are in bytes as reported by the array; reduction ratios are floats.
 *
 * NOTE(review): accessor naming is inconsistent — setData_reduction,
 * setTotal_physical and setUsed_provisioned mix snake_case into otherwise
 * camelCase bean methods.  Renaming them would break callers and adding
 * camelCase twins risks Jackson property-name conflicts, so the names are
 * left as-is; the field-level @JsonProperty annotations already control
 * the wire names.
 */
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class FlashArrayVolumeSpace {
    // ratio of pre-reduction to post-reduction data size
    @JsonProperty("data_reduction")
    private Float dataReduction;
    @JsonProperty("snapshots")
    private Integer snapshots;
    @JsonProperty("snapshots_effective")
    private Integer snapshotsEffective;
    @JsonProperty("thin_provisioning")
    private Float thinProvisioning;
    @JsonProperty("total_effective")
    private Long totalEffective;
    @JsonProperty("total_physical")
    private Long totalPhysical;
    @JsonProperty("total_provisioned")
    private Long totalProvisioned;
    @JsonProperty("total_reduction")
    private Float totalReduction;
    @JsonProperty("unique")
    private Long unique;
    @JsonProperty("unique_effective")
    private Long uniqueEffective;
    // NOTE(review): Java name says "used" but the wire name is
    // "user_provisioned" — presumably intentional shorthand; confirm against
    // the FlashArray REST schema before renaming either side.
    @JsonProperty("user_provisioned")
    private Long usedProvisioned;
    // host-visible ("virtual") bytes written; FlashArrayVolume.getUsedBytes()
    // reports this value
    @JsonProperty("virtual")
    private Long virtual;
    public Float getData_reduction() {
        return dataReduction;
    }
    public void setData_reduction(Float dataReduction) {
        this.dataReduction = dataReduction;
    }
    public Integer getSnapshots() {
        return snapshots;
    }
    public void setSnapshots(Integer snapshots) {
        this.snapshots = snapshots;
    }
    public Integer getSnapshotsEffective() {
        return snapshotsEffective;
    }
    public void setSnapshotsEffective(Integer snapshotsEffective) {
        this.snapshotsEffective = snapshotsEffective;
    }
    public Float getThinProvisioning() {
        return thinProvisioning;
    }
    public void setThinProvisioning(Float thinProvisioning) {
        this.thinProvisioning = thinProvisioning;
    }
    public Long getTotalEffective() {
        return totalEffective;
    }
    public void setTotalEffective(Long totalEffective) {
        this.totalEffective = totalEffective;
    }
    public Long getTotalPhysical() {
        return totalPhysical;
    }
    public void setTotal_physical(Long totalPhysical) {
        this.totalPhysical = totalPhysical;
    }
    public Long getTotalProvisioned() {
        return totalProvisioned;
    }
    public void setTotalProvisioned(Long totalProvisioned) {
        this.totalProvisioned = totalProvisioned;
    }
    public Float getTotalReduction() {
        return totalReduction;
    }
    public void setTotalReduction(Float totalReduction) {
        this.totalReduction = totalReduction;
    }
    public Long getUnique() {
        return unique;
    }
    public void setUnique(Long unique) {
        this.unique = unique;
    }
    public Long getUniqueEffective() {
        return uniqueEffective;
    }
    public void setUniqueEffective(Long uniqueEffective) {
        this.uniqueEffective = uniqueEffective;
    }
    public Long getUsedProvisioned() {
        return usedProvisioned;
    }
    public void setUsed_provisioned(Long usedProvisioned) {
        this.usedProvisioned = usedProvisioned;
    }
    public Long getVirtual() {
        return virtual;
    }
    public void setVirtual(Long virtual) {
        this.virtual = virtual;
    }

}
b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/provider/FlashArrayPrimaryDatastoreProviderImpl.java new file mode 100644 index 00000000000..0750ef2cc27 --- /dev/null +++ b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/provider/FlashArrayPrimaryDatastoreProviderImpl.java @@ -0,0 +1,32 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.storage.datastore.provider; + +import org.apache.cloudstack.storage.datastore.adapter.flasharray.FlashArrayAdapterFactory; + +public class FlashArrayPrimaryDatastoreProviderImpl extends AdaptivePrimaryDatastoreProviderImpl { + + public FlashArrayPrimaryDatastoreProviderImpl() { + super(new FlashArrayAdapterFactory()); + } + + @Override + public String getName() { + return "Flash Array"; + } + +} diff --git a/plugins/storage/volume/flasharray/src/main/resources/META-INF/cloudstack/storage-volume-flasharray/module.properties b/plugins/storage/volume/flasharray/src/main/resources/META-INF/cloudstack/storage-volume-flasharray/module.properties new file mode 100644 index 00000000000..ac3c1e20b08 --- /dev/null +++ b/plugins/storage/volume/flasharray/src/main/resources/META-INF/cloudstack/storage-volume-flasharray/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+name=storage-volume-flasharray +parent=storage diff --git a/plugins/storage/volume/flasharray/src/main/resources/META-INF/cloudstack/storage-volume-flasharray/spring-storage-volume-flasharray-context.xml b/plugins/storage/volume/flasharray/src/main/resources/META-INF/cloudstack/storage-volume-flasharray/spring-storage-volume-flasharray-context.xml new file mode 100644 index 00000000000..030e9def26d --- /dev/null +++ b/plugins/storage/volume/flasharray/src/main/resources/META-INF/cloudstack/storage-volume-flasharray/spring-storage-volume-flasharray-context.xml @@ -0,0 +1,35 @@ + + + + + + diff --git a/plugins/storage/volume/primera/pom.xml b/plugins/storage/volume/primera/pom.xml new file mode 100644 index 00000000000..da345eeb173 --- /dev/null +++ b/plugins/storage/volume/primera/pom.xml @@ -0,0 +1,52 @@ + + + 4.0.0 + cloud-plugin-storage-volume-primera + Apache CloudStack Plugin - Storage Volume - HPE Primera + + org.apache.cloudstack + cloudstack-plugins + 4.19.0.0-SNAPSHOT + ../../../pom.xml + + + + org.apache.cloudstack + cloud-plugin-storage-volume-adaptive + ${project.version} + + + + + + maven-surefire-plugin + + + integration-test + + test + + + + + + + diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraAdapter.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraAdapter.java new file mode 100644 index 00000000000..69f98567f72 --- /dev/null +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraAdapter.java @@ -0,0 +1,930 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.datastore.adapter.primera; + +import java.io.IOException; +import java.io.UnsupportedEncodingException; +import java.net.MalformedURLException; +import java.net.URL; +import java.security.KeyManagementException; +import java.security.KeyStoreException; +import java.security.NoSuchAlgorithmException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import javax.net.ssl.HostnameVerifier; +import javax.net.ssl.SSLContext; + +import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapter; +import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterContext; +import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterDataObject; +import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterDiskOffering; +import org.apache.cloudstack.storage.datastore.adapter.ProviderSnapshot; +import org.apache.cloudstack.storage.datastore.adapter.ProviderVolume; +import org.apache.cloudstack.storage.datastore.adapter.ProviderVolume.AddressType; +import org.apache.cloudstack.storage.datastore.adapter.ProviderVolumeNamer; +import org.apache.cloudstack.storage.datastore.adapter.ProviderVolumeStats; +import org.apache.cloudstack.storage.datastore.adapter.ProviderVolumeStorageStats; +import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterDiskOffering.ProvisioningType; +import 
org.apache.http.Header; +import org.apache.http.client.config.RequestConfig; +import org.apache.http.client.methods.CloseableHttpResponse; +import org.apache.http.client.methods.HttpDelete; +import org.apache.http.client.methods.HttpGet; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.client.methods.HttpPut; +import org.apache.http.conn.ssl.NoopHostnameVerifier; +import org.apache.http.conn.ssl.TrustAllStrategy; +import org.apache.http.entity.StringEntity; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.impl.client.HttpClients; +import org.apache.http.ssl.SSLContextBuilder; +import org.apache.log4j.Logger; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.ObjectMapper; + +public class PrimeraAdapter implements ProviderAdapter { + + static final Logger logger = Logger.getLogger(PrimeraAdapter.class); + + public static final String HOSTSET = "hostset"; + public static final String CPG = "cpg"; + public static final String SNAP_CPG = "snapCpg"; + public static final String KEY_TTL = "keyttl"; + public static final String CONNECT_TIMEOUT_MS = "connectTimeoutMs"; + public static final String POST_COPY_WAIT_MS = "postCopyWaitMs"; + public static final String TASK_WAIT_TIMEOUT_MS = "taskWaitTimeoutMs"; + + private static final long KEY_TTL_DEFAULT = (1000 * 60 * 14); + private static final long CONNECT_TIMEOUT_MS_DEFAULT = 600000; + private static final long TASK_WAIT_TIMEOUT_MS_DEFAULT = 10 * 60 * 1000; + public static final long BYTES_IN_MiB = 1048576; + + static final ObjectMapper mapper = new ObjectMapper(); + public String cpg = null; + public String snapCpg = null; + public String hostset = null; + private String username; + private String password; + private String key; + private String url; + private long keyExpiration = -1; + private long keyTtl = KEY_TTL_DEFAULT; + private long connTimeout = 
CONNECT_TIMEOUT_MS_DEFAULT; + private long taskWaitTimeoutMs = TASK_WAIT_TIMEOUT_MS_DEFAULT; + private CloseableHttpClient _client = null; + private boolean skipTlsValidation; + + private Map connectionDetails = null; + + public PrimeraAdapter(String url, Map details) { + this.url = url; + this.connectionDetails = details; + login(); + } + + @Override + public void refresh(Map details) { + this.connectionDetails = details; + this.refreshSession(true); + } + + /** + * Validate that the hostgroup and pod from the details data exists. Each + * configuration object/connection needs a distinct set of these 2 things. + */ + @Override + public void validate() { + login(); + if (this.getHostset(hostset) == null) { + throw new RuntimeException("Hostgroup [" + hostset + "] not found in FlashArray at [" + url + + "], please validate configuration"); + } + + if (this.getCpg(cpg) == null) { + throw new RuntimeException( + "Pod [" + cpg + "] not found in FlashArray at [" + url + "], please validate configuration"); + } + } + + @Override + public void disconnect() { + return; + } + + @Override + public ProviderVolume create(ProviderAdapterContext context, ProviderAdapterDataObject dataIn, + ProviderAdapterDiskOffering diskOffering, long sizeInBytes) { + PrimeraVolumeRequest request = new PrimeraVolumeRequest(); + String externalName = ProviderVolumeNamer.generateObjectName(context, dataIn); + request.setName(externalName); + request.setCpg(cpg); + request.setSnapCPG(snapCpg); + if (sizeInBytes < BYTES_IN_MiB) { + request.setSizeMiB(1); + } else { + request.setSizeMiB(sizeInBytes/BYTES_IN_MiB); + } + + // determine volume type based on offering + // THIN: tpvv=true, reduce=false + // SPARSE: tpvv=true, reduce=true + // THICK: tpvv=false, tpZeroFill=true (not supported) + if (diskOffering != null) { + if (diskOffering.getType() == ProvisioningType.THIN) { + request.setTpvv(true); + request.setReduce(false); + } else if (diskOffering.getType() == ProvisioningType.SPARSE) { + 
request.setTpvv(false); + request.setReduce(true); + } else if (diskOffering.getType() == ProvisioningType.FAT) { + throw new RuntimeException("This storage provider does not support FAT provisioned volumes"); + } + + // sets the amount of space allowed for snapshots as a % of the volumes size + if (diskOffering.getHypervisorSnapshotReserve() != null) { + request.setSsSpcAllocLimitPct(diskOffering.getHypervisorSnapshotReserve()); + } + } else { + // default to deduplicated volume + request.setReduce(true); + request.setTpvv(false); + } + + request.setComment(ProviderVolumeNamer.generateObjectComment(context, dataIn)); + POST("/volumes", request, null); + dataIn.setExternalName(externalName); + ProviderVolume volume = getVolume(context, dataIn); + return volume; + } + + @Override + public String attach(ProviderAdapterContext context, ProviderAdapterDataObject dataIn) { + assert dataIn.getExternalName() != null : "External name not provided internally on volume attach"; + PrimeraHostset.PrimeraHostsetVLUNRequest request = new PrimeraHostset.PrimeraHostsetVLUNRequest(); + request.setHostname("set:" + hostset); + request.setVolumeName(dataIn.getExternalName()); + request.setAutoLun(true); + // auto-lun returned here: Location: /api/v1/vluns/test_vv02,252,mysystem,2:2:4 + String location = POST("/vluns", request, new TypeReference() {}); + if (location == null) { + throw new RuntimeException("Attach volume failed with empty location response to vlun add command on storage provider"); + } + String[] toks = location.split(","); + if (toks.length <2) { + throw new RuntimeException("Attach volume failed with invalid location response to vlun add command on storage provider. Provided location: " + location); + } + return toks[1]; + } + + @Override + public void detach(ProviderAdapterContext context, ProviderAdapterDataObject request) { + // we expect to only be attaching one hostset to the vluns, so on detach we'll + // remove ALL vluns we find. 
+ assert request.getExternalName() != null : "External name not provided internally on volume detach"; + removeAllVluns(request.getExternalName()); + } + + public void removeVlun(String name, Integer lunid, String hostString) { + // hostString can be a hostname OR "set:". It is stored this way + // in the appliance and returned as the vlun's name/string. + DELETE("/vluns/" + name + "," + lunid + "," + hostString); + } + + /** + * Removes all vluns - this should only be done when you are sure the volume is no longer in use + * @param name + */ + public void removeAllVluns(String name) { + PrimeraVlunList list = getVolumeHostsets(name); + if (list != null && list.getMembers() != null) { + for (PrimeraVlun vlun: list.getMembers()) { + removeVlun(vlun.getVolumeName(), vlun.getLun(), vlun.getHostname()); + } + } + } + + public PrimeraVlunList getVolumeHostsets(String name) { + String query = "%22volumeName%20EQ%20" + name + "%22"; + return GET("/vluns?query=" + query, new TypeReference() {}); + } + + @Override + public void delete(ProviderAdapterContext context, ProviderAdapterDataObject request) { + assert request.getExternalName() != null : "External name not provided internally on volume delete"; + + // first remove vluns (take volumes from vluns) from hostset + removeAllVluns(request.getExternalName()); + DELETE("/volumes/" + request.getExternalName()); + } + + @Override + public ProviderVolume copy(ProviderAdapterContext context, ProviderAdapterDataObject sourceVolumeInfo, + ProviderAdapterDataObject targetVolumeInfo) { + PrimeraVolumeCopyRequest request = new PrimeraVolumeCopyRequest(); + PrimeraVolumeCopyRequestParameters parms = new PrimeraVolumeCopyRequestParameters(); + + assert sourceVolumeInfo.getExternalName() != null: "External provider name not provided on copy request to Primera volume provider"; + + // if we have no external name, treat it as a new volume + if (targetVolumeInfo.getExternalName() == null) { + 
targetVolumeInfo.setExternalName(ProviderVolumeNamer.generateObjectName(context, targetVolumeInfo)); + } + + ProviderVolume sourceVolume = this.getVolume(context, sourceVolumeInfo); + if (sourceVolume == null) { + throw new RuntimeException("Source volume " + sourceVolumeInfo.getExternalUuid() + " with provider name " + sourceVolumeInfo.getExternalName() + " not found on storage provider"); + } + + ProviderVolume targetVolume = this.getVolume(context, targetVolumeInfo); + if (targetVolume == null) { + this.create(context, targetVolumeInfo, null, sourceVolume.getAllocatedSizeInBytes()); + } + + parms.setDestVolume(targetVolumeInfo.getExternalName()); + parms.setOnline(false); + request.setParameters(parms); + + PrimeraTaskReference taskref = POST("/volumes/" + sourceVolumeInfo.getExternalName(), request, new TypeReference() {}); + if (taskref == null) { + throw new RuntimeException("Unable to retrieve task used to copy to newly created volume"); + } + + waitForTaskToComplete(taskref.getTaskid(), "copy volume " + sourceVolumeInfo.getExternalName() + " to " + + targetVolumeInfo.getExternalName(), taskWaitTimeoutMs); + + return this.getVolume(context, targetVolumeInfo); + } + + private void waitForTaskToComplete(String taskid, String taskDescription, Long timeoutMs) { + // first wait for task to complete + long taskWaitTimeout = System.currentTimeMillis() + timeoutMs; + boolean timedOut = true; + PrimeraTaskStatus status = null; + long starttime = System.currentTimeMillis(); + while (System.currentTimeMillis() <= taskWaitTimeout) { + status = this.getTaskStatus(taskid); + if (status != null && status.isFinished()) { + timedOut = false; + if (!status.isSuccess()) { + throw new RuntimeException("Task " + taskDescription + " was cancelled. TaskID: " + status.getId() + "; Final Status: " + status.getStatusName()); + } + break; + } else { + if (status != null) { + logger.info("Task " + taskDescription + " is still running. 
TaskID: " + status.getId() + "; Current Status: " + status.getStatusName()); + } + // ugly...to keep from hot-polling API + try { + Thread.sleep(5000); + } catch (InterruptedException e) { + + } + } + } + + if (timedOut) { + if (status != null) { + throw new RuntimeException("Task " + taskDescription + " timed out. TaskID: " + status.getId() + ", Last Known Status: " + status.getStatusName()); + } else { + throw new RuntimeException("Task " + taskDescription + " timed out and a current status could not be retrieved from storage endpoint"); + } + } + + logger.info(taskDescription + " completed in " + ((System.currentTimeMillis() - starttime)/1000) + " seconds"); + } + + private PrimeraTaskStatus getTaskStatus(String taskid) { + return GET("/tasks/" + taskid + "?view=excludeDetail", new TypeReference() { + }); + } + + @Override + public ProviderSnapshot snapshot(ProviderAdapterContext context, ProviderAdapterDataObject sourceVolume, + ProviderAdapterDataObject targetSnapshot) { + assert sourceVolume.getExternalName() != null : "External name not set"; + PrimeraVolumeSnapshotRequest request = new PrimeraVolumeSnapshotRequest(); + PrimeraVolumeSnapshotRequestParameters parms = new PrimeraVolumeSnapshotRequestParameters(); + parms.setName(ProviderVolumeNamer.generateObjectName(context, targetSnapshot)); + request.setParameters(parms); + POST("/volumes/" + sourceVolume.getExternalName(), request, null); + targetSnapshot.setExternalName(parms.getName()); + return getSnapshot(context, targetSnapshot); + } + + @Override + public ProviderVolume revert(ProviderAdapterContext context, ProviderAdapterDataObject dataIn) { + assert dataIn.getExternalName() != null: "External name not internally set for provided snapshot when requested storage provider to revert"; + // first get original volume + PrimeraVolume snapVol = (PrimeraVolume)getVolume(context, dataIn); + assert snapVol != null: "Storage volume associated with snapshot externally named [" + dataIn.getExternalName() + "] 
not found"; + assert snapVol.getParentId() != null: "Unable to determine parent volume/snapshot for snapshot named [" + dataIn.getExternalName() + "]"; + + PrimeraVolumeRevertSnapshotRequest request = new PrimeraVolumeRevertSnapshotRequest(); + request.setOnline(true); + request.setPriority(2); + PrimeraTaskReference taskref = PUT("/volumes/" + dataIn.getExternalName(), request, new TypeReference() {}); + if (taskref == null) { + throw new RuntimeException("Unable to retrieve task used to revert snapshot to base volume"); + } + + waitForTaskToComplete(taskref.getTaskid(), "revert snapshot " + dataIn.getExternalName(), taskWaitTimeoutMs); + + return getVolumeById(context, snapVol.getParentId()); + } + + /** + * Resize the volume to the new size. For HPE Primera, the API takes the additional space to add to the volume + * so this method will first retrieve the current volume's size and subtract that from the new size provided + * before calling the API. + * + * This method uses option GROW_VOLUME=3 for the API at this URL: + * https://support.hpe.com/hpesc/public/docDisplay?docId=a00118636en_us&page=v25706371.html + * + */ + @Override + public void resize(ProviderAdapterContext context, ProviderAdapterDataObject request, long totalNewSizeInBytes) { + assert request.getExternalName() != null: "External name not internally set for provided volume when requesting resize of volume"; + + PrimeraVolume existingVolume = (PrimeraVolume) getVolume(context, request); + assert existingVolume != null: "Storage volume resize request not possible as existing volume not found for external provider name: " + request.getExternalName(); + long existingSizeInBytes = existingVolume.getSizeMiB() * PrimeraAdapter.BYTES_IN_MiB; + assert existingSizeInBytes < totalNewSizeInBytes: "Existing volume size is larger than requested new size for volume resize request. 
The Primera storage system does not support truncating/shrinking volumes."; + long addOnSizeInBytes = totalNewSizeInBytes - existingSizeInBytes; + + PrimeraVolume volume = new PrimeraVolume(); + volume.setSizeMiB((int) (addOnSizeInBytes / PrimeraAdapter.BYTES_IN_MiB)); + volume.setAction(3); + PUT("/volumes/" + request.getExternalName(), volume, null); + } + + @Override + public ProviderVolume getVolume(ProviderAdapterContext context, ProviderAdapterDataObject request) { + String externalName; + + // if the external name isn't provided, look for the derived contextual name. some failure scenarios + // may result in the volume for this context being created but a subsequent failure causing the external + // name to not be persisted for later use. This is true of template-type objects being cached on primary + // storage + if (request.getExternalName() == null) { + externalName = ProviderVolumeNamer.generateObjectName(context, request); + } else { + externalName = request.getExternalName(); + } + + return GET("/volumes/" + externalName, new TypeReference() { + }); + } + + private ProviderVolume getVolumeById(ProviderAdapterContext context, Integer id) { + String query = "%22id%20EQ%20" + id + "%22"; + return GET("/volumes?query=" + query, new TypeReference() {}); + } + + @Override + public ProviderSnapshot getSnapshot(ProviderAdapterContext context, ProviderAdapterDataObject request) { + assert request.getExternalName() != null: "External name not provided internally when finding snapshot on storage provider"; + return GET("/volumes/" + request.getExternalName(), new TypeReference() { + }); + } + + @Override + public ProviderVolume getVolumeByAddress(ProviderAdapterContext context, AddressType addressType, String address) { + assert address != null: "External volume address not provided"; + assert AddressType.FIBERWWN.equals(addressType): "This volume provider currently does not support address type " + addressType.name(); + String query = "%22wwn%20EQ%20" + address 
+ "%22"; + return GET("/volumes?query=" + query, new TypeReference() {}); + } + + @Override + public ProviderVolumeStorageStats getManagedStorageStats() { + PrimeraCpg cpgobj = getCpg(cpg); + // just in case + if (cpgobj == null || cpgobj.getTotalSpaceMiB() == 0) { + return null; + } + Long capacityBytes = 0L; + if (cpgobj.getsDGrowth() != null) { + capacityBytes = cpgobj.getsDGrowth().getLimitMiB() * PrimeraAdapter.BYTES_IN_MiB; + } + Long usedBytes = 0L; + if (cpgobj.getUsrUsage() != null) { + usedBytes = (cpgobj.getUsrUsage().getRawUsedMiB()) * PrimeraAdapter.BYTES_IN_MiB; + } + ProviderVolumeStorageStats stats = new ProviderVolumeStorageStats(); + stats.setActualUsedInBytes(usedBytes); + stats.setCapacityInBytes(capacityBytes); + return stats; + } + + @Override + public ProviderVolumeStats getVolumeStats(ProviderAdapterContext context, ProviderAdapterDataObject request) { + PrimeraVolume vol = (PrimeraVolume)getVolume(context, request); + if (vol == null || vol.getSizeMiB() == null || vol.getSizeMiB() == 0) { + return null; + } + + Long virtualSizeInBytes = vol.getHostWriteMiB() * PrimeraAdapter.BYTES_IN_MiB; + Long allocatedSizeInBytes = vol.getSizeMiB() * PrimeraAdapter.BYTES_IN_MiB; + Long actualUsedInBytes = vol.getTotalUsedMiB() * PrimeraAdapter.BYTES_IN_MiB; + ProviderVolumeStats stats = new ProviderVolumeStats(); + stats.setActualUsedInBytes(actualUsedInBytes); + stats.setAllocatedInBytes(allocatedSizeInBytes); + stats.setVirtualUsedInBytes(virtualSizeInBytes); + return stats; + } + + @Override + public boolean canAccessHost(ProviderAdapterContext context, String hostname) { + PrimeraHostset hostset = getHostset(this.hostset); + + List members = hostset.getSetmembers(); + + // check for fqdn and shortname combinations. 
this assumes there is at least a shortname match in both the storage array and cloudstack + // hostname configuration + String shortname; + if (hostname.indexOf('.') > 0) { + shortname = hostname.substring(0, (hostname.indexOf('.'))); + } else { + shortname = hostname; + } + for (String member: members) { + // exact match (short or long names) + if (member.equals(hostname)) { + return true; + } + + // primera has short name and cloudstack had long name + if (member.equals(shortname)) { + return true; + } + + // member has long name but cloudstack had shortname + int index = member.indexOf("."); + if (index > 0) { + if (member.substring(0, (member.indexOf('.'))).equals(shortname)) { + return true; + } + } + } + + return false; + } + + private PrimeraCpg getCpg(String name) { + return GET("/cpgs/" + name, new TypeReference() { + }); + } + + private PrimeraHostset getHostset(String name) { + return GET("/hostsets/" + name, new TypeReference() { + }); + } + + private String getSessionKey() { + refreshSession(false); + return key; + } + + private synchronized void refreshSession(boolean force) { + try { + if (force || keyExpiration < System.currentTimeMillis()) { + // close client to force connection reset on appliance -- not doing this can result in NotAuthorized error...guessing + _client.close();; + _client = null; + login(); + keyExpiration = System.currentTimeMillis() + keyTtl; + } + } catch (Exception e) { + // retry frequently but not every request to avoid DDOS on storage API + logger.warn("Failed to refresh Primera API key for " + username + "@" + url + ", will retry in 5 seconds", e); + keyExpiration = System.currentTimeMillis() + (5*1000); + } + } + + private void validateLoginInfo(String urlStr) { + URL urlFull; + try { + urlFull = new URL(urlStr); + } catch (MalformedURLException e) { + throw new RuntimeException("Invalid URL format: " + urlStr, e); + } + ; + + int port = urlFull.getPort(); + if (port <= 0) { + port = 443; + } + this.url = 
urlFull.getProtocol() + "://" + urlFull.getHost() + ":" + port + urlFull.getPath(); + + Map queryParms = new HashMap(); + if (urlFull.getQuery() != null) { + String[] queryToks = urlFull.getQuery().split("&"); + for (String tok : queryToks) { + if (tok.endsWith("=")) { + continue; + } + int i = tok.indexOf("="); + if (i > 0) { + queryParms.put(tok.substring(0, i), tok.substring(i + 1)); + } + } + } + + cpg = connectionDetails.get(PrimeraAdapter.CPG); + if (cpg == null) { + cpg = queryParms.get(PrimeraAdapter.CPG); + if (cpg == null) { + throw new RuntimeException( + PrimeraAdapter.CPG + " paramater/option required to configure this storage pool"); + } + } + + snapCpg = connectionDetails.get(PrimeraAdapter.SNAP_CPG); + if (snapCpg == null) { + snapCpg = queryParms.get(PrimeraAdapter.SNAP_CPG); + if (snapCpg == null) { + // default to using same CPG as the volume + snapCpg = cpg; + } + } + + hostset = connectionDetails.get(PrimeraAdapter.HOSTSET); + if (hostset == null) { + hostset = queryParms.get(PrimeraAdapter.HOSTSET); + if (hostset == null) { + throw new RuntimeException( + PrimeraAdapter.HOSTSET + " paramater/option required to configure this storage pool"); + } + } + + String connTimeoutStr = connectionDetails.get(PrimeraAdapter.CONNECT_TIMEOUT_MS); + if (connTimeoutStr == null) { + connTimeoutStr = queryParms.get(PrimeraAdapter.CONNECT_TIMEOUT_MS); + } + if (connTimeoutStr == null) { + connTimeout = CONNECT_TIMEOUT_MS_DEFAULT; + } else { + try { + connTimeout = Integer.parseInt(connTimeoutStr); + } catch (NumberFormatException e) { + logger.warn("Connection timeout not formatted correctly, using default", e); + connTimeout = CONNECT_TIMEOUT_MS_DEFAULT; + } + } + + String keyTtlString = connectionDetails.get(PrimeraAdapter.KEY_TTL); + if (keyTtlString == null) { + keyTtlString = queryParms.get(PrimeraAdapter.KEY_TTL); + } + if (keyTtlString == null) { + keyTtl = KEY_TTL_DEFAULT; + } else { + try { + keyTtl = Integer.parseInt(keyTtlString); + } catch 
(NumberFormatException e) { + logger.warn("Key TTL not formatted correctly, using default", e); + keyTtl = KEY_TTL_DEFAULT; + } + } + + String taskWaitTimeoutMsStr = connectionDetails.get(PrimeraAdapter.TASK_WAIT_TIMEOUT_MS); + if (taskWaitTimeoutMsStr == null) { + taskWaitTimeoutMsStr = queryParms.get(PrimeraAdapter.TASK_WAIT_TIMEOUT_MS); + if (taskWaitTimeoutMsStr == null) { + taskWaitTimeoutMs = PrimeraAdapter.TASK_WAIT_TIMEOUT_MS_DEFAULT; + } else { + try { + taskWaitTimeoutMs = Long.parseLong(taskWaitTimeoutMsStr); + } catch (NumberFormatException e) { + logger.warn(PrimeraAdapter.TASK_WAIT_TIMEOUT_MS + " property not set to a proper number, using default value"); + } + } + } + + String skipTlsValidationStr = connectionDetails.get(ProviderAdapter.API_SKIP_TLS_VALIDATION_KEY); + if (skipTlsValidationStr == null) { + skipTlsValidationStr = queryParms.get(ProviderAdapter.API_SKIP_TLS_VALIDATION_KEY); + } + + if (skipTlsValidationStr != null) { + skipTlsValidation = Boolean.parseBoolean(skipTlsValidationStr); + } else { + skipTlsValidation = true; + } + } + + /** + * Login to the array and get an access token + */ + private void login() { + username = connectionDetails.get(ProviderAdapter.API_USERNAME_KEY); + password = connectionDetails.get(ProviderAdapter.API_PASSWORD_KEY); + String urlStr = connectionDetails.get(ProviderAdapter.API_URL_KEY); + validateLoginInfo(urlStr); + CloseableHttpResponse response = null; + try { + HttpPost request = new HttpPost(url + "/credentials"); + request.addHeader("Content-Type", "application/json"); + request.addHeader("Accept", "application/json"); + request.setEntity(new StringEntity("{\"user\":\"" + username + "\", \"password\":\"" + password + "\"}")); + CloseableHttpClient client = getClient(); + response = (CloseableHttpResponse) client.execute(request); + + final int statusCode = response.getStatusLine().getStatusCode(); + if (statusCode == 200 | statusCode == 201) { + PrimeraKey keyobj = 
mapper.readValue(response.getEntity().getContent(), PrimeraKey.class); + key = keyobj.getKey(); + } else if (statusCode == 401 || statusCode == 403) { + throw new RuntimeException("Authentication or Authorization to Primera [" + url + "] with user [" + username + + "] failed, unable to retrieve session token"); + } else { + throw new RuntimeException("Unexpected HTTP response code from Primera [" + url + "] - [" + statusCode + + "] - " + response.getStatusLine().getReasonPhrase()); + } + } catch (UnsupportedEncodingException e) { + throw new RuntimeException("Error creating input for login, check username/password encoding"); + } catch (UnsupportedOperationException e) { + throw new RuntimeException("Error processing login response from Primera [" + url + "]", e); + } catch (IOException e) { + throw new RuntimeException("Error sending login request to Primera [" + url + "]", e); + } finally { + try { + if (response != null) { + response.close(); + } + } catch (IOException e) { + logger.debug("Error closing response from login attempt to Primera", e); + } + } + } + + private CloseableHttpClient getClient() { + if (_client == null) { + RequestConfig config = RequestConfig.custom() + .setConnectTimeout((int) connTimeout) + .setConnectionRequestTimeout((int) connTimeout) + .setSocketTimeout((int) connTimeout).build(); + + HostnameVerifier verifier = null; + SSLContext sslContext = null; + + if (this.skipTlsValidation) { + try { + verifier = NoopHostnameVerifier.INSTANCE; + sslContext = new SSLContextBuilder().loadTrustMaterial(null, TrustAllStrategy.INSTANCE).build(); + } catch (KeyManagementException e) { + throw new RuntimeException(e); + } catch (NoSuchAlgorithmException e) { + throw new RuntimeException(e); + } catch (KeyStoreException e) { + throw new RuntimeException(e); + } + } + + _client = HttpClients.custom() + .setDefaultRequestConfig(config) + .setSSLHostnameVerifier(verifier) + .setSSLContext(sslContext) + .build(); + } + return _client; + } + + 
@SuppressWarnings("unchecked") + private T POST(String path, Object input, final TypeReference type) { + CloseableHttpResponse response = null; + try { + this.refreshSession(false); + HttpPost request = new HttpPost(url + path); + request.addHeader("Content-Type", "application/json"); + request.addHeader("Accept", "application/json"); + request.addHeader("X-HP3PAR-WSAPI-SessionKey", getSessionKey()); + try { + String data = mapper.writeValueAsString(input); + request.setEntity(new StringEntity(data)); + logger.debug("POST data: " + request.getEntity()); + } catch (UnsupportedEncodingException | JsonProcessingException e) { + throw new RuntimeException( + "Error processing request payload to [" + url + "] for path [" + path + "]", e); + } + + CloseableHttpClient client = getClient(); + try { + response = (CloseableHttpResponse) client + .execute(request); + } catch (IOException e) { + throw new RuntimeException("Error sending request to Primera [" + url + path + "]", e); + } + + final int statusCode = response.getStatusLine().getStatusCode(); + if (statusCode == 200 || statusCode == 201) { + try { + if (type != null) { + Header header = response.getFirstHeader("Location"); + if (type.getType().getTypeName().equals(String.class.getName())) { + if (header != null) { + return (T) header.getValue(); + } else { + return null; + } + } else if (type.getType().getTypeName().equals(PrimeraTaskReference.class.getName())) { + T obj = mapper.readValue(response.getEntity().getContent(), type); + PrimeraTaskReference taskref = (PrimeraTaskReference) obj; + taskref.setLocation(header.getValue()); + return obj; + } else { + return mapper.readValue(response.getEntity().getContent(), type); + } + } + return null; + } catch (UnsupportedOperationException | IOException e) { + throw new RuntimeException("Error processing response from Primera [" + url + path + "]", e); + } + } else if (statusCode == 400) { + try { + Map payload = mapper.readValue(response.getEntity().getContent(), + new 
TypeReference>() { + }); + throw new RuntimeException("Invalid request error 400: " + payload); + } catch (UnsupportedOperationException | IOException e) { + throw new RuntimeException( + "Error processing bad request response from Primera [" + url + path + "]", e); + } + } else if (statusCode == 401 || statusCode == 403) { + throw new RuntimeException("Authentication or Authorization to Primera [" + url + "] with user [" + username + + "] failed, unable to retrieve session token"); + } else { + try { + Map payload = mapper.readValue(response.getEntity().getContent(), + new TypeReference>() { + }); + throw new RuntimeException("Invalid request error " + statusCode + ": " + payload); + } catch (UnsupportedOperationException | IOException e) { + throw new RuntimeException("Unexpected HTTP response code from Primera on POST [" + url + path + "] - [" + + statusCode + "] - " + response.getStatusLine().getReasonPhrase()); + } + } + } finally { + if (response != null) { + try { + response.close(); + } catch (IOException e) { + logger.debug("Unexpected failure closing response to Primera API", e); + } + } + } + } + + private T PUT(String path, Object input, final TypeReference type) { + CloseableHttpResponse response = null; + try { + this.refreshSession(false); + HttpPut request = new HttpPut(url + path); + request.addHeader("Content-Type", "application/json"); + request.addHeader("Accept", "application/json"); + request.addHeader("X-HP3PAR-WSAPI-SessionKey", getSessionKey()); + String data = mapper.writeValueAsString(input); + request.setEntity(new StringEntity(data)); + + CloseableHttpClient client = getClient(); + response = (CloseableHttpResponse) client.execute(request); + + final int statusCode = response.getStatusLine().getStatusCode(); + if (statusCode == 200 || statusCode == 201) { + if (type != null) + return mapper.readValue(response.getEntity().getContent(), type); + return null; + } else if (statusCode == 400) { + Map payload = 
mapper.readValue(response.getEntity().getContent(), + new TypeReference>() { + }); + throw new RuntimeException("Invalid request error 400: " + payload); + } else if (statusCode == 401 || statusCode == 403) { + throw new RuntimeException("Authentication or Authorization to Primera [" + url + "] with user [" + username + + "] failed, unable to retrieve session token"); + } else { + Map payload = mapper.readValue(response.getEntity().getContent(), + new TypeReference>() {}); + throw new RuntimeException("Invalid request error from Primera on PUT [" + url + path + "]" + statusCode + ": " + + response.getStatusLine().getReasonPhrase() + " - " + payload); + } + } catch (UnsupportedEncodingException | JsonProcessingException e) { + throw new RuntimeException( + "Error processing request payload to [" + url + "] for path [" + path + "]", e); + } catch (UnsupportedOperationException e) { + throw new RuntimeException("Error processing bad request response from Primera [" + url + "]", + e); + } catch (IOException e) { + throw new RuntimeException("Error sending request to Primera [" + url + "]", e); + + } finally { + if (response != null) { + try { + response.close(); + } catch (IOException e) { + logger.debug("Unexpected failure closing response to Primera API", e); + } + } + } + } + + private T GET(String path, final TypeReference type) { + CloseableHttpResponse response = null; + try { + this.refreshSession(false); + HttpGet request = new HttpGet(url + path); + request.addHeader("Content-Type", "application/json"); + request.addHeader("Accept", "application/json"); + request.addHeader("X-HP3PAR-WSAPI-SessionKey", getSessionKey()); + + CloseableHttpClient client = getClient(); + response = (CloseableHttpResponse) client.execute(request); + final int statusCode = response.getStatusLine().getStatusCode(); + if (statusCode == 200) { + try { + return mapper.readValue(response.getEntity().getContent(), type); + } catch (UnsupportedOperationException | IOException e) { + throw 
new RuntimeException("Error processing response from Primera [" + url + "]", e); + } + } else if (statusCode == 401 || statusCode == 403) { + throw new RuntimeException("Authentication or Authorization to Primera [" + url + "] with user [" + username + + "] failed, unable to retrieve session token"); + } else if (statusCode == 404) { + return null; + } else { + throw new RuntimeException("Unexpected HTTP response code from Primera on GET [" + url + path + "] - [" + + statusCode + "] - " + response.getStatusLine().getReasonPhrase()); + } + } catch (IOException e) { + throw new RuntimeException("Error sending request to Primera [" + url + "]", e); + } catch (UnsupportedOperationException e) { + throw new RuntimeException("Error processing response from Primera [" + url + "]", e); + } finally { + if (response != null) { + try { + response.close(); + } catch (IOException e) { + logger.debug("Unexpected failure closing response to Primera API", e); + } + } + } + } + + private void DELETE(String path) { + CloseableHttpResponse response = null; + try { + this.refreshSession(false); + HttpDelete request = new HttpDelete(url + path); + request.addHeader("Content-Type", "application/json"); + request.addHeader("Accept", "application/json"); + request.addHeader("X-HP3PAR-WSAPI-SessionKey", getSessionKey()); + + CloseableHttpClient client = getClient(); + response = (CloseableHttpResponse) client.execute(request); + final int statusCode = response.getStatusLine().getStatusCode(); + if (statusCode == 200 || statusCode == 404 || statusCode == 400) { + // this means the volume was deleted successfully, or doesn't exist (effective delete), or + // the volume name is malformed or too long - meaning it never got created to begin with (effective delete) + return; + } else if (statusCode == 401 || statusCode == 403) { + throw new RuntimeException("Authentication or Authorization to Primera [" + url + "] with user [" + username + + "] failed, unable to retrieve session token"); + } 
else if (statusCode == 409) { + throw new RuntimeException("The volume cannot be deleted at this time due to existing dependencies. Validate that all snapshots associated with this volume have been deleted and try again." ); + } else { + throw new RuntimeException("Unexpected HTTP response code from Primera on DELETE [" + url + path + "] - [" + + statusCode + "] - " + response.getStatusLine().getReasonPhrase()); + } + } catch (IOException e) { + throw new RuntimeException("Error sending request to Primera [" + url + "]", e); + } finally { + if (response != null) { + try { + response.close(); + } catch (IOException e) { + logger.debug("Unexpected failure closing response to Primera API", e); + } + } + } + } + + +} diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraAdapterFactory.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraAdapterFactory.java new file mode 100644 index 00000000000..81ae442b38d --- /dev/null +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraAdapterFactory.java @@ -0,0 +1,36 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
package org.apache.cloudstack.storage.datastore.adapter.primera;

import java.util.Map;

import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapter;
import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterFactory;

/**
 * Factory that produces {@link PrimeraAdapter} instances for the "Primera"
 * storage provider. Registered with the provider-adapter framework, which
 * selects a factory by the name returned from {@link #getProviderName()}.
 */
public class PrimeraAdapterFactory implements ProviderAdapterFactory {

    /** Provider name used by the framework to look up this factory. */
    @Override
    public String getProviderName() {
        return "Primera";
    }

    /**
     * Creates a new adapter bound to the given management URL and connection
     * details (credentials, cpg, hostset, timeouts, ...).
     */
    @Override
    public ProviderAdapter create(String url, Map<String, String> details) {
        return new PrimeraAdapter(url, details);
    }
}
package org.apache.cloudstack.storage.datastore.adapter.primera;

import java.util.ArrayList;

import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;

/**
 * Jackson-mapped representation of a Primera/3PAR CPG (Common Provisioning
 * Group) as returned by the WSAPI /cpgs endpoint. Unknown JSON properties are
 * ignored on deserialization and null fields are omitted on serialization.
 *
 * NOTE(review): the state lists use raw ArrayList here — the element type is
 * not visible in this view; confirm against the WSAPI schema before adding
 * generics.
 */
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class PrimeraCpg {
    // space sizes below are reported by the array in MiB
    private long ddsRsvdMiB;
    private String tdvvVersion;
    // growth/usage sub-objects: SA = snapshot admin, SD = snapshot data, Usr = user data
    private PrimeraCpgSAGrowth sAGrowth;
    private PrimeraCpgSAUsage sAUsage;
    private PrimeraCpgSDGrowth sDGrowth;
    private PrimeraCpgSDUsage sDUsage;
    private PrimeraCpgUsrUsage usrUsage;
    private ArrayList additionalStates;
    private boolean dedupCapable;
    private ArrayList degradedStates;
    private ArrayList failedStates;
    private int freeSpaceMiB;
    private String name;
    // virtual-volume counts by provisioning type (full / thin-dedup / thin)
    private int numFPVVs;
    private int numTDVVs;
    private int numTPVVs;
    private PrimeraCpgPrivateSpaceMiB privateSpaceMiB;
    private int rawFreeSpaceMiB;
    private int rawSharedSpaceMiB;
    private int rawTotalSpaceMiB;
    private int sharedSpaceMiB;
    private int state;
    private int totalSpaceMiB;
    private String uuid;
    private int id;

    // Accessor names like getsAGrowth intentionally match the WSAPI JSON
    // property casing for Jackson's bean-name mapping; do not "fix" them.
    public long getDdsRsvdMiB() {
        return ddsRsvdMiB;
    }
    public void setDdsRsvdMiB(long ddsRsvdMiB) {
        this.ddsRsvdMiB = ddsRsvdMiB;
    }
    public String getTdvvVersion() {
        return tdvvVersion;
    }
    public void setTdvvVersion(String tdvvVersion) {
        this.tdvvVersion = tdvvVersion;
    }
    public PrimeraCpgSAGrowth getsAGrowth() {
        return sAGrowth;
    }
    public void setsAGrowth(PrimeraCpgSAGrowth sAGrowth) {
        this.sAGrowth = sAGrowth;
    }
    public PrimeraCpgSAUsage getsAUsage() {
        return sAUsage;
    }
    public void setsAUsage(PrimeraCpgSAUsage sAUsage) {
        this.sAUsage = sAUsage;
    }
    public PrimeraCpgSDGrowth getsDGrowth() {
        return sDGrowth;
    }
    public void setsDGrowth(PrimeraCpgSDGrowth sDGrowth) {
        this.sDGrowth = sDGrowth;
    }
    public PrimeraCpgSDUsage getsDUsage() {
        return sDUsage;
    }
    public void setsDUsage(PrimeraCpgSDUsage sDUsage) {
        this.sDUsage = sDUsage;
    }
    public PrimeraCpgUsrUsage getUsrUsage() {
        return usrUsage;
    }
    public void setUsrUsage(PrimeraCpgUsrUsage usrUsage) {
        this.usrUsage = usrUsage;
    }
    public ArrayList getAdditionalStates() {
        return additionalStates;
    }
    public void setAdditionalStates(ArrayList additionalStates) {
        this.additionalStates = additionalStates;
    }
    public boolean isDedupCapable() {
        return dedupCapable;
    }
    public void setDedupCapable(boolean dedupCapable) {
        this.dedupCapable = dedupCapable;
    }
    public ArrayList getDegradedStates() {
        return degradedStates;
    }
    public void setDegradedStates(ArrayList degradedStates) {
        this.degradedStates = degradedStates;
    }
    public ArrayList getFailedStates() {
        return failedStates;
    }
    public void setFailedStates(ArrayList failedStates) {
        this.failedStates = failedStates;
    }
    public int getFreeSpaceMiB() {
        return freeSpaceMiB;
    }
    public void setFreeSpaceMiB(int freeSpaceMiB) {
        this.freeSpaceMiB = freeSpaceMiB;
    }
    public String getName() {
        return name;
    }
    public void setName(String name) {
        this.name = name;
    }
    public int getNumFPVVs() {
        return numFPVVs;
    }
    public void setNumFPVVs(int numFPVVs) {
        this.numFPVVs = numFPVVs;
    }
    public int getNumTDVVs() {
        return numTDVVs;
    }
    public void setNumTDVVs(int numTDVVs) {
        this.numTDVVs = numTDVVs;
    }
    public int getNumTPVVs() {
        return numTPVVs;
    }
    public void setNumTPVVs(int numTPVVs) {
        this.numTPVVs = numTPVVs;
    }
    public PrimeraCpgPrivateSpaceMiB getPrivateSpaceMiB() {
        return privateSpaceMiB;
    }
    public void setPrivateSpaceMiB(PrimeraCpgPrivateSpaceMiB privateSpaceMiB) {
        this.privateSpaceMiB = privateSpaceMiB;
    }
    public int getRawFreeSpaceMiB() {
        return rawFreeSpaceMiB;
    }
    public void setRawFreeSpaceMiB(int rawFreeSpaceMiB) {
        this.rawFreeSpaceMiB = rawFreeSpaceMiB;
    }
    public int getRawSharedSpaceMiB() {
        return rawSharedSpaceMiB;
    }
    public void setRawSharedSpaceMiB(int rawSharedSpaceMiB) {
        this.rawSharedSpaceMiB = rawSharedSpaceMiB;
    }
    public int getRawTotalSpaceMiB() {
        return rawTotalSpaceMiB;
    }
    public void setRawTotalSpaceMiB(int rawTotalSpaceMiB) {
        this.rawTotalSpaceMiB = rawTotalSpaceMiB;
    }
    public int getSharedSpaceMiB() {
        return sharedSpaceMiB;
    }
    public void setSharedSpaceMiB(int sharedSpaceMiB) {
        this.sharedSpaceMiB = sharedSpaceMiB;
    }
    public int getState() {
        return state;
    }
    public void setState(int state) {
        this.state = state;
    }
    public int getTotalSpaceMiB() {
        return totalSpaceMiB;
    }
    public void setTotalSpaceMiB(int totalSpaceMiB) {
        this.totalSpaceMiB = totalSpaceMiB;
    }
    public String getUuid() {
        return uuid;
    }
    public void setUuid(String uuid) {
        this.uuid = uuid;
    }
    public int getId() {
        return id;
    }
    public void setId(int id) {
        this.id = id;
    }

}
package org.apache.cloudstack.storage.datastore.adapter.primera;

import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;

/**
 * Jackson-mapped disk pattern entry inside a CPG LD layout, as returned by the
 * Primera/3PAR WSAPI. Carries the numeric disk type code used by the array.
 */
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class PrimeraCpgDiskPattern {
    // numeric disk type code as defined by the WSAPI (e.g. FC/NL/SSD enum value)
    private int diskType;

    public int getDiskType() {
        return diskType;
    }

    public void setDiskType(int diskType) {
        this.diskType = diskType;
    }

}
import java.util.ArrayList;

import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;

/**
 * Jackson DTO for the logical-disk (LD) layout section of a Primera CPG
 * object. The unusual accessor names (getrAIDType/gethA) follow from the
 * field capitalization and drive Jackson's derived JSON property names,
 * so they must not be "fixed" without checking the wire format.
 */
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class PrimeraCpgLDLayout {
    // Numeric RAID-type code as defined by the Primera WSAPI.
    private int rAIDType;
    // NOTE(review): raw type — presumably ArrayList<PrimeraCpgDiskPattern>
    // given the sibling DTO; parameterizing would tighten the accessors.
    // TODO confirm against the WSAPI response before changing.
    private ArrayList diskPatterns;
    // High-availability setting code; semantics defined by the WSAPI.
    private int hA;
    public int getrAIDType() {
        return rAIDType;
    }
    public void setrAIDType(int rAIDType) {
        this.rAIDType = rAIDType;
    }
    public ArrayList getDiskPatterns() {
        return diskPatterns;
    }
    public void setDiskPatterns(ArrayList diskPatterns) {
        this.diskPatterns = diskPatterns;
    }
    public int gethA() {
        return hA;
    }
    public void sethA(int hA) {
        this.hA = hA;
    }

}
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;

/**
 * Jackson DTO for the private-space section of a Primera CPG space
 * report. Per the field naming, values are sizes in MiB; "raw" values
 * presumably include RAID overhead — TODO confirm with WSAPI docs.
 */
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class PrimeraCpgPrivateSpaceMiB {
    private int base;
    private int rawBase;
    private int rawSnapshot;
    private int snapshot;
    public int getBase() {
        return base;
    }
    public void setBase(int base) {
        this.base = base;
    }
    public int getRawBase() {
        return rawBase;
    }
    public void setRawBase(int rawBase) {
        this.rawBase = rawBase;
    }
    public int getRawSnapshot() {
        return rawSnapshot;
    }
    public void setRawSnapshot(int rawSnapshot) {
        this.rawSnapshot = rawSnapshot;
    }
    public int getSnapshot() {
        return snapshot;
    }
    public void setSnapshot(int snapshot) {
        this.snapshot = snapshot;
    }

}
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;

/**
 * Jackson DTO for the snapshot-admin (SA) growth settings of a Primera
 * CPG: the LD layout used for growth and the growth increment in MiB.
 */
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class PrimeraCpgSAGrowth {
    // Layout applied when the CPG grows its SA space.
    private PrimeraCpgLDLayout lDLayout;
    // Growth increment in MiB, per field naming.
    private int incrementMiB;
    public PrimeraCpgLDLayout getlDLayout() {
        return lDLayout;
    }
    public void setlDLayout(PrimeraCpgLDLayout lDLayout) {
        this.lDLayout = lDLayout;
    }
    public int getIncrementMiB() {
        return incrementMiB;
    }
    public void setIncrementMiB(int incrementMiB) {
        this.incrementMiB = incrementMiB;
    }

}
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;

/**
 * Jackson DTO for the snapshot-admin (SA) usage counters of a Primera
 * CPG. Per field naming, all values are in MiB; "raw" presumably
 * includes RAID overhead — TODO confirm with WSAPI docs.
 */
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class PrimeraCpgSAUsage {
    private int rawTotalMiB;
    private int rawUsedMiB;
    private int totalMiB;
    private int usedMiB;
    public int getRawTotalMiB() {
        return rawTotalMiB;
    }
    public void setRawTotalMiB(int rawTotalMiB) {
        this.rawTotalMiB = rawTotalMiB;
    }
    public int getRawUsedMiB() {
        return rawUsedMiB;
    }
    public void setRawUsedMiB(int rawUsedMiB) {
        this.rawUsedMiB = rawUsedMiB;
    }
    public int getTotalMiB() {
        return totalMiB;
    }
    public void setTotalMiB(int totalMiB) {
        this.totalMiB = totalMiB;
    }
    public int getUsedMiB() {
        return usedMiB;
    }
    public void setUsedMiB(int usedMiB) {
        this.usedMiB = usedMiB;
    }

}
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;

/**
 * Jackson DTO for the snapshot-data (SD) growth settings of a Primera
 * CPG: growth LD layout, growth increment, and the limit/warning
 * thresholds, all expressed in MiB per the field naming.
 */
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class PrimeraCpgSDGrowth {
    // Layout applied when the CPG grows its SD space.
    private PrimeraCpgLDLayout lDLayout;
    private int incrementMiB;
    private int limitMiB;
    private int warningMiB;
    public PrimeraCpgLDLayout getlDLayout() {
        return lDLayout;
    }
    public void setlDLayout(PrimeraCpgLDLayout lDLayout) {
        this.lDLayout = lDLayout;
    }
    public int getIncrementMiB() {
        return incrementMiB;
    }
    public void setIncrementMiB(int incrementMiB) {
        this.incrementMiB = incrementMiB;
    }
    public int getLimitMiB() {
        return limitMiB;
    }
    public void setLimitMiB(int limitMiB) {
        this.limitMiB = limitMiB;
    }
    public int getWarningMiB() {
        return warningMiB;
    }
    public void setWarningMiB(int warningMiB) {
        this.warningMiB = warningMiB;
    }

}
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;

/**
 * Jackson DTO for the snapshot-data (SD) usage counters of a Primera
 * CPG. Same shape as the SA/user usage DTOs; all values in MiB per
 * field naming.
 */
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class PrimeraCpgSDUsage {
    private int rawTotalMiB;
    private int rawUsedMiB;
    private int totalMiB;
    private int usedMiB;
    public int getRawTotalMiB() {
        return rawTotalMiB;
    }
    public void setRawTotalMiB(int rawTotalMiB) {
        this.rawTotalMiB = rawTotalMiB;
    }
    public int getRawUsedMiB() {
        return rawUsedMiB;
    }
    public void setRawUsedMiB(int rawUsedMiB) {
        this.rawUsedMiB = rawUsedMiB;
    }
    public int getTotalMiB() {
        return totalMiB;
    }
    public void setTotalMiB(int totalMiB) {
        this.totalMiB = totalMiB;
    }
    public int getUsedMiB() {
        return usedMiB;
    }
    public void setUsedMiB(int usedMiB) {
        this.usedMiB = usedMiB;
    }

}
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;

/**
 * Jackson DTO for the user-space (usr) usage counters of a Primera CPG.
 * Same shape as the SA/SD usage DTOs; all values in MiB per field
 * naming.
 */
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class PrimeraCpgUsrUsage {
    private int rawTotalMiB;
    private int rawUsedMiB;
    private int totalMiB;
    private int usedMiB;
    public int getRawTotalMiB() {
        return rawTotalMiB;
    }
    public void setRawTotalMiB(int rawTotalMiB) {
        this.rawTotalMiB = rawTotalMiB;
    }
    public int getRawUsedMiB() {
        return rawUsedMiB;
    }
    public void setRawUsedMiB(int rawUsedMiB) {
        this.rawUsedMiB = rawUsedMiB;
    }
    public int getTotalMiB() {
        return totalMiB;
    }
    public void setTotalMiB(int totalMiB) {
        this.totalMiB = totalMiB;
    }
    public int getUsedMiB() {
        return usedMiB;
    }
    public void setUsedMiB(int usedMiB) {
        this.usedMiB = usedMiB;
    }

}
b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraHostset.java @@ -0,0 +1,141 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.datastore.adapter.primera; + +import java.util.ArrayList; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class PrimeraHostset { + + private String comment; + private Integer id; + private String name; + private List setmembers = new ArrayList(); + private String uuid; + private Map additionalProperties = new LinkedHashMap(); + + public String getComment() { + return comment; + } + + public void setComment(String comment) { + this.comment = comment; + } + + public Integer getId() { + return id; + } + + public void setId(Integer id) { + this.id = id; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public List getSetmembers() { + return setmembers; + } + + public void 
setSetmembers(List setmembers) { + this.setmembers = setmembers; + } + + public String getUuid() { + return uuid; + } + + public void setUuid(String uuid) { + this.uuid = uuid; + } + + public Map getAdditionalProperties() { + return additionalProperties; + } + + public void setAdditionalProperties(Map additionalProperties) { + this.additionalProperties = additionalProperties; + } + + // adds members to a hostset + public static class PrimeraHostsetVLUNRequest { + private String volumeName; + private Boolean autoLun = true; + private Integer lun = 0; + private Integer maxAutoLun = 0; + /** + * This can be a single hostname OR the set of hosts in the format + * "set:". + * For the purposes of this driver, its expected that the predominate usecase is + * to use + * a hostset that is aligned with a CloudStack Cluster. + */ + private String hostname; + + public String getVolumeName() { + return volumeName; + } + + public void setVolumeName(String volumeName) { + this.volumeName = volumeName; + } + + public Boolean getAutoLun() { + return autoLun; + } + + public void setAutoLun(Boolean autoLun) { + this.autoLun = autoLun; + } + + public Integer getLun() { + return lun; + } + + public void setLun(Integer lun) { + this.lun = lun; + } + + public Integer getMaxAutoLun() { + return maxAutoLun; + } + + public void setMaxAutoLun(Integer maxAutoLun) { + this.maxAutoLun = maxAutoLun; + } + + public String getHostname() { + return hostname; + } + + public void setHostname(String hostname) { + this.hostname = hostname; + } + + } +} diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraKey.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraKey.java new file mode 100644 index 00000000000..0fc050e9844 --- /dev/null +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraKey.java @@ -0,0 +1,35 @@ +// 
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;

/**
 * Jackson DTO wrapping a single "key" string returned by the Primera
 * WSAPI. NOTE(review): presumably the session/auth key from the
 * credentials endpoint — confirm against the caller.
 */
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class PrimeraKey {
    private String key;

    public String getKey() {
        return key;
    }

    public void setKey(String key) {
        this.key = key;
    }

}
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;

/**
 * Jackson DTO referencing an asynchronous Primera task, as returned by
 * WSAPI calls that run in the background.
 */
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class PrimeraTaskReference {
    // Identifier used to poll task status.
    private String taskid;
    /**
     * not really returned, but if there is a Location header in a
     * response we'll add it automatically if this is the type
     **/
    private String location;
    public String getTaskid() {
        return taskid;
    }
    public void setTaskid(String taskid) {
        this.taskid = taskid;
    }
    public String getLocation() {
        return location;
    }
    public void setLocation(String location) {
        this.location = location;
    }

}
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.datastore.adapter.primera; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class PrimeraTaskStatus { + private Integer id; + private Integer type; + private String name; + private Integer status; + private Integer completedPhases; + private Integer totalPhases; + private Integer completedSteps; + private Integer totalSteps; + private String startTime; + private String finishTime; + private Integer priority; + private String user; + private String detailedStatus; + public static final Integer STATUS_DONE = 1; + public static final Integer STATUS_ACTIVE = 2; + public static final Integer STATUS_CANCELLED = 3; + public static final Integer STATUS_FAILED = 4; + + public boolean isFinished() { + if (status != STATUS_ACTIVE) { + return true; + } + return false; + } + + public boolean isSuccess() { + if (status == STATUS_DONE) { + return true; + } + return false; + } + + public String getStatusName() { + if (status == PrimeraTaskStatus.STATUS_DONE) { + return "DONE"; + } else if (status == PrimeraTaskStatus.STATUS_ACTIVE) { + return "ACTIVE"; + } else if (status == 
PrimeraTaskStatus.STATUS_CANCELLED) { + return "CANCELLED"; + } else if (status == PrimeraTaskStatus.STATUS_FAILED) { + return "FAILED"; + } else { + return "UNKNOWN"; + } + } + + public Integer getId() { + return id; + } + + public void setId(Integer id) { + this.id = id; + } + + public Integer getType() { + return type; + } + + public void setType(Integer type) { + this.type = type; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public Integer getStatus() { + return status; + } + + public void setStatus(Integer status) { + this.status = status; + } + + public Integer getCompletedPhases() { + return completedPhases; + } + + public void setCompletedPhases(Integer completedPhases) { + this.completedPhases = completedPhases; + } + + public Integer getTotalPhases() { + return totalPhases; + } + + public void setTotalPhases(Integer totalPhases) { + this.totalPhases = totalPhases; + } + + public Integer getCompletedSteps() { + return completedSteps; + } + + public void setCompletedSteps(Integer completedSteps) { + this.completedSteps = completedSteps; + } + + public Integer getTotalSteps() { + return totalSteps; + } + + public void setTotalSteps(Integer totalSteps) { + this.totalSteps = totalSteps; + } + + public String getStartTime() { + return startTime; + } + + public void setStartTime(String startTime) { + this.startTime = startTime; + } + + public String getFinishTime() { + return finishTime; + } + + public void setFinishTime(String finishTime) { + this.finishTime = finishTime; + } + + public Integer getPriority() { + return priority; + } + + public void setPriority(Integer priority) { + this.priority = priority; + } + + public String getUser() { + return user; + } + + public void setUser(String user) { + this.user = user; + } + + public String getDetailedStatus() { + return detailedStatus; + } + + public void setDetailedStatus(String detailedStatus) { + this.detailedStatus = detailedStatus; + 
} +} diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVlun.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVlun.java new file mode 100644 index 00000000000..d35b16c048b --- /dev/null +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVlun.java @@ -0,0 +1,180 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;

/**
 * Jackson DTO for a Primera VLUN (volume-to-host export) record as
 * returned by the WSAPI. Numeric codes (type, multipathing,
 * failedPathPol) follow the WSAPI enumerations, not reproduced here.
 */
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class PrimeraVlun {
    // LUN id the volume is exported as.
    private int lun;
    private String volumeName;
    // Host (or "set:" host-set) the export targets.
    private String hostname;
    private String remoteName;
    private int type;
    private String serial;
    private PrimeraPortPosition portPos;
    // WWN used on the KVM side to locate the device (/dev/disk/by-id/wwn-...).
    private String volumeWWN;
    private int multipathing;
    private int failedPathPol;
    private int failedPathInterval;
    private String hostDeviceName;
    // JSON key on the wire is "Subsystem_NQN" (NVMe), hence the explicit mapping.
    @JsonProperty("Subsystem_NQN")
    private String subsystemNQN;
    private boolean active;

    /** Physical port location (node/slot/port) of the export. */
    public static class PrimeraPortPosition {
        private int node;
        private int slot;
        private int cardPort;
        public int getNode() {
            return node;
        }
        public void setNode(int node) {
            this.node = node;
        }
        public int getSlot() {
            return slot;
        }
        public void setSlot(int slot) {
            this.slot = slot;
        }
        public int getCardPort() {
            return cardPort;
        }
        public void setCardPort(int cardPort) {
            this.cardPort = cardPort;
        }

    }

    public int getLun() {
        return lun;
    }

    public void setLun(int lun) {
        this.lun = lun;
    }

    public String getVolumeName() {
        return volumeName;
    }

    public void setVolumeName(String volumeName) {
        this.volumeName = volumeName;
    }

    public String getHostname() {
        return hostname;
    }

    public void setHostname(String hostname) {
        this.hostname = hostname;
    }

    public String getRemoteName() {
        return remoteName;
    }

    public void setRemoteName(String remoteName) {
        this.remoteName = remoteName;
    }

    public int getType() {
        return type;
    }

    public void setType(int type) {
        this.type = type;
    }

    public String getSerial() {
        return serial;
    }

    public void setSerial(String serial) {
        this.serial = serial;
    }

    public PrimeraPortPosition getPortPos() {
        return portPos;
    }

    public void setPortPos(PrimeraPortPosition portPos) {
        this.portPos = portPos;
    }

    public String getVolumeWWN() {
        return volumeWWN;
    }

    public void setVolumeWWN(String volumeWWN) {
        this.volumeWWN = volumeWWN;
    }

    public int getMultipathing() {
        return multipathing;
    }

    public void setMultipathing(int multipathing) {
        this.multipathing = multipathing;
    }

    public int getFailedPathPol() {
        return failedPathPol;
    }

    public void setFailedPathPol(int failedPathPol) {
        this.failedPathPol = failedPathPol;
    }

    public int getFailedPathInterval() {
        return failedPathInterval;
    }

    public void setFailedPathInterval(int failedPathInterval) {
        this.failedPathInterval = failedPathInterval;
    }

    public String getHostDeviceName() {
        return hostDeviceName;
    }

    public void setHostDeviceName(String hostDeviceName) {
        this.hostDeviceName = hostDeviceName;
    }

    public String getSubsystemNQN() {
        return subsystemNQN;
    }

    public void setSubsystemNQN(String subsystemNQN) {
        this.subsystemNQN = subsystemNQN;
    }

    public boolean isActive() {
        return active;
    }

    public void setActive(boolean active) {
        this.active = active;
    }


}
import java.util.List;

import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;

/**
 * Jackson DTO for a paged/list VLUN response from the Primera WSAPI:
 * a total count plus the member records.
 */
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class PrimeraVlunList {
    // Total number of VLUNs reported by the array.
    private int total;
    private String serial;
    // NOTE(review): raw type — presumably List<PrimeraVlun>; parameterizing
    // would tighten the accessors. TODO confirm against the WSAPI response.
    private List members;
    public int getTotal() {
        return total;
    }
    public void setTotal(int total) {
        this.total = total;
    }
    public String getSerial() {
        return serial;
    }
    public void setSerial(String serial) {
        this.serial = serial;
    }
    public List getMembers() {
        return members;
    }
    public void setMembers(List members) {
        this.members = members;
    }

}
package org.apache.cloudstack.storage.datastore.adapter.primera;

import java.util.ArrayList;
import java.util.Date;

import org.apache.cloudstack.storage.datastore.adapter.ProviderSnapshot;

import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;

/**
 * JSON representation of a volume (or virtual-copy snapshot) object returned
 * by the HPE Primera WSAPI, adapted to the CloudStack {@code ProviderSnapshot}
 * contract (external name/uuid, address, size accounting).
 *
 * Fields without {@code @JsonIgnore} mirror WSAPI property names; unknown API
 * properties are silently ignored and nulls are omitted when serializing.
 */
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class PrimeraVolume implements ProviderSnapshot {
    // Adapter-side state only; never exchanged with the array.
    @JsonIgnore
    private AddressType addressType = AddressType.FIBERWWN; // volumes are addressed by WWN by default
    @JsonIgnore
    private String connectionId;
    @JsonIgnore
    private Integer priority = 0;

    private String physParentId = null;
    private Integer parentId = null;
    private String copyOf = null;
    private Integer roChildId = null;
    private Integer rwChildId = null;
    private String snapCPG = null;
    private Long total = null;
    /**
     * Actions are enumerated and listed at
     * https://support.hpe.com/hpesc/public/docDisplay?docId=a00118636en_us&page=v25706371.html
     */
    private Integer action = null;
    private String comment = null;
    private Integer id = null;
    private String name = null;
    private Integer deduplicationState = null;
    private Integer compressionState = null;
    private Integer provisioningType = null;
    private Integer copyType = null;
    private Integer baseId = null;
    private Boolean readOnly = null;
    // Numeric WSAPI state code; exposed as a String by getState().
    private Integer state = null;
    // NOTE(review): raw ArrayList types preserved from the original; element
    // types are not visible here — presumably numeric state codes. Confirm.
    private ArrayList failedStates = null;
    private ArrayList degradedStates = null;
    private ArrayList additionalStates = null;
    private PrimeraVolumeAdminSpace adminSpace = null;
    private PrimeraVolumeSnapshotSpace snapshotSpace = null;
    private PrimeraVolumeUserSpace userSpace = null;
    private Integer totalReservedMiB = null;
    private Integer totalUsedMiB = null;
    private Integer sizeMiB = null;
    private Integer hostWriteMiB = null;
    private String wwn = null;
    private Integer creationTimeSec = null;
    private Date creationTime8601 = null;
    private Integer ssSpcAllocWarningPct;
    private Integer ssSpcAllocLimitPct = null;
    private Integer usrSpcAllocWarningPct = null;
    private Integer usrSpcAllocLimitPct = null;
    private PrimeraVolumePolicies policies = null;
    private String userCPG = null;
    private String uuid = null;
    private Integer sharedParentId = null;
    private Integer udid = null;
    private PrimeraVolumeCapacityEfficiency capacityEfficiency = null;
    private Integer rcopyStatus = null;
    private ArrayList links = null;

    // --- Plain accessors for the WSAPI-mapped fields ---

    public String getPhysParentId() { return physParentId; }
    public void setPhysParentId(String physParentId) { this.physParentId = physParentId; }
    public Integer getParentId() { return parentId; }
    public void setParentId(Integer parentId) { this.parentId = parentId; }
    public String getCopyOf() { return copyOf; }
    public void setCopyOf(String copyOf) { this.copyOf = copyOf; }
    public Integer getRoChildId() { return roChildId; }
    public void setRoChildId(Integer roChildId) { this.roChildId = roChildId; }
    public Integer getRwChildId() { return rwChildId; }
    public void setRwChildId(Integer rwChildId) { this.rwChildId = rwChildId; }
    public String getSnapCPG() { return snapCPG; }
    public void setSnapCPG(String snapCPG) { this.snapCPG = snapCPG; }
    public Long getTotal() { return total; }
    public void setTotal(Long total) { this.total = total; }
    public Integer getAction() { return action; }
    public void setAction(Integer action) { this.action = action; }
    public String getComment() { return comment; }
    public void setComment(String comment) { this.comment = comment; }
    public String getName() { return name; }
    public void setName(String name) { this.name = name; }
    public Integer getDeduplicationState() { return deduplicationState; }
    public void setDeduplicationState(Integer deduplicationState) { this.deduplicationState = deduplicationState; }
    public Integer getCompressionState() { return compressionState; }
    public void setCompressionState(Integer compressionState) { this.compressionState = compressionState; }
    public Integer getProvisioningType() { return provisioningType; }
    public void setProvisioningType(Integer provisioningType) { this.provisioningType = provisioningType; }
    public Integer getCopyType() { return copyType; }
    public void setCopyType(Integer copyType) { this.copyType = copyType; }
    public Integer getBaseId() { return baseId; }
    public void setBaseId(Integer baseId) { this.baseId = baseId; }
    public Boolean getReadOnly() { return readOnly; }
    public void setReadOnly(Boolean readOnly) { this.readOnly = readOnly; }

    /**
     * Returns the numeric WSAPI state code rendered as a String, or null when
     * no state was reported. The code is not translated to a symbolic name.
     */
    public String getState() {
        if (state != null) {
            return state.toString();
        }
        return null;
    }
    public void setState(Integer state) { this.state = state; }

    public ArrayList getFailedStates() { return failedStates; }
    public void setFailedStates(ArrayList failedStates) { this.failedStates = failedStates; }
    public ArrayList getDegradedStates() { return degradedStates; }
    public void setDegradedStates(ArrayList degradedStates) { this.degradedStates = degradedStates; }
    public ArrayList getAdditionalStates() { return additionalStates; }
    public void setAdditionalStates(ArrayList additionalStates) { this.additionalStates = additionalStates; }
    public PrimeraVolumeAdminSpace getAdminSpace() { return adminSpace; }
    public void setAdminSpace(PrimeraVolumeAdminSpace adminSpace) { this.adminSpace = adminSpace; }
    public PrimeraVolumeSnapshotSpace getSnapshotSpace() { return snapshotSpace; }
    public void setSnapshotSpace(PrimeraVolumeSnapshotSpace snapshotSpace) { this.snapshotSpace = snapshotSpace; }
    public PrimeraVolumeUserSpace getUserSpace() { return userSpace; }
    public void setUserSpace(PrimeraVolumeUserSpace userSpace) { this.userSpace = userSpace; }
    public Integer getTotalReservedMiB() { return totalReservedMiB; }
    public void setTotalReservedMiB(Integer totalReservedMiB) { this.totalReservedMiB = totalReservedMiB; }
    public Integer getTotalUsedMiB() { return totalUsedMiB; }
    public void setTotalUsedMiB(Integer totalUsedMiB) { this.totalUsedMiB = totalUsedMiB; }
    public Integer getSizeMiB() { return sizeMiB; }
    public void setSizeMiB(Integer sizeMiB) { this.sizeMiB = sizeMiB; }
    public Integer getHostWriteMiB() { return hostWriteMiB; }
    public void setHostWriteMiB(Integer hostWriteMiB) { this.hostWriteMiB = hostWriteMiB; }
    public String getWwn() { return wwn; }
    public void setWwn(String wwn) { this.wwn = wwn; }
    public Integer getCreationTimeSec() { return creationTimeSec; }
    public void setCreationTimeSec(Integer creationTimeSec) { this.creationTimeSec = creationTimeSec; }
    public Date getCreationTime8601() { return creationTime8601; }
    public void setCreationTime8601(Date creationTime8601) { this.creationTime8601 = creationTime8601; }
    public Integer getSsSpcAllocWarningPct() { return ssSpcAllocWarningPct; }
    public void setSsSpcAllocWarningPct(Integer ssSpcAllocWarningPct) { this.ssSpcAllocWarningPct = ssSpcAllocWarningPct; }
    public Integer getSsSpcAllocLimitPct() { return ssSpcAllocLimitPct; }
    public void setSsSpcAllocLimitPct(Integer ssSpcAllocLimitPct) { this.ssSpcAllocLimitPct = ssSpcAllocLimitPct; }
    public Integer getUsrSpcAllocWarningPct() { return usrSpcAllocWarningPct; }
    public void setUsrSpcAllocWarningPct(Integer usrSpcAllocWarningPct) { this.usrSpcAllocWarningPct = usrSpcAllocWarningPct; }
    public Integer getUsrSpcAllocLimitPct() { return usrSpcAllocLimitPct; }
    public void setUsrSpcAllocLimitPct(Integer usrSpcAllocLimitPct) { this.usrSpcAllocLimitPct = usrSpcAllocLimitPct; }
    public PrimeraVolumePolicies getPolicies() { return policies; }
    public void setPolicies(PrimeraVolumePolicies policies) { this.policies = policies; }
    public String getUserCPG() { return userCPG; }
    public void setUserCPG(String userCPG) { this.userCPG = userCPG; }
    public String getUuid() { return uuid; }
    public void setUuid(String uuid) { this.uuid = uuid; }
    public Integer getSharedParentId() { return sharedParentId; }
    public void setSharedParentId(Integer sharedParentId) { this.sharedParentId = sharedParentId; }
    public Integer getUdid() { return udid; }
    public void setUdid(Integer udid) { this.udid = udid; }
    public PrimeraVolumeCapacityEfficiency getCapacityEfficiency() { return capacityEfficiency; }
    public void setCapacityEfficiency(PrimeraVolumeCapacityEfficiency capacityEfficiency) { this.capacityEfficiency = capacityEfficiency; }
    public Integer getRcopyStatus() { return rcopyStatus; }
    public void setRcopyStatus(Integer rcopyStatus) { this.rcopyStatus = rcopyStatus; }
    public ArrayList getLinks() { return links; }
    public void setLinks(ArrayList links) { this.links = links; }

    // --- ProviderSnapshot adapter methods ---

    /** Always false: this object carries no destroyed flag from the API. */
    @Override
    @JsonIgnore
    public Boolean isDestroyed() {
        return false;
    }

    /**
     * Stores the provider id, parsing it as a decimal integer.
     * Throws NumberFormatException if {@code id} is null or non-numeric.
     */
    @Override
    public void setId(String id) {
        this.id = Integer.parseInt(id);
    }

    /** @return the numeric id rendered as a String, or null if unset. */
    public String getId() {
        if (id != null) {
            return Integer.toString(id);
        }
        return null;
    }

    @Override
    public Integer getPriority() { return priority; }
    @Override
    public void setPriority(Integer priority) { this.priority = priority; }
    @Override
    public AddressType getAddressType() { return addressType; }
    @Override
    public void setAddressType(AddressType addressType) { this.addressType = addressType; }

    /** The volume's WWN doubles as its provider address. */
    @Override
    public String getAddress() {
        return this.wwn;
    }

    /**
     * Provisioned size converted from MiB to bytes; 0 when the API did not
     * report a size. NOTE(review): assumes PrimeraAdapter.BYTES_IN_MiB is a
     * long so the multiplication cannot overflow int — TODO confirm.
     */
    @Override
    @JsonIgnore
    public Long getAllocatedSizeInBytes() {
        if (this.getSizeMiB() != null) {
            return this.getSizeMiB() * PrimeraAdapter.BYTES_IN_MiB;
        }
        return 0L;
    }

    /**
     * Reserved (used) space converted from MiB to bytes; 0 when unreported.
     * Same overflow caveat as getAllocatedSizeInBytes().
     */
    @Override
    @JsonIgnore
    public Long getUsedBytes() {
        if (this.getTotalReservedMiB() != null) {
            return this.getTotalReservedMiB() * PrimeraAdapter.BYTES_IN_MiB;
        }
        return 0L;
    }

    // External identity is backed by the API-native uuid/name fields.
    @Override
    @JsonIgnore
    public String getExternalUuid() { return uuid; }
    public void setExternalUuid(String uuid) { this.uuid = uuid; }
    @Override
    @JsonIgnore
    public String getExternalName() { return name; }
    public void setExternalName(String name) { this.name = name; }

    @Override
    @JsonIgnore
    public String getExternalConnectionId() { return connectionId; }
    // NOTE(review): setter is named setExternalConnection (no "Id"), so it is
    // not a standard bean pair with the getter above.
    public void setExternalConnection(String connectionId) { this.connectionId = connectionId; }

    /** Always true: Primera volumes can be attached directly by address. */
    @Override
    @JsonIgnore
    public Boolean canAttachDirectly() {
        return true;
    }
}
contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.datastore.adapter.primera; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class PrimeraVolumeAdminSpace { + private int reservedMiB; + private int rawReservedMiB; + private int usedMiB; + private int freeMiB; + public int getReservedMiB() { + return reservedMiB; + } + public void setReservedMiB(int reservedMiB) { + this.reservedMiB = reservedMiB; + } + public int getRawReservedMiB() { + return rawReservedMiB; + } + public void setRawReservedMiB(int rawReservedMiB) { + this.rawReservedMiB = rawReservedMiB; + } + public int getUsedMiB() { + return usedMiB; + } + public void setUsedMiB(int usedMiB) { + this.usedMiB = usedMiB; + } + public int getFreeMiB() { + return freeMiB; + } + public void setFreeMiB(int freeMiB) { + this.freeMiB = freeMiB; + } + +} diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeCapacityEfficiency.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeCapacityEfficiency.java 
new file mode 100644 index 00000000000..b058902d318 --- /dev/null +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeCapacityEfficiency.java @@ -0,0 +1,40 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.storage.datastore.adapter.primera; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class PrimeraVolumeCapacityEfficiency { + private double compaction; + private double deduplication; + public double getCompaction() { + return compaction; + } + public void setCompaction(double compaction) { + this.compaction = compaction; + } + public double getDeduplication() { + return deduplication; + } + public void setDeduplication(double deduplication) { + this.deduplication = deduplication; + } + +} diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeCopyRequest.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeCopyRequest.java new file mode 100644 index 00000000000..779064f6e9b --- /dev/null +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeCopyRequest.java @@ -0,0 +1,43 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.datastore.adapter.primera; + +/** + * https://support.hpe.com/hpesc/public/docDisplay?docId=a00118636en_us&page=v24885490.html + */ +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class PrimeraVolumeCopyRequest { + private String action = "createPhysicalCopy"; + private PrimeraVolumeCopyRequestParameters parameters; + public String getAction() { + return action; + } + public void setAction(String action) { + this.action = action; + } + public PrimeraVolumeCopyRequestParameters getParameters() { + return parameters; + } + public void setParameters(PrimeraVolumeCopyRequestParameters parameters) { + this.parameters = parameters; + } + +} diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeCopyRequestParameters.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeCopyRequestParameters.java new file mode 100644 index 00000000000..33ad0d445f8 --- /dev/null +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeCopyRequestParameters.java @@ -0,0 +1,101 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.datastore.adapter.primera; + +/** + * https://support.hpe.com/hpesc/public/docDisplay?docId=a00118636en_us&page=v24885490.html + */ + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonIgnoreProperties +@JsonInclude(JsonInclude.Include.NON_NULL) +public class PrimeraVolumeCopyRequestParameters { + private String destVolume = null; + private String destCPG = null; + private Boolean online = false; + private String wwn = null; + private Boolean tpvv = null; + private Boolean reduce = null; + private String snapCPG = null; + private Boolean skipZero = null; + private Boolean saveSnapshot = null; + /** 1=HIGH, 2=MED, 3=LOW */ + private Integer priority = null; + public String getDestVolume() { + return destVolume; + } + public void setDestVolume(String destVolume) { + this.destVolume = destVolume; + } + public String getDestCPG() { + return destCPG; + } + public void setDestCPG(String destCPG) { + this.destCPG = destCPG; + } + public Boolean getOnline() { + return online; + } + public void setOnline(Boolean online) { + this.online = online; + } + public String getWwn() { + return wwn; + } + public void setWwn(String wwn) { + this.wwn = wwn; + } + public Boolean getTpvv() { + return tpvv; + } + public void setTpvv(Boolean tpvv) { + this.tpvv = tpvv; + } + public Boolean getReduce() { + return reduce; + } + public void setReduce(Boolean reduce) { + this.reduce = reduce; + } + public String getSnapCPG() { + return snapCPG; + } + public void 
setSnapCPG(String snapCPG) { + this.snapCPG = snapCPG; + } + public Boolean getSkipZero() { + return skipZero; + } + public void setSkipZero(Boolean skipZero) { + this.skipZero = skipZero; + } + public Boolean getSaveSnapshot() { + return saveSnapshot; + } + public void setSaveSnapshot(Boolean saveSnapshot) { + this.saveSnapshot = saveSnapshot; + } + public Integer getPriority() { + return priority; + } + public void setPriority(Integer priority) { + this.priority = priority; + } + +} diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeLink.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeLink.java new file mode 100644 index 00000000000..27b19bcc9e8 --- /dev/null +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeLink.java @@ -0,0 +1,40 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.storage.datastore.adapter.primera; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class PrimeraVolumeLink { + private String href; + private String rel; + public String getHref() { + return href; + } + public void setHref(String href) { + this.href = href; + } + public String getRel() { + return rel; + } + public void setRel(String rel) { + this.rel = rel; + } + +} diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeLinkList.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeLinkList.java new file mode 100644 index 00000000000..01bec70acc3 --- /dev/null +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeLinkList.java @@ -0,0 +1,37 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.storage.datastore.adapter.primera; + +import java.util.List; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class PrimeraVolumeLinkList { + private List list; + + public List getList() { + return list; + } + + public void setList(List list) { + this.list = list; + } + +} diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumePolicies.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumePolicies.java new file mode 100644 index 00000000000..2baa9a2ddca --- /dev/null +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumePolicies.java @@ -0,0 +1,82 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.storage.datastore.adapter.primera; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class PrimeraVolumePolicies { + private Boolean tpZeroFill; + private Boolean staleSS; + private Boolean oneHost; + private Boolean zeroDetect; + private Boolean system; + private Boolean caching; + private Boolean fsvc; + private Integer hostDIF; + public Boolean getTpZeroFill() { + return tpZeroFill; + } + public void setTpZeroFill(Boolean tpZeroFill) { + this.tpZeroFill = tpZeroFill; + } + public Boolean getStaleSS() { + return staleSS; + } + public void setStaleSS(Boolean staleSS) { + this.staleSS = staleSS; + } + public Boolean getOneHost() { + return oneHost; + } + public void setOneHost(Boolean oneHost) { + this.oneHost = oneHost; + } + public Boolean getZeroDetect() { + return zeroDetect; + } + public void setZeroDetect(Boolean zeroDetect) { + this.zeroDetect = zeroDetect; + } + public Boolean getSystem() { + return system; + } + public void setSystem(Boolean system) { + this.system = system; + } + public Boolean getCaching() { + return caching; + } + public void setCaching(Boolean caching) { + this.caching = caching; + } + public Boolean getFsvc() { + return fsvc; + } + public void setFsvc(Boolean fsvc) { + this.fsvc = fsvc; + } + public Integer getHostDIF() { + return hostDIF; + } + public void setHostDIF(Integer hostDIF) { + this.hostDIF = hostDIF; + } + +} diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumePromoteRequest.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumePromoteRequest.java new file mode 100644 index 00000000000..48898c27277 --- /dev/null +++ 
b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumePromoteRequest.java @@ -0,0 +1,57 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.datastore.adapter.primera; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class PrimeraVolumePromoteRequest { + /** + * Defines action for the request as described at https://support.hpe.com/hpesc/public/docDisplay?docId=a00114827en_us&page=v25706371.html + */ + private Integer action = 4; + private Boolean online = true; + private Integer priority = 2; // MEDIUM + private Boolean allowRemoteCopyParent = true; + public Integer getAction() { + return action; + } + public void setAction(Integer action) { + this.action = action; + } + public Boolean getOnline() { + return online; + } + public void setOnline(Boolean online) { + this.online = online; + } + public Integer getPriority() { + return priority; + } + public void setPriority(Integer priority) { + this.priority = priority; + } + public Boolean 
getAllowRemoteCopyParent() { + return allowRemoteCopyParent; + } + public void setAllowRemoteCopyParent(Boolean allowRemoteCopyParent) { + this.allowRemoteCopyParent = allowRemoteCopyParent; + } + +} diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeRequest.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeRequest.java new file mode 100644 index 00000000000..b8f67fbb562 --- /dev/null +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeRequest.java @@ -0,0 +1,110 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.storage.datastore.adapter.primera; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class PrimeraVolumeRequest { + private String name; + private String cpg; + private long sizeMiB; + private String comment; + private String snapCPG = null; + private Boolean reduce; + private Boolean tpvv; + private Integer ssSpcAllocLimitPct; + private Integer ssSpcAllocWarningPct; + private Integer usrSpcAllocWarningPct; + private Integer usrSpcAllocLimitPct; + private PrimeraVolumePolicies policies; + public String getName() { + return name; + } + public void setName(String name) { + this.name = name; + } + public String getCpg() { + return cpg; + } + public void setCpg(String cpg) { + this.cpg = cpg; + } + public long getSizeMiB() { + return sizeMiB; + } + public void setSizeMiB(long sizeMiB) { + this.sizeMiB = sizeMiB; + } + public String getComment() { + return comment; + } + public void setComment(String comment) { + this.comment = comment; + } + public String getSnapCPG() { + return snapCPG; + } + public void setSnapCPG(String snapCPG) { + this.snapCPG = snapCPG; + } + public Boolean getReduce() { + return reduce; + } + public void setReduce(Boolean reduce) { + this.reduce = reduce; + } + public Boolean getTpvv() { + return tpvv; + } + public void setTpvv(Boolean tpvv) { + this.tpvv = tpvv; + } + public Integer getSsSpcAllocLimitPct() { + return ssSpcAllocLimitPct; + } + public void setSsSpcAllocLimitPct(Integer ssSpcAllocLimitPct) { + this.ssSpcAllocLimitPct = ssSpcAllocLimitPct; + } + public Integer getSsSpcAllocWarningPct() { + return ssSpcAllocWarningPct; + } + public void setSsSpcAllocWarningPct(Integer ssSpcAllocWarningPct) { + this.ssSpcAllocWarningPct = ssSpcAllocWarningPct; + } + public Integer getUsrSpcAllocWarningPct() { + return usrSpcAllocWarningPct; + } + public 
void setUsrSpcAllocWarningPct(Integer usrSpcAllocWarningPct) { + this.usrSpcAllocWarningPct = usrSpcAllocWarningPct; + } + public Integer getUsrSpcAllocLimitPct() { + return usrSpcAllocLimitPct; + } + public void setUsrSpcAllocLimitPct(Integer usrSpcAllocLimitPct) { + this.usrSpcAllocLimitPct = usrSpcAllocLimitPct; + } + public PrimeraVolumePolicies getPolicies() { + return policies; + } + public void setPolicies(PrimeraVolumePolicies policies) { + this.policies = policies; + } + +} diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeRevertSnapshotRequest.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeRevertSnapshotRequest.java new file mode 100644 index 00000000000..fcdd7a81b6b --- /dev/null +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeRevertSnapshotRequest.java @@ -0,0 +1,50 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.storage.datastore.adapter.primera; + +/** + * https://support.hpe.com/hpesc/public/docDisplay?docId=a00118636en_us&page=s_creating_snapshot_volumes.html + */ +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class PrimeraVolumeRevertSnapshotRequest { + private int action = 4; //PROMOTE_VIRTUAL_COPY + private Boolean online = true; + private Integer priority = 2; + public int getAction() { + return action; + } + public void setAction(int action) { + this.action = action; + } + public Boolean getOnline() { + return online; + } + public void setOnline(Boolean online) { + this.online = online; + } + public Integer getPriority() { + return priority; + } + public void setPriority(Integer priority) { + this.priority = priority; + } + +} diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeSnapshotRequest.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeSnapshotRequest.java new file mode 100644 index 00000000000..6fb0f195e8b --- /dev/null +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeSnapshotRequest.java @@ -0,0 +1,43 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.datastore.adapter.primera; + +/** + * https://support.hpe.com/hpesc/public/docDisplay?docId=a00118636en_us&page=s_creating_snapshot_volumes.html + */ +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class PrimeraVolumeSnapshotRequest { + private String action = "createSnapshot"; + private PrimeraVolumeSnapshotRequestParameters parameters; + public String getAction() { + return action; + } + public void setAction(String action) { + this.action = action; + } + public PrimeraVolumeSnapshotRequestParameters getParameters() { + return parameters; + } + public void setParameters(PrimeraVolumeSnapshotRequestParameters parameters) { + this.parameters = parameters; + } + +} diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeSnapshotRequestParameters.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeSnapshotRequestParameters.java new file mode 100644 index 00000000000..de2fe24d638 --- /dev/null +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeSnapshotRequestParameters.java @@ -0,0 +1,85 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.datastore.adapter.primera; + +/** + * https://support.hpe.com/hpesc/public/docDisplay?docId=a00118636en_us&page=s_creating_snapshot_volumes.html + */ +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class PrimeraVolumeSnapshotRequestParameters { + private String name = null; + private String id = null; + private String comment = null; + private Boolean readOnly = false; + private Integer expirationHours = null; + private Integer retentionHours = null; + private String addToSet = null; + private Boolean syncSnapRcopy = false; + public String getName() { + return name; + } + public void setName(String name) { + this.name = name; + } + public String getId() { + return id; + } + public void setId(String id) { + this.id = id; + } + public String getComment() { + return comment; + } + public void setComment(String comment) { + this.comment = comment; + } + public Boolean getReadOnly() { + return readOnly; + } + public void setReadOnly(Boolean readOnly) { + this.readOnly = readOnly; + } + public Integer getExpirationHours() { + return expirationHours; + } + public void 
setExpirationHours(Integer expirationHours) { + this.expirationHours = expirationHours; + } + public Integer getRetentionHours() { + return retentionHours; + } + public void setRetentionHours(Integer retentionHours) { + this.retentionHours = retentionHours; + } + public String getAddToSet() { + return addToSet; + } + public void setAddToSet(String addToSet) { + this.addToSet = addToSet; + } + public Boolean getSyncSnapRcopy() { + return syncSnapRcopy; + } + public void setSyncSnapRcopy(Boolean syncSnapRcopy) { + this.syncSnapRcopy = syncSnapRcopy; + } + +} diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeSnapshotSpace.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeSnapshotSpace.java new file mode 100644 index 00000000000..2cb0d53844a --- /dev/null +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeSnapshotSpace.java @@ -0,0 +1,54 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.storage.datastore.adapter.primera; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class PrimeraVolumeSnapshotSpace { + private int reservedMiB; + private int rawReservedMiB; + private int usedMiB; + private int freeMiB; + public int getReservedMiB() { + return reservedMiB; + } + public void setReservedMiB(int reservedMiB) { + this.reservedMiB = reservedMiB; + } + public int getRawReservedMiB() { + return rawReservedMiB; + } + public void setRawReservedMiB(int rawReservedMiB) { + this.rawReservedMiB = rawReservedMiB; + } + public int getUsedMiB() { + return usedMiB; + } + public void setUsedMiB(int usedMiB) { + this.usedMiB = usedMiB; + } + public int getFreeMiB() { + return freeMiB; + } + public void setFreeMiB(int freeMiB) { + this.freeMiB = freeMiB; + } + +} diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeUpdateRequest.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeUpdateRequest.java new file mode 100644 index 00000000000..07b3425d126 --- /dev/null +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeUpdateRequest.java @@ -0,0 +1,35 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.datastore.adapter.primera; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class PrimeraVolumeUpdateRequest { + private String comment; + + public String getComment() { + return comment; + } + + public void setComment(String comment) { + this.comment = comment; + } + +} diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeUserSpace.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeUserSpace.java new file mode 100644 index 00000000000..e4cea1781f3 --- /dev/null +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeUserSpace.java @@ -0,0 +1,54 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.datastore.adapter.primera; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class PrimeraVolumeUserSpace { + private int reservedMiB; + private int rawReservedMiB; + private int usedMiB; + private int freeMiB; + public int getReservedMiB() { + return reservedMiB; + } + public void setReservedMiB(int reservedMiB) { + this.reservedMiB = reservedMiB; + } + public int getRawReservedMiB() { + return rawReservedMiB; + } + public void setRawReservedMiB(int rawReservedMiB) { + this.rawReservedMiB = rawReservedMiB; + } + public int getUsedMiB() { + return usedMiB; + } + public void setUsedMiB(int usedMiB) { + this.usedMiB = usedMiB; + } + public int getFreeMiB() { + return freeMiB; + } + public void setFreeMiB(int freeMiB) { + this.freeMiB = freeMiB; + } + +} diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/provider/PrimeraPrimaryDatastoreProviderImpl.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/provider/PrimeraPrimaryDatastoreProviderImpl.java new file mode 100644 index 00000000000..e5ce327c5b1 --- /dev/null +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/provider/PrimeraPrimaryDatastoreProviderImpl.java @@ -0,0 +1,32 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license 
agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.datastore.provider; + +import org.apache.cloudstack.storage.datastore.adapter.primera.PrimeraAdapterFactory; + +public class PrimeraPrimaryDatastoreProviderImpl extends AdaptivePrimaryDatastoreProviderImpl { + + public PrimeraPrimaryDatastoreProviderImpl() { + super(new PrimeraAdapterFactory()); + } + + @Override + public String getName() { + return "Primera"; + } + +} diff --git a/plugins/storage/volume/primera/src/main/resources/META-INF/cloudstack/storage-volume-primera/module.properties b/plugins/storage/volume/primera/src/main/resources/META-INF/cloudstack/storage-volume-primera/module.properties new file mode 100644 index 00000000000..8b4bb66df78 --- /dev/null +++ b/plugins/storage/volume/primera/src/main/resources/META-INF/cloudstack/storage-volume-primera/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +name=storage-volume-primera +parent=storage diff --git a/plugins/storage/volume/primera/src/main/resources/META-INF/cloudstack/storage-volume-primera/spring-storage-volume-primera-context.xml b/plugins/storage/volume/primera/src/main/resources/META-INF/cloudstack/storage-volume-primera/spring-storage-volume-primera-context.xml new file mode 100644 index 00000000000..92f0bf23394 --- /dev/null +++ b/plugins/storage/volume/primera/src/main/resources/META-INF/cloudstack/storage-volume-primera/spring-storage-volume-primera-context.xml @@ -0,0 +1,35 @@ + + + + + + diff --git a/scripts/storage/multipath/cleanStaleMaps.sh b/scripts/storage/multipath/cleanStaleMaps.sh new file mode 100644 index 00000000000..90b9bef5a8d --- /dev/null +++ b/scripts/storage/multipath/cleanStaleMaps.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations +# under the License. + +############################################################################################# +# +# Clean old multipath maps that have 0 paths available +# +############################################################################################# + +cd $(dirname $0) + +for WWID in $(multipathd list maps status | awk '{ if ($4 == 0) { print substr($1,2); }}'); do + ./removeVolume.sh ${WWID} +done + +exit 0 diff --git a/scripts/storage/multipath/connectVolume.sh b/scripts/storage/multipath/connectVolume.sh new file mode 100644 index 00000000000..fb8387ece47 --- /dev/null +++ b/scripts/storage/multipath/connectVolume.sh @@ -0,0 +1,133 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +##################################################################################### +# +# Given a lun # and a WWID for a volume provisioned externally, find the volume +# through the SCSI bus and make sure its visable via multipath +# +##################################################################################### + + +LUN=${1:?"LUN required"} +WWID=${2:?"WWID required"} + +WWID=$(echo $WWID | tr '[:upper:]' '[:lower:]') + +systemctl is-active multipathd || systemctl restart multipathd || { + echo "$(date): Multipathd is NOT running and cannot be started. This must be corrected before this host can access this storage volume." + logger -t "CS_SCSI_VOL_FIND" "${WWID} cannot be mapped to this host because multipathd is not currently running and cannot be started" + exit 1 +} + +echo "$(date): Looking for ${WWID} on lun ${LUN}" + +# get vendor OUI. we will only delete a device on the designated lun if it matches the +# incoming WWN OUI value. This is because multiple storage arrays may be mapped to the +# host on different fiber channel hosts with the same LUN +INCOMING_OUI=$(echo ${WWID} | cut -c2-7) +echo "$(date): Incoming OUI: ${INCOMING_OUI}" + +# first we need to check if any stray references are left from a previous use of this lun +for fchost in $(ls /sys/class/fc_host | sed -e 's/host//g'); do + lingering_devs=$(lsscsi -w "${fchost}:*:*:${LUN}" | grep /dev | awk '{if (NF > 6) { printf("%s:%s ", $NF, $(NF-1));} }' | sed -e 's/0x/3/g') + + if [ ! 
-z "${lingering_devs}" ]; then + for dev in ${lingering_devs}; do + LSSCSI_WWID=$(echo $dev | awk -F: '{print $2}' | sed -e 's/0x/3/g') + FOUND_OUI=$(echo ${LSSCSI_WWID} | cut -c3-8) + if [ "${INCOMING_OUI}" != "${FOUND_OUI}" ]; then + continue; + fi + dev=$(echo $dev | awk -F: '{ print $1}') + logger -t "CS_SCSI_VOL_FIND" "${WWID} processing identified a lingering device ${dev} from previous lun use, attempting to clean up" + MP_WWID=$(multipath -l ${dev} | head -1 | awk '{print $1}') + MP_WWID=${MP_WWID:1} # strip first character (3) off + # don't do this if the WWID passed in matches the WWID from multipath + if [ ! -z "${MP_WWID}" ] && [ "${MP_WWID}" != "${WWID}" ]; then + # run full removal again so all devices and multimap are cleared + $(dirname $0)/disconnectVolume.sh ${MP_WWID} + # we don't have a multimap but we may still have some stranded devices to clean up + elif [ "${LSSCSI_WWID}" != "${WWID}" ]; then + echo "1" > /sys/block/$(echo ${dev} | awk -F'/' '{print $NF}')/device/delete + fi + done + sleep 3 + fi +done + +logger -t "CS_SCSI_VOL_FIND" "${WWID} awaiting disk path at /dev/mapper/3${WWID}" + +# wait for multipath to map the new lun to the WWID +echo "$(date): Waiting for multipath entry to show up for the WWID" +while true; do + ls /dev/mapper/3${WWID} >/dev/null 2>&1 + if [ $? == 0 ]; then + break + fi + + logger -t "CS_SCSI_VOL_FIND" "${WWID} not available yet, triggering scan" + + # instruct bus to scan for new lun + for fchost in $(ls /sys/class/fc_host); do + echo " --> Scanning ${fchost}" + echo "- - ${LUN}" > /sys/class/scsi_host/${fchost}/scan + done + + multipath -v2 2>/dev/null + + ls /dev/mapper/3${WWID} >/dev/null 2>&1 + if [ $? 
== 0 ]; then + break + fi + + sleep 5 +done + +echo "$(date): Doing a recan to make sure we have proper current size locally" +for device in $(multipath -ll 3${WWID} | egrep '^ ' | awk '{print $2}'); do + echo "1" > /sys/bus/scsi/drivers/sd/${device}/rescan; +done + +sleep 3 + +multipathd reconfigure + +sleep 3 + +# cleanup any old/faulty paths +delete_needed=false +multipath -l 3${WWID} +for dev in $(multipath -l 3${WWID} 2>/dev/null| grep failed | awk '{print $3}' ); do + logger -t "CS_SCSI_VOL_FIND" "${WWID} multipath contains faulty path ${dev}, removing" + echo 1 > /sys/block/${dev}/device/delete; + delete_needed=true +done + +if [ "${delete_needed}" == "true" ]; then + sleep 10 + multipath -v2 >/dev/null +fi + +multipath -l 3${WWID} + +logger -t "CS_SCSI_VOL_FIND" "${WWID} successfully discovered and available" + +echo "$(date): Complete - found mapped LUN at /dev/mapper/3${WWID}" + +exit 0 diff --git a/scripts/storage/multipath/copyVolume.sh b/scripts/storage/multipath/copyVolume.sh new file mode 100644 index 00000000000..d169198251b --- /dev/null +++ b/scripts/storage/multipath/copyVolume.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +OUTPUT_FORMAT=${1:?"Output format is required"} +INPUT_FILE=${2:?"Input file/path is required"} +OUTPUT_FILE=${3:?"Output file/path is required"} + +echo "$(date): qemu-img convert -n -p -W -t none -O ${OUTPUT_FORMAT} ${INPUT_FILE} ${OUTPUT_FILE}" + +qemu-img convert -n -p -W -t none -O ${OUTPUT_FORMAT} ${INPUT_FILE} ${OUTPUT_FILE} && { + # if its a block device make sure we flush caches before exiting + lsblk ${OUTPUT_FILE} >/dev/null 2>&1 && { + blockdev --flushbufs ${OUTPUT_FILE} + hdparm -F ${OUTPUT_FILE} + } + exit 0 +} diff --git a/scripts/storage/multipath/disconnectVolume.sh b/scripts/storage/multipath/disconnectVolume.sh new file mode 100644 index 00000000000..067e561f8a3 --- /dev/null +++ b/scripts/storage/multipath/disconnectVolume.sh @@ -0,0 +1,71 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +######################################################################################### +# +# Given a WWID, cleanup/remove any multipath and devices associated with this WWID. This +# may not always have lasting result because if the storage array still has the volume +# visable to the host, it may be rediscovered. 
The cleanupStaleMaps.sh script should +# catch those cases +# +######################################################################################### + +WWID=${1:?"WWID required"} +WWID=$(echo $WWID | tr '[:upper:]' '[:lower:]') + +echo "$(date): Removing ${WWID}" + +systemctl is-active multipathd || systemctl restart multipathd || { + echo "$(date): Multipathd is NOT running and cannot be started. This must be corrected before this host can access this storage volume." + logger -t "CS_SCSI_VOL_REMOVE" "${WWID} cannot be disconnected from this host because multipathd is not currently running and cannot be started" + exit 1 +} + +# first get dm- name +DM_NAME=$(ls -lrt /dev/mapper/3${WWID} | awk '{ print $NF }' | awk -F'/' '{print $NF}') +SLAVE_DEVS="" +if [ -z "${DM_NAME}" ]; then + logger -t CS_SCSI_VOL_REMOVE "${WWID} has no active multimap so no removal performed" + logger -t CS_SCSI_VOL_REMOVE "WARN: dm name could not be found for ${WWID}" + dmsetup remove /dev/mapper/*${WWID} + logger -t CS_SCSI_VOL_REMOVE "${WWID} removal via dmsetup remove /dev/mapper/${WWID} finished with return code $?" +else + # now look for slave devices and save for deletion + for dev in $(ls /sys/block/${DM_NAME}/slaves/ 2>/dev/null); do + SLAVE_DEVS="${SLAVE_DEVS} ${dev}" + done +fi + +# delete the path map last +multipath -f 3${WWID} + +# now delete slave devices +# https://bugzilla.redhat.com/show_bug.cgi?id=1949369 +if [ ! 
-z "${SLAVE_DEVS}" ]; then + for dev in ${SLAVE_DEVS}; do + multipathd del path /dev/${dev} + echo "1" > /sys/block/${dev}/device/delete + logger -t CS_SCSI_VOL_REMOVE "${WWID} removal of device ${dev} complete" + done +fi + +logger -t CS_SCSI_VOL_REMOVE "${WWID} successfully purged from multipath along with slave devices" + +echo "$(date): ${WWID} removed" + +exit 0 diff --git a/scripts/storage/multipath/resizeVolume.sh b/scripts/storage/multipath/resizeVolume.sh new file mode 100644 index 00000000000..1b44a71b46a --- /dev/null +++ b/scripts/storage/multipath/resizeVolume.sh @@ -0,0 +1,70 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +notifyqemu() { + if `virsh help 2>/dev/null | grep -q blockresize` + then + if `virsh domstate $VMNAME >/dev/null 2>&1` + then + sizeinkb=$(($NEWSIZE/1024)) + devicepath=$(virsh domblklist $VMNAME | grep ${WWID} | awk '{print $1}') + virsh blockresize --path $devicepath --size $sizeinkb ${VMNAME} >/dev/null 2>&1 + retval=$? 
+ if [ -z $retval ] || [ $retval -ne 0 ] + then + log "failed to live resize $path to size of $sizeinkb kb" 1 + else + liveresize='true' + fi + fi + fi +} + +WWID=${1:?"WWID required"} +VMNAME=${2:?"VMName required"} +NEWSIZE=${3:?"New size required in bytes"} + +WWID=$(echo $WWID | tr '[:upper:]' '[:lower:]') + +export WWID VMNAME NEWSIZE + +systemctl is-active multipathd || systemctl restart multipathd || { + echo "$(date): Multipathd is NOT running and cannot be started. This must be corrected before this host can access this storage volume." + logger -t "CS_SCSI_VOL_RESIZE" "Unable to notify running VM of resize for ${WWID} because multipathd is not currently running and cannot be started" + exit 1 +} + +logger -t "CS_SCSI_VOL_RESIZE" "${WWID} resizing disk path at /dev/mapper/3${WWID} STARTING" + +for device in $(multipath -ll 3${WWID} | egrep '^ ' | awk '{print $2}'); do + echo "1" > /sys/bus/scsi/drivers/sd/${device}/rescan; +done + +sleep 3 + +multipathd reconfigure + +sleep 3 + +multipath -ll 3${WWID} + +notifyqemu + +logger -t "CS_SCSI_VOL_RESIZE" "${WWID} resizing disk path at /dev/mapper/3${WWID} COMPLETE" + +exit 0 diff --git a/server/src/main/java/com/cloud/api/ApiDBUtils.java b/server/src/main/java/com/cloud/api/ApiDBUtils.java index a3532a79af4..3cade046c74 100644 --- a/server/src/main/java/com/cloud/api/ApiDBUtils.java +++ b/server/src/main/java/com/cloud/api/ApiDBUtils.java @@ -1294,7 +1294,7 @@ public class ApiDBUtils { type = HypervisorType.Hyperv; } } if (format == ImageFormat.RAW) { - // Currently, KVM only supports RBD and PowerFlex images of type RAW. + // Currently, KVM only supports RBD, PowerFlex, and FiberChannel images of type RAW. // This results in a weird collision with OVM volumes which // can only be raw, thus making KVM RBD volumes show up as OVM // rather than RBD. 
This block of code can (hopefully) by checking to @@ -1306,10 +1306,12 @@ public class ApiDBUtils { ListIterator itr = pools.listIterator(); while(itr.hasNext()) { StoragePoolVO pool = itr.next(); - if(pool.getPoolType() == StoragePoolType.RBD || - pool.getPoolType() == StoragePoolType.PowerFlex || - pool.getPoolType() == StoragePoolType.CLVM || - pool.getPoolType() == StoragePoolType.Linstor) { + + if(List.of(StoragePoolType.RBD, + StoragePoolType.PowerFlex, + StoragePoolType.CLVM, + StoragePoolType.Linstor, + StoragePoolType.FiberChannel).contains(pool.getPoolType())) { // This case will note the presence of non-qcow2 primary stores, suggesting KVM without NFS. Otherwse, // If this check is not passed, the hypervisor type will remain OVM. type = HypervisorType.KVM; diff --git a/server/src/main/java/com/cloud/api/dispatch/ParamGenericValidationWorker.java b/server/src/main/java/com/cloud/api/dispatch/ParamGenericValidationWorker.java index 45d4ed7f773..009d88a983b 100644 --- a/server/src/main/java/com/cloud/api/dispatch/ParamGenericValidationWorker.java +++ b/server/src/main/java/com/cloud/api/dispatch/ParamGenericValidationWorker.java @@ -92,7 +92,9 @@ public class ParamGenericValidationWorker implements DispatchWorker { break; } } - if (!matchedCurrentParam && !((String)actualParamName).equalsIgnoreCase("expires") && !((String)actualParamName).equalsIgnoreCase("signatureversion")) { + if (!matchedCurrentParam && !((String)actualParamName).equalsIgnoreCase("expires") && + !((String)actualParamName).equalsIgnoreCase("signatureversion") && + !((String)actualParamName).equalsIgnoreCase("projectid")) { errorMsg.append(" ").append(actualParamName); foundUnknownParam= true; } diff --git a/server/src/main/java/com/cloud/server/StatsCollector.java b/server/src/main/java/com/cloud/server/StatsCollector.java index 91410198e2f..c885bce1afc 100644 --- a/server/src/main/java/com/cloud/server/StatsCollector.java +++ b/server/src/main/java/com/cloud/server/StatsCollector.java 
@@ -114,6 +114,9 @@ import com.cloud.org.Cluster; import com.cloud.resource.ResourceManager; import com.cloud.resource.ResourceState; import com.cloud.serializer.GsonHelper; +import com.cloud.server.StatsCollector.AbstractStatsCollector; +import com.cloud.server.StatsCollector.AutoScaleMonitor; +import com.cloud.server.StatsCollector.StorageCollector; import com.cloud.storage.ImageStoreDetailsUtil; import com.cloud.storage.ScopeType; import com.cloud.storage.Storage; @@ -1620,7 +1623,8 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc for (StoragePoolVO pool : pools) { List volumes = _volsDao.findByPoolId(pool.getId(), null); for (VolumeVO volume : volumes) { - if (volume.getFormat() != ImageFormat.QCOW2 && volume.getFormat() != ImageFormat.VHD && volume.getFormat() != ImageFormat.OVA && (volume.getFormat() != ImageFormat.RAW || pool.getPoolType() != Storage.StoragePoolType.PowerFlex)) { + if (!List.of(ImageFormat.QCOW2, ImageFormat.VHD, ImageFormat.OVA, ImageFormat.RAW).contains(volume.getFormat()) && + !List.of(Storage.StoragePoolType.PowerFlex, Storage.StoragePoolType.FiberChannel).contains(pool.getPoolType())) { LOGGER.warn("Volume stats not implemented for this format type " + volume.getFormat()); break; } diff --git a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java index 481c200c49d..4bc471c5084 100644 --- a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java @@ -1052,36 +1052,56 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C _storagePoolTagsDao.persist(pool.getId(), storagePoolTags, cmd.isTagARule()); } + boolean changes = false; Long updatedCapacityBytes = null; Long capacityBytes = cmd.getCapacityBytes(); if (capacityBytes != null) { if (capacityBytes != pool.getCapacityBytes()) { updatedCapacityBytes = capacityBytes; + changes = 
true; } } Long updatedCapacityIops = null; Long capacityIops = cmd.getCapacityIops(); - if (capacityIops != null) { if (!capacityIops.equals(pool.getCapacityIops())) { updatedCapacityIops = capacityIops; + changes = true; } } - if (updatedCapacityBytes != null || updatedCapacityIops != null) { + // retrieve current details and merge/overlay input to capture changes + Map inputDetails = extractApiParamAsMap(cmd.getDetails()); + Map details = null; + if (inputDetails == null) { + details = _storagePoolDetailsDao.listDetailsKeyPairs(id); + } else { + details = _storagePoolDetailsDao.listDetailsKeyPairs(id); + details.putAll(inputDetails); + changes = true; + } + + if (changes) { StoragePoolVO storagePool = _storagePoolDao.findById(id); DataStoreProvider dataStoreProvider = _dataStoreProviderMgr.getDataStoreProvider(storagePool.getStorageProviderName()); DataStoreLifeCycle dataStoreLifeCycle = dataStoreProvider.getDataStoreLifeCycle(); if (dataStoreLifeCycle instanceof PrimaryDataStoreLifeCycle) { - Map details = new HashMap(); - - details.put(PrimaryDataStoreLifeCycle.CAPACITY_BYTES, updatedCapacityBytes != null ? String.valueOf(updatedCapacityBytes) : null); - details.put(PrimaryDataStoreLifeCycle.CAPACITY_IOPS, updatedCapacityIops != null ? String.valueOf(updatedCapacityIops) : null); - - ((PrimaryDataStoreLifeCycle)dataStoreLifeCycle).updateStoragePool(storagePool, details); + if (updatedCapacityBytes != null) { + details.put(PrimaryDataStoreLifeCycle.CAPACITY_BYTES, updatedCapacityBytes != null ? String.valueOf(updatedCapacityBytes) : null); + _storagePoolDao.updateCapacityBytes(id, updatedCapacityBytes); + } + if (updatedCapacityIops != null) { + details.put(PrimaryDataStoreLifeCycle.CAPACITY_IOPS, updatedCapacityIops != null ? 
String.valueOf(updatedCapacityIops) : null); + _storagePoolDao.updateCapacityIops(id, updatedCapacityIops); + } + if (cmd.getUrl() != null) { + details.put("url", cmd.getUrl()); + } + _storagePoolDao.update(id, storagePool); + _storagePoolDao.updateDetails(id, details); } } @@ -1094,14 +1114,6 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } } - if (updatedCapacityBytes != null) { - _storagePoolDao.updateCapacityBytes(id, capacityBytes); - } - - if (updatedCapacityIops != null) { - _storagePoolDao.updateCapacityIops(id, capacityIops); - } - return (PrimaryDataStoreInfo)_dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary); } diff --git a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java index 69b5f984081..ba24ea3be54 100644 --- a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java +++ b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java @@ -1253,7 +1253,9 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic if (storagePoolId != null) { StoragePoolVO storagePoolVO = _storagePoolDao.findById(storagePoolId); - if (storagePoolVO.isManaged() && !storagePoolVO.getPoolType().equals(Storage.StoragePoolType.PowerFlex)) { + if (storagePoolVO.isManaged() && !List.of( + Storage.StoragePoolType.PowerFlex, + Storage.StoragePoolType.FiberChannel).contains(storagePoolVO.getPoolType())) { Long instanceId = volume.getInstanceId(); if (instanceId != null) { diff --git a/ui/public/locales/en.json b/ui/public/locales/en.json index e6650e9bb51..c798ed8dc0f 100644 --- a/ui/public/locales/en.json +++ b/ui/public/locales/en.json @@ -1551,6 +1551,10 @@ "label.presetup": "PreSetup", "label.prev": "Prev", "label.previous": "Previous", +"label.primera.username.tooltip": "The username with edit privileges", +"label.primera.url.tooltip": "URL designating the Primera storage array endpoint, formatted as: 
http[s]://HOSTNAME:PORT?cpg=NAME&hostset=NAME[&skipTlsValidation=true][&snapCPG=NAME][&taskWaitTimeoutMs=#][&keyttl=#][&connectTimeoutMs=#] where values in [] are optional.", +"label.flashArray.username.tooltip": "The username with edit privileges", +"label.flashArray.url.tooltip": "URL designating the Flash Array endpoint, formatted as: http[s]://HOSTNAME:PORT?pod=NAME&hostgroup=NAME[&skipTlsValidation=true][&postCopyWaitMs=#][&keyttl=#][&connectTimeoutMs=#][&apiLoginVersion=#][&apiVersion=#] where values in [] are optional.", "label.primary": "Primary", "label.primary.storage": "Primary storage", "label.primary.storage.allocated": "Primary storage allocated", diff --git a/ui/src/views/infra/AddPrimaryStorage.vue b/ui/src/views/infra/AddPrimaryStorage.vue index 91c0dcbf42e..730a806307c 100644 --- a/ui/src/views/infra/AddPrimaryStorage.vue +++ b/ui/src/views/infra/AddPrimaryStorage.vue @@ -231,7 +231,7 @@ -
+