FiberChannel Multipath for KVM + Pure Flash Array and HPE-Primera Support (#7889)

This PR makes three changes. First, it provides a new primary storage volume type called "FiberChannel" that allows access to volumes connected to hosts over Fibre Channel; it relies on multipath for path discovery and failover. Second, it adds an AdaptivePrimaryDatastoreProvider that, through a ProviderAdapter interface, abstracts how volumes are managed/orchestrated away from the connector that communicates with the primary storage provider, so the code that talks to the primary storage provider's APIs stays simple and has no direct dependencies on CloudStack code. Lastly, it provides implementations of the ProviderAdapter classes for the HPE Primera line of storage arrays and the Pure Storage FlashArray line of storage arrays.
Rene Glover 2023-12-09 00:01:33 -06:00 committed by GitHub
parent 4e46f5ad17
commit 1031c31e6a
103 changed files with 10465 additions and 153 deletions
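To make the ProviderAdapter seam concrete, here is a minimal sketch of the split the description refers to. The method names below are illustrative assumptions, not the interface actually shipped in this PR (which presumably carries a richer surface):

// Illustrative sketch only -- the real ProviderAdapter in this PR is larger;
// these names are assumptions chosen to show the shape of the seam.
public interface ProviderAdapter {
    // establish/refresh a session against the array's management API
    void refresh(java.util.Map<String, String> connectionDetails);

    // create a volume on the array and return its address (e.g. a WWN)
    String create(String name, long sizeInBytes);

    // grow an existing volume to the new size
    void resize(String address, long newSizeInBytes);

    // remove the volume from the array
    void delete(String address);
}

Because such an interface uses only JDK types, the Primera and FlashArray implementations can talk to their arrays' REST APIs without importing CloudStack classes; the AdaptivePrimaryDatastoreProvider adapts between this seam and the CloudStack datastore SPI.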

View File

@ -77,13 +77,18 @@ public class Storage {
}
public static enum Capability {
HARDWARE_ACCELERATION("HARDWARE_ACCELERATION");
HARDWARE_ACCELERATION("HARDWARE_ACCELERATION"),
ALLOW_MIGRATE_OTHER_POOLS("ALLOW_MIGRATE_OTHER_POOLS");
private final String capability;
private Capability(String capability) {
this.capability = capability;
}
public String toString() {
return this.capability;
}
}
public static enum ProvisioningType {
@ -150,7 +155,8 @@ public class Storage {
ManagedNFS(true, false, false),
Linstor(true, true, false),
DatastoreCluster(true, true, false), // for VMware, to abstract pool of clusters
StorPool(true, true, true);
StorPool(true, true, true),
FiberChannel(true, true, false); // Fiber Channel pool for KVM hypervisors; the volume is located by its WWN value (/dev/disk/by-id/wwn-<wwnvalue>)
private final boolean shared;
private final boolean overprovisioning;
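The comment on the new enum constant pins down how a FiberChannel volume is addressed on a KVM host. As a small illustration (the helper class is hypothetical and the WWN is made up), the two device-path forms this PR refers to are:

// Hypothetical helper; shows the two device-path forms referenced in this PR.
public final class WwnPaths {
    private WwnPaths() { }

    // form from the StoragePoolType comment above
    static String byIdPath(String wwn) {
        return "/dev/disk/by-id/wwn-" + wwn;
    }

    // form used by FiberChannelAdapter.parseAndValidatePath later in this diff
    static String multipathPath(String wwn) {
        return "/dev/mapper/3" + wwn;
    }

    public static void main(String[] args) {
        System.out.println(byIdPath("600a098038304731"));      // /dev/disk/by-id/wwn-600a098038304731
        System.out.println(multipathPath("600a098038304731")); // /dev/mapper/3600a098038304731
    }
}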

View File

@ -17,6 +17,7 @@
package org.apache.cloudstack.api.command.admin.storage;
import java.util.List;
import java.util.Map;
import org.apache.cloudstack.api.ApiCommandResourceType;
import org.apache.log4j.Logger;
@ -32,6 +33,7 @@ import org.apache.cloudstack.api.response.StoragePoolResponse;
import com.cloud.storage.StoragePool;
import com.cloud.user.Account;
@SuppressWarnings("rawtypes")
@APICommand(name = "updateStoragePool", description = "Updates a storage pool.", responseObject = StoragePoolResponse.class, since = "3.0.0",
requestHasSensitiveInfo = false, responseHasSensitiveInfo = false)
public class UpdateStoragePoolCmd extends BaseCmd {
@ -61,6 +63,20 @@ public class UpdateStoragePoolCmd extends BaseCmd {
" enable it back.")
private Boolean enabled;
@Parameter(name = ApiConstants.DETAILS,
type = CommandType.MAP,
required = false,
description = "the details for the storage pool",
since = "4.19.0")
private Map details;
@Parameter(name = ApiConstants.URL,
type = CommandType.STRING,
required = false,
description = "the URL of the storage pool",
since = "4.19.0")
private String url;
@Parameter(name = ApiConstants.IS_TAG_A_RULE, type = CommandType.BOOLEAN, description = ApiConstants.PARAMETER_DESCRIPTION_IS_TAG_A_RULE)
private Boolean isTagARule;
@ -115,6 +131,22 @@ public class UpdateStoragePoolCmd extends BaseCmd {
return ApiCommandResourceType.StoragePool;
}
public Map<String,String> getDetails() {
return details;
}
public void setDetails(Map<String,String> details) {
this.details = details;
}
public String getUrl() {
return url;
}
public void setUrl(String url) {
this.url = url;
}
@Override
public void execute() {
StoragePool result = _storageService.updateStoragePool(this);
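A test-style sketch of exercising the two new parameters (the URL value is a placeholder; the detail key mirrors the ALLOW_MIGRATE_OTHER_POOLS capability added to Storage.java above):

import java.util.HashMap;
import java.util.Map;

import org.apache.cloudstack.api.command.admin.storage.UpdateStoragePoolCmd;

// Sketch only: shows the shape of the new inputs, not a real API client.
public class UpdateStoragePoolSketch {
    public static void main(String[] args) {
        UpdateStoragePoolCmd cmd = new UpdateStoragePoolCmd();
        cmd.setUrl("https://array.example.com:443"); // placeholder management URL
        Map<String, String> details = new HashMap<>();
        details.put("ALLOW_MIGRATE_OTHER_POOLS", "true");
        cmd.setDetails(details);
    }
}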

View File

@ -111,6 +111,16 @@
<artifactId>cloud-plugin-storage-volume-storpool</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-plugin-storage-volume-primera</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-plugin-storage-volume-flasharray</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-server</artifactId>

View File

@ -2957,6 +2957,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
* <ul>
* <li> If the current storage pool of the volume is not a managed storage, we do not need to validate anything here.
* <li> If the current storage pool is a managed storage and the target storage pool ID is different from the current one, we throw an exception.
* <li> If the current storage pool is a managed storage and it explicitly declares, via the ALLOW_MIGRATE_OTHER_POOLS capability detail, that it is capable of migration to other storage pools, we allow the migration.
* </ul>
*/
protected void executeManagedStorageChecksWhenTargetStoragePoolProvided(StoragePoolVO currentPool, VolumeVO volume, StoragePoolVO targetPool) {
@ -2966,6 +2967,11 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
if (currentPool.getId() == targetPool.getId()) {
return;
}
Map<String, String> details = _storagePoolDao.getDetails(currentPool.getId());
if (details != null && Boolean.parseBoolean(details.get(Storage.Capability.ALLOW_MIGRATE_OTHER_POOLS.toString()))) {
return;
}
throw new CloudRuntimeException(String.format("Currently, a volume on managed storage can only be 'migrated' to itself " + "[volumeId=%s, currentStoragePoolId=%s, targetStoragePoolId=%s].",
volume.getUuid(), currentPool.getUuid(), targetPool.getUuid()));
}
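The check reads the capability straight from the pool's detail map, so a pool opts in by persisting the capability name as a detail with value "true". A sketch of the detail entry (where the PR's Adaptive provider actually records it is outside this excerpt):

import java.util.HashMap;
import java.util.Map;

import com.cloud.storage.Storage;

// Sketch: the pool-detail entry that satisfies the check above.
public class AllowMigrateDetailSketch {
    static Map<String, String> optIn() {
        Map<String, String> details = new HashMap<>();
        details.put(Storage.Capability.ALLOW_MIGRATE_OTHER_POOLS.toString(), "true");
        return details; // persisted against the pool so _storagePoolDao.getDetails() returns it
    }
}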

View File

@ -193,7 +193,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
destData.getType() == DataObjectType.TEMPLATE)) {
// volume transfer from primary to secondary. Volume transfer between primary pools are already handled by copyVolumeBetweenPools
// Delete cache in order to certainly transfer a latest image.
s_logger.debug("Delete " + cacheType + " cache(id: " + cacheId +
if (s_logger.isDebugEnabled()) s_logger.debug("Delete " + cacheType + " cache(id: " + cacheId +
", uuid: " + cacheUuid + ")");
cacheMgr.deleteCacheObject(srcForCopy);
} else {
@ -205,7 +205,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
", uuid: " + cacheUuid + ")");
cacheMgr.deleteCacheObject(srcForCopy);
} else {
s_logger.debug("Decrease reference count of " + cacheType +
if (s_logger.isDebugEnabled()) s_logger.debug("Decrease reference count of " + cacheType +
" cache(id: " + cacheId + ", uuid: " + cacheUuid + ")");
cacheMgr.releaseCacheObject(srcForCopy);
}
@ -213,7 +213,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
}
return answer;
} catch (Exception e) {
s_logger.debug("copy object failed: ", e);
if (s_logger.isDebugEnabled()) s_logger.debug("copy object failed: ", e);
if (cacheData != null) {
cacheMgr.deleteCacheObject(cacheData);
}
@ -331,7 +331,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
}
return answer;
} catch (Exception e) {
s_logger.debug("Failed to send to storage pool", e);
if (s_logger.isDebugEnabled()) s_logger.debug("Failed to send to storage pool", e);
throw new CloudRuntimeException("Failed to send to storage pool", e);
}
}
@ -388,7 +388,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
if (answer == null || !answer.getResult()) {
if (answer != null) {
s_logger.debug("copy to image store failed: " + answer.getDetails());
if (s_logger.isDebugEnabled()) s_logger.debug("copy to image store failed: " + answer.getDetails());
}
objOnImageStore.processEvent(Event.OperationFailed);
imageStore.delete(objOnImageStore);
@ -411,7 +411,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
if (answer == null || !answer.getResult()) {
if (answer != null) {
s_logger.debug("copy to primary store failed: " + answer.getDetails());
if (s_logger.isDebugEnabled()) s_logger.debug("copy to primary store failed: " + answer.getDetails());
}
objOnImageStore.processEvent(Event.OperationFailed);
imageStore.delete(objOnImageStore);
@ -471,13 +471,17 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
s_logger.error(errMsg);
answer = new Answer(command, false, errMsg);
} else {
if (s_logger.isDebugEnabled()) s_logger.debug("Sending MIGRATE_COPY request to node " + ep);
answer = ep.sendMessage(command);
if (s_logger.isDebugEnabled()) s_logger.debug("Received MIGRATE_COPY response from node with answer: " + answer);
}
if (answer == null || !answer.getResult()) {
throw new CloudRuntimeException("Failed to migrate volume " + volume + " to storage pool " + destPool);
} else {
// Update the volume details after migration.
if (s_logger.isDebugEnabled()) s_logger.debug("MIGRATE_COPY updating volume");
VolumeVO volumeVo = volDao.findById(volume.getId());
Long oldPoolId = volume.getPoolId();
volumeVo.setPath(((MigrateVolumeAnswer)answer).getVolumePath());
@ -496,6 +500,8 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
}
volumeVo.setFolder(folder);
volDao.update(volume.getId(), volumeVo);
if (s_logger.isDebugEnabled()) s_logger.debug("MIGRATE_COPY update volume data complete");
}
return answer;
@ -507,7 +513,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
Answer answer = null;
String errMsg = null;
try {
s_logger.debug("copyAsync inspecting src type " + srcData.getType().toString() + " copyAsync inspecting dest type " + destData.getType().toString());
if (s_logger.isDebugEnabled()) s_logger.debug("copyAsync inspecting src type " + srcData.getType().toString() + " copyAsync inspecting dest type " + destData.getType().toString());
if (srcData.getType() == DataObjectType.SNAPSHOT && destData.getType() == DataObjectType.VOLUME) {
answer = copyVolumeFromSnapshot(srcData, destData);
} else if (srcData.getType() == DataObjectType.SNAPSHOT && destData.getType() == DataObjectType.TEMPLATE) {
@ -516,11 +522,16 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
answer = cloneVolume(srcData, destData);
} else if (destData.getType() == DataObjectType.VOLUME && srcData.getType() == DataObjectType.VOLUME &&
srcData.getDataStore().getRole() == DataStoreRole.Primary && destData.getDataStore().getRole() == DataStoreRole.Primary) {
if (s_logger.isDebugEnabled()) s_logger.debug("About to MIGRATE copy between datasources");
if (srcData.getId() == destData.getId()) {
// The volume has to be migrated across storage pools.
if (s_logger.isDebugEnabled()) s_logger.debug("MIGRATE copy using migrateVolumeToPool STARTING");
answer = migrateVolumeToPool(srcData, destData);
if (s_logger.isDebugEnabled()) s_logger.debug("MIGRATE copy using migrateVolumeToPool DONE: " + answer.getResult());
} else {
if (s_logger.isDebugEnabled()) s_logger.debug("MIGRATE copy using copyVolumeBetweenPools STARTING");
answer = copyVolumeBetweenPools(srcData, destData);
if (s_logger.isDebugEnabled()) s_logger.debug("MIGRATE copy using copyVolumeBetweenPools DONE: " + answer.getResult());
}
} else if (srcData.getType() == DataObjectType.SNAPSHOT && destData.getType() == DataObjectType.SNAPSHOT) {
answer = copySnapshot(srcData, destData);
@ -532,7 +543,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
errMsg = answer.getDetails();
}
} catch (Exception e) {
s_logger.debug("copy failed", e);
if (s_logger.isDebugEnabled()) s_logger.debug("copy failed", e);
errMsg = e.toString();
}
CopyCommandResult result = new CopyCommandResult(null, answer);
@ -627,7 +638,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
}
return answer;
} catch (Exception e) {
s_logger.debug("copy snasphot failed: ", e);
if (s_logger.isDebugEnabled()) s_logger.debug("copy snasphot failed: ", e);
if (cacheData != null) {
cacheMgr.deleteCacheObject(cacheData);
}
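All of the logging changes in this file follow one pattern: guard the call with isDebugEnabled() so the message (string concatenation, toString calls) is never built when debug logging is off. In isolation:

import org.apache.log4j.Logger;

// The guard pattern applied throughout AncientDataMotionStrategy above.
public class DebugGuardSketch {
    private static final Logger s_logger = Logger.getLogger(DebugGuardSketch.class);

    static void logCopyFailure(Exception e) {
        // without the guard, the argument expression is evaluated even when
        // debug logging is disabled; with it, the call is skipped entirely
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("copy object failed: ", e);
        }
    }
}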

View File

@ -106,6 +106,7 @@ import com.cloud.storage.Snapshot;
import com.cloud.storage.SnapshotVO;
import com.cloud.storage.Storage;
import com.cloud.storage.Storage.ImageFormat;
import com.cloud.storage.Storage.ProvisioningType;
import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.storage.StorageManager;
import com.cloud.storage.StoragePool;
@ -186,6 +187,8 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
private EndPointSelector selector;
@Inject
VMTemplatePoolDao templatePoolDao;
@Inject
private VolumeDataFactory _volFactory;
@Override
public StrategyPriority canHandle(DataObject srcData, DataObject destData) {
@ -400,15 +403,15 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
} else if (!isVolumeOnManagedStorage(destVolumeInfo)) {
handleVolumeMigrationFromManagedStorageToNonManagedStorage(srcVolumeInfo, destVolumeInfo, callback);
} else {
String errMsg = "The source volume to migrate and the destination volume are both on managed storage. " +
"Migration in this case is not yet supported.";
handleError(errMsg, callback);
handleVolumeMigrationFromManagedStorageToManagedStorage(srcVolumeInfo, destVolumeInfo, callback);
}
} else if (!isVolumeOnManagedStorage(destVolumeInfo)) {
String errMsg = "The 'StorageSystemDataMotionStrategy' does not support this migration use case.";
handleError(errMsg, callback);
if (!HypervisorType.KVM.equals(srcVolumeInfo.getHypervisorType())) {
String errMsg = String.format("Currently migrating volumes between managed storage providers is not supported on %s hypervisor", srcVolumeInfo.getHypervisorType().toString());
handleError(errMsg, callback);
} else {
handleVolumeMigrationForKVM(srcVolumeInfo, destVolumeInfo, callback);
}
} else {
handleVolumeMigrationFromNonManagedStorageToManagedStorage(srcVolumeInfo, destVolumeInfo, callback);
}
@ -453,7 +456,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
String volumePath = null;
try {
if (!ImageFormat.QCOW2.equals(srcVolumeInfo.getFormat())) {
if (!HypervisorType.KVM.equals(srcVolumeInfo.getHypervisorType())) {
throw new CloudRuntimeException("Currently, only the KVM hypervisor type is supported for the migration of a volume " +
"from managed storage to non-managed storage.");
}
@ -485,7 +488,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
errMsg = "Migration operation failed in 'StorageSystemDataMotionStrategy.handleVolumeCopyFromManagedStorageToSecondaryStorage': " +
ex.getMessage();
throw new CloudRuntimeException(errMsg);
throw new CloudRuntimeException(errMsg, ex);
}
finally {
CopyCmdAnswer copyCmdAnswer;
@ -512,12 +515,22 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
}
}
private void handleVolumeMigrationFromManagedStorageToManagedStorage(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo,
AsyncCompletionCallback<CopyCommandResult> callback) {
if (!HypervisorType.KVM.equals(srcVolumeInfo.getHypervisorType())) {
String errMsg = String.format("Currently migrating volumes between managed storage providers is not supported on %s hypervisor", srcVolumeInfo.getHypervisorType().toString());
handleError(errMsg, callback);
} else {
handleVolumeMigrationForKVM(srcVolumeInfo, destVolumeInfo, callback);
}
}
private void handleVolumeMigrationFromManagedStorageToNonManagedStorage(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo,
AsyncCompletionCallback<CopyCommandResult> callback) {
String errMsg = null;
try {
if (!ImageFormat.QCOW2.equals(srcVolumeInfo.getFormat())) {
if (!HypervisorType.KVM.equals(srcVolumeInfo.getHypervisorType())) {
throw new CloudRuntimeException("Currently, only the KVM hypervisor type is supported for the migration of a volume " +
"from managed storage to non-managed storage.");
}
@ -525,10 +538,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
HypervisorType hypervisorType = HypervisorType.KVM;
VirtualMachine vm = srcVolumeInfo.getAttachedVM();
if (vm != null && vm.getState() != VirtualMachine.State.Stopped) {
throw new CloudRuntimeException("Currently, if a volume to migrate from managed storage to non-managed storage is attached to " +
"a VM, the VM must be in the Stopped state.");
}
checkAvailableForMigration(vm);
long destStoragePoolId = destVolumeInfo.getPoolId();
StoragePoolVO destStoragePoolVO = _storagePoolDao.findById(destStoragePoolId);
@ -553,7 +563,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
errMsg = "Migration operation failed in 'StorageSystemDataMotionStrategy.handleVolumeMigrationFromManagedStorageToNonManagedStorage': " +
ex.getMessage();
throw new CloudRuntimeException(errMsg);
throw new CloudRuntimeException(errMsg, ex);
}
finally {
CopyCmdAnswer copyCmdAnswer;
@ -579,9 +589,10 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
private void verifyFormatWithPoolType(ImageFormat imageFormat, StoragePoolType poolType) {
if (imageFormat != ImageFormat.VHD && imageFormat != ImageFormat.OVA && imageFormat != ImageFormat.QCOW2 &&
!(imageFormat == ImageFormat.RAW && StoragePoolType.PowerFlex == poolType)) {
throw new CloudRuntimeException("Only the following image types are currently supported: " +
ImageFormat.VHD.toString() + ", " + ImageFormat.OVA.toString() + ", " + ImageFormat.QCOW2.toString() + ", and " + ImageFormat.RAW.toString() + "(for PowerFlex)");
!(imageFormat == ImageFormat.RAW && (StoragePoolType.PowerFlex == poolType ||
StoragePoolType.FiberChannel == poolType))) {
throw new CloudRuntimeException(String.format("Only the following image types are currently supported: %s, %s, %s, %s (for PowerFlex and FiberChannel)",
ImageFormat.VHD.toString(), ImageFormat.OVA.toString(), ImageFormat.QCOW2.toString(), ImageFormat.RAW.toString()));
}
}
@ -685,14 +696,14 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
handleVolumeMigrationForXenServer(srcVolumeInfo, destVolumeInfo);
}
else {
handleVolumeMigrationForKVM(srcVolumeInfo, destVolumeInfo);
handleVolumeMigrationForKVM(srcVolumeInfo, destVolumeInfo, callback);
}
}
catch (Exception ex) {
errMsg = "Migration operation failed in 'StorageSystemDataMotionStrategy.handleVolumeMigrationFromNonManagedStorageToManagedStorage': " +
ex.getMessage();
throw new CloudRuntimeException(errMsg);
throw new CloudRuntimeException(errMsg, ex);
}
finally {
CopyCmdAnswer copyCmdAnswer;
@ -826,24 +837,73 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
_volumeDao.update(srcVolumeInfo.getId(), volumeVO);
}
private void handleVolumeMigrationForKVM(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo) {
private void handleVolumeMigrationForKVM(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo, AsyncCompletionCallback<CopyCommandResult> callback) {
VirtualMachine vm = srcVolumeInfo.getAttachedVM();
if (vm != null && vm.getState() != VirtualMachine.State.Stopped) {
throw new CloudRuntimeException("Currently, if a volume to migrate from non-managed storage to managed storage on KVM is attached to " +
"a VM, the VM must be in the Stopped state.");
checkAvailableForMigration(vm);
String errMsg = null;
try {
destVolumeInfo.getDataStore().getDriver().createAsync(destVolumeInfo.getDataStore(), destVolumeInfo, null);
VolumeVO volumeVO = _volumeDao.findById(destVolumeInfo.getId());
updatePathFromScsiName(volumeVO);
destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId(), destVolumeInfo.getDataStore());
HostVO hostVO = getHostOnWhichToExecuteMigrationCommand(srcVolumeInfo, destVolumeInfo);
// migrate the volume via the hypervisor
String path = migrateVolumeForKVM(srcVolumeInfo, destVolumeInfo, hostVO, "Unable to migrate the volume from non-managed storage to managed storage");
updateVolumePath(destVolumeInfo.getId(), path);
volumeVO = _volumeDao.findById(destVolumeInfo.getId());
// only set this if it was not already set; default to QCOW2 for KVM
if (volumeVO.getFormat() == null) {
volumeVO.setFormat(ImageFormat.QCOW2);
_volumeDao.update(volumeVO.getId(), volumeVO);
}
} catch (Exception ex) {
errMsg = "Primary storage migration failed due to an unexpected error: " +
ex.getMessage();
if (ex instanceof CloudRuntimeException) {
throw ex;
} else {
throw new CloudRuntimeException(errMsg, ex);
}
} finally {
CopyCmdAnswer copyCmdAnswer;
if (errMsg != null) {
copyCmdAnswer = new CopyCmdAnswer(errMsg);
}
else {
destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId(), destVolumeInfo.getDataStore());
DataTO dataTO = destVolumeInfo.getTO();
copyCmdAnswer = new CopyCmdAnswer(dataTO);
}
CopyCommandResult result = new CopyCommandResult(null, copyCmdAnswer);
result.setResult(errMsg);
callback.complete(result);
}
}
destVolumeInfo.getDataStore().getDriver().createAsync(destVolumeInfo.getDataStore(), destVolumeInfo, null);
private void checkAvailableForMigration(VirtualMachine vm) {
if (vm != null && (vm.getState() != VirtualMachine.State.Stopped && vm.getState() != VirtualMachine.State.Migrating)) {
throw new CloudRuntimeException("Currently, if a volume to migrate from non-managed storage to managed storage on KVM is attached to " +
"a VM, the VM must be in the Stopped or Migrating state.");
}
}
VolumeVO volumeVO = _volumeDao.findById(destVolumeInfo.getId());
volumeVO.setPath(volumeVO.get_iScsiName());
_volumeDao.update(volumeVO.getId(), volumeVO);
destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId(), destVolumeInfo.getDataStore());
/**
* Only update the path from the iscsiName if the iscsiName is set. Otherwise take no action,
* to avoid overwriting a previously set path value with null.
*/
private void updatePathFromScsiName(VolumeVO volumeVO) {
if (volumeVO.get_iScsiName() != null) {
volumeVO.setPath(volumeVO.get_iScsiName());
_volumeDao.update(volumeVO.getId(), volumeVO);
}
}
private HostVO getHostOnWhichToExecuteMigrationCommand(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo) {
long srcStoragePoolId = srcVolumeInfo.getPoolId();
StoragePoolVO srcStoragePoolVO = _storagePoolDao.findById(srcStoragePoolId);
@ -856,14 +916,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
hostVO = getHost(destVolumeInfo.getDataCenterId(), HypervisorType.KVM, false);
}
// migrate the volume via the hypervisor
migrateVolumeForKVM(srcVolumeInfo, destVolumeInfo, hostVO, "Unable to migrate the volume from non-managed storage to managed storage");
volumeVO = _volumeDao.findById(destVolumeInfo.getId());
volumeVO.setFormat(ImageFormat.QCOW2);
_volumeDao.update(volumeVO.getId(), volumeVO);
return hostVO;
}
/**
@ -1075,7 +1128,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
catch (Exception ex) {
errMsg = ex.getMessage();
throw new CloudRuntimeException(errMsg);
throw new CloudRuntimeException(errMsg, ex);
}
finally {
if (usingBackendSnapshot) {
@ -1293,7 +1346,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
catch (Exception ex) {
errMsg = "Copy operation failed in 'StorageSystemDataMotionStrategy.handleCreateManagedVolumeFromNonManagedSnapshot': " + ex.getMessage();
throw new CloudRuntimeException(errMsg);
throw new CloudRuntimeException(errMsg, ex);
}
finally {
handleQualityOfServiceForVolumeMigration(volumeInfo, PrimaryDataStoreDriver.QualityOfServiceState.NO_MIGRATION);
@ -1674,6 +1727,42 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
return copyCmdAnswer;
}
/**
* Use normal volume semantics (create a volume known to CloudStack, then ask the storage driver to create it as a copy of the snapshot)
* @param snapshotInfo
*/
public void prepTempVolumeForCopyFromSnapshot(SnapshotInfo snapshotInfo) {
VolumeVO volumeVO = null;
try {
volumeVO = new VolumeVO(Volume.Type.DATADISK, snapshotInfo.getName() + "_" + System.currentTimeMillis() + ".TMP",
snapshotInfo.getDataCenterId(), snapshotInfo.getDomainId(), snapshotInfo.getAccountId(), 0, ProvisioningType.THIN, snapshotInfo.getSize(), 0L, 0L, "");
volumeVO.setPoolId(snapshotInfo.getDataStore().getId());
_volumeDao.persist(volumeVO);
VolumeInfo tempVolumeInfo = this._volFactory.getVolume(volumeVO.getId());
if (snapshotInfo.getDataStore().getDriver().canCopy(snapshotInfo, tempVolumeInfo)) {
snapshotInfo.getDataStore().getDriver().copyAsync(snapshotInfo, tempVolumeInfo, null, null);
// refresh volume info as data could have changed
tempVolumeInfo = this._volFactory.getVolume(volumeVO.getId());
// save the "temp" volume info into the snapshot details (we need this to clean up at the end)
_snapshotDetailsDao.addDetail(snapshotInfo.getId(), "TemporaryVolumeCopyUUID", tempVolumeInfo.getUuid(), true);
_snapshotDetailsDao.addDetail(snapshotInfo.getId(), "TemporaryVolumeCopyPath", tempVolumeInfo.getPath(), true);
// NOTE: for this to work, the Driver must return a custom SnapshotObjectTO object from getTO()
// whenever the TemporaryVolumeCopyPath is set.
} else {
throw new CloudRuntimeException("Storage driver indicated it could create a volume from the snapshot but rejected the subsequent request to do so");
}
} catch (Throwable e) {
// cleanup temporary volume
if (volumeVO != null) {
_volumeDao.remove(volumeVO.getId());
}
throw e;
}
}
/**
* If the underlying storage system is making use of read-only snapshots, this gives the storage system the opportunity to
* create a volume from the snapshot so that we can copy the VHD file that should be inside of the snapshot to secondary storage.
@ -1685,8 +1774,13 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
* resign the SR and the VDI that should be inside of the snapshot before copying the VHD file to secondary storage.
*/
private void createVolumeFromSnapshot(SnapshotInfo snapshotInfo) {
SnapshotDetailsVO snapshotDetails = handleSnapshotDetails(snapshotInfo.getId(), "create");
if ("true".equalsIgnoreCase(snapshotInfo.getDataStore().getDriver().getCapabilities().get("CAN_CREATE_TEMP_VOLUME_FROM_SNAPSHOT"))) {
prepTempVolumeForCopyFromSnapshot(snapshotInfo);
return;
}
SnapshotDetailsVO snapshotDetails = handleSnapshotDetails(snapshotInfo.getId(), "create");
try {
snapshotInfo.getDataStore().getDriver().createAsync(snapshotInfo.getDataStore(), snapshotInfo, null);
}
@ -1701,6 +1795,24 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
* invocation of createVolumeFromSnapshot(SnapshotInfo).
*/
private void deleteVolumeFromSnapshot(SnapshotInfo snapshotInfo) {
VolumeVO volumeVO = null;
// cleanup any temporary volume previously created for copy from a snapshot
if ("true".equalsIgnoreCase(snapshotInfo.getDataStore().getDriver().getCapabilities().get("CAN_CREATE_TEMP_VOLUME_FROM_SNAPSHOT"))) {
SnapshotDetailsVO tempUuid = _snapshotDetailsDao.findDetail(snapshotInfo.getId(), "TemporaryVolumeCopyUUID");
if (tempUuid == null || tempUuid.getValue() == null) {
return;
}
volumeVO = _volumeDao.findByUuid(tempUuid.getValue());
if (volumeVO != null) {
_volumeDao.remove(volumeVO.getId());
}
_snapshotDetailsDao.remove(tempUuid.getId());
_snapshotDetailsDao.removeDetail(snapshotInfo.getId(), "TemporaryVolumeCopyUUID");
return;
}
SnapshotDetailsVO snapshotDetails = handleSnapshotDetails(snapshotInfo.getId(), "delete");
try {
@ -2363,7 +2475,10 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
try {
StoragePoolVO storagePoolVO = _storagePoolDao.findById(volumeInfo.getPoolId());
if (!ImageFormat.QCOW2.equals(volumeInfo.getFormat()) && !(ImageFormat.RAW.equals(volumeInfo.getFormat()) && StoragePoolType.PowerFlex == storagePoolVO.getPoolType())) {
if (!ImageFormat.QCOW2.equals(volumeInfo.getFormat()) &&
!(ImageFormat.RAW.equals(volumeInfo.getFormat()) && (
StoragePoolType.PowerFlex == storagePoolVO.getPoolType() ||
StoragePoolType.FiberChannel == storagePoolVO.getPoolType()))) {
throw new CloudRuntimeException("When using managed storage, you can only create a template from a volume on KVM currently.");
}
@ -2506,7 +2621,13 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
long snapshotId = snapshotInfo.getId();
if (storagePoolVO.getPoolType() == StoragePoolType.PowerFlex) {
// if the snapshot required a temporary volume be created check if the UUID is set so we can
// retrieve the temporary volume's path to use during remote copy
List<SnapshotDetailsVO> storedDetails = _snapshotDetailsDao.findDetails(snapshotInfo.getId(), "TemporaryVolumeCopyPath");
if (storedDetails != null && storedDetails.size() > 0) {
String value = storedDetails.get(0).getValue();
snapshotDetails.put(DiskTO.PATH, value);
} else if (storagePoolVO.getPoolType() == StoragePoolType.PowerFlex || storagePoolVO.getPoolType() == StoragePoolType.FiberChannel) {
snapshotDetails.put(DiskTO.IQN, snapshotInfo.getPath());
} else {
snapshotDetails.put(DiskTO.IQN, getSnapshotProperty(snapshotId, DiskTO.IQN));
@ -2718,8 +2839,6 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
}
private String migrateVolumeForKVM(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo, HostVO hostVO, String errMsg) {
boolean srcVolumeDetached = srcVolumeInfo.getAttachedVM() == null;
try {
Map<String, String> srcDetails = getVolumeDetails(srcVolumeInfo);
Map<String, String> destDetails = getVolumeDetails(destVolumeInfo);
@ -2727,16 +2846,11 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
MigrateVolumeCommand migrateVolumeCommand = new MigrateVolumeCommand(srcVolumeInfo.getTO(), destVolumeInfo.getTO(),
srcDetails, destDetails, StorageManager.KvmStorageOfflineMigrationWait.value());
if (srcVolumeDetached) {
_volumeService.grantAccess(srcVolumeInfo, hostVO, srcVolumeInfo.getDataStore());
}
_volumeService.grantAccess(srcVolumeInfo, hostVO, srcVolumeInfo.getDataStore());
handleQualityOfServiceForVolumeMigration(destVolumeInfo, PrimaryDataStoreDriver.QualityOfServiceState.MIGRATION);
_volumeService.grantAccess(destVolumeInfo, hostVO, destVolumeInfo.getDataStore());
MigrateVolumeAnswer migrateVolumeAnswer = (MigrateVolumeAnswer)agentManager.send(hostVO.getId(), migrateVolumeCommand);
if (migrateVolumeAnswer == null || !migrateVolumeAnswer.getResult()) {
if (migrateVolumeAnswer != null && StringUtils.isNotEmpty(migrateVolumeAnswer.getDetails())) {
throw new CloudRuntimeException(migrateVolumeAnswer.getDetails());
@ -2745,42 +2859,22 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
throw new CloudRuntimeException(errMsg);
}
}
if (srcVolumeDetached) {
_volumeService.revokeAccess(destVolumeInfo, hostVO, destVolumeInfo.getDataStore());
}
try {
_volumeService.revokeAccess(srcVolumeInfo, hostVO, srcVolumeInfo.getDataStore());
}
catch (Exception e) {
// This volume should be deleted soon, so just log a warning here.
LOGGER.warn(e.getMessage(), e);
}
return migrateVolumeAnswer.getVolumePath();
}
catch (Exception ex) {
} catch (CloudRuntimeException ex) {
throw ex;
} catch (Exception ex) {
throw new CloudRuntimeException("Unexpected error during volume migration: " + ex.getMessage(), ex);
} finally {
try {
_volumeService.revokeAccess(destVolumeInfo, hostVO, destVolumeInfo.getDataStore());
}
catch (Exception e) {
// This volume should be deleted soon, so just log a warning here.
LOGGER.warn(e.getMessage(), e);
}
if (srcVolumeDetached) {
_volumeService.revokeAccess(srcVolumeInfo, hostVO, srcVolumeInfo.getDataStore());
_volumeService.revokeAccess(destVolumeInfo, hostVO, destVolumeInfo.getDataStore());
handleQualityOfServiceForVolumeMigration(destVolumeInfo, PrimaryDataStoreDriver.QualityOfServiceState.NO_MIGRATION);
} catch (Throwable e) {
LOGGER.warn("During cleanup post-migration and exception occured: " + e);
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("Exception during post-migration cleanup.", e);
}
}
String msg = "Failed to perform volume migration : ";
LOGGER.warn(msg, ex);
throw new CloudRuntimeException(msg + ex.getMessage(), ex);
}
finally {
handleQualityOfServiceForVolumeMigration(destVolumeInfo, PrimaryDataStoreDriver.QualityOfServiceState.NO_MIGRATION);
}
}
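The temporary-volume flow above (prepTempVolumeForCopyFromSnapshot and its cleanup in deleteVolumeFromSnapshot) is gated on a driver capability string. A driver opts in by advertising it from getCapabilities(); the key below is taken verbatim from this diff, the class is a sketch:

import java.util.HashMap;
import java.util.Map;

// Sketch of the driver side of the handshake: a primary storage driver that
// wants the temp-volume-from-snapshot flow advertises this capability.
public class TempVolumeCapableDriverSketch {
    public Map<String, String> getCapabilities() {
        Map<String, String> caps = new HashMap<>();
        caps.put("CAN_CREATE_TEMP_VOLUME_FROM_SNAPSHOT", "true");
        return caps;
    }
}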

View File

@ -882,9 +882,7 @@ public class VolumeServiceImpl implements VolumeService {
*/
private TemplateInfo createManagedTemplateVolume(TemplateInfo srcTemplateInfo, PrimaryDataStore destPrimaryDataStore) {
// create a template volume on primary storage
AsyncCallFuture<VolumeApiResult> createTemplateFuture = new AsyncCallFuture<>();
TemplateInfo templateOnPrimary = (TemplateInfo)destPrimaryDataStore.create(srcTemplateInfo, srcTemplateInfo.getDeployAsIsConfiguration());
VMTemplateStoragePoolVO templatePoolRef = _tmpltPoolDao.findByPoolTemplate(destPrimaryDataStore.getId(), templateOnPrimary.getId(), srcTemplateInfo.getDeployAsIsConfiguration());
if (templatePoolRef == null) {
@ -897,7 +895,6 @@ public class VolumeServiceImpl implements VolumeService {
// At this point, we have an entry in the DB that points to our cached template.
// We need to lock it as there may be other VMs that may get started using the same template.
// We want to avoid having to create multiple cache copies of the same template.
int storagePoolMaxWaitSeconds = NumbersUtil.parseInt(configDao.getValue(Config.StoragePoolMaxWaitSeconds.key()), 3600);
long templatePoolRefId = templatePoolRef.getId();
@ -909,28 +906,27 @@ public class VolumeServiceImpl implements VolumeService {
try {
// create a cache volume on the back-end
templateOnPrimary.processEvent(Event.CreateOnlyRequested);
CreateAsyncCompleteCallback callback = new CreateAsyncCompleteCallback();
CreateVolumeContext<CreateCmdResult> createContext = new CreateVolumeContext<>(null, templateOnPrimary, createTemplateFuture);
AsyncCallbackDispatcher<VolumeServiceImpl, CreateCmdResult> createCaller = AsyncCallbackDispatcher.create(this);
createCaller.setCallback(createCaller.getTarget().createManagedTemplateImageCallback(null, null)).setContext(createContext);
destPrimaryDataStore.getDriver().createAsync(destPrimaryDataStore, templateOnPrimary, createCaller);
VolumeApiResult result = createTemplateFuture.get();
if (result.isFailed()) {
String errMesg = result.getResult();
destPrimaryDataStore.getDriver().createAsync(destPrimaryDataStore, templateOnPrimary, callback);
// validate we got a good result back
if (callback.result == null || callback.result.isFailed()) {
String errMesg;
if (callback.result == null) {
errMesg = "Unknown/unable to determine result";
} else {
errMesg = callback.result.getResult();
}
templateOnPrimary.processEvent(Event.OperationFailed);
throw new CloudRuntimeException("Unable to create template " + templateOnPrimary.getId() + " on primary storage " + destPrimaryDataStore.getId() + ":" + errMesg);
}
templateOnPrimary.processEvent(Event.OperationSuccessed);
} catch (Throwable e) {
s_logger.debug("Failed to create template volume on storage", e);
templateOnPrimary.processEvent(Event.OperationFailed);
throw new CloudRuntimeException(e.getMessage());
} finally {
_tmpltPoolDao.releaseFromLockTable(templatePoolRefId);
@ -939,6 +935,17 @@ public class VolumeServiceImpl implements VolumeService {
return templateOnPrimary;
}
private static class CreateAsyncCompleteCallback implements AsyncCompletionCallback<CreateCmdResult> {
public CreateCmdResult result;
@Override
public void complete(CreateCmdResult result) {
this.result = result;
}
}
/**
* This function copies a template from secondary storage to a template volume
* created on managed storage. This template volume will be used as a cache.
@ -1464,6 +1471,16 @@ public class VolumeServiceImpl implements VolumeService {
if (templatePoolRef.getDownloadState() == Status.NOT_DOWNLOADED) {
copyTemplateToManagedTemplateVolume(srcTemplateInfo, templateOnPrimary, templatePoolRef, destPrimaryDataStore, destHost);
}
} catch (Exception e) {
if (templateOnPrimary != null) {
templateOnPrimary.processEvent(Event.OperationFailed);
}
VolumeApiResult result = new VolumeApiResult(volumeInfo);
result.setResult(e.getLocalizedMessage());
result.setSuccess(false);
future.complete(result);
s_logger.warn("Failed to create template on primary storage", e);
return future;
} finally {
if (lock != null) {
lock.unlock();

View File

@ -61,7 +61,9 @@ public class ProjectRoleBasedApiAccessChecker extends AdapterBase implements AP
@Override
public boolean isEnabled() {
if (!roleService.isEnabled()) {
LOGGER.trace("RoleService is disabled. We will not use ProjectRoleBasedApiAccessChecker.");
if (LOGGER.isTraceEnabled()) {
LOGGER.trace("RoleService is disabled. We will not use ProjectRoleBasedApiAccessChecker.");
}
}
return roleService.isEnabled();
}
@ -119,7 +121,9 @@ public class ProjectRoleBasedApiAccessChecker extends AdapterBase implements AP
Account userAccount = accountService.getAccount(user.getAccountId());
if (accountService.isRootAdmin(userAccount.getId()) || accountService.isDomainAdmin(userAccount.getAccountId())) {
LOGGER.info(String.format("Account [%s] is Root Admin or Domain Admin, all APIs are allowed.", userAccount.getAccountName()));
if (LOGGER.isTraceEnabled()) {
LOGGER.trace(String.format("Account [%s] is Root Admin or Domain Admin, all APIs are allowed.", userAccount.getAccountName()));
}
return true;
}

View File

@ -279,6 +279,10 @@ public class LibvirtMigrateVolumeCommandWrapper extends CommandWrapper<MigrateVo
Map<String, String> srcDetails = command.getSrcDetails();
String srcPath = srcDetails != null ? srcDetails.get(DiskTO.IQN) : srcVolumeObjectTO.getPath();
// it's possible a volume has details but is not using IQN addressing...
if (srcPath == null) {
srcPath = srcVolumeObjectTO.getPath();
}
VolumeObjectTO destVolumeObjectTO = (VolumeObjectTO)command.getDestData();
PrimaryDataStoreTO destPrimaryDataStore = (PrimaryDataStoreTO)destVolumeObjectTO.getDataStore();

View File

@ -50,6 +50,7 @@ import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk;
import com.cloud.hypervisor.kvm.storage.KVMStoragePool;
import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager;
import com.cloud.hypervisor.kvm.storage.MultipathSCSIPool;
import com.cloud.resource.CommandWrapper;
import com.cloud.resource.ResourceWrapper;
import com.cloud.storage.Storage.StoragePoolType;
@ -84,6 +85,10 @@ public final class LibvirtResizeVolumeCommandWrapper extends CommandWrapper<Resi
final KVMStoragePoolManager storagePoolMgr = libvirtComputingResource.getStoragePoolMgr();
KVMStoragePool pool = storagePoolMgr.getStoragePool(spool.getType(), spool.getUuid());
if (pool instanceof MultipathSCSIPool) {
return handleMultipathSCSIResize(command, pool);
}
if (spool.getType().equals(StoragePoolType.PowerFlex)) {
pool.connectPhysicalDisk(volumeId, null);
}
@ -225,4 +230,9 @@ public final class LibvirtResizeVolumeCommandWrapper extends CommandWrapper<Resi
throw new CloudRuntimeException("Error when inspecting volume at path " + path, ex);
}
}
private Answer handleMultipathSCSIResize(ResizeVolumeCommand command, KVMStoragePool pool) {
((MultipathSCSIPool)pool).resize(command.getPath(), command.getInstanceName(), command.getNewSize());
return new ResizeVolumeAnswer(command, true, "");
}
}
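MultipathSCSIPool.resize itself is not part of this excerpt. Judging from the multimap.resize.script property registered by MultipathSCSIAdapterBase later in this diff, a plausible sketch is a thin delegation that ends in the resizeVolume.sh storage script (the argument order here is an assumption):

import com.cloud.utils.script.Script;

// Hedged sketch only; the real implementation lives in the PR's
// MultipathSCSIPool/MultipathSCSIAdapterBase classes.
public class MultipathResizeSketch {
    static void resize(String resizeScriptPath, String volumeAddress, String instanceName, long newSizeBytes) {
        Script.runSimpleBashScript(resizeScriptPath + " " + volumeAddress + " " + instanceName + " " + newSizeBytes);
    }
}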

View File

@ -0,0 +1,88 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.hypervisor.kvm.storage;
import com.cloud.storage.Storage;
import com.cloud.utils.exception.CloudRuntimeException;
@StorageAdaptorInfo(storagePoolType=Storage.StoragePoolType.FiberChannel)
public class FiberChannelAdapter extends MultipathSCSIAdapterBase {
public FiberChannelAdapter() {
LOGGER.info("Loaded FiberChannelAdapter for StorageLayer");
}
@Override
public KVMStoragePool getStoragePool(String uuid) {
KVMStoragePool pool = MapStorageUuidToStoragePool.get(uuid);
if (pool == null) {
// return a dummy pool - this adapter doesn't care about connectivity information
pool = new MultipathSCSIPool(uuid, this);
MapStorageUuidToStoragePool.put(uuid, pool);
}
LOGGER.info("FiberChannelAdapter return storage pool [" + uuid + "]");
return pool;
}
public String getName() {
return "FiberChannelAdapter";
}
public boolean isStoragePoolTypeSupported(Storage.StoragePoolType type) {
return Storage.StoragePoolType.FiberChannel.equals(type);
}
@Override
public AddressInfo parseAndValidatePath(String inPath) {
// type=FIBERWWN; address=<address>; connid=<connid>
String type = null;
String address = null;
String connectionId = null;
String path = null;
String[] parts = inPath.split(";");
// handle initial code of wwn only
if (parts.length == 1) {
type = "FIBERWWN";
address = parts[0];
} else {
for (String part: parts) {
String[] pair = part.split("=");
if (pair.length == 2) {
String key = pair[0].trim();
String value = pair[1].trim();
if (key.equals("type")) {
type = value.toUpperCase();
} else if (key.equals("address")) {
address = value;
} else if (key.equals("connid")) {
connectionId = value;
}
}
}
}
if ("FIBERWWN".equals(type)) {
path = "/dev/mapper/3" + address;
} else {
throw new CloudRuntimeException("Invalid address type provided for target disk: " + type);
}
return new AddressInfo(type, address, connectionId, path);
}
}
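To make the path grammar in parseAndValidatePath concrete, the two accepted input forms resolve as follows (the WWN and connection id are made up; this example assumes it sits in com.cloud.hypervisor.kvm.storage alongside the adapter):

// Illustration of the two input forms FiberChannelAdapter accepts.
public class ParsePathExample {
    public static void main(String[] args) {
        FiberChannelAdapter adapter = new FiberChannelAdapter();

        // bare WWN (the "initial code" form) -> /dev/mapper/3600a098038304731
        System.out.println(adapter.parseAndValidatePath("600a098038304731").getPath());

        // key=value form with an optional connection id -> same path, connid captured
        System.out.println(adapter.parseAndValidatePath(
                "type=FIBERWWN; address=600a098038304731; connid=5").getPath());
    }
}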

View File

@ -290,9 +290,12 @@ public class KVMStorageProcessor implements StorageProcessor {
final TemplateObjectTO newTemplate = new TemplateObjectTO();
newTemplate.setPath(primaryVol.getName());
newTemplate.setSize(primaryVol.getSize());
if (primaryPool.getType() == StoragePoolType.RBD ||
primaryPool.getType() == StoragePoolType.PowerFlex ||
primaryPool.getType() == StoragePoolType.Linstor) {
if (List.of(
StoragePoolType.RBD,
StoragePoolType.PowerFlex,
StoragePoolType.Linstor,
StoragePoolType.FiberChannel).contains(primaryPool.getType())) {
newTemplate.setFormat(ImageFormat.RAW);
} else {
newTemplate.setFormat(ImageFormat.QCOW2);
@ -584,7 +587,9 @@ public class KVMStorageProcessor implements StorageProcessor {
public Answer createTemplateFromVolume(final CopyCommand cmd) {
Map<String, String> details = cmd.getOptions();
if (details != null && details.get(DiskTO.IQN) != null) {
// handle cases where the managed storage driver had to make a temporary volume from
// the snapshot in order to support the copy
if (details != null && (details.get(DiskTO.IQN) != null || details.get(DiskTO.PATH) != null)) {
// use the managed-storage approach
return createTemplateFromVolumeOrSnapshot(cmd);
}
@ -712,7 +717,7 @@ public class KVMStorageProcessor implements StorageProcessor {
public Answer createTemplateFromSnapshot(CopyCommand cmd) {
Map<String, String> details = cmd.getOptions();
if (details != null && details.get(DiskTO.IQN) != null) {
if (details != null && (details.get(DiskTO.IQN) != null || details.get(DiskTO.PATH) != null)) {
// use the managed-storage approach
return createTemplateFromVolumeOrSnapshot(cmd);
}
@ -750,12 +755,15 @@ public class KVMStorageProcessor implements StorageProcessor {
KVMStoragePool secondaryStorage = null;
try {
// look for options indicating an overridden path or IQN. Used when snapshots have to be
// temporarily copied on the managed storage device before the actual copy to the target object
Map<String, String> details = cmd.getOptions();
String path = details != null ? details.get(DiskTO.IQN) : null;
String path = details != null ? details.get(DiskTO.PATH) : null;
if (path == null) {
new CloudRuntimeException("The 'path' field must be specified.");
path = details != null ? details.get(DiskTO.IQN) : null;
if (path == null) {
new CloudRuntimeException("The 'path' or 'iqn' field must be specified.");
}
}
storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path, details);
@ -2188,7 +2196,16 @@ public class KVMStorageProcessor implements StorageProcessor {
Map<String, String> details = cmd.getOptions2();
String path = details != null ? details.get(DiskTO.IQN) : null;
String path = cmd.getDestTO().getPath();
if (path == null) {
path = details != null ? details.get(DiskTO.PATH) : null;
if (path == null) {
path = details != null ? details.get(DiskTO.IQN) : null;
if (path == null) {
new CloudRuntimeException("The 'path' or 'iqn' field must be specified.");
}
}
}
storagePoolMgr.connectPhysicalDisk(pool.getPoolType(), pool.getUuid(), path, details);
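The nested null checks above encode a precedence order for locating the disk: the destination TO's own path first, then the PATH option, then the IQN option. A hypothetical helper expressing the same order:

import java.util.Map;

import com.cloud.agent.api.to.DiskTO;

// Hypothetical consolidation of the nested null checks above.
public class PathResolutionSketch {
    static String resolvePath(String destPath, Map<String, String> details) {
        if (destPath != null) {
            return destPath;
        }
        if (details != null && details.get(DiskTO.PATH) != null) {
            return details.get(DiskTO.PATH);
        }
        if (details != null && details.get(DiskTO.IQN) != null) {
            return details.get(DiskTO.IQN);
        }
        throw new IllegalArgumentException("The 'path' or 'iqn' field must be specified.");
    }
}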

View File

@ -0,0 +1,758 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.hypervisor.kvm.storage;
import java.io.BufferedReader;
import java.io.File;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Timer;
import java.util.TimerTask;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
import org.apache.cloudstack.utils.qemu.QemuImg;
import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat;
import org.apache.cloudstack.utils.qemu.QemuImgException;
import org.apache.cloudstack.utils.qemu.QemuImgFile;
import org.apache.log4j.Logger;
import com.cloud.storage.Storage;
import com.cloud.storage.StorageManager;
import com.cloud.utils.PropertiesUtil;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.script.OutputInterpreter;
import com.cloud.utils.script.Script;
import org.apache.commons.lang3.StringUtils;
import org.libvirt.LibvirtException;
import org.joda.time.Duration;
public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
static final Logger LOGGER = Logger.getLogger(MultipathSCSIAdapterBase.class);
static final Map<String, KVMStoragePool> MapStorageUuidToStoragePool = new HashMap<>();
/**
* A lock to avoid any possibility of multiple requests for a scan
*/
static byte[] CLEANUP_LOCK = new byte[0];
/**
* Property keys and defaults
*/
static final Property<Integer> CLEANUP_FREQUENCY_SECS = new Property<Integer>("multimap.cleanup.frequency.secs", 60);
static final Property<Integer> CLEANUP_TIMEOUT_SECS = new Property<Integer>("multimap.cleanup.timeout.secs", 4);
static final Property<Boolean> CLEANUP_ENABLED = new Property<Boolean>("multimap.cleanup.enabled", true);
static final Property<String> CLEANUP_SCRIPT = new Property<String>("multimap.cleanup.script", "cleanStaleMaps.sh");
static final Property<String> CONNECT_SCRIPT = new Property<String>("multimap.connect.script", "connectVolume.sh");
static final Property<String> COPY_SCRIPT = new Property<String>("multimap.copy.script", "copyVolume.sh");
static final Property<String> DISCONNECT_SCRIPT = new Property<String>("multimap.disconnect.script", "disconnectVolume.sh");
static final Property<String> RESIZE_SCRIPT = new Property<String>("multimap.resize.script", "resizeVolume.sh");
static final Property<Integer> DISK_WAIT_SECS = new Property<Integer>("multimap.disk.wait.secs", 240);
static final Property<String> STORAGE_SCRIPTS_DIR = new Property<String>("multimap.storage.scripts.dir", "scripts/storage/multipath");
static Timer cleanupTimer = new Timer();
private static int cleanupTimeoutSecs = CLEANUP_TIMEOUT_SECS.getFinalValue();
private static String connectScript = CONNECT_SCRIPT.getFinalValue();
private static String disconnectScript = DISCONNECT_SCRIPT.getFinalValue();
private static String cleanupScript = CLEANUP_SCRIPT.getFinalValue();
private static String resizeScript = RESIZE_SCRIPT.getFinalValue();
private static String copyScript = COPY_SCRIPT.getFinalValue();
private static int diskWaitTimeSecs = DISK_WAIT_SECS.getFinalValue();
/**
* Initialize static program-wide configurations and background jobs
*/
static {
long cleanupFrequency = CLEANUP_FREQUENCY_SECS.getFinalValue() * 1000;
boolean cleanupEnabled = CLEANUP_ENABLED.getFinalValue();
connectScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), connectScript);
if (connectScript == null) {
throw new Error("Unable to find the connectVolume.sh script");
}
disconnectScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), disconnectScript);
if (disconnectScript == null) {
throw new Error("Unable to find the disconnectVolume.sh script");
}
resizeScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), resizeScript);
if (resizeScript == null) {
throw new Error("Unable to find the resizeVolume.sh script");
}
copyScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), copyScript);
if (copyScript == null) {
throw new Error("Unable to find the copyVolume.sh script");
}
if (cleanupEnabled) {
cleanupScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), cleanupScript);
if (cleanupScript == null) {
throw new Error("Unable to find the cleanStaleMaps.sh script and " + CLEANUP_ENABLED.getName() + " is true");
}
TimerTask task = new TimerTask() {
@Override
public void run() {
try {
MultipathSCSIAdapterBase.cleanupStaleMaps();
} catch (Throwable e) {
LOGGER.warn("Error running stale multipath map cleanup", e);
}
}
};
cleanupTimer = new Timer("MultipathMapCleanupJob");
cleanupTimer.scheduleAtFixedRate(task, 0, cleanupFrequency);
}
}
@Override
public KVMStoragePool getStoragePool(String uuid, boolean refreshInfo) {
return getStoragePool(uuid);
}
public abstract String getName();
public abstract boolean isStoragePoolTypeSupported(Storage.StoragePoolType type);
/**
* We expect WWN values in the volumePath so need to convert it to an actual physical path
*/
public abstract AddressInfo parseAndValidatePath(String path);
@Override
public KVMPhysicalDisk getPhysicalDisk(String volumePath, KVMStoragePool pool) {
LOGGER.debug(String.format("getPhysicalDisk(volumePath,pool) called with args (%s,%s)", volumePath, pool));
if (StringUtils.isEmpty(volumePath) || pool == null) {
LOGGER.error("Unable to get physical disk, volume path or pool not specified");
return null;
}
AddressInfo address = parseAndValidatePath(volumePath);
return getPhysicalDisk(address, pool);
}
private KVMPhysicalDisk getPhysicalDisk(AddressInfo address, KVMStoragePool pool) {
LOGGER.debug(String.format("getPhysicalDisk(addressInfo,pool) called with args (%s,%s)", address.getPath(), pool));
KVMPhysicalDisk disk = new KVMPhysicalDisk(address.getPath(), address.toString(), pool);
disk.setFormat(QemuImg.PhysicalDiskFormat.RAW);
long diskSize = getPhysicalDiskSize(address.getPath());
disk.setSize(diskSize);
disk.setVirtualSize(diskSize);
LOGGER.debug("Physical disk " + disk.getPath() + " with format " + disk.getFormat() + " and size " + disk.getSize() + " provided");
return disk;
}
@Override
public KVMStoragePool createStoragePool(String uuid, String host, int port, String path, String userInfo, Storage.StoragePoolType type, Map<String, String> details) {
LOGGER.info(String.format("createStoragePool(uuid,host,port,path,type) called with args (%s, %s, %s, %s, %s)", uuid, host, ""+port, path, type));
MultipathSCSIPool storagePool = new MultipathSCSIPool(uuid, host, port, path, type, details, this);
MapStorageUuidToStoragePool.put(uuid, storagePool);
return storagePool;
}
@Override
public boolean deleteStoragePool(String uuid) {
return MapStorageUuidToStoragePool.remove(uuid) != null;
}
@Override
public boolean connectPhysicalDisk(String volumePath, KVMStoragePool pool, Map<String, String> details) {
LOGGER.info("connectPhysicalDisk called for [" + volumePath + "]");
if (StringUtils.isEmpty(volumePath)) {
LOGGER.error("Unable to connect physical disk due to insufficient data - volume path is undefined");
throw new CloudRuntimeException("Unable to connect physical disk due to insufficient data - volume path is underfined");
}
if (pool == null) {
LOGGER.error("Unable to connect physical disk due to insufficient data - pool is not set");
throw new CloudRuntimeException("Unable to connect physical disk due to insufficient data - pool is not set");
}
AddressInfo address = this.parseAndValidatePath(volumePath);
int waitTimeInSec = diskWaitTimeSecs;
if (details != null && details.containsKey(StorageManager.STORAGE_POOL_DISK_WAIT.toString())) {
String waitTime = details.get(StorageManager.STORAGE_POOL_DISK_WAIT.toString());
if (StringUtils.isNotEmpty(waitTime)) {
waitTimeInSec = Integer.parseInt(waitTime);
}
}
return waitForDiskToBecomeAvailable(address, pool, waitTimeInSec);
}
@Override
public boolean disconnectPhysicalDisk(String volumePath, KVMStoragePool pool) {
LOGGER.debug(String.format("disconnectPhysicalDiskByPath(volumePath,pool) called with args (%s, %s) START", volumePath, pool.getUuid()));
AddressInfo address = this.parseAndValidatePath(volumePath);
ScriptResult result = runScript(disconnectScript, 60000L, address.getAddress().toLowerCase());
if (LOGGER.isDebugEnabled()) LOGGER.debug("multipath flush output: " + result.getResult());
LOGGER.debug(String.format("disconnectPhysicalDiskByPath(volumePath,pool) called with args (%s, %s) COMPLETE [rc=%s]", volumePath, pool.getUuid(), result.getResult())); return true;
}
@Override
public boolean disconnectPhysicalDisk(Map<String, String> volumeToDisconnect) {
LOGGER.debug(String.format("disconnectPhysicalDiskByPath(volumeToDisconnect) called with arg bag [not implemented]:") + " " + volumeToDisconnect);
return false;
}
@Override
public boolean disconnectPhysicalDiskByPath(String localPath) {
LOGGER.debug(String.format("disconnectPhysicalDiskByPath(localPath) called with args (%s) STARTED", localPath));
ScriptResult result = runScript(disconnectScript, 60000L, localPath.replace("/dev/mapper/3", ""));
if (LOGGER.isDebugEnabled()) LOGGER.debug("multipath flush output: " + result.getResult());
LOGGER.debug(String.format("disconnectPhysicalDiskByPath(localPath) called with args (%s) COMPLETE [rc=%s]", localPath, result.getExitCode())); return true;
}
@Override
public boolean deletePhysicalDisk(String uuid, KVMStoragePool pool, Storage.ImageFormat format) {
LOGGER.info(String.format("deletePhysicalDisk(uuid,pool,format) called with args (%s, %s, %s) [not implemented]", uuid, pool.getUuid(), format.toString()));
return true;
}
@Override
public KVMPhysicalDisk createTemplateFromDisk(KVMPhysicalDisk disk, String name, QemuImg.PhysicalDiskFormat format, long size, KVMStoragePool destPool) {
LOGGER.info(String.format("createTemplateFromDisk(disk,name,format,size,destPool) called with args (%s, %s, %s, %s, %s) [not implemented]", disk.getPath(), name, format.toString(), ""+size, destPool.getUuid()));
return null;
}
@Override
public List<KVMPhysicalDisk> listPhysicalDisks(String storagePoolUuid, KVMStoragePool pool) {
LOGGER.info(String.format("listPhysicalDisks(uuid,pool) called with args (%s, %s) [not implemented]", storagePoolUuid, pool.getUuid()));
return null;
}
@Override
public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMStoragePool destPool, int timeout) {
return copyPhysicalDisk(disk, name, destPool, timeout, null, null, null);
}
@Override
public boolean refresh(KVMStoragePool pool) {
LOGGER.info(String.format("refresh(pool) called with args (%s)", pool.getUuid()));
return true;
}
@Override
public boolean deleteStoragePool(KVMStoragePool pool) {
LOGGER.info(String.format("deleteStroagePool(pool) called with args (%s)", pool.getUuid()));
return deleteStoragePool(pool.getUuid());
}
@Override
public boolean createFolder(String uuid, String path) {
LOGGER.info(String.format("createFolder(uuid,path) called with args (%s, %s) [not implemented]", uuid, path));
return createFolder(uuid, path, null);
}
@Override
public boolean createFolder(String uuid, String path, String localPath) {
LOGGER.info(String.format("createFolder(uuid,path,localPath) called with args (%s, %s, %s) [not implemented]", uuid, path, localPath));
return true;
}
/**
* Validate inputs and return the source file for a template copy
* @param templateFilePath
* @param destTemplatePath
* @param destPool
* @param format
* @return
*/
File createTemplateFromDirectDownloadFileValidate(String templateFilePath, String destTemplatePath, KVMStoragePool destPool, Storage.ImageFormat format) {
if (StringUtils.isAnyEmpty(templateFilePath, destTemplatePath) || destPool == null) {
LOGGER.error("Unable to create template from direct download template file due to insufficient data");
throw new CloudRuntimeException("Unable to create template from direct download template file due to insufficient data");
}
LOGGER.debug("Create template from direct download template - file path: " + templateFilePath + ", dest path: " + destTemplatePath + ", format: " + format.toString());
File sourceFile = new File(templateFilePath);
if (!sourceFile.exists()) {
throw new CloudRuntimeException("Direct download template file " + templateFilePath + " does not exist on this host");
}
if (destTemplatePath == null || destTemplatePath.isEmpty()) {
LOGGER.error("Failed to create template, target template disk path not provided");
throw new CloudRuntimeException("Target template disk path not provided");
}
if (!this.isStoragePoolTypeSupported(destPool.getType())) {
throw new CloudRuntimeException("Unsupported storage pool type: " + destPool.getType().toString());
}
if (!Storage.ImageFormat.RAW.equals(format) && !Storage.ImageFormat.QCOW2.equals(format)) {
LOGGER.error("Failed to create template, unsupported template format: " + format.toString());
throw new CloudRuntimeException("Unsupported template format: " + format.toString());
}
return sourceFile;
}
String extractSourceTemplateIfNeeded(File sourceFile, String templateFilePath) {
String srcTemplateFilePath = templateFilePath;
if (isTemplateExtractable(templateFilePath)) {
srcTemplateFilePath = sourceFile.getParent() + "/" + UUID.randomUUID().toString();
LOGGER.debug("Extract the downloaded template " + templateFilePath + " to " + srcTemplateFilePath);
String extractCommand = getExtractCommandForDownloadedFile(templateFilePath, srcTemplateFilePath);
Script.runSimpleBashScript(extractCommand);
Script.runSimpleBashScript("rm -f " + templateFilePath);
}
return srcTemplateFilePath;
}
QemuImg.PhysicalDiskFormat deriveImgFileFormat(Storage.ImageFormat format) {
if (format == Storage.ImageFormat.RAW) {
return QemuImg.PhysicalDiskFormat.RAW;
} else if (format == Storage.ImageFormat.QCOW2) {
return QemuImg.PhysicalDiskFormat.QCOW2;
} else {
return QemuImg.PhysicalDiskFormat.RAW;
}
}
@Override
public KVMPhysicalDisk createTemplateFromDirectDownloadFile(String templateFilePath, String destTemplatePath, KVMStoragePool destPool, Storage.ImageFormat format, int timeout) {
File sourceFile = createTemplateFromDirectDownloadFileValidate(templateFilePath, destTemplatePath, destPool, format);
LOGGER.debug("Create template from direct download template - file path: " + templateFilePath + ", dest path: " + destTemplatePath + ", format: " + format.toString());
KVMPhysicalDisk sourceDisk = destPool.getPhysicalDisk(sourceFile.getAbsolutePath());
return copyPhysicalDisk(sourceDisk, destTemplatePath, destPool, timeout, null, null, Storage.ProvisioningType.THIN);
}
@Override
public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMStoragePool destPool, int timeout,
byte[] srcPassphrase, byte[] dstPassphrase, Storage.ProvisioningType provisioningType) {
validateForDiskCopy(disk, name, destPool);
LOGGER.info("Copying FROM source physical disk " + disk.getPath() + ", size: " + disk.getSize() + ", virtualsize: " + disk.getVirtualSize()+ ", format: " + disk.getFormat());
KVMPhysicalDisk destDisk = destPool.getPhysicalDisk(name);
if (destDisk == null) {
LOGGER.error("Failed to find the disk: " + name + " of the storage pool: " + destPool.getUuid());
throw new CloudRuntimeException("Failed to find the disk: " + name + " of the storage pool: " + destPool.getUuid());
}
if (srcPassphrase != null || dstPassphrase != null) {
throw new CloudRuntimeException("Storage provider does not support user-space encrypted source or destination volumes");
}
destDisk.setFormat(QemuImg.PhysicalDiskFormat.RAW);
destDisk.setVirtualSize(disk.getVirtualSize());
destDisk.setSize(disk.getSize());
LOGGER.info("Copying TO destination physical disk " + destDisk.getPath() + ", size: " + destDisk.getSize() + ", virtualsize: " + destDisk.getVirtualSize()+ ", format: " + destDisk.getFormat());
QemuImgFile srcFile = new QemuImgFile(disk.getPath(), disk.getFormat());
QemuImgFile destFile = new QemuImgFile(destDisk.getPath(), destDisk.getFormat());
LOGGER.debug("Starting COPY from source downloaded template " + srcFile.getFileName() + " to Primera volume: " + destDisk.getPath());
ScriptResult result = runScript(copyScript, timeout, destDisk.getFormat().toString().toLowerCase(), srcFile.getFileName(), destFile.getFileName());
int rc = result.getExitCode();
if (rc != 0) {
throw new CloudRuntimeException("Failed to convert from " + srcFile.getFileName() + " to " + destFile.getFileName() + " the error was: " + rc + " - " + result.getResult());
}
LOGGER.debug("Successfully converted source downloaded template " + srcFile.getFileName() + " to Primera volume: " + destDisk.getPath() + " " + result.getResult());
return destDisk;
}
void validateForDiskCopy(KVMPhysicalDisk disk, String name, KVMStoragePool destPool) {
if (StringUtils.isEmpty(name) || disk == null || destPool == null) {
LOGGER.error("Unable to copy physical disk due to insufficient data");
throw new CloudRuntimeException("Unable to copy physical disk due to insufficient data");
}
}
/**
* Copy a disk path to another disk path using QemuImg command
* @param disk
* @param destDisk
* @param name
* @param timeout
*/
void qemuCopy(KVMPhysicalDisk disk, KVMPhysicalDisk destDisk, String name, int timeout) {
QemuImg qemu;
try {
qemu = new QemuImg(timeout);
} catch (LibvirtException | QemuImgException e) {
throw new CloudRuntimeException (e);
}
QemuImgFile srcFile = null;
QemuImgFile destFile = null;
try {
srcFile = new QemuImgFile(disk.getPath(), disk.getFormat());
destFile = new QemuImgFile(destDisk.getPath(), destDisk.getFormat());
LOGGER.debug("Starting copy from source disk image " + srcFile.getFileName() + " to volume: " + destDisk.getPath());
qemu.convert(srcFile, destFile, true);
LOGGER.debug("Successfully converted source disk image " + srcFile.getFileName() + " to volume: " + destDisk.getPath());
} catch (QemuImgException | LibvirtException e) {
try {
Map<String, String> srcInfo = qemu.info(srcFile);
LOGGER.debug("Source disk info: " + Arrays.asList(srcInfo));
} catch (Exception ignored) {
LOGGER.warn("Unable to get info from source disk: " + disk.getName());
}
String errMsg = String.format("Unable to convert/copy from %s to %s, due to: %s", disk.getName(), name, ((StringUtils.isEmpty(e.getMessage())) ? "an unknown error" : e.getMessage()));
LOGGER.error(errMsg);
throw new CloudRuntimeException(errMsg, e);
}
}
@Override
public KVMPhysicalDisk createDiskFromTemplate(KVMPhysicalDisk template,
String name, PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size,
KVMStoragePool destPool, int timeout, byte[] passphrase) {
throw new UnsupportedOperationException("Unimplemented method 'createDiskFromTemplate'");
}
@Override
public KVMPhysicalDisk createDiskFromTemplateBacking(KVMPhysicalDisk template,
String name, PhysicalDiskFormat format, long size,
KVMStoragePool destPool, int timeout, byte[] passphrase) {
throw new UnsupportedOperationException("Unimplemented method 'createDiskFromTemplateBacking'");
}
@Override
public KVMPhysicalDisk createPhysicalDisk(String name, KVMStoragePool pool,
PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size, byte[] passphrase) {
throw new UnsupportedOperationException("Unimplemented method 'createPhysicalDisk'");
}
boolean isTemplateExtractable(String templatePath) {
ScriptResult result = runScript("file", 5000L, templatePath, "| awk -F' ' '{print $2}'");
String type = result.getResult();
return type.equalsIgnoreCase("bzip2") || type.equalsIgnoreCase("gzip") || type.equalsIgnoreCase("zip");
}
String getExtractCommandForDownloadedFile(String downloadedTemplateFile, String templateFile) {
if (downloadedTemplateFile.endsWith(".zip")) {
return "unzip -p " + downloadedTemplateFile + " | cat > " + templateFile;
} else if (downloadedTemplateFile.endsWith(".bz2")) {
return "bunzip2 -c " + downloadedTemplateFile + " > " + templateFile;
} else if (downloadedTemplateFile.endsWith(".gz")) {
return "gunzip -c " + downloadedTemplateFile + " > " + templateFile;
} else {
throw new CloudRuntimeException("Unable to extract template " + downloadedTemplateFile);
}
}
private static ScriptResult runScript(String script, long timeout, String... args) {
ScriptResult result = new ScriptResult();
Script cmd = new Script(script, Duration.millis(timeout), LOGGER);
cmd.add(args);
OutputInterpreter.OneLineParser parser = new OutputInterpreter.OneLineParser();
String output = cmd.execute(parser);
// it's possible the process never launches, which causes an NPE on getExitValue below
if (output != null && output.contains("Unable to execute the command")) {
result.setResult(output);
result.setExitCode(-1);
return result;
}
result.setResult(output);
result.setExitCode(cmd.getExitValue());
return result;
}
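/**
* Repeatedly run the connect script and re-check until the physical disk for the given address
* becomes visible to the host, the retry budget is exhausted, or the overall wait time elapses.
*/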
boolean waitForDiskToBecomeAvailable(AddressInfo address, KVMStoragePool pool, long waitTimeInSec) {
LOGGER.debug("Waiting for the volume with id: " + address.getPath() + " of the storage pool: " + pool.getUuid() + " to become available for " + waitTimeInSec + " secs");
long scriptTimeoutSecs = 30; // how long to wait for each script execution to run
long maxTries = 10; // how many times at most to attempt the script
int timeBetweenTries = 1000; // how long to sleep between tries, in milliseconds
// wait at least 60 seconds even if the input was lower
if (waitTimeInSec < 60) {
waitTimeInSec = 60;
}
long waitTimeInMillis = waitTimeInSec * 1000; // how long overall to wait
KVMPhysicalDisk physicalDisk = null;
// Rescan before checking for the physical disk
int tries = 0;
while (waitTimeInMillis > 0 && tries < maxTries) {
tries++;
long start = System.currentTimeMillis();
String lun;
if (address.getConnectionId() == null) {
lun = "-";
} else {
lun = address.getConnectionId();
}
Process p = null;
try {
ProcessBuilder builder = new ProcessBuilder(connectScript, lun, address.getAddress());
p = builder.start();
if (p.waitFor(scriptTimeoutSecs, TimeUnit.SECONDS)) {
int rc = p.exitValue();
StringBuffer output = new StringBuffer();
if (rc == 0) {
BufferedReader input = new BufferedReader(new InputStreamReader(p.getInputStream()));
String line = null;
while ((line = input.readLine()) != null) {
output.append(line);
output.append(" ");
}
physicalDisk = getPhysicalDisk(address, pool);
if (physicalDisk != null && physicalDisk.getSize() > 0) {
LOGGER.debug("Found the volume using id: " + address.getPath() + " of the storage pool: " + pool.getUuid());
return true;
}
break;
} else {
LOGGER.warn("Failure discovering LUN via " + connectScript);
BufferedReader error = new BufferedReader(new InputStreamReader(p.getErrorStream()));
String line = null;
while ((line = error.readLine()) != null) {
LOGGER.warn("error --> " + line);
}
}
} else {
LOGGER.debug("Timeout waiting for " + connectScript + " to complete - try " + tries);
}
} catch (IOException | InterruptedException | IllegalThreadStateException e) {
LOGGER.warn("Problem performing scan on SCSI hosts - try " + tries, e);
} finally {
if (p != null && p.isAlive()) {
p.destroyForcibly();
}
}
long elapsed = System.currentTimeMillis() - start;
waitTimeInMillis = waitTimeInMillis - elapsed;
try {
Thread.sleep(timeBetweenTries);
} catch (Exception ex) {
// don't do anything
}
}
LOGGER.debug("Unable to find the volume with id: " + address.getPath() + " of the storage pool: " + pool.getUuid());
return false;
}
void runConnectScript(String lun, AddressInfo address) {
try {
ProcessBuilder builder = new ProcessBuilder(connectScript, lun, address.getAddress());
Process p = builder.start();
int rc = p.waitFor();
StringBuffer output = new StringBuffer();
if (rc == 0) {
BufferedReader input = new BufferedReader(new InputStreamReader(p.getInputStream()));
String line = null;
while ((line = input.readLine()) != null) {
output.append(line);
output.append(" ");
}
} else {
LOGGER.warn("Failure discovering LUN via " + connectScript);
BufferedReader error = new BufferedReader(new InputStreamReader(p.getErrorStream()));
String line = null;
while ((line = error.readLine()) != null) {
LOGGER.warn("error --> " + line);
}
}
} catch (IOException | InterruptedException e) {
throw new CloudRuntimeException("Problem performing scan on SCSI hosts", e);
}
}
void sleep(long sleepTimeMs) {
try {
Thread.sleep(sleepTimeMs);
} catch (Exception ex) {
// don't do anything
}
}
long getPhysicalDiskSize(String diskPath) {
if (StringUtils.isEmpty(diskPath)) {
return 0;
}
Script diskCmd = new Script("blockdev", LOGGER);
diskCmd.add("--getsize64", diskPath);
OutputInterpreter.OneLineParser parser = new OutputInterpreter.OneLineParser();
String result = diskCmd.execute(parser);
if (result != null) {
LOGGER.debug("Unable to get the disk size at path: " + diskPath);
return 0;
}
Long size = Long.parseLong(parser.getLine());
if (size <= 0) {
// it's possible the device isn't visible on the host yet; rerun the command once
parser = new OutputInterpreter.OneLineParser();
result = diskCmd.execute(parser);
if (result != null) {
LOGGER.debug("Unable to get the disk size at path: " + diskPath);
return 0;
}
size = Long.parseLong(parser.getLine());
}
return size;
}
public void resize(String path, String vmName, long newSize) {
if (LOGGER.isDebugEnabled()) LOGGER.debug("Executing resize of " + path + " to " + newSize + " bytes for VM " + vmName);
// extract wwid
AddressInfo address = parseAndValidatePath(path);
if (address == null || address.getAddress() == null) {
LOGGER.error("Unable to resize volume, address value is not valid");
throw new CloudRuntimeException("Unable to resize volume, address value is not valid");
}
if (LOGGER.isDebugEnabled()) LOGGER.debug(String.format("Running %s %s %s %s", resizeScript, address.getAddress(), vmName, newSize));
// call resizeVolume.sh <wwid>
ScriptResult result = runScript(resizeScript, 60000L, address.getAddress(), vmName, ""+newSize);
if (result.getExitCode() != 0) {
throw new CloudRuntimeException("Failed to resize volume at address " + address.getAddress() + " to " + newSize + " bytes for VM " + vmName + ": " + result.getResult());
}
LOGGER.info("Resize of volume at address " + address.getAddress() + " completed successfully: " + result.getResult());
}
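/** Flush stale multipath maps; serialized via CLEANUP_LOCK so only one cleanup job runs at a time. */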
static void cleanupStaleMaps() {
synchronized(CLEANUP_LOCK) {
long start = System.currentTimeMillis();
ScriptResult result = runScript(cleanupScript, cleanupTimeoutSecs * 1000);
LOGGER.debug("Multipath Cleanup Job elapsed time (ms): "+ (System.currentTimeMillis() - start) + "; result: " + result.getExitCode(), null);
}
}
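/** Parsed address information (type, address such as a WWN, optional connection id, and the original path) for a multipath volume. */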
public static final class AddressInfo {
String type;
String address;
String connectionId;
String path;
public AddressInfo(String type, String address, String connectionId, String path) {
this.type = type;
this.address = address;
this.connectionId = connectionId;
this.path = path;
}
public String getType() {
return type;
}
public String getAddress() {
return address;
}
public String getConnectionId() {
return connectionId;
}
public String getPath() {
return path;
}
public String toString() {
return String.format("type=%s; address=%s; connid=%s", getType(), getAddress(), getConnectionId());
}
}
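/** Typed agent.properties setting with a default value that is used when the file or the key is absent. */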
public static class Property<T> {
private String name;
private T defaultValue;
Property(String name, T value) {
this.name = name;
this.defaultValue = value;
}
public String getName() {
return this.name;
}
public T getDefaultValue() {
return this.defaultValue;
}
public T getFinalValue() {
File agentPropertiesFile = PropertiesUtil.findConfigFile("agent.properties");
if (agentPropertiesFile == null) {
LOGGER.debug(String.format("File [%s] was not found, we will use default defined values. Property [%s]: [%s].", "agent.properties", name, defaultValue));
return defaultValue;
} else {
try {
String configValue = PropertiesUtil.loadFromFile(agentPropertiesFile).getProperty(name);
if (StringUtils.isBlank(configValue)) {
LOGGER.debug(String.format("Property [%s] has empty or null value. Using default value [%s].", name, defaultValue));
return defaultValue;
} else {
if (defaultValue instanceof Integer) {
// Integer.getInteger/Long.getLong look up system properties, not string values; parse the configured string instead
return (T)Integer.valueOf(configValue);
} else if (defaultValue instanceof Long) {
return (T)Long.valueOf(configValue);
} else if (defaultValue instanceof String) {
return (T)configValue;
} else if (defaultValue instanceof Boolean) {
return (T)Boolean.valueOf(configValue);
} else {
return null;
}
}
} catch (IOException e) {
LOGGER.debug(String.format("Failed to get property [%s]. Using default value [%s].", name, defaultValue), e);
return defaultValue;
}
}
}
}
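/** Simple holder for the exit code and captured output of a script execution. */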
public static class ScriptResult {
private int exitCode = -1;
private String result = null;
public int getExitCode() {
return exitCode;
}
public void setExitCode(int exitCode) {
this.exitCode = exitCode;
}
public String getResult() {
return result;
}
public void setResult(String result) {
this.result = result;
}
}
}

View File

@ -0,0 +1,241 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.hypervisor.kvm.storage;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.cloudstack.utils.qemu.QemuImg;
import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat;
import org.joda.time.Duration;
import com.cloud.agent.api.to.HostTO;
import com.cloud.hypervisor.kvm.resource.KVMHABase.HAStoragePool;
import com.cloud.storage.Storage;
import com.cloud.storage.Storage.ProvisioningType;
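/**
* KVMStoragePool implementation backed by multipath SCSI/Fibre Channel devices; volume
* connect/disconnect/lookup operations delegate to the associated StorageAdaptor.
*/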
public class MultipathSCSIPool implements KVMStoragePool {
private String uuid;
private String sourceHost;
private int sourcePort;
private String sourceDir;
private Storage.StoragePoolType storagePoolType;
private StorageAdaptor storageAdaptor;
private long capacity;
private long used;
private long available;
private Map<String, String> details;
public MultipathSCSIPool(String uuid, String host, int port, String path, Storage.StoragePoolType poolType, Map<String, String> poolDetails, StorageAdaptor adaptor) {
this.uuid = uuid;
sourceHost = host;
sourcePort = port;
sourceDir = path;
storagePoolType = poolType;
storageAdaptor = adaptor;
capacity = 0;
used = 0;
available = 0;
details = poolDetails;
}
public MultipathSCSIPool(String uuid, StorageAdaptor adapter) {
this.uuid = uuid;
sourceHost = null;
sourcePort = -1;
sourceDir = null;
storagePoolType = Storage.StoragePoolType.FiberChannel;
details = new HashMap<String,String>();
this.storageAdaptor = adapter;
}
@Override
public KVMPhysicalDisk createPhysicalDisk(String arg0, ProvisioningType arg1, long arg2, byte[] arg3) {
return null;
}
@Override
public KVMPhysicalDisk createPhysicalDisk(String arg0, PhysicalDiskFormat arg1, ProvisioningType arg2, long arg3,
byte[] arg4) {
return null;
}
@Override
public boolean connectPhysicalDisk(String volumeUuid, Map<String, String> details) {
return storageAdaptor.connectPhysicalDisk(volumeUuid, this, details);
}
@Override
public KVMPhysicalDisk getPhysicalDisk(String volumeId) {
return storageAdaptor.getPhysicalDisk(volumeId, this);
}
@Override
public boolean disconnectPhysicalDisk(String volumeUuid) {
return storageAdaptor.disconnectPhysicalDisk(volumeUuid, this);
}
@Override
public boolean deletePhysicalDisk(String volumeUuid, Storage.ImageFormat format) {
return true;
}
@Override
public List<KVMPhysicalDisk> listPhysicalDisks() {
return null;
}
@Override
public String getUuid() {
return uuid;
}
public void setCapacity(long capacity) {
this.capacity = capacity;
}
@Override
public long getCapacity() {
return this.capacity;
}
public void setUsed(long used) {
this.used = used;
}
@Override
public long getUsed() {
return this.used;
}
public void setAvailable(long available) {
this.available = available;
}
@Override
public long getAvailable() {
return this.available;
}
@Override
public boolean refresh() {
return false;
}
@Override
public boolean isExternalSnapshot() {
return true;
}
@Override
public String getLocalPath() {
return null;
}
@Override
public String getSourceHost() {
return this.sourceHost;
}
@Override
public String getSourceDir() {
return this.sourceDir;
}
@Override
public int getSourcePort() {
return this.sourcePort;
}
@Override
public String getAuthUserName() {
return null;
}
@Override
public String getAuthSecret() {
return null;
}
@Override
public Storage.StoragePoolType getType() {
return storagePoolType;
}
@Override
public boolean delete() {
return false;
}
@Override
public QemuImg.PhysicalDiskFormat getDefaultFormat() {
return QemuImg.PhysicalDiskFormat.RAW;
}
@Override
public boolean createFolder(String path) {
return false;
}
@Override
public boolean supportsConfigDriveIso() {
return false;
}
@Override
public Map<String, String> getDetails() {
return this.details;
}
@Override
public boolean isPoolSupportHA() {
return false;
}
@Override
public String getHearthBeatPath() {
return null;
}
@Override
public String createHeartBeatCommand(HAStoragePool primaryStoragePool, String hostPrivateIp,
boolean hostValidation) {
return null;
}
@Override
public String getStorageNodeId() {
return null;
}
@Override
public Boolean checkingHeartBeat(HAStoragePool pool, HostTO host) {
return null;
}
@Override
public Boolean vmActivityCheck(HAStoragePool pool, HostTO host, Duration activityScriptTimeout,
String volumeUUIDListString, String vmActivityCheckPath, long duration) {
return null;
}
public void resize(String path, String vmName, long newSize) {
((MultipathSCSIAdapterBase)storageAdaptor).resize(path, vmName, newSize);
}
}

View File

@ -133,6 +133,9 @@
<module>storage/volume/scaleio</module>
<module>storage/volume/linstor</module>
<module>storage/volume/storpool</module>
<module>storage/volume/adaptive</module>
<module>storage/volume/flasharray</module>
<module>storage/volume/primera</module>
<module>storage/object/minio</module>
<module>storage/object/simulator</module>

View File

@ -0,0 +1,58 @@
# CloudStack Volume Provider Adaptive Plugin Base
The Adaptive Plugin Base is an abstract volume storage provider that
provides a generic implementation for managing volumes that are exposed
to hosts through FiberChannel and similar methods but managed independently
through a storage API or interface. The ProviderAdapter and associated
classes provide an interface, decoupled from the rest of
Cloudstack, that covers exactly the actions needed
to interface with a storage provider. Each storage provider can extend
and implement the ProviderAdapter without needing to understand the internal
logic of volume management, database structure, etc.
## Implement the Provider Interface
To implement a provider, create another module -- or a standalone project --
and implement the following interfaces from the **org.apache.cloudstack.storage.datastore.adapter** package:
1. **ProviderAdapter** - this is the primary interface used to communicate with the storage provider when volume management actions are required.
2. **ProviderAdapterFactory** - the implementation of this class creates the correct ProviderAdapter when needed.
See the Javadoc on each class for further instructions on implementing each function.
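For illustration, a minimal factory might look like the following sketch; the `Example*` class names are hypothetical placeholders rather than classes shipped with this plugin:
```
// Hypothetical sketch of a ProviderAdapterFactory implementation.
// ExampleProviderAdapter is assumed to be your implementation of ProviderAdapter.
package org.apache.cloudstack.storage.datastore.adapter.example;

import java.util.Map;

import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapter;
import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterFactory;

public class ExampleProviderAdapterFactory implements ProviderAdapterFactory {
    @Override
    public String getProviderName() {
        return "Example";
    }

    @Override
    public ProviderAdapter create(String url, Map<String, String> details) {
        // bind a new adapter instance to this array's API endpoint and credentials
        return new ExampleProviderAdapter(url, details);
    }
}
```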
## Implement the Primary Datastore Provider Plugin
Once the provider interface is implemented, extend the **org.apache.cloudstack.storage.datastore.provider.AdaptiveProviderDatastoreProviderImpl** class. The extension only needs a default
constructor that creates an instance of the ProviderAdapterFactory implementation from step 2 above and passes it to the parent constructor, as in the sketch below.
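A minimal sketch, assuming the hypothetical factory from the previous section:
```
// Hypothetical sketch; only the parent class name comes from this plugin.
package org.apache.cloudstack.storage.datastore.provider;

import org.apache.cloudstack.storage.datastore.adapter.example.ExampleProviderAdapterFactory;

public class ExamplePrimaryDatastoreProviderImpl extends AdaptiveProviderDatastoreProviderImpl {
    public ExamplePrimaryDatastoreProviderImpl() {
        // hand the factory to the adaptive base class so it can create adapters on demand
        super(new ExampleProviderAdapterFactory());
    }
}
```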
## Provide the Configuration for the Provider Plugin
Lastly, you need to include a module file and Spring configuration for your Primary Datastore Provider Plugin class so Cloudstack will load it during startup.
### Module Properties
This file hints to Cloudstack that the plugin should be loaded as a module during startup.
```
#resources/META-INF/cloudstack/storage-volume-<providername>/module.properties
name=storage-volume-<providername>
parent=storage
```
### Spring Bean Context Configuration
This tells Spring which provider implementation class to load when bean initialization runs.
```
<!-- resources/META-INF/cloudstack/storage-volume-<providername>/spring-storage-volume-<providername>-context.xml -->
<beans xmlns="http://www.springframework.org/schema/beans"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:context="http://www.springframework.org/schema/context"
xmlns:aop="http://www.springframework.org/schema/aop"
xsi:schemaLocation="http://www.springframework.org/schema/beans
http://www.springframework.org/schema/beans/spring-beans.xsd
http://www.springframework.org/schema/aop http://www.springframework.org/schema/aop/spring-aop.xsd
http://www.springframework.org/schema/context
http://www.springframework.org/schema/context/spring-context.xsd"
>
<bean id="<providername>DataStoreProvider"
class="org.apache.cloudstack.storage.datastore.provider.<providername>PrimaryDatastoreProviderImpl">
</bean>
</beans>
```
## Build and Deploy the Jar
Once you build the new jar, start the Cloudstack Management Server or, if it is a standalone jar, add it to the classpath before starting. You should now have a new storage provider with the designated name once Cloudstack finishes loading
all configured modules.

View File

@ -0,0 +1,62 @@
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<artifactId>cloud-plugin-storage-volume-adaptive</artifactId>
<name>Apache CloudStack Plugin - Storage Volume Adaptive Base Provider</name>
<parent>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloudstack-plugins</artifactId>
<version>4.19.0.0-SNAPSHOT</version>
<relativePath>../../../pom.xml</relativePath>
</parent>
<dependencies>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-engine-storage-volume</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-engine-storage-snapshot</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-plugin-storage-volume-default</artifactId>
<version>${project.version}</version>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<artifactId>maven-surefire-plugin</artifactId>
<executions>
<execution>
<phase>integration-test</phase>
<goals>
<goal>test</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>

View File

@ -0,0 +1,157 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter;
import java.util.Map;
/**
* A simple DataStore adaptive interface. This interface allows the ManagedVolumeDataStoreDriverImpl
* to interact with the external provider without the provider needing to interface with any CloudStack
* objects, factories or database tables, simplifying the implementation and maintenance of the provider
* interface.
*/
public interface ProviderAdapter {
// some common keys across providers. Provider code determines what to do with it
public static final String API_USERNAME_KEY = "api_username";
public static final String API_PASSWORD_KEY = "api_password";
public static final String API_TOKEN_KEY = "api_token";
public static final String API_PRIVATE_KEY = "api_privatekey";
public static final String API_URL_KEY = "api_url";
public static final String API_SKIP_TLS_VALIDATION_KEY = "api_skiptlsvalidation";
// one of: basicauth (default), apitoken, privatekey
public static final String API_AUTHENTICATION_TYPE_KEY = "api_authn_type";
/**
* Refresh the connector with the provided details
* @param details
*/
public void refresh(Map<String,String> details);
/**
* Validate that the adapter is currently connected/configured properly; otherwise throw a RuntimeException
* with information about what is misconfigured
*/
public void validate();
/**
* Forcefully remove/disconnect
*/
public void disconnect();
/**
* Create a new volume on the storage provider
* @param context
* @param volume
* @param diskOffering
* @param sizeInBytes
* @return
*/
public ProviderVolume create(ProviderAdapterContext context, ProviderAdapterDataObject volume, ProviderAdapterDiskOffering diskOffering, long sizeInBytes);
/**
* Attach the volume to the target object for the provided context. Returns the scope-specific connection value (for example, the LUN)
* @param context
* @param request
* @return
*/
public String attach(ProviderAdapterContext context, ProviderAdapterDataObject request);
/**
* Detach the volume from the host(s) in the provided context
* @param context
* @param request
*/
public void detach(ProviderAdapterContext context, ProviderAdapterDataObject request);
/**
* Delete the provided volume/object
* @param context
* @param request
*/
public void delete(ProviderAdapterContext context, ProviderAdapterDataObject request);
/**
* Copy a source object to a destination volume. The source object can be a Volume, Snapshot, or Template
*/
public ProviderVolume copy(ProviderAdapterContext context, ProviderAdapterDataObject sourceVolume, ProviderAdapterDataObject targetVolume);
/**
* Make a device-specific snapshot of the provided volume
*/
public ProviderSnapshot snapshot(ProviderAdapterContext context, ProviderAdapterDataObject sourceVolume, ProviderAdapterDataObject targetSnapshot);
/**
* Revert the snapshot to its base volume. Replaces the base volume with the snapshot point on the storage array
* @param context
* @param request
* @return
*/
public ProviderVolume revert(ProviderAdapterContext context, ProviderAdapterDataObject request);
/**
* Resize a volume
* @param context
* @param request
* @param totalNewSizeInBytes
*/
public void resize(ProviderAdapterContext context, ProviderAdapterDataObject request, long totalNewSizeInBytes);
/**
* Return the managed volume info from storage system.
* @param context
* @param request
* @return ProviderVolume object or null if the object was not found but no errors were encountered.
*/
public ProviderVolume getVolume(ProviderAdapterContext context, ProviderAdapterDataObject request);
/**
* Return the managed snapshot info from storage system
* @param context
* @param request
* @return ProviderSnapshot object or null if the object was not found but no errors were encountered.
*/
public ProviderSnapshot getSnapshot(ProviderAdapterContext context, ProviderAdapterDataObject request);
/**
* Given an array-specific address, find the matching volume information from the array
* @param addressType
* @param address
* @return
*/
public ProviderVolume getVolumeByAddress(ProviderAdapterContext context, ProviderVolume.AddressType addressType, String address);
/**
* Returns stats about the managed storage where the volumes and snapshots are created/managed
* @return
*/
public ProviderVolumeStorageStats getManagedStorageStats();
/**
* Returns stats about a specific volume
* @return
*/
public ProviderVolumeStats getVolumeStats(ProviderAdapterContext context, ProviderAdapterDataObject request);
/**
* Returns true if the given hostname is accessible to the storage provider.
* @param context
* @param hostname
* @return
*/
public boolean canAccessHost(ProviderAdapterContext context, String hostname);
}

View File

@ -0,0 +1,22 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter;
public class ProviderAdapterConstants {
public static final String EXTERNAL_UUID = "external_uuid";
public static final String EXTERNAL_NAME = "external_name";
}

View File

@ -0,0 +1,83 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter;
public class ProviderAdapterContext {
private String domainUuid;
private String domainName;
private Long domainId;
private String zoneUuid;
private String zoneName;
private Long zoneId;
private String accountUuid;
private String accountName;
private Long accountId;
public String getDomainUuid() {
return domainUuid;
}
public void setDomainUuid(String domainUuid) {
this.domainUuid = domainUuid;
}
public String getDomainName() {
return domainName;
}
public void setDomainName(String domainName) {
this.domainName = domainName;
}
public Long getDomainId() {
return domainId;
}
public void setDomainId(Long domainId) {
this.domainId = domainId;
}
public String getZoneUuid() {
return zoneUuid;
}
public void setZoneUuid(String zoneUuid) {
this.zoneUuid = zoneUuid;
}
public String getZoneName() {
return zoneName;
}
public void setZoneName(String zoneName) {
this.zoneName = zoneName;
}
public Long getZoneId() {
return zoneId;
}
public void setZoneId(Long zoneId) {
this.zoneId = zoneId;
}
public String getAccountUuid() {
return accountUuid;
}
public void setAccountUuid(String accountUuid) {
this.accountUuid = accountUuid;
}
public String getAccountName() {
return accountName;
}
public void setAccountName(String accountName) {
this.accountName = accountName;
}
public Long getAccountId() {
return accountId;
}
public void setAccountId(Long accountId) {
this.accountId = accountId;
}
}

View File

@ -0,0 +1,159 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter;
/**
* Represents a translation object for transmitting meta-data about a volume,
* snapshot or template between cloudstack and the storage provider
*/
public class ProviderAdapterDataObject {
public enum Type {
VOLUME(),
SNAPSHOT(),
TEMPLATE(),
ARCHIVE()
}
/**
* The cloudstack UUID of the object
*/
private String uuid;
/**
* The cloudstack name of the object (generated or user provided)
*/
private String name;
/**
* The type of the object
*/
private Type type;
/**
* The internal local ID of the object (not globally unique)
*/
private Long id;
/**
* The external name assigned on the storage array. It may be dynamically
* generated or derived from cloudstack data
*/
private String externalName;
/**
* The external UUID of the object on the storage array. This may be different
* or the same as the cloudstack UUID depending on implementation.
*/
private String externalUuid;
/**
* The internal (non-global) ID of the datastore this object is defined in
*/
private Long dataStoreId;
/**
* The global ID of the datastore this object is defined in
*/
private String dataStoreUuid;
/**
* The name of the data store this object is defined in
*/
private String dataStoreName;
/**
* Represents the device connection id, typically a LUN, used to find the volume in conjunction with Address and AddressType.
*/
private String externalConnectionId;
public String getUuid() {
return uuid;
}
public void setUuid(String uuid) {
this.uuid = uuid;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public Type getType() {
return type;
}
public void setType(Type type) {
this.type = type;
}
public String getExternalName() {
return externalName;
}
public void setExternalName(String externalName) {
this.externalName = externalName;
}
public String getExternalUuid() {
return externalUuid;
}
public void setExternalUuid(String externalUuid) {
this.externalUuid = externalUuid;
}
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public Long getDataStoreId() {
return dataStoreId;
}
public void setDataStoreId(Long dataStoreId) {
this.dataStoreId = dataStoreId;
}
public String getDataStoreUuid() {
return dataStoreUuid;
}
public void setDataStoreUuid(String dataStoreUuid) {
this.dataStoreUuid = dataStoreUuid;
}
public String getDataStoreName() {
return dataStoreName;
}
public void setDataStoreName(String dataStoreName) {
this.dataStoreName = dataStoreName;
}
public String getExternalConnectionId() {
return externalConnectionId;
}
public void setExternalConnectionId(String externalConnectionId) {
this.externalConnectionId = externalConnectionId;
}
}

View File

@ -0,0 +1,194 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter;
import java.util.Date;
import org.apache.commons.lang.NotImplementedException;
import com.cloud.offering.DiskOffering;
/**
* Wrapper Disk Offering that masks the cloudstack-dependent classes from the storage provider code
*/
public class ProviderAdapterDiskOffering {
private ProvisioningType type;
private DiskCacheMode diskCacheMode;
private DiskOffering hiddenDiskOffering;
private State state;
public ProviderAdapterDiskOffering(DiskOffering hiddenDiskOffering) {
this.hiddenDiskOffering = hiddenDiskOffering;
if (hiddenDiskOffering.getProvisioningType() != null) {
this.type = ProvisioningType.getProvisioningType(hiddenDiskOffering.getProvisioningType().toString());
}
if (hiddenDiskOffering.getCacheMode() != null) {
this.diskCacheMode = DiskCacheMode.getDiskCacheMode(hiddenDiskOffering.getCacheMode().toString());
}
if (hiddenDiskOffering.getState() != null) {
this.state = State.valueOf(hiddenDiskOffering.getState().toString());
}
}
public Long getBytesReadRate() {
return hiddenDiskOffering.getBytesReadRate();
}
public Long getBytesReadRateMax() {
return hiddenDiskOffering.getBytesReadRateMax();
}
public Long getBytesReadRateMaxLength() {
return hiddenDiskOffering.getBytesReadRateMaxLength();
}
public Long getBytesWriteRate() {
return hiddenDiskOffering.getBytesWriteRate();
}
public Long getBytesWriteRateMax() {
return hiddenDiskOffering.getBytesWriteRateMax();
}
public Long getBytesWriteRateMaxLength() {
return hiddenDiskOffering.getBytesWriteRateMaxLength();
}
public DiskCacheMode getCacheMode() {
return diskCacheMode;
}
public Date getCreated() {
return hiddenDiskOffering.getCreated();
}
public long getDiskSize() {
return hiddenDiskOffering.getDiskSize();
}
public boolean getDiskSizeStrictness() {
return hiddenDiskOffering.getDiskSizeStrictness();
}
public String getDisplayText() {
return hiddenDiskOffering.getDisplayText();
}
public boolean getEncrypt() {
return hiddenDiskOffering.getEncrypt();
}
public Integer getHypervisorSnapshotReserve() {
return hiddenDiskOffering.getHypervisorSnapshotReserve();
}
public long getId() {
return hiddenDiskOffering.getId();
}
public Long getIopsReadRate() {
return hiddenDiskOffering.getIopsReadRate();
}
public Long getIopsReadRateMax() {
return hiddenDiskOffering.getIopsReadRateMax();
}
public Long getIopsReadRateMaxLength() {
return hiddenDiskOffering.getIopsReadRateMaxLength();
}
public Long getIopsWriteRate() {
return hiddenDiskOffering.getIopsWriteRate();
}
public Long getIopsWriteRateMax() {
return hiddenDiskOffering.getIopsWriteRateMax();
}
public Long getIopsWriteRateMaxLength() {
return hiddenDiskOffering.getIopsWriteRateMaxLength();
}
public Long getMaxIops() {
return hiddenDiskOffering.getMaxIops();
}
public Long getMinIops() {
return hiddenDiskOffering.getMinIops();
}
public String getName() {
return hiddenDiskOffering.getName();
}
public State getState() {
return state;
}
public String getTags() {
return hiddenDiskOffering.getTags();
}
public String[] getTagsArray() {
return hiddenDiskOffering.getTagsArray();
}
public String getUniqueName() {
return hiddenDiskOffering.getUniqueName();
}
public String getUuid() {
return hiddenDiskOffering.getUuid();
}
public ProvisioningType getType() {
return type;
}
public void setType(ProvisioningType type) {
this.type = type;
}
public static enum ProvisioningType {
THIN("thin"),
SPARSE("sparse"),
FAT("fat");
private final String provisionType;
private ProvisioningType(String provisionType){
this.provisionType = provisionType;
}
public String toString(){
return this.provisionType;
}
public static ProvisioningType getProvisioningType(String provisioningType){
if(provisioningType.equals(THIN.provisionType)){
return ProvisioningType.THIN;
} else if(provisioningType.equals(SPARSE.provisionType)){
return ProvisioningType.SPARSE;
} else if (provisioningType.equals(FAT.provisionType)){
return ProvisioningType.FAT;
} else {
throw new NotImplementedException("Invalid provisioning type specified: " + provisioningType);
}
}
}
enum State {
Inactive, Active,
}
enum DiskCacheMode {
NONE("none"), WRITEBACK("writeback"), WRITETHROUGH("writethrough");
private final String _diskCacheMode;
DiskCacheMode(String cacheMode) {
_diskCacheMode = cacheMode;
}
@Override
public String toString() {
return _diskCacheMode;
}
public static DiskCacheMode getDiskCacheMode(String cacheMode) {
if (cacheMode.equals(NONE._diskCacheMode)) {
return NONE;
} else if (cacheMode.equals(WRITEBACK._diskCacheMode)) {
return WRITEBACK;
} else if (cacheMode.equals(WRITETHROUGH._diskCacheMode)) {
return WRITETHROUGH;
} else {
throw new NotImplementedException("Invalid cache mode specified: " + cacheMode);
}
}
};
}

View File

@ -0,0 +1,24 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter;
import java.util.Map;
public interface ProviderAdapterFactory {
public String getProviderName();
public ProviderAdapter create(String url, Map<String, String> details);
}

View File

@ -0,0 +1,28 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter;
public interface ProviderSnapshot extends ProviderVolume {
/**
* Returns true if the provider supports directly attaching the snapshot.
* If false is returned, it indicates that cloudstack needs to perform
* a temporary volume copy prior to copying the snapshot to a new
* volume on another provider
* @return
*/
public Boolean canAttachDirectly();
}

View File

@ -0,0 +1,40 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter;
public interface ProviderVolume {
public Boolean isDestroyed();
public String getId();
public void setId(String id);
public String getName();
public void setName(String name);
public Integer getPriority();
public void setPriority(Integer priority);
public String getState();
public AddressType getAddressType();
public void setAddressType(AddressType addressType);
public String getAddress();
public Long getAllocatedSizeInBytes();
public Long getUsedBytes();
public String getExternalUuid();
public String getExternalName();
public String getExternalConnectionId();
public enum AddressType {
FIBERWWN
}
}

View File

@ -0,0 +1,58 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter;
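/** Generates provider-side object names and descriptive comments from the cloudstack context and object metadata. */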
public class ProviderVolumeNamer {
private static final String SNAPSHOT_PREFIX = "snap";
private static final String VOLUME_PREFIX = "vol";
private static final String TEMPLATE_PREFIX = "tpl";
/** Optional prefix that allows sharing a storage setup between environments, primarily for lab/testing use */
private static final String ENV_PREFIX = System.getProperty("adaptive.storage.provider.envIdentifier");
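/** Generated names have the form &lt;prefix&gt;-&lt;dataStoreId&gt;-&lt;domainId&gt;-&lt;accountId&gt;-&lt;objectId&gt;, with the env identifier prepended when configured. */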
public static String generateObjectName(ProviderAdapterContext context, ProviderAdapterDataObject obj) {
ProviderAdapterDataObject.Type objType = obj.getType();
String prefix = null;
if (objType == ProviderAdapterDataObject.Type.SNAPSHOT) {
prefix = SNAPSHOT_PREFIX;
} else if (objType == ProviderAdapterDataObject.Type.VOLUME) {
prefix = VOLUME_PREFIX;
} else if (objType == ProviderAdapterDataObject.Type.TEMPLATE) {
prefix = TEMPLATE_PREFIX;
} else {
throw new RuntimeException("Unknown ManagedDataObject type provided: " + obj.getType());
}
if (ENV_PREFIX != null) {
prefix = ENV_PREFIX + "-" + prefix;
}
return prefix + "-" + obj.getDataStoreId() + "-" + context.getDomainId() + "-" + context.getAccountId() + "-" + obj.getId();
}
public static String generateObjectComment(ProviderAdapterContext context, ProviderAdapterDataObject obj) {
return "CSInfo [Account=" + context.getAccountName()
+ "; Domain=" + context.getDomainName()
+ "; DomainUUID=" + context.getDomainUuid()
+ "; Account=" + context.getAccountName()
+ "; AccountUUID=" + context.getAccountUuid()
+ "; ObjectEndUserName=" + obj.getName()
+ "; ObjectUUID=" + obj.getUuid() + "]";
}
}

View File

@ -0,0 +1,55 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter;
public class ProviderVolumeStats {
private Long allocatedInBytes;
private Long virtualUsedInBytes;
private Long actualUsedInBytes;
private Long iops;
private Long throughput;
public Long getAllocatedInBytes() {
return allocatedInBytes;
}
public void setAllocatedInBytes(Long allocatedInBytes) {
this.allocatedInBytes = allocatedInBytes;
}
public Long getVirtualUsedInBytes() {
return virtualUsedInBytes;
}
public void setVirtualUsedInBytes(Long virtualUsedInBytes) {
this.virtualUsedInBytes = virtualUsedInBytes;
}
public Long getActualUsedInBytes() {
return actualUsedInBytes;
}
public void setActualUsedInBytes(Long actualUsedInBytes) {
this.actualUsedInBytes = actualUsedInBytes;
}
public Long getIops() {
return iops;
}
public void setIops(Long iops) {
this.iops = iops;
}
public Long getThroughput() {
return throughput;
}
public void setThroughput(Long throughput) {
this.throughput = throughput;
}
}

View File

@ -0,0 +1,71 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter;
public class ProviderVolumeStorageStats {
/**
* Total capacity in bytes of the storage system within the scope of the given API configuration
*/
private long capacityInBytes;
/**
* Virtual amount of bytes allocated for use. Typically what the users of the volume think they have before
* any compression, deduplication, or thin-provisioning semantics are accounted for.
*/
private Long virtualUsedInBytes;
/**
* Actual physical bytes used on the storage system within the scope of the given API configuration
*/
private Long actualUsedInBytes;
/**
* Current IOPS
*/
private Long iops;
/**
* Current raw throughput
*/
private Long throughput;
public Long getVirtualUsedInBytes() {
return virtualUsedInBytes;
}
public void setVirtualUsedInBytes(Long virtualUsedInBytes) {
this.virtualUsedInBytes = virtualUsedInBytes;
}
public Long getActualUsedInBytes() {
return actualUsedInBytes;
}
public void setActualUsedInBytes(Long actualUsedInBytes) {
this.actualUsedInBytes = actualUsedInBytes;
}
public Long getIops() {
return iops;
}
public void setIops(Long iops) {
this.iops = iops;
}
public Long getThroughput() {
return throughput;
}
public void setThroughput(Long throughput) {
this.throughput = throughput;
}
public Long getCapacityInBytes() {
return capacityInBytes;
}
public void setCapacityInBytes(Long capacityInBytes) {
this.capacityInBytes = capacityInBytes;
}
}

View File

@ -0,0 +1,901 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.driver;
import java.util.Map;
import javax.inject.Inject;
import org.apache.log4j.Logger;
import java.util.HashMap;
import java.util.List;
import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult;
import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreCapabilities;
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService;
import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
import org.apache.cloudstack.storage.command.CommandResult;
import org.apache.cloudstack.storage.command.CopyCmdAnswer;
import org.apache.cloudstack.storage.command.CreateObjectAnswer;
import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapter;
import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterConstants;
import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterContext;
import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterDataObject;
import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterDiskOffering;
import org.apache.cloudstack.storage.datastore.adapter.ProviderSnapshot;
import org.apache.cloudstack.storage.datastore.adapter.ProviderVolume;
import org.apache.cloudstack.storage.datastore.adapter.ProviderVolumeStats;
import org.apache.cloudstack.storage.datastore.adapter.ProviderVolumeStorageStats;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.datastore.provider.AdaptivePrimaryDatastoreAdapterFactoryMap;
import org.apache.cloudstack.storage.to.SnapshotObjectTO;
import org.apache.cloudstack.storage.to.VolumeObjectTO;
import org.apache.cloudstack.storage.volume.VolumeObject;
import org.apache.cloudstack.storage.snapshot.SnapshotObject;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.to.DataObjectType;
import com.cloud.agent.api.to.DataStoreTO;
import com.cloud.agent.api.to.DataTO;
import com.cloud.agent.api.to.DiskTO;
import com.cloud.dc.dao.DataCenterDao;
import com.cloud.domain.DomainVO;
import com.cloud.domain.dao.DomainDao;
import com.cloud.host.Host;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.projects.dao.ProjectDao;
import com.cloud.storage.DiskOfferingVO;
import com.cloud.storage.ResizeVolumePayload;
import com.cloud.storage.SnapshotVO;
import com.cloud.storage.Storage.ImageFormat;
import com.cloud.storage.StoragePool;
import com.cloud.storage.VMTemplateStoragePoolVO;
import com.cloud.storage.VMTemplateVO;
import com.cloud.storage.Volume;
import com.cloud.storage.VolumeDetailVO;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.DiskOfferingDao;
import com.cloud.storage.dao.SnapshotDao;
import com.cloud.storage.dao.SnapshotDetailsDao;
import com.cloud.storage.dao.SnapshotDetailsVO;
import com.cloud.storage.dao.VMTemplateDao;
import com.cloud.storage.dao.VMTemplatePoolDao;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.storage.dao.VolumeDetailsDao;
import com.cloud.user.AccountManager;
import com.cloud.user.AccountVO;
import com.cloud.user.dao.AccountDao;
import com.cloud.utils.Pair;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.vm.VirtualMachine;
public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDriverImpl {
static final Logger s_logger = Logger.getLogger(AdaptiveDataStoreDriverImpl.class);
private String providerName = null;
@Inject
AccountManager _accountMgr;
@Inject
DiskOfferingDao _diskOfferingDao;
@Inject
VolumeDao _volumeDao;
@Inject
PrimaryDataStoreDao _storagePoolDao;
@Inject
ProjectDao _projectDao;
@Inject
SnapshotDataStoreDao _snapshotDataStoreDao;
@Inject
SnapshotDetailsDao _snapshotDetailsDao;
@Inject
VolumeDetailsDao _volumeDetailsDao;
@Inject
VMTemplatePoolDao _vmTemplatePoolDao;
@Inject
AccountDao _accountDao;
@Inject
StoragePoolDetailsDao _storagePoolDetailsDao;
@Inject
SnapshotDao _snapshotDao;
@Inject
VMTemplateDao _vmTemplateDao;
@Inject
DataCenterDao _datacenterDao;
@Inject
DomainDao _domainDao;
@Inject
VolumeService _volumeService;
private AdaptivePrimaryDatastoreAdapterFactoryMap _adapterFactoryMap = null;
public AdaptiveDataStoreDriverImpl(AdaptivePrimaryDatastoreAdapterFactoryMap factoryMap) {
this._adapterFactoryMap = factoryMap;
}
@Override
public DataTO getTO(DataObject data) {
return null;
}
@Override
public DataStoreTO getStoreTO(DataStore store) {
return null;
}
public ProviderAdapter getAPI(StoragePool pool, Map<String, String> details) {
return _adapterFactoryMap.getAPI(pool.getUuid(), pool.getStorageProviderName(), details);
}
@Override
public void createAsync(DataStore dataStore, DataObject dataObject,
AsyncCompletionCallback<CreateCmdResult> callback) {
CreateCmdResult result = null;
try {
s_logger.info("Volume creation starting for data store [" + dataStore.getName() +
"] and data object [" + dataObject.getUuid() + "] of type [" + dataObject.getType() + "]");
// requested volume size in bytes; the hypervisor snapshot reserve, if any, is
// expected to have been applied upstream (see getDataObjectSizeIncludingHypervisorSnapshotReserve below)
Long volumeSizeBytes = dataObject.getSize();
// CloudStack talks bytes, Primera talks MiB
StoragePoolVO storagePool = _storagePoolDao.findById(dataStore.getId());
Map<String, String> details = _storagePoolDao.getDetails(storagePool.getId());
ProviderAdapter api = getAPI(storagePool, details);
ProviderAdapterContext context = newManagedVolumeContext(dataObject);
ProviderAdapterDataObject dataIn = newManagedDataObject(dataObject, storagePool);
ProviderAdapterDiskOffering inDiskOffering = null;
// only get the offering if it's a volume type; if it's a template type we skip this.
if (DataObjectType.VOLUME.equals(dataObject.getType())) {
// get the disk offering as provider may need to see details of this to
// provision the correct type of volume
VolumeVO volumeVO = _volumeDao.findById(dataObject.getId());
DiskOfferingVO diskOffering = _diskOfferingDao.findById(volumeVO.getDiskOfferingId());
if (diskOffering.isUseLocalStorage()) {
throw new CloudRuntimeException(
"Disk offering requires local storage but this storage provider does not suppport local storage. Please contact the cloud adminstrator to have the disk offering configuration updated to avoid this conflict.");
}
inDiskOffering = new ProviderAdapterDiskOffering(diskOffering);
}
// if it's a template and it already exists, just return the info -- may mean a previous attempt to
// copy this template failed after volume creation and its state has not advanced yet.
ProviderVolume volume = null;
if (DataObjectType.TEMPLATE.equals(dataObject.getType())) {
volume = api.getVolume(context, dataIn);
if (volume != null) {
s_logger.info("Template volume already exists [" + dataObject.getUuid() + "]");
}
}
// create the volume if it didn't already exist
if (volume == null) {
// clunky - if this fails AND this detail property is set, it means upstream may have already created it
// in VolumeService and DataMotionStrategy tries to do it again before copying...
try {
volume = api.create(context, dataIn, inDiskOffering, volumeSizeBytes);
} catch (Exception e) {
VolumeDetailVO csId = _volumeDetailsDao.findDetail(dataObject.getId(), "cloneOfTemplate");
if (csId != null && csId.getId() > 0) {
volume = api.getVolume(context, dataIn);
} else {
throw e;
}
}
s_logger.info("New volume created on remote storage for [" + dataObject.getUuid() + "]");
}
// set these from the discovered or created volume before proceeding
dataIn.setExternalName(volume.getExternalName());
dataIn.setExternalUuid(volume.getExternalUuid());
// add the volume to the host set
String connectionId = api.attach(context, dataIn);
// update the cloudstack metadata about the volume
persistVolumeOrTemplateData(storagePool, details, dataObject, volume, connectionId);
result = new CreateCmdResult(dataObject.getUuid(), new Answer(null));
result.setSuccess(true);
s_logger.info("Volume creation complete for [" + dataObject.getUuid() + "]");
} catch (Throwable e) {
s_logger.error("Volume creation failed for dataObject [" + dataObject.getUuid() + "]: " + e.toString(), e);
result = new CreateCmdResult(null, new Answer(null));
result.setResult(e.toString());
result.setSuccess(false);
throw new CloudRuntimeException(e.getMessage());
} finally {
if (callback != null)
callback.complete(result);
}
}
@Override
public void deleteAsync(DataStore dataStore, DataObject dataObject,
AsyncCompletionCallback<CommandResult> callback) {
s_logger.debug("Delete volume started");
CommandResult result = new CommandResult();
try {
StoragePoolVO storagePool = _storagePoolDao.findById(dataStore.getId());
Map<String, String> details = _storagePoolDao.getDetails(storagePool.getId());
ProviderAdapter api = getAPI(storagePool, details);
ProviderAdapterContext context = newManagedVolumeContext(dataObject);
ProviderAdapterDataObject inData = newManagedDataObject(dataObject, storagePool);
// skip the adapter delete if neither external identifier is set; this likely means
// the volume create failed before these could be set
if (!(inData.getExternalName() == null && inData.getExternalUuid() == null)) {
api.delete(context, inData);
}
result.setResult("Successfully deleted volume");
result.setSuccess(true);
} catch (Throwable e) {
s_logger.error("Result to volume delete failed with exception", e);
result.setResult(e.toString());
} finally {
if (callback != null)
callback.complete(result);
}
}
@Override
public void copyAsync(DataObject srcdata, DataObject destdata,
AsyncCompletionCallback<CopyCommandResult> callback) {
CopyCommandResult result = null;
try {
s_logger.info("Copying volume " + srcdata.getUuid() + " to " + destdata.getUuid() + "]");
if (!canCopy(srcdata, destdata)) {
throw new CloudRuntimeException(
"The data store provider is unable to perform copy operations because the source or destination object is not the correct type of volume");
}
try {
StoragePoolVO storagePool = _storagePoolDao.findById(srcdata.getDataStore().getId());
Map<String, String> details = _storagePoolDao.getDetails(storagePool.getId());
ProviderAdapter api = getAPI(storagePool, details);
s_logger.info("Copy volume " + srcdata.getUuid() + " to " + destdata.getUuid());
ProviderVolume outVolume;
ProviderAdapterContext context = newManagedVolumeContext(destdata);
ProviderAdapterDataObject sourceIn = newManagedDataObject(srcdata, storagePool);
ProviderAdapterDataObject destIn = newManagedDataObject(destdata, storagePool);
outVolume = api.copy(context, sourceIn, destIn);
// populate this data - it may be needed later
destIn.setExternalName(outVolume.getExternalName());
destIn.setExternalConnectionId(outVolume.getExternalConnectionId());
destIn.setExternalUuid(outVolume.getExternalUuid());
// if we copied from one volume to another, the target volume's disk offering or user input may call for a larger size;
// we won't, however, shrink the volume if the target size is smaller.
if (outVolume.getAllocatedSizeInBytes() < destdata.getSize()) {
s_logger.info("Resizing volume " + destdata.getUuid() + " to requested target volume size of " + destdata.getSize());
api.resize(context, destIn, destdata.getSize());
}
String connectionId = api.attach(context, destIn);
String finalPath;
// format: type=fiberwwn; address=<address>; connid=<connid>
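// e.g. (illustrative values) "type=FIBERWWN; address=60002ac0000000000000000f00026be2; connid=2"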
if (connectionId != null) {
finalPath = String.format("type=%s; address=%s; connid=%s", outVolume.getAddressType().toString(), outVolume.getAddress().toLowerCase(), connectionId);
} else {
finalPath = String.format("type=%s; address=%s;", outVolume.getAddressType().toString(), outVolume.getAddress().toLowerCase());
}
persistVolumeData(storagePool, details, destdata, outVolume, connectionId);
s_logger.info("Copy completed from [" + srcdata.getUuid() + "] to [" + destdata.getUuid() + "]");
VolumeObjectTO voto = new VolumeObjectTO();
voto.setPath(finalPath);
result = new CopyCommandResult(finalPath, new CopyCmdAnswer(voto));
result.setSuccess(true);
} catch (Throwable e) {
s_logger.error("Result to volume copy failed with exception", e);
result = new CopyCommandResult(null, null);
result.setSuccess(false);
result.setResult(e.toString());
}
} finally {
if (callback != null)
callback.complete(result);
}
}
@Override
public void copyAsync(DataObject srcData, DataObject destData, Host destHost,
AsyncCompletionCallback<CopyCommandResult> callback) {
copyAsync(srcData, destData, callback);
}
@Override
public boolean canCopy(DataObject srcData, DataObject destData) {
s_logger.debug("canCopy: Checking srcData [" + srcData.getUuid() + ":" + srcData.getType() + ":"
+ srcData.getDataStore().getId() + " AND destData ["
+ destData.getUuid() + ":" + destData.getType() + ":" + destData.getDataStore().getId() + "]");
try {
if (!isSameProvider(srcData)) {
s_logger.debug("canCopy: No we can't -- the source provider is NOT the correct type for this driver!");
return false;
}
if (!isSameProvider(destData)) {
s_logger.debug("canCopy: No we can't -- the destination provider is NOT the correct type for this driver!");
return false;
}
s_logger.debug(
"canCopy: Source and destination are the same so we can copy via storage endpoint, checking that the source actually exists");
StoragePoolVO poolVO = _storagePoolDao.findById(srcData.getDataStore().getId());
Map<String, String> details = _storagePoolDao.getDetails(srcData.getDataStore().getId());
ProviderAdapter api = getAPI(poolVO, details);
/**
* The storage provider generates its own names for snapshots which we store and
* retrieve when needed
*/
ProviderAdapterContext context = newManagedVolumeContext(srcData);
ProviderAdapterDataObject srcDataObject = newManagedDataObject(srcData, poolVO);
if (srcData instanceof SnapshotObject) {
ProviderSnapshot snapshot = api.getSnapshot(context, srcDataObject);
return (snapshot != null);
} else {
ProviderVolume vol = api.getVolume(context, srcDataObject);
return (vol != null);
}
} catch (Throwable e) {
s_logger.warn("Problem checking if we canCopy", e);
return false;
}
}
@Override
public void resize(DataObject data, AsyncCompletionCallback<CreateCmdResult> callback) {
s_logger.debug("Resize volume started");
CreateCmdResult result = null;
try {
VolumeObject vol = (VolumeObject) data;
StoragePool pool = (StoragePool) data.getDataStore();
ResizeVolumePayload resizeParameter = (ResizeVolumePayload) vol.getpayload();
StoragePoolVO poolVO = _storagePoolDao.findById(pool.getId());
if (!(poolVO.isManaged())) {
super.resize(data, callback);
return;
}
try {
Map<String, String> details = _storagePoolDao.getDetails(pool.getId());
ProviderAdapter api = getAPI(pool, details);
// the provider doesn't support shrinking a volume (truncation may be possible
// but would require separate API calls to investigate)
if (vol.getSize() > resizeParameter.newSize) {
throw new CloudRuntimeException("Storage provider does not support shrinking an existing volume");
}
ProviderAdapterContext context = newManagedVolumeContext(data);
ProviderAdapterDataObject dataIn = newManagedDataObject(data, poolVO);
if (s_logger.isDebugEnabled()) s_logger.debug("Calling provider API to resize volume " + data.getUuid() + " to " + resizeParameter.newSize);
api.resize(context, dataIn, resizeParameter.newSize);
if (vol.isAttachedVM()) {
if (VirtualMachine.State.Running.equals(vol.getAttachedVM().getState())) {
if (s_logger.isDebugEnabled()) s_logger.debug("Notify currently attached VM of volume resize for " + data.getUuid() + " to " + resizeParameter.newSize);
_volumeService.resizeVolumeOnHypervisor(vol.getId(), resizeParameter.newSize, vol.getAttachedVM().getHostId(), vol.getAttachedVM().getInstanceName());
}
}
result = new CreateCmdResult(data.getUuid(), new Answer(null));
result.setSuccess(true);
} catch (Throwable e) {
s_logger.error("Resize volume failed, please contact cloud support.", e);
result = new CreateCmdResult(null, new Answer(null));
result.setResult(e.toString());
result.setSuccess(false);
}
} finally {
if (callback != null)
callback.complete(result);
}
}
@Override
public void handleQualityOfServiceForVolumeMigration(VolumeInfo volumeInfo,
QualityOfServiceState qualityOfServiceState) {
s_logger.info("handleQualityOfServiceVolumeMigration: " + volumeInfo.getUuid() + " " +
volumeInfo.getPath() + ": " + qualityOfServiceState.toString());
}
@Override
public long getDataObjectSizeIncludingHypervisorSnapshotReserve(DataObject dataObject, StoragePool pool) {
VolumeInfo volume = (VolumeInfo) dataObject;
long volumeSize = volume.getSize();
Integer hypervisorSnapshotReserve = volume.getHypervisorSnapshotReserve();
if (hypervisorSnapshotReserve != null) {
if (hypervisorSnapshotReserve < 25) {
hypervisorSnapshotReserve = 25;
}
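// e.g. (illustrative) a reserve below 25% is floored to 25%, so a 100 GiB
// volume is requested from the array as at least 125 GiB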
volumeSize += volumeSize * (hypervisorSnapshotReserve / 100f);
}
return volumeSize;
}
@Override
public ChapInfo getChapInfo(DataObject dataObject) {
return null;
}
@Override
public void takeSnapshot(SnapshotInfo snapshot, AsyncCompletionCallback<CreateCmdResult> callback) {
CreateCmdResult result = null;
try {
s_logger.debug("taking volume snapshot");
SnapshotObjectTO snapshotTO = (SnapshotObjectTO) snapshot.getTO();
VolumeInfo baseVolume = snapshot.getBaseVolume();
DataStore ds = baseVolume.getDataStore();
StoragePoolVO storagePool = _storagePoolDao.findById(ds.getId());
Map<String, String> details = _storagePoolDao.getDetails(ds.getId());
ProviderAdapter api = getAPI(storagePool, details);
ProviderAdapterContext context = newManagedVolumeContext(snapshot);
ProviderAdapterDataObject inVolumeDO = newManagedDataObject(baseVolume, storagePool);
ProviderAdapterDataObject inSnapshotDO = newManagedDataObject(snapshot, storagePool);
ProviderSnapshot outSnapshot = api.snapshot(context, inVolumeDO, inSnapshotDO);
// add the snapshot to the host group (needed for copying to non-provider storage
// to create templates, etc)
String connectionId = null;
String finalAddress = outSnapshot.getAddress();
if (outSnapshot.canAttachDirectly()) {
connectionId = api.attach(context, inSnapshotDO);
if (connectionId != null) {
finalAddress = finalAddress + "::" + connectionId;
}
}
snapshotTO.setPath(finalAddress);
snapshotTO.setName(outSnapshot.getName());
snapshotTO.setHypervisorType(HypervisorType.KVM);
// unclear why this is needed in addition to snapshotTO.setPath, but without it the path on
// the target snapshot object isn't set, so a volume created from it also has no path
// and can't be attached to a VM
SnapshotDetailsVO snapshotDetail = new SnapshotDetailsVO(snapshot.getId(),
DiskTO.PATH, finalAddress, true);
_snapshotDetailsDao.persist(snapshotDetail);
// save the name (reuse on revert)
snapshotDetail = new SnapshotDetailsVO(snapshot.getId(),
ProviderAdapterConstants.EXTERNAL_NAME, outSnapshot.getExternalName(), true);
_snapshotDetailsDao.persist(snapshotDetail);
// save the uuid (reuse on revert)
snapshotDetail = new SnapshotDetailsVO(snapshot.getId(),
ProviderAdapterConstants.EXTERNAL_UUID, outSnapshot.getExternalUuid(), true);
_snapshotDetailsDao.persist(snapshotDetail);
result = new CreateCmdResult(finalAddress, new CreateObjectAnswer(snapshotTO));
result.setResult("Snapshot completed with new WWN " + finalAddress);
result.setSuccess(true);
} catch (Throwable e) {
s_logger.debug("Failed to take snapshot: " + e.getMessage());
result = new CreateCmdResult(null, null);
result.setResult(e.toString());
} finally {
if (callback != null)
callback.complete(result);
}
}
@Override
public void revertSnapshot(SnapshotInfo snapshot, SnapshotInfo snapshotOnPrimaryStore,
AsyncCompletionCallback<CommandResult> callback) {
CommandResult result = new CommandResult();
ProviderAdapter api = null;
try {
DataStore ds = snapshotOnPrimaryStore.getDataStore();
StoragePoolVO storagePool = _storagePoolDao.findById(ds.getId());
Map<String, String> details = _storagePoolDao.getDetails(ds.getId());
api = getAPI(storagePool, details);
String externalName = null;
String externalUuid = null;
List<SnapshotDetailsVO> list = _snapshotDetailsDao.findDetails(snapshot.getId(),
ProviderAdapterConstants.EXTERNAL_NAME);
if (list != null && list.size() > 0) {
externalName = list.get(0).getValue();
}
list = _snapshotDetailsDao.findDetails(snapshot.getId(), ProviderAdapterConstants.EXTERNAL_UUID);
if (list != null && list.size() > 0) {
externalUuid = list.get(0).getValue();
}
ProviderAdapterContext context = newManagedVolumeContext(snapshot);
ProviderAdapterDataObject inSnapshotDO = newManagedDataObject(snapshot, storagePool);
inSnapshotDO.setExternalName(externalName);
inSnapshotDO.setExternalUuid(externalUuid);
// perform promote (async, wait for job to finish)
api.revert(context, inSnapshotDO);
// set command as success
result.setSuccess(true);
} catch (Throwable e) {
s_logger.warn("revertSnapshot failed", e);
result.setResult(e.toString());
result.setSuccess(false);
} finally {
if (callback != null)
callback.complete(result);
}
}
@Override
public long getUsedBytes(StoragePool storagePool) {
long usedSpaceBytes = 0;
// Volumes
List<VolumeVO> volumes = _volumeDao.findByPoolIdAndState(storagePool.getId(), Volume.State.Ready);
if (volumes != null) {
for (VolumeVO volume : volumes) {
usedSpaceBytes += volume.getSize();
long vmSnapshotChainSize = volume.getVmSnapshotChainSize() == null ? 0
: volume.getVmSnapshotChainSize();
usedSpaceBytes += vmSnapshotChainSize;
}
}
// Snapshots
List<SnapshotDataStoreVO> snapshots = _snapshotDataStoreDao.listByStoreIdAndState(storagePool.getId(),
ObjectInDataStoreStateMachine.State.Ready);
if (snapshots != null) {
for (SnapshotDataStoreVO snapshot : snapshots) {
usedSpaceBytes += snapshot.getSize();
}
}
// Templates
List<VMTemplateStoragePoolVO> templates = _vmTemplatePoolDao.listByPoolIdAndState(storagePool.getId(),
ObjectInDataStoreStateMachine.State.Ready);
if (templates != null) {
for (VMTemplateStoragePoolVO template : templates) {
usedSpaceBytes += template.getTemplateSize();
}
}
s_logger.debug("Used/Allocated storage space (in bytes): " + String.valueOf(usedSpaceBytes));
return usedSpaceBytes;
}
@Override
public long getUsedIops(StoragePool storagePool) {
return super.getUsedIops(storagePool);
}
@Override
public Map<String, String> getCapabilities() {
Map<String, String> mapCapabilities = new HashMap<String, String>();
mapCapabilities.put(DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT.toString(), Boolean.TRUE.toString());
mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_SNAPSHOT.toString(), Boolean.TRUE.toString());
mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_VOLUME.toString(), Boolean.TRUE.toString());
mapCapabilities.put(DataStoreCapabilities.CAN_REVERT_VOLUME_TO_SNAPSHOT.toString(), Boolean.TRUE.toString());
// indicates the datastore can create temporary volumes for use when copying
// data from a snapshot
mapCapabilities.put("CAN_CREATE_TEMP_VOLUME_FROM_SNAPSHOT", Boolean.TRUE.toString());
return mapCapabilities;
}
@Override
public boolean canProvideStorageStats() {
return true;
}
@Override
public Pair<Long, Long> getStorageStats(StoragePool storagePool) {
Map<String, String> details = _storagePoolDao.getDetails(storagePool.getId());
String capacityBytesStr = details.get("capacityBytes");
Long capacityBytes = null;
if (capacityBytesStr == null) {
ProviderAdapter api = getAPI(storagePool, details);
ProviderVolumeStorageStats stats = api.getManagedStorageStats();
if (stats == null) {
return null;
}
capacityBytes = stats.getCapacityInBytes();
} else {
capacityBytes = Long.parseLong(capacityBytesStr);
}
Long usedBytes = this.getUsedBytes(storagePool);
return new Pair<Long, Long>(capacityBytes, usedBytes);
}
@Override
public boolean canProvideVolumeStats() {
return true;
}
public String getProviderName() {
return providerName;
}
public void setProviderName(String providerName) {
this.providerName = providerName;
}
@Override
public Pair<Long, Long> getVolumeStats(StoragePool storagePool, String volumePath) {
Map<String, String> details = _storagePoolDao.getDetails(storagePool.getId());
ProviderAdapter api = getAPI(storagePool, details);
ProviderVolume.AddressType addressType = null;
if (volumePath.indexOf(";") > 1) {
String[] fields = volumePath.split(";");
if (fields.length > 0) {
for (String field: fields) {
if (field.trim().startsWith("address=")) {
String[] toks = field.split("=");
if (toks.length > 1) {
volumePath = toks[1];
}
} else if (field.trim().startsWith("type=")) {
String[] toks = field.split("=");
if (toks.length > 1) {
addressType = ProviderVolume.AddressType.valueOf(toks[1]);
}
}
}
}
} else {
addressType = ProviderVolume.AddressType.FIBERWWN;
}
// limited context since this is not at an account level
ProviderAdapterContext context = new ProviderAdapterContext();
context.setZoneId(storagePool.getDataCenterId());
ProviderVolume volume = api.getVolumeByAddress(context, addressType, volumePath);
if (volume == null) {
return null;
}
ProviderAdapterDataObject object = new ProviderAdapterDataObject();
object.setExternalUuid(volume.getExternalUuid());
object.setExternalName(volume.getExternalName());
object.setType(ProviderAdapterDataObject.Type.VOLUME);
ProviderVolumeStats stats = api.getVolumeStats(context, object);
Long provisionedSizeInBytes = stats.getActualUsedInBytes();
Long allocatedSizeInBytes = stats.getAllocatedInBytes();
if (provisionedSizeInBytes == null || allocatedSizeInBytes == null) {
return null;
}
return new Pair<Long, Long>(provisionedSizeInBytes, allocatedSizeInBytes);
}
@Override
public boolean canHostAccessStoragePool(Host host, StoragePool pool) {
Map<String, String> details = _storagePoolDao.getDetails(pool.getId());
ProviderAdapter api = getAPI(pool, details);
ProviderAdapterContext context = new ProviderAdapterContext();
context.setZoneId(host.getDataCenterId());
return api.canAccessHost(context, host.getName());
}
void persistVolumeOrTemplateData(StoragePoolVO storagePool, Map<String, String> storagePoolDetails,
DataObject dataObject, ProviderVolume volume, String connectionId) {
if (dataObject.getType() == DataObjectType.VOLUME) {
persistVolumeData(storagePool, storagePoolDetails, dataObject, volume, connectionId);
} else if (dataObject.getType() == DataObjectType.TEMPLATE) {
persistTemplateData(storagePool, storagePoolDetails, dataObject, volume, connectionId);
}
}
void persistVolumeData(StoragePoolVO storagePool, Map<String, String> details, DataObject dataObject,
ProviderVolume managedVolume, String connectionId) {
VolumeVO volumeVO = _volumeDao.findById(dataObject.getId());
// if it's null, check whether the storage provider returned one that is already set
if (connectionId == null) {
connectionId = managedVolume.getExternalConnectionId();
}
String finalPath;
// format: type=fiberwwn; address=<address>; connid=<connid>
if (connectionId != null) {
finalPath = String.format("type=%s; address=%s; connid=%s", managedVolume.getAddressType().toString(), managedVolume.getAddress().toLowerCase(), connectionId);
} else {
finalPath = String.format("type=%s; address=%s;", managedVolume.getAddressType().toString(), managedVolume.getAddress().toLowerCase());
}
volumeVO.setPath(finalPath);
volumeVO.setFormat(ImageFormat.RAW);
volumeVO.setPoolId(storagePool.getId());
volumeVO.setExternalUuid(managedVolume.getExternalUuid());
volumeVO.setDisplay(true);
volumeVO.setDisplayVolume(true);
_volumeDao.update(volumeVO.getId(), volumeVO);
volumeVO = _volumeDao.findById(volumeVO.getId());
VolumeDetailVO volumeDetailVO = new VolumeDetailVO(volumeVO.getId(),
DiskTO.PATH, finalPath, true);
_volumeDetailsDao.persist(volumeDetailVO);
volumeDetailVO = new VolumeDetailVO(volumeVO.getId(),
ProviderAdapterConstants.EXTERNAL_NAME, managedVolume.getExternalName(), true);
_volumeDetailsDao.persist(volumeDetailVO);
volumeDetailVO = new VolumeDetailVO(volumeVO.getId(),
ProviderAdapterConstants.EXTERNAL_UUID, managedVolume.getExternalUuid(), true);
_volumeDetailsDao.persist(volumeDetailVO);
}
void persistTemplateData(StoragePoolVO storagePool, Map<String, String> details, DataObject dataObject,
ProviderVolume volume, String connectionId) {
TemplateInfo templateInfo = (TemplateInfo) dataObject;
VMTemplateStoragePoolVO templatePoolRef = _vmTemplatePoolDao.findByPoolTemplate(storagePool.getId(),
templateInfo.getId(), null);
// template pool ref doesn't have a details object so we'll save:
// 1. external name ==> installPath
// 2. address ==> local download path
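// e.g. (illustrative) installPath = "type=FIBERWWN; address=60002ac000000000000000120002a3b1"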
if (connectionId == null) {
templatePoolRef.setInstallPath(String.format("type=%s; address=%s", volume.getAddressType().toString(),
volume.getAddress().toLowerCase()));
} else {
templatePoolRef.setInstallPath(String.format("type=%s; address=%s; connid=%s", volume.getAddressType().toString(),
volume.getAddress().toLowerCase(), connectionId));
}
templatePoolRef.setLocalDownloadPath(volume.getExternalName());
templatePoolRef.setTemplateSize(volume.getAllocatedSizeInBytes());
_vmTemplatePoolDao.update(templatePoolRef.getId(), templatePoolRef);
}
ProviderAdapterContext newManagedVolumeContext(DataObject obj) {
ProviderAdapterContext ctx = new ProviderAdapterContext();
if (obj instanceof VolumeInfo) {
VolumeVO vol = _volumeDao.findById(obj.getId());
ctx.setAccountId(vol.getAccountId());
ctx.setDomainId(vol.getDomainId());
} else if (obj instanceof SnapshotInfo) {
SnapshotVO snap = _snapshotDao.findById(obj.getId());
ctx.setAccountId(snap.getAccountId());
ctx.setDomainId(snap.getDomainId());
} else if (obj instanceof TemplateInfo) {
VMTemplateVO template = _vmTemplateDao.findById(obj.getId());
ctx.setAccountId(template.getAccountId());
// templates don't have a domain ID so always set to 0
ctx.setDomainId(0L);
}
if (ctx.getAccountId() != null) {
AccountVO acct = _accountDao.findById(ctx.getAccountId());
if (acct != null) {
ctx.setAccountUuid(acct.getUuid());
ctx.setAccountName(acct.getName());
}
}
if (ctx.getDomainId() != null) {
DomainVO domain = _domainDao.findById(ctx.getDomainId());
if (domain != null) {
ctx.setDomainUuid(domain.getUuid());
ctx.setDomainName(domain.getName());
}
}
return ctx;
}
boolean isSameProvider(DataObject obj) {
StoragePoolVO storagePool = this._storagePoolDao.findById(obj.getDataStore().getId());
if (storagePool != null && storagePool.getStorageProviderName().equals(this.getProviderName())) {
return true;
} else {
return false;
}
}
ProviderAdapterDataObject newManagedDataObject(DataObject data, StoragePool storagePool) {
ProviderAdapterDataObject dataIn = new ProviderAdapterDataObject();
if (data instanceof VolumeInfo) {
List<VolumeDetailVO> list = _volumeDetailsDao.findDetails(data.getId(),
ProviderAdapterConstants.EXTERNAL_NAME);
String externalName = null;
if (list != null && list.size() > 0) {
externalName = list.get(0).getValue();
}
list = _volumeDetailsDao.findDetails(data.getId(), ProviderAdapterConstants.EXTERNAL_UUID);
String externalUuid = null;
if (list != null && list.size() > 0) {
externalUuid = list.get(0).getValue();
}
dataIn.setName(((VolumeInfo) data).getName());
dataIn.setExternalName(externalName);
dataIn.setExternalUuid(externalUuid);
} else if (data instanceof SnapshotInfo) {
List<SnapshotDetailsVO> list = _snapshotDetailsDao.findDetails(data.getId(),
ProviderAdapterConstants.EXTERNAL_NAME);
String externalName = null;
if (list != null && list.size() > 0) {
externalName = list.get(0).getValue();
}
list = _snapshotDetailsDao.findDetails(data.getId(), ProviderAdapterConstants.EXTERNAL_UUID);
String externalUuid = null;
if (list != null && list.size() > 0) {
externalUuid = list.get(0).getValue();
}
dataIn = new ProviderAdapterDataObject();
dataIn.setName(((SnapshotInfo) data).getName());
dataIn.setExternalName(externalName);
dataIn.setExternalUuid(externalUuid);
} else if (data instanceof TemplateInfo) {
TemplateInfo ti = (TemplateInfo)data;
dataIn.setName(ti.getName());
VMTemplateStoragePoolVO templatePoolRef = _vmTemplatePoolDao.findByPoolTemplate(storagePool.getId(), ti.getId(), null);
dataIn.setExternalName(templatePoolRef.getLocalDownloadPath());
}
dataIn.setId(data.getId());
dataIn.setDataStoreId(data.getDataStore().getId());
dataIn.setDataStoreUuid(data.getDataStore().getUuid());
dataIn.setDataStoreName(data.getDataStore().getName());
dataIn.setUuid(data.getUuid());
dataIn.setType(ProviderAdapterDataObject.Type.valueOf(data.getType().toString()));
return dataIn;
}
}
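Because the "type=...; address=...; connid=..." path format recurs above (copyAsync, getVolumeStats, persistVolumeData), here is a small self-contained sketch of parsing such a path back into its fields; the helper class and method names are illustrative and not part of this PR:

import java.util.HashMap;
import java.util.Map;

class VolumePathFields {
    // Parses "type=FIBERWWN; address=<wwn>; connid=<id>" into a key/value map.
    static Map<String, String> parse(String path) {
        Map<String, String> fields = new HashMap<>();
        for (String field : path.split(";")) {
            String[] toks = field.trim().split("=", 2);
            if (toks.length == 2) {
                fields.put(toks[0].trim(), toks[1].trim());
            }
        }
        return fields;
    }
}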

View File

@ -0,0 +1,407 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.lifecycle;
import java.io.UnsupportedEncodingException;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import javax.inject.Inject;
import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters;
import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapter;
import org.apache.cloudstack.storage.datastore.adapter.ProviderVolumeStorageStats;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.datastore.provider.AdaptivePrimaryDatastoreAdapterFactoryMap;
import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper;
import org.apache.commons.collections.CollectionUtils;
import org.apache.log4j.Logger;
import com.cloud.agent.api.StoragePoolInfo;
import com.cloud.dc.ClusterVO;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.exception.InvalidParameterValueException;
import com.cloud.host.HostVO;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.resource.ResourceManager;
import com.cloud.storage.Storage;
import com.cloud.storage.StorageManager;
import com.cloud.storage.StoragePool;
import com.cloud.storage.StoragePoolAutomation;
import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.utils.crypt.DBEncryptionUtil;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.host.Host;
/**
* Manages the lifecycle of a Managed Data Store in CloudStack
*/
public class AdaptiveDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle {
@Inject
private PrimaryDataStoreDao _storagePoolDao;
private static final Logger s_logger = Logger.getLogger(AdaptiveDataStoreLifeCycleImpl.class);
@Inject
PrimaryDataStoreHelper _dataStoreHelper;
@Inject
protected ResourceManager _resourceMgr;
@Inject
private StoragePoolAutomation _storagePoolAutomation;
@Inject
private PrimaryDataStoreDao _primaryDataStoreDao;
@Inject
private StorageManager _storageMgr;
@Inject
private ClusterDao _clusterDao;
AdaptivePrimaryDatastoreAdapterFactoryMap _adapterFactoryMap;
public AdaptiveDataStoreLifeCycleImpl(AdaptivePrimaryDatastoreAdapterFactoryMap factoryMap) {
_adapterFactoryMap = factoryMap;
}
/**
* Initialize the storage pool
* https://hostname:port?cpg=<cpgname>&snapcpg=<snapcpg>&hostset=<hostsetname>&disabletlsvalidation=true&
*/
@Override
public DataStore initialize(Map<String, Object> dsInfos) {
// https://hostname:443/cpgname/hostsetname. The hostset should map to the cluster or zone (all nodes in the cluster or zone MUST be in the hostset and, for now, be configured outside CloudStack)
String url = (String) dsInfos.get("url");
Long zoneId = (Long) dsInfos.get("zoneId");
Long podId = (Long)dsInfos.get("podId");
Long clusterId = (Long)dsInfos.get("clusterId");
String dsName = (String) dsInfos.get("name");
String providerName = (String) dsInfos.get("providerName");
Long capacityBytes = (Long) dsInfos.get("capacityBytes");
Long capacityIops = (Long)dsInfos.get("capacityIops");
String tags = (String)dsInfos.get("tags");
@SuppressWarnings("unchecked")
Map<String, String> details = (Map<String, String>) dsInfos.get("details");
// validate inputs are valid/provided as required
if (zoneId == null) throw new CloudRuntimeException("Zone Id must be specified.");
URL uri = null;
try {
uri = new URL(url);
} catch (Exception ignored) {
throw new CloudRuntimeException(url + " is not a valid URL");
}
String username = null;
String password = null;
String token = null;
String userInfo = uri.getUserInfo();
if (userInfo == null || userInfo.split(":").length < 2) {
// check if it was passed in the details object
username = details.get(ProviderAdapter.API_USERNAME_KEY);
if (username != null) {
password = details.get(ProviderAdapter.API_PASSWORD_KEY);
userInfo = username + ":" + password;
} else {
token = details.get(ProviderAdapter.API_TOKEN_KEY);
}
} else {
try {
userInfo = java.net.URLDecoder.decode(userInfo, StandardCharsets.UTF_8.toString());
} catch (UnsupportedEncodingException e) {
throw new CloudRuntimeException("Unexpected error parsing the provided user info; check that it does not include any invalid characters");
}
username = userInfo.split(":")[0];
password = userInfo.split(":")[1];
}
s_logger.info("Registering block storage provider with user=" + username);
if (clusterId != null) {
Hypervisor.HypervisorType hypervisorType = getHypervisorTypeForCluster(clusterId);
if (!hypervisorType.equals(HypervisorType.KVM)) {
throw new CloudRuntimeException("Unsupported hypervisor type for provided cluster: " + hypervisorType.toString());
}
// Primary datastore is cluster-wide, check and set the podId and clusterId parameters
if (podId == null) {
throw new CloudRuntimeException("Pod Id must also be specified when the Cluster Id is specified for Cluster-wide primary storage.");
}
s_logger.info("Registering with clusterid=" + clusterId + " which is confirmed to be a KVM host");
} else if (podId != null) {
throw new CloudRuntimeException("Cluster Id must also be specified when the Pod Id is specified for Cluster-wide primary storage.");
}
// validate we don't have any duplication going on
List<StoragePoolVO> storagePoolVO = _primaryDataStoreDao.findPoolsByProvider(providerName);
if (CollectionUtils.isNotEmpty(storagePoolVO)) {
for (StoragePoolVO poolVO : storagePoolVO) {
Map <String, String> poolDetails = _primaryDataStoreDao.getDetails(poolVO.getId());
String otherPoolUrl = poolDetails.get(ProviderAdapter.API_URL_KEY);
if (dsName.equals(poolVO.getName())) {
throw new InvalidParameterValueException("A pool with the name [" + dsName + "] already exists, choose another name");
}
if (uri.toString().equals(otherPoolUrl)) {
throw new IllegalArgumentException("Provider URL [" + otherPoolUrl + "] is already in use by another storage pool named [" + poolVO.getName() + "], please validate you have correct API and CPG");
}
}
}
s_logger.info("Validated no other pool exists with this name: " + dsName);
try {
PrimaryDataStoreParameters parameters = new PrimaryDataStoreParameters();
parameters.setHost(uri.getHost());
parameters.setPort(uri.getPort());
parameters.setPath(uri.getPath() + "?" + uri.getQuery());
parameters.setType(StoragePoolType.FiberChannel);
parameters.setZoneId(zoneId);
parameters.setPodId(podId);
parameters.setClusterId(clusterId);
parameters.setName(dsName);
parameters.setProviderName(providerName);
parameters.setManaged(true);
parameters.setCapacityBytes(capacityBytes);
parameters.setUsedBytes(0);
parameters.setCapacityIops(capacityIops);
parameters.setHypervisorType(HypervisorType.KVM);
parameters.setTags(tags);
parameters.setUserInfo(userInfo);
parameters.setUuid(UUID.randomUUID().toString());
details.put(ProviderAdapter.API_URL_KEY, uri.toString());
if (username != null) {
details.put(ProviderAdapter.API_USERNAME_KEY, username);
}
if (password != null) {
details.put(ProviderAdapter.API_PASSWORD_KEY, DBEncryptionUtil.encrypt(password));
}
if (token != null) {
details.put(ProviderAdapter.API_TOKEN_KEY, DBEncryptionUtil.encrypt(token));
}
// this appears to control placing the storage pool above network file system based storage pools in priority
details.put(Storage.Capability.HARDWARE_ACCELERATION.toString(), "true");
// this new capability indicates the storage pool allows volumes to migrate to/from other pools (i.e. to/from NFS pools)
details.put(Storage.Capability.ALLOW_MIGRATE_OTHER_POOLS.toString(), "true");
parameters.setDetails(details);
// make sure the storage array is connectable and the pod and hostgroup objects exist
ProviderAdapter api = _adapterFactoryMap.getAPI(parameters.getUuid(), providerName, details);
// validate the provided details are correct/valid for the provider
api.validate();
// if we have user-provided capacity bytes, validate they do not exceed the managed storage capacity bytes
ProviderVolumeStorageStats stats = api.getManagedStorageStats();
if (capacityBytes != null && capacityBytes != 0) {
if (stats.getCapacityInBytes() > 0) {
if (stats.getCapacityInBytes() < capacityBytes) {
throw new InvalidParameterValueException("Capacity bytes provided exceeds the capacity of the storage endpoint: provided by user: " + capacityBytes + ", storage capacity from storage provider: " + stats.getCapacityInBytes());
}
}
parameters.setCapacityBytes(capacityBytes);
}
// if we have no user-provided capacity bytes, use the ones provided by storage
else {
if (stats.getCapacityInBytes() <= 0) {
throw new InvalidParameterValueException("Capacity bytes note available from the storage provider, user provided capacity bytes must be specified");
}
parameters.setCapacityBytes(stats.getCapacityInBytes());
}
s_logger.info("Persisting [" + dsName + "] storage pool metadata to database");
return _dataStoreHelper.createPrimaryDataStore(parameters);
} catch (Throwable e) {
s_logger.error("Problem persisting storage pool", e);
throw new CloudRuntimeException(e);
}
}
/**
* Get the type of Hypervisor from the cluster id
* @param clusterId
* @return
*/
private Hypervisor.HypervisorType getHypervisorTypeForCluster(long clusterId) {
ClusterVO cluster = _clusterDao.findById(clusterId);
if (cluster == null) {
throw new CloudRuntimeException("Unable to locate the specified cluster: " + clusterId);
}
return cluster.getHypervisorType();
}
/**
* Attach the pool to a cluster (all hosts in a single cluster)
*/
@Override
public boolean attachCluster(DataStore store, ClusterScope scope) {
s_logger.info("Attaching storage pool [" + store.getName() + "] to cluster [" + scope.getScopeId() + "]");
_dataStoreHelper.attachCluster(store);
StoragePoolVO dataStoreVO = _storagePoolDao.findById(store.getId());
PrimaryDataStoreInfo primarystore = (PrimaryDataStoreInfo) store;
// Check if there is host up in this cluster
List<HostVO> allHosts = _resourceMgr.listAllUpHosts(Host.Type.Routing, primarystore.getClusterId(), primarystore.getPodId(), primarystore.getDataCenterId());
if (allHosts.isEmpty()) {
_primaryDataStoreDao.expunge(primarystore.getId());
throw new CloudRuntimeException("No host up to associate a storage pool with in cluster " + primarystore.getClusterId());
}
if (dataStoreVO.isManaged()) {
for (HostVO h : allHosts) {
s_logger.debug("adding host " + h.getName() + " to storage pool " + store.getName());
}
}
s_logger.debug("In createPool Adding the pool to each of the hosts");
List<HostVO> poolHosts = new ArrayList<HostVO>();
for (HostVO h : allHosts) {
try {
_storageMgr.connectHostToSharedPool(h.getId(), primarystore.getId());
poolHosts.add(h);
} catch (Exception e) {
s_logger.warn("Unable to establish a connection between " + h + " and " + primarystore, e);
}
}
if (poolHosts.isEmpty()) {
s_logger.warn("No host can access storage pool " + primarystore + " on cluster " + primarystore.getClusterId());
_primaryDataStoreDao.expunge(primarystore.getId());
throw new CloudRuntimeException("Failed to access storage pool");
}
return true;
}
@Override
public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo existingInfo) {
s_logger.info("Attaching storage pool [" + store.getName() + "] to host [" + scope.getScopeId() + "]");
_dataStoreHelper.attachHost(store, scope, existingInfo);
return true;
}
@Override
public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType hypervisorType) {
s_logger.info("Attaching storage pool [" + dataStore.getName() + "] to zone [" + scope.getScopeId() + "]");
List<HostVO> hosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(hypervisorType, scope.getScopeId());
List<HostVO> poolHosts = new ArrayList<HostVO>();
for (HostVO host : hosts) {
try {
_storageMgr.connectHostToSharedPool(host.getId(), dataStore.getId());
poolHosts.add(host);
} catch (Exception e) {
s_logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e);
}
}
if (poolHosts.isEmpty()) {
s_logger.warn("No host can access storage pool " + dataStore + " in this zone.");
_primaryDataStoreDao.expunge(dataStore.getId());
throw new CloudRuntimeException("Failed to create storage pool as it is not accessible to hosts.");
}
_dataStoreHelper.attachZone(dataStore, hypervisorType);
return true;
}
/**
* Put the storage pool in maintenance mode
*/
@Override
public boolean maintain(DataStore store) {
s_logger.info("Placing storage pool [" + store.getName() + "] in maintainence mode");
if (_storagePoolAutomation.maintain(store)) {
return _dataStoreHelper.maintain(store);
} else {
return false;
}
}
/**
* Cancel maintenance mode
*/
@Override
public boolean cancelMaintain(DataStore store) {
s_logger.info("Canceling storage pool maintainence for [" + store.getName() + "]");
if (_dataStoreHelper.cancelMaintain(store)) {
return _storagePoolAutomation.cancelMaintain(store);
} else {
return false;
}
}
/**
* Delete the data store
*/
@Override
public boolean deleteDataStore(DataStore store) {
s_logger.info("Delete datastore called for [" + store.getName() + "]");
return _dataStoreHelper.deletePrimaryDataStore(store);
}
/**
* Migrate objects in this store to another store
*/
@Override
public boolean migrateToObjectStore(DataStore store) {
s_logger.info("Migrate datastore called for [" + store.getName() + "]. This is not currently implemented for this provider at this time");
return false;
}
/**
* Update the storage pool configuration
*/
@Override
public void updateStoragePool(StoragePool storagePool, Map<String, String> details) {
_adapterFactoryMap.updateAPI(storagePool.getUuid(), storagePool.getStorageProviderName(), details);
}
/**
* Enable the storage pool (allows volumes from this pool)
*/
@Override
public void enableStoragePool(DataStore store) {
s_logger.info("Enabling storage pool [" + store.getName() + "]");
_dataStoreHelper.enable(store);
}
/**
* Disable storage pool (stops new volume provisioning from pool)
*/
@Override
public void disableStoragePool(DataStore store) {
s_logger.info("Disabling storage pool [" + store.getName() + "]");
_dataStoreHelper.disable(store);
}
}
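For reference, an illustrative registration URL (hostname and object names are hypothetical) matching the format documented on initialize() above:

https://primera.example.com:443?cpg=CloudStackCPG&snapcpg=CloudStackSnapCPG&hostset=KVMCluster1&disabletlsvalidation=true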

View File

@ -0,0 +1,134 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.provider;
import java.util.HashMap;
import java.util.Map;
import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapter;
import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterFactory;
import org.apache.log4j.Logger;
import com.cloud.utils.crypt.DBEncryptionUtil;
import com.cloud.utils.exception.CloudRuntimeException;
public class AdaptivePrimaryDatastoreAdapterFactoryMap {
private final Logger logger = Logger.getLogger(ProviderAdapter.class);
private Map<String,ProviderAdapterFactory> factoryMap = new HashMap<String,ProviderAdapterFactory>();
private Map<String,ProviderAdapter> apiMap = new HashMap<String,ProviderAdapter>();
public AdaptivePrimaryDatastoreAdapterFactoryMap() {
}
/**
* Given a storage pool return current client. Reconfigure if changes are
* discovered
*/
public final ProviderAdapter getAPI(String uuid, String providerName, Map<String, String> details) {
ProviderAdapter api = apiMap.get(uuid);
if (api == null) {
synchronized (this) {
api = apiMap.get(uuid);
if (api == null) {
api = createNewAdapter(uuid, providerName, details);
apiMap.put(uuid, api);
logger.debug("Cached the new ProviderAdapter for storage pool " + uuid);
}
}
}
return api;
}
/**
* Update the API with the given UUID. allows for URL changes and authentication updates
* @param uuid
* @param providerName
* @param details
*/
public final void updateAPI(String uuid, String providerName, Map<String, String> details) {
// attempt to create (which validates) the new info before updating the cache
ProviderAdapter adapter = createNewAdapter(uuid, providerName, details);
// if it's null, it's likely because no action has occurred yet to trigger the API object to be loaded
if (adapter == null) {
throw new CloudRuntimeException("Adapter configruation failed for an unknown reason");
}
ProviderAdapter oldAdapter = apiMap.get(uuid);
apiMap.put(uuid, adapter);
try {
if (oldAdapter != null) oldAdapter.disconnect();
} catch (Throwable e) {
logger.debug("Failure closing the old ProviderAdapter during an update of the cached data after validation of the new adapter configuration, likely the configuration is no longer valid", e);
}
}
public void register(ProviderAdapterFactory factory) {
factoryMap.put(factory.getProviderName(), factory);
}
protected ProviderAdapter createNewAdapter(String uuid, String providerName, Map<String, String> details) {
String authnType = details.get(ProviderAdapter.API_AUTHENTICATION_TYPE_KEY);
if (authnType == null) authnType = "basicauth";
String lookupKey = null;
if (authnType.equals("basicauth")) {
lookupKey = details.get(ProviderAdapter.API_USERNAME_KEY);
if (lookupKey == null) {
throw new RuntimeException("Storage provider configuration property [" + ProviderAdapter.API_USERNAME_KEY + "] is required when using authentication type [" + authnType + "]");
}
} else if (authnType.equals("apitoken")) {
lookupKey = details.get(ProviderAdapter.API_TOKEN_KEY);
if (lookupKey == null) {
throw new RuntimeException("Storage provider configuration property [" + ProviderAdapter.API_TOKEN_KEY + "] is required when using authentication type [" + authnType + "]");
}
} else {
throw new RuntimeException("Storage provider configuration property [" + ProviderAdapter.API_AUTHENTICATION_TYPE_KEY + "] not set to valid value");
}
String url = details.get(ProviderAdapter.API_URL_KEY);
if (url == null) {
throw new RuntimeException("URL required when configuring a Managed Block API storage provider");
}
logger.debug("Looking for Provider [" + providerName + "] at [" + url + "]");
ProviderAdapterFactory factory = factoryMap.get(providerName);
if (factory == null) {
throw new RuntimeException("Unable to find a storage provider API factory for provider: " + providerName);
}
// decrypt password or token before sending to provider
if (authnType.equals("basicauth")) {
try {
details.put(ProviderAdapter.API_PASSWORD_KEY, DBEncryptionUtil.decrypt(details.get(ProviderAdapter.API_PASSWORD_KEY)));
} catch (Exception e) {
logger.warn("Failed to decrypt managed block API property: [" + ProviderAdapter.API_PASSWORD_KEY + "], trying to use as-is");
}
} else if (authnType.equals("apitoken")) {
try {
details.put(ProviderAdapter.API_TOKEN_KEY, DBEncryptionUtil.decrypt(details.get(ProviderAdapter.API_TOKEN_KEY)));
} catch (Exception e) {
logger.warn("Failed to decrypt managed block API property: [" + ProviderAdapter.API_TOKEN_KEY + "], trying to use as-is");
}
}
ProviderAdapter api = factory.create(url, details);
api.validate();
logger.debug("Creating new ProviderAdapter object for endpoint: " + providerName + "@" + url);
return api;
}
}
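A minimal usage sketch of the register/getAPI flow; the factory parameter stands in for a concrete ProviderAdapterFactory (such as the Primera or Pure implementations elsewhere in this PR) and the method itself is illustrative only:

// Register a factory once, then fetch the cached, validated adapter for a
// pool by UUID. Later calls with the same UUID reuse the cached instance;
// updateAPI() swaps it after revalidating the new configuration.
ProviderAdapter adapterFor(String poolUuid, ProviderAdapterFactory factory, Map<String, String> details) {
    AdaptivePrimaryDatastoreAdapterFactoryMap map = new AdaptivePrimaryDatastoreAdapterFactoryMap();
    map.register(factory);
    return map.getAPI(poolUuid, factory.getProviderName(), details);
}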

View File

@ -0,0 +1,86 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.provider;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle;
import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.utils.component.ComponentContext;
import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterFactory;
import org.apache.cloudstack.storage.datastore.driver.AdaptiveDataStoreDriverImpl;
import org.apache.cloudstack.storage.datastore.lifecycle.AdaptiveDataStoreLifeCycleImpl;
@Component
public abstract class AdaptivePrimaryDatastoreProviderImpl implements PrimaryDataStoreProvider {
static final Logger s_logger = Logger.getLogger(AdaptivePrimaryDatastoreProviderImpl.class);
AdaptiveDataStoreDriverImpl driver;
HypervisorHostListener listener;
AdaptivePrimaryDatastoreAdapterFactoryMap factoryMap = new AdaptivePrimaryDatastoreAdapterFactoryMap();
DataStoreLifeCycle lifecycle;
AdaptivePrimaryDatastoreProviderImpl(ProviderAdapterFactory f) {
s_logger.info("Creating " + f.getProviderName());
factoryMap.register(f);
}
@Override
public DataStoreLifeCycle getDataStoreLifeCycle() {
return this.lifecycle;
}
@Override
public boolean configure(Map<String, Object> params) {
s_logger.info("Configuring " + getName());
driver = new AdaptiveDataStoreDriverImpl(factoryMap);
driver.setProviderName(getName());
lifecycle = ComponentContext.inject(new AdaptiveDataStoreLifeCycleImpl(factoryMap));
driver = ComponentContext.inject(driver);
listener = ComponentContext.inject(new AdaptivePrimaryHostListener(factoryMap));
return true;
}
@Override
public PrimaryDataStoreDriver getDataStoreDriver() {
return this.driver;
}
@Override
public HypervisorHostListener getHostListener() {
return this.listener;
}
@Override
public Set<DataStoreProviderType> getTypes() {
Set<DataStoreProviderType> types = new HashSet<DataStoreProviderType>();
types.add(DataStoreProviderType.PRIMARY);
return types;
}
}

View File

@ -0,0 +1,83 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.provider;
import javax.inject.Inject;
import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;
import org.apache.log4j.Logger;
import com.cloud.exception.StorageConflictException;
import com.cloud.storage.StoragePoolHostVO;
import com.cloud.storage.dao.StoragePoolHostDao;
public class AdaptivePrimaryHostListener implements HypervisorHostListener {
static final Logger s_logger = Logger.getLogger(AdaptivePrimaryHostListener.class);
@Inject
StoragePoolHostDao storagePoolHostDao;
public AdaptivePrimaryHostListener(AdaptivePrimaryDatastoreAdapterFactoryMap factoryMap) {
}
@Override
public boolean hostAboutToBeRemoved(long hostId) {
s_logger.debug("hostAboutToBeRemoved called");
return true;
}
@Override
public boolean hostAdded(long hostId) {
s_logger.debug("hostAdded called");
return true;
}
@Override
public boolean hostConnect(long hostId, long poolId) throws StorageConflictException {
s_logger.debug("hostConnect called for hostid [" + hostId + "], poolId [" + poolId + "]");
StoragePoolHostVO storagePoolHost = storagePoolHostDao.findByPoolHost(poolId, hostId);
if (storagePoolHost == null) {
storagePoolHost = new StoragePoolHostVO(poolId, hostId, "");
storagePoolHostDao.persist(storagePoolHost);
}
return true;
}
@Override
public boolean hostDisconnected(long hostId, long poolId) {
s_logger.debug("hostDisconnected called for hostid [" + hostId + "], poolId [" + poolId + "]");
StoragePoolHostVO storagePoolHost = storagePoolHostDao.findByPoolHost(poolId, hostId);
if (storagePoolHost != null) {
storagePoolHostDao.deleteStoragePoolHostDetails(hostId, poolId);
}
return true;
}
@Override
public boolean hostEnabled(long hostId) {
s_logger.debug("hostEnabled called");
return true;
}
@Override
public boolean hostRemoved(long hostId, long clusterId) {
s_logger.debug("hostRemoved called");
return true;
}
}

View File

@ -0,0 +1,52 @@
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<artifactId>cloud-plugin-storage-volume-flasharray</artifactId>
<name>Apache CloudStack Plugin - Storage Volume - Pure Flash Array</name>
<parent>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloudstack-plugins</artifactId>
<version>4.19.0.0-SNAPSHOT</version>
<relativePath>../../../pom.xml</relativePath>
</parent>
<dependencies>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-plugin-storage-volume-adaptive</artifactId>
<version>${project.version}</version>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<artifactId>maven-surefire-plugin</artifactId>
<executions>
<execution>
<phase>integration-test</phase>
<goals>
<goal>test</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>

View File

@ -0,0 +1,36 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.flasharray;
import java.util.Map;
import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapter;
import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterFactory;
public class FlashArrayAdapterFactory implements ProviderAdapterFactory {
@Override
public String getProviderName() {
return "Flash Array";
}
@Override
public ProviderAdapter create(String url, Map<String, String> details) {
return new FlashArrayAdapter(url, details);
}
}

View File

@ -0,0 +1,34 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.flasharray;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class FlashArrayApiToken {
@JsonProperty("api_token")
private String apiToken;
public void setApiToken(String apiToken) {
this.apiToken = apiToken;
}
public String getApiToken() {
return apiToken;
}
}

View File

@ -0,0 +1,68 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.flasharray;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class FlashArrayConnection {
@JsonProperty("host_group")
private FlashArrayConnectionHostgroup hostGroup;
@JsonProperty("host")
private FlashArrayConnectionHost host;
@JsonProperty("volume")
private FlashArrayVolume volume;
@JsonProperty("lun")
private Integer lun;
public FlashArrayConnectionHostgroup getHostGroup() {
return hostGroup;
}
public void setHostGroup(FlashArrayConnectionHostgroup hostGroup) {
this.hostGroup = hostGroup;
}
public FlashArrayConnectionHost getHost() {
return host;
}
public void setHost(FlashArrayConnectionHost host) {
this.host = host;
}
public FlashArrayVolume getVolume() {
return volume;
}
public void setVolume(FlashArrayVolume volume) {
this.volume = volume;
}
public Integer getLun() {
return lun;
}
public void setLun(Integer lun) {
this.lun = lun;
}
}

View File

@ -0,0 +1,39 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.flasharray;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class FlashArrayConnectionHost {
@JsonProperty("name")
private String name;
public FlashArrayConnectionHost() {}
public FlashArrayConnectionHost(String name) {
this.name = name;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
}

View File

@ -0,0 +1,40 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.flasharray;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class FlashArrayConnectionHostgroup {
@JsonProperty("name")
private String name;
public FlashArrayConnectionHostgroup() {}
public FlashArrayConnectionHostgroup(String name) {
this.name = name;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
}

View File

@ -0,0 +1,72 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.flasharray;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class FlashArrayGroupMemberReference {
@JsonProperty("group")
private FlashArrayGroupNameWrapper group;
@JsonProperty("member")
private FlashArrayGroupMemberNameWrapper member;
public static class FlashArrayGroupNameWrapper {
@JsonProperty("name")
private String name;
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
}
public static class FlashArrayGroupMemberNameWrapper {
@JsonProperty("name")
private String name;
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
}
public FlashArrayGroupNameWrapper getGroup() {
return group;
}
public void setGroup(FlashArrayGroupNameWrapper group) {
this.group = group;
}
public FlashArrayGroupMemberNameWrapper getMember() {
return member;
}
public void setMember(FlashArrayGroupMemberNameWrapper member) {
this.member = member;
}
}

View File

@ -0,0 +1,38 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.flasharray;
import java.util.ArrayList;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class FlashArrayGroupMemberReferenceList {
@JsonProperty("items")
private ArrayList<FlashArrayGroupMemberReference> items;
public ArrayList<FlashArrayGroupMemberReference> getItems() {
return items;
}
public void setItems(ArrayList<FlashArrayGroupMemberReference> items) {
this.items = items;
}
}

View File

@ -0,0 +1,58 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.flasharray;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class FlashArrayHostgroup {
@JsonProperty("name")
private String name;
@JsonProperty("connection_count")
private Long connectionCount;
@JsonProperty("host_count")
private Long hostCount;
@JsonProperty("is_local")
private Boolean local;
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public Long getConnectionCount() {
return connectionCount;
}
public void setConnectionCount(Long connectionCount) {
this.connectionCount = connectionCount;
}
public Long getHostCount() {
return hostCount;
}
public void setHostCount(Long hostCount) {
this.hostCount = hostCount;
}
public Boolean getLocal() {
return local;
}
public void setLocal(Boolean local) {
this.local = local;
}
}

View File

@ -0,0 +1,60 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.flasharray;
import java.util.List;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class FlashArrayList<T> {
@JsonProperty("more_items_remaining")
private Boolean moreItemsRemaining;
@JsonProperty("total_item_count")
private Integer totalItemCount;
@JsonProperty("continuation_token")
private String continuationToken;
@JsonProperty("items")
private List<T> items;
public Boolean getMoreItemsRemaining() {
return moreItemsRemaining;
}
public void setMoreItemsRemaining(Boolean moreItemsRemaining) {
this.moreItemsRemaining = moreItemsRemaining;
}
public Integer getTotalItemCount() {
return totalItemCount;
}
public void setTotalItemCount(Integer totalItemCount) {
this.totalItemCount = totalItemCount;
}
public String getContinuationToken() {
return continuationToken;
}
public void setContinuationToken(String continuationToken) {
this.continuationToken = continuationToken;
}
public List<T> getItems() {
return items;
}
public void setItems(List<T> items) {
this.items = items;
}
}

View File

@ -0,0 +1,66 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.flasharray;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class FlashArrayPod {
@JsonProperty("name")
private String name;
@JsonProperty("id")
private String id;
@JsonProperty("destroyed")
private Boolean destroyed;
@JsonProperty("footprint")
private Long footprint;
@JsonProperty("quota_limit")
private Long quotaLimit;
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
public Boolean getDestroyed() {
return destroyed;
}
public void setDestroyed(Boolean destroyed) {
this.destroyed = destroyed;
}
public Long getFootprint() {
return footprint;
}
public void setFootprint(Long footprint) {
this.footprint = footprint;
}
public Long getQuotaLimit() {
return quotaLimit;
}
public void setQuotaLimit(Long quotaLimit) {
this.quotaLimit = quotaLimit;
}
}

View File

@ -0,0 +1,77 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.flasharray;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class FlashArrayTag {
@JsonProperty("copyable")
private Boolean copyable;
@JsonProperty("key")
private String key;
@JsonProperty("namespace")
private String namespace;
@JsonProperty("value")
private String value;
public FlashArrayTag() {
}
public FlashArrayTag(String namespace, String key, String value) {
this.key = key;
this.namespace = namespace;
this.value = value;
}
public Boolean getCopyable() {
return copyable;
}
public void setCopyable(Boolean copyable) {
this.copyable = copyable;
}
public String getKey() {
return key;
}
public void setKey(String key) {
this.key = key;
}
public String getNamespace() {
return namespace;
}
public void setNamespace(String namespace) {
this.namespace = namespace;
}
public String getValue() {
return value;
}
public void setValue(String value) {
this.value = value;
}
}

View File

@ -0,0 +1,39 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.flasharray;
import java.util.List;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class FlashArrayTagList {
@JsonProperty("tags")
public List<FlashArrayTag> tags;
public List<FlashArrayTag> getTags() {
return tags;
}
public void setTags(List<FlashArrayTag> tags) {
this.tags = tags;
}
}

View File

@ -0,0 +1,253 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.flasharray;
import org.apache.cloudstack.storage.datastore.adapter.ProviderSnapshot;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class FlashArrayVolume implements ProviderSnapshot {
public static final String PURE_OUI = "24a9370";
@JsonProperty("destroyed")
private Boolean destroyed;
/** The virtual size requested for this volume */
@JsonProperty("provisioned")
private Long allocatedSizeBytes;
@JsonIgnore
private String id;
@JsonIgnore // we don't use the CloudStack user name at all
private String name;
@JsonIgnore
private String shortExternalName;
@JsonProperty("pod")
private FlashArrayVolumePod pod;
@JsonProperty("priority")
private Integer priority;
@JsonProperty("promotion_status")
private String promotionStatus;
@JsonProperty("subtype")
private String subtype;
@JsonProperty("space")
private FlashArrayVolumeSpace space;
@JsonProperty("source")
private FlashArrayVolumeSource source;
@JsonProperty("serial")
private String serial;
@JsonProperty("name")
private String externalName;
@JsonProperty("id")
private String externalUuid;
@JsonIgnore
private AddressType addressType;
@JsonIgnore
private String connectionId;
public FlashArrayVolume() {
this.addressType = AddressType.FIBERWWN;
}
@Override
public Boolean isDestroyed() {
return destroyed;
}
@Override
@JsonIgnore
public String getId() {
return id;
}
@Override
@JsonIgnore
public String getName() {
return name;
}
@JsonIgnore
public String getPodName() {
if (pod != null) {
return pod.getName();
} else {
return null;
}
}
@Override
@JsonIgnore
public Integer getPriority() {
return priority;
}
@Override
@JsonIgnore
public String getState() {
return null;
}
@Override
@JsonIgnore
public AddressType getAddressType() {
return addressType;
}
@Override
@JsonIgnore
public String getAddress() {
if (serial == null) return null;
return ("6" + PURE_OUI + serial).toLowerCase();
}
@Override
public String getExternalConnectionId() {
return connectionId;
}
@JsonIgnore
public void setExternalConnectionId(String externalConnectionId) {
this.connectionId = externalConnectionId;
}
@Override
public void setId(String id) {
this.id = id;
}
@Override
public void setName(String name) {
this.name = name;
}
public void setPodName(String podname) {
FlashArrayVolumePod pod = new FlashArrayVolumePod();
pod.setName(podname);
this.pod = pod;
}
@Override
public void setPriority(Integer priority) {
this.priority = priority;
}
@Override
public void setAddressType(AddressType addressType) {
this.addressType = addressType;
}
@Override
@JsonIgnore
public Long getAllocatedSizeInBytes() {
return this.allocatedSizeBytes;
}
public void setAllocatedSizeBytes(Long size) {
this.allocatedSizeBytes = size;
}
@Override
@JsonIgnore
public Long getUsedBytes() {
if (space != null) {
return space.getVirtual();
} else {
return null;
}
}
public void setDestroyed(Boolean destroyed) {
this.destroyed = destroyed;
}
public FlashArrayVolumeSource getSource() {
return source;
}
public void setSource(FlashArrayVolumeSource source) {
this.source = source;
}
@Override
public String getExternalUuid() {
return externalUuid;
}
@Override
public String getExternalName() {
return externalName;
}
public void setExternalUuid(String uuid) {
this.externalUuid = uuid;
}
public void setExternalName(String name) {
this.externalName = name;
}
@Override
public Boolean canAttachDirectly() {
return false;
}
public String getConnectionId() {
return connectionId;
}
public void setConnectionId(String connectionId) {
this.connectionId = connectionId;
}
public Boolean getDestroyed() {
return destroyed;
}
public Long getAllocatedSizeBytes() {
return allocatedSizeBytes;
}
public String getShortExternalName() {
return shortExternalName;
}
public void setShortExternalName(String shortExternalName) {
this.shortExternalName = shortExternalName;
}
public FlashArrayVolumePod getPod() {
return pod;
}
public void setPod(FlashArrayVolumePod pod) {
this.pod = pod;
}
public String getPromotionStatus() {
return promotionStatus;
}
public void setPromotionStatus(String promotionStatus) {
this.promotionStatus = promotionStatus;
}
public String getSubtype() {
return subtype;
}
public void setSubtype(String subtype) {
this.subtype = subtype;
}
public FlashArrayVolumeSpace getSpace() {
return space;
}
public void setSpace(FlashArrayVolumeSpace space) {
this.space = space;
}
public String getSerial() {
return serial;
}
public void setSerial(String serial) {
this.serial = serial;
}
}
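
As a worked example of the address derivation above (the serial is hypothetical): getAddress() prepends the NAA type nibble "6" and PURE_OUI to the array-reported serial and lowercases the result; on a Linux KVM host, udev typically exposes the multipath device under /dev/disk/by-id/ using that WWN.

FlashArrayVolume v = new FlashArrayVolume();
v.setSerial("F4270A11BD2145CC00011234"); // hypothetical 24-hex-digit serial
String wwn = v.getAddress();             // "624a9370f4270a11bd2145cc00011234"
// typical udev path: /dev/disk/by-id/wwn-0x624a9370f4270a11bd2145cc00011234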

View File

@ -0,0 +1,43 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.flasharray;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class FlashArrayVolumePod {
@JsonProperty("id")
private String id;
@JsonProperty("name")
private String name;
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
}

View File

@ -0,0 +1,47 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.flasharray;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class FlashArrayVolumeSource {
@JsonProperty("id")
private String id;
@JsonProperty("name")
private String name;
public FlashArrayVolumeSource() { }
public FlashArrayVolumeSource(String sourceName) {
this.name = sourceName;
}
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
}

View File

@ -0,0 +1,122 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.flasharray;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class FlashArrayVolumeSpace {
@JsonProperty("data_reduction")
private Float dataReduction;
@JsonProperty("snapshots")
private Integer snapshots;
@JsonProperty("snapshots_effective")
private Integer snapshotsEffective;
@JsonProperty("thin_provisioning")
private Float thinProvisioning;
@JsonProperty("total_effective")
private Long totalEffective;
@JsonProperty("total_physical")
private Long totalPhysical;
@JsonProperty("total_provisioned")
private Long totalProvisioned;
@JsonProperty("total_reduction")
private Float totalReduction;
@JsonProperty("unique")
private Long unique;
@JsonProperty("unique_effective")
private Long uniqueEffective;
@JsonProperty("user_provisioned")
private Long usedProvisioned;
@JsonProperty("virtual")
private Long virtual;
public Float getDataReduction() {
return dataReduction;
}
public void setDataReduction(Float dataReduction) {
this.dataReduction = dataReduction;
}
public Integer getSnapshots() {
return snapshots;
}
public void setSnapshots(Integer snapshots) {
this.snapshots = snapshots;
}
public Integer getSnapshotsEffective() {
return snapshotsEffective;
}
public void setSnapshotsEffective(Integer snapshotsEffective) {
this.snapshotsEffective = snapshotsEffective;
}
public Float getThinProvisioning() {
return thinProvisioning;
}
public void setThinProvisioning(Float thinProvisioning) {
this.thinProvisioning = thinProvisioning;
}
public Long getTotalEffective() {
return totalEffective;
}
public void setTotalEffective(Long totalEffective) {
this.totalEffective = totalEffective;
}
public Long getTotalPhysical() {
return totalPhysical;
}
public void setTotalPhysical(Long totalPhysical) {
this.totalPhysical = totalPhysical;
}
public Long getTotalProvisioned() {
return totalProvisioned;
}
public void setTotalProvisioned(Long totalProvisioned) {
this.totalProvisioned = totalProvisioned;
}
public Float getTotalReduction() {
return totalReduction;
}
public void setTotalReduction(Float totalReduction) {
this.totalReduction = totalReduction;
}
public Long getUnique() {
return unique;
}
public void setUnique(Long unique) {
this.unique = unique;
}
public Long getUniqueEffective() {
return uniqueEffective;
}
public void setUniqueEffective(Long uniqueEffective) {
this.uniqueEffective = uniqueEffective;
}
public Long getUsedProvisioned() {
return usedProvisioned;
}
public void setUsedProvisioned(Long usedProvisioned) {
this.usedProvisioned = usedProvisioned;
}
public Long getVirtual() {
return virtual;
}
public void setVirtual(Long virtual) {
this.virtual = virtual;
}
}

View File

@ -0,0 +1,32 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.provider;
import org.apache.cloudstack.storage.datastore.adapter.flasharray.FlashArrayAdapterFactory;
public class FlashArrayPrimaryDatastoreProviderImpl extends AdaptivePrimaryDatastoreProviderImpl {
public FlashArrayPrimaryDatastoreProviderImpl() {
super(new FlashArrayAdapterFactory());
}
@Override
public String getName() {
return "Flash Array";
}
}

View File

@ -0,0 +1,18 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
name=storage-volume-flasharray
parent=storage

View File

@ -0,0 +1,35 @@
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<beans xmlns="http://www.springframework.org/schema/beans"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:context="http://www.springframework.org/schema/context"
xmlns:aop="http://www.springframework.org/schema/aop"
xsi:schemaLocation="http://www.springframework.org/schema/beans
http://www.springframework.org/schema/beans/spring-beans.xsd
http://www.springframework.org/schema/aop http://www.springframework.org/schema/aop/spring-aop.xsd
http://www.springframework.org/schema/context
http://www.springframework.org/schema/context/spring-context.xsd"
>
<bean id="flashArrayDataStoreProvider"
class="org.apache.cloudstack.storage.datastore.provider.FlashArrayPrimaryDatastoreProviderImpl">
</bean>
</beans>

View File

@ -0,0 +1,52 @@
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<artifactId>cloud-plugin-storage-volume-primera</artifactId>
<name>Apache CloudStack Plugin - Storage Volume - HPE Primera</name>
<parent>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloudstack-plugins</artifactId>
<version>4.19.0.0-SNAPSHOT</version>
<relativePath>../../../pom.xml</relativePath>
</parent>
<dependencies>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-plugin-storage-volume-adaptive</artifactId>
<version>${project.version}</version>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<artifactId>maven-surefire-plugin</artifactId>
<executions>
<execution>
<phase>integration-test</phase>
<goals>
<goal>test</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>

View File

@ -0,0 +1,930 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.primera;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.MalformedURLException;
import java.net.URL;
import java.security.KeyManagementException;
import java.security.KeyStoreException;
import java.security.NoSuchAlgorithmException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.net.ssl.HostnameVerifier;
import javax.net.ssl.SSLContext;
import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapter;
import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterContext;
import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterDataObject;
import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterDiskOffering;
import org.apache.cloudstack.storage.datastore.adapter.ProviderSnapshot;
import org.apache.cloudstack.storage.datastore.adapter.ProviderVolume;
import org.apache.cloudstack.storage.datastore.adapter.ProviderVolume.AddressType;
import org.apache.cloudstack.storage.datastore.adapter.ProviderVolumeNamer;
import org.apache.cloudstack.storage.datastore.adapter.ProviderVolumeStats;
import org.apache.cloudstack.storage.datastore.adapter.ProviderVolumeStorageStats;
import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterDiskOffering.ProvisioningType;
import org.apache.http.Header;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpDelete;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.methods.HttpPut;
import org.apache.http.conn.ssl.NoopHostnameVerifier;
import org.apache.http.conn.ssl.TrustAllStrategy;
import org.apache.http.entity.StringEntity;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.ssl.SSLContextBuilder;
import org.apache.log4j.Logger;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
public class PrimeraAdapter implements ProviderAdapter {
static final Logger logger = Logger.getLogger(PrimeraAdapter.class);
public static final String HOSTSET = "hostset";
public static final String CPG = "cpg";
public static final String SNAP_CPG = "snapCpg";
public static final String KEY_TTL = "keyttl";
public static final String CONNECT_TIMEOUT_MS = "connectTimeoutMs";
public static final String POST_COPY_WAIT_MS = "postCopyWaitMs";
public static final String TASK_WAIT_TIMEOUT_MS = "taskWaitTimeoutMs";
private static final long KEY_TTL_DEFAULT = (1000 * 60 * 14);
private static final long CONNECT_TIMEOUT_MS_DEFAULT = 600000;
private static final long TASK_WAIT_TIMEOUT_MS_DEFAULT = 10 * 60 * 1000;
public static final long BYTES_IN_MiB = 1048576;
static final ObjectMapper mapper = new ObjectMapper();
public String cpg = null;
public String snapCpg = null;
public String hostset = null;
private String username;
private String password;
private String key;
private String url;
private long keyExpiration = -1;
private long keyTtl = KEY_TTL_DEFAULT;
private long connTimeout = CONNECT_TIMEOUT_MS_DEFAULT;
private long taskWaitTimeoutMs = TASK_WAIT_TIMEOUT_MS_DEFAULT;
private CloseableHttpClient _client = null;
private boolean skipTlsValidation;
private Map<String, String> connectionDetails = null;
public PrimeraAdapter(String url, Map<String, String> details) {
this.url = url;
this.connectionDetails = details;
login();
}
@Override
public void refresh(Map<String, String> details) {
this.connectionDetails = details;
this.refreshSession(true);
}
/**
* Validate that the hostset and CPG from the details data exist. Each
* configuration object/connection needs a distinct set of these 2 things.
*/
@Override
public void validate() {
login();
if (this.getHostset(hostset) == null) {
throw new RuntimeException("Hostgroup [" + hostset + "] not found in FlashArray at [" + url
+ "], please validate configuration");
}
if (this.getCpg(cpg) == null) {
throw new RuntimeException(
"Pod [" + cpg + "] not found in FlashArray at [" + url + "], please validate configuration");
}
}
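// A minimal configuration sketch (values hypothetical; the credential entries
// carried in the same details map are omitted here):
//   Map<String, String> details = new HashMap<>();
//   details.put(PrimeraAdapter.CPG, "SSD_r6");
//   details.put(PrimeraAdapter.HOSTSET, "kvm-hosts");
//   PrimeraAdapter adapter = new PrimeraAdapter("https://primera.example.com/api/v1", details);
//   adapter.validate(); // fails fast if the hostset or CPG does not exist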
@Override
public void disconnect() {
return;
}
@Override
public ProviderVolume create(ProviderAdapterContext context, ProviderAdapterDataObject dataIn,
ProviderAdapterDiskOffering diskOffering, long sizeInBytes) {
PrimeraVolumeRequest request = new PrimeraVolumeRequest();
String externalName = ProviderVolumeNamer.generateObjectName(context, dataIn);
request.setName(externalName);
request.setCpg(cpg);
request.setSnapCPG(snapCpg);
if (sizeInBytes < BYTES_IN_MiB) {
request.setSizeMiB(1);
} else {
request.setSizeMiB(sizeInBytes/BYTES_IN_MiB);
}
// determine volume type based on offering
// THIN: tpvv=true, reduce=false
// SPARSE: tpvv=false, reduce=true (data reduction)
// FAT (thick provisioning): not supported by this provider
if (diskOffering != null) {
if (diskOffering.getType() == ProvisioningType.THIN) {
request.setTpvv(true);
request.setReduce(false);
} else if (diskOffering.getType() == ProvisioningType.SPARSE) {
request.setTpvv(false);
request.setReduce(true);
} else if (diskOffering.getType() == ProvisioningType.FAT) {
throw new RuntimeException("This storage provider does not support FAT provisioned volumes");
}
// sets the amount of space allowed for snapshots as a % of the volume's size
if (diskOffering.getHypervisorSnapshotReserve() != null) {
request.setSsSpcAllocLimitPct(diskOffering.getHypervisorSnapshotReserve());
}
} else {
// default to deduplicated volume
request.setReduce(true);
request.setTpvv(false);
}
request.setComment(ProviderVolumeNamer.generateObjectComment(context, dataIn));
POST("/volumes", request, null);
dataIn.setExternalName(externalName);
ProviderVolume volume = getVolume(context, dataIn);
return volume;
}
@Override
public String attach(ProviderAdapterContext context, ProviderAdapterDataObject dataIn) {
assert dataIn.getExternalName() != null : "External name not provided internally on volume attach";
PrimeraHostset.PrimeraHostsetVLUNRequest request = new PrimeraHostset.PrimeraHostsetVLUNRequest();
request.setHostname("set:" + hostset);
request.setVolumeName(dataIn.getExternalName());
request.setAutoLun(true);
// auto-lun returned here: Location: /api/v1/vluns/test_vv02,252,mysystem,2:2:4
String location = POST("/vluns", request, new TypeReference<String>() {});
if (location == null) {
throw new RuntimeException("Attach volume failed with empty location response to vlun add command on storage provider");
}
String[] toks = location.split(",");
if (toks.length < 2) {
throw new RuntimeException("Attach volume failed with invalid location response to vlun add command on storage provider. Provided location: " + location);
}
return toks[1];
}
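// Worked example of the Location parsing above, using the sample value from the
// inline comment:
//   "/api/v1/vluns/test_vv02,252,mysystem,2:2:4".split(",")
// yields toks[1] == "252", the auto-assigned LUN id that is returned to the caller.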
@Override
public void detach(ProviderAdapterContext context, ProviderAdapterDataObject request) {
// we expect only one hostset to be attached via vluns, so on detach we
// remove ALL vluns found for the volume.
assert request.getExternalName() != null : "External name not provided internally on volume detach";
removeAllVluns(request.getExternalName());
}
public void removeVlun(String name, Integer lunid, String hostString) {
// hostString can be a hostname OR "set:<hostsetname>". It is stored this way
// in the appliance and returned as the vlun's name/string.
DELETE("/vluns/" + name + "," + lunid + "," + hostString);
}
/**
* Removes all vluns - this should only be done when you are sure the volume is no longer in use
* @param name
*/
public void removeAllVluns(String name) {
PrimeraVlunList list = getVolumeHostsets(name);
if (list != null && list.getMembers() != null) {
for (PrimeraVlun vlun: list.getMembers()) {
removeVlun(vlun.getVolumeName(), vlun.getLun(), vlun.getHostname());
}
}
}
public PrimeraVlunList getVolumeHostsets(String name) {
String query = "%22volumeName%20EQ%20" + name + "%22";
return GET("/vluns?query=" + query, new TypeReference<PrimeraVlunList>() {});
}
@Override
public void delete(ProviderAdapterContext context, ProviderAdapterDataObject request) {
assert request.getExternalName() != null : "External name not provided internally on volume delete";
// first remove vluns (take volumes from vluns) from hostset
removeAllVluns(request.getExternalName());
DELETE("/volumes/" + request.getExternalName());
}
@Override
public ProviderVolume copy(ProviderAdapterContext context, ProviderAdapterDataObject sourceVolumeInfo,
ProviderAdapterDataObject targetVolumeInfo) {
PrimeraVolumeCopyRequest request = new PrimeraVolumeCopyRequest();
PrimeraVolumeCopyRequestParameters parms = new PrimeraVolumeCopyRequestParameters();
assert sourceVolumeInfo.getExternalName() != null: "External provider name not provided on copy request to Primera volume provider";
// if we have no external name, treat it as a new volume
if (targetVolumeInfo.getExternalName() == null) {
targetVolumeInfo.setExternalName(ProviderVolumeNamer.generateObjectName(context, targetVolumeInfo));
}
ProviderVolume sourceVolume = this.getVolume(context, sourceVolumeInfo);
if (sourceVolume == null) {
throw new RuntimeException("Source volume " + sourceVolumeInfo.getExternalUuid() + " with provider name " + sourceVolumeInfo.getExternalName() + " not found on storage provider");
}
ProviderVolume targetVolume = this.getVolume(context, targetVolumeInfo);
if (targetVolume == null) {
this.create(context, targetVolumeInfo, null, sourceVolume.getAllocatedSizeInBytes());
}
parms.setDestVolume(targetVolumeInfo.getExternalName());
parms.setOnline(false);
request.setParameters(parms);
PrimeraTaskReference taskref = POST("/volumes/" + sourceVolumeInfo.getExternalName(), request, new TypeReference<PrimeraTaskReference>() {});
if (taskref == null) {
throw new RuntimeException("Unable to retrieve task used to copy to newly created volume");
}
waitForTaskToComplete(taskref.getTaskid(), "copy volume " + sourceVolumeInfo.getExternalName() + " to " +
targetVolumeInfo.getExternalName(), taskWaitTimeoutMs);
return this.getVolume(context, targetVolumeInfo);
}
private void waitForTaskToComplete(String taskid, String taskDescription, Long timeoutMs) {
// first wait for task to complete
long taskWaitTimeout = System.currentTimeMillis() + timeoutMs;
boolean timedOut = true;
PrimeraTaskStatus status = null;
long starttime = System.currentTimeMillis();
while (System.currentTimeMillis() <= taskWaitTimeout) {
status = this.getTaskStatus(taskid);
if (status != null && status.isFinished()) {
timedOut = false;
if (!status.isSuccess()) {
throw new RuntimeException("Task " + taskDescription + " was cancelled. TaskID: " + status.getId() + "; Final Status: " + status.getStatusName());
}
break;
} else {
if (status != null) {
logger.info("Task " + taskDescription + " is still running. TaskID: " + status.getId() + "; Current Status: " + status.getStatusName());
}
// sleep between polls to avoid hammering the provider API
try {
Thread.sleep(5000);
} catch (InterruptedException e) {
Thread.currentThread().interrupt(); // preserve interrupt status for callers
}
}
}
if (timedOut) {
if (status != null) {
throw new RuntimeException("Task " + taskDescription + " timed out. TaskID: " + status.getId() + ", Last Known Status: " + status.getStatusName());
} else {
throw new RuntimeException("Task " + taskDescription + " timed out and a current status could not be retrieved from storage endpoint");
}
}
logger.info(taskDescription + " completed in " + ((System.currentTimeMillis() - starttime)/1000) + " seconds");
}
private PrimeraTaskStatus getTaskStatus(String taskid) {
return GET("/tasks/" + taskid + "?view=excludeDetail", new TypeReference<PrimeraTaskStatus>() {
});
}
@Override
public ProviderSnapshot snapshot(ProviderAdapterContext context, ProviderAdapterDataObject sourceVolume,
ProviderAdapterDataObject targetSnapshot) {
assert sourceVolume.getExternalName() != null : "External name not set";
PrimeraVolumeSnapshotRequest request = new PrimeraVolumeSnapshotRequest();
PrimeraVolumeSnapshotRequestParameters parms = new PrimeraVolumeSnapshotRequestParameters();
parms.setName(ProviderVolumeNamer.generateObjectName(context, targetSnapshot));
request.setParameters(parms);
POST("/volumes/" + sourceVolume.getExternalName(), request, null);
targetSnapshot.setExternalName(parms.getName());
return getSnapshot(context, targetSnapshot);
}
@Override
public ProviderVolume revert(ProviderAdapterContext context, ProviderAdapterDataObject dataIn) {
assert dataIn.getExternalName() != null: "External name not internally set for provided snapshot when requested storage provider to revert";
// first get original volume
PrimeraVolume snapVol = (PrimeraVolume)getVolume(context, dataIn);
assert snapVol != null: "Storage volume associated with snapshot externally named [" + dataIn.getExternalName() + "] not found";
assert snapVol.getParentId() != null: "Unable to determine parent volume/snapshot for snapshot named [" + dataIn.getExternalName() + "]";
PrimeraVolumeRevertSnapshotRequest request = new PrimeraVolumeRevertSnapshotRequest();
request.setOnline(true);
request.setPriority(2);
PrimeraTaskReference taskref = PUT("/volumes/" + dataIn.getExternalName(), request, new TypeReference<PrimeraTaskReference>() {});
if (taskref == null) {
throw new RuntimeException("Unable to retrieve task used to revert snapshot to base volume");
}
waitForTaskToComplete(taskref.getTaskid(), "revert snapshot " + dataIn.getExternalName(), taskWaitTimeoutMs);
return getVolumeById(context, snapVol.getParentId());
}
/**
* Resize the volume to the new size. The HPE Primera API takes the additional space to add to the volume,
* so this method first retrieves the current volume's size and subtracts it from the new total size provided
* before calling the API.
*
* This method uses option GROW_VOLUME=3 for the API documented at this URL:
* https://support.hpe.com/hpesc/public/docDisplay?docId=a00118636en_us&page=v25706371.html
*/
@Override
public void resize(ProviderAdapterContext context, ProviderAdapterDataObject request, long totalNewSizeInBytes) {
assert request.getExternalName() != null: "External name not internally set for provided volume when requesting resize of volume";
PrimeraVolume existingVolume = (PrimeraVolume) getVolume(context, request);
assert existingVolume != null: "Storage volume resize request not possible as existing volume not found for external provider name: " + request.getExternalName();
long existingSizeInBytes = existingVolume.getSizeMiB() * PrimeraAdapter.BYTES_IN_MiB;
assert existingSizeInBytes < totalNewSizeInBytes: "Existing volume size is greater than or equal to the requested new size for the volume resize request. The Primera storage system does not support truncating/shrinking volumes.";
long addOnSizeInBytes = totalNewSizeInBytes - existingSizeInBytes;
PrimeraVolume volume = new PrimeraVolume();
volume.setSizeMiB((int) (addOnSizeInBytes / PrimeraAdapter.BYTES_IN_MiB));
volume.setAction(3);
PUT("/volumes/" + request.getExternalName(), volume, null);
}
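// Illustrative sketch of the delta math above (not part of the driver; sizes are
// hypothetical): growing a 10 GiB volume to a requested total of 15 GiB sends only
// the 5 GiB difference:
//   existing = 10240 MiB; requested total = 15360 MiB;
//   volume.setSizeMiB(15360 - 10240);  // 5120 MiB added
//   volume.setAction(3);               // GROW_VOLUME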
@Override
public ProviderVolume getVolume(ProviderAdapterContext context, ProviderAdapterDataObject request) {
String externalName;
// if the external name isn't provided, look for the derived contextual name. some failure
// scenarios may result in the volume for this context being created but a subsequent failure
// preventing the external name from being persisted for later use. This is true of
// template-type objects being cached on primary storage
if (request.getExternalName() == null) {
externalName = ProviderVolumeNamer.generateObjectName(context, request);
} else {
externalName = request.getExternalName();
}
return GET("/volumes/" + externalName, new TypeReference<PrimeraVolume>() {
});
}
private ProviderVolume getVolumeById(ProviderAdapterContext context, Integer id) {
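// WSAPI filter query, URL-encoded: %22id%20EQ%20<id>%22 decodes to "id EQ <id>"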
String query = "%22id%20EQ%20" + id + "%22";
return GET("/volumes?query=" + query, new TypeReference<PrimeraVolume>() {});
}
@Override
public ProviderSnapshot getSnapshot(ProviderAdapterContext context, ProviderAdapterDataObject request) {
assert request.getExternalName() != null: "External name not provided internally when finding snapshot on storage provider";
return GET("/volumes/" + request.getExternalName(), new TypeReference<PrimeraVolume>() {
});
}
@Override
public ProviderVolume getVolumeByAddress(ProviderAdapterContext context, AddressType addressType, String address) {
assert address != null: "External volume address not provided";
assert AddressType.FIBERWWN.equals(addressType): "This volume provider currently does not support address type " + addressType.name();
String query = "%22wwn%20EQ%20" + address + "%22";
return GET("/volumes?query=" + query, new TypeReference<PrimeraVolume>() {});
}
@Override
public ProviderVolumeStorageStats getManagedStorageStats() {
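// capacity is reported from the CPG's SD growth limit and usage from the CPG's
// user raw-used space, each converted from MiB to bytes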
PrimeraCpg cpgobj = getCpg(cpg);
// guard against a missing CPG or one reporting no capacity
if (cpgobj == null || cpgobj.getTotalSpaceMiB() == 0) {
return null;
}
Long capacityBytes = 0L;
if (cpgobj.getsDGrowth() != null) {
capacityBytes = cpgobj.getsDGrowth().getLimitMiB() * PrimeraAdapter.BYTES_IN_MiB;
}
Long usedBytes = 0L;
if (cpgobj.getUsrUsage() != null) {
usedBytes = (cpgobj.getUsrUsage().getRawUsedMiB()) * PrimeraAdapter.BYTES_IN_MiB;
}
ProviderVolumeStorageStats stats = new ProviderVolumeStorageStats();
stats.setActualUsedInBytes(usedBytes);
stats.setCapacityInBytes(capacityBytes);
return stats;
}
@Override
public ProviderVolumeStats getVolumeStats(ProviderAdapterContext context, ProviderAdapterDataObject request) {
PrimeraVolume vol = (PrimeraVolume)getVolume(context, request);
if (vol == null || vol.getSizeMiB() == null || vol.getSizeMiB() == 0 || vol.getHostWriteMiB() == null || vol.getTotalUsedMiB() == null) {
return null;
}
Long virtualSizeInBytes = vol.getHostWriteMiB() * PrimeraAdapter.BYTES_IN_MiB;
Long allocatedSizeInBytes = vol.getSizeMiB() * PrimeraAdapter.BYTES_IN_MiB;
Long actualUsedInBytes = vol.getTotalUsedMiB() * PrimeraAdapter.BYTES_IN_MiB;
ProviderVolumeStats stats = new ProviderVolumeStats();
stats.setActualUsedInBytes(actualUsedInBytes);
stats.setAllocatedInBytes(allocatedSizeInBytes);
stats.setVirtualUsedInBytes(virtualSizeInBytes);
return stats;
}
@Override
public boolean canAccessHost(ProviderAdapterContext context, String hostname) {
PrimeraHostset hostset = getHostset(this.hostset);
List<String> members = hostset.getSetmembers();
// check for FQDN and shortname combinations. this assumes there is at least a shortname match
// between the storage array and the CloudStack hostname configuration
String shortname;
if (hostname.indexOf('.') > 0) {
shortname = hostname.substring(0, (hostname.indexOf('.')));
} else {
shortname = hostname;
}
for (String member: members) {
// exact match (short or long names)
if (member.equals(hostname)) {
return true;
}
// primera has the short name and cloudstack has the long name
if (member.equals(shortname)) {
return true;
}
// member has the long name but cloudstack has the short name
int index = member.indexOf('.');
if (index > 0) {
if (member.substring(0, index).equals(shortname)) {
return true;
}
}
}
return false;
}
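// Illustrative matching behavior (hostnames are hypothetical): for CloudStack host
// "kvm01.lab.example.com" (shortname "kvm01"), hostset members "kvm01",
// "kvm01.lab.example.com", and "kvm01.other.domain" all match; "kvm02" does not.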
private PrimeraCpg getCpg(String name) {
return GET("/cpgs/" + name, new TypeReference<PrimeraCpg>() {
});
}
private PrimeraHostset getHostset(String name) {
return GET("/hostsets/" + name, new TypeReference<PrimeraHostset>() {
});
}
private String getSessionKey() {
refreshSession(false);
return key;
}
private synchronized void refreshSession(boolean force) {
try {
if (force || keyExpiration < System.currentTimeMillis()) {
// close client to force connection reset on appliance; not doing this can result in NotAuthorized errors
_client.close();
_client = null;
login();
keyExpiration = System.currentTimeMillis() + keyTtl;
}
} catch (Exception e) {
// retry frequently but not every request to avoid DDOS on storage API
logger.warn("Failed to refresh Primera API key for " + username + "@" + url + ", will retry in 5 seconds", e);
keyExpiration = System.currentTimeMillis() + (5*1000);
}
}
private void validateLoginInfo(String urlStr) {
URL urlFull;
try {
urlFull = new URL(urlStr);
} catch (MalformedURLException e) {
throw new RuntimeException("Invalid URL format: " + urlStr, e);
}
int port = urlFull.getPort();
if (port <= 0) {
port = 443;
}
this.url = urlFull.getProtocol() + "://" + urlFull.getHost() + ":" + port + urlFull.getPath();
Map<String, String> queryParms = new HashMap<String, String>();
if (urlFull.getQuery() != null) {
String[] queryToks = urlFull.getQuery().split("&");
for (String tok : queryToks) {
if (tok.endsWith("=")) {
continue;
}
int i = tok.indexOf("=");
if (i > 0) {
queryParms.put(tok.substring(0, i), tok.substring(i + 1));
}
}
}
cpg = connectionDetails.get(PrimeraAdapter.CPG);
if (cpg == null) {
cpg = queryParms.get(PrimeraAdapter.CPG);
if (cpg == null) {
throw new RuntimeException(
PrimeraAdapter.CPG + " paramater/option required to configure this storage pool");
}
}
snapCpg = connectionDetails.get(PrimeraAdapter.SNAP_CPG);
if (snapCpg == null) {
snapCpg = queryParms.get(PrimeraAdapter.SNAP_CPG);
if (snapCpg == null) {
// default to using same CPG as the volume
snapCpg = cpg;
}
}
hostset = connectionDetails.get(PrimeraAdapter.HOSTSET);
if (hostset == null) {
hostset = queryParms.get(PrimeraAdapter.HOSTSET);
if (hostset == null) {
throw new RuntimeException(
PrimeraAdapter.HOSTSET + " paramater/option required to configure this storage pool");
}
}
String connTimeoutStr = connectionDetails.get(PrimeraAdapter.CONNECT_TIMEOUT_MS);
if (connTimeoutStr == null) {
connTimeoutStr = queryParms.get(PrimeraAdapter.CONNECT_TIMEOUT_MS);
}
if (connTimeoutStr == null) {
connTimeout = CONNECT_TIMEOUT_MS_DEFAULT;
} else {
try {
connTimeout = Integer.parseInt(connTimeoutStr);
} catch (NumberFormatException e) {
logger.warn("Connection timeout not formatted correctly, using default", e);
connTimeout = CONNECT_TIMEOUT_MS_DEFAULT;
}
}
String keyTtlString = connectionDetails.get(PrimeraAdapter.KEY_TTL);
if (keyTtlString == null) {
keyTtlString = queryParms.get(PrimeraAdapter.KEY_TTL);
}
if (keyTtlString == null) {
keyTtl = KEY_TTL_DEFAULT;
} else {
try {
keyTtl = Integer.parseInt(keyTtlString);
} catch (NumberFormatException e) {
logger.warn("Key TTL not formatted correctly, using default", e);
keyTtl = KEY_TTL_DEFAULT;
}
}
String taskWaitTimeoutMsStr = connectionDetails.get(PrimeraAdapter.TASK_WAIT_TIMEOUT_MS);
if (taskWaitTimeoutMsStr == null) {
taskWaitTimeoutMsStr = queryParms.get(PrimeraAdapter.TASK_WAIT_TIMEOUT_MS);
}
if (taskWaitTimeoutMsStr == null) {
taskWaitTimeoutMs = PrimeraAdapter.TASK_WAIT_TIMEOUT_MS_DEFAULT;
} else {
try {
taskWaitTimeoutMs = Long.parseLong(taskWaitTimeoutMsStr);
} catch (NumberFormatException e) {
logger.warn(PrimeraAdapter.TASK_WAIT_TIMEOUT_MS + " property not set to a proper number, using default value", e);
taskWaitTimeoutMs = PrimeraAdapter.TASK_WAIT_TIMEOUT_MS_DEFAULT;
}
}
String skipTlsValidationStr = connectionDetails.get(ProviderAdapter.API_SKIP_TLS_VALIDATION_KEY);
if (skipTlsValidationStr == null) {
skipTlsValidationStr = queryParms.get(ProviderAdapter.API_SKIP_TLS_VALIDATION_KEY);
}
if (skipTlsValidationStr != null) {
skipTlsValidation = Boolean.parseBoolean(skipTlsValidationStr);
} else {
skipTlsValidation = true;
}
}
/**
* Login to the array and get an access token
*/
private void login() {
username = connectionDetails.get(ProviderAdapter.API_USERNAME_KEY);
password = connectionDetails.get(ProviderAdapter.API_PASSWORD_KEY);
String urlStr = connectionDetails.get(ProviderAdapter.API_URL_KEY);
validateLoginInfo(urlStr);
CloseableHttpResponse response = null;
try {
HttpPost request = new HttpPost(url + "/credentials");
request.addHeader("Content-Type", "application/json");
request.addHeader("Accept", "application/json");
request.setEntity(new StringEntity("{\"user\":\"" + username + "\", \"password\":\"" + password + "\"}"));
CloseableHttpClient client = getClient();
response = (CloseableHttpResponse) client.execute(request);
final int statusCode = response.getStatusLine().getStatusCode();
if (statusCode == 200 || statusCode == 201) {
PrimeraKey keyobj = mapper.readValue(response.getEntity().getContent(), PrimeraKey.class);
key = keyobj.getKey();
} else if (statusCode == 401 || statusCode == 403) {
throw new RuntimeException("Authentication or Authorization to Primera [" + url + "] with user [" + username
+ "] failed, unable to retrieve session token");
} else {
throw new RuntimeException("Unexpected HTTP response code from Primera [" + url + "] - [" + statusCode
+ "] - " + response.getStatusLine().getReasonPhrase());
}
} catch (UnsupportedEncodingException e) {
throw new RuntimeException("Error creating input for login, check username/password encoding");
} catch (UnsupportedOperationException e) {
throw new RuntimeException("Error processing login response from Primera [" + url + "]", e);
} catch (IOException e) {
throw new RuntimeException("Error sending login request to Primera [" + url + "]", e);
} finally {
try {
if (response != null) {
response.close();
}
} catch (IOException e) {
logger.debug("Error closing response from login attempt to Primera", e);
}
}
}
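// Example exchange, as a sketch (values are illustrative): POST <url>/credentials with
// body {"user":"apiuser","password":"secret"} returns {"key":"0-abc123..."}; the key is
// then sent on subsequent requests in the X-HP3PAR-WSAPI-SessionKey header.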
private CloseableHttpClient getClient() {
if (_client == null) {
RequestConfig config = RequestConfig.custom()
.setConnectTimeout((int) connTimeout)
.setConnectionRequestTimeout((int) connTimeout)
.setSocketTimeout((int) connTimeout).build();
HostnameVerifier verifier = null;
SSLContext sslContext = null;
if (this.skipTlsValidation) {
try {
verifier = NoopHostnameVerifier.INSTANCE;
sslContext = new SSLContextBuilder().loadTrustMaterial(null, TrustAllStrategy.INSTANCE).build();
} catch (KeyManagementException e) {
throw new RuntimeException(e);
} catch (NoSuchAlgorithmException e) {
throw new RuntimeException(e);
} catch (KeyStoreException e) {
throw new RuntimeException(e);
}
}
_client = HttpClients.custom()
.setDefaultRequestConfig(config)
.setSSLHostnameVerifier(verifier)
.setSSLContext(sslContext)
.build();
}
return _client;
}
@SuppressWarnings("unchecked")
private <T> T POST(String path, Object input, final TypeReference<T> type) {
CloseableHttpResponse response = null;
try {
this.refreshSession(false);
HttpPost request = new HttpPost(url + path);
request.addHeader("Content-Type", "application/json");
request.addHeader("Accept", "application/json");
request.addHeader("X-HP3PAR-WSAPI-SessionKey", getSessionKey());
try {
String data = mapper.writeValueAsString(input);
request.setEntity(new StringEntity(data));
logger.debug("POST data: " + request.getEntity());
} catch (UnsupportedEncodingException | JsonProcessingException e) {
throw new RuntimeException(
"Error processing request payload to [" + url + "] for path [" + path + "]", e);
}
CloseableHttpClient client = getClient();
try {
response = (CloseableHttpResponse) client
.execute(request);
} catch (IOException e) {
throw new RuntimeException("Error sending request to Primera [" + url + path + "]", e);
}
final int statusCode = response.getStatusLine().getStatusCode();
if (statusCode == 200 || statusCode == 201) {
try {
if (type != null) {
Header header = response.getFirstHeader("Location");
if (type.getType().getTypeName().equals(String.class.getName())) {
if (header != null) {
return (T) header.getValue();
} else {
return null;
}
} else if (type.getType().getTypeName().equals(PrimeraTaskReference.class.getName())) {
T obj = mapper.readValue(response.getEntity().getContent(), type);
PrimeraTaskReference taskref = (PrimeraTaskReference) obj;
if (header != null) {
taskref.setLocation(header.getValue());
}
return obj;
} else {
return mapper.readValue(response.getEntity().getContent(), type);
}
}
return null;
} catch (UnsupportedOperationException | IOException e) {
throw new RuntimeException("Error processing response from Primera [" + url + path + "]", e);
}
} else if (statusCode == 400) {
try {
Map<String, Object> payload = mapper.readValue(response.getEntity().getContent(),
new TypeReference<Map<String, Object>>() {
});
throw new RuntimeException("Invalid request error 400: " + payload);
} catch (UnsupportedOperationException | IOException e) {
throw new RuntimeException(
"Error processing bad request response from Primera [" + url + path + "]", e);
}
} else if (statusCode == 401 || statusCode == 403) {
throw new RuntimeException("Authentication or Authorization to Primera [" + url + "] with user [" + username
+ "] failed, unable to retrieve session token");
} else {
try {
Map<String, Object> payload = mapper.readValue(response.getEntity().getContent(),
new TypeReference<Map<String, Object>>() {
});
throw new RuntimeException("Invalid request error " + statusCode + ": " + payload);
} catch (UnsupportedOperationException | IOException e) {
throw new RuntimeException("Unexpected HTTP response code from Primera on POST [" + url + path + "] - ["
+ statusCode + "] - " + response.getStatusLine().getReasonPhrase());
}
}
} finally {
if (response != null) {
try {
response.close();
} catch (IOException e) {
logger.debug("Unexpected failure closing response to Primera API", e);
}
}
}
}
private <T> T PUT(String path, Object input, final TypeReference<T> type) {
CloseableHttpResponse response = null;
try {
this.refreshSession(false);
HttpPut request = new HttpPut(url + path);
request.addHeader("Content-Type", "application/json");
request.addHeader("Accept", "application/json");
request.addHeader("X-HP3PAR-WSAPI-SessionKey", getSessionKey());
String data = mapper.writeValueAsString(input);
request.setEntity(new StringEntity(data));
CloseableHttpClient client = getClient();
response = (CloseableHttpResponse) client.execute(request);
final int statusCode = response.getStatusLine().getStatusCode();
if (statusCode == 200 || statusCode == 201) {
if (type != null) {
return mapper.readValue(response.getEntity().getContent(), type);
}
return null;
} else if (statusCode == 400) {
Map<String, Object> payload = mapper.readValue(response.getEntity().getContent(),
new TypeReference<Map<String, Object>>() {
});
throw new RuntimeException("Invalid request error 400: " + payload);
} else if (statusCode == 401 || statusCode == 403) {
throw new RuntimeException("Authentication or Authorization to Primera [" + url + "] with user [" + username
+ "] failed, unable to retrieve session token");
} else {
Map<String, Object> payload = mapper.readValue(response.getEntity().getContent(),
new TypeReference<Map<String, Object>>() {});
throw new RuntimeException("Invalid request error from Primera on PUT [" + url + path + "]" + statusCode + ": "
+ response.getStatusLine().getReasonPhrase() + " - " + payload);
}
} catch (UnsupportedEncodingException | JsonProcessingException e) {
throw new RuntimeException(
"Error processing request payload to [" + url + "] for path [" + path + "]", e);
} catch (UnsupportedOperationException e) {
throw new RuntimeException("Error processing bad request response from Primera [" + url + "]",
e);
} catch (IOException e) {
throw new RuntimeException("Error sending request to Primera [" + url + "]", e);
} finally {
if (response != null) {
try {
response.close();
} catch (IOException e) {
logger.debug("Unexpected failure closing response to Primera API", e);
}
}
}
}
private <T> T GET(String path, final TypeReference<T> type) {
CloseableHttpResponse response = null;
try {
this.refreshSession(false);
HttpGet request = new HttpGet(url + path);
request.addHeader("Content-Type", "application/json");
request.addHeader("Accept", "application/json");
request.addHeader("X-HP3PAR-WSAPI-SessionKey", getSessionKey());
CloseableHttpClient client = getClient();
response = (CloseableHttpResponse) client.execute(request);
final int statusCode = response.getStatusLine().getStatusCode();
if (statusCode == 200) {
try {
return mapper.readValue(response.getEntity().getContent(), type);
} catch (UnsupportedOperationException | IOException e) {
throw new RuntimeException("Error processing response from Primera [" + url + "]", e);
}
} else if (statusCode == 401 || statusCode == 403) {
throw new RuntimeException("Authentication or Authorization to Primera [" + url + "] with user [" + username
+ "] failed, unable to retrieve session token");
} else if (statusCode == 404) {
return null;
} else {
throw new RuntimeException("Unexpected HTTP response code from Primera on GET [" + url + path + "] - ["
+ statusCode + "] - " + response.getStatusLine().getReasonPhrase());
}
} catch (IOException e) {
throw new RuntimeException("Error sending request to Primera [" + url + "]", e);
} catch (UnsupportedOperationException e) {
throw new RuntimeException("Error processing response from Primera [" + url + "]", e);
} finally {
if (response != null) {
try {
response.close();
} catch (IOException e) {
logger.debug("Unexpected failure closing response to Primera API", e);
}
}
}
}
private void DELETE(String path) {
CloseableHttpResponse response = null;
try {
this.refreshSession(false);
HttpDelete request = new HttpDelete(url + path);
request.addHeader("Content-Type", "application/json");
request.addHeader("Accept", "application/json");
request.addHeader("X-HP3PAR-WSAPI-SessionKey", getSessionKey());
CloseableHttpClient client = getClient();
response = (CloseableHttpResponse) client.execute(request);
final int statusCode = response.getStatusLine().getStatusCode();
if (statusCode == 200 || statusCode == 404 || statusCode == 400) {
// this means the volume was deleted successfully, or doesn't exist (effective delete), or
// the volume name is malformed or too long - meaning it never got created to begin with (effective delete)
return;
} else if (statusCode == 401 || statusCode == 403) {
throw new RuntimeException("Authentication or Authorization to Primera [" + url + "] with user [" + username
+ "] failed, unable to retrieve session token");
} else if (statusCode == 409) {
throw new RuntimeException("The volume cannot be deleted at this time due to existing dependencies. Validate that all snapshots associated with this volume have been deleted and try again." );
} else {
throw new RuntimeException("Unexpected HTTP response code from Primera on DELETE [" + url + path + "] - ["
+ statusCode + "] - " + response.getStatusLine().getReasonPhrase());
}
} catch (IOException e) {
throw new RuntimeException("Error sending request to Primera [" + url + "]", e);
} finally {
if (response != null) {
try {
response.close();
} catch (IOException e) {
logger.debug("Unexpected failure closing response to Primera API", e);
}
}
}
}
}

View File

@ -0,0 +1,36 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.primera;
import java.util.Map;
import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapter;
import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterFactory;
public class PrimeraAdapterFactory implements ProviderAdapterFactory {
@Override
public String getProviderName() {
return "Primera";
}
@Override
public ProviderAdapter create(String url, Map<String, String> details) {
return new PrimeraAdapter(url, details);
}
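// Hypothetical wiring (for illustration; the registry lookup is an assumption): the
// adaptive datastore provider selects a factory by getProviderName() and then calls
//   ProviderAdapter adapter = new PrimeraAdapterFactory().create(url, details);
// once for each storage pool it manages.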
}

View File

@ -0,0 +1,203 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.primera;
import java.util.ArrayList;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class PrimeraCpg {
private long ddsRsvdMiB;
private String tdvvVersion;
private PrimeraCpgSAGrowth sAGrowth;
private PrimeraCpgSAUsage sAUsage;
private PrimeraCpgSDGrowth sDGrowth;
private PrimeraCpgSDUsage sDUsage;
private PrimeraCpgUsrUsage usrUsage;
private ArrayList<Object> additionalStates;
private boolean dedupCapable;
private ArrayList<Object> degradedStates;
private ArrayList<Object> failedStates;
private int freeSpaceMiB;
private String name;
private int numFPVVs;
private int numTDVVs;
private int numTPVVs;
private PrimeraCpgPrivateSpaceMiB privateSpaceMiB;
private int rawFreeSpaceMiB;
private int rawSharedSpaceMiB;
private int rawTotalSpaceMiB;
private int sharedSpaceMiB;
private int state;
private int totalSpaceMiB;
private String uuid;
private int id;
public long getDdsRsvdMiB() {
return ddsRsvdMiB;
}
public void setDdsRsvdMiB(long ddsRsvdMiB) {
this.ddsRsvdMiB = ddsRsvdMiB;
}
public String getTdvvVersion() {
return tdvvVersion;
}
public void setTdvvVersion(String tdvvVersion) {
this.tdvvVersion = tdvvVersion;
}
public PrimeraCpgSAGrowth getsAGrowth() {
return sAGrowth;
}
public void setsAGrowth(PrimeraCpgSAGrowth sAGrowth) {
this.sAGrowth = sAGrowth;
}
public PrimeraCpgSAUsage getsAUsage() {
return sAUsage;
}
public void setsAUsage(PrimeraCpgSAUsage sAUsage) {
this.sAUsage = sAUsage;
}
public PrimeraCpgSDGrowth getsDGrowth() {
return sDGrowth;
}
public void setsDGrowth(PrimeraCpgSDGrowth sDGrowth) {
this.sDGrowth = sDGrowth;
}
public PrimeraCpgSDUsage getsDUsage() {
return sDUsage;
}
public void setsDUsage(PrimeraCpgSDUsage sDUsage) {
this.sDUsage = sDUsage;
}
public PrimeraCpgUsrUsage getUsrUsage() {
return usrUsage;
}
public void setUsrUsage(PrimeraCpgUsrUsage usrUsage) {
this.usrUsage = usrUsage;
}
public ArrayList<Object> getAdditionalStates() {
return additionalStates;
}
public void setAdditionalStates(ArrayList<Object> additionalStates) {
this.additionalStates = additionalStates;
}
public boolean isDedupCapable() {
return dedupCapable;
}
public void setDedupCapable(boolean dedupCapable) {
this.dedupCapable = dedupCapable;
}
public ArrayList<Object> getDegradedStates() {
return degradedStates;
}
public void setDegradedStates(ArrayList<Object> degradedStates) {
this.degradedStates = degradedStates;
}
public ArrayList<Object> getFailedStates() {
return failedStates;
}
public void setFailedStates(ArrayList<Object> failedStates) {
this.failedStates = failedStates;
}
public int getFreeSpaceMiB() {
return freeSpaceMiB;
}
public void setFreeSpaceMiB(int freeSpaceMiB) {
this.freeSpaceMiB = freeSpaceMiB;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public int getNumFPVVs() {
return numFPVVs;
}
public void setNumFPVVs(int numFPVVs) {
this.numFPVVs = numFPVVs;
}
public int getNumTDVVs() {
return numTDVVs;
}
public void setNumTDVVs(int numTDVVs) {
this.numTDVVs = numTDVVs;
}
public int getNumTPVVs() {
return numTPVVs;
}
public void setNumTPVVs(int numTPVVs) {
this.numTPVVs = numTPVVs;
}
public PrimeraCpgPrivateSpaceMiB getPrivateSpaceMiB() {
return privateSpaceMiB;
}
public void setPrivateSpaceMiB(PrimeraCpgPrivateSpaceMiB privateSpaceMiB) {
this.privateSpaceMiB = privateSpaceMiB;
}
public int getRawFreeSpaceMiB() {
return rawFreeSpaceMiB;
}
public void setRawFreeSpaceMiB(int rawFreeSpaceMiB) {
this.rawFreeSpaceMiB = rawFreeSpaceMiB;
}
public int getRawSharedSpaceMiB() {
return rawSharedSpaceMiB;
}
public void setRawSharedSpaceMiB(int rawSharedSpaceMiB) {
this.rawSharedSpaceMiB = rawSharedSpaceMiB;
}
public int getRawTotalSpaceMiB() {
return rawTotalSpaceMiB;
}
public void setRawTotalSpaceMiB(int rawTotalSpaceMiB) {
this.rawTotalSpaceMiB = rawTotalSpaceMiB;
}
public int getSharedSpaceMiB() {
return sharedSpaceMiB;
}
public void setSharedSpaceMiB(int sharedSpaceMiB) {
this.sharedSpaceMiB = sharedSpaceMiB;
}
public int getState() {
return state;
}
public void setState(int state) {
this.state = state;
}
public int getTotalSpaceMiB() {
return totalSpaceMiB;
}
public void setTotalSpaceMiB(int totalSpaceMiB) {
this.totalSpaceMiB = totalSpaceMiB;
}
public String getUuid() {
return uuid;
}
public void setUuid(String uuid) {
this.uuid = uuid;
}
public int getId() {
return id;
}
public void setId(int id) {
this.id = id;
}
}

View File

@ -0,0 +1,35 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.primera;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class PrimeraCpgDiskPattern {
private int diskType;
public int getDiskType() {
return diskType;
}
public void setDiskType(int diskType) {
this.diskType = diskType;
}
}

View File

@ -0,0 +1,49 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.primera;
import java.util.ArrayList;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class PrimeraCpgLDLayout {
private int rAIDType;
private ArrayList<PrimeraCpgDiskPattern> diskPatterns;
private int hA;
public int getrAIDType() {
return rAIDType;
}
public void setrAIDType(int rAIDType) {
this.rAIDType = rAIDType;
}
public ArrayList<PrimeraCpgDiskPattern> getDiskPatterns() {
return diskPatterns;
}
public void setDiskPatterns(ArrayList<PrimeraCpgDiskPattern> diskPatterns) {
this.diskPatterns = diskPatterns;
}
public int gethA() {
return hA;
}
public void sethA(int hA) {
this.hA = hA;
}
}

View File

@ -0,0 +1,54 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.primera;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class PrimeraCpgPrivateSpaceMiB {
private int base;
private int rawBase;
private int rawSnapshot;
private int snapshot;
public int getBase() {
return base;
}
public void setBase(int base) {
this.base = base;
}
public int getRawBase() {
return rawBase;
}
public void setRawBase(int rawBase) {
this.rawBase = rawBase;
}
public int getRawSnapshot() {
return rawSnapshot;
}
public void setRawSnapshot(int rawSnapshot) {
this.rawSnapshot = rawSnapshot;
}
public int getSnapshot() {
return snapshot;
}
public void setSnapshot(int snapshot) {
this.snapshot = snapshot;
}
}

View File

@ -0,0 +1,40 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.primera;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class PrimeraCpgSAGrowth {
private PrimeraCpgLDLayout lDLayout;
private int incrementMiB;
public PrimeraCpgLDLayout getlDLayout() {
return lDLayout;
}
public void setlDLayout(PrimeraCpgLDLayout lDLayout) {
this.lDLayout = lDLayout;
}
public int getIncrementMiB() {
return incrementMiB;
}
public void setIncrementMiB(int incrementMiB) {
this.incrementMiB = incrementMiB;
}
}

View File

@ -0,0 +1,54 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.primera;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class PrimeraCpgSAUsage {
private int rawTotalMiB;
private int rawUsedMiB;
private int totalMiB;
private int usedMiB;
public int getRawTotalMiB() {
return rawTotalMiB;
}
public void setRawTotalMiB(int rawTotalMiB) {
this.rawTotalMiB = rawTotalMiB;
}
public int getRawUsedMiB() {
return rawUsedMiB;
}
public void setRawUsedMiB(int rawUsedMiB) {
this.rawUsedMiB = rawUsedMiB;
}
public int getTotalMiB() {
return totalMiB;
}
public void setTotalMiB(int totalMiB) {
this.totalMiB = totalMiB;
}
public int getUsedMiB() {
return usedMiB;
}
public void setUsedMiB(int usedMiB) {
this.usedMiB = usedMiB;
}
}

View File

@ -0,0 +1,54 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.primera;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class PrimeraCpgSDGrowth {
private PrimeraCpgLDLayout lDLayout;
private int incrementMiB;
private int limitMiB;
private int warningMiB;
public PrimeraCpgLDLayout getlDLayout() {
return lDLayout;
}
public void setlDLayout(PrimeraCpgLDLayout lDLayout) {
this.lDLayout = lDLayout;
}
public int getIncrementMiB() {
return incrementMiB;
}
public void setIncrementMiB(int incrementMiB) {
this.incrementMiB = incrementMiB;
}
public int getLimitMiB() {
return limitMiB;
}
public void setLimitMiB(int limitMiB) {
this.limitMiB = limitMiB;
}
public int getWarningMiB() {
return warningMiB;
}
public void setWarningMiB(int warningMiB) {
this.warningMiB = warningMiB;
}
}

View File

@ -0,0 +1,54 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.primera;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class PrimeraCpgSDUsage {
private int rawTotalMiB;
private int rawUsedMiB;
private int totalMiB;
private int usedMiB;
public int getRawTotalMiB() {
return rawTotalMiB;
}
public void setRawTotalMiB(int rawTotalMiB) {
this.rawTotalMiB = rawTotalMiB;
}
public int getRawUsedMiB() {
return rawUsedMiB;
}
public void setRawUsedMiB(int rawUsedMiB) {
this.rawUsedMiB = rawUsedMiB;
}
public int getTotalMiB() {
return totalMiB;
}
public void setTotalMiB(int totalMiB) {
this.totalMiB = totalMiB;
}
public int getUsedMiB() {
return usedMiB;
}
public void setUsedMiB(int usedMiB) {
this.usedMiB = usedMiB;
}
}

View File

@ -0,0 +1,54 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.primera;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class PrimeraCpgUsrUsage {
private int rawTotalMiB;
private int rawUsedMiB;
private int totalMiB;
private int usedMiB;
public int getRawTotalMiB() {
return rawTotalMiB;
}
public void setRawTotalMiB(int rawTotalMiB) {
this.rawTotalMiB = rawTotalMiB;
}
public int getRawUsedMiB() {
return rawUsedMiB;
}
public void setRawUsedMiB(int rawUsedMiB) {
this.rawUsedMiB = rawUsedMiB;
}
public int getTotalMiB() {
return totalMiB;
}
public void setTotalMiB(int totalMiB) {
this.totalMiB = totalMiB;
}
public int getUsedMiB() {
return usedMiB;
}
public void setUsedMiB(int usedMiB) {
this.usedMiB = usedMiB;
}
}

View File

@ -0,0 +1,141 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.primera;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class PrimeraHostset {
private String comment;
private Integer id;
private String name;
private List<String> setmembers = new ArrayList<String>();
private String uuid;
private Map<String, Object> additionalProperties = new LinkedHashMap<String, Object>();
public String getComment() {
return comment;
}
public void setComment(String comment) {
this.comment = comment;
}
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public List<String> getSetmembers() {
return setmembers;
}
public void setSetmembers(List<String> setmembers) {
this.setmembers = setmembers;
}
public String getUuid() {
return uuid;
}
public void setUuid(String uuid) {
this.uuid = uuid;
}
public Map<String, Object> getAdditionalProperties() {
return additionalProperties;
}
public void setAdditionalProperties(Map<String, Object> additionalProperties) {
this.additionalProperties = additionalProperties;
}
// request body used to export a volume (VLUN) to a host or hostset
public static class PrimeraHostsetVLUNRequest {
private String volumeName;
private Boolean autoLun = true;
private Integer lun = 0;
private Integer maxAutoLun = 0;
/**
* This can be a single hostname OR a set of hosts in the format
* "set:<hostset>".
* For the purposes of this driver, it is expected that the predominant use case
* is a hostset that is aligned with a CloudStack cluster.
*/
private String hostname;
public String getVolumeName() {
return volumeName;
}
public void setVolumeName(String volumeName) {
this.volumeName = volumeName;
}
public Boolean getAutoLun() {
return autoLun;
}
public void setAutoLun(Boolean autoLun) {
this.autoLun = autoLun;
}
public Integer getLun() {
return lun;
}
public void setLun(Integer lun) {
this.lun = lun;
}
public Integer getMaxAutoLun() {
return maxAutoLun;
}
public void setMaxAutoLun(Integer maxAutoLun) {
this.maxAutoLun = maxAutoLun;
}
public String getHostname() {
return hostname;
}
public void setHostname(String hostname) {
this.hostname = hostname;
}
}
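// Illustrative sketch (not part of the driver; names are hypothetical): exporting a
// volume to every host in a hostset aligned with a CloudStack cluster:
//   PrimeraHostsetVLUNRequest req = new PrimeraHostsetVLUNRequest();
//   req.setVolumeName("csp-v-1234");         // hypothetical volume name
//   req.setHostname("set:cluster1-hostset"); // "set:" prefix targets a hostset
//   req.setAutoLun(true);                    // let the array choose the LUN id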
}

View File

@ -0,0 +1,35 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.primera;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class PrimeraKey {
private String key;
public String getKey() {
return key;
}
public void setKey(String key) {
this.key = key;
}
}

View File

@ -0,0 +1,44 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.primera;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class PrimeraTaskReference {
private String taskid;
/**
* not actually returned by the API; if a response carries a Location header,
* the adapter populates this field automatically when deserializing to this type
*/
private String location;
public String getTaskid() {
return taskid;
}
public void setTaskid(String taskid) {
this.taskid = taskid;
}
public String getLocation() {
return location;
}
public void setLocation(String location) {
this.location = location;
}
}

View File

@ -0,0 +1,174 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.primera;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class PrimeraTaskStatus {
private Integer id;
private Integer type;
private String name;
private Integer status;
private Integer completedPhases;
private Integer totalPhases;
private Integer completedSteps;
private Integer totalSteps;
private String startTime;
private String finishTime;
private Integer priority;
private String user;
private String detailedStatus;
public static final Integer STATUS_DONE = 1;
public static final Integer STATUS_ACTIVE = 2;
public static final Integer STATUS_CANCELLED = 3;
public static final Integer STATUS_FAILED = 4;
// status is a boxed Integer, so compare by value rather than with == / !=
public boolean isFinished() {
return !STATUS_ACTIVE.equals(status);
}
public boolean isSuccess() {
return STATUS_DONE.equals(status);
}
public String getStatusName() {
if (STATUS_DONE.equals(status)) {
return "DONE";
} else if (STATUS_ACTIVE.equals(status)) {
return "ACTIVE";
} else if (STATUS_CANCELLED.equals(status)) {
return "CANCELLED";
} else if (STATUS_FAILED.equals(status)) {
return "FAILED";
} else {
return "UNKNOWN";
}
}
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public Integer getType() {
return type;
}
public void setType(Integer type) {
this.type = type;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public Integer getStatus() {
return status;
}
public void setStatus(Integer status) {
this.status = status;
}
public Integer getCompletedPhases() {
return completedPhases;
}
public void setCompletedPhases(Integer completedPhases) {
this.completedPhases = completedPhases;
}
public Integer getTotalPhases() {
return totalPhases;
}
public void setTotalPhases(Integer totalPhases) {
this.totalPhases = totalPhases;
}
public Integer getCompletedSteps() {
return completedSteps;
}
public void setCompletedSteps(Integer completedSteps) {
this.completedSteps = completedSteps;
}
public Integer getTotalSteps() {
return totalSteps;
}
public void setTotalSteps(Integer totalSteps) {
this.totalSteps = totalSteps;
}
public String getStartTime() {
return startTime;
}
public void setStartTime(String startTime) {
this.startTime = startTime;
}
public String getFinishTime() {
return finishTime;
}
public void setFinishTime(String finishTime) {
this.finishTime = finishTime;
}
public Integer getPriority() {
return priority;
}
public void setPriority(Integer priority) {
this.priority = priority;
}
public String getUser() {
return user;
}
public void setUser(String user) {
this.user = user;
}
public String getDetailedStatus() {
return detailedStatus;
}
public void setDetailedStatus(String detailedStatus) {
this.detailedStatus = detailedStatus;
}
}

View File

@ -0,0 +1,180 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.primera;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class PrimeraVlun {
private int lun;
private String volumeName;
private String hostname;
private String remoteName;
private int type;
private String serial;
private PrimeraPortPosition portPos;
private String volumeWWN;
private int multipathing;
private int failedPathPol;
private int failedPathInterval;
private String hostDeviceName;
@JsonProperty("Subsystem_NQN")
private String subsystemNQN;
private boolean active;
public static class PrimeraPortPosition {
private int node;
private int slot;
private int cardPort;
public int getNode() {
return node;
}
public void setNode(int node) {
this.node = node;
}
public int getSlot() {
return slot;
}
public void setSlot(int slot) {
this.slot = slot;
}
public int getCardPort() {
return cardPort;
}
public void setCardPort(int cardPort) {
this.cardPort = cardPort;
}
}
public int getLun() {
return lun;
}
public void setLun(int lun) {
this.lun = lun;
}
public String getVolumeName() {
return volumeName;
}
public void setVolumeName(String volumeName) {
this.volumeName = volumeName;
}
public String getHostname() {
return hostname;
}
public void setHostname(String hostname) {
this.hostname = hostname;
}
public String getRemoteName() {
return remoteName;
}
public void setRemoteName(String remoteName) {
this.remoteName = remoteName;
}
public int getType() {
return type;
}
public void setType(int type) {
this.type = type;
}
public String getSerial() {
return serial;
}
public void setSerial(String serial) {
this.serial = serial;
}
public PrimeraPortPosition getPortPos() {
return portPos;
}
public void setPortPos(PrimeraPortPosition portPos) {
this.portPos = portPos;
}
public String getVolumeWWN() {
return volumeWWN;
}
public void setVolumeWWN(String volumeWWN) {
this.volumeWWN = volumeWWN;
}
public int getMultipathing() {
return multipathing;
}
public void setMultipathing(int multipathing) {
this.multipathing = multipathing;
}
public int getFailedPathPol() {
return failedPathPol;
}
public void setFailedPathPol(int failedPathPol) {
this.failedPathPol = failedPathPol;
}
public int getFailedPathInterval() {
return failedPathInterval;
}
public void setFailedPathInterval(int failedPathInterval) {
this.failedPathInterval = failedPathInterval;
}
public String getHostDeviceName() {
return hostDeviceName;
}
public void setHostDeviceName(String hostDeviceName) {
this.hostDeviceName = hostDeviceName;
}
public String getSubsystemNQN() {
return subsystemNQN;
}
public void setSubsystemNQN(String subsystemNQN) {
this.subsystemNQN = subsystemNQN;
}
public boolean isActive() {
return active;
}
public void setActive(boolean active) {
this.active = active;
}
}

View File

@ -0,0 +1,49 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.primera;
import java.util.List;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class PrimeraVlunList {
private int total;
private String serial;
private List<PrimeraVlun> members;
public int getTotal() {
return total;
}
public void setTotal(int total) {
this.total = total;
}
public String getSerial() {
return serial;
}
public void setSerial(String serial) {
this.serial = serial;
}
public List<PrimeraVlun> getMembers() {
return members;
}
public void setMembers(List<PrimeraVlun> members) {
this.members = members;
}
}

View File

@ -0,0 +1,420 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.primera;
import java.util.ArrayList;
import java.util.Date;
import org.apache.cloudstack.storage.datastore.adapter.ProviderSnapshot;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class PrimeraVolume implements ProviderSnapshot {
@JsonIgnore
private AddressType addressType = AddressType.FIBERWWN;
@JsonIgnore
private String connectionId;
@JsonIgnore
private Integer priority = 0;
private String physParentId = null;
private Integer parentId = null;
private String copyOf = null;
private Integer roChildId = null;
private Integer rwChildId = null;
private String snapCPG = null;
private Long total = null;
/**
* Actions are enumerated and listed at
* https://support.hpe.com/hpesc/public/docDisplay?docId=a00118636en_us&page=v25706371.html
*/
private Integer action = null;
private String comment = null;
private Integer id = null;
private String name = null;
private Integer deduplicationState = null;
private Integer compressionState = null;
private Integer provisioningType = null;
private Integer copyType = null;
private Integer baseId = null;
private Boolean readOnly = null;
private Integer state = null;
private ArrayList<Object> failedStates = null;
private ArrayList<Object> degradedStates = null;
private ArrayList<Object> additionalStates = null;
private PrimeraVolumeAdminSpace adminSpace = null;
private PrimeraVolumeSnapshotSpace snapshotSpace = null;
private PrimeraVolumeUserSpace userSpace = null;
private Integer totalReservedMiB = null;
private Integer totalUsedMiB = null;
private Integer sizeMiB = null;
private Integer hostWriteMiB = null;
private String wwn = null;
private Integer creationTimeSec = null;
private Date creationTime8601 = null;
private Integer ssSpcAllocWarningPct;
private Integer ssSpcAllocLimitPct = null;
private Integer usrSpcAllocWarningPct = null;
private Integer usrSpcAllocLimitPct = null;
private PrimeraVolumePolicies policies = null;
private String userCPG = null;
private String uuid = null;
private Integer sharedParentId = null;
private Integer udid = null;
private PrimeraVolumeCapacityEfficiency capacityEfficiency = null;
private Integer rcopyStatus = null;
private ArrayList<PrimeraVolumeLink> links = null;
public String getPhysParentId() {
return physParentId;
}
public void setPhysParentId(String physParentId) {
this.physParentId = physParentId;
}
public Integer getParentId() {
return parentId;
}
public void setParentId(Integer parentId) {
this.parentId = parentId;
}
public String getCopyOf() {
return copyOf;
}
public void setCopyOf(String copyOf) {
this.copyOf = copyOf;
}
public Integer getRoChildId() {
return roChildId;
}
public void setRoChildId(Integer roChildId) {
this.roChildId = roChildId;
}
public Integer getRwChildId() {
return rwChildId;
}
public void setRwChildId(Integer rwChildId) {
this.rwChildId = rwChildId;
}
public String getSnapCPG() {
return snapCPG;
}
public void setSnapCPG(String snapCPG) {
this.snapCPG = snapCPG;
}
public Long getTotal() {
return total;
}
public void setTotal(Long total) {
this.total = total;
}
public Integer getAction() {
return action;
}
public void setAction(Integer action) {
this.action = action;
}
public String getComment() {
return comment;
}
public void setComment(String comment) {
this.comment = comment;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public Integer getDeduplicationState() {
return deduplicationState;
}
public void setDeduplicationState(Integer deduplicationState) {
this.deduplicationState = deduplicationState;
}
public Integer getCompressionState() {
return compressionState;
}
public void setCompressionState(Integer compressionState) {
this.compressionState = compressionState;
}
public Integer getProvisioningType() {
return provisioningType;
}
public void setProvisioningType(Integer provisioningType) {
this.provisioningType = provisioningType;
}
public Integer getCopyType() {
return copyType;
}
public void setCopyType(Integer copyType) {
this.copyType = copyType;
}
public Integer getBaseId() {
return baseId;
}
public void setBaseId(Integer baseId) {
this.baseId = baseId;
}
public Boolean getReadOnly() {
return readOnly;
}
public void setReadOnly(Boolean readOnly) {
this.readOnly = readOnly;
}
public String getState() {
if (state != null) {
return state.toString();
}
return null;
}
public void setState(Integer state) {
this.state = state;
}
public ArrayList<Object> getFailedStates() {
return failedStates;
}
public void setFailedStates(ArrayList<Object> failedStates) {
this.failedStates = failedStates;
}
public ArrayList<Object> getDegradedStates() {
return degradedStates;
}
public void setDegradedStates(ArrayList<Object> degradedStates) {
this.degradedStates = degradedStates;
}
public ArrayList<Object> getAdditionalStates() {
return additionalStates;
}
public void setAdditionalStates(ArrayList<Object> additionalStates) {
this.additionalStates = additionalStates;
}
public PrimeraVolumeAdminSpace getAdminSpace() {
return adminSpace;
}
public void setAdminSpace(PrimeraVolumeAdminSpace adminSpace) {
this.adminSpace = adminSpace;
}
public PrimeraVolumeSnapshotSpace getSnapshotSpace() {
return snapshotSpace;
}
public void setSnapshotSpace(PrimeraVolumeSnapshotSpace snapshotSpace) {
this.snapshotSpace = snapshotSpace;
}
public PrimeraVolumeUserSpace getUserSpace() {
return userSpace;
}
public void setUserSpace(PrimeraVolumeUserSpace userSpace) {
this.userSpace = userSpace;
}
public Integer getTotalReservedMiB() {
return totalReservedMiB;
}
public void setTotalReservedMiB(Integer totalReservedMiB) {
this.totalReservedMiB = totalReservedMiB;
}
public Integer getTotalUsedMiB() {
return totalUsedMiB;
}
public void setTotalUsedMiB(Integer totalUsedMiB) {
this.totalUsedMiB = totalUsedMiB;
}
public Integer getSizeMiB() {
return sizeMiB;
}
public void setSizeMiB(Integer sizeMiB) {
this.sizeMiB = sizeMiB;
}
public Integer getHostWriteMiB() {
return hostWriteMiB;
}
public void setHostWriteMiB(Integer hostWriteMiB) {
this.hostWriteMiB = hostWriteMiB;
}
public String getWwn() {
return wwn;
}
public void setWwn(String wwn) {
this.wwn = wwn;
}
public Integer getCreationTimeSec() {
return creationTimeSec;
}
public void setCreationTimeSec(Integer creationTimeSec) {
this.creationTimeSec = creationTimeSec;
}
public Date getCreationTime8601() {
return creationTime8601;
}
public void setCreationTime8601(Date creationTime8601) {
this.creationTime8601 = creationTime8601;
}
public Integer getSsSpcAllocWarningPct() {
return ssSpcAllocWarningPct;
}
public void setSsSpcAllocWarningPct(Integer ssSpcAllocWarningPct) {
this.ssSpcAllocWarningPct = ssSpcAllocWarningPct;
}
public Integer getSsSpcAllocLimitPct() {
return ssSpcAllocLimitPct;
}
public void setSsSpcAllocLimitPct(Integer ssSpcAllocLimitPct) {
this.ssSpcAllocLimitPct = ssSpcAllocLimitPct;
}
public Integer getUsrSpcAllocWarningPct() {
return usrSpcAllocWarningPct;
}
public void setUsrSpcAllocWarningPct(Integer usrSpcAllocWarningPct) {
this.usrSpcAllocWarningPct = usrSpcAllocWarningPct;
}
public Integer getUsrSpcAllocLimitPct() {
return usrSpcAllocLimitPct;
}
public void setUsrSpcAllocLimitPct(Integer usrSpcAllocLimitPct) {
this.usrSpcAllocLimitPct = usrSpcAllocLimitPct;
}
public PrimeraVolumePolicies getPolicies() {
return policies;
}
public void setPolicies(PrimeraVolumePolicies policies) {
this.policies = policies;
}
public String getUserCPG() {
return userCPG;
}
public void setUserCPG(String userCPG) {
this.userCPG = userCPG;
}
public String getUuid() {
return uuid;
}
public void setUuid(String uuid) {
this.uuid = uuid;
}
public Integer getSharedParentId() {
return sharedParentId;
}
public void setSharedParentId(Integer sharedParentId) {
this.sharedParentId = sharedParentId;
}
public Integer getUdid() {
return udid;
}
public void setUdid(Integer udid) {
this.udid = udid;
}
public PrimeraVolumeCapacityEfficiency getCapacityEfficiency() {
return capacityEfficiency;
}
public void setCapacityEfficiency(PrimeraVolumeCapacityEfficiency capacityEfficiency) {
this.capacityEfficiency = capacityEfficiency;
}
public Integer getRcopyStatus() {
return rcopyStatus;
}
public void setRcopyStatus(Integer rcopyStatus) {
this.rcopyStatus = rcopyStatus;
}
public ArrayList<PrimeraVolumeLink> getLinks() {
return links;
}
public void setLinks(ArrayList<PrimeraVolumeLink> links) {
this.links = links;
}
@Override
@JsonIgnore
public Boolean isDestroyed() {
return false;
}
@Override
public void setId(String id) {
this.id = Integer.parseInt(id);
}
public String getId() {
if (id != null) {
return Integer.toString(id);
}
return null;
}
@Override
public Integer getPriority() {
return priority;
}
@Override
public void setPriority(Integer priority) {
this.priority = priority;
}
@Override
public AddressType getAddressType() {
return addressType;
}
@Override
public void setAddressType(AddressType addressType) {
this.addressType = addressType;
}
@Override
public String getAddress() {
return this.wwn;
}
@Override
@JsonIgnore
public Long getAllocatedSizeInBytes() {
if (this.getSizeMiB() != null) {
return this.getSizeMiB() * PrimeraAdapter.BYTES_IN_MiB;
}
return 0L;
}
@Override
@JsonIgnore
public Long getUsedBytes() {
if (this.getTotalReservedMiB() != null) {
return this.getTotalReservedMiB() * PrimeraAdapter.BYTES_IN_MiB;
}
return 0L;
}
@Override
@JsonIgnore
public String getExternalUuid() {
return uuid;
}
public void setExternalUuid(String uuid) {
this.uuid = uuid;
}
@Override
@JsonIgnore
public String getExternalName() {
return name;
}
public void setExternalName(String name) {
this.name = name;
}
@Override
@JsonIgnore
public String getExternalConnectionId() {
return connectionId;
}
public void setExternalConnection(String connectionId) {
this.connectionId = connectionId;
}
@Override
@JsonIgnore
public Boolean canAttachDirectly() {
return true;
}
}
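A minimal sketch of how a Jackson-annotated DTO like PrimeraVolume is typically consumed; the JSON payload below is invented for illustration and is not taken from the Primera API documentation.

import com.fasterxml.jackson.databind.ObjectMapper;

public class PrimeraVolumeExample {
    public static void main(String[] args) throws Exception {
        // Invented payload; ignoreUnknown=true lets fields we don't model pass silently
        String json = "{\"id\":101,\"name\":\"csvol-1\","
                + "\"wwn\":\"60002AC0000000000000001F00021234\","
                + "\"sizeMiB\":10240,\"somethingNew\":true}";
        PrimeraVolume vol = new ObjectMapper().readValue(json, PrimeraVolume.class);
        System.out.println(vol.getName() + " @ " + vol.getAddress());   // address resolves to the WWN
        System.out.println(vol.getAllocatedSizeInBytes() + " bytes");   // sizeMiB * BYTES_IN_MiB
    }
}

Serialization goes the other way: the NON_NULL inclusion on the class keeps request and response bodies sparse.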

View File

@ -0,0 +1,54 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.primera;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class PrimeraVolumeAdminSpace {
private int reservedMiB;
private int rawReservedMiB;
private int usedMiB;
private int freeMiB;
public int getReservedMiB() {
return reservedMiB;
}
public void setReservedMiB(int reservedMiB) {
this.reservedMiB = reservedMiB;
}
public int getRawReservedMiB() {
return rawReservedMiB;
}
public void setRawReservedMiB(int rawReservedMiB) {
this.rawReservedMiB = rawReservedMiB;
}
public int getUsedMiB() {
return usedMiB;
}
public void setUsedMiB(int usedMiB) {
this.usedMiB = usedMiB;
}
public int getFreeMiB() {
return freeMiB;
}
public void setFreeMiB(int freeMiB) {
this.freeMiB = freeMiB;
}
}

View File

@ -0,0 +1,40 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.primera;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class PrimeraVolumeCapacityEfficiency {
private double compaction;
private double deduplication;
public double getCompaction() {
return compaction;
}
public void setCompaction(double compaction) {
this.compaction = compaction;
}
public double getDeduplication() {
return deduplication;
}
public void setDeduplication(double deduplication) {
this.deduplication = deduplication;
}
}

View File

@ -0,0 +1,43 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.primera;
/**
* https://support.hpe.com/hpesc/public/docDisplay?docId=a00118636en_us&page=v24885490.html
*/
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class PrimeraVolumeCopyRequest {
private String action = "createPhysicalCopy";
private PrimeraVolumeCopyRequestParameters parameters;
public String getAction() {
return action;
}
public void setAction(String action) {
this.action = action;
}
public PrimeraVolumeCopyRequestParameters getParameters() {
return parameters;
}
public void setParameters(PrimeraVolumeCopyRequestParameters parameters) {
this.parameters = parameters;
}
}

View File

@ -0,0 +1,101 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.primera;
/**
* https://support.hpe.com/hpesc/public/docDisplay?docId=a00118636en_us&page=v24885490.html
*/
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class PrimeraVolumeCopyRequestParameters {
private String destVolume = null;
private String destCPG = null;
private Boolean online = false;
private String wwn = null;
private Boolean tpvv = null;
private Boolean reduce = null;
private String snapCPG = null;
private Boolean skipZero = null;
private Boolean saveSnapshot = null;
/** 1=HIGH, 2=MED, 3=LOW */
private Integer priority = null;
public String getDestVolume() {
return destVolume;
}
public void setDestVolume(String destVolume) {
this.destVolume = destVolume;
}
public String getDestCPG() {
return destCPG;
}
public void setDestCPG(String destCPG) {
this.destCPG = destCPG;
}
public Boolean getOnline() {
return online;
}
public void setOnline(Boolean online) {
this.online = online;
}
public String getWwn() {
return wwn;
}
public void setWwn(String wwn) {
this.wwn = wwn;
}
public Boolean getTpvv() {
return tpvv;
}
public void setTpvv(Boolean tpvv) {
this.tpvv = tpvv;
}
public Boolean getReduce() {
return reduce;
}
public void setReduce(Boolean reduce) {
this.reduce = reduce;
}
public String getSnapCPG() {
return snapCPG;
}
public void setSnapCPG(String snapCPG) {
this.snapCPG = snapCPG;
}
public Boolean getSkipZero() {
return skipZero;
}
public void setSkipZero(Boolean skipZero) {
this.skipZero = skipZero;
}
public Boolean getSaveSnapshot() {
return saveSnapshot;
}
public void setSaveSnapshot(Boolean saveSnapshot) {
this.saveSnapshot = saveSnapshot;
}
public Integer getPriority() {
return priority;
}
public void setPriority(Integer priority) {
this.priority = priority;
}
}
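A hedged sketch of composing a physical-copy request from the two classes above; the destination volume and CPG names are invented, and the REST call itself is omitted.

import com.fasterxml.jackson.databind.ObjectMapper;

public class PrimeraCopyRequestExample {
    public static void main(String[] args) throws Exception {
        PrimeraVolumeCopyRequestParameters params = new PrimeraVolumeCopyRequestParameters();
        params.setDestVolume("csvol-1-copy"); // invented destination volume name
        params.setDestCPG("SSD_r6");          // invented CPG name
        params.setOnline(true);
        params.setPriority(2);                // 1=HIGH, 2=MED, 3=LOW per the field comment

        PrimeraVolumeCopyRequest request = new PrimeraVolumeCopyRequest(); // action defaults to "createPhysicalCopy"
        request.setParameters(params);

        // NON_NULL inclusion omits the unset tpvv/reduce/snapCPG/... fields from the body
        System.out.println(new ObjectMapper().writeValueAsString(request));
    }
}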

View File

@ -0,0 +1,40 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.primera;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class PrimeraVolumeLink {
private String href;
private String rel;
public String getHref() {
return href;
}
public void setHref(String href) {
this.href = href;
}
public String getRel() {
return rel;
}
public void setRel(String rel) {
this.rel = rel;
}
}

View File

@ -0,0 +1,37 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.primera;
import java.util.List;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class PrimeraVolumeLinkList {
private List<PrimeraVolumeLink> list;
public List<PrimeraVolumeLink> getList() {
return list;
}
public void setList(List<PrimeraVolumeLink> list) {
this.list = list;
}
}

View File

@ -0,0 +1,82 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.primera;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class PrimeraVolumePolicies {
private Boolean tpZeroFill;
private Boolean staleSS;
private Boolean oneHost;
private Boolean zeroDetect;
private Boolean system;
private Boolean caching;
private Boolean fsvc;
private Integer hostDIF;
public Boolean getTpZeroFill() {
return tpZeroFill;
}
public void setTpZeroFill(Boolean tpZeroFill) {
this.tpZeroFill = tpZeroFill;
}
public Boolean getStaleSS() {
return staleSS;
}
public void setStaleSS(Boolean staleSS) {
this.staleSS = staleSS;
}
public Boolean getOneHost() {
return oneHost;
}
public void setOneHost(Boolean oneHost) {
this.oneHost = oneHost;
}
public Boolean getZeroDetect() {
return zeroDetect;
}
public void setZeroDetect(Boolean zeroDetect) {
this.zeroDetect = zeroDetect;
}
public Boolean getSystem() {
return system;
}
public void setSystem(Boolean system) {
this.system = system;
}
public Boolean getCaching() {
return caching;
}
public void setCaching(Boolean caching) {
this.caching = caching;
}
public Boolean getFsvc() {
return fsvc;
}
public void setFsvc(Boolean fsvc) {
this.fsvc = fsvc;
}
public Integer getHostDIF() {
return hostDIF;
}
public void setHostDIF(Integer hostDIF) {
this.hostDIF = hostDIF;
}
}

View File

@ -0,0 +1,57 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.primera;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class PrimeraVolumePromoteRequest {
/**
* Defines action for the request as described at https://support.hpe.com/hpesc/public/docDisplay?docId=a00114827en_us&page=v25706371.html
*/
private Integer action = 4;
private Boolean online = true;
private Integer priority = 2; // MEDIUM
private Boolean allowRemoteCopyParent = true;
public Integer getAction() {
return action;
}
public void setAction(Integer action) {
this.action = action;
}
public Boolean getOnline() {
return online;
}
public void setOnline(Boolean online) {
this.online = online;
}
public Integer getPriority() {
return priority;
}
public void setPriority(Integer priority) {
this.priority = priority;
}
public Boolean getAllowRemoteCopyParent() {
return allowRemoteCopyParent;
}
public void setAllowRemoteCopyParent(Boolean allowRemoteCopyParent) {
this.allowRemoteCopyParent = allowRemoteCopyParent;
}
}

View File

@ -0,0 +1,110 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.primera;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class PrimeraVolumeRequest {
private String name;
private String cpg;
private long sizeMiB;
private String comment;
private String snapCPG = null;
private Boolean reduce;
private Boolean tpvv;
private Integer ssSpcAllocLimitPct;
private Integer ssSpcAllocWarningPct;
private Integer usrSpcAllocWarningPct;
private Integer usrSpcAllocLimitPct;
private PrimeraVolumePolicies policies;
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getCpg() {
return cpg;
}
public void setCpg(String cpg) {
this.cpg = cpg;
}
public long getSizeMiB() {
return sizeMiB;
}
public void setSizeMiB(long sizeMiB) {
this.sizeMiB = sizeMiB;
}
public String getComment() {
return comment;
}
public void setComment(String comment) {
this.comment = comment;
}
public String getSnapCPG() {
return snapCPG;
}
public void setSnapCPG(String snapCPG) {
this.snapCPG = snapCPG;
}
public Boolean getReduce() {
return reduce;
}
public void setReduce(Boolean reduce) {
this.reduce = reduce;
}
public Boolean getTpvv() {
return tpvv;
}
public void setTpvv(Boolean tpvv) {
this.tpvv = tpvv;
}
public Integer getSsSpcAllocLimitPct() {
return ssSpcAllocLimitPct;
}
public void setSsSpcAllocLimitPct(Integer ssSpcAllocLimitPct) {
this.ssSpcAllocLimitPct = ssSpcAllocLimitPct;
}
public Integer getSsSpcAllocWarningPct() {
return ssSpcAllocWarningPct;
}
public void setSsSpcAllocWarningPct(Integer ssSpcAllocWarningPct) {
this.ssSpcAllocWarningPct = ssSpcAllocWarningPct;
}
public Integer getUsrSpcAllocWarningPct() {
return usrSpcAllocWarningPct;
}
public void setUsrSpcAllocWarningPct(Integer usrSpcAllocWarningPct) {
this.usrSpcAllocWarningPct = usrSpcAllocWarningPct;
}
public Integer getUsrSpcAllocLimitPct() {
return usrSpcAllocLimitPct;
}
public void setUsrSpcAllocLimitPct(Integer usrSpcAllocLimitPct) {
this.usrSpcAllocLimitPct = usrSpcAllocLimitPct;
}
public PrimeraVolumePolicies getPolicies() {
return policies;
}
public void setPolicies(PrimeraVolumePolicies policies) {
this.policies = policies;
}
}
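A hedged sketch of a volume-create body built from this class; the CPG name is invented, and whether tpvv and reduce may be combined is left to the Primera documentation.

import com.fasterxml.jackson.databind.ObjectMapper;

public class PrimeraVolumeRequestExample {
    public static void main(String[] args) throws Exception {
        PrimeraVolumeRequest req = new PrimeraVolumeRequest();
        req.setName("csvol-1");
        req.setCpg("SSD_r6");                  // invented CPG name
        req.setSizeMiB(10L * 1024);            // 10 GiB expressed in MiB
        req.setComment("managed by cloudstack");
        req.setTpvv(true);                     // thin-provisioned virtual volume
        // sizeMiB is a primitive and always serialized; null wrappers are dropped by NON_NULL
        System.out.println(new ObjectMapper().writeValueAsString(req));
    }
}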

View File

@ -0,0 +1,50 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.primera;
/**
* https://support.hpe.com/hpesc/public/docDisplay?docId=a00118636en_us&page=s_creating_snapshot_volumes.html
*/
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class PrimeraVolumeRevertSnapshotRequest {
private int action = 4; //PROMOTE_VIRTUAL_COPY
private Boolean online = true;
private Integer priority = 2;
public int getAction() {
return action;
}
public void setAction(int action) {
this.action = action;
}
public Boolean getOnline() {
return online;
}
public void setOnline(Boolean online) {
this.online = online;
}
public Integer getPriority() {
return priority;
}
public void setPriority(Integer priority) {
this.priority = priority;
}
}
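Since every field here carries a default, serializing a freshly constructed instance already yields a complete body. A hedged sketch follows; the expected JSON is inferred from the defaults above, not from the HPE documentation.

import com.fasterxml.jackson.databind.ObjectMapper;

public class PrimeraRevertExample {
    public static void main(String[] args) throws Exception {
        // With the defaults above, this should print {"action":4,"online":true,"priority":2}
        System.out.println(new ObjectMapper().writeValueAsString(new PrimeraVolumeRevertSnapshotRequest()));
    }
}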

View File

@ -0,0 +1,43 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.primera;
/**
* https://support.hpe.com/hpesc/public/docDisplay?docId=a00118636en_us&page=s_creating_snapshot_volumes.html
*/
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class PrimeraVolumeSnapshotRequest {
private String action = "createSnapshot";
private PrimeraVolumeSnapshotRequestParameters parameters;
public String getAction() {
return action;
}
public void setAction(String action) {
this.action = action;
}
public PrimeraVolumeSnapshotRequestParameters getParameters() {
return parameters;
}
public void setParameters(PrimeraVolumeSnapshotRequestParameters parameters) {
this.parameters = parameters;
}
}

View File

@ -0,0 +1,85 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.primera;
/**
* https://support.hpe.com/hpesc/public/docDisplay?docId=a00118636en_us&page=s_creating_snapshot_volumes.html
*/
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class PrimeraVolumeSnapshotRequestParameters {
private String name = null;
private String id = null;
private String comment = null;
private Boolean readOnly = false;
private Integer expirationHours = null;
private Integer retentionHours = null;
private String addToSet = null;
private Boolean syncSnapRcopy = false;
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
public String getComment() {
return comment;
}
public void setComment(String comment) {
this.comment = comment;
}
public Boolean getReadOnly() {
return readOnly;
}
public void setReadOnly(Boolean readOnly) {
this.readOnly = readOnly;
}
public Integer getExpirationHours() {
return expirationHours;
}
public void setExpirationHours(Integer expirationHours) {
this.expirationHours = expirationHours;
}
public Integer getRetentionHours() {
return retentionHours;
}
public void setRetentionHours(Integer retentionHours) {
this.retentionHours = retentionHours;
}
public String getAddToSet() {
return addToSet;
}
public void setAddToSet(String addToSet) {
this.addToSet = addToSet;
}
public Boolean getSyncSnapRcopy() {
return syncSnapRcopy;
}
public void setSyncSnapRcopy(Boolean syncSnapRcopy) {
this.syncSnapRcopy = syncSnapRcopy;
}
}
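A hedged sketch of a createSnapshot body assembled from the request and parameter classes above; the snapshot name and retention values are invented.

import com.fasterxml.jackson.databind.ObjectMapper;

public class PrimeraSnapshotRequestExample {
    public static void main(String[] args) throws Exception {
        PrimeraVolumeSnapshotRequestParameters params = new PrimeraVolumeSnapshotRequestParameters();
        params.setName("csvol-1-snap-1");    // invented snapshot name
        params.setComment("pre-upgrade checkpoint");
        params.setReadOnly(true);
        params.setRetentionHours(24);        // invented retention window

        PrimeraVolumeSnapshotRequest request = new PrimeraVolumeSnapshotRequest(); // action defaults to "createSnapshot"
        request.setParameters(params);
        System.out.println(new ObjectMapper().writeValueAsString(request));
    }
}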

View File

@ -0,0 +1,54 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.primera;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class PrimeraVolumeSnapshotSpace {
private int reservedMiB;
private int rawReservedMiB;
private int usedMiB;
private int freeMiB;
public int getReservedMiB() {
return reservedMiB;
}
public void setReservedMiB(int reservedMiB) {
this.reservedMiB = reservedMiB;
}
public int getRawReservedMiB() {
return rawReservedMiB;
}
public void setRawReservedMiB(int rawReservedMiB) {
this.rawReservedMiB = rawReservedMiB;
}
public int getUsedMiB() {
return usedMiB;
}
public void setUsedMiB(int usedMiB) {
this.usedMiB = usedMiB;
}
public int getFreeMiB() {
return freeMiB;
}
public void setFreeMiB(int freeMiB) {
this.freeMiB = freeMiB;
}
}

View File

@ -0,0 +1,35 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.primera;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class PrimeraVolumeUpdateRequest {
private String comment;
public String getComment() {
return comment;
}
public void setComment(String comment) {
this.comment = comment;
}
}

View File

@ -0,0 +1,54 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.primera;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class PrimeraVolumeUserSpace {
private int reservedMiB;
private int rawReservedMiB;
private int usedMiB;
private int freeMiB;
public int getReservedMiB() {
return reservedMiB;
}
public void setReservedMiB(int reservedMiB) {
this.reservedMiB = reservedMiB;
}
public int getRawReservedMiB() {
return rawReservedMiB;
}
public void setRawReservedMiB(int rawReservedMiB) {
this.rawReservedMiB = rawReservedMiB;
}
public int getUsedMiB() {
return usedMiB;
}
public void setUsedMiB(int usedMiB) {
this.usedMiB = usedMiB;
}
public int getFreeMiB() {
return freeMiB;
}
public void setFreeMiB(int freeMiB) {
this.freeMiB = freeMiB;
}
}

View File

@ -0,0 +1,32 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.provider;
import org.apache.cloudstack.storage.datastore.adapter.primera.PrimeraAdapterFactory;
public class PrimeraPrimaryDatastoreProviderImpl extends AdaptivePrimaryDatastoreProviderImpl {
public PrimeraPrimaryDatastoreProviderImpl() {
super(new PrimeraAdapterFactory());
}
@Override
public String getName() {
return "Primera";
}
}

View File

@ -0,0 +1,18 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
name=storage-volume-primera
parent=storage

View File

@ -0,0 +1,35 @@
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<beans xmlns="http://www.springframework.org/schema/beans"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:context="http://www.springframework.org/schema/context"
xmlns:aop="http://www.springframework.org/schema/aop"
xsi:schemaLocation="http://www.springframework.org/schema/beans
http://www.springframework.org/schema/beans/spring-beans.xsd
http://www.springframework.org/schema/aop http://www.springframework.org/schema/aop/spring-aop.xsd
http://www.springframework.org/schema/context
http://www.springframework.org/schema/context/spring-context.xsd"
>
<bean id="primeraDataStoreProvider"
class="org.apache.cloudstack.storage.datastore.provider.PrimeraPrimaryDatastoreProviderImpl">
</bean>
</beans>

View File

@ -0,0 +1,31 @@
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#############################################################################################
#
# Clean old multipath maps that have 0 paths available
#
#############################################################################################
cd "$(dirname "$0")"
for WWID in $(multipathd list maps status | awk '{ if ($4 == 0) { print substr($1,2); }}'); do
./removeVolume.sh ${WWID}
done
exit 0

View File

@ -0,0 +1,133 @@
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#####################################################################################
#
# Given a lun # and a WWID for a volume provisioned externally, find the volume
# through the SCSI bus and make sure it is visible via multipath
#
#####################################################################################
LUN=${1:?"LUN required"}
WWID=${2:?"WWID required"}
WWID=$(echo $WWID | tr '[:upper:]' '[:lower:]')
systemctl is-active multipathd || systemctl restart multipathd || {
echo "$(date): Multipathd is NOT running and cannot be started. This must be corrected before this host can access this storage volume."
logger -t "CS_SCSI_VOL_FIND" "${WWID} cannot be mapped to this host because multipathd is not currently running and cannot be started"
exit 1
}
echo "$(date): Looking for ${WWID} on lun ${LUN}"
# Get the vendor OUI. We will only delete a device on the designated LUN if it matches the
# incoming WWN OUI value, because multiple storage arrays may be mapped to the
# host on different fiber channel hosts with the same LUN.
INCOMING_OUI=$(echo ${WWID} | cut -c2-7)
echo "$(date): Incoming OUI: ${INCOMING_OUI}"
# first we need to check if any stray references are left from a previous use of this lun
for fchost in $(ls /sys/class/fc_host | sed -e 's/host//g'); do
lingering_devs=$(lsscsi -w "${fchost}:*:*:${LUN}" | grep /dev | awk '{if (NF > 6) { printf("%s:%s ", $NF, $(NF-1));} }' | sed -e 's/0x/3/g')
if [ ! -z "${lingering_devs}" ]; then
for dev in ${lingering_devs}; do
LSSCSI_WWID=$(echo $dev | awk -F: '{print $2}' | sed -e 's/0x/3/g')
FOUND_OUI=$(echo ${LSSCSI_WWID} | cut -c3-8)
if [ "${INCOMING_OUI}" != "${FOUND_OUI}" ]; then
continue;
fi
dev=$(echo $dev | awk -F: '{ print $1}')
logger -t "CS_SCSI_VOL_FIND" "${WWID} processing identified a lingering device ${dev} from previous lun use, attempting to clean up"
MP_WWID=$(multipath -l ${dev} | head -1 | awk '{print $1}')
MP_WWID=${MP_WWID:1} # strip first character (3) off
# don't do this if the WWID passed in matches the WWID from multipath
if [ ! -z "${MP_WWID}" ] && [ "${MP_WWID}" != "${WWID}" ]; then
# run full removal again so all devices and multimap are cleared
$(dirname $0)/disconnectVolume.sh ${MP_WWID}
# we don't have a multimap but we may still have some stranded devices to clean up
elif [ "${LSSCSI_WWID}" != "${WWID}" ]; then
echo "1" > /sys/block/$(echo ${dev} | awk -F'/' '{print $NF}')/device/delete
fi
done
sleep 3
fi
done
logger -t "CS_SCSI_VOL_FIND" "${WWID} awaiting disk path at /dev/mapper/3${WWID}"
# wait for multipath to map the new lun to the WWID
echo "$(date): Waiting for multipath entry to show up for the WWID"
while true; do
ls /dev/mapper/3${WWID} >/dev/null 2>&1
if [ $? == 0 ]; then
break
fi
logger -t "CS_SCSI_VOL_FIND" "${WWID} not available yet, triggering scan"
# instruct bus to scan for new lun
for fchost in $(ls /sys/class/fc_host); do
echo " --> Scanning ${fchost}"
echo "- - ${LUN}" > /sys/class/scsi_host/${fchost}/scan
done
multipath -v2 2>/dev/null
ls /dev/mapper/3${WWID} >/dev/null 2>&1
if [ $? == 0 ]; then
break
fi
sleep 5
done
echo "$(date): Doing a recan to make sure we have proper current size locally"
for device in $(multipath -ll 3${WWID} | egrep '^ ' | awk '{print $2}'); do
echo "1" > /sys/bus/scsi/drivers/sd/${device}/rescan;
done
sleep 3
multipathd reconfigure
sleep 3
# cleanup any old/faulty paths
delete_needed=false
multipath -l 3${WWID}
for dev in $(multipath -l 3${WWID} 2>/dev/null| grep failed | awk '{print $3}' ); do
logger -t "CS_SCSI_VOL_FIND" "${WWID} multipath contains faulty path ${dev}, removing"
echo 1 > /sys/block/${dev}/device/delete;
delete_needed=true
done
if [ "${delete_needed}" == "true" ]; then
sleep 10
multipath -v2 >/dev/null
fi
multipath -l 3${WWID}
logger -t "CS_SCSI_VOL_FIND" "${WWID} successfully discovered and available"
echo "$(date): Complete - found mapped LUN at /dev/mapper/3${WWID}"
exit 0

View File

@ -0,0 +1,32 @@
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
OUTPUT_FORMAT=${1:?"Output format is required"}
INPUT_FILE=${2:?"Input file/path is required"}
OUTPUT_FILE=${3:?"Output file/path is required"}
echo "$(date): qemu-img convert -n -p -W -t none -O ${OUTPUT_FORMAT} ${INPUT_FILE} ${OUTPUT_FILE}"
qemu-img convert -n -p -W -t none -O ${OUTPUT_FORMAT} ${INPUT_FILE} ${OUTPUT_FILE} && {
# if it's a block device, make sure we flush caches before exiting
lsblk ${OUTPUT_FILE} >/dev/null 2>&1 && {
blockdev --flushbufs ${OUTPUT_FILE}
hdparm -F ${OUTPUT_FILE}
}
exit 0
}

View File

@ -0,0 +1,71 @@
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#########################################################################################
#
# Given a WWID, cleanup/remove any multipath and devices associated with this WWID. This
# may not always have a lasting result, because if the storage array still has the volume
# visible to the host, it may be rediscovered. The cleanupStaleMaps.sh script should
# catch those cases
#
#########################################################################################
WWID=${1:?"WWID required"}
WWID=$(echo $WWID | tr '[:upper:]' '[:lower:]')
echo "$(date): Removing ${WWID}"
systemctl is-active multipathd || systemctl restart multipathd || {
echo "$(date): Multipathd is NOT running and cannot be started. This must be corrected before this host can access this storage volume."
logger -t "CS_SCSI_VOL_REMOVE" "${WWID} cannot be disconnected from this host because multipathd is not currently running and cannot be started"
exit 1
}
# first get dm- name
DM_NAME=$(ls -lrt /dev/mapper/3${WWID} | awk '{ print $NF }' | awk -F'/' '{print $NF}')
SLAVE_DEVS=""
if [ -z "${DM_NAME}" ]; then
logger -t CS_SCSI_VOL_REMOVE "${WWID} has no active multimap so no removal performed"
logger -t CS_SCSI_VOL_REMOVE "WARN: dm name could not be found for ${WWID}"
dmsetup remove /dev/mapper/*${WWID}
logger -t CS_SCSI_VOL_REMOVE "${WWID} removal via dmsetup remove /dev/mapper/${WWID} finished with return code $?"
else
# now look for slave devices and save for deletion
for dev in $(ls /sys/block/${DM_NAME}/slaves/ 2>/dev/null); do
SLAVE_DEVS="${SLAVE_DEVS} ${dev}"
done
fi
# flush the multipath map before removing the slave devices
multipath -f 3${WWID}
# now delete slave devices
# https://bugzilla.redhat.com/show_bug.cgi?id=1949369
if [ ! -z "${SLAVE_DEVS}" ]; then
for dev in ${SLAVE_DEVS}; do
multipathd del path /dev/${dev}
echo "1" > /sys/block/${dev}/device/delete
logger -t CS_SCSI_VOL_REMOVE "${WWID} removal of device ${dev} complete"
done
fi
logger -t CS_SCSI_VOL_REMOVE "${WWID} successfully purged from multipath along with slave devices"
echo "$(date): ${WWID} removed"
exit 0

View File

@ -0,0 +1,70 @@
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
notifyqemu() {
if virsh help 2>/dev/null | grep -q blockresize
then
if virsh domstate $VMNAME >/dev/null 2>&1
then
sizeinkb=$(($NEWSIZE/1024))
devicepath=$(virsh domblklist $VMNAME | grep ${WWID} | awk '{print $1}')
virsh blockresize --path $devicepath --size $sizeinkb ${VMNAME} >/dev/null 2>&1
retval=$?
if [ -z "$retval" ] || [ "$retval" -ne 0 ]
then
log "failed to live resize ${devicepath} to size of ${sizeinkb} kb" 1
else
liveresize='true'
fi
fi
fi
}
WWID=${1:?"WWID required"}
VMNAME=${2:?"VMName required"}
NEWSIZE=${3:?"New size required in bytes"}
WWID=$(echo $WWID | tr '[:upper:]' '[:lower:]')
export WWID VMNAME NEWSIZE
systemctl is-active multipathd || systemctl restart multipathd || {
echo "$(date): Multipathd is NOT running and cannot be started. This must be corrected before this host can access this storage volume."
logger -t "CS_SCSI_VOL_RESIZE" "Unable to notify running VM of resize for ${WWID} because multipathd is not currently running and cannot be started"
exit 1
}
logger -t "CS_SCSI_VOL_RESIZE" "${WWID} resizing disk path at /dev/mapper/3${WWID} STARTING"
for device in $(multipath -ll 3${WWID} | egrep '^ ' | awk '{print $2}'); do
echo "1" > /sys/bus/scsi/drivers/sd/${device}/rescan;
done
sleep 3
multipathd reconfigure
sleep 3
multipath -ll 3${WWID}
notifyqemu
logger -t "CS_SCSI_VOL_RESIZE" "${WWID} resizing disk path at /dev/mapper/3${WWID} COMPLETE"
exit 0

View File

@ -1294,7 +1294,7 @@ public class ApiDBUtils {
type = HypervisorType.Hyperv;
}
} if (format == ImageFormat.RAW) {
// Currently, KVM only supports RBD and PowerFlex images of type RAW.
// Currently, KVM only supports RBD, PowerFlex, and FiberChannel images of type RAW.
// This results in a weird collision with OVM volumes which
// can only be raw, thus making KVM RBD volumes show up as OVM
// rather than RBD. This block of code can (hopefully) by checking to
@ -1306,10 +1306,12 @@ public class ApiDBUtils {
ListIterator<StoragePoolVO> itr = pools.listIterator();
while(itr.hasNext()) {
StoragePoolVO pool = itr.next();
if(pool.getPoolType() == StoragePoolType.RBD ||
pool.getPoolType() == StoragePoolType.PowerFlex ||
pool.getPoolType() == StoragePoolType.CLVM ||
pool.getPoolType() == StoragePoolType.Linstor) {
if(List.of(StoragePoolType.RBD,
StoragePoolType.PowerFlex,
StoragePoolType.CLVM,
StoragePoolType.Linstor,
StoragePoolType.FiberChannel).contains(pool.getPoolType())) {
// This case will note the presence of non-qcow2 primary stores, suggesting KVM without NFS. Otherwise,
// If this check is not passed, the hypervisor type will remain OVM.
type = HypervisorType.KVM;

View File

@ -92,7 +92,9 @@ public class ParamGenericValidationWorker implements DispatchWorker {
break;
}
}
if (!matchedCurrentParam && !((String)actualParamName).equalsIgnoreCase("expires") && !((String)actualParamName).equalsIgnoreCase("signatureversion")) {
if (!matchedCurrentParam && !((String)actualParamName).equalsIgnoreCase("expires") &&
!((String)actualParamName).equalsIgnoreCase("signatureversion") &&
!((String)actualParamName).equalsIgnoreCase("projectid")) {
errorMsg.append(" ").append(actualParamName);
foundUnknownParam = true;
}

View File

@ -114,6 +114,9 @@ import com.cloud.org.Cluster;
import com.cloud.resource.ResourceManager;
import com.cloud.resource.ResourceState;
import com.cloud.serializer.GsonHelper;
import com.cloud.server.StatsCollector.AbstractStatsCollector;
import com.cloud.server.StatsCollector.AutoScaleMonitor;
import com.cloud.server.StatsCollector.StorageCollector;
import com.cloud.storage.ImageStoreDetailsUtil;
import com.cloud.storage.ScopeType;
import com.cloud.storage.Storage;
@ -1620,7 +1623,8 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc
for (StoragePoolVO pool : pools) {
List<VolumeVO> volumes = _volsDao.findByPoolId(pool.getId(), null);
for (VolumeVO volume : volumes) {
if (volume.getFormat() != ImageFormat.QCOW2 && volume.getFormat() != ImageFormat.VHD && volume.getFormat() != ImageFormat.OVA && (volume.getFormat() != ImageFormat.RAW || pool.getPoolType() != Storage.StoragePoolType.PowerFlex)) {
if (!List.of(ImageFormat.QCOW2, ImageFormat.VHD, ImageFormat.OVA, ImageFormat.RAW).contains(volume.getFormat()) &&
!List.of(Storage.StoragePoolType.PowerFlex, Storage.StoragePoolType.FiberChannel).contains(pool.getPoolType())) {
LOGGER.warn("Volume stats not implemented for this format type " + volume.getFormat());
break;
}

View File

@ -1052,36 +1052,56 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
_storagePoolTagsDao.persist(pool.getId(), storagePoolTags, cmd.isTagARule());
}
boolean changes = false;
Long updatedCapacityBytes = null;
Long capacityBytes = cmd.getCapacityBytes();
if (capacityBytes != null) {
if (capacityBytes != pool.getCapacityBytes()) {
updatedCapacityBytes = capacityBytes;
changes = true;
}
}
Long updatedCapacityIops = null;
Long capacityIops = cmd.getCapacityIops();
if (capacityIops != null) {
if (!capacityIops.equals(pool.getCapacityIops())) {
updatedCapacityIops = capacityIops;
changes = true;
}
}
if (updatedCapacityBytes != null || updatedCapacityIops != null) {
// retrieve current details and merge/overlay input to capture changes
Map<String, String> inputDetails = extractApiParamAsMap(cmd.getDetails());
Map<String, String> details = null;
if (inputDetails == null) {
details = _storagePoolDetailsDao.listDetailsKeyPairs(id);
} else {
details = _storagePoolDetailsDao.listDetailsKeyPairs(id);
details.putAll(inputDetails);
changes = true;
}
if (changes) {
StoragePoolVO storagePool = _storagePoolDao.findById(id);
DataStoreProvider dataStoreProvider = _dataStoreProviderMgr.getDataStoreProvider(storagePool.getStorageProviderName());
DataStoreLifeCycle dataStoreLifeCycle = dataStoreProvider.getDataStoreLifeCycle();
if (dataStoreLifeCycle instanceof PrimaryDataStoreLifeCycle) {
Map<String, String> details = new HashMap<String, String>();
details.put(PrimaryDataStoreLifeCycle.CAPACITY_BYTES, updatedCapacityBytes != null ? String.valueOf(updatedCapacityBytes) : null);
details.put(PrimaryDataStoreLifeCycle.CAPACITY_IOPS, updatedCapacityIops != null ? String.valueOf(updatedCapacityIops) : null);
((PrimaryDataStoreLifeCycle)dataStoreLifeCycle).updateStoragePool(storagePool, details);
if (updatedCapacityBytes != null) {
details.put(PrimaryDataStoreLifeCycle.CAPACITY_BYTES, updatedCapacityBytes != null ? String.valueOf(updatedCapacityBytes) : null);
_storagePoolDao.updateCapacityBytes(id, updatedCapacityBytes);
}
if (updatedCapacityIops != null) {
details.put(PrimaryDataStoreLifeCycle.CAPACITY_IOPS, updatedCapacityIops != null ? String.valueOf(updatedCapacityIops) : null);
_storagePoolDao.updateCapacityIops(id, updatedCapacityIops);
}
if (cmd.getUrl() != null) {
details.put("url", cmd.getUrl());
}
_storagePoolDao.update(id, storagePool);
_storagePoolDao.updateDetails(id, details);
}
}
@ -1094,14 +1114,6 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
}
}
if (updatedCapacityBytes != null) {
_storagePoolDao.updateCapacityBytes(id, capacityBytes);
}
if (updatedCapacityIops != null) {
_storagePoolDao.updateCapacityIops(id, capacityIops);
}
return (PrimaryDataStoreInfo)_dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary);
}

Some files were not shown because too many files have changed in this diff.