diff --git a/core/src/com/cloud/agent/api/StartupRoutingCommand.java b/core/src/com/cloud/agent/api/StartupRoutingCommand.java index c4138367b7d..b459f884969 100644 --- a/core/src/com/cloud/agent/api/StartupRoutingCommand.java +++ b/core/src/com/cloud/agent/api/StartupRoutingCommand.java @@ -35,7 +35,7 @@ public class StartupRoutingCommand extends StartupCommand { long memory; long dom0MinMemory; boolean poolSync; - + private boolean supportsClonedVolumes; String caps; String pool; @@ -180,4 +180,12 @@ public class StartupRoutingCommand extends StartupCommand { public void setGpuGroupDetails(HashMap> groupDetails) { this.groupDetails = groupDetails; } + + public boolean getSupportsClonedVolumes() { + return supportsClonedVolumes; + } + + public void setSupportsClonedVolumes(boolean supportsClonedVolumes) { + this.supportsClonedVolumes = supportsClonedVolumes; + } } diff --git a/core/src/com/cloud/agent/api/storage/ResizeVolumeCommand.java b/core/src/com/cloud/agent/api/storage/ResizeVolumeCommand.java index 3b121e1a54d..22cff13de25 100644 --- a/core/src/com/cloud/agent/api/storage/ResizeVolumeCommand.java +++ b/core/src/com/cloud/agent/api/storage/ResizeVolumeCommand.java @@ -25,22 +25,34 @@ import com.cloud.agent.api.to.StorageFilerTO; public class ResizeVolumeCommand extends Command { private String path; private StorageFilerTO pool; - private String vmInstance; - private Long newSize; private Long currentSize; + private Long newSize; private boolean shrinkOk; + private String vmInstance; + + /* For managed storage */ + private boolean managed; + private String iScsiName; protected ResizeVolumeCommand() { - } public ResizeVolumeCommand(String path, StorageFilerTO pool, Long currentSize, Long newSize, boolean shrinkOk, String vmInstance) { this.path = path; this.pool = pool; - this.vmInstance = vmInstance; this.currentSize = currentSize; this.newSize = newSize; this.shrinkOk = shrinkOk; + this.vmInstance = vmInstance; + this.managed = false; + } + + public ResizeVolumeCommand(String path, StorageFilerTO pool, Long currentSize, Long newSize, boolean shrinkOk, String vmInstance, + boolean isManaged, String iScsiName) { + this(path, pool, currentSize, newSize, shrinkOk, vmInstance); + + this.iScsiName = iScsiName; + this.managed = isManaged; } public String getPath() { @@ -55,22 +67,20 @@ public class ResizeVolumeCommand extends Command { return pool; } - public long getNewSize() { - return newSize; - } + public long getCurrentSize() { return currentSize; } - public long getCurrentSize() { - return currentSize; - } + public long getNewSize() { return newSize; } - public boolean getShrinkOk() { - return shrinkOk; - } + public boolean getShrinkOk() { return shrinkOk; } public String getInstanceName() { return vmInstance; } + public boolean isManaged() { return managed; } + + public String get_iScsiName() {return iScsiName; } + /** * {@inheritDoc} */ @@ -78,5 +88,4 @@ public class ResizeVolumeCommand extends Command { public boolean executeInSequence() { return false; } - } diff --git a/core/src/com/cloud/storage/resource/StorageProcessor.java b/core/src/com/cloud/storage/resource/StorageProcessor.java index e2bf1b78772..e5832cc23d4 100644 --- a/core/src/com/cloud/storage/resource/StorageProcessor.java +++ b/core/src/com/cloud/storage/resource/StorageProcessor.java @@ -26,6 +26,7 @@ import org.apache.cloudstack.storage.command.DeleteCommand; import org.apache.cloudstack.storage.command.DettachCommand; import org.apache.cloudstack.storage.command.ForgetObjectCmd; import 
org.apache.cloudstack.storage.command.IntroduceObjectCmd; +import org.apache.cloudstack.storage.command.ResignatureCommand; import org.apache.cloudstack.storage.command.SnapshotAndCopyCommand; import com.cloud.agent.api.Answer; @@ -68,4 +69,6 @@ public interface StorageProcessor { public Answer forgetObject(ForgetObjectCmd cmd); public Answer snapshotAndCopy(SnapshotAndCopyCommand cmd); + + public Answer resignature(ResignatureCommand cmd); } diff --git a/core/src/com/cloud/storage/resource/StorageSubsystemCommandHandlerBase.java b/core/src/com/cloud/storage/resource/StorageSubsystemCommandHandlerBase.java index fc771e0ff59..d9d2993dbaf 100644 --- a/core/src/com/cloud/storage/resource/StorageSubsystemCommandHandlerBase.java +++ b/core/src/com/cloud/storage/resource/StorageSubsystemCommandHandlerBase.java @@ -28,6 +28,7 @@ import org.apache.cloudstack.storage.command.CreateObjectCommand; import org.apache.cloudstack.storage.command.DeleteCommand; import org.apache.cloudstack.storage.command.DettachCommand; import org.apache.cloudstack.storage.command.IntroduceObjectCmd; +import org.apache.cloudstack.storage.command.ResignatureCommand; import org.apache.cloudstack.storage.command.SnapshotAndCopyCommand; import org.apache.cloudstack.storage.command.StorageSubSystemCommand; @@ -64,6 +65,8 @@ public class StorageSubsystemCommandHandlerBase implements StorageSubsystemComma return processor.introduceObject((IntroduceObjectCmd)command); } else if (command instanceof SnapshotAndCopyCommand) { return processor.snapshotAndCopy((SnapshotAndCopyCommand)command); + } else if (command instanceof ResignatureCommand) { + return processor.resignature((ResignatureCommand)command); } return new Answer((Command)command, false, "not implemented yet"); diff --git a/core/src/org/apache/cloudstack/storage/command/ResignatureAnswer.java b/core/src/org/apache/cloudstack/storage/command/ResignatureAnswer.java new file mode 100644 index 00000000000..071f6a9cf19 --- /dev/null +++ b/core/src/org/apache/cloudstack/storage/command/ResignatureAnswer.java @@ -0,0 +1,60 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
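As a rough, hypothetical illustration of how the new command flows through the existing dispatch (it assumes the single-argument StorageSubsystemCommandHandlerBase constructor and the handleStorageCommands entry point already present in core; `storageProcessor` and `details` are placeholders), a hypervisor resource wired with a processor that implements resignature() would see the command routed like this:

```java
import java.util.Map;

import org.apache.cloudstack.storage.command.ResignatureCommand;

import com.cloud.agent.api.Answer;
import com.cloud.storage.resource.StorageProcessor;
import com.cloud.storage.resource.StorageSubsystemCommandHandler;
import com.cloud.storage.resource.StorageSubsystemCommandHandlerBase;

public class ResignatureDispatchSketch {
    // storageProcessor stands in for a hypervisor-specific StorageProcessor implementation
    // that overrides the new resignature(ResignatureCommand) method added in this change.
    public Answer dispatch(StorageProcessor storageProcessor, Map<String, String> details) {
        StorageSubsystemCommandHandler handler = new StorageSubsystemCommandHandlerBase(storageProcessor);

        // The base handler now recognizes ResignatureCommand and forwards it to
        // processor.resignature(...); any still-unknown command keeps returning
        // the "not implemented yet" answer.
        return handler.handleStorageCommands(new ResignatureCommand(details));
    }
}
```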
+// + +package org.apache.cloudstack.storage.command; + +import com.cloud.agent.api.Answer; +import com.cloud.storage.Storage.ImageFormat; + +public class ResignatureAnswer extends Answer { + private long size; + private String path; + private ImageFormat format; + + public ResignatureAnswer() { + } + + public ResignatureAnswer(String errMsg) { + super(null, false, errMsg); + } + + public void setSize(long size) { + this.size = size; + } + + public long getSize() { + return size; + } + + public void setPath(String path) { + this.path = path; + } + + public String getPath() { + return path; + } + + public void setFormat(ImageFormat format) { + this.format = format; + } + + public ImageFormat getFormat() { + return format; + } +} diff --git a/core/src/org/apache/cloudstack/storage/command/ResignatureCommand.java b/core/src/org/apache/cloudstack/storage/command/ResignatureCommand.java new file mode 100644 index 00000000000..beb4b657480 --- /dev/null +++ b/core/src/org/apache/cloudstack/storage/command/ResignatureCommand.java @@ -0,0 +1,48 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
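For orientation, here is a minimal, hypothetical sketch of what an agent-side implementation of the new StorageProcessor.resignature(ResignatureCommand) hook could look like. This is not the PR's hypervisor code: resignatureClonedVolume and getVirtualSize are invented placeholders, the details map is assumed to be the String-to-String map the data motion strategy builds, and VHD is only an example format.

```java
import java.util.Map;

import org.apache.cloudstack.storage.command.ResignatureAnswer;
import org.apache.cloudstack.storage.command.ResignatureCommand;

import com.cloud.agent.api.Answer;
import com.cloud.agent.api.to.DiskTO;
import com.cloud.storage.Storage.ImageFormat;

public class ResignatureProcessorSketch {
    public Answer resignature(final ResignatureCommand cmd) {
        try {
            Map<String, String> details = cmd.getDetails();

            // Placeholder hypervisor work: introduce the cloned LUN identified by the IQN,
            // resignature the SR/VDI on it, and report the resulting virtual disk.
            String newPath = resignatureClonedVolume(details.get(DiskTO.IQN));
            long newSize = getVirtualSize(newPath);

            ResignatureAnswer answer = new ResignatureAnswer();

            answer.setSize(newSize);
            answer.setPath(newPath);
            answer.setFormat(ImageFormat.VHD);

            return answer;
        }
        catch (Exception ex) {
            // The single-argument constructor marks the answer as failed with the given message.
            return new ResignatureAnswer(ex.getMessage());
        }
    }

    private String resignatureClonedVolume(String iqn) {
        throw new UnsupportedOperationException("placeholder for hypervisor-specific logic");
    }

    private long getVirtualSize(String path) {
        throw new UnsupportedOperationException("placeholder for hypervisor-specific logic");
    }
}
```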
+// + +package org.apache.cloudstack.storage.command; + +import com.cloud.utils.Utils; + +import java.util.Map; + +public final class ResignatureCommand extends StorageSubSystemCommand { + private final Map details; + + private boolean executeInSequence = true; + + public ResignatureCommand(final Map details) { + this.details = Utils.getImmutableMap(details); + } + + public Map getDetails() { + return details; + } + + @Override + public void setExecuteInSequence(final boolean executeInSequence) { + this.executeInSequence = executeInSequence; + } + + @Override + public boolean executeInSequence() { + return executeInSequence; + } +} diff --git a/core/src/org/apache/cloudstack/storage/to/PrimaryDataStoreTO.java b/core/src/org/apache/cloudstack/storage/to/PrimaryDataStoreTO.java index a69f35706da..9b711bc3b3a 100644 --- a/core/src/org/apache/cloudstack/storage/to/PrimaryDataStoreTO.java +++ b/core/src/org/apache/cloudstack/storage/to/PrimaryDataStoreTO.java @@ -37,6 +37,7 @@ public class PrimaryDataStoreTO implements DataStoreTO { public static final String CHAP_INITIATOR_SECRET = PrimaryDataStore.CHAP_INITIATOR_SECRET; public static final String CHAP_TARGET_USERNAME = PrimaryDataStore.CHAP_TARGET_USERNAME; public static final String CHAP_TARGET_SECRET = PrimaryDataStore.CHAP_TARGET_SECRET; + public static final String REMOVE_AFTER_COPY = PrimaryDataStore.REMOVE_AFTER_COPY; public static final String VOLUME_SIZE = PrimaryDataStore.VOLUME_SIZE; private final String uuid; diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreCapabilities.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreCapabilities.java index 09883c66ce7..2cde5bdc155 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreCapabilities.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreCapabilities.java @@ -18,7 +18,23 @@ */ package org.apache.cloudstack.engine.subsystem.api.storage; +/** + * enumerates different capabilities storage drivers may have + */ public enum DataStoreCapabilities { VOLUME_SNAPSHOT_QUIESCEVM, - STORAGE_SYSTEM_SNAPSHOT // indicates to the StorageSystemSnapshotStrategy that this driver takes snapshots on its own system + /** + * indicates that this driver takes CloudStack volume snapshots on its own system (as either back-end snapshots or back-end clones) + */ + STORAGE_SYSTEM_SNAPSHOT, + /** + * indicates that this driver supports the "cloneOfSnapshot" property of cloud.snapshot_details (for creating a back-end volume + * from a back-end snapshot or a back-end clone) and that it supports the invocation of the createAsync method where a SnapshotInfo is passed in while using + * the "tempVolume" property of snapshot_details + */ + CAN_CREATE_VOLUME_FROM_SNAPSHOT, + /** + * indicates that this driver supports the "cloneOfSnapshot" property of cloud.snapshot_details (for creating a volume from a volume) + */ + CAN_CREATE_VOLUME_FROM_VOLUME } diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStore.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStore.java index 465b7eb2437..a399758217b 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStore.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStore.java @@ -23,6 +23,8 @@ import java.util.List; import org.apache.cloudstack.engine.subsystem.api.storage.disktype.DiskFormat; public 
interface PrimaryDataStore extends DataStore, PrimaryDataStoreInfo { + DataObject create(DataObject dataObject, boolean createEntryInTempSpoolRef); + VolumeInfo getVolume(long id); List getVolumes(); diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java index e0c0d28e9da..6dcdf4f0c7c 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java @@ -23,27 +23,38 @@ import org.apache.cloudstack.storage.command.CommandResult; import com.cloud.host.Host; import com.cloud.storage.StoragePool; -import com.cloud.storage.Volume; public interface PrimaryDataStoreDriver extends DataStoreDriver { - ChapInfo getChapInfo(VolumeInfo volumeInfo); + ChapInfo getChapInfo(DataObject dataObject); boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore); void revokeAccess(DataObject dataObject, Host host, DataStore dataStore); - // intended for managed storage (cloud.storage_pool.managed = true) - // if not managed, return volume.getSize() - long getVolumeSizeIncludingHypervisorSnapshotReserve(Volume volume, StoragePool storagePool); + /** + * intended for managed storage (cloud.storage_pool.managed = true) + * if not managed, return volume.getSize() + */ + long getDataObjectSizeIncludingHypervisorSnapshotReserve(DataObject dataObject, StoragePool storagePool); - // intended for managed storage (cloud.storage_pool.managed = true) - // if managed storage, return the total number of bytes currently in use for the storage pool in question - // if not managed storage, return 0 + /** + * intended for zone-wide primary storage that is capable of storing a template once and using it in multiple clusters + * if not this kind of storage, return 0 + */ + long getBytesRequiredForTemplate(TemplateInfo templateInfo, StoragePool storagePool); + + /** + * intended for managed storage (cloud.storage_pool.managed = true) + * if managed storage, return the total number of bytes currently in use for the storage pool in question + * if not managed storage, return 0 + */ long getUsedBytes(StoragePool storagePool); - // intended for managed storage (cloud.storage_pool.managed = true) - // if managed storage, return the total number of IOPS currently in use for the storage pool in question - // if not managed storage, return 0 + /** + * intended for managed storage (cloud.storage_pool.managed = true) + * if managed storage, return the total number of IOPS currently in use for the storage pool in question + * if not managed storage, return 0 + */ long getUsedIops(StoragePool storagePool); void takeSnapshot(SnapshotInfo snapshot, AsyncCompletionCallback callback); diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreInfo.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreInfo.java index f08d9a45509..7f2f4dc6b85 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreInfo.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreInfo.java @@ -36,6 +36,7 @@ public interface PrimaryDataStoreInfo extends StoragePool { static final String CHAP_INITIATOR_SECRET = "chapInitiatorSecret"; static final String CHAP_TARGET_USERNAME = "chapTargetUsername"; static final String 
CHAP_TARGET_SECRET = "chapTargetSecret"; + static final String REMOVE_AFTER_COPY = "removeAfterCopy"; static final String VOLUME_SIZE = "volumeSize"; boolean isHypervisorSupported(HypervisorType hypervisor); diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/TemplateService.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/TemplateService.java index 88ce932b266..ff204c663c0 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/TemplateService.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/TemplateService.java @@ -32,6 +32,7 @@ public interface TemplateService { public TemplateApiResult(TemplateInfo template) { super(); + this.template = template; } @@ -52,6 +53,8 @@ public interface TemplateService { AsyncCallFuture prepareTemplateOnPrimary(TemplateInfo srcTemplate, StoragePool pool); + AsyncCallFuture deleteTemplateOnPrimary(TemplateInfo template, StoragePool pool); + void syncTemplateToRegionStore(long templateId, DataStore store); void handleSysTemplateDownload(HypervisorType hostHyper, Long dcId); diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/VolumeService.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/VolumeService.java index 8352682ee68..75a7ad96c4b 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/VolumeService.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/VolumeService.java @@ -45,7 +45,7 @@ public interface VolumeService { } } - ChapInfo getChapInfo(VolumeInfo volumeInfo, DataStore dataStore); + ChapInfo getChapInfo(DataObject dataObject, DataStore dataStore); boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore); @@ -81,7 +81,7 @@ public interface VolumeService { VolumeEntity getVolumeEntity(long volumeId); - AsyncCallFuture createManagedStorageAndVolumeFromTemplateAsync(VolumeInfo volumeInfo, long destDataStoreId, + AsyncCallFuture createManagedStorageVolumeFromTemplateAsync(VolumeInfo volumeInfo, long destDataStoreId, TemplateInfo srcTemplateInfo, long destHostId); AsyncCallFuture createVolumeFromTemplateAsync(VolumeInfo volume, long dataStoreId, diff --git a/engine/components-api/src/com/cloud/vm/VmWorkResizeVolume.java b/engine/components-api/src/com/cloud/vm/VmWorkResizeVolume.java index d2691129613..de049b3f0b6 100644 --- a/engine/components-api/src/com/cloud/vm/VmWorkResizeVolume.java +++ b/engine/components-api/src/com/cloud/vm/VmWorkResizeVolume.java @@ -24,12 +24,12 @@ public class VmWorkResizeVolume extends VmWork { private long newSize; private Long newMinIops; private Long newMaxIops; + private Integer newHypervisorSnapshotReserve; private Long newServiceOfferingId; private boolean shrinkOk; - public VmWorkResizeVolume(long userId, long accountId, long vmId, String handlerName, - long volumeId, long currentSize, long newSize, Long newMinIops, Long newMaxIops, Long newServiceOfferingId, boolean shrinkOk) { - + public VmWorkResizeVolume(long userId, long accountId, long vmId, String handlerName, long volumeId, long currentSize, long newSize, + Long newMinIops, Long newMaxIops, Integer newHypervisorSnapshotReserve, Long newServiceOfferingId, boolean shrinkOk) { super(userId, accountId, vmId, handlerName); this.volumeId = volumeId; @@ -37,6 +37,7 @@ public class VmWorkResizeVolume extends VmWork { this.newSize = newSize; this.newMinIops = newMinIops; this.newMaxIops = newMaxIops; + this.newHypervisorSnapshotReserve = 
newHypervisorSnapshotReserve; this.newServiceOfferingId = newServiceOfferingId; this.shrinkOk = shrinkOk; } @@ -68,4 +69,6 @@ public class VmWorkResizeVolume extends VmWork { public boolean isShrinkOk() { return shrinkOk; } + + public Integer getNewHypervisorSnapshotReserve() { return newHypervisorSnapshotReserve; } } diff --git a/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java b/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java index d407bb1afff..166210aa310 100644 --- a/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java +++ b/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java @@ -1242,10 +1242,11 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati future = volService.createVolumeAsync(volume, destPool); } else { - TemplateInfo templ = tmplFactory.getReadyTemplateOnImageStore(templateId, dest.getDataCenter().getId()); + if (templ == null) { s_logger.debug("can't find ready template: " + templateId + " for data center " + dest.getDataCenter().getId()); + throw new CloudRuntimeException("can't find ready template: " + templateId + " for data center " + dest.getDataCenter().getId()); } @@ -1260,13 +1261,13 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati long hostId = vm.getVirtualMachine().getHostId(); - future = volService.createManagedStorageAndVolumeFromTemplateAsync(volume, destPool.getId(), templ, hostId); + future = volService.createManagedStorageVolumeFromTemplateAsync(volume, destPool.getId(), templ, hostId); } else { future = volService.createVolumeFromTemplateAsync(volume, destPool.getId(), templ); } } - VolumeApiResult result = null; + VolumeApiResult result; try { result = future.get(); if (result.isFailed()) { @@ -1290,10 +1291,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati newVol = _volsDao.findById(newVol.getId()); break; //break out of template-redeploy retry loop - } catch (InterruptedException e) { - s_logger.error("Unable to create " + newVol, e); - throw new StorageUnavailableException("Unable to create " + newVol + ":" + e.toString(), destPool.getId()); - } catch (ExecutionException e) { + } catch (InterruptedException | ExecutionException e) { s_logger.error("Unable to create " + newVol, e); throw new StorageUnavailableException("Unable to create " + newVol + ":" + e.toString(), destPool.getId()); } diff --git a/engine/schema/src/com/cloud/dc/dao/ClusterDao.java b/engine/schema/src/com/cloud/dc/dao/ClusterDao.java index 50c234c9154..06bc5a3afc6 100644 --- a/engine/schema/src/com/cloud/dc/dao/ClusterDao.java +++ b/engine/schema/src/com/cloud/dc/dao/ClusterDao.java @@ -45,4 +45,6 @@ public interface ClusterDao extends GenericDao { List listClustersByDcId(long zoneId); List listAllCusters(long zoneId); + + boolean getSupportsResigning(long clusterId); } diff --git a/engine/schema/src/com/cloud/dc/dao/ClusterDaoImpl.java b/engine/schema/src/com/cloud/dc/dao/ClusterDaoImpl.java index 3459e514b71..0c5bd6f4a79 100644 --- a/engine/schema/src/com/cloud/dc/dao/ClusterDaoImpl.java +++ b/engine/schema/src/com/cloud/dc/dao/ClusterDaoImpl.java @@ -28,6 +28,8 @@ import javax.inject.Inject; import org.springframework.stereotype.Component; +import com.cloud.dc.ClusterDetailsDao; +import com.cloud.dc.ClusterDetailsVO; import com.cloud.dc.ClusterVO; import com.cloud.dc.HostPodVO; import 
com.cloud.hypervisor.Hypervisor.HypervisorType; @@ -57,7 +59,9 @@ public class ClusterDaoImpl extends GenericDaoBase implements C private static final String GET_POD_CLUSTER_MAP_PREFIX = "SELECT pod_id, id FROM cloud.cluster WHERE cluster.id IN( "; private static final String GET_POD_CLUSTER_MAP_SUFFIX = " )"; @Inject - protected HostPodDao _hostPodDao; + private ClusterDetailsDao clusterDetailsDao; + @Inject + protected HostPodDao hostPodDao; public ClusterDaoImpl() { super(); @@ -214,7 +218,7 @@ public class ClusterDaoImpl extends GenericDaoBase implements C @Override public List listClustersWithDisabledPods(long zoneId) { - GenericSearchBuilder disabledPodIdSearch = _hostPodDao.createSearchBuilder(Long.class); + GenericSearchBuilder disabledPodIdSearch = hostPodDao.createSearchBuilder(Long.class); disabledPodIdSearch.selectFields(disabledPodIdSearch.entity().getId()); disabledPodIdSearch.and("dataCenterId", disabledPodIdSearch.entity().getDataCenterId(), Op.EQ); disabledPodIdSearch.and("allocationState", disabledPodIdSearch.entity().getAllocationState(), Op.EQ); @@ -260,4 +264,23 @@ public class ClusterDaoImpl extends GenericDaoBase implements C sc.setParameters("dataCenterId", zoneId); return customSearch(sc, null); } + + @Override + public boolean getSupportsResigning(long clusterId) { + ClusterVO cluster = findById(clusterId); + + if (cluster == null || cluster.getAllocationState() != Grouping.AllocationState.Enabled) { + return false; + } + + ClusterDetailsVO clusterDetailsVO = clusterDetailsDao.findDetail(clusterId, "supportsResign"); + + if (clusterDetailsVO != null) { + String value = clusterDetailsVO.getValue(); + + return Boolean.parseBoolean(value); + } + + return false; + } } diff --git a/engine/schema/src/com/cloud/host/dao/HostDao.java b/engine/schema/src/com/cloud/host/dao/HostDao.java index 26e0644c59e..3cfdc94af85 100644 --- a/engine/schema/src/com/cloud/host/dao/HostDao.java +++ b/engine/schema/src/com/cloud/host/dao/HostDao.java @@ -23,6 +23,7 @@ import com.cloud.host.Host; import com.cloud.host.Host.Type; import com.cloud.host.HostVO; import com.cloud.host.Status; +import com.cloud.hypervisor.Hypervisor; import com.cloud.info.RunningHostCountInfo; import com.cloud.resource.ResourceState; import com.cloud.utils.db.GenericDao; @@ -89,6 +90,8 @@ public interface HostDao extends GenericDao, StateDao listByDataCenterId(long id); + List listByDataCenterIdAndHypervisorType(long zoneId, Hypervisor.HypervisorType hypervisorType); + List listAllHosts(long zoneId); List listAllHostsByType(Host.Type type); diff --git a/engine/schema/src/com/cloud/host/dao/HostDaoImpl.java b/engine/schema/src/com/cloud/host/dao/HostDaoImpl.java index 09d9d40cd57..54133b90d03 100644 --- a/engine/schema/src/com/cloud/host/dao/HostDaoImpl.java +++ b/engine/schema/src/com/cloud/host/dao/HostDaoImpl.java @@ -47,7 +47,9 @@ import com.cloud.host.HostTagVO; import com.cloud.host.HostVO; import com.cloud.host.Status; import com.cloud.host.Status.Event; +import com.cloud.hypervisor.Hypervisor; import com.cloud.info.RunningHostCountInfo; +import com.cloud.org.Grouping; import com.cloud.org.Managed; import com.cloud.resource.ResourceState; import com.cloud.utils.DateUtil; @@ -421,6 +423,37 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao return listBy(sc); } + @Override + public List listByDataCenterIdAndHypervisorType(long zoneId, Hypervisor.HypervisorType hypervisorType) { + SearchBuilder clusterSearch = _clusterDao.createSearchBuilder(); + + clusterSearch.and("allocationState", 
clusterSearch.entity().getAllocationState(), SearchCriteria.Op.EQ); + clusterSearch.and("hypervisorType", clusterSearch.entity().getHypervisorType(), SearchCriteria.Op.EQ); + + SearchBuilder hostSearch = createSearchBuilder(); + + hostSearch.and("dc", hostSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); + hostSearch.and("type", hostSearch.entity().getType(), Op.EQ); + hostSearch.and("status", hostSearch.entity().getStatus(), Op.EQ); + hostSearch.and("resourceState", hostSearch.entity().getResourceState(), Op.EQ); + + hostSearch.join("clusterSearch", clusterSearch, hostSearch.entity().getClusterId(), clusterSearch.entity().getId(), JoinBuilder.JoinType.INNER); + + hostSearch.done(); + + SearchCriteria sc = hostSearch.create(); + + sc.setParameters("dc", zoneId); + sc.setParameters("type", Host.Type.Routing); + sc.setParameters("status", Status.Up); + sc.setParameters("resourceState", ResourceState.Enabled); + + sc.setJoinParameters("clusterSearch", "allocationState", Grouping.AllocationState.Enabled); + sc.setJoinParameters("clusterSearch", "hypervisorType", hypervisorType.toString()); + + return listBy(sc); + } + @Override public HostVO findByGuid(String guid) { SearchCriteria sc = GuidSearch.create("guid", guid); diff --git a/engine/schema/src/com/cloud/host/dao/HostDetailsDaoImpl.java b/engine/schema/src/com/cloud/host/dao/HostDetailsDaoImpl.java index 6a8ff563642..b864a592105 100644 --- a/engine/schema/src/com/cloud/host/dao/HostDetailsDaoImpl.java +++ b/engine/schema/src/com/cloud/host/dao/HostDetailsDaoImpl.java @@ -65,10 +65,13 @@ public class HostDetailsDaoImpl extends GenericDaoBase implement @Override public Map findDetails(long hostId) { SearchCriteria sc = HostSearch.create(); + sc.setParameters("hostId", hostId); List results = search(sc, null); + Map details = new HashMap(results.size()); + for (DetailVO result : results) { if ("password".equals(result.getName())) { details.put(result.getName(), DBEncryptionUtil.decrypt(result.getValue())); @@ -76,6 +79,7 @@ public class HostDetailsDaoImpl extends GenericDaoBase implement details.put(result.getName(), result.getValue()); } } + return details; } diff --git a/engine/schema/src/com/cloud/storage/dao/VMTemplatePoolDaoImpl.java b/engine/schema/src/com/cloud/storage/dao/VMTemplatePoolDaoImpl.java index 770e673e88d..93aad153371 100644 --- a/engine/schema/src/com/cloud/storage/dao/VMTemplatePoolDaoImpl.java +++ b/engine/schema/src/com/cloud/storage/dao/VMTemplatePoolDaoImpl.java @@ -118,9 +118,9 @@ public class VMTemplatePoolDaoImpl extends GenericDaoBase sc = PoolTemplateSearch.create(); - sc.setParameters("pool_id", hostId); + sc.setParameters("pool_id", poolId); sc.setParameters("template_id", templateId); return findOneIncludingRemovedBy(sc); } diff --git a/engine/storage/datamotion/src/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java b/engine/storage/datamotion/src/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java index cdcab754e91..7a59ad07c44 100644 --- a/engine/storage/datamotion/src/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java +++ b/engine/storage/datamotion/src/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java @@ -18,18 +18,27 @@ */ package org.apache.cloudstack.storage.motion; +import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Random; +import java.util.concurrent.ExecutionException; import javax.inject.Inject; +import 
com.cloud.dc.dao.ClusterDao; +import com.cloud.exception.AgentUnavailableException; +import com.cloud.exception.OperationTimedoutException; + import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo; import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; import org.apache.cloudstack.engine.subsystem.api.storage.DataMotionStrategy; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreCapabilities; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority; @@ -43,8 +52,12 @@ import org.apache.cloudstack.framework.async.AsyncCompletionCallback; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.storage.command.CopyCmdAnswer; import org.apache.cloudstack.storage.command.CopyCommand; +import org.apache.cloudstack.storage.command.ResignatureAnswer; +import org.apache.cloudstack.storage.command.ResignatureCommand; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.storage.to.VolumeObjectTO; +import org.apache.commons.lang.StringUtils; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -55,65 +68,98 @@ import com.cloud.configuration.Config; import com.cloud.host.Host; import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; +import com.cloud.host.dao.HostDetailsDao; import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.org.Cluster; -import com.cloud.org.Grouping.AllocationState; -import com.cloud.resource.ResourceState; import com.cloud.server.ManagementService; import com.cloud.storage.DataStoreRole; import com.cloud.storage.DiskOfferingVO; import com.cloud.storage.SnapshotVO; import com.cloud.storage.Storage.ImageFormat; +import com.cloud.storage.VolumeDetailVO; import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.DiskOfferingDao; import com.cloud.storage.dao.SnapshotDao; import com.cloud.storage.dao.SnapshotDetailsDao; import com.cloud.storage.dao.SnapshotDetailsVO; import com.cloud.storage.dao.VolumeDao; +import com.cloud.storage.dao.VolumeDetailsDao; import com.cloud.utils.NumbersUtil; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.VirtualMachineManager; +import com.google.common.base.Preconditions; + @Component public class StorageSystemDataMotionStrategy implements DataMotionStrategy { - private static final Logger s_logger = Logger.getLogger(StorageSystemDataMotionStrategy.class); + private static final Logger LOGGER = Logger.getLogger(StorageSystemDataMotionStrategy.class); + private static final Random RANDOM = new Random(System.nanoTime()); @Inject private AgentManager _agentMgr; @Inject private ConfigurationDao _configDao; + @Inject private DataStoreManager dataStoreMgr; @Inject private DiskOfferingDao _diskOfferingDao; + @Inject private ClusterDao clusterDao; @Inject private HostDao _hostDao; + @Inject private HostDetailsDao hostDetailsDao; @Inject private ManagementService _mgr; @Inject private PrimaryDataStoreDao _storagePoolDao; @Inject private SnapshotDao _snapshotDao; @Inject private 
SnapshotDetailsDao _snapshotDetailsDao; @Inject private VolumeDao _volumeDao; @Inject private VolumeDataFactory _volumeDataFactory; + @Inject private VolumeDetailsDao volumeDetailsDao; @Inject private VolumeService _volumeService; @Override public StrategyPriority canHandle(DataObject srcData, DataObject destData) { if (srcData instanceof SnapshotInfo) { - if (canHandle(srcData.getDataStore()) || canHandle(destData.getDataStore())) { + if (canHandle(srcData) || canHandle(destData)) { return StrategyPriority.HIGHEST; } } + if (srcData instanceof TemplateInfo && destData instanceof VolumeInfo && + (srcData.getDataStore().getId() == destData.getDataStore().getId()) && + (canHandle(srcData) || canHandle(destData))) { + // Both source and dest are on the same storage, so just clone them. + return StrategyPriority.HIGHEST; + } + return StrategyPriority.CANT_HANDLE; } - private boolean canHandle(DataStore dataStore) { + private boolean canHandle(DataObject dataObject) { + Preconditions.checkArgument(dataObject != null, "Passing 'null' to dataObject of canHandle(DataObject) is not supported."); + + DataStore dataStore = dataObject.getDataStore(); + if (dataStore.getRole() == DataStoreRole.Primary) { Map mapCapabilities = dataStore.getDriver().getCapabilities(); - if (mapCapabilities != null) { + if (mapCapabilities == null) { + return false; + } + + if (dataObject instanceof VolumeInfo || dataObject instanceof SnapshotInfo) { String value = mapCapabilities.get(DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT.toString()); - Boolean supportsStorageSystemSnapshots = new Boolean(value); + Boolean supportsStorageSystemSnapshots = Boolean.valueOf(value); if (supportsStorageSystemSnapshots) { - s_logger.info("Using 'StorageSystemDataMotionStrategy'"); + LOGGER.info("Using 'StorageSystemDataMotionStrategy' (dataObject is a volume or snapshot and the storage system supports snapshots)"); return true; } + } else if (dataObject instanceof TemplateInfo) { + // If the storage system can clone volumes, we can cache templates on it. 
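canHandle(DataObject) keys off capability strings that the storage driver advertises through getCapabilities(). As a hedged, driver-side sketch (not any specific plug-in), a driver that wants this strategy to handle its volumes, snapshots, and cached templates might report the new DataStoreCapabilities values like this:

```java
import java.util.HashMap;
import java.util.Map;

import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreCapabilities;

public class CapableDriverSketch {
    // Hypothetical counterpart to the checks in canHandle(DataObject): a primary storage
    // driver opts into these code paths by advertising the capability strings below.
    public Map<String, String> getCapabilities() {
        Map<String, String> mapCapabilities = new HashMap<>();

        // Snapshots are taken on the storage system itself (as back-end snapshots or clones).
        mapCapabilities.put(DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT.toString(), Boolean.TRUE.toString());

        // The back end can create a volume from a back-end snapshot ("cloneOfSnapshot"/"tempVolume" details).
        mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_SNAPSHOT.toString(), Boolean.TRUE.toString());

        // The back end can clone a volume from a volume, which also allows templates to be cached on it.
        mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_VOLUME.toString(), Boolean.TRUE.toString());

        return mapCapabilities;
    }
}
```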
+ String value = mapCapabilities.get(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_VOLUME.toString()); + Boolean canCloneVolume = Boolean.valueOf(value); + + if (canCloneVolume) { + LOGGER.info("Using 'StorageSystemDataMotionStrategy' (dataObject is a template and the storage system can create a volume from a volume)"); + + return true; + } + } } @@ -132,36 +178,92 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { validate(snapshotInfo); - boolean canHandleSrc = canHandle(srcData.getDataStore()); + boolean canHandleSrc = canHandle(srcData); if (canHandleSrc && destData instanceof TemplateInfo && (destData.getDataStore().getRole() == DataStoreRole.Image || destData.getDataStore().getRole() == DataStoreRole.ImageCache)) { handleCreateTemplateFromSnapshot(snapshotInfo, (TemplateInfo)destData, callback); + return; } if (destData instanceof VolumeInfo) { VolumeInfo volumeInfo = (VolumeInfo)destData; - boolean canHandleDest = canHandle(destData.getDataStore()); + + boolean canHandleDest = canHandle(destData); if (canHandleSrc && canHandleDest) { - handleCreateVolumeFromSnapshotBothOnStorageSystem(snapshotInfo, volumeInfo, callback); - return; + if (snapshotInfo.getDataStore().getId() == volumeInfo.getDataStore().getId()) { + handleCreateVolumeFromSnapshotBothOnStorageSystem(snapshotInfo, volumeInfo, callback); + return; + } + else { + String errMsg = "This operation is not supported (DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT " + + "not supported by source or destination storage plug-in). " + getSrcDestDataStoreMsg(srcData, destData); + + LOGGER.warn(errMsg); + + throw new UnsupportedOperationException(errMsg); + } } + if (canHandleSrc) { - throw new UnsupportedOperationException("This operation is not supported (DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT " + - "not supported by destination storage plug-in)."); + String errMsg = "This operation is not supported (DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT " + + "not supported by destination storage plug-in). " + getDestDataStoreMsg(destData); + + LOGGER.warn(errMsg); + + throw new UnsupportedOperationException(errMsg); } + if (canHandleDest) { - throw new UnsupportedOperationException("This operation is not supported (DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT " + - "not supported by source storage plug-in)."); + String errMsg = "This operation is not supported (DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT " + + "not supported by source storage plug-in). " + getSrcDataStoreMsg(srcData); + + LOGGER.warn(errMsg); + + throw new UnsupportedOperationException(errMsg); } } + } else if (srcData instanceof TemplateInfo && destData instanceof VolumeInfo) { + boolean canHandleSrc = canHandle(srcData); + + if (!canHandleSrc) { + String errMsg = "This operation is not supported (DataStoreCapabilities.STORAGE_CAN_CREATE_VOLUME_FROM_VOLUME " + + "not supported by destination storage plug-in). 
" + getDestDataStoreMsg(destData); + + LOGGER.warn(errMsg); + + throw new UnsupportedOperationException(errMsg); + } + + handleCreateVolumeFromTemplateBothOnStorageSystem((TemplateInfo)srcData, (VolumeInfo)destData, callback); + + return; } throw new UnsupportedOperationException("This operation is not supported."); } + private String getSrcDestDataStoreMsg(DataObject srcData, DataObject destData) { + Preconditions.checkArgument(srcData != null, "Passing 'null' to srcData of getSrcDestDataStoreMsg(DataObject, DataObject) is not supported."); + Preconditions.checkArgument(destData != null, "Passing 'null' to destData of getSrcDestDataStoreMsg(DataObject, DataObject) is not supported."); + + return "Source data store = " + srcData.getDataStore().getName() + "; " + "Destination data store = " + destData.getDataStore().getName() + "."; + } + + private String getSrcDataStoreMsg(DataObject srcData) { + Preconditions.checkArgument(srcData != null, "Passing 'null' to srcData of getSrcDataStoreMsg(DataObject) is not supported."); + + return "Source data store = " + srcData.getDataStore().getName() + "."; + } + + private String getDestDataStoreMsg(DataObject destData) { + Preconditions.checkArgument(destData != null, "Passing 'null' to destData of getDestDataStoreMsg(DataObject) is not supported."); + + return "Destination data store = " + destData.getDataStore().getName() + "."; + } + private void validate(SnapshotInfo snapshotInfo) { long volumeId = snapshotInfo.getVolumeId(); @@ -172,7 +274,13 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { } } - private Void handleCreateTemplateFromSnapshot(SnapshotInfo snapshotInfo, TemplateInfo templateInfo, AsyncCompletionCallback callback) { + private boolean usingBackendSnapshotFor(SnapshotInfo snapshotInfo) { + String property = getProperty(snapshotInfo.getId(), "takeSnapshot"); + + return Boolean.parseBoolean(property); + } + + private void handleCreateTemplateFromSnapshot(SnapshotInfo snapshotInfo, TemplateInfo templateInfo, AsyncCompletionCallback callback) { try { snapshotInfo.processEvent(Event.CopyingRequested); } @@ -180,57 +288,168 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { throw new CloudRuntimeException("This snapshot is not currently in a state where it can be used to create a template."); } - HostVO hostVO = getHost(snapshotInfo.getDataStore().getId()); - DataStore srcDataStore = snapshotInfo.getDataStore(); + HostVO hostVO = getHost(snapshotInfo); - String value = _configDao.getValue(Config.PrimaryStorageDownloadWait.toString()); - int primaryStorageDownloadWait = NumbersUtil.parseInt(value, Integer.parseInt(Config.PrimaryStorageDownloadWait.getDefaultValue())); - CopyCommand copyCommand = new CopyCommand(snapshotInfo.getTO(), templateInfo.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value()); + boolean usingBackendSnapshot = usingBackendSnapshotFor(snapshotInfo); + boolean computeClusterSupportsResign = clusterDao.getSupportsResigning(hostVO.getClusterId()); - String errMsg = null; + if (usingBackendSnapshot && !computeClusterSupportsResign) { + String noSupportForResignErrMsg = "Unable to locate an applicable host with which to perform a resignature operation : Cluster ID = " + hostVO.getClusterId(); - CopyCmdAnswer copyCmdAnswer = null; + LOGGER.warn(noSupportForResignErrMsg); + + throw new CloudRuntimeException(noSupportForResignErrMsg); + } try { - _volumeService.grantAccess(snapshotInfo, hostVO, srcDataStore); + if (usingBackendSnapshot) 
{ + createVolumeFromSnapshot(hostVO, snapshotInfo, true); + } - Map srcDetails = getSnapshotDetails(_storagePoolDao.findById(srcDataStore.getId()), snapshotInfo); + DataStore srcDataStore = snapshotInfo.getDataStore(); - copyCommand.setOptions(srcDetails); + String value = _configDao.getValue(Config.PrimaryStorageDownloadWait.toString()); + int primaryStorageDownloadWait = NumbersUtil.parseInt(value, Integer.parseInt(Config.PrimaryStorageDownloadWait.getDefaultValue())); + CopyCommand copyCommand = new CopyCommand(snapshotInfo.getTO(), templateInfo.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value()); - copyCmdAnswer = (CopyCmdAnswer)_agentMgr.send(hostVO.getId(), copyCommand); - } - catch (Exception ex) { - throw new CloudRuntimeException(ex.getMessage()); + String errMsg = null; + + CopyCmdAnswer copyCmdAnswer = null; + + try { + // If we are using a back-end snapshot, then we should still have access to it from the hosts in the cluster that hostVO is in + // (because we passed in true as the third parameter to createVolumeFromSnapshot above). + if (usingBackendSnapshot == false) { + _volumeService.grantAccess(snapshotInfo, hostVO, srcDataStore); + } + + Map srcDetails = getSnapshotDetails(snapshotInfo); + + copyCommand.setOptions(srcDetails); + + copyCmdAnswer = (CopyCmdAnswer)_agentMgr.send(hostVO.getId(), copyCommand); + } + catch (CloudRuntimeException | AgentUnavailableException | OperationTimedoutException ex) { + String msg = "Failed to create template from snapshot (Snapshot ID = " + snapshotInfo.getId() + ") : "; + + LOGGER.warn(msg, ex); + + throw new CloudRuntimeException(msg + ex.getMessage()); + } + finally { + try { + _volumeService.revokeAccess(snapshotInfo, hostVO, srcDataStore); + } + catch (Exception ex) { + LOGGER.warn("Error revoking access to snapshot (Snapshot ID = " + snapshotInfo.getId() + "): " + ex.getMessage(), ex); + } + + if (copyCmdAnswer == null || !copyCmdAnswer.getResult()) { + if (copyCmdAnswer != null && !StringUtils.isEmpty(copyCmdAnswer.getDetails())) { + errMsg = copyCmdAnswer.getDetails(); + } + else { + errMsg = "Unable to create template from snapshot"; + } + } + + try { + if (StringUtils.isEmpty(errMsg)) { + snapshotInfo.processEvent(Event.OperationSuccessed); + } + else { + snapshotInfo.processEvent(Event.OperationFailed); + } + } + catch (Exception ex) { + LOGGER.warn("Error processing snapshot event: " + ex.getMessage(), ex); + } + } + + CopyCommandResult result = new CopyCommandResult(null, copyCmdAnswer); + + result.setResult(errMsg); + + callback.complete(result); } finally { - try { - _volumeService.revokeAccess(snapshotInfo, hostVO, srcDataStore); + if (usingBackendSnapshot) { + deleteVolumeFromSnapshot(snapshotInfo); } - catch (Exception ex) { - s_logger.debug(ex.getMessage(), ex); + } + } + + /** + * Clones a template present on the storage to a new volume and resignatures it. 
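The template-to-volume path documented in this javadoc persists a transient "cloneOfTemplate" row in volume_details before invoking createVolumeAsync and removes it once the result returns; a driver that advertises CAN_CREATE_VOLUME_FROM_VOLUME can pick that hint up from its createAsync path and clone the cached template instead of allocating an empty volume (exact behavior is driver-specific). A hypothetical driver-side reader, with class and method names that are illustrative only:

```java
import com.cloud.storage.VolumeDetailVO;
import com.cloud.storage.dao.VolumeDetailsDao;

public class CloneOfTemplateLookupSketch {
    private final VolumeDetailsDao volumeDetailsDao;

    public CloneOfTemplateLookupSketch(VolumeDetailsDao volumeDetailsDao) {
        this.volumeDetailsDao = volumeDetailsDao;
    }

    // Returns the ID of the template the new volume should be cloned from, or null if the
    // volume is not being created via the "clone template, then resignature" path.
    public Long getTemplateToCloneFrom(long volumeId) {
        VolumeDetailVO cloneOfTemplate = volumeDetailsDao.findDetail(volumeId, "cloneOfTemplate");

        if (cloneOfTemplate == null || cloneOfTemplate.getValue() == null) {
            return null;
        }

        return Long.valueOf(cloneOfTemplate.getValue());
    }
}
```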
+ * + * @param templateInfo source template + * @param volumeInfo destination ROOT volume + * @param callback for async + */ + private void handleCreateVolumeFromTemplateBothOnStorageSystem(TemplateInfo templateInfo, VolumeInfo volumeInfo, AsyncCompletionCallback callback) { + Preconditions.checkArgument(templateInfo != null, "Passing 'null' to templateInfo of handleCreateVolumeFromTemplateBothOnStorageSystem is not supported."); + Preconditions.checkArgument(volumeInfo != null, "Passing 'null' to volumeInfo of handleCreateVolumeFromTemplateBothOnStorageSystem is not supported."); + + CopyCmdAnswer copyCmdAnswer = null; + String errMsg = null; + + HostVO hostVO = getHost(volumeInfo.getDataCenterId(), true); + + if (hostVO == null) { + throw new CloudRuntimeException("Unable to locate a host capable of resigning in the zone with the following ID: " + volumeInfo.getDataCenterId()); + } + + boolean computeClusterSupportsResign = clusterDao.getSupportsResigning(hostVO.getClusterId()); + + if (!computeClusterSupportsResign) { + String noSupportForResignErrMsg = "Unable to locate an applicable host with which to perform a resignature operation : Cluster ID = " + hostVO.getClusterId(); + + LOGGER.warn(noSupportForResignErrMsg); + + throw new CloudRuntimeException(noSupportForResignErrMsg); + } + + try { + VolumeDetailVO volumeDetail = new VolumeDetailVO(volumeInfo.getId(), + "cloneOfTemplate", + String.valueOf(templateInfo.getId()), + false); + + volumeDetail = volumeDetailsDao.persist(volumeDetail); + + AsyncCallFuture future = _volumeService.createVolumeAsync(volumeInfo, volumeInfo.getDataStore()); + VolumeApiResult result = future.get(); + + if (volumeDetail != null) { + volumeDetailsDao.remove(volumeDetail.getId()); } + if (result.isFailed()) { + LOGGER.warn("Failed to create a volume: " + result.getResult()); + + throw new CloudRuntimeException(result.getResult()); + } + + volumeInfo = _volumeDataFactory.getVolume(volumeInfo.getId(), volumeInfo.getDataStore()); + + volumeInfo.processEvent(Event.MigrationRequested); + + volumeInfo = _volumeDataFactory.getVolume(volumeInfo.getId(), volumeInfo.getDataStore()); + + copyCmdAnswer = performResignature(volumeInfo, hostVO); + if (copyCmdAnswer == null || !copyCmdAnswer.getResult()) { - if (copyCmdAnswer != null && copyCmdAnswer.getDetails() != null && !copyCmdAnswer.getDetails().isEmpty()) { - errMsg = copyCmdAnswer.getDetails(); + if (copyCmdAnswer != null && !StringUtils.isEmpty(copyCmdAnswer.getDetails())) { + throw new CloudRuntimeException(copyCmdAnswer.getDetails()); } else { - errMsg = "Unable to perform host-side operation"; + throw new CloudRuntimeException("Unable to create a volume from a template"); } } + } catch (InterruptedException | ExecutionException ex) { + volumeInfo.getDataStore().getDriver().deleteAsync(volumeInfo.getDataStore(), volumeInfo, null); - try { - if (errMsg == null) { - snapshotInfo.processEvent(Event.OperationSuccessed); - } - else { - snapshotInfo.processEvent(Event.OperationFailed); - } - } - catch (Exception ex) { - s_logger.debug(ex.getMessage(), ex); - } + throw new CloudRuntimeException("Create volume from template (ID = " + templateInfo.getId() + ") failed " + ex.getMessage()); } CopyCommandResult result = new CopyCommandResult(null, copyCmdAnswer); @@ -238,12 +457,40 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { result.setResult(errMsg); callback.complete(result); - - return null; } - private Void handleCreateVolumeFromSnapshotBothOnStorageSystem(SnapshotInfo snapshotInfo, 
VolumeInfo volumeInfo, AsyncCompletionCallback callback) { + private void handleCreateVolumeFromSnapshotBothOnStorageSystem(SnapshotInfo snapshotInfo, VolumeInfo volumeInfo, AsyncCompletionCallback callback) { + CopyCmdAnswer copyCmdAnswer = null; + String errMsg = null; + try { + HostVO hostVO = getHost(snapshotInfo); + + boolean usingBackendSnapshot = usingBackendSnapshotFor(snapshotInfo); + boolean computeClusterSupportsResign = clusterDao.getSupportsResigning(hostVO.getClusterId()); + + if (usingBackendSnapshot && !computeClusterSupportsResign) { + String noSupportForResignErrMsg = "Unable to locate an applicable host with which to perform a resignature operation : Cluster ID = " + hostVO.getClusterId(); + + LOGGER.warn(noSupportForResignErrMsg); + + throw new CloudRuntimeException(noSupportForResignErrMsg); + } + + boolean canStorageSystemCreateVolumeFromVolume = canStorageSystemCreateVolumeFromVolume(snapshotInfo); + boolean useCloning = usingBackendSnapshot || (canStorageSystemCreateVolumeFromVolume && computeClusterSupportsResign); + + VolumeDetailVO volumeDetail = null; + + if (useCloning) { + volumeDetail = new VolumeDetailVO(volumeInfo.getId(), + "cloneOfSnapshot", + String.valueOf(snapshotInfo.getId()), + false); + + volumeDetail = volumeDetailsDao.persist(volumeDetail); + } + // at this point, the snapshotInfo and volumeInfo should have the same disk offering ID (so either one should be OK to get a DiskOfferingVO instance) DiskOfferingVO diskOffering = _diskOfferingDao.findByIdIncludingRemoved(volumeInfo.getDiskOfferingId()); SnapshotVO snapshot = _snapshotDao.findById(snapshotInfo.getId()); @@ -255,72 +502,44 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { VolumeApiResult result = future.get(); + if (volumeDetail != null) { + volumeDetailsDao.remove(volumeDetail.getId()); + } + if (result.isFailed()) { - s_logger.debug("Failed to create a volume: " + result.getResult()); + LOGGER.warn("Failed to create a volume: " + result.getResult()); throw new CloudRuntimeException(result.getResult()); } - } - catch (Exception ex) { - throw new CloudRuntimeException(ex.getMessage()); - } - volumeInfo = _volumeDataFactory.getVolume(volumeInfo.getId(), volumeInfo.getDataStore()); + volumeInfo = _volumeDataFactory.getVolume(volumeInfo.getId(), volumeInfo.getDataStore()); - volumeInfo.processEvent(Event.MigrationRequested); + volumeInfo.processEvent(Event.MigrationRequested); - volumeInfo = _volumeDataFactory.getVolume(volumeInfo.getId(), volumeInfo.getDataStore()); + volumeInfo = _volumeDataFactory.getVolume(volumeInfo.getId(), volumeInfo.getDataStore()); - HostVO hostVO = getHost(snapshotInfo.getDataStore().getId()); - - String value = _configDao.getValue(Config.PrimaryStorageDownloadWait.toString()); - int primaryStorageDownloadWait = NumbersUtil.parseInt(value, Integer.parseInt(Config.PrimaryStorageDownloadWait.getDefaultValue())); - CopyCommand copyCommand = new CopyCommand(snapshotInfo.getTO(), volumeInfo.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value()); - - CopyCmdAnswer copyCmdAnswer = null; - - try { - _volumeService.grantAccess(snapshotInfo, hostVO, snapshotInfo.getDataStore()); - _volumeService.grantAccess(volumeInfo, hostVO, volumeInfo.getDataStore()); - - Map srcDetails = getSnapshotDetails(_storagePoolDao.findById(snapshotInfo.getDataStore().getId()), snapshotInfo); - - copyCommand.setOptions(srcDetails); - - Map destDetails = getVolumeDetails(volumeInfo); - - copyCommand.setOptions2(destDetails); - - 
copyCmdAnswer = (CopyCmdAnswer)_agentMgr.send(hostVO.getId(), copyCommand); - } - catch (Exception ex) { - throw new CloudRuntimeException(ex.getMessage()); - } - finally { - try { - _volumeService.revokeAccess(snapshotInfo, hostVO, snapshotInfo.getDataStore()); - } - catch (Exception ex) { - s_logger.debug(ex.getMessage(), ex); - } - - try { - _volumeService.revokeAccess(volumeInfo, hostVO, volumeInfo.getDataStore()); - } - catch (Exception ex) { - s_logger.debug(ex.getMessage(), ex); - } - } - - String errMsg = null; - - if (copyCmdAnswer == null || !copyCmdAnswer.getResult()) { - if (copyCmdAnswer != null && copyCmdAnswer.getDetails() != null && !copyCmdAnswer.getDetails().isEmpty()) { - errMsg = copyCmdAnswer.getDetails(); + if (useCloning) { + copyCmdAnswer = performResignature(volumeInfo, hostVO); } else { - errMsg = "Unable to perform host-side operation"; + // asking for a XenServer host here so we don't always prefer to use XenServer hosts that support resigning + // even when we don't need those hosts to do this kind of copy work + hostVO = getHost(snapshotInfo.getDataCenterId(), false); + + copyCmdAnswer = performCopyOfVdi(volumeInfo, snapshotInfo, hostVO); } + + if (copyCmdAnswer == null || !copyCmdAnswer.getResult()) { + if (copyCmdAnswer != null && !StringUtils.isEmpty(copyCmdAnswer.getDetails())) { + errMsg = copyCmdAnswer.getDetails(); + } + else { + errMsg = "Unable to create volume from snapshot"; + } + } + } + catch (Exception ex) { + errMsg = ex.getMessage() != null ? ex.getMessage() : "Copy operation failed in 'StorageSystemDataMotionStrategy.handleCreateVolumeFromSnapshotBothOnStorageSystem'"; } CopyCommandResult result = new CopyCommandResult(null, copyCmdAnswer); @@ -328,26 +547,78 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { result.setResult(errMsg); callback.complete(result); - - return null; } - private Map getSnapshotDetails(StoragePoolVO storagePoolVO, SnapshotInfo snapshotInfo) { - Map details = new HashMap(); + /** + * If the underlying storage system is making use of read-only snapshots, this gives the storage system the opportunity to + * create a volume from the snapshot so that we can copy the VHD file that should be inside of the snapshot to secondary storage. + * + * The resultant volume must be writable because we need to resign the SR and the VDI that should be inside of it before we copy + * the VHD file to secondary storage. + * + * If the storage system is using writable snapshots, then nothing need be done by that storage system here because we can just + * resign the SR and the VDI that should be inside of the snapshot before copying the VHD file to secondary storage. 
+ */ + private void createVolumeFromSnapshot(HostVO hostVO, SnapshotInfo snapshotInfo, boolean keepGrantedAccess) { + SnapshotDetailsVO snapshotDetails = handleSnapshotDetails(snapshotInfo.getId(), "tempVolume", "create"); - details.put(DiskTO.STORAGE_HOST, storagePoolVO.getHostAddress()); - details.put(DiskTO.STORAGE_PORT, String.valueOf(storagePoolVO.getPort())); + try { + snapshotInfo.getDataStore().getDriver().createAsync(snapshotInfo.getDataStore(), snapshotInfo, null); + } + finally { + _snapshotDetailsDao.remove(snapshotDetails.getId()); + } - long snapshotId = snapshotInfo.getId(); + CopyCmdAnswer copyCmdAnswer = performResignature(snapshotInfo, hostVO, keepGrantedAccess); - details.put(DiskTO.IQN, getProperty(snapshotId, DiskTO.IQN)); + if (copyCmdAnswer == null || !copyCmdAnswer.getResult()) { + if (copyCmdAnswer != null && !StringUtils.isEmpty(copyCmdAnswer.getDetails())) { + throw new CloudRuntimeException(copyCmdAnswer.getDetails()); + } + else { + throw new CloudRuntimeException("Unable to create volume from snapshot"); + } + } + } - details.put(DiskTO.CHAP_INITIATOR_USERNAME, getProperty(snapshotId, DiskTO.CHAP_INITIATOR_USERNAME)); - details.put(DiskTO.CHAP_INITIATOR_SECRET, getProperty(snapshotId, DiskTO.CHAP_INITIATOR_SECRET)); - details.put(DiskTO.CHAP_TARGET_USERNAME, getProperty(snapshotId, DiskTO.CHAP_TARGET_USERNAME)); - details.put(DiskTO.CHAP_TARGET_SECRET, getProperty(snapshotId, DiskTO.CHAP_TARGET_SECRET)); + /** + * If the underlying storage system needed to create a volume from a snapshot for createVolumeFromSnapshot(HostVO, SnapshotInfo), then + * this is its opportunity to delete that temporary volume and restore properties in snapshot_details to the way they were before the + * invocation of createVolumeFromSnapshot(HostVO, SnapshotInfo). 
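The temporary-volume handshake described in the javadoc above (and completed by deleteVolumeFromSnapshot below) is driven entirely by the "tempVolume" snapshot detail: "create" is written before the first createAsync call and "delete" before the second. A hedged sketch of how a driver's createAsync path might act on that hint follows; the two handle* helpers are placeholders for back-end clone and cleanup logic:

```java
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;

import com.cloud.storage.dao.SnapshotDetailsDao;
import com.cloud.storage.dao.SnapshotDetailsVO;

public class TempVolumeHintSketch {
    private final SnapshotDetailsDao snapshotDetailsDao;

    public TempVolumeHintSketch(SnapshotDetailsDao snapshotDetailsDao) {
        this.snapshotDetailsDao = snapshotDetailsDao;
    }

    // Meant to be invoked from a driver's createAsync(...) when the DataObject passed in is a SnapshotInfo.
    public void handleTempVolumeHint(SnapshotInfo snapshotInfo) {
        SnapshotDetailsVO tempVolume = snapshotDetailsDao.findDetail(snapshotInfo.getId(), "tempVolume");

        if (tempVolume == null) {
            return; // ordinary snapshot handling, no temporary volume requested
        }

        if ("create".equalsIgnoreCase(tempVolume.getValue())) {
            handleCreateTempVolume(snapshotInfo); // placeholder: surface a writable volume for the back-end snapshot
        }
        else if ("delete".equalsIgnoreCase(tempVolume.getValue())) {
            handleDeleteTempVolume(snapshotInfo); // placeholder: remove the temporary volume again
        }
    }

    private void handleCreateTempVolume(SnapshotInfo snapshotInfo) {
        // back-end specific clone logic goes here
    }

    private void handleDeleteTempVolume(SnapshotInfo snapshotInfo) {
        // back-end specific cleanup logic goes here
    }
}
```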
+ */ + private void deleteVolumeFromSnapshot(SnapshotInfo snapshotInfo) { + SnapshotDetailsVO snapshotDetails = handleSnapshotDetails(snapshotInfo.getId(), "tempVolume", "delete"); - return details; + try { + snapshotInfo.getDataStore().getDriver().createAsync(snapshotInfo.getDataStore(), snapshotInfo, null); + } + finally { + _snapshotDetailsDao.remove(snapshotDetails.getId()); + } + } + + private SnapshotDetailsVO handleSnapshotDetails(long csSnapshotId, String name, String value) { + _snapshotDetailsDao.removeDetail(csSnapshotId, name); + + SnapshotDetailsVO snapshotDetails = new SnapshotDetailsVO(csSnapshotId, name, value, false); + + return _snapshotDetailsDao.persist(snapshotDetails); + } + + private boolean canStorageSystemCreateVolumeFromVolume(SnapshotInfo snapshotInfo) { + boolean supportsCloningVolumeFromVolume = false; + + DataStore dataStore = dataStoreMgr.getDataStore(snapshotInfo.getDataStore().getId(), DataStoreRole.Primary); + + Map mapCapabilities = dataStore.getDriver().getCapabilities(); + + if (mapCapabilities != null) { + String value = mapCapabilities.get(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_VOLUME.toString()); + + supportsCloningVolumeFromVolume = Boolean.valueOf(value); + } + + return supportsCloningVolumeFromVolume; } private String getProperty(long snapshotId, String property) { @@ -361,59 +632,209 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { } private Map getVolumeDetails(VolumeInfo volumeInfo) { - Map sourceDetails = new HashMap(); + Map volumeDetails = new HashMap(); VolumeVO volumeVO = _volumeDao.findById(volumeInfo.getId()); long storagePoolId = volumeVO.getPoolId(); StoragePoolVO storagePoolVO = _storagePoolDao.findById(storagePoolId); - sourceDetails.put(DiskTO.STORAGE_HOST, storagePoolVO.getHostAddress()); - sourceDetails.put(DiskTO.STORAGE_PORT, String.valueOf(storagePoolVO.getPort())); - sourceDetails.put(DiskTO.IQN, volumeVO.get_iScsiName()); + volumeDetails.put(DiskTO.STORAGE_HOST, storagePoolVO.getHostAddress()); + volumeDetails.put(DiskTO.STORAGE_PORT, String.valueOf(storagePoolVO.getPort())); + volumeDetails.put(DiskTO.IQN, volumeVO.get_iScsiName()); ChapInfo chapInfo = _volumeService.getChapInfo(volumeInfo, volumeInfo.getDataStore()); if (chapInfo != null) { - sourceDetails.put(DiskTO.CHAP_INITIATOR_USERNAME, chapInfo.getInitiatorUsername()); - sourceDetails.put(DiskTO.CHAP_INITIATOR_SECRET, chapInfo.getInitiatorSecret()); - sourceDetails.put(DiskTO.CHAP_TARGET_USERNAME, chapInfo.getTargetUsername()); - sourceDetails.put(DiskTO.CHAP_TARGET_SECRET, chapInfo.getTargetSecret()); + volumeDetails.put(DiskTO.CHAP_INITIATOR_USERNAME, chapInfo.getInitiatorUsername()); + volumeDetails.put(DiskTO.CHAP_INITIATOR_SECRET, chapInfo.getInitiatorSecret()); + volumeDetails.put(DiskTO.CHAP_TARGET_USERNAME, chapInfo.getTargetUsername()); + volumeDetails.put(DiskTO.CHAP_TARGET_SECRET, chapInfo.getTargetSecret()); } - return sourceDetails; + return volumeDetails; } - public HostVO getHost(long dataStoreId) { - StoragePoolVO storagePoolVO = _storagePoolDao.findById(dataStoreId); + private Map getSnapshotDetails(SnapshotInfo snapshotInfo) { + Map snapshotDetails = new HashMap(); - List clusters = _mgr.searchForClusters(storagePoolVO.getDataCenterId(), new Long(0), Long.MAX_VALUE, HypervisorType.XenServer.toString()); + long storagePoolId = snapshotInfo.getDataStore().getId(); + StoragePoolVO storagePoolVO = _storagePoolDao.findById(storagePoolId); - if (clusters == null) { - throw new CloudRuntimeException("Unable to locate an 
applicable cluster"); - } + snapshotDetails.put(DiskTO.STORAGE_HOST, storagePoolVO.getHostAddress()); + snapshotDetails.put(DiskTO.STORAGE_PORT, String.valueOf(storagePoolVO.getPort())); - for (Cluster cluster : clusters) { - if (cluster.getAllocationState() == AllocationState.Enabled) { - List hosts = _hostDao.findByClusterId(cluster.getId()); + long snapshotId = snapshotInfo.getId(); - if (hosts != null) { - for (HostVO host : hosts) { - if (host.getResourceState() == ResourceState.Enabled) { - return host; - } - } - } + snapshotDetails.put(DiskTO.IQN, getProperty(snapshotId, DiskTO.IQN)); + + snapshotDetails.put(DiskTO.CHAP_INITIATOR_USERNAME, getProperty(snapshotId, DiskTO.CHAP_INITIATOR_USERNAME)); + snapshotDetails.put(DiskTO.CHAP_INITIATOR_SECRET, getProperty(snapshotId, DiskTO.CHAP_INITIATOR_SECRET)); + snapshotDetails.put(DiskTO.CHAP_TARGET_USERNAME, getProperty(snapshotId, DiskTO.CHAP_TARGET_USERNAME)); + snapshotDetails.put(DiskTO.CHAP_TARGET_SECRET, getProperty(snapshotId, DiskTO.CHAP_TARGET_SECRET)); + + return snapshotDetails; + } + + private HostVO getHost(SnapshotInfo snapshotInfo) { + HostVO hostVO = getHost(snapshotInfo.getDataCenterId(), true); + + if (hostVO == null) { + hostVO = getHost(snapshotInfo.getDataCenterId(), false); + + if (hostVO == null) { + throw new CloudRuntimeException("Unable to locate an applicable host in data center with ID = " + snapshotInfo.getDataCenterId()); } } - throw new CloudRuntimeException("Unable to locate an applicable cluster"); + return hostVO; + } + + private HostVO getHost(Long zoneId, boolean computeClusterMustSupportResign) { + Preconditions.checkArgument(zoneId != null, "Zone ID cannot be null."); + + List hosts = _hostDao.listByDataCenterIdAndHypervisorType(zoneId, HypervisorType.XenServer); + + if (hosts == null) { + return null; + } + + List clustersToSkip = new ArrayList<>(); + + Collections.shuffle(hosts, RANDOM); + + for (HostVO host : hosts) { + if (computeClusterMustSupportResign) { + long clusterId = host.getClusterId(); + + if (clustersToSkip.contains(clusterId)) { + continue; + } + + if (clusterDao.getSupportsResigning(clusterId)) { + return host; + } + else { + clustersToSkip.add(clusterId); + } + } + else { + return host; + } + } + + return null; } @Override public void copyAsync(Map volumeMap, VirtualMachineTO vmTo, Host srcHost, Host destHost, AsyncCompletionCallback callback) { CopyCommandResult result = new CopyCommandResult(null, null); + result.setResult("Unsupported operation requested for copying data."); + callback.complete(result); } + + private Map getDetails(DataObject dataObj) { + if (dataObj instanceof VolumeInfo) { + return getVolumeDetails((VolumeInfo)dataObj); + } + else if (dataObj instanceof SnapshotInfo) { + return getSnapshotDetails((SnapshotInfo)dataObj); + } + + throw new CloudRuntimeException("'dataObj' must be of type 'VolumeInfo' or 'SnapshotInfo'."); + } + + private CopyCmdAnswer performResignature(DataObject dataObj, HostVO hostVO) { + return performResignature(dataObj, hostVO, false); + } + + private CopyCmdAnswer performResignature(DataObject dataObj, HostVO hostVO, boolean keepGrantedAccess) { + long storagePoolId = dataObj.getDataStore().getId(); + DataStore dataStore = dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary); + + Map details = getDetails(dataObj); + + ResignatureCommand command = new ResignatureCommand(details); + + ResignatureAnswer answer = null; + + try { + _volumeService.grantAccess(dataObj, hostVO, dataStore); + + answer = 
(ResignatureAnswer)_agentMgr.send(hostVO.getId(), command); + } + catch (CloudRuntimeException | AgentUnavailableException | OperationTimedoutException ex) { + keepGrantedAccess = false; + + String msg = "Failed to resign the DataObject with the following ID: " + dataObj.getId(); + + LOGGER.warn(msg, ex); + + throw new CloudRuntimeException(msg + ex.getMessage()); + } + finally { + if (keepGrantedAccess == false) { + _volumeService.revokeAccess(dataObj, hostVO, dataStore); + } + } + + if (answer == null || !answer.getResult()) { + final String errMsg; + + if (answer != null && answer.getDetails() != null && !answer.getDetails().isEmpty()) { + errMsg = answer.getDetails(); + } + else { + errMsg = "Unable to perform resignature operation in 'StorageSystemDataMotionStrategy.performResignature'"; + } + + throw new CloudRuntimeException(errMsg); + } + + VolumeObjectTO newVolume = new VolumeObjectTO(); + + newVolume.setSize(answer.getSize()); + newVolume.setPath(answer.getPath()); + newVolume.setFormat(answer.getFormat()); + + return new CopyCmdAnswer(newVolume); + } + + private CopyCmdAnswer performCopyOfVdi(VolumeInfo volumeInfo, SnapshotInfo snapshotInfo, HostVO hostVO) { + String value = _configDao.getValue(Config.PrimaryStorageDownloadWait.toString()); + int primaryStorageDownloadWait = NumbersUtil.parseInt(value, Integer.parseInt(Config.PrimaryStorageDownloadWait.getDefaultValue())); + CopyCommand copyCommand = new CopyCommand(snapshotInfo.getTO(), volumeInfo.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value()); + + CopyCmdAnswer copyCmdAnswer = null; + + try { + _volumeService.grantAccess(snapshotInfo, hostVO, snapshotInfo.getDataStore()); + _volumeService.grantAccess(volumeInfo, hostVO, volumeInfo.getDataStore()); + + Map srcDetails = getSnapshotDetails(snapshotInfo); + + copyCommand.setOptions(srcDetails); + + Map destDetails = getVolumeDetails(volumeInfo); + + copyCommand.setOptions2(destDetails); + + copyCmdAnswer = (CopyCmdAnswer)_agentMgr.send(hostVO.getId(), copyCommand); + } + catch (CloudRuntimeException | AgentUnavailableException | OperationTimedoutException ex) { + String msg = "Failed to perform VDI copy : "; + + LOGGER.warn(msg, ex); + + throw new CloudRuntimeException(msg + ex.getMessage()); + } + finally { + _volumeService.revokeAccess(snapshotInfo, hostVO, snapshotInfo.getDataStore()); + _volumeService.revokeAccess(volumeInfo, hostVO, volumeInfo.getDataStore()); + } + + return copyCmdAnswer; + } } diff --git a/engine/storage/image/src/org/apache/cloudstack/storage/image/TemplateServiceImpl.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/TemplateServiceImpl.java index df2d37f05b0..0d96660d58c 100644 --- a/engine/storage/image/src/org/apache/cloudstack/storage/image/TemplateServiceImpl.java +++ b/engine/storage/image/src/org/apache/cloudstack/storage/image/TemplateServiceImpl.java @@ -918,6 +918,25 @@ public class TemplateServiceImpl implements TemplateService { return copyAsync(srcTemplate, srcTemplate, (DataStore)pool); } + @Override + public AsyncCallFuture deleteTemplateOnPrimary(TemplateInfo template, StoragePool pool) { + TemplateObject templateObject = (TemplateObject)_templateFactory.getTemplate(template.getId(), (DataStore)pool); + + templateObject.processEvent(ObjectInDataStoreStateMachine.Event.DestroyRequested); + + DataStore dataStore = _storeMgr.getPrimaryDataStore(pool.getId()); + + AsyncCallFuture future = new AsyncCallFuture<>(); + TemplateOpContext context = new TemplateOpContext<>(null, templateObject, 
future); + AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); + + caller.setCallback(caller.getTarget().deleteTemplateCallback(null, null)).setContext(context); + + dataStore.getDriver().deleteAsync(dataStore, templateObject, caller); + + return future; + } + protected Void copyTemplateCallBack(AsyncCallbackDispatcher callback, TemplateOpContext context) { TemplateInfo destTemplate = context.getTemplate(); CopyCommandResult result = callback.getResult(); diff --git a/engine/storage/image/src/org/apache/cloudstack/storage/image/store/TemplateObject.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/store/TemplateObject.java index 473959b0814..6e78f190d5d 100644 --- a/engine/storage/image/src/org/apache/cloudstack/storage/image/store/TemplateObject.java +++ b/engine/storage/image/src/org/apache/cloudstack/storage/image/store/TemplateObject.java @@ -28,7 +28,6 @@ import org.apache.log4j.Logger; import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; -import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStore; import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; import org.apache.cloudstack.storage.command.CopyCmdAnswer; import org.apache.cloudstack.storage.datastore.ObjectInDataStoreManager; @@ -54,6 +53,9 @@ import com.cloud.utils.component.ComponentContext; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.fsm.NoTransitionException; +import com.google.common.base.Strings; + +@SuppressWarnings("serial") public class TemplateObject implements TemplateInfo { private static final Logger s_logger = Logger.getLogger(TemplateObject.class); private VMTemplateVO imageVO; @@ -189,12 +191,15 @@ public class TemplateObject implements TemplateInfo { TemplateObjectTO newTemplate = (TemplateObjectTO)cpyAnswer.getNewData(); VMTemplateStoragePoolVO templatePoolRef = templatePoolDao.findByPoolTemplate(getDataStore().getId(), getId()); templatePoolRef.setDownloadPercent(100); - if (newTemplate.getSize() != null) { - templatePoolRef.setTemplateSize(newTemplate.getSize()); - } + + setTemplateSizeIfNeeded(newTemplate, templatePoolRef); + templatePoolRef.setDownloadState(Status.DOWNLOADED); - templatePoolRef.setLocalDownloadPath(newTemplate.getPath()); - templatePoolRef.setInstallPath(newTemplate.getPath()); + + setDownloadPathIfNeeded(newTemplate, templatePoolRef); + + setInstallPathIfNeeded(newTemplate, templatePoolRef); + templatePoolDao.update(templatePoolRef.getId(), templatePoolRef); } } else if (getDataStore().getRole() == DataStoreRole.Image || getDataStore().getRole() == DataStoreRole.ImageCache) { @@ -243,6 +248,33 @@ public class TemplateObject implements TemplateInfo { } } + /** + * In the case of managed storage, the install path may already be specified (by the storage plug-in), so do not overwrite it. + */ + private void setInstallPathIfNeeded(TemplateObjectTO template, VMTemplateStoragePoolVO templatePoolRef) { + if (Strings.isNullOrEmpty(templatePoolRef.getInstallPath())) { + templatePoolRef.setInstallPath(template.getPath()); + } + } + + /** + * In the case of managed storage, the local download path may already be specified (by the storage plug-in), so do not overwrite it. 
+ */ + private void setDownloadPathIfNeeded(TemplateObjectTO template, VMTemplateStoragePoolVO templatePoolRef) { + if (Strings.isNullOrEmpty(templatePoolRef.getLocalDownloadPath())) { + templatePoolRef.setLocalDownloadPath(template.getPath()); + } + } + + /** + * In the case of managed storage, the template size may already be specified (by the storage plug-in), so do not overwrite it. + */ + private void setTemplateSizeIfNeeded(TemplateObjectTO template, VMTemplateStoragePoolVO templatePoolRef) { + if (templatePoolRef.getTemplateSize() == 0 && template.getSize() != null) { + templatePoolRef.setTemplateSize(template.getSize()); + } + } + @Override public void incRefCount() { if (dataStore == null) { @@ -299,28 +331,17 @@ public class TemplateObject implements TemplateInfo { @Override public String getInstallPath() { - if (installPath != null) + if (installPath != null) { return installPath; + } if (dataStore == null) { return null; } - // managed primary data stores should not have an install path - if (dataStore instanceof PrimaryDataStore) { - PrimaryDataStore primaryDataStore = (PrimaryDataStore)dataStore; - - Map details = primaryDataStore.getDetails(); - - boolean managed = details != null && Boolean.parseBoolean(details.get(PrimaryDataStore.MANAGED)); - - if (managed) { - return null; - } - } - DataObjectInStore obj = objectInStoreMgr.findObject(this, dataStore); - return obj.getInstallPath(); + + return obj != null ? obj.getInstallPath() : null; } public void setInstallPath(String installPath) { @@ -435,7 +456,7 @@ public class TemplateObject implements TemplateInfo { } @Override - public Map getDetails() { + public Map getDetails() { return imageVO.getDetails(); } diff --git a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/FakePrimaryDataStoreDriver.java b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/FakePrimaryDataStoreDriver.java index 1f1ba24b6bc..a86b3219ee5 100644 --- a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/FakePrimaryDataStoreDriver.java +++ b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/FakePrimaryDataStoreDriver.java @@ -28,6 +28,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; import org.apache.cloudstack.storage.command.CommandResult; @@ -44,8 +45,8 @@ public class FakePrimaryDataStoreDriver implements PrimaryDataStoreDriver { boolean snapshotResult = true; @Override - public ChapInfo getChapInfo(VolumeInfo volumeInfo) { - return null; // To change body of implemented methods, use File | Settings | File Templates. 
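A note on the getChapInfo() signature change here: widening the parameter from VolumeInfo to DataObject lets callers fetch CHAP credentials for cached template volumes as well as for ROOT/data volumes on managed storage. A minimal usage sketch, assuming placeholder variables volumeInfo, templateOnPrimary and dataStore, with volService standing in for the injected VolumeService:

    // both calls end up in PrimaryDataStoreDriver.getChapInfo(DataObject) on the storage plug-in
    ChapInfo volumeChap = volService.getChapInfo(volumeInfo, dataStore);          // a ROOT/data volume
    ChapInfo templateChap = volService.getChapInfo(templateOnPrimary, dataStore); // a cached template volume

    if (templateChap != null) {
        // pass the CHAP credentials along (e.g. via the details map) so the hypervisor
        // can authenticate to the iSCSI target backing the template volume
    }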
+ public ChapInfo getChapInfo(DataObject dataObject) { + return null; } @Override @@ -65,8 +66,13 @@ public class FakePrimaryDataStoreDriver implements PrimaryDataStoreDriver { } @Override - public long getVolumeSizeIncludingHypervisorSnapshotReserve(Volume volume, StoragePool pool) { - return volume.getSize(); + public long getDataObjectSizeIncludingHypervisorSnapshotReserve(DataObject dataObject, StoragePool pool) { + return dataObject.getSize(); + } + + @Override + public long getBytesRequiredForTemplate(TemplateInfo templateInfo, StoragePool storagePool) { + return 0L; } @Override @@ -90,23 +96,21 @@ public class FakePrimaryDataStoreDriver implements PrimaryDataStoreDriver { } @Override - public void revertSnapshot(SnapshotInfo snapshot, AsyncCompletionCallback callback) { - //To change body of implemented methods use File | Settings | File Templates. + public void revertSnapshot(SnapshotInfo snapshotOnImageStore, SnapshotInfo snapshotOnPrimaryStore, AsyncCompletionCallback callback) { } @Override public DataTO getTO(DataObject data) { - return null; //To change body of implemented methods use File | Settings | File Templates. + return null; } @Override public DataStoreTO getStoreTO(DataStore store) { - return null; //To change body of implemented methods use File | Settings | File Templates. + return null; } @Override public void createAsync(DataStore store, DataObject data, AsyncCompletionCallback callback) { - //To change body of implemented methods use File | Settings | File Templates. } @Override @@ -119,22 +123,19 @@ public class FakePrimaryDataStoreDriver implements PrimaryDataStoreDriver { @Override public void copyAsync(DataObject srcdata, DataObject destData, AsyncCompletionCallback callback) { - //To change body of implemented methods use File | Settings | File Templates. } @Override public boolean canCopy(DataObject srcData, DataObject destData) { - return false; //To change body of implemented methods use File | Settings | File Templates. + return false; } @Override public void resize(DataObject data, AsyncCompletionCallback callback) { - //To change body of implemented methods use File | Settings | File Templates. } @Override public Map getCapabilities() { - // TODO Auto-generated method stub return null; } } diff --git a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/StorageSystemSnapshotStrategy.java b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/StorageSystemSnapshotStrategy.java index c6960eb4a1e..02691ff318e 100644 --- a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/StorageSystemSnapshotStrategy.java +++ b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/StorageSystemSnapshotStrategy.java @@ -16,14 +16,16 @@ // under the License. 
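For context on the two sizing hooks that replace getVolumeSizeIncludingHypervisorSnapshotReserve() in the driver interface, here is a rough sketch of how a managed-storage driver might implement them; the flat 10% reserve is a made-up placeholder, and a real driver would apply its own hypervisor-snapshot-reserve policy:

    @Override
    public long getDataObjectSizeIncludingHypervisorSnapshotReserve(DataObject dataObject, StoragePool pool) {
        long size = dataObject.getSize();

        // placeholder policy: pad the raw size by 10% to leave room for hypervisor snapshots
        return size + (size / 10);
    }

    @Override
    public long getBytesRequiredForTemplate(TemplateInfo templateInfo, StoragePool storagePool) {
        // bytes the storage system needs in order to hold the cached copy of this template
        return getDataObjectSizeIncludingHypervisorSnapshotReserve(templateInfo, storagePool);
    }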
package org.apache.cloudstack.storage.snapshot; +import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Random; import javax.inject.Inject; import org.apache.log4j.Logger; -import org.springframework.stereotype.Component; import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreCapabilities; @@ -38,11 +40,15 @@ import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService; import org.apache.cloudstack.storage.command.SnapshotAndCopyAnswer; import org.apache.cloudstack.storage.command.SnapshotAndCopyCommand; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; -import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.springframework.stereotype.Component; + +import com.google.common.base.Optional; + import com.cloud.agent.AgentManager; import com.cloud.agent.api.to.DiskTO; +import com.cloud.dc.dao.ClusterDao; import com.cloud.exception.InvalidParameterValueException; import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; @@ -71,18 +77,18 @@ import com.cloud.vm.dao.VMInstanceDao; public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase { private static final Logger s_logger = Logger.getLogger(StorageSystemSnapshotStrategy.class); - @Inject private AgentManager _agentMgr; - @Inject private DataStoreManager _dataStoreMgr; - @Inject private HostDao _hostDao; - @Inject private ManagementService _mgr; - @Inject private PrimaryDataStoreDao _storagePoolDao; - @Inject private SnapshotDao _snapshotDao; - @Inject private SnapshotDataFactory _snapshotDataFactory; - @Inject private SnapshotDataStoreDao _snapshotStoreDao; - @Inject private SnapshotDetailsDao _snapshotDetailsDao; - @Inject private VMInstanceDao _vmInstanceDao; - @Inject private VolumeDao _volumeDao; - @Inject private VolumeService _volService; + @Inject private AgentManager agentMgr; + @Inject private ClusterDao clusterDao; + @Inject private DataStoreManager dataStoreMgr; + @Inject private HostDao hostDao; + @Inject private ManagementService mgr; + @Inject private PrimaryDataStoreDao storagePoolDao; + @Inject private SnapshotDao snapshotDao; + @Inject private SnapshotDataFactory snapshotDataFactory; + @Inject private SnapshotDetailsDao snapshotDetailsDao; + @Inject private VMInstanceDao vmInstanceDao; + @Inject private VolumeDao volumeDao; + @Inject private VolumeService volService; @Override public SnapshotInfo backupSnapshot(SnapshotInfo snapshotInfo) { @@ -91,14 +97,14 @@ public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase { @Override public boolean deleteSnapshot(Long snapshotId) { - SnapshotVO snapshotVO = _snapshotDao.findById(snapshotId); + SnapshotVO snapshotVO = snapshotDao.findById(snapshotId); if (Snapshot.State.Destroyed.equals(snapshotVO.getState())) { return true; } if (Snapshot.State.Error.equals(snapshotVO.getState())) { - _snapshotDao.remove(snapshotId); + snapshotDao.remove(snapshotId); return true; } @@ -107,12 +113,12 @@ public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase { throw new InvalidParameterValueException("Unable to delete snapshotshot " + snapshotId + " because it is in the following state: " + snapshotVO.getState()); } - SnapshotObject snapshotObj = 
(SnapshotObject)_snapshotDataFactory.getSnapshot(snapshotId, DataStoreRole.Primary); + SnapshotObject snapshotObj = (SnapshotObject)snapshotDataFactory.getSnapshot(snapshotId, DataStoreRole.Primary); if (snapshotObj == null) { s_logger.debug("Can't find snapshot; deleting it in DB"); - _snapshotDao.remove(snapshotId); + snapshotDao.remove(snapshotId); return true; } @@ -165,7 +171,7 @@ public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase { throw new CloudRuntimeException("Only the " + ImageFormat.VHD.toString() + " image type is currently supported."); } - SnapshotVO snapshotVO = _snapshotDao.acquireInLockTable(snapshotInfo.getId()); + SnapshotVO snapshotVO = snapshotDao.acquireInLockTable(snapshotInfo.getId()); if (snapshotVO == null) { throw new CloudRuntimeException("Failed to acquire lock on the following snapshot: " + snapshotInfo.getId()); @@ -176,7 +182,24 @@ public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase { try { volumeInfo.stateTransit(Volume.Event.SnapshotRequested); - // tell the storage driver to create a back-end volume (eventually used to create a new SR on and to copy the VM snapshot VDI to) + // only XenServer is currently supported + HostVO hostVO = getHost(volumeInfo.getId()); + + boolean canStorageSystemCreateVolumeFromSnapshot = canStorageSystemCreateVolumeFromSnapshot(volumeInfo.getPoolId()); + boolean computeClusterSupportsResign = clusterDao.getSupportsResigning(hostVO.getClusterId()); + + // if canStorageSystemCreateVolumeFromSnapshot && computeClusterSupportsResign, then take a back-end snapshot or create a back-end clone; + // else, just create a new back-end volume (eventually used to create a new SR on and to copy a VDI to) + + if (canStorageSystemCreateVolumeFromSnapshot && computeClusterSupportsResign) { + SnapshotDetailsVO snapshotDetail = new SnapshotDetailsVO(snapshotInfo.getId(), + "takeSnapshot", + Boolean.TRUE.toString(), + false); + + snapshotDetailsDao.persist(snapshotDetail); + } + result = snapshotSvr.takeSnapshot(snapshotInfo); if (result.isFailed()) { @@ -185,9 +208,9 @@ public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase { throw new CloudRuntimeException(result.getResult()); } - // send a command to XenServer to create a VM snapshot on the applicable SR (get back the VDI UUID of the VM snapshot) - - performSnapshotAndCopyOnHostSide(volumeInfo, snapshotInfo); + if (!canStorageSystemCreateVolumeFromSnapshot || !computeClusterSupportsResign) { + performSnapshotAndCopyOnHostSide(volumeInfo, snapshotInfo); + } markAsBackedUp((SnapshotObject)result.getSnashot()); } @@ -199,19 +222,35 @@ public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase { volumeInfo.stateTransit(Volume.Event.OperationFailed); } - _snapshotDao.releaseFromLockTable(snapshotInfo.getId()); + snapshotDao.releaseFromLockTable(snapshotInfo.getId()); } return snapshotInfo; } + private boolean canStorageSystemCreateVolumeFromSnapshot(long storagePoolId) { + boolean supportsCloningVolumeFromSnapshot = false; + + DataStore dataStore = dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary); + + Map mapCapabilities = dataStore.getDriver().getCapabilities(); + + if (mapCapabilities != null) { + String value = mapCapabilities.get(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_SNAPSHOT.toString()); + + supportsCloningVolumeFromSnapshot = Boolean.valueOf(value); + } + + return supportsCloningVolumeFromSnapshot; + } + private void performSnapshotAndCopyOnHostSide(VolumeInfo volumeInfo, SnapshotInfo snapshotInfo) { 
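The capability checks above (canStorageSystemCreateVolumeFromSnapshot here, and canStorageSystemCreateVolumeFromVolume earlier in StorageSystemDataMotionStrategy) only return true when the storage driver advertises the corresponding keys. A minimal sketch, assuming a plug-in that can clone on the array, of how a driver might report them:

    @Override
    public Map<String, String> getCapabilities() {
        Map<String, String> capabilities = new HashMap<>();

        capabilities.put(DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT.toString(), Boolean.TRUE.toString());
        capabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_SNAPSHOT.toString(), Boolean.TRUE.toString());
        capabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_VOLUME.toString(), Boolean.TRUE.toString());

        return capabilities;
    }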
Map sourceDetails = null; - VolumeVO volumeVO = _volumeDao.findById(volumeInfo.getId()); + VolumeVO volumeVO = volumeDao.findById(volumeInfo.getId()); Long vmInstanceId = volumeVO.getInstanceId(); - VMInstanceVO vmInstanceVO = _vmInstanceDao.findById(vmInstanceId); + VMInstanceVO vmInstanceVO = vmInstanceDao.findById(vmInstanceId); Long hostId = null; @@ -233,11 +272,30 @@ public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase { sourceDetails = getSourceDetails(volumeInfo); } - HostVO hostVO = getHost(hostId, volumeVO); + HostVO hostVO = null; + + if (hostId != null) { + hostVO = hostDao.findById(hostId); + } + else { + Optional optHostVO = getHost(volumeInfo.getDataCenterId(), false); + + if (optHostVO.isPresent()) { + hostVO = optHostVO.get(); + } + } + + if (hostVO == null) { + final String errMsg = "Unable to locate an applicable host"; + + s_logger.error("performSnapshotAndCopyOnHostSide: " + errMsg); + + throw new CloudRuntimeException(errMsg); + } long storagePoolId = volumeVO.getPoolId(); - StoragePoolVO storagePoolVO = _storagePoolDao.findById(storagePoolId); - DataStore dataStore = _dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary); + StoragePoolVO storagePoolVO = storagePoolDao.findById(storagePoolId); + DataStore dataStore = dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary); Map destDetails = getDestDetails(storagePoolVO, snapshotInfo); @@ -248,23 +306,23 @@ public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase { try { // if sourceDetails != null, we need to connect the host(s) to the volume if (sourceDetails != null) { - _volService.grantAccess(volumeInfo, hostVO, dataStore); + volService.grantAccess(volumeInfo, hostVO, dataStore); } - _volService.grantAccess(snapshotInfo, hostVO, dataStore); + volService.grantAccess(snapshotInfo, hostVO, dataStore); - snapshotAndCopyAnswer = (SnapshotAndCopyAnswer)_agentMgr.send(hostVO.getId(), snapshotAndCopyCommand); + snapshotAndCopyAnswer = (SnapshotAndCopyAnswer)agentMgr.send(hostVO.getId(), snapshotAndCopyCommand); } catch (Exception ex) { throw new CloudRuntimeException(ex.getMessage()); } finally { try { - _volService.revokeAccess(snapshotInfo, hostVO, dataStore); + volService.revokeAccess(snapshotInfo, hostVO, dataStore); // if sourceDetails != null, we need to disconnect the host(s) from the volume if (sourceDetails != null) { - _volService.revokeAccess(volumeInfo, hostVO, dataStore); + volService.revokeAccess(volumeInfo, hostVO, dataStore); } } catch (Exception ex) { @@ -292,22 +350,22 @@ public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase { path, false); - _snapshotDetailsDao.persist(snapshotDetail); + snapshotDetailsDao.persist(snapshotDetail); } private Map getSourceDetails(VolumeInfo volumeInfo) { - Map sourceDetails = new HashMap(); + Map sourceDetails = new HashMap<>(); - VolumeVO volumeVO = _volumeDao.findById(volumeInfo.getId()); + VolumeVO volumeVO = volumeDao.findById(volumeInfo.getId()); long storagePoolId = volumeVO.getPoolId(); - StoragePoolVO storagePoolVO = _storagePoolDao.findById(storagePoolId); + StoragePoolVO storagePoolVO = storagePoolDao.findById(storagePoolId); sourceDetails.put(DiskTO.STORAGE_HOST, storagePoolVO.getHostAddress()); sourceDetails.put(DiskTO.STORAGE_PORT, String.valueOf(storagePoolVO.getPort())); sourceDetails.put(DiskTO.IQN, volumeVO.get_iScsiName()); - ChapInfo chapInfo = _volService.getChapInfo(volumeInfo, volumeInfo.getDataStore()); + ChapInfo chapInfo = volService.getChapInfo(volumeInfo, 
volumeInfo.getDataStore()); if (chapInfo != null) { sourceDetails.put(DiskTO.CHAP_INITIATOR_USERNAME, chapInfo.getInitiatorUsername()); @@ -320,7 +378,7 @@ public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase { } private Map getDestDetails(StoragePoolVO storagePoolVO, SnapshotInfo snapshotInfo) { - Map destDetails = new HashMap(); + Map destDetails = new HashMap<>(); destDetails.put(DiskTO.STORAGE_HOST, storagePoolVO.getHostAddress()); destDetails.put(DiskTO.STORAGE_PORT, String.valueOf(storagePoolVO.getPort())); @@ -338,7 +396,7 @@ public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase { } private String getProperty(long snapshotId, String property) { - SnapshotDetailsVO snapshotDetails = _snapshotDetailsDao.findDetail(snapshotId, property); + SnapshotDetailsVO snapshotDetails = snapshotDetailsDao.findDetail(snapshotId, property); if (snapshotDetails != null) { return snapshotDetails.getValue(); @@ -347,38 +405,87 @@ public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase { return null; } - private HostVO getHost(Long hostId, VolumeVO volumeVO) { - HostVO hostVO = _hostDao.findById(hostId); + private HostVO getHost(long volumeId) { + VolumeVO volumeVO = volumeDao.findById(volumeId); + + Long vmInstanceId = volumeVO.getInstanceId(); + VMInstanceVO vmInstanceVO = vmInstanceDao.findById(vmInstanceId); + + Long hostId = null; + + // if the volume to snapshot is associated with a VM + if (vmInstanceVO != null) { + hostId = vmInstanceVO.getHostId(); + + // if the VM is not associated with a host + if (hostId == null) { + hostId = vmInstanceVO.getLastHostId(); + } + } + + return getHost(volumeVO.getDataCenterId(), hostId); + } + + private HostVO getHost(long zoneId, Long hostId) { + Optional optHostVO = getHost(zoneId, true); + + if (optHostVO.isPresent()) { + return optHostVO.get(); + } + + HostVO hostVO = hostDao.findById(hostId); if (hostVO != null) { return hostVO; } - // pick a host in any XenServer cluster that's in the applicable zone + optHostVO = getHost(zoneId, false); - long zoneId = volumeVO.getDataCenterId(); - - List clusters = _mgr.searchForClusters(zoneId, new Long(0), Long.MAX_VALUE, HypervisorType.XenServer.toString()); - - if (clusters == null) { - throw new CloudRuntimeException("Unable to locate an applicable cluster"); + if (optHostVO.isPresent()) { + return optHostVO.get(); } + throw new CloudRuntimeException("Unable to locate an applicable host"); + } + + private Optional getHost(long zoneId, boolean computeClusterMustSupportResign) { + List clusters = mgr.searchForClusters(zoneId, 0L, Long.MAX_VALUE, HypervisorType.XenServer.toString()); + + if (clusters == null) { + clusters = new ArrayList<>(); + } + + Collections.shuffle(clusters, new Random(System.nanoTime())); + + clusters: for (Cluster cluster : clusters) { if (cluster.getAllocationState() == AllocationState.Enabled) { - List hosts = _hostDao.findByClusterId(cluster.getId()); + List hosts = hostDao.findByClusterId(cluster.getId()); if (hosts != null) { + Collections.shuffle(hosts, new Random(System.nanoTime())); + for (HostVO host : hosts) { if (host.getResourceState() == ResourceState.Enabled) { - return host; + if (computeClusterMustSupportResign) { + if (clusterDao.getSupportsResigning(cluster.getId())) { + return Optional.of(host); + } + else { + // no other host in the cluster in question should be able to satisfy our requirements here, so move on to the next cluster + continue clusters; + } + } + else { + return Optional.of(host); + } } } } } } - throw new 
CloudRuntimeException("Unable to locate an applicable cluster"); + return Optional.absent(); } private void markAsBackedUp(SnapshotObject snapshotObj) { @@ -406,18 +513,18 @@ public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase { long volumeId = snapshot.getVolumeId(); - VolumeVO volumeVO = _volumeDao.findByIdIncludingRemoved(volumeId); + VolumeVO volumeVO = volumeDao.findByIdIncludingRemoved(volumeId); long storagePoolId = volumeVO.getPoolId(); - DataStore dataStore = _dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary); + DataStore dataStore = dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary); if (dataStore != null) { Map mapCapabilities = dataStore.getDriver().getCapabilities(); if (mapCapabilities != null) { String value = mapCapabilities.get(DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT.toString()); - Boolean supportsStorageSystemSnapshots = new Boolean(value); + Boolean supportsStorageSystemSnapshots = Boolean.valueOf(value); if (supportsStorageSystemSnapshots) { return StrategyPriority.HIGHEST; diff --git a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/XenserverSnapshotStrategy.java b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/XenserverSnapshotStrategy.java index 99fc1612faa..25444843fe4 100644 --- a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/XenserverSnapshotStrategy.java +++ b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/XenserverSnapshotStrategy.java @@ -78,6 +78,8 @@ public class XenserverSnapshotStrategy extends SnapshotStrategyBase { VolumeDao volumeDao; @Inject SnapshotDataFactory snapshotDataFactory; + @Inject + private SnapshotDao _snapshotDao; @Override public SnapshotInfo backupSnapshot(SnapshotInfo snapshot) { @@ -289,7 +291,7 @@ public class XenserverSnapshotStrategy extends SnapshotStrategyBase { @Override public boolean revertSnapshot(SnapshotInfo snapshot) { if (canHandle(snapshot,SnapshotOperation.REVERT) == StrategyPriority.CANT_HANDLE) { - throw new UnsupportedOperationException("Reverting not supported. Create a template or volume based on the snapshot instead."); + throw new CloudRuntimeException("Reverting not supported. 
Create a template or volume based on the snapshot instead."); } SnapshotVO snapshotVO = snapshotDao.acquireInLockTable(snapshot.getId()); diff --git a/engine/storage/src/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java b/engine/storage/src/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java index 73a8544f51a..194f7bd857c 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java +++ b/engine/storage/src/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java @@ -223,7 +223,7 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement Volume volume = _volumeDao.findById(dskCh.getVolumeId()); List requestVolumes = new ArrayList(); requestVolumes.add(volume); - return storageMgr.storagePoolHasEnoughIops(requestVolumes, pool) && storageMgr.storagePoolHasEnoughSpace(requestVolumes, pool); + return storageMgr.storagePoolHasEnoughIops(requestVolumes, pool) && storageMgr.storagePoolHasEnoughSpace(requestVolumes, pool, plan.getClusterId()); } /* diff --git a/engine/storage/volume/pom.xml b/engine/storage/volume/pom.xml index e790df12dd9..340010ba34b 100644 --- a/engine/storage/volume/pom.xml +++ b/engine/storage/volume/pom.xml @@ -25,6 +25,11 @@ cloud-engine-storage ${project.version} + + org.apache.cloudstack + cloud-engine-storage-image + ${project.version} + diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/PrimaryDataStoreImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/PrimaryDataStoreImpl.java index f3c9e790277..81966784be0 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/PrimaryDataStoreImpl.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/PrimaryDataStoreImpl.java @@ -26,6 +26,7 @@ import javax.inject.Inject; import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreCapabilities; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider; import org.apache.cloudstack.engine.subsystem.api.storage.HostScope; @@ -65,6 +66,7 @@ import com.cloud.utils.db.GlobalLock; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.storage.encoding.EncodingType; +@SuppressWarnings("serial") public class PrimaryDataStoreImpl implements PrimaryDataStore { private static final Logger s_logger = Logger.getLogger(PrimaryDataStoreImpl.class); @@ -239,10 +241,34 @@ public class PrimaryDataStoreImpl implements PrimaryDataStore { return pdsv.isManaged(); } + private boolean canCloneVolume() { + return Boolean.valueOf(getDriver().getCapabilities().get(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_VOLUME.toString())); + } + + /** + * The parameter createEntryInTempSpoolRef in the overloaded create(DataObject, boolean) method only applies to managed storage. We pass + * in "true" here. + * + * In the case of managed storage that can create a volume from a volume (clone), if the DataObject passed in is a TemplateInfo, + * we do want to create an entry in the cloud.template_spool_ref table (so that multiple uses of the template can be leveraged from + * the one copy on managed storage). 
+ * + * In cases where UUID resigning is not available, then the code calling "create" should invoke the overloaded "create" method whose second + * parameter is a boolean. This code can pass in "false" so that an entry in the cloud.template_spool_ref table is not created (no template to share + * on the primary storage). + */ @Override - public DataObject create(DataObject obj) { + public DataObject create(DataObject dataObject) { + return create(dataObject, true); + } + + /** + * Please read the comment for the create(DataObject) method if you are planning on passing in "false" for createEntryInTempSpoolRef. + */ + @Override + public DataObject create(DataObject obj, boolean createEntryInTempSpoolRef) { // create template on primary storage - if (obj.getType() == DataObjectType.TEMPLATE && !isManaged()) { + if (obj.getType() == DataObjectType.TEMPLATE && (!isManaged() || (createEntryInTempSpoolRef && canCloneVolume()))) { try { String templateIdPoolIdString = "templateId:" + obj.getId() + "poolId:" + getId(); VMTemplateStoragePoolVO templateStoragePoolRef; diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java index a2fd656d4b2..05ec7d02706 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java @@ -19,14 +19,21 @@ package org.apache.cloudstack.storage.volume; import java.util.ArrayList; +import java.util.Collections; import java.util.Date; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Random; import javax.inject.Inject; +import com.cloud.dc.dao.ClusterDao; import com.cloud.offering.DiskOffering; +import com.cloud.org.Cluster; +import com.cloud.org.Grouping.AllocationState; +import com.cloud.resource.ResourceState; +import com.cloud.server.ManagementService; import com.cloud.storage.RegisterVolumePayload; import com.cloud.utils.Pair; import org.apache.cloudstack.engine.cloud.entity.api.VolumeEntity; @@ -36,6 +43,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; import org.apache.cloudstack.engine.subsystem.api.storage.DataMotionService; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreCapabilities; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver; import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector; @@ -59,8 +67,10 @@ import org.apache.cloudstack.storage.command.CommandResult; import org.apache.cloudstack.storage.command.CopyCmdAnswer; import org.apache.cloudstack.storage.command.DeleteCommand; import org.apache.cloudstack.storage.datastore.PrimaryDataStoreProviderManager; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreDao; import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO; +import org.apache.cloudstack.storage.image.store.TemplateObject; import org.apache.cloudstack.storage.to.TemplateObjectTO; import org.apache.cloudstack.storage.to.VolumeObjectTO; import org.apache.log4j.Logger; @@ -80,7 +90,9 @@ import com.cloud.event.UsageEventUtils; import 
com.cloud.exception.ConcurrentOperationException; import com.cloud.exception.ResourceAllocationException; import com.cloud.host.Host; +import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; +import com.cloud.host.dao.HostDetailsDao; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.storage.DataStoreRole; import com.cloud.storage.ScopeType; @@ -134,6 +146,14 @@ public class VolumeServiceImpl implements VolumeService { EndPointSelector _epSelector; @Inject HostDao _hostDao; + @Inject + private PrimaryDataStoreDao storagePoolDao; + @Inject + private HostDetailsDao hostDetailsDao; + @Inject + private ManagementService mgr; + @Inject + private ClusterDao clusterDao; public VolumeServiceImpl() { } @@ -160,11 +180,11 @@ public class VolumeServiceImpl implements VolumeService { } @Override - public ChapInfo getChapInfo(VolumeInfo volumeInfo, DataStore dataStore) { + public ChapInfo getChapInfo(DataObject dataObject, DataStore dataStore) { DataStoreDriver dataStoreDriver = dataStore.getDriver(); if (dataStoreDriver instanceof PrimaryDataStoreDriver) { - return ((PrimaryDataStoreDriver)dataStoreDriver).getChapInfo(volumeInfo); + return ((PrimaryDataStoreDriver)dataStoreDriver).getChapInfo(dataObject); } return null; @@ -554,6 +574,49 @@ public class VolumeServiceImpl implements VolumeService { return null; } + protected Void createManagedTemplateImageCallback(AsyncCallbackDispatcher callback, CreateVolumeContext context) { + CreateCmdResult result = callback.getResult(); + VolumeApiResult res = new VolumeApiResult(null); + + res.setResult(result.getResult()); + + AsyncCallFuture future = context.getFuture(); + DataObject templateOnPrimaryStoreObj = context.getVolume(); + + if (result.isSuccess()) { + ((TemplateObject)templateOnPrimaryStoreObj).setInstallPath(result.getPath()); + templateOnPrimaryStoreObj.processEvent(Event.OperationSuccessed, result.getAnswer()); + } + else { + templateOnPrimaryStoreObj.processEvent(Event.OperationFailed); + } + + future.complete(res); + + return null; + } + + protected Void copyManagedTemplateCallback(AsyncCallbackDispatcher callback, CreateBaseImageContext context) { + CopyCommandResult result = callback.getResult(); + VolumeApiResult res = new VolumeApiResult(context.getVolume()); + + res.setResult(result.getResult()); + + AsyncCallFuture future = context.getFuture(); + DataObject templateOnPrimaryStoreObj = context.destObj; + + if (result.isSuccess()) { + templateOnPrimaryStoreObj.processEvent(Event.OperationSuccessed, result.getAnswer()); + } + else { + templateOnPrimaryStoreObj.processEvent(Event.OperationFailed); + } + + future.complete(res); + + return null; + } + @DB protected Void copyBaseImageCallback(AsyncCallbackDispatcher callback, CreateBaseImageContext context) { CopyCommandResult result = callback.getResult(); @@ -636,8 +699,10 @@ public class VolumeServiceImpl implements VolumeService { if (templatePoolRef == null) { s_logger.warn("Reset Template State On Pool failed - unable to lock TemplatePoolRef " + templatePoolRefId); } else { + templatePoolRef.setTemplateSize(0); templatePoolRef.setDownloadState(VMTemplateStorageResourceAssoc.Status.NOT_DOWNLOADED); templatePoolRef.setState(ObjectInDataStoreStateMachine.State.Allocated); + _tmpltPoolDao.update(templatePoolRefId, templatePoolRef); } }finally { @@ -653,50 +718,132 @@ public class VolumeServiceImpl implements VolumeService { return null; } - @Override - public AsyncCallFuture createManagedStorageAndVolumeFromTemplateAsync(VolumeInfo volumeInfo, long 
destDataStoreId, - TemplateInfo srcTemplateInfo, long destHostId) { - PrimaryDataStore destPrimaryDataStore = dataStoreMgr.getPrimaryDataStore(destDataStoreId); - TemplateInfo destTemplateInfo = (TemplateInfo)destPrimaryDataStore.create(srcTemplateInfo); - Host destHost = _hostDao.findById(destHostId); + /** + * Creates a template volume on managed storage, which will be used for creating ROOT volumes by cloning. + * + * @param srcTemplateInfo Source template on secondary storage + * @param destPrimaryDataStore Managed storage on which we need to create the volume + */ + private TemplateInfo createManagedTemplateVolume(TemplateInfo srcTemplateInfo, PrimaryDataStore destPrimaryDataStore) { + // create a template volume on primary storage + AsyncCallFuture createTemplateFuture = new AsyncCallFuture<>(); + TemplateInfo templateOnPrimary = (TemplateInfo)destPrimaryDataStore.create(srcTemplateInfo); - if (destHost == null) { - throw new CloudRuntimeException("Destinatin host should not be null."); + VMTemplateStoragePoolVO templatePoolRef = _tmpltPoolDao.findByPoolTemplate(destPrimaryDataStore.getId(), templateOnPrimary.getId()); + + if (templatePoolRef == null) { + throw new CloudRuntimeException("Failed to find template " + srcTemplateInfo.getUniqueName() + " in storage pool " + destPrimaryDataStore.getId()); } - AsyncCallFuture future = new AsyncCallFuture(); + // At this point, we have an entry in the DB that points to our cached template. + // We need to lock it as there may be other VMs that may get started using the same template. + // We want to avoid having to create multiple cache copies of the same template. + + int storagePoolMaxWaitSeconds = NumbersUtil.parseInt(configDao.getValue(Config.StoragePoolMaxWaitSeconds.key()), 3600); + long templatePoolRefId = templatePoolRef.getId(); + + templatePoolRef = _tmpltPoolDao.acquireInLockTable(templatePoolRefId, storagePoolMaxWaitSeconds); + + if (templatePoolRef == null) { + throw new CloudRuntimeException("Unable to acquire lock on VMTemplateStoragePool: " + templatePoolRefId); + } + + // Template already exists + if (templatePoolRef.getState() == ObjectInDataStoreStateMachine.State.Ready) { + _tmpltPoolDao.releaseFromLockTable(templatePoolRefId); + + return templateOnPrimary; + } try { - // must call driver to have a volume created - AsyncCallFuture createVolumeFuture = createVolumeAsync(volumeInfo, destPrimaryDataStore); + // create a cache volume on the back-end - VolumeApiResult createVolumeResult = createVolumeFuture.get(); + templateOnPrimary.processEvent(Event.CreateOnlyRequested); - if (createVolumeResult.isFailed()) { - throw new CloudRuntimeException("Creation of a volume failed: " + createVolumeResult.getResult()); + CreateVolumeContext createContext = new CreateVolumeContext<>(null, templateOnPrimary, createTemplateFuture); + AsyncCallbackDispatcher createCaller = AsyncCallbackDispatcher.create(this); + + createCaller.setCallback(createCaller.getTarget().createManagedTemplateImageCallback(null, null)).setContext(createContext); + + destPrimaryDataStore.getDriver().createAsync(destPrimaryDataStore, templateOnPrimary, createCaller); + + VolumeApiResult result = createTemplateFuture.get(); + + if (result.isFailed()) { + String errMesg = result.getResult(); + + throw new CloudRuntimeException("Unable to create template " + templateOnPrimary.getId() + + " on primary storage " + destPrimaryDataStore.getId() + ":" + errMesg); } + } catch (Throwable e) { + s_logger.debug("Failed to create template volume on storage", e); - // refresh 
the volume from the DB - volumeInfo = volFactory.getVolume(volumeInfo.getId(), destPrimaryDataStore); + templateOnPrimary.processEvent(Event.OperationFailed); - grantAccess(volumeInfo, destHost, destPrimaryDataStore); + throw new CloudRuntimeException(e.getMessage()); + } + finally { + _tmpltPoolDao.releaseFromLockTable(templatePoolRefId); + } - ManagedCreateBaseImageContext context = new ManagedCreateBaseImageContext(null, volumeInfo, - destPrimaryDataStore, srcTemplateInfo, future); - AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); - caller.setCallback(caller.getTarget().managedCopyBaseImageCallback(null, null)).setContext(context); + return templateOnPrimary; + } + /** + * This function copies a template from secondary storage to a template volume + * created on managed storage. This template volume will be used as a cache. + * Instead of copying the template to a ROOT volume every time, a clone is performed instead. + * + * @param srcTemplateInfo Source from which to copy the template + * @param templateOnPrimary Dest to copy to + * @param templatePoolRef Template reference on primary storage (entry in the template_spool_ref) + * @param destPrimaryDataStore The managed primary storage + * @param destHost The host that we will use for the copy + */ + private void copyTemplateToManagedTemplateVolume(TemplateInfo srcTemplateInfo, TemplateInfo templateOnPrimary, VMTemplateStoragePoolVO templatePoolRef, + PrimaryDataStore destPrimaryDataStore, Host destHost) + { + AsyncCallFuture copyTemplateFuture = new AsyncCallFuture<>(); + int storagePoolMaxWaitSeconds = NumbersUtil.parseInt(configDao.getValue(Config.StoragePoolMaxWaitSeconds.key()), 3600); + long templatePoolRefId = templatePoolRef.getId(); + + templatePoolRef = _tmpltPoolDao.acquireInLockTable(templatePoolRefId, storagePoolMaxWaitSeconds); + + if (templatePoolRef == null) { + throw new CloudRuntimeException("Unable to acquire lock on VMTemplateStoragePool: " + templatePoolRefId); + } + + if (templatePoolRef.getDownloadState() == Status.DOWNLOADED) { + // There can be cases where we acquired the lock, but the template + // was already copied by a previous thread. Just return in that case. + + s_logger.debug("Template already downloaded, nothing to do"); + + return; + } + + try { + // copy the template from sec storage to the created volume + CreateBaseImageContext copyContext = new CreateBaseImageContext<>( + null, null, destPrimaryDataStore, srcTemplateInfo, + copyTemplateFuture, templateOnPrimary, templatePoolRefId + ); + + AsyncCallbackDispatcher copyCaller = AsyncCallbackDispatcher.create(this); + copyCaller.setCallback(copyCaller.getTarget().copyManagedTemplateCallback(null, null)).setContext(copyContext); + + // Populate details which will be later read by the storage subsystem. 
Map details = new HashMap(); details.put(PrimaryDataStore.MANAGED, Boolean.TRUE.toString()); details.put(PrimaryDataStore.STORAGE_HOST, destPrimaryDataStore.getHostAddress()); details.put(PrimaryDataStore.STORAGE_PORT, String.valueOf(destPrimaryDataStore.getPort())); - // for managed storage, the storage repository (XenServer) or datastore (ESX) name is based off of the iScsiName property of a volume - details.put(PrimaryDataStore.MANAGED_STORE_TARGET, volumeInfo.get_iScsiName()); - details.put(PrimaryDataStore.MANAGED_STORE_TARGET_ROOT_VOLUME, volumeInfo.getName()); - details.put(PrimaryDataStore.VOLUME_SIZE, String.valueOf(volumeInfo.getSize())); + details.put(PrimaryDataStore.MANAGED_STORE_TARGET, ((TemplateObject)templateOnPrimary).getInstallPath()); + details.put(PrimaryDataStore.MANAGED_STORE_TARGET_ROOT_VOLUME, srcTemplateInfo.getUniqueName()); + details.put(PrimaryDataStore.REMOVE_AFTER_COPY, Boolean.TRUE.toString()); + details.put(PrimaryDataStore.VOLUME_SIZE, String.valueOf(templateOnPrimary.getSize())); - ChapInfo chapInfo = getChapInfo(volumeInfo, destPrimaryDataStore); + ChapInfo chapInfo = getChapInfo(templateOnPrimary, destPrimaryDataStore); if (chapInfo != null) { details.put(PrimaryDataStore.CHAP_INITIATOR_USERNAME, chapInfo.getInitiatorUsername()); @@ -705,17 +852,142 @@ public class VolumeServiceImpl implements VolumeService { details.put(PrimaryDataStore.CHAP_TARGET_SECRET, chapInfo.getTargetSecret()); } + templateOnPrimary.processEvent(Event.CopyingRequested); + destPrimaryDataStore.setDetails(details); - motionSrv.copyAsync(srcTemplateInfo, destTemplateInfo, destHost, caller); + grantAccess(templateOnPrimary, destHost, destPrimaryDataStore); + + VolumeApiResult result = null; + + try { + motionSrv.copyAsync(srcTemplateInfo, templateOnPrimary, destHost, copyCaller); + + result = copyTemplateFuture.get(); + } + finally { + revokeAccess(templateOnPrimary, destHost, destPrimaryDataStore); + } + + if (result.isFailed()) { + throw new CloudRuntimeException("Failed to copy template " + templateOnPrimary.getId() + + " to primary storage " + destPrimaryDataStore.getId() + ": " + result.getResult()); + // XXX: I find it is useful to destroy the volume on primary storage instead of another thread trying the copy again because I've seen + // something weird happens to the volume (XenServer creates an SR, but the VDI copy can fail). + // For now, I just retry the copy. 
+ } } - catch (Throwable t) { + catch (Throwable e) { + s_logger.debug("Failed to create a template on primary storage", e); + + templateOnPrimary.processEvent(Event.OperationFailed); + + throw new CloudRuntimeException(e.getMessage()); + } + finally { + _tmpltPoolDao.releaseFromLockTable(templatePoolRefId); + } + } + + /** + * Clones the template volume on managed storage to the ROOT volume + * + * @param volumeInfo ROOT volume to create + * @param templateOnPrimary Template from which to clone the ROOT volume + * @param destPrimaryDataStore Primary storage of the volume + * @param future For async + */ + private void createManagedVolumeCloneTemplateAsync(VolumeInfo volumeInfo, TemplateInfo templateOnPrimary, PrimaryDataStore destPrimaryDataStore, + AsyncCallFuture future) { + VMTemplateStoragePoolVO templatePoolRef = _tmpltPoolDao.findByPoolTemplate(destPrimaryDataStore.getId(), templateOnPrimary.getId()); + + if (templatePoolRef == null) { + throw new CloudRuntimeException("Failed to find template " + templateOnPrimary.getUniqueName() + " in storage pool " + destPrimaryDataStore.getId()); + } + + //XXX: not sure if this the right thing to do here. We can always fallback to the "copy from sec storage" + if (templatePoolRef.getDownloadState() == Status.NOT_DOWNLOADED) { + throw new CloudRuntimeException("Template " + templateOnPrimary.getUniqueName() + " has not been downloaded to primary storage."); + } + + try { + volumeInfo.processEvent(Event.CreateOnlyRequested); + + CreateVolumeFromBaseImageContext context = + new CreateVolumeFromBaseImageContext<>(null, volumeInfo, destPrimaryDataStore, templateOnPrimary, future, null); + + AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); + + caller.setCallback(caller.getTarget().createVolumeFromBaseImageCallBack(null, null)); + caller.setContext(context); + + motionSrv.copyAsync(templateOnPrimary, volumeInfo, caller); + } catch (Throwable e) { + s_logger.debug("Failed to clone template on primary storage", e); + + volumeInfo.processEvent(Event.OperationFailed); + + throw new CloudRuntimeException(e.getMessage()); + } + } + + private void createManagedVolumeCopyTemplateAsync(VolumeInfo volumeInfo, PrimaryDataStore primaryDataStore, TemplateInfo srcTemplateInfo, Host destHost, + AsyncCallFuture future) { + try { + // Create a volume on managed storage. + + TemplateInfo destTemplateInfo = (TemplateInfo)primaryDataStore.create(srcTemplateInfo, false); + + AsyncCallFuture createVolumeFuture = createVolumeAsync(volumeInfo, primaryDataStore); + VolumeApiResult createVolumeResult = createVolumeFuture.get(); + + if (createVolumeResult.isFailed()) { + throw new CloudRuntimeException("Creation of a volume failed: " + createVolumeResult.getResult()); + } + + // Refresh the volume info from the DB. 
+ volumeInfo = volFactory.getVolume(volumeInfo.getId(), primaryDataStore); + + ManagedCreateBaseImageContext context = new ManagedCreateBaseImageContext(null, volumeInfo, + primaryDataStore, srcTemplateInfo, future); + AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); + + caller.setCallback(caller.getTarget().managedCopyBaseImageCallback(null, null)).setContext(context); + + Map details = new HashMap(); + + details.put(PrimaryDataStore.MANAGED, Boolean.TRUE.toString()); + details.put(PrimaryDataStore.STORAGE_HOST, primaryDataStore.getHostAddress()); + details.put(PrimaryDataStore.STORAGE_PORT, String.valueOf(primaryDataStore.getPort())); + // for managed storage, the storage repository (XenServer) or datastore (ESX) name is based off of the iScsiName property of a volume + details.put(PrimaryDataStore.MANAGED_STORE_TARGET, volumeInfo.get_iScsiName()); + details.put(PrimaryDataStore.MANAGED_STORE_TARGET_ROOT_VOLUME, volumeInfo.getName()); + details.put(PrimaryDataStore.VOLUME_SIZE, String.valueOf(volumeInfo.getSize())); + + ChapInfo chapInfo = getChapInfo(volumeInfo, primaryDataStore); + + if (chapInfo != null) { + details.put(PrimaryDataStore.CHAP_INITIATOR_USERNAME, chapInfo.getInitiatorUsername()); + details.put(PrimaryDataStore.CHAP_INITIATOR_SECRET, chapInfo.getInitiatorSecret()); + details.put(PrimaryDataStore.CHAP_TARGET_USERNAME, chapInfo.getTargetUsername()); + details.put(PrimaryDataStore.CHAP_TARGET_SECRET, chapInfo.getTargetSecret()); + } + + primaryDataStore.setDetails(details); + + grantAccess(volumeInfo, destHost, primaryDataStore); + + try { + motionSrv.copyAsync(srcTemplateInfo, destTemplateInfo, destHost, caller); + } + finally { + revokeAccess(volumeInfo, destHost, primaryDataStore); + } + } catch (Throwable t) { String errMsg = t.toString(); volumeInfo.processEvent(Event.DestroyRequested); - revokeAccess(volumeInfo, destHost, destPrimaryDataStore); - try { AsyncCallFuture expungeVolumeFuture = expungeVolumeAsync(volumeInfo); @@ -735,10 +1007,112 @@ public class VolumeServiceImpl implements VolumeService { future.complete(result); } + } + + @Override + public AsyncCallFuture createManagedStorageVolumeFromTemplateAsync(VolumeInfo volumeInfo, long destDataStoreId, + TemplateInfo srcTemplateInfo, long destHostId) { + PrimaryDataStore destPrimaryDataStore = dataStoreMgr.getPrimaryDataStore(destDataStoreId); + Host destHost = _hostDao.findById(destHostId); + + if (destHost == null) { + throw new CloudRuntimeException("Destination host should not be null."); + } + + Boolean storageCanCloneVolume = new Boolean( + destPrimaryDataStore.getDriver().getCapabilities().get(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_VOLUME.toString()) + ); + + boolean computeZoneSupportsResign = computeZoneSupportsResign(destHost.getDataCenterId(), destHost.getHypervisorType()); + + AsyncCallFuture future = new AsyncCallFuture<>(); + + if (storageCanCloneVolume && computeZoneSupportsResign) { + s_logger.debug("Storage " + destDataStoreId + " can support cloning using a cached template and host cluster can perform UUID resigning."); + + TemplateInfo templateOnPrimary = destPrimaryDataStore.getTemplate(srcTemplateInfo.getId()); + + if (templateOnPrimary == null) { + templateOnPrimary = createManagedTemplateVolume(srcTemplateInfo, destPrimaryDataStore); + + if (templateOnPrimary == null) { + throw new CloudRuntimeException("Failed to create template " + srcTemplateInfo.getUniqueName() + " on primary storage: " + destDataStoreId); + } + } + + // Copy the template to the template 
volume. + VMTemplateStoragePoolVO templatePoolRef = _tmpltPoolDao.findByPoolTemplate(destPrimaryDataStore.getId(), templateOnPrimary.getId()); + + if (templatePoolRef == null) { + throw new CloudRuntimeException("Failed to find template " + + srcTemplateInfo.getUniqueName() + " in storage pool " + + destPrimaryDataStore.getId() + ); + } + + if (templatePoolRef.getDownloadState() == Status.NOT_DOWNLOADED) { + copyTemplateToManagedTemplateVolume(srcTemplateInfo, templateOnPrimary, templatePoolRef, destPrimaryDataStore, destHost); + } + + // We have a template on primary storage. Clone it to new volume. + s_logger.debug("Creating a clone from template on primary storage " + destDataStoreId); + createManagedVolumeCloneTemplateAsync(volumeInfo, templateOnPrimary, destPrimaryDataStore, future); + } else { + s_logger.debug("Primary storage does not support cloning or no support for UUID resigning on the host side; copying the template normally"); + createManagedVolumeCopyTemplateAsync(volumeInfo, destPrimaryDataStore, srcTemplateInfo, destHost, future); + } return future; } + private boolean computeZoneSupportsResign(long zoneId, HypervisorType hypervisorType) { + return getHost(zoneId, hypervisorType, true) != null; + } + + private HostVO getHost(Long zoneId, HypervisorType hypervisorType, boolean computeClusterMustSupportResign) { + if (zoneId == null) { + throw new CloudRuntimeException("Zone ID cannot be null."); + } + + List clusters = mgr.searchForClusters(zoneId, new Long(0), Long.MAX_VALUE, hypervisorType.toString()); + + if (clusters == null) { + clusters = new ArrayList<>(); + } + + Collections.shuffle(clusters, new Random(System.nanoTime())); + + clusters: + for (Cluster cluster : clusters) { + if (cluster.getAllocationState() == AllocationState.Enabled) { + List hosts = _hostDao.findByClusterId(cluster.getId()); + + if (hosts != null) { + Collections.shuffle(hosts, new Random(System.nanoTime())); + + for (HostVO host : hosts) { + if (host.getResourceState() == ResourceState.Enabled) { + if (computeClusterMustSupportResign) { + if (clusterDao.getSupportsResigning(cluster.getId())) { + return host; + } + else { + // no other host in the cluster in question should be able to satisfy our requirements here, so move on to the next cluster + continue clusters; + } + } + else { + return host; + } + } + } + } + } + } + + return null; + } + @DB @Override public AsyncCallFuture createVolumeFromTemplateAsync(VolumeInfo volume, long dataStoreId, TemplateInfo template) { @@ -1332,7 +1706,8 @@ public class VolumeServiceImpl implements VolumeService { if (ep != null) { VolumeVO volume = volDao.findById(volumeId); PrimaryDataStore primaryDataStore = this.dataStoreMgr.getPrimaryDataStore(volume.getPoolId()); - ResizeVolumeCommand resizeCmd = new ResizeVolumeCommand(volume.getPath(), new StorageFilerTO(primaryDataStore), volume.getSize(), newSize, true, instanceName); + ResizeVolumeCommand resizeCmd = new ResizeVolumeCommand(volume.getPath(), new StorageFilerTO(primaryDataStore), + volume.getSize(), newSize, true, instanceName, primaryDataStore.isManaged(), volume.get_iScsiName()); answer = ep.sendMessage(resizeCmd); } else { diff --git a/plugins/api/solidfire-intg-test/pom.xml b/plugins/api/solidfire-intg-test/pom.xml index a15d7b3e85f..fa5302b14b5 100644 --- a/plugins/api/solidfire-intg-test/pom.xml +++ b/plugins/api/solidfire-intg-test/pom.xml @@ -19,7 +19,7 @@ 4.0.0 cloud-plugin-api-solidfire-intg-test - Apache CloudStack Plugin - API SolidFire + Apache CloudStack Plugin - API SolidFire Integration 
Testing org.apache.cloudstack cloudstack-plugins diff --git a/plugins/api/solidfire-intg-test/resources/META-INF/cloudstack/solidfire-intg-test/spring-solidfire-intg-test-context.xml b/plugins/api/solidfire-intg-test/resources/META-INF/cloudstack/solidfire-intg-test/spring-solidfire-intg-test-context.xml index 1bab7349caa..2fe875af698 100644 --- a/plugins/api/solidfire-intg-test/resources/META-INF/cloudstack/solidfire-intg-test/spring-solidfire-intg-test-context.xml +++ b/plugins/api/solidfire-intg-test/resources/META-INF/cloudstack/solidfire-intg-test/spring-solidfire-intg-test-context.xml @@ -27,6 +27,8 @@ http://www.springframework.org/schema/context/spring-context-3.0.xsd" > - + + + diff --git a/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/command/admin/solidfire/GetPathForVolumeCmd.java b/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/command/admin/solidfire/GetPathForVolumeCmd.java new file mode 100644 index 00000000000..5ff178a2e92 --- /dev/null +++ b/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/command/admin/solidfire/GetPathForVolumeCmd.java @@ -0,0 +1,67 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.api.command.admin.solidfire; + +import javax.inject.Inject; + +import org.apache.log4j.Logger; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.response.solidfire.ApiPathForVolumeResponse; +import org.apache.cloudstack.util.solidfire.SolidFireIntegrationTestUtil; + +@APICommand(name = "getPathForVolume", responseObject = ApiPathForVolumeResponse.class, description = "Get the path associated with the provided volume UUID", + requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) +public class GetPathForVolumeCmd extends BaseCmd { + private static final Logger LOGGER = Logger.getLogger(GetPathForVolumeCmd.class.getName()); + private static final String NAME = "getpathforvolumeresponse"; + + @Parameter(name = ApiConstants.VOLUME_ID, type = CommandType.STRING, description = "CloudStack Volume UUID", required = true) + private String _volumeUuid; + + @Inject private SolidFireIntegrationTestUtil _util; + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public String getCommandName() { + return NAME; + } + + @Override + public long getEntityOwnerId() { + return _util.getAccountIdForVolumeUuid(_volumeUuid); + } + + @Override + public void execute() { + LOGGER.info("'GetPathForVolumeIdCmd.execute' method invoked"); + + String pathForVolume = _util.getPathForVolumeUuid(_volumeUuid); + + ApiPathForVolumeResponse response = new ApiPathForVolumeResponse(pathForVolume); + + response.setResponseName(getCommandName()); + response.setObjectName("apipathforvolume"); + + setResponseObject(response); + } +} diff --git a/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/command/user/solidfire/GetSolidFireAccountIdCmd.java b/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/command/admin/solidfire/GetSolidFireAccountIdCmd.java similarity index 60% rename from plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/command/user/solidfire/GetSolidFireAccountIdCmd.java rename to plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/command/admin/solidfire/GetSolidFireAccountIdCmd.java index f4c0076f867..9bb8481c3c4 100644 --- a/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/command/user/solidfire/GetSolidFireAccountIdCmd.java +++ b/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/command/admin/solidfire/GetSolidFireAccountIdCmd.java @@ -14,10 +14,7 @@ // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. 
-package org.apache.cloudstack.api.command.user.solidfire; - -import com.cloud.user.Account; -import com.cloud.user.dao.AccountDao; +package org.apache.cloudstack.api.command.admin.solidfire; import javax.inject.Inject; @@ -27,26 +24,23 @@ import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.BaseCmd; import org.apache.cloudstack.api.Parameter; -import org.apache.cloudstack.api.response.ApiSolidFireAccountIdResponse; -import org.apache.cloudstack.context.CallContext; -import org.apache.cloudstack.solidfire.ApiSolidFireService; -import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; -import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.api.response.solidfire.ApiSolidFireAccountIdResponse; +import org.apache.cloudstack.solidfire.SolidFireIntegrationTestManager; +import org.apache.cloudstack.util.solidfire.SolidFireIntegrationTestUtil; @APICommand(name = "getSolidFireAccountId", responseObject = ApiSolidFireAccountIdResponse.class, description = "Get SolidFire Account ID", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class GetSolidFireAccountIdCmd extends BaseCmd { - private static final Logger s_logger = Logger.getLogger(GetSolidFireAccountIdCmd.class.getName()); - private static final String s_name = "getsolidfireaccountidresponse"; + private static final Logger LOGGER = Logger.getLogger(GetSolidFireAccountIdCmd.class.getName()); + private static final String NAME = "getsolidfireaccountidresponse"; @Parameter(name = ApiConstants.ACCOUNT_ID, type = CommandType.STRING, description = "CloudStack Account UUID", required = true) - private String accountUuid; + private String csAccountUuid; @Parameter(name = ApiConstants.STORAGE_ID, type = CommandType.STRING, description = "Storage Pool UUID", required = true) private String storagePoolUuid; - @Inject private ApiSolidFireService _apiSolidFireService; - @Inject private AccountDao _accountDao; - @Inject private PrimaryDataStoreDao _storagePoolDao; + @Inject private SolidFireIntegrationTestManager manager; + @Inject private SolidFireIntegrationTestUtil util; ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// @@ -54,26 +48,21 @@ public class GetSolidFireAccountIdCmd extends BaseCmd { @Override public String getCommandName() { - return s_name; + return NAME; } @Override public long getEntityOwnerId() { - Account account = CallContext.current().getCallingAccount(); - - if (account != null) { - return account.getId(); - } - - return Account.ACCOUNT_ID_SYSTEM; // no account info given, parent this command to SYSTEM so ERROR events are tracked + return util.getAccountIdForAccountUuid(csAccountUuid); } @Override public void execute() { - Account account = _accountDao.findByUuid(accountUuid); - StoragePoolVO storagePool = _storagePoolDao.findByUuid(storagePoolUuid); + LOGGER.info("'GetSolidFireAccountIdCmd.execute' method invoked"); - ApiSolidFireAccountIdResponse response = _apiSolidFireService.getSolidFireAccountId(account.getId(), storagePool.getId()); + long sfAccountId = manager.getSolidFireAccountId(csAccountUuid, storagePoolUuid); + + ApiSolidFireAccountIdResponse response = new ApiSolidFireAccountIdResponse(sfAccountId); response.setResponseName(getCommandName()); response.setObjectName("apisolidfireaccountid"); diff --git 
a/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/command/user/solidfire/GetSolidFireVolumeAccessGroupIdCmd.java b/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/command/admin/solidfire/GetSolidFireVolumeAccessGroupIdCmd.java similarity index 70% rename from plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/command/user/solidfire/GetSolidFireVolumeAccessGroupIdCmd.java rename to plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/command/admin/solidfire/GetSolidFireVolumeAccessGroupIdCmd.java index c432fb1109e..5c15e01a30b 100644 --- a/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/command/user/solidfire/GetSolidFireVolumeAccessGroupIdCmd.java +++ b/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/command/admin/solidfire/GetSolidFireVolumeAccessGroupIdCmd.java @@ -14,12 +14,9 @@ // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. -package org.apache.cloudstack.api.command.user.solidfire; +package org.apache.cloudstack.api.command.admin.solidfire; import com.cloud.user.Account; -import com.cloud.org.Cluster; -import com.cloud.storage.StoragePool; -import com.cloud.dc.dao.ClusterDao; import javax.inject.Inject; @@ -29,25 +26,24 @@ import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.BaseCmd; import org.apache.cloudstack.api.Parameter; -import org.apache.cloudstack.api.response.ApiSolidFireVolumeAccessGroupIdResponse; +import org.apache.cloudstack.api.response.solidfire.ApiSolidFireVolumeAccessGroupIdResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.cloudstack.solidfire.ApiSolidFireService; -import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.solidfire.SolidFireIntegrationTestManager; +import org.apache.cloudstack.util.solidfire.SolidFireIntegrationTestUtil; @APICommand(name = "getSolidFireVolumeAccessGroupId", responseObject = ApiSolidFireVolumeAccessGroupIdResponse.class, description = "Get the SF Volume Access Group ID", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class GetSolidFireVolumeAccessGroupIdCmd extends BaseCmd { - private static final Logger s_logger = Logger.getLogger(GetSolidFireVolumeAccessGroupIdCmd.class.getName()); - private static final String s_name = "getsolidfirevolumeaccessgroupidresponse"; + private static final Logger LOGGER = Logger.getLogger(GetSolidFireVolumeAccessGroupIdCmd.class.getName()); + private static final String NAME = "getsolidfirevolumeaccessgroupidresponse"; @Parameter(name = ApiConstants.CLUSTER_ID, type = CommandType.STRING, description = "Cluster UUID", required = true) private String clusterUuid; @Parameter(name = ApiConstants.STORAGE_ID, type = CommandType.STRING, description = "Storage Pool UUID", required = true) private String storagePoolUuid; - @Inject private ApiSolidFireService _apiSolidFireService; - @Inject private ClusterDao _clusterDao; - @Inject private PrimaryDataStoreDao _storagePoolDao; + @Inject private SolidFireIntegrationTestManager manager; + @Inject private SolidFireIntegrationTestUtil util; ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// @@ -55,7 +51,7 @@ public class GetSolidFireVolumeAccessGroupIdCmd extends BaseCmd { @Override public String getCommandName() { - return s_name; + return NAME; } @Override @@ -71,10 +67,11 @@ 
public class GetSolidFireVolumeAccessGroupIdCmd extends BaseCmd { @Override public void execute() { - Cluster cluster = _clusterDao.findByUuid(clusterUuid); - StoragePool storagePool = _storagePoolDao.findByUuid(storagePoolUuid); + LOGGER.info("'GetSolidFireVolumeAccessGroupIdCmd.execute' method invoked"); - ApiSolidFireVolumeAccessGroupIdResponse response = _apiSolidFireService.getSolidFireVolumeAccessGroupId(cluster.getId(), storagePool.getId()); + long sfVagId = manager.getSolidFireVolumeAccessGroupId(clusterUuid, storagePoolUuid); + + ApiSolidFireVolumeAccessGroupIdResponse response = new ApiSolidFireVolumeAccessGroupIdResponse(sfVagId); response.setResponseName(getCommandName()); response.setObjectName("apisolidfirevolumeaccessgroupid"); diff --git a/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/command/user/solidfire/GetSolidFireVolumeSizeCmd.java b/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/command/admin/solidfire/GetSolidFireVolumeSizeCmd.java similarity index 57% rename from plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/command/user/solidfire/GetSolidFireVolumeSizeCmd.java rename to plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/command/admin/solidfire/GetSolidFireVolumeSizeCmd.java index 3a27a668117..d7c8acfe378 100644 --- a/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/command/user/solidfire/GetSolidFireVolumeSizeCmd.java +++ b/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/command/admin/solidfire/GetSolidFireVolumeSizeCmd.java @@ -14,12 +14,7 @@ // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. -package org.apache.cloudstack.api.command.user.solidfire; - -import com.cloud.storage.Volume; -import com.cloud.user.Account; -import com.cloud.storage.dao.VolumeDao; -import com.cloud.storage.StoragePool; +package org.apache.cloudstack.api.command.admin.solidfire; import javax.inject.Inject; @@ -29,25 +24,21 @@ import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.BaseCmd; import org.apache.cloudstack.api.Parameter; -import org.apache.cloudstack.api.response.ApiSolidFireVolumeSizeResponse; -import org.apache.cloudstack.context.CallContext; -import org.apache.cloudstack.solidfire.ApiSolidFireService; -import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.api.response.solidfire.ApiSolidFireVolumeSizeResponse; +import org.apache.cloudstack.solidfire.SolidFireIntegrationTestManager; +import org.apache.cloudstack.util.solidfire.SolidFireIntegrationTestUtil; @APICommand(name = "getSolidFireVolumeSize", responseObject = ApiSolidFireVolumeSizeResponse.class, description = "Get the SF volume size including Hypervisor Snapshot Reserve", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class GetSolidFireVolumeSizeCmd extends BaseCmd { - private static final Logger s_logger = Logger.getLogger(GetSolidFireVolumeSizeCmd.class.getName()); - private static final String s_name = "getsolidfirevolumesizeresponse"; + private static final Logger LOGGER = Logger.getLogger(GetSolidFireVolumeSizeCmd.class.getName()); + private static final String NAME = "getsolidfirevolumesizeresponse"; @Parameter(name = ApiConstants.VOLUME_ID, type = CommandType.STRING, description = "Volume UUID", required = true) private String volumeUuid; - @Parameter(name = ApiConstants.STORAGE_ID, type = 
CommandType.STRING, description = "Storage Pool UUID", required = true) - private String storagePoolUuid; - @Inject private ApiSolidFireService _apiSolidFireService; - @Inject private VolumeDao _volumeDao; - @Inject private PrimaryDataStoreDao _storagePoolDao; + @Inject private SolidFireIntegrationTestManager manager; + @Inject private SolidFireIntegrationTestUtil util; ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// @@ -55,26 +46,21 @@ public class GetSolidFireVolumeSizeCmd extends BaseCmd { @Override public String getCommandName() { - return s_name; + return NAME; } @Override public long getEntityOwnerId() { - Account account = CallContext.current().getCallingAccount(); - - if (account != null) { - return account.getId(); - } - - return Account.ACCOUNT_ID_SYSTEM; // no account info given, parent this command to SYSTEM so ERROR events are tracked + return util.getAccountIdForVolumeUuid(volumeUuid); } @Override public void execute() { - Volume volume = _volumeDao.findByUuid(volumeUuid); - StoragePool storagePool = _storagePoolDao.findByUuid(storagePoolUuid); + LOGGER.info("'GetSolidFireVolumeSizeCmd.execute' method invoked"); - ApiSolidFireVolumeSizeResponse response = _apiSolidFireService.getSolidFireVolumeSize(volume, storagePool); + long sfVolumeSize = manager.getSolidFireVolumeSize(volumeUuid); + + ApiSolidFireVolumeSizeResponse response = new ApiSolidFireVolumeSizeResponse(sfVolumeSize); response.setResponseName(getCommandName()); response.setObjectName("apisolidfirevolumesize"); diff --git a/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/command/admin/solidfire/GetVolumeSnapshotDetailsCmd.java b/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/command/admin/solidfire/GetVolumeSnapshotDetailsCmd.java new file mode 100644 index 00000000000..5b9ce373328 --- /dev/null +++ b/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/command/admin/solidfire/GetVolumeSnapshotDetailsCmd.java @@ -0,0 +1,73 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.api.command.admin.solidfire; + +import java.util.List; + +import javax.inject.Inject; + +import org.apache.log4j.Logger; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.response.ListResponse; +import org.apache.cloudstack.api.response.solidfire.ApiVolumeSnapshotDetailsResponse; +import org.apache.cloudstack.api.response.solidfire.ApiVolumeiScsiNameResponse; +import org.apache.cloudstack.util.solidfire.SolidFireIntegrationTestUtil; + +@APICommand(name = "getVolumeSnapshotDetails", responseObject = ApiVolumeiScsiNameResponse.class, description = "Get Volume Snapshot Details", + requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) + +public class GetVolumeSnapshotDetailsCmd extends BaseCmd { + private static final Logger LOGGER = Logger.getLogger(GetVolumeSnapshotDetailsCmd.class.getName()); + private static final String NAME = "getvolumesnapshotdetailsresponse"; + + @Parameter(name = ApiConstants.SNAPSHOT_ID, type = CommandType.STRING, description = "CloudStack Snapshot UUID", required = true) + private String snapshotUuid; + + @Inject private SolidFireIntegrationTestUtil util; + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public String getCommandName() { + return NAME; + } + + @Override + public long getEntityOwnerId() { + return util.getAccountIdForSnapshotUuid(snapshotUuid); + } + + @Override + public void execute() { + LOGGER.info("'" + GetVolumeSnapshotDetailsCmd.class.getSimpleName() + ".execute' method invoked"); + + List<ApiVolumeSnapshotDetailsResponse> responses = util.getSnapshotDetails(snapshotUuid); + + ListResponse<ApiVolumeSnapshotDetailsResponse> listResponse = new ListResponse<>(); + + listResponse.setResponses(responses); + listResponse.setResponseName(getCommandName()); + listResponse.setObjectName("apivolumesnapshotdetails"); + + this.setResponseObject(listResponse); + } +} \ No newline at end of file diff --git a/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/command/user/solidfire/GetSolidFireVolumeIscsiNameCmd.java b/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/command/admin/solidfire/GetVolumeiScsiNameCmd.java similarity index 55% rename from plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/command/user/solidfire/GetSolidFireVolumeIscsiNameCmd.java rename to plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/command/admin/solidfire/GetVolumeiScsiNameCmd.java index 7afa3014034..dd6992caf36 100644 --- a/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/command/user/solidfire/GetSolidFireVolumeIscsiNameCmd.java +++ b/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/command/admin/solidfire/GetVolumeiScsiNameCmd.java @@ -14,11 +14,7 @@ // KIND, either express or implied.
-package org.apache.cloudstack.api.command.user.solidfire; - -import com.cloud.storage.dao.VolumeDao; -import com.cloud.storage.Volume; -import com.cloud.user.Account; +package org.apache.cloudstack.api.command.admin.solidfire; import javax.inject.Inject; @@ -27,22 +23,20 @@ import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.BaseCmd; import org.apache.cloudstack.api.Parameter; -import org.apache.cloudstack.api.response.ApiSolidFireVolumeIscsiNameResponse; -import org.apache.cloudstack.context.CallContext; -import org.apache.cloudstack.solidfire.ApiSolidFireService; +import org.apache.cloudstack.api.response.solidfire.ApiVolumeiScsiNameResponse; +import org.apache.cloudstack.util.solidfire.SolidFireIntegrationTestUtil; -@APICommand(name = "getSolidFireVolumeIscsiName", responseObject = ApiSolidFireVolumeIscsiNameResponse.class, description = "Get SolidFire Volume's Iscsi Name", +@APICommand(name = "getVolumeiScsiName", responseObject = ApiVolumeiScsiNameResponse.class, description = "Get Volume's iSCSI Name", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) -public class GetSolidFireVolumeIscsiNameCmd extends BaseCmd { - private static final Logger s_logger = Logger.getLogger(GetSolidFireVolumeIscsiNameCmd.class.getName()); - private static final String s_name = "getsolidfirevolumeiscsinameresponse"; +public class GetVolumeiScsiNameCmd extends BaseCmd { + private static final Logger LOGGER = Logger.getLogger(GetVolumeiScsiNameCmd.class.getName()); + private static final String NAME = "getvolumeiscsinameresponse"; @Parameter(name = ApiConstants.VOLUME_ID, type = CommandType.STRING, description = "CloudStack Volume UUID", required = true) private String volumeUuid; - @Inject private ApiSolidFireService _apiSolidFireService; - @Inject private VolumeDao _volumeDao; + @Inject private SolidFireIntegrationTestUtil _util; ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// @@ -50,28 +44,24 @@ public class GetSolidFireVolumeIscsiNameCmd extends BaseCmd { @Override public String getCommandName() { - return s_name; + return NAME; } @Override public long getEntityOwnerId() { - Account account = CallContext.current().getCallingAccount(); - - if (account != null) { - return account.getId(); - } - - return Account.ACCOUNT_ID_SYSTEM; // no account info given, parent this command to SYSTEM so ERROR events are tracked + return _util.getAccountIdForVolumeUuid(volumeUuid); } @Override public void execute() { - Volume volume = _volumeDao.findByUuid(volumeUuid); + LOGGER.info("'GetVolumeiScsiNameCmd.execute' method invoked"); - ApiSolidFireVolumeIscsiNameResponse response = _apiSolidFireService.getSolidFireVolumeIscsiName(volume); + String volume_iScsiName = _util.getVolume_iScsiName(volumeUuid); + + ApiVolumeiScsiNameResponse response = new ApiVolumeiScsiNameResponse(volume_iScsiName); response.setResponseName(getCommandName()); - response.setObjectName("apisolidfirevolumeiscsiname"); + response.setObjectName("apivolumeiscsiname"); this.setResponseObject(response); } diff --git a/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/response/solidfire/ApiPathForVolumeResponse.java b/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/response/solidfire/ApiPathForVolumeResponse.java new file mode 100644 index 00000000000..3e0f820aeab --- /dev/null +++ 
b/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/response/solidfire/ApiPathForVolumeResponse.java @@ -0,0 +1,33 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.response.solidfire; + +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseResponse; + +import com.cloud.serializer.Param; +import com.google.gson.annotations.SerializedName; + +public class ApiPathForVolumeResponse extends BaseResponse { + @SerializedName(ApiConstants.PATH) + @Param(description = "The path field for the volume") + private String path; + + public ApiPathForVolumeResponse(String path) { + this.path = path; + } +} diff --git a/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/response/ApiSolidFireAccountIdResponse.java b/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/response/solidfire/ApiSolidFireAccountIdResponse.java similarity index 90% rename from plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/response/ApiSolidFireAccountIdResponse.java rename to plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/response/solidfire/ApiSolidFireAccountIdResponse.java index ad77c7495db..a1c2a4c32d1 100644 --- a/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/response/ApiSolidFireAccountIdResponse.java +++ b/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/response/solidfire/ApiSolidFireAccountIdResponse.java @@ -14,7 +14,7 @@ // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. 
-package org.apache.cloudstack.api.response; +package org.apache.cloudstack.api.response.solidfire; import com.cloud.serializer.Param; @@ -30,8 +30,4 @@ public class ApiSolidFireAccountIdResponse extends BaseResponse { public ApiSolidFireAccountIdResponse(long sfAccountId) { solidFireAccountId = sfAccountId; } - - public long getSolidFireAccountId() { - return solidFireAccountId; - } } diff --git a/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/response/ApiSolidFireVolumeAccessGroupIdResponse.java b/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/response/solidfire/ApiSolidFireVolumeAccessGroupIdResponse.java similarity index 90% rename from plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/response/ApiSolidFireVolumeAccessGroupIdResponse.java rename to plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/response/solidfire/ApiSolidFireVolumeAccessGroupIdResponse.java index 8b63192b7c4..202a7e9ebba 100644 --- a/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/response/ApiSolidFireVolumeAccessGroupIdResponse.java +++ b/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/response/solidfire/ApiSolidFireVolumeAccessGroupIdResponse.java @@ -14,7 +14,7 @@ // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. -package org.apache.cloudstack.api.response; +package org.apache.cloudstack.api.response.solidfire; import com.cloud.serializer.Param; @@ -30,8 +30,4 @@ public class ApiSolidFireVolumeAccessGroupIdResponse extends BaseResponse { public ApiSolidFireVolumeAccessGroupIdResponse(long sfVolumeAccessGroupId) { solidFireVolumeAccessGroupId = sfVolumeAccessGroupId; } - - public long getSolidFireAccessGroupId() { - return solidFireVolumeAccessGroupId; - } } \ No newline at end of file diff --git a/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/response/ApiSolidFireVolumeSizeResponse.java b/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/response/solidfire/ApiSolidFireVolumeSizeResponse.java similarity index 90% rename from plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/response/ApiSolidFireVolumeSizeResponse.java rename to plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/response/solidfire/ApiSolidFireVolumeSizeResponse.java index b320adab870..d8a7d0407ca 100644 --- a/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/response/ApiSolidFireVolumeSizeResponse.java +++ b/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/response/solidfire/ApiSolidFireVolumeSizeResponse.java @@ -14,7 +14,7 @@ // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. 
-package org.apache.cloudstack.api.response; +package org.apache.cloudstack.api.response.solidfire; import com.cloud.serializer.Param; @@ -30,8 +30,4 @@ public class ApiSolidFireVolumeSizeResponse extends BaseResponse { public ApiSolidFireVolumeSizeResponse(long sfVolumeSize) { solidFireVolumeSize = sfVolumeSize; } - - public long getSolidFireVolumeSize() { - return solidFireVolumeSize; - } } \ No newline at end of file diff --git a/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/response/solidfire/ApiVolumeSnapshotDetailsResponse.java b/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/response/solidfire/ApiVolumeSnapshotDetailsResponse.java new file mode 100644 index 00000000000..364ded8dce6 --- /dev/null +++ b/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/response/solidfire/ApiVolumeSnapshotDetailsResponse.java @@ -0,0 +1,43 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.response.solidfire; + +import com.cloud.serializer.Param; + +import com.google.gson.annotations.SerializedName; + +import org.apache.cloudstack.api.BaseResponse; + +public class ApiVolumeSnapshotDetailsResponse extends BaseResponse { + @SerializedName("volumeSnapshotId") + @Param(description = "CloudStack Volume Snapshot ID") + private long volumeSnapshotId; + + @SerializedName("snapshotDetailsName") + @Param(description = "Snapshot Details Name") + private String volumeSnapshotDetailsName; + + @SerializedName("snapshotDetailsValue") + @Param(description = "Snapshot Details Value") + private String volumeSnapshotDetailsValue; + + public ApiVolumeSnapshotDetailsResponse(long volumeSnapshotId, String volumeSnapshotDetailsName, String volumeSnapshotDetailsValue) { + this.volumeSnapshotId = volumeSnapshotId; + this.volumeSnapshotDetailsName = volumeSnapshotDetailsName; + this.volumeSnapshotDetailsValue = volumeSnapshotDetailsValue; + } +} \ No newline at end of file diff --git a/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/response/ApiSolidFireVolumeIscsiNameResponse.java b/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/response/solidfire/ApiVolumeiScsiNameResponse.java similarity index 65% rename from plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/response/ApiSolidFireVolumeIscsiNameResponse.java rename to plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/response/solidfire/ApiVolumeiScsiNameResponse.java index 517cba93ec7..f43e53352aa 100644 --- a/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/response/ApiSolidFireVolumeIscsiNameResponse.java +++ b/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/response/solidfire/ApiVolumeiScsiNameResponse.java @@ -14,7 +14,7 @@ // KIND, either express or implied. 
See the License for the // specific language governing permissions and limitations // under the License. -package org.apache.cloudstack.api.response; +package org.apache.cloudstack.api.response.solidfire; import com.cloud.serializer.Param; @@ -22,16 +22,12 @@ import com.google.gson.annotations.SerializedName; import org.apache.cloudstack.api.BaseResponse; -public class ApiSolidFireVolumeIscsiNameResponse extends BaseResponse { - @SerializedName("solidFireVolumeIscsiName") - @Param(description = "SolidFire Volume Iscsi Name") - private String solidFireVolumeIscsiName; +public class ApiVolumeiScsiNameResponse extends BaseResponse { + @SerializedName("volumeiScsiName") + @Param(description = "Volume iSCSI Name") + private String volumeiScsiName; - public ApiSolidFireVolumeIscsiNameResponse(String sfVolumeIscsiName) { - solidFireVolumeIscsiName = sfVolumeIscsiName; + public ApiVolumeiScsiNameResponse(String volumeiScsiName) { + this.volumeiScsiName = volumeiScsiName; } - - public String getSolidFireVolumeIscsiName() { - return solidFireVolumeIscsiName; - } -} \ No newline at end of file +} diff --git a/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/solidfire/ApiSolidFireIntegrationTestService.java b/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/solidfire/ApiSolidFireIntegrationTestService.java new file mode 100644 index 00000000000..ff206d3a3b6 --- /dev/null +++ b/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/solidfire/ApiSolidFireIntegrationTestService.java @@ -0,0 +1,22 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.solidfire; + +import com.cloud.utils.component.PluggableService; + +public interface ApiSolidFireIntegrationTestService extends PluggableService { +} diff --git a/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/solidfire/ApiSolidFireIntegrationTestServiceImpl.java b/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/solidfire/ApiSolidFireIntegrationTestServiceImpl.java new file mode 100644 index 00000000000..04589038d34 --- /dev/null +++ b/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/api/solidfire/ApiSolidFireIntegrationTestServiceImpl.java @@ -0,0 +1,48 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.solidfire; + +import java.util.List; +import java.util.ArrayList; + +import org.apache.cloudstack.api.command.admin.solidfire.GetPathForVolumeCmd; +// import org.apache.log4j.Logger; +import org.apache.cloudstack.api.command.admin.solidfire.GetSolidFireAccountIdCmd; +import org.apache.cloudstack.api.command.admin.solidfire.GetSolidFireVolumeAccessGroupIdCmd; +import org.apache.cloudstack.api.command.admin.solidfire.GetVolumeSnapshotDetailsCmd; +import org.apache.cloudstack.api.command.admin.solidfire.GetVolumeiScsiNameCmd; +import org.apache.cloudstack.api.command.admin.solidfire.GetSolidFireVolumeSizeCmd; +import org.springframework.stereotype.Component; + +import com.cloud.utils.component.AdapterBase; + +@Component +public class ApiSolidFireIntegrationTestServiceImpl extends AdapterBase implements ApiSolidFireIntegrationTestService { + @Override + public List<Class<?>> getCommands() { + List<Class<?>> cmdList = new ArrayList<Class<?>>(); + + cmdList.add(GetPathForVolumeCmd.class); + cmdList.add(GetSolidFireAccountIdCmd.class); + cmdList.add(GetSolidFireVolumeAccessGroupIdCmd.class); + cmdList.add(GetVolumeiScsiNameCmd.class); + cmdList.add(GetSolidFireVolumeSizeCmd.class); + cmdList.add(GetVolumeSnapshotDetailsCmd.class); + + return cmdList; + } +} diff --git a/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/solidfire/ApiSolidFireService.java b/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/solidfire/ApiSolidFireService.java deleted file mode 100644 index 92828d4b813..00000000000 --- a/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/solidfire/ApiSolidFireService.java +++ /dev/null @@ -1,37 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License.
-package org.apache.cloudstack.solidfire; - -import com.cloud.utils.component.PluggableService; -import com.cloud.storage.Volume; -import com.cloud.storage.StoragePool; - -import org.apache.cloudstack.api.response.ApiSolidFireAccountIdResponse; -import org.apache.cloudstack.api.response.ApiSolidFireVolumeSizeResponse; -import org.apache.cloudstack.api.response.ApiSolidFireVolumeAccessGroupIdResponse; -import org.apache.cloudstack.api.response.ApiSolidFireVolumeIscsiNameResponse; - -/** - * Provide API for SolidFire integration tests - * - */ -public interface ApiSolidFireService extends PluggableService { - public ApiSolidFireAccountIdResponse getSolidFireAccountId(Long csAccountId, Long storagePoolId); - public ApiSolidFireVolumeSizeResponse getSolidFireVolumeSize(Volume volume, StoragePool storagePool); - public ApiSolidFireVolumeAccessGroupIdResponse getSolidFireVolumeAccessGroupId(Long csClusterId, Long storagePoolId); - public ApiSolidFireVolumeIscsiNameResponse getSolidFireVolumeIscsiName(Volume volume); -} diff --git a/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/solidfire/ApiSolidFireServiceImpl.java b/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/solidfire/ApiSolidFireServiceImpl.java deleted file mode 100644 index fbda65433a2..00000000000 --- a/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/solidfire/ApiSolidFireServiceImpl.java +++ /dev/null @@ -1,126 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package org.apache.cloudstack.solidfire; - -import java.util.Map; -import java.util.List; -import java.util.ArrayList; - -import javax.inject.Inject; -import javax.naming.ConfigurationException; - -// import org.apache.log4j.Logger; -import org.apache.cloudstack.acl.APIChecker; -import org.apache.cloudstack.storage.datastore.util.SolidFireUtil; -import org.apache.cloudstack.api.command.user.solidfire.GetSolidFireAccountIdCmd; -import org.apache.cloudstack.api.command.user.solidfire.GetSolidFireVolumeAccessGroupIdCmd; -import org.apache.cloudstack.api.command.user.solidfire.GetSolidFireVolumeIscsiNameCmd; -import org.apache.cloudstack.api.command.user.solidfire.GetSolidFireVolumeSizeCmd; -import org.apache.cloudstack.api.response.ApiSolidFireAccountIdResponse; -import org.apache.cloudstack.api.response.ApiSolidFireVolumeAccessGroupIdResponse; -import org.apache.cloudstack.api.response.ApiSolidFireVolumeIscsiNameResponse; -import org.apache.cloudstack.api.response.ApiSolidFireVolumeSizeResponse; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager; -import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; -import org.springframework.stereotype.Component; - -import com.cloud.dc.ClusterDetailsDao; -import com.cloud.dc.ClusterDetailsVO; -import com.cloud.exception.InvalidParameterValueException; -import com.cloud.exception.PermissionDeniedException; -import com.cloud.storage.StoragePool; -import com.cloud.storage.Volume; -import com.cloud.user.AccountDetailsDao; -import com.cloud.user.AccountDetailVO; -import com.cloud.user.User; -import com.cloud.utils.component.AdapterBase; - -@Component -public class ApiSolidFireServiceImpl extends AdapterBase implements APIChecker, ApiSolidFireService { - // private static final Logger s_logger = Logger.getLogger(ApiSolidFireServiceImpl.class); - - @Inject private AccountDetailsDao _accountDetailsDao; - @Inject private DataStoreProviderManager _dataStoreProviderMgr; - @Inject private ClusterDetailsDao _clusterDetailsDao; - - @Override - public boolean configure(String name, Map params) throws ConfigurationException { - super.configure(name, params); - - return true; - } - - @Override - public ApiSolidFireAccountIdResponse getSolidFireAccountId(Long csAccountId, Long storagePoolId) { - AccountDetailVO accountDetail = _accountDetailsDao.findDetail(csAccountId, SolidFireUtil.getAccountKey(storagePoolId)); - String sfAccountId = accountDetail.getValue(); - - return new ApiSolidFireAccountIdResponse(Long.parseLong(sfAccountId)); - } - - @Override - public ApiSolidFireVolumeSizeResponse getSolidFireVolumeSize(Volume volume, StoragePool storagePool) { - PrimaryDataStoreDriver primaryStoreDriver = null; - - try { - DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(storagePool.getStorageProviderName()); - DataStoreDriver storeDriver = storeProvider.getDataStoreDriver(); - - if (storeDriver instanceof PrimaryDataStoreDriver) { - primaryStoreDriver = (PrimaryDataStoreDriver)storeDriver; - } - } - catch (InvalidParameterValueException e) { - throw new InvalidParameterValueException("Invalid Storage Driver Type"); - } - - return new ApiSolidFireVolumeSizeResponse(primaryStoreDriver.getVolumeSizeIncludingHypervisorSnapshotReserve(volume, storagePool)); - } - - @Override - public ApiSolidFireVolumeAccessGroupIdResponse 
getSolidFireVolumeAccessGroupId(Long csClusterId, Long storagePoolId) { - ClusterDetailsVO clusterDetails = _clusterDetailsDao.findDetail(csClusterId, SolidFireUtil.getVagKey(storagePoolId)); - String sfVagId = clusterDetails.getValue(); - - return new ApiSolidFireVolumeAccessGroupIdResponse(Long.parseLong(sfVagId)); - } - - @Override - public ApiSolidFireVolumeIscsiNameResponse getSolidFireVolumeIscsiName(Volume volume) { - return new ApiSolidFireVolumeIscsiNameResponse(volume.get_iScsiName()); - } - - - @Override - public boolean checkAccess(User user, String apiCommandName) throws PermissionDeniedException { - return true; - } - - @Override - public List> getCommands() { - List> cmdList = new ArrayList>(); - - cmdList.add(GetSolidFireAccountIdCmd.class); - cmdList.add(GetSolidFireVolumeSizeCmd.class); - cmdList.add(GetSolidFireVolumeAccessGroupIdCmd.class); - cmdList.add(GetSolidFireVolumeIscsiNameCmd.class); - - return cmdList; - } -} diff --git a/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/solidfire/SolidFireIntegrationTestManager.java b/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/solidfire/SolidFireIntegrationTestManager.java new file mode 100644 index 00000000000..bdc11807efe --- /dev/null +++ b/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/solidfire/SolidFireIntegrationTestManager.java @@ -0,0 +1,23 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.solidfire; + +public interface SolidFireIntegrationTestManager { + long getSolidFireAccountId(String csAccountUuid, String storagePoolUuid); + long getSolidFireVolumeAccessGroupId(String csClusterUuid, String storagePoolUuid); + long getSolidFireVolumeSize(String volumeUuid); +} diff --git a/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/solidfire/SolidFireIntegrationTestManagerImpl.java b/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/solidfire/SolidFireIntegrationTestManagerImpl.java new file mode 100644 index 00000000000..ff6e72cb29a --- /dev/null +++ b/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/solidfire/SolidFireIntegrationTestManagerImpl.java @@ -0,0 +1,78 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.solidfire; + +import javax.inject.Inject; + +import org.apache.cloudstack.storage.datastore.util.SolidFireUtil; +import org.apache.cloudstack.util.solidfire.SolidFireIntegrationTestUtil; +import org.springframework.stereotype.Component; + +import com.cloud.dc.ClusterDetailsDao; +import com.cloud.dc.ClusterDetailsVO; +import com.cloud.storage.VolumeDetailVO; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.storage.dao.VolumeDetailsDao; +import com.cloud.user.AccountDetailsDao; +import com.cloud.user.AccountDetailVO; +import com.cloud.utils.exception.CloudRuntimeException; + +@Component +public class SolidFireIntegrationTestManagerImpl implements SolidFireIntegrationTestManager { + + @Inject private AccountDetailsDao accountDetailsDao; + @Inject private ClusterDetailsDao clusterDetailsDao; + @Inject private SolidFireIntegrationTestUtil util; + @Inject private VolumeDao volumeDao; + @Inject private VolumeDetailsDao volumeDetailsDao; + + @Override + public long getSolidFireAccountId(String csAccountUuid, String storagePoolUuid) { + long csAccountId = util.getAccountIdForAccountUuid(csAccountUuid); + long storagePoolId = util.getStoragePoolIdForStoragePoolUuid(storagePoolUuid); + + AccountDetailVO accountDetail = accountDetailsDao.findDetail(csAccountId, SolidFireUtil.getAccountKey(storagePoolId)); + String sfAccountId = accountDetail.getValue(); + + return Long.parseLong(sfAccountId); + } + + @Override + public long getSolidFireVolumeAccessGroupId(String csClusterUuid, String storagePoolUuid) { + long csClusterId = util.getClusterIdForClusterUuid(csClusterUuid); + long storagePoolId = util.getStoragePoolIdForStoragePoolUuid(storagePoolUuid); + + ClusterDetailsVO clusterDetails = clusterDetailsDao.findDetail(csClusterId, SolidFireUtil.getVagKey(storagePoolId)); + String sfVagId = clusterDetails.getValue(); + + return Long.parseLong(sfVagId); + } + + @Override + public long getSolidFireVolumeSize(String volumeUuid) { + VolumeVO volume = volumeDao.findByUuid(volumeUuid); + + VolumeDetailVO volumeDetail = volumeDetailsDao.findDetail(volume.getId(), SolidFireUtil.VOLUME_SIZE); + + if (volumeDetail != null && volumeDetail.getValue() != null) { + return Long.parseLong(volumeDetail.getValue()); + } + + throw new CloudRuntimeException("Unable to determine the size of the SolidFire volume"); + } +} diff --git a/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/util/solidfire/SolidFireIntegrationTestUtil.java b/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/util/solidfire/SolidFireIntegrationTestUtil.java new file mode 100644 index 00000000000..307e8c59691 --- /dev/null +++ b/plugins/api/solidfire-intg-test/src/org/apache/cloudstack/util/solidfire/SolidFireIntegrationTestUtil.java @@ -0,0 +1,112 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.util.solidfire; + +import java.util.ArrayList; +import java.util.List; + +import javax.inject.Inject; + +import org.apache.cloudstack.api.response.solidfire.ApiVolumeSnapshotDetailsResponse; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; + +import com.cloud.dc.ClusterVO; +import com.cloud.dc.dao.ClusterDao; +import com.cloud.storage.SnapshotVO; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.SnapshotDao; +import com.cloud.storage.dao.SnapshotDetailsDao; +import com.cloud.storage.dao.SnapshotDetailsVO; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.user.Account; +import com.cloud.user.dao.AccountDao; + +public class SolidFireIntegrationTestUtil { + @Inject private AccountDao accountDao; + @Inject private ClusterDao clusterDao; + @Inject private PrimaryDataStoreDao storagePoolDao; + @Inject private SnapshotDao snapshotDao; + @Inject private SnapshotDetailsDao snapshotDetailsDao; + @Inject private VolumeDao volumeDao; + + private SolidFireIntegrationTestUtil() {} + + public long getAccountIdForAccountUuid(String accountUuid) { + Account account = accountDao.findByUuid(accountUuid); + + return account.getAccountId(); + } + + public long getAccountIdForVolumeUuid(String volumeUuid) { + VolumeVO volume = volumeDao.findByUuid(volumeUuid); + + return volume.getAccountId(); + } + + public long getAccountIdForSnapshotUuid(String snapshotUuid) { + SnapshotVO snapshot = snapshotDao.findByUuid(snapshotUuid); + + return snapshot.getAccountId(); + } + + public long getClusterIdForClusterUuid(String clusterUuid) { + ClusterVO cluster = clusterDao.findByUuid(clusterUuid); + + return cluster.getId(); + } + + public long getStoragePoolIdForStoragePoolUuid(String storagePoolUuid) { + StoragePoolVO storagePool = storagePoolDao.findByUuid(storagePoolUuid); + + return storagePool.getId(); + } + + public String getPathForVolumeUuid(String volumeUuid) { + VolumeVO volume = volumeDao.findByUuid(volumeUuid); + + return volume.getPath(); + } + + public String getVolume_iScsiName(String volumeUuid) { + VolumeVO volume = volumeDao.findByUuid(volumeUuid); + + return volume.get_iScsiName(); + } + + public List<ApiVolumeSnapshotDetailsResponse> getSnapshotDetails(String snapshotUuid) { + SnapshotVO snapshot = snapshotDao.findByUuid(snapshotUuid); + + List<SnapshotDetailsVO> snapshotDetails = snapshotDetailsDao.listDetails(snapshot.getId()); + + List<ApiVolumeSnapshotDetailsResponse> responses = new ArrayList<>(); + + if (snapshotDetails != null) { + for (SnapshotDetailsVO snapshotDetail : snapshotDetails) { + ApiVolumeSnapshotDetailsResponse response = new ApiVolumeSnapshotDetailsResponse( + snapshotDetail.getResourceId(), + snapshotDetail.getName(), + snapshotDetail.getValue() + ); + + responses.add(response); + } + } + + return responses; + } +} diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java
b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java index 92e985e023e..ad87469f689 100644 --- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java +++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java @@ -47,6 +47,8 @@ import org.apache.cloudstack.storage.command.DettachAnswer; import org.apache.cloudstack.storage.command.DettachCommand; import org.apache.cloudstack.storage.command.ForgetObjectCmd; import org.apache.cloudstack.storage.command.IntroduceObjectCmd; +import org.apache.cloudstack.storage.command.ResignatureAnswer; +import org.apache.cloudstack.storage.command.ResignatureCommand; import org.apache.cloudstack.storage.command.SnapshotAndCopyAnswer; import org.apache.cloudstack.storage.command.SnapshotAndCopyCommand; import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; @@ -147,6 +149,13 @@ public class KVMStorageProcessor implements StorageProcessor { return new SnapshotAndCopyAnswer(); } + @Override + public ResignatureAnswer resignature(final ResignatureCommand cmd) { + s_logger.info("'ResignatureAnswer resignature(ResignatureCommand)' not currently used for KVMStorageProcessor"); + + return new ResignatureAnswer(); + } + @Override public Answer copyTemplateToPrimaryStorage(final CopyCommand cmd) { final DataTO srcData = cmd.getSrcTO(); diff --git a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/Ovm3StorageProcessor.java b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/Ovm3StorageProcessor.java index 3c28f1fde35..7c89921936a 100644 --- a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/Ovm3StorageProcessor.java +++ b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/Ovm3StorageProcessor.java @@ -31,6 +31,8 @@ import org.apache.cloudstack.storage.command.DeleteCommand; import org.apache.cloudstack.storage.command.DettachCommand; import org.apache.cloudstack.storage.command.ForgetObjectCmd; import org.apache.cloudstack.storage.command.IntroduceObjectCmd; +import org.apache.cloudstack.storage.command.ResignatureAnswer; +import org.apache.cloudstack.storage.command.ResignatureCommand; import org.apache.cloudstack.storage.command.SnapshotAndCopyAnswer; import org.apache.cloudstack.storage.command.SnapshotAndCopyCommand; import org.apache.cloudstack.storage.to.SnapshotObjectTO; @@ -805,9 +807,17 @@ public class Ovm3StorageProcessor implements StorageProcessor { * iSCSI? 
*/ @Override - public Answer snapshotAndCopy(SnapshotAndCopyCommand cmd) { - LOGGER.debug("execute snapshotAndCopy: "+ cmd.getClass()); - return new SnapshotAndCopyAnswer("not implemented yet"); + public SnapshotAndCopyAnswer snapshotAndCopy(SnapshotAndCopyCommand cmd) { + LOGGER.info("'SnapshotAndCopyAnswer snapshotAndCopy(SnapshotAndCopyCommand)' not currently used for Ovm3StorageProcessor"); + + return new SnapshotAndCopyAnswer("Not implemented"); + } + + @Override + public ResignatureAnswer resignature(final ResignatureCommand cmd) { + LOGGER.info("'ResignatureAnswer resignature(ResignatureCommand)' not currently used for Ovm3StorageProcessor"); + + return new ResignatureAnswer("Not implemented"); } /** diff --git a/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorStorageProcessor.java b/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorStorageProcessor.java index cae926179f3..9d86bc31b71 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorStorageProcessor.java +++ b/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorStorageProcessor.java @@ -35,6 +35,8 @@ import org.apache.cloudstack.storage.command.DettachAnswer; import org.apache.cloudstack.storage.command.DettachCommand; import org.apache.cloudstack.storage.command.ForgetObjectCmd; import org.apache.cloudstack.storage.command.IntroduceObjectCmd; +import org.apache.cloudstack.storage.command.ResignatureAnswer; +import org.apache.cloudstack.storage.command.ResignatureCommand; import org.apache.cloudstack.storage.command.SnapshotAndCopyAnswer; import org.apache.cloudstack.storage.command.SnapshotAndCopyCommand; import org.apache.cloudstack.storage.to.SnapshotObjectTO; @@ -66,6 +68,13 @@ public class SimulatorStorageProcessor implements StorageProcessor { return new SnapshotAndCopyAnswer(); } + @Override + public ResignatureAnswer resignature(ResignatureCommand cmd) { + s_logger.info("'ResignatureAnswer resignature(ResignatureCommand)' not currently used for SimulatorStorageProcessor"); + + return new ResignatureAnswer(); + } + @Override public Answer copyTemplateToPrimaryStorage(CopyCommand cmd) { TemplateObjectTO template = new TemplateObjectTO(); diff --git a/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java b/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java index 21f79f983e2..b2766e666d0 100644 --- a/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java +++ b/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java @@ -62,6 +62,8 @@ import org.apache.cloudstack.storage.command.DeleteCommand; import org.apache.cloudstack.storage.command.DettachCommand; import org.apache.cloudstack.storage.command.ForgetObjectCmd; import org.apache.cloudstack.storage.command.IntroduceObjectCmd; +import org.apache.cloudstack.storage.command.ResignatureAnswer; +import org.apache.cloudstack.storage.command.ResignatureCommand; import org.apache.cloudstack.storage.command.SnapshotAndCopyAnswer; import org.apache.cloudstack.storage.command.SnapshotAndCopyCommand; import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; @@ -144,6 +146,13 @@ public class VmwareStorageProcessor implements StorageProcessor { return new SnapshotAndCopyAnswer(); } + @Override + public ResignatureAnswer resignature(ResignatureCommand cmd) { + s_logger.info("'ResignatureAnswer resignature(ResignatureCommand)' not currently used for VmwareStorageProcessor"); + + return new 
ResignatureAnswer(); + } + private String getOVFFilePath(String srcOVAFileName) { File file = new File(srcOVAFileName); assert (_storage != null); diff --git a/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/CitrixResourceBase.java b/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/CitrixResourceBase.java index efc3f81ed19..026d0c18cd2 100644 --- a/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/CitrixResourceBase.java +++ b/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/CitrixResourceBase.java @@ -37,6 +37,7 @@ import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Properties; import java.util.Queue; import java.util.Random; @@ -164,9 +165,16 @@ import com.xensource.xenapi.XenAPIObject; * */ public abstract class CitrixResourceBase implements ServerResource, HypervisorResource, VirtualRouterDeployer { - + /** + * used to describe what type of resource a storage device is of + */ public enum SRType { - EXT, FILE, ISCSI, ISO, LVM, LVMOHBA, LVMOISCSI, NFS; + EXT, FILE, ISCSI, ISO, LVM, LVMOHBA, LVMOISCSI, + /** + * used for resigning metadata (like SR UUID and VDI UUID when a + * particular storage manager is installed on a XenServer host (for back-end snapshots to work)) + */ + RELVMOISCSI, NFS; String _str; @@ -1794,10 +1802,26 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe cmd.setPod(_pod); cmd.setVersion(CitrixResourceBase.class.getPackage().getImplementationVersion()); + try { + final String cmdLine = "xe sm-list | grep \"resigning of duplicates\""; + + final XenServerUtilitiesHelper xenServerUtilitiesHelper = getXenServerUtilitiesHelper(); + + Pair result = xenServerUtilitiesHelper.executeSshWrapper(_host.getIp(), 22, _username, null, getPwdFromQueue(), cmdLine); + + boolean supportsClonedVolumes = result != null && result.first() != null && result.first() && + result.second() != null && result.second().length() > 0; + + cmd.setSupportsClonedVolumes(supportsClonedVolumes); + } catch (NumberFormatException ex) { + s_logger.warn("Issue sending 'xe sm-list' via SSH to XenServer host: " + ex.getMessage()); + } } catch (final XmlRpcException e) { - throw new CloudRuntimeException("XML RPC Exception" + e.getMessage(), e); + throw new CloudRuntimeException("XML RPC Exception: " + e.getMessage(), e); } catch (final XenAPIException e) { - throw new CloudRuntimeException("XenAPIException" + e.toString(), e); + throw new CloudRuntimeException("XenAPIException: " + e.toString(), e); + } catch (final Exception e) { + throw new CloudRuntimeException("Exception: " + e.toString(), e); } } @@ -2264,6 +2288,11 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe public SR getIscsiSR(final Connection conn, final String srNameLabel, final String target, String path, final String chapInitiatorUsername, final String chapInitiatorPassword, final boolean ignoreIntroduceException) { + return getIscsiSR(conn, srNameLabel, target, path, chapInitiatorUsername, chapInitiatorPassword, false, ignoreIntroduceException); + } + + public SR getIscsiSR(final Connection conn, final String srNameLabel, final String target, String path, final String chapInitiatorUsername, + final String chapInitiatorPassword, final boolean resignature, final boolean ignoreIntroduceException) { synchronized (srNameLabel.intern()) { final Map deviceConfig = new HashMap(); try { @@ 
-2353,17 +2382,52 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe throw new CloudRuntimeException(msg, e); } } + deviceConfig.put("SCSIid", scsiid); - final String result = SR.probe(conn, host, deviceConfig, type, smConfig); + String result = SR.probe(conn, host, deviceConfig, type, smConfig); + String pooluuid = null; + if (result.indexOf("") != -1) { pooluuid = result.substring(result.indexOf("") + 6, result.indexOf("")).trim(); } if (pooluuid == null || pooluuid.length() != 36) { sr = SR.create(conn, host, deviceConfig, new Long(0), srNameLabel, srNameLabel, type, "user", true, smConfig); - } else { + } + else { + if (resignature) { + try { + SR.create(conn, host, deviceConfig, new Long(0), srNameLabel, srNameLabel, SRType.RELVMOISCSI.toString(), "user", true, smConfig); + + // The successful outcome of SR.create (right above) is to throw an exception of type XenAPIException (with expected + // toString() text) after resigning the metadata (we indicated to perform a resign by passing in SRType.RELVMOISCSI.toString()). + // That being the case, if this CloudRuntimeException statement is executed, there appears to have been some kind + // of failure in the execution of the above SR.create (resign) method. + throw new CloudRuntimeException("Problem resigning the metadata"); + } + catch (XenAPIException ex) { + String msg = ex.toString(); + + if (!msg.contains("successfully resigned")) { + throw ex; + } + + result = SR.probe(conn, host, deviceConfig, type, smConfig); + + pooluuid = null; + + if (result.indexOf("") != -1) { + pooluuid = result.substring(result.indexOf("") + 6, result.indexOf("")).trim(); + } + + if (pooluuid == null || pooluuid.length() != 36) { + throw new CloudRuntimeException("Non-existent or invalid SR UUID"); + } + } + } + try { sr = SR.introduce(conn, pooluuid, srNameLabel, srNameLabel, type, "user", true, smConfig); } catch (final XenAPIException ex) { @@ -2375,11 +2439,15 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe } final Set setHosts = Host.getAll(conn); + if (setHosts == null) { - final String msg = "Unable to create Iscsi SR " + deviceConfig + " due to hosts not available."; + final String msg = "Unable to create iSCSI SR " + deviceConfig + " due to hosts not available."; + s_logger.warn(msg); + throw new CloudRuntimeException(msg); } + for (final Host currentHost : setHosts) { final PBD.Record rec = new PBD.Record(); @@ -2392,7 +2460,9 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe pbd.plug(conn); } } + sr.scan(conn); + return sr; } catch (final XenAPIException e) { final String msg = "Unable to create Iscsi SR " + deviceConfig + " due to " + e.toString(); @@ -3969,11 +4039,9 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe } } - // the idea here is to see if the DiskTO in question is from managed storage - // and - // does not yet have an SR - // if no SR, create it and create a VDI in it - public VDI prepareManagedDisk(final Connection conn, final DiskTO disk, final String vmName) throws Exception { + // The idea here is to see if the DiskTO in question is from managed storage and does not yet have an SR. + // If no SR, create it and create a VDI in it. 
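A condensed sketch of the resignature branch that getIscsiSR gains above (illustrative only, not patch code): parsePoolUuid is a hypothetical helper standing in for the inline substring parsing of the SR.probe output, the PBD plugging and sr.scan(conn) that follow in the real method are omitted, and conn, host, deviceConfig, smConfig, type, srNameLabel and resignature are the values already present in the surrounding method.

    // Probe the LUN for an existing SR.
    String result = SR.probe(conn, host, deviceConfig, type, smConfig);
    String pooluuid = parsePoolUuid(result); // hypothetical helper

    if (pooluuid == null || pooluuid.length() != 36) {
        // No usable SR on this LUN yet, so create a brand-new one.
        return SR.create(conn, host, deviceConfig, 0L, srNameLabel, srNameLabel, type, "user", true, smConfig);
    }

    if (resignature) {
        try {
            // RELVMOISCSI only resigns the SR/VDI UUIDs; the storage manager signals success
            // by throwing a XenAPIException whose text contains "successfully resigned".
            SR.create(conn, host, deviceConfig, 0L, srNameLabel, srNameLabel, SRType.RELVMOISCSI.toString(), "user", true, smConfig);

            // Reaching this line means no exception was thrown, i.e. the resign did not happen.
            throw new CloudRuntimeException("Problem resigning the metadata");
        } catch (XenAPIException ex) {
            if (!ex.toString().contains("successfully resigned")) {
                throw ex;
            }
        }

        // Re-probe so the new (resigned) SR UUID is picked up.
        pooluuid = parsePoolUuid(SR.probe(conn, host, deviceConfig, type, smConfig));
    }

    // Attach the existing (possibly just-resigned) SR without re-creating it.
    return SR.introduce(conn, pooluuid, srNameLabel, srNameLabel, type, "user", true, smConfig);

The prepareManagedDisk/prepareManagedStorage path below then creates, or reuses, a VDI inside the SR obtained this way.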
+ public VDI prepareManagedDisk(final Connection conn, final DiskTO disk, final long vmId, final String vmName) throws Exception { final Map details = disk.getDetails(); if (details == null) { @@ -3994,7 +4062,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe return null; } - final String vdiNameLabel = vmName + "-DATA"; + final String vdiNameLabel = Volume.Type.ROOT.equals(disk.getType()) ? ("ROOT-" + vmId) : (vmName + "-DATA"); return prepareManagedStorage(conn, details, null, vdiNameLabel); } @@ -4024,19 +4092,25 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe VDI vdi = getVDIbyUuid(conn, path, false); final Long volumeSize = Long.parseLong(details.get(DiskTO.VOLUME_SIZE)); + Set vdisInSr = sr.getVDIs(conn); + + // If a VDI already exists in the SR (in case we cloned from a template cache), use that. + if (vdisInSr.size() == 1) { + vdi = vdisInSr.iterator().next(); + } + if (vdi == null) { vdi = createVdi(sr, vdiNameLabel, volumeSize); } else { - // if VDI is not null, it must have already been created, so check - // whether a resize of the volume was performed - // if true, resize the VDI to the volume size + // If vdi is not null, it must have already been created, so check whether a resize of the volume was performed. + // If true, resize the VDI to the volume size. - s_logger.info("checking for the resize of the datadisk"); + s_logger.info("Checking for the resize of the datadisk"); final long vdiVirtualSize = vdi.getVirtualSize(conn); if (vdiVirtualSize != volumeSize) { - s_logger.info("resizing the data disk (vdi) from vdiVirtualsize: " + vdiVirtualSize + " to volumeSize: " + volumeSize); + s_logger.info("Resizing the data disk (VDI) from vdiVirtualSize: " + vdiVirtualSize + " to volumeSize: " + volumeSize); try { vdi.resize(conn, volumeSize); @@ -4044,6 +4118,15 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe s_logger.warn("Unable to resize volume", e); } } + + // change the name-label in case of a cloned VDI + if (!Objects.equals(vdi.getNameLabel(conn), vdiNameLabel)) { + try { + vdi.setNameLabel(conn, vdiNameLabel); + } catch (final Exception e) { + s_logger.warn("Unable to rename volume", e); + } + } } return vdi; diff --git a/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/XenServerStorageProcessor.java b/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/XenServerStorageProcessor.java index 38b45d02466..63916114d2b 100644 --- a/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/XenServerStorageProcessor.java +++ b/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/XenServerStorageProcessor.java @@ -44,6 +44,8 @@ import org.apache.cloudstack.storage.command.DettachCommand; import org.apache.cloudstack.storage.command.ForgetObjectCmd; import org.apache.cloudstack.storage.command.IntroduceObjectAnswer; import org.apache.cloudstack.storage.command.IntroduceObjectCmd; +import org.apache.cloudstack.storage.command.ResignatureAnswer; +import org.apache.cloudstack.storage.command.ResignatureCommand; import org.apache.cloudstack.storage.command.SnapshotAndCopyAnswer; import org.apache.cloudstack.storage.command.SnapshotAndCopyCommand; import org.apache.cloudstack.storage.datastore.protocol.DataStoreProtocol; @@ -159,6 +161,50 @@ public class XenServerStorageProcessor implements StorageProcessor { } } + @Override + public ResignatureAnswer resignature(final ResignatureCommand cmd) { + 
SR newSr = null; + + final Connection conn = hypervisorResource.getConnection(); + + try { + final Map details = cmd.getDetails(); + + final String iScsiName = details.get(DiskTO.IQN); + final String storageHost = details.get(DiskTO.STORAGE_HOST); + final String chapInitiatorUsername = details.get(DiskTO.CHAP_INITIATOR_USERNAME); + final String chapInitiatorSecret = details.get(DiskTO.CHAP_INITIATOR_SECRET); + + newSr = hypervisorResource.getIscsiSR(conn, iScsiName, storageHost, iScsiName, chapInitiatorUsername, chapInitiatorSecret, true, false); + + Set vdis = newSr.getVDIs(conn); + + if (vdis.size() != 1) { + throw new RuntimeException("There were " + vdis.size() + " VDIs in the SR."); + } + + VDI vdi = vdis.iterator().next(); + + final ResignatureAnswer resignatureAnswer = new ResignatureAnswer(); + + resignatureAnswer.setSize(vdi.getVirtualSize(conn)); + resignatureAnswer.setPath(vdi.getUuid(conn)); + resignatureAnswer.setFormat(ImageFormat.VHD); + + return resignatureAnswer; + } + catch (final Exception ex) { + s_logger.warn("Failed to resignature: " + ex.toString(), ex); + + return new ResignatureAnswer(ex.getMessage()); + } + finally { + if (newSr != null) { + hypervisorResource.removeSR(conn, newSr); + } + } + } + @Override public AttachAnswer attachIso(final AttachCommand cmd) { final DiskTO disk = cmd.getDisk(); @@ -763,6 +809,9 @@ public class XenServerStorageProcessor implements StorageProcessor { final DataTO destDataTo = cmd.getDestTO(); final int wait = cmd.getWait(); final DataStoreTO srcDataStoreTo = srcDataTo.getDataStore(); + final Connection conn = hypervisorResource.getConnection(); + SR sr = null; + boolean removeSrAfterCopy = false; try { if (srcDataStoreTo instanceof NfsTO && srcDataTo.getObjectType() == DataObjectType.TEMPLATE) { @@ -796,14 +845,11 @@ public class XenServerStorageProcessor implements StorageProcessor { managedStoragePoolRootVolumeSize = details.get(PrimaryDataStoreTO.VOLUME_SIZE); chapInitiatorUsername = details.get(PrimaryDataStoreTO.CHAP_INITIATOR_USERNAME); chapInitiatorSecret = details.get(PrimaryDataStoreTO.CHAP_INITIATOR_SECRET); + removeSrAfterCopy = Boolean.parseBoolean(details.get(PrimaryDataStoreTO.REMOVE_AFTER_COPY)); } } } - final Connection conn = hypervisorResource.getConnection(); - - final SR sr; - if (managed) { final Map details = new HashMap(); @@ -861,9 +907,11 @@ public class XenServerStorageProcessor implements StorageProcessor { newVol.setUuid(uuidToReturn); newVol.setPath(uuidToReturn); + if (physicalSize != null) { newVol.setSize(physicalSize); } + newVol.setFormat(ImageFormat.VHD); return new CopyCmdAnswer(newVol); @@ -875,6 +923,11 @@ public class XenServerStorageProcessor implements StorageProcessor { return new CopyCmdAnswer(msg); } + finally { + if (removeSrAfterCopy && sr != null) { + hypervisorResource.removeSR(conn, sr); + } + } return new CopyCmdAnswer("not implemented yet"); } diff --git a/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/Xenserver625StorageProcessor.java b/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/Xenserver625StorageProcessor.java index e58bade2c4e..02c3197e51b 100644 --- a/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/Xenserver625StorageProcessor.java +++ b/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/Xenserver625StorageProcessor.java @@ -171,6 +171,8 @@ public class Xenserver625StorageProcessor extends XenServerStorageProcessor { final DataStoreTO srcStore = srcData.getDataStore(); 
final Connection conn = hypervisorResource.getConnection(); SR srcSr = null; + SR destSr = null; + boolean removeSrAfterCopy = false; Task task = null; try { @@ -198,7 +200,8 @@ public class Xenserver625StorageProcessor extends XenServerStorageProcessor { final Set setVdis = srcSr.getVDIs(conn); if (setVdis.size() != 1) { - return new CopyCmdAnswer("Expected 1 VDI template but found " + setVdis.size() + " VDI template(s) on: " + uri.getHost() + ":" + uri.getPath() + "/" + volumeDirectory); + return new CopyCmdAnswer("Expected 1 VDI template, but found " + setVdis.size() + " VDI templates on: " + + uri.getHost() + ":" + uri.getPath() + "/" + volumeDirectory); } final VDI srcVdi = setVdis.iterator().next(); @@ -225,11 +228,10 @@ public class Xenserver625StorageProcessor extends XenServerStorageProcessor { managedStoragePoolRootVolumeSize = details.get(PrimaryDataStoreTO.VOLUME_SIZE); chapInitiatorUsername = details.get(PrimaryDataStoreTO.CHAP_INITIATOR_USERNAME); chapInitiatorSecret = details.get(PrimaryDataStoreTO.CHAP_INITIATOR_SECRET); + removeSrAfterCopy = Boolean.parseBoolean(details.get(PrimaryDataStoreTO.REMOVE_AFTER_COPY)); } } - final SR destSr; - if (managed) { details = new HashMap(); @@ -291,9 +293,11 @@ public class Xenserver625StorageProcessor extends XenServerStorageProcessor { newVol.setUuid(uuidToReturn); newVol.setPath(uuidToReturn); + if (physicalSize != null) { newVol.setSize(physicalSize); } + newVol.setFormat(Storage.ImageFormat.VHD); return new CopyCmdAnswer(newVol); @@ -316,6 +320,10 @@ public class Xenserver625StorageProcessor extends XenServerStorageProcessor { if (srcSr != null) { hypervisorResource.removeSR(conn, srcSr); } + + if (removeSrAfterCopy && destSr != null) { + hypervisorResource.removeSR(conn, destSr); + } } return new CopyCmdAnswer("not implemented yet"); diff --git a/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixResizeVolumeCommandWrapper.java b/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixResizeVolumeCommandWrapper.java index 02069397255..b8e0f565df5 100644 --- a/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixResizeVolumeCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixResizeVolumeCommandWrapper.java @@ -27,28 +27,80 @@ import com.cloud.agent.api.storage.ResizeVolumeCommand; import com.cloud.hypervisor.xenserver.resource.CitrixResourceBase; import com.cloud.resource.CommandWrapper; import com.cloud.resource.ResourceWrapper; +import com.cloud.utils.exception.CloudRuntimeException; import com.xensource.xenapi.Connection; +import com.xensource.xenapi.PBD; +import com.xensource.xenapi.SR; import com.xensource.xenapi.VDI; +import java.util.HashSet; +import java.util.Set; + @ResourceWrapper(handles = ResizeVolumeCommand.class) public final class CitrixResizeVolumeCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(CitrixResizeVolumeCommandWrapper.class); @Override public Answer execute(final ResizeVolumeCommand command, final CitrixResourceBase citrixResourceBase) { - final Connection conn = citrixResourceBase.getConnection(); - final String volid = command.getPath(); - final long newSize = command.getNewSize(); + Connection conn = citrixResourceBase.getConnection(); + + String volId = command.getPath(); + long newSize = command.getNewSize(); try { - final VDI vdi = 
citrixResourceBase.getVDIbyUuid(conn, volid); + if (command.isManaged()) { + resizeSr(conn, command); + } + + VDI vdi = citrixResourceBase.getVDIbyUuid(conn, volId); + vdi.resize(conn, newSize); + return new ResizeVolumeAnswer(command, true, "success", newSize); - } catch (final Exception e) { - s_logger.warn("Unable to resize volume", e); - final String error = "failed to resize volume:" + e; + } catch (Exception ex) { + s_logger.warn("Unable to resize volume", ex); + + String error = "Failed to resize volume: " + ex; + return new ResizeVolumeAnswer(command, false, error); } } -} \ No newline at end of file + + private void resizeSr(Connection conn, ResizeVolumeCommand command) { + // If this is managed storage, re-size the SR, too. + // The logical unit/volume has already been re-sized, so the SR needs to fill up the new space. + + String iScsiName = command.get_iScsiName(); + + try { + Set srs = SR.getByNameLabel(conn, iScsiName); + Set allPbds = new HashSet<>(); + + for (SR sr : srs) { + if (!CitrixResourceBase.SRType.LVMOISCSI.equals(sr.getType(conn))) { + continue; + } + + Set pbds = sr.getPBDs(conn); + + if (pbds.size() <= 0) { + s_logger.debug("No PBDs found for the following SR: " + sr.getNameLabel(conn)); + } + + allPbds.addAll(pbds); + } + + for (PBD pbd: allPbds) { + PBD.Record pbdr = pbd.getRecord(conn); + + if (pbdr.currentlyAttached) { + pbd.unplug(conn); + pbd.plug(conn); + } + } + } + catch (Throwable ex) { + throw new CloudRuntimeException("Unable to resize volume: " + ex.getMessage()); + } + } +} diff --git a/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixStartCommandWrapper.java b/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixStartCommandWrapper.java index 241110253c3..073f00096b0 100644 --- a/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixStartCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixStartCommandWrapper.java @@ -108,11 +108,13 @@ public final class CitrixStartCommandWrapper extends CommandWrapper extends AsyncRpcContext { - private final DataObject volume; - public CreateVolumeContext(AsyncCompletionCallback callback, DataObject volume) { super(callback); - this.volume = volume; } } diff --git a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidFirePrimaryDataStoreDriver.java b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidFirePrimaryDataStoreDriver.java index 5f647db1109..af969e168af 100644 --- a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidFirePrimaryDataStoreDriver.java +++ b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidFirePrimaryDataStoreDriver.java @@ -17,32 +17,13 @@ package org.apache.cloudstack.storage.datastore.driver; import java.text.NumberFormat; +import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import javax.inject.Inject; -import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo; -import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; -import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreCapabilities; -import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; -import 
org.apache.cloudstack.engine.subsystem.api.storage.DataStore; -import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; -import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; -import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; -import org.apache.cloudstack.framework.async.AsyncCompletionCallback; -import org.apache.cloudstack.storage.command.CommandResult; -import org.apache.cloudstack.storage.command.CreateObjectAnswer; -import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; -import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO; -import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; -import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; -import org.apache.cloudstack.storage.datastore.util.SolidFireUtil; -import org.apache.cloudstack.storage.to.SnapshotObjectTO; -import org.apache.log4j.Logger; - import com.cloud.agent.api.Answer; import com.cloud.agent.api.to.DataObjectType; import com.cloud.agent.api.to.DataStoreTO; @@ -55,16 +36,19 @@ import com.cloud.dc.dao.ClusterDao; import com.cloud.host.Host; import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; -import com.cloud.storage.Storage.StoragePoolType; +import com.cloud.storage.DataStoreRole; import com.cloud.storage.ResizeVolumePayload; +import com.cloud.storage.Snapshot.State; +import com.cloud.storage.SnapshotVO; import com.cloud.storage.StoragePool; -import com.cloud.storage.Volume; +import com.cloud.storage.VMTemplateStoragePoolVO; import com.cloud.storage.VolumeDetailVO; import com.cloud.storage.VolumeVO; -import com.cloud.storage.SnapshotVO; +import com.cloud.storage.Storage.StoragePoolType; import com.cloud.storage.dao.SnapshotDao; import com.cloud.storage.dao.SnapshotDetailsDao; import com.cloud.storage.dao.SnapshotDetailsVO; +import com.cloud.storage.dao.VMTemplatePoolDao; import com.cloud.storage.dao.VolumeDao; import com.cloud.storage.dao.VolumeDetailsDao; import com.cloud.user.AccountDetailVO; @@ -74,27 +58,64 @@ import com.cloud.user.dao.AccountDao; import com.cloud.utils.db.GlobalLock; import com.cloud.utils.exception.CloudRuntimeException; -public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver { - private static final Logger s_logger = Logger.getLogger(SolidFirePrimaryDataStoreDriver.class); - private static final int s_lowestHypervisorSnapshotReserve = 10; +import com.google.common.base.Preconditions; - @Inject private AccountDao _accountDao; - @Inject private AccountDetailsDao _accountDetailsDao; - @Inject private ClusterDao _clusterDao; - @Inject private ClusterDetailsDao _clusterDetailsDao; - @Inject private HostDao _hostDao; - @Inject private SnapshotDao _snapshotDao; - @Inject private SnapshotDetailsDao _snapshotDetailsDao; - @Inject private PrimaryDataStoreDao _storagePoolDao; - @Inject private StoragePoolDetailsDao _storagePoolDetailsDao; - @Inject private VolumeDao _volumeDao; - @Inject private VolumeDetailsDao _volumeDetailsDao; +import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; +import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreCapabilities; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import 
org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.framework.async.AsyncCompletionCallback; +import org.apache.cloudstack.storage.command.CommandResult; +import org.apache.cloudstack.storage.command.CreateObjectAnswer; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.storage.datastore.util.SolidFireUtil; +import org.apache.cloudstack.storage.to.SnapshotObjectTO; +import org.apache.log4j.Logger; + +public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver { + private static final Logger LOGGER = Logger.getLogger(SolidFirePrimaryDataStoreDriver.class); + private static final int LOCK_TIME_IN_SECONDS = 300; + private static final int LOWEST_HYPERVISOR_SNAPSHOT_RESERVE = 10; + private static final long MIN_IOPS_FOR_TEMPLATE_VOLUME = 100L; + private static final long MAX_IOPS_FOR_TEMPLATE_VOLUME = 20000L; + private static final long MIN_IOPS_FOR_TEMP_VOLUME = 100L; + private static final long MAX_IOPS_FOR_TEMP_VOLUME = 20000L; + private static final long MIN_IOPS_FOR_SNAPSHOT_VOLUME = 100L; + private static final long MAX_IOPS_FOR_SNAPSHOT_VOLUME = 20000L; + + @Inject private AccountDao accountDao; + @Inject private AccountDetailsDao accountDetailsDao; + @Inject private ClusterDao clusterDao; + @Inject private ClusterDetailsDao clusterDetailsDao; + @Inject private DataStoreManager dataStoreMgr; + @Inject private HostDao hostDao; + @Inject private SnapshotDao snapshotDao; + @Inject private SnapshotDetailsDao snapshotDetailsDao; + @Inject private PrimaryDataStoreDao storagePoolDao; + @Inject private StoragePoolDetailsDao storagePoolDetailsDao; + @Inject private VMTemplatePoolDao tmpltPoolDao; + @Inject private VolumeDao volumeDao; + @Inject private VolumeDetailsDao volumeDetailsDao; + @Inject private VolumeDataFactory volumeFactory; @Override public Map getCapabilities() { - Map mapCapabilities = new HashMap(); + Map mapCapabilities = new HashMap<>(); mapCapabilities.put(DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT.toString(), Boolean.TRUE.toString()); + mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_SNAPSHOT.toString(), Boolean.TRUE.toString()); + mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_VOLUME.toString(), Boolean.TRUE.toString()); return mapCapabilities; } @@ -116,7 +137,7 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver { } @Override - public ChapInfo getChapInfo(VolumeInfo volumeInfo) { + public ChapInfo getChapInfo(DataObject dataObject) { return null; } @@ -128,38 +149,42 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver { @Override public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore) { - if (dataObject == null || host == null || dataStore == null) { - return false; - } + Preconditions.checkArgument(dataObject != null, "'dataObject' should not be 'null'"); + Preconditions.checkArgument(host != null, "'host' should not be 'null'"); + 
Preconditions.checkArgument(dataStore != null, "'dataStore' should not be 'null'"); long sfVolumeId = getSolidFireVolumeId(dataObject); long clusterId = host.getClusterId(); long storagePoolId = dataStore.getId(); - ClusterVO cluster = _clusterDao.findById(clusterId); + ClusterVO cluster = clusterDao.findById(clusterId); GlobalLock lock = GlobalLock.getInternLock(cluster.getUuid()); - if (!lock.lock(SolidFireUtil.s_lockTimeInSeconds)) { + if (!lock.lock(LOCK_TIME_IN_SECONDS)) { String errMsg = "Couldn't lock the DB (in grantAccess) on the following string: " + cluster.getUuid(); - s_logger.debug(errMsg); + LOGGER.warn(errMsg); throw new CloudRuntimeException(errMsg); } try { - ClusterDetailsVO clusterDetail = _clusterDetailsDao.findDetail(clusterId, SolidFireUtil.getVagKey(storagePoolId)); + ClusterDetailsVO clusterDetail = clusterDetailsDao.findDetail(clusterId, SolidFireUtil.getVagKey(storagePoolId)); String vagId = clusterDetail != null ? clusterDetail.getValue() : null; - List hosts = _hostDao.findByClusterId(clusterId); + List hosts = hostDao.findByClusterId(clusterId); if (!SolidFireUtil.hostsSupport_iScsi(hosts)) { - return false; + String errMsg = "Not all hosts in the compute cluster support iSCSI."; + + LOGGER.warn(errMsg); + + throw new CloudRuntimeException(errMsg); } - SolidFireUtil.SolidFireConnection sfConnection = SolidFireUtil.getSolidFireConnection(storagePoolId, _storagePoolDetailsDao); + SolidFireUtil.SolidFireConnection sfConnection = SolidFireUtil.getSolidFireConnection(storagePoolId, storagePoolDetailsDao); if (vagId != null) { SolidFireUtil.SolidFireVag sfVag = SolidFireUtil.getSolidFireVag(sfConnection, Long.parseLong(vagId)); @@ -169,7 +194,7 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver { SolidFireUtil.modifySolidFireVag(sfConnection, sfVag.getId(), sfVag.getInitiators(), volumeIds); } else { - SolidFireUtil.placeVolumeInVolumeAccessGroup(sfConnection, sfVolumeId, storagePoolId, cluster.getUuid(), hosts, _clusterDetailsDao); + SolidFireUtil.placeVolumeInVolumeAccessGroup(sfConnection, sfVolumeId, storagePoolId, cluster.getUuid(), hosts, clusterDetailsDao); } return true; @@ -194,25 +219,25 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver { long clusterId = host.getClusterId(); long storagePoolId = dataStore.getId(); - ClusterVO cluster = _clusterDao.findById(clusterId); + ClusterVO cluster = clusterDao.findById(clusterId); GlobalLock lock = GlobalLock.getInternLock(cluster.getUuid()); - if (!lock.lock(SolidFireUtil.s_lockTimeInSeconds)) { + if (!lock.lock(LOCK_TIME_IN_SECONDS)) { String errMsg = "Couldn't lock the DB (in revokeAccess) on the following string: " + cluster.getUuid(); - s_logger.debug(errMsg); + LOGGER.debug(errMsg); throw new CloudRuntimeException(errMsg); } try { - ClusterDetailsVO clusterDetail = _clusterDetailsDao.findDetail(clusterId, SolidFireUtil.getVagKey(storagePoolId)); + ClusterDetailsVO clusterDetail = clusterDetailsDao.findDetail(clusterId, SolidFireUtil.getVagKey(storagePoolId)); String vagId = clusterDetail != null ? 
clusterDetail.getValue() : null; if (vagId != null) { - SolidFireUtil.SolidFireConnection sfConnection = SolidFireUtil.getSolidFireConnection(storagePoolId, _storagePoolDetailsDao); + SolidFireUtil.SolidFireConnection sfConnection = SolidFireUtil.getSolidFireConnection(storagePoolId, storagePoolDetailsDao); SolidFireUtil.SolidFireVag sfVag = SolidFireUtil.getSolidFireVag(sfConnection, Long.parseLong(vagId)); @@ -233,7 +258,7 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver { } if (dataObject.getType() == DataObjectType.SNAPSHOT) { - SnapshotDetailsVO snapshotDetails = _snapshotDetailsDao.findDetail(dataObject.getId(), SolidFireUtil.VOLUME_ID); + SnapshotDetailsVO snapshotDetails = snapshotDetailsDao.findDetail(dataObject.getId(), SolidFireUtil.VOLUME_ID); if (snapshotDetails == null || snapshotDetails.getValue() == null) { throw new CloudRuntimeException("Unable to locate the volume ID associated with the following snapshot ID: " + dataObject.getId()); @@ -242,11 +267,26 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver { return Long.parseLong(snapshotDetails.getValue()); } + if (dataObject.getType() == DataObjectType.TEMPLATE) { + return getVolumeIdFrom_iScsiPath(((TemplateInfo)dataObject).getInstallPath()); + } + throw new CloudRuntimeException("Invalid DataObjectType (" + dataObject.getType() + ") passed to getSolidFireVolumeId(DataObject)"); } + private long getVolumeIdFrom_iScsiPath(String iScsiPath) { + String[] splits = iScsiPath.split("/"); + String iqn = splits[1]; + + String sequenceToSearchFor = "."; + int lastIndexOf = iqn.lastIndexOf(sequenceToSearchFor); + String volumeIdAsString = iqn.substring(lastIndexOf + sequenceToSearchFor.length()); + + return Long.parseLong(volumeIdAsString); + } + private long getDefaultMinIops(long storagePoolId) { - StoragePoolDetailVO storagePoolDetail = _storagePoolDetailsDao.findDetail(storagePoolId, SolidFireUtil.CLUSTER_DEFAULT_MIN_IOPS); + StoragePoolDetailVO storagePoolDetail = storagePoolDetailsDao.findDetail(storagePoolId, SolidFireUtil.CLUSTER_DEFAULT_MIN_IOPS); String clusterDefaultMinIops = storagePoolDetail.getValue(); @@ -254,7 +294,7 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver { } private long getDefaultMaxIops(long storagePoolId) { - StoragePoolDetailVO storagePoolDetail = _storagePoolDetailsDao.findDetail(storagePoolId, SolidFireUtil.CLUSTER_DEFAULT_MAX_IOPS); + StoragePoolDetailVO storagePoolDetail = storagePoolDetailsDao.findDetail(storagePoolId, SolidFireUtil.CLUSTER_DEFAULT_MAX_IOPS); String clusterDefaultMaxIops = storagePoolDetail.getValue(); @@ -262,7 +302,7 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver { } private long getDefaultBurstIops(long storagePoolId, long maxIops) { - StoragePoolDetailVO storagePoolDetail = _storagePoolDetailsDao.findDetail(storagePoolId, SolidFireUtil.CLUSTER_DEFAULT_BURST_IOPS_PERCENT_OF_MAX_IOPS); + StoragePoolDetailVO storagePoolDetail = storagePoolDetailsDao.findDetail(storagePoolId, SolidFireUtil.CLUSTER_DEFAULT_BURST_IOPS_PERCENT_OF_MAX_IOPS); String clusterDefaultBurstIopsPercentOfMaxIops = storagePoolDetail.getValue(); @@ -271,28 +311,54 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver { return (long)(maxIops * fClusterDefaultBurstIopsPercentOfMaxIops); } - private SolidFireUtil.SolidFireVolume createSolidFireVolume(SolidFireUtil.SolidFireConnection sfConnection, VolumeInfo volumeInfo, long sfAccountId) { - long 
storagePoolId = volumeInfo.getDataStore().getId(); + private SolidFireUtil.SolidFireVolume createSolidFireVolume(SolidFireUtil.SolidFireConnection sfConnection, DataObject dataObject, long sfAccountId) { + long storagePoolId = dataObject.getDataStore().getId(); + Long minIops = null; + Long maxIops = null; + Long volumeSize = dataObject.getSize(); + String volumeName = null; - final Iops iops; + final Map mapAttributes; - Long minIops = volumeInfo.getMinIops(); - Long maxIops = volumeInfo.getMaxIops(); + if (dataObject.getType() == DataObjectType.VOLUME) { + VolumeInfo volumeInfo = (VolumeInfo)dataObject; + minIops = volumeInfo.getMinIops(); + maxIops = volumeInfo.getMaxIops(); + volumeSize = getDataObjectSizeIncludingHypervisorSnapshotReserve(volumeInfo, storagePoolDao.findById(storagePoolId)); + volumeName = volumeInfo.getName(); + + mapAttributes = getVolumeAttributes(volumeInfo); + } else if (dataObject.getType() == DataObjectType.TEMPLATE) { + TemplateInfo templateInfo = (TemplateInfo)dataObject; + + minIops = MIN_IOPS_FOR_TEMPLATE_VOLUME; + maxIops = MAX_IOPS_FOR_TEMPLATE_VOLUME; + volumeSize = getDataObjectSizeIncludingHypervisorSnapshotReserve(templateInfo, storagePoolDao.findById(storagePoolId)); + volumeName = templateInfo.getUniqueName(); + + mapAttributes = getTemplateAttributes(templateInfo); + } + else { + throw new CloudRuntimeException("Invalid type passed to createSolidFireVolume: " + dataObject.getType()); + } + + final Iops iops = getIops(minIops, maxIops, storagePoolId); + + long sfVolumeId = SolidFireUtil.createSolidFireVolume(sfConnection, SolidFireUtil.getSolidFireVolumeName(volumeName), sfAccountId, + volumeSize, true, mapAttributes, iops.getMinIops(), iops.getMaxIops(), iops.getBurstIops()); + + return SolidFireUtil.getSolidFireVolume(sfConnection, sfVolumeId); + } + + private Iops getIops(Long minIops, Long maxIops, long storagePoolId) { if (minIops == null || minIops <= 0 || maxIops == null || maxIops <= 0) { long defaultMaxIops = getDefaultMaxIops(storagePoolId); - iops = new Iops(getDefaultMinIops(storagePoolId), defaultMaxIops, getDefaultBurstIops(storagePoolId, defaultMaxIops)); - } else { - iops = new Iops(volumeInfo.getMinIops(), volumeInfo.getMaxIops(), getDefaultBurstIops(storagePoolId, volumeInfo.getMaxIops())); + return new Iops(getDefaultMinIops(storagePoolId), defaultMaxIops, getDefaultBurstIops(storagePoolId, defaultMaxIops)); } - long volumeSize = getVolumeSizeIncludingHypervisorSnapshotReserve(volumeInfo, _storagePoolDao.findById(storagePoolId)); - - long sfVolumeId = SolidFireUtil.createSolidFireVolume(sfConnection, SolidFireUtil.getSolidFireVolumeName(volumeInfo.getName()), sfAccountId, volumeSize, true, - NumberFormat.getInstance().format(volumeInfo.getSize()), iops.getMinIops(), iops.getMaxIops(), iops.getBurstIops()); - - return SolidFireUtil.getSolidFireVolume(sfConnection, sfVolumeId); + return new Iops(minIops, maxIops, getDefaultBurstIops(storagePoolId, maxIops)); } @Override @@ -303,7 +369,7 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver { private long getUsedBytes(StoragePool storagePool, long volumeIdToIgnore) { long usedSpace = 0; - List lstVolumes = _volumeDao.findByPoolId(storagePool.getId(), null); + List lstVolumes = volumeDao.findByPoolId(storagePool.getId(), null); if (lstVolumes != null) { for (VolumeVO volume : lstVolumes) { @@ -311,7 +377,7 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver { continue; } - VolumeDetailVO volumeDetail = 
_volumeDetailsDao.findDetail(volume.getId(), SolidFireUtil.VOLUME_SIZE); + VolumeDetailVO volumeDetail = volumeDetailsDao.findDetail(volume.getId(), SolidFireUtil.VOLUME_SIZE); if (volumeDetail != null && volumeDetail.getValue() != null) { long volumeSize = Long.parseLong(volumeDetail.getValue()); @@ -319,18 +385,22 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver { usedSpace += volumeSize; } else { - SolidFireUtil.SolidFireConnection sfConnection = SolidFireUtil.getSolidFireConnection(storagePool.getId(), _storagePoolDetailsDao); + SolidFireUtil.SolidFireConnection sfConnection = SolidFireUtil.getSolidFireConnection(storagePool.getId(), storagePoolDetailsDao); try { long lVolumeId = Long.parseLong(volume.getFolder()); SolidFireUtil.SolidFireVolume sfVolume = SolidFireUtil.getSolidFireVolume(sfConnection, lVolumeId); + long volumeSize = sfVolume.getTotalSize(); + // SolidFireUtil.VOLUME_SIZE was introduced in 4.5. // To be backward compatible with releases prior to 4.5, call updateVolumeDetails here. // That way if SolidFireUtil.VOLUME_SIZE wasn't put in the volume_details table when the // volume was initially created, it can be placed in volume_details here. - updateVolumeDetails(volume.getId(), sfVolume.getTotalSize()); + updateVolumeDetails(volume.getId(), volumeSize); + + usedSpace += volumeSize; } catch (NumberFormatException ex) { // can be ignored (the "folder" column didn't have a valid "long" in it (hasn't been placed there yet)) @@ -339,15 +409,15 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver { } } - List lstSnapshots = _snapshotDao.listAll(); + List lstSnapshots = snapshotDao.listAll(); if (lstSnapshots != null) { for (SnapshotVO snapshot : lstSnapshots) { - SnapshotDetailsVO snapshotDetails = _snapshotDetailsDao.findDetail(snapshot.getId(), SolidFireUtil.STORAGE_POOL_ID); + SnapshotDetailsVO snapshotDetails = snapshotDetailsDao.findDetail(snapshot.getId(), SolidFireUtil.STORAGE_POOL_ID); // if this snapshot belongs to the storagePool that was passed in if (snapshotDetails != null && snapshotDetails.getValue() != null && Long.parseLong(snapshotDetails.getValue()) == storagePool.getId()) { - snapshotDetails = _snapshotDetailsDao.findDetail(snapshot.getId(), SolidFireUtil.VOLUME_SIZE); + snapshotDetails = snapshotDetailsDao.findDetail(snapshot.getId(), SolidFireUtil.VOLUME_SIZE); if (snapshotDetails != null && snapshotDetails.getValue() != null) { long snapshotSize = Long.parseLong(snapshotDetails.getValue()); @@ -358,6 +428,14 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver { } } + List lstTemplatePoolRefs = tmpltPoolDao.listByPoolId(storagePool.getId()); + + if (lstTemplatePoolRefs != null) { + for (VMTemplateStoragePoolVO templatePoolRef : lstTemplatePoolRefs) { + usedSpace += templatePoolRef.getTemplateSize(); + } + } + return usedSpace; } @@ -365,7 +443,7 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver { public long getUsedIops(StoragePool storagePool) { long usedIops = 0; - List volumes = _volumeDao.findByPoolId(storagePool.getId(), null); + List volumes = volumeDao.findByPoolId(storagePool.getId(), null); if (volumes != null) { for (VolumeVO volume : volumes) { @@ -377,12 +455,25 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver { } @Override - public long getVolumeSizeIncludingHypervisorSnapshotReserve(Volume volume, StoragePool pool) { - long volumeSize = volume.getSize(); - Integer 
hypervisorSnapshotReserve = volume.getHypervisorSnapshotReserve(); + public long getDataObjectSizeIncludingHypervisorSnapshotReserve(DataObject dataObject, StoragePool pool) { + long volumeSize = 0; + if (dataObject.getType() == DataObjectType.VOLUME) { + VolumeInfo volume = (VolumeInfo)dataObject; + + volumeSize = getVolumeSizeIncludingHypervisorSnapshotReserve(volume.getSize(), volume.getHypervisorSnapshotReserve()); + } else if (dataObject.getType() == DataObjectType.TEMPLATE) { + TemplateInfo templateInfo = (TemplateInfo)dataObject; + + volumeSize = (long)(templateInfo.getSize() + templateInfo.getSize() * (LOWEST_HYPERVISOR_SNAPSHOT_RESERVE / 100f)); + } + + return volumeSize; + } + + private long getVolumeSizeIncludingHypervisorSnapshotReserve(long volumeSize, Integer hypervisorSnapshotReserve) { if (hypervisorSnapshotReserve != null) { - hypervisorSnapshotReserve = Math.max(hypervisorSnapshotReserve, s_lowestHypervisorSnapshotReserve); + hypervisorSnapshotReserve = Math.max(hypervisorSnapshotReserve, LOWEST_HYPERVISOR_SNAPSHOT_RESERVE); volumeSize += volumeSize * (hypervisorSnapshotReserve / 100f); } @@ -390,6 +481,29 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver { return volumeSize; } + /** + * This method is only relevant when storagePool is being used with a compute cluster that supports UUID resigning. + */ + @Override + public long getBytesRequiredForTemplate(TemplateInfo templateInfo, StoragePool storagePool) { + List lstTemplatePoolRefs = tmpltPoolDao.listByPoolId(storagePool.getId()); + + if (lstTemplatePoolRefs != null) { + for (VMTemplateStoragePoolVO templatePoolRef : lstTemplatePoolRefs) { + if (templatePoolRef.getTemplateId() == templateInfo.getId()) { + // This indicates that we already have this template stored on this primary storage, so + // we do not require additional space. + return 0; + } + } + } + + // This indicates that we do not have a copy of this template on this primary storage, so + // we need to take it into consideration from a space standpoint (ex. when a new VM is spun + // up and wants to use this particular template for its root disk). 
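+ // For example (illustrative numbers): an uncached 100 GB template is charged as
+ // 100 GB + 100 GB * (LOWEST_HYPERVISOR_SNAPSHOT_RESERVE / 100f) = 110 GB, i.e. the same 10 percent
+ // floor that getDataObjectSizeIncludingHypervisorSnapshotReserve applies to volumes (which use the
+ // larger of their own hypervisorSnapshotReserve and that floor).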
+ return getDataObjectSizeIncludingHypervisorSnapshotReserve(templateInfo, storagePool); + } + private static class Iops { private final long _minIops; private final long _maxIops; @@ -436,7 +550,7 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver { long sfVolumeId = Long.parseLong(volumeInfo.getFolder()); - SolidFireUtil.deleteSolidFireVolume(sfConnection, sfVolumeId); + deleteSolidFireVolume(sfConnection, volumeInfo.getId(), sfVolumeId); } @Override @@ -444,114 +558,234 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver { String iqn = null; String errMsg = null; - if (dataObject.getType() == DataObjectType.VOLUME) { - VolumeInfo volumeInfo = (VolumeInfo)dataObject; + try { + if (dataObject.getType() == DataObjectType.VOLUME) { + iqn = createVolume((VolumeInfo)dataObject, dataStore.getId()); + } else if (dataObject.getType() == DataObjectType.SNAPSHOT) { + createTempVolume((SnapshotInfo)dataObject, dataStore.getId()); + } else if (dataObject.getType() == DataObjectType.TEMPLATE) { + iqn = createTemplateVolume((TemplateInfo)dataObject, dataStore.getId()); + } else { + errMsg = "Invalid DataObjectType (" + dataObject.getType() + ") passed to createAsync"; - long storagePoolId = dataStore.getId(); - - SolidFireUtil.SolidFireConnection sfConnection = SolidFireUtil.getSolidFireConnection(storagePoolId, _storagePoolDetailsDao); - - AccountDetailVO accountDetail = SolidFireUtil.getAccountDetail(volumeInfo.getAccountId(), storagePoolId, _accountDetailsDao); - - if (accountDetail == null || accountDetail.getValue() == null) { - AccountVO account = _accountDao.findById(volumeInfo.getAccountId()); - String sfAccountName = SolidFireUtil.getSolidFireAccountName(account.getUuid(), account.getAccountId()); - SolidFireUtil.SolidFireAccount sfAccount = SolidFireUtil.getSolidFireAccount(sfConnection, sfAccountName); - - if (sfAccount == null) { - sfAccount = createSolidFireAccount(sfConnection, sfAccountName); - } - - SolidFireUtil.updateCsDbWithSolidFireAccountInfo(account.getId(), sfAccount, storagePoolId, _accountDetailsDao); - - accountDetail = SolidFireUtil.getAccountDetail(volumeInfo.getAccountId(), storagePoolId, _accountDetailsDao); + LOGGER.error(errMsg); } + } + catch (Exception ex) { + errMsg = ex.getMessage(); - long sfAccountId = Long.parseLong(accountDetail.getValue()); + LOGGER.error(errMsg); - SolidFireUtil.SolidFireVolume sfVolume = createSolidFireVolume(sfConnection, volumeInfo, sfAccountId); - - iqn = sfVolume.getIqn(); - - VolumeVO volume = _volumeDao.findById(volumeInfo.getId()); - - volume.set_iScsiName(iqn); - volume.setFolder(String.valueOf(sfVolume.getId())); - volume.setPoolType(StoragePoolType.IscsiLUN); - volume.setPoolId(storagePoolId); - - _volumeDao.update(volume.getId(), volume); - - updateVolumeDetails(volume.getId(), sfVolume.getTotalSize()); - - StoragePoolVO storagePool = _storagePoolDao.findById(dataStore.getId()); - - long capacityBytes = storagePool.getCapacityBytes(); - // getUsedBytes(StoragePool) will include the bytes of the newly created volume because - // updateVolumeDetails(long, long) has already been called for this volume - long usedBytes = getUsedBytes(storagePool); - - storagePool.setUsedBytes(usedBytes > capacityBytes ? 
capacityBytes : usedBytes); - - _storagePoolDao.update(storagePoolId, storagePool); - } else { - errMsg = "Invalid DataObjectType (" + dataObject.getType() + ") passed to createAsync"; + if (callback == null) { + throw ex; + } } - // path = iqn - // size is pulled from DataObject instance, if errMsg is null - CreateCmdResult result = new CreateCmdResult(iqn, new Answer(null, errMsg == null, errMsg)); + if (callback != null) { + // path = iqn + // size is pulled from DataObject instance, if errMsg is null + CreateCmdResult result = new CreateCmdResult(iqn, new Answer(null, errMsg == null, errMsg)); - result.setResult(errMsg); + result.setResult(errMsg); - callback.complete(result); + callback.complete(result); + } + } + + private long getCreateSolidFireAccountId(SolidFireUtil.SolidFireConnection sfConnection, long csAccountId, long storagePoolId) { + AccountDetailVO accountDetail = SolidFireUtil.getAccountDetail(csAccountId, storagePoolId, accountDetailsDao); + + if (accountDetail == null || accountDetail.getValue() == null) { + AccountVO account = accountDao.findById(csAccountId); + String sfAccountName = SolidFireUtil.getSolidFireAccountName(account.getUuid(), account.getAccountId()); + SolidFireUtil.SolidFireAccount sfAccount = SolidFireUtil.getSolidFireAccount(sfConnection, sfAccountName); + + if (sfAccount == null) { + sfAccount = createSolidFireAccount(sfConnection, sfAccountName); + } + + SolidFireUtil.updateCsDbWithSolidFireAccountInfo(account.getId(), sfAccount, storagePoolId, accountDetailsDao); + + accountDetail = SolidFireUtil.getAccountDetail(csAccountId, storagePoolId, accountDetailsDao); + } + + return Long.parseLong(accountDetail.getValue()); + } + + private void handleSnapshotDetails(long csSnapshotId, String name, String value) { + snapshotDetailsDao.removeDetail(csSnapshotId, name); + + SnapshotDetailsVO snapshotDetails = new SnapshotDetailsVO(csSnapshotId, name, value, false); + + snapshotDetailsDao.persist(snapshotDetails); + } + + private void addTempVolumeId(long csSnapshotId, String tempVolumeId) { + SnapshotDetailsVO snapshotDetails = snapshotDetailsDao.findDetail(csSnapshotId, SolidFireUtil.VOLUME_ID); + + if (snapshotDetails == null || snapshotDetails.getValue() == null) { + throw new CloudRuntimeException("'addTempVolumeId' should not be invoked unless " + SolidFireUtil.VOLUME_ID + " exists."); + } + + String originalVolumeId = snapshotDetails.getValue(); + + handleSnapshotDetails(csSnapshotId, SolidFireUtil.TEMP_VOLUME_ID, originalVolumeId); + handleSnapshotDetails(csSnapshotId, SolidFireUtil.VOLUME_ID, tempVolumeId); + } + + private void removeTempVolumeId(long csSnapshotId) { + SnapshotDetailsVO snapshotDetails = snapshotDetailsDao.findDetail(csSnapshotId, SolidFireUtil.TEMP_VOLUME_ID); + + if (snapshotDetails == null || snapshotDetails.getValue() == null) { + throw new CloudRuntimeException("'removeTempVolumeId' should not be invoked unless " + SolidFireUtil.TEMP_VOLUME_ID + " exists."); + } + + String originalVolumeId = snapshotDetails.getValue(); + + handleSnapshotDetails(csSnapshotId, SolidFireUtil.VOLUME_ID, originalVolumeId); + + snapshotDetailsDao.remove(snapshotDetails.getId()); + } + + private long getCsIdForCloning(long volumeId, String cloneOf) { + VolumeDetailVO volumeDetail = volumeDetailsDao.findDetail(volumeId, cloneOf); + + if (volumeDetail != null && volumeDetail.getValue() != null) { + return new Long(volumeDetail.getValue()); + } + + return Long.MIN_VALUE; + } + + private boolean shouldTakeSnapshot(long snapshotId) { + SnapshotDetailsVO 
snapshotDetails = snapshotDetailsDao.findDetail(snapshotId, "takeSnapshot"); + + if (snapshotDetails != null && snapshotDetails.getValue() != null) { + return new Boolean(snapshotDetails.getValue()); + } + + return false; + } + + private SolidFireUtil.SolidFireVolume createClone(SolidFireUtil.SolidFireConnection sfConnection, long dataObjectId, VolumeInfo volumeInfo, long sfAccountId, + long storagePoolId, DataObjectType dataObjectType) { + String sfNewVolumeName = volumeInfo.getName(); + + long sfVolumeId = Long.MIN_VALUE; + long sfSnapshotId = Long.MIN_VALUE; + + if (dataObjectType == DataObjectType.SNAPSHOT) { + SnapshotDetailsVO snapshotDetails = snapshotDetailsDao.findDetail(dataObjectId, SolidFireUtil.SNAPSHOT_ID); + + if (snapshotDetails != null && snapshotDetails.getValue() != null) { + sfSnapshotId = Long.parseLong(snapshotDetails.getValue()); + } + + snapshotDetails = snapshotDetailsDao.findDetail(dataObjectId, SolidFireUtil.VOLUME_ID); + + sfVolumeId = Long.parseLong(snapshotDetails.getValue()); + } else if (dataObjectType == DataObjectType.TEMPLATE) { + // get the cached template on this storage + VMTemplateStoragePoolVO templatePoolRef = tmpltPoolDao.findByPoolTemplate(storagePoolId, dataObjectId); + + if (templatePoolRef != null) { + sfVolumeId = Long.parseLong(templatePoolRef.getLocalDownloadPath()); + } + } + + if (sfVolumeId <= 0) { + throw new CloudRuntimeException("Unable to find SolidFire volume for the following data-object ID: " + dataObjectId + + " and data-object type: " + dataObjectType); + } + + final long newSfVolumeId = SolidFireUtil.createSolidFireClone(sfConnection, sfVolumeId, sfSnapshotId, sfAccountId, sfNewVolumeName, + getVolumeAttributes(volumeInfo)); + + final Iops iops = getIops(volumeInfo.getMinIops(), volumeInfo.getMaxIops(), storagePoolId); + + SolidFireUtil.modifySolidFireVolume(sfConnection, newSfVolumeId, null, null, iops.getMinIops(), iops.getMaxIops(), iops.getBurstIops()); + + return SolidFireUtil.getSolidFireVolume(sfConnection, newSfVolumeId); + } + + private Map getVolumeAttributes(VolumeInfo volumeInfo) { + Map mapAttributes = new HashMap<>(); + + mapAttributes.put(SolidFireUtil.CloudStackVolumeId, String.valueOf(volumeInfo.getId())); + mapAttributes.put(SolidFireUtil.CloudStackVolumeSize, NumberFormat.getInstance().format(volumeInfo.getSize())); + + return mapAttributes; + } + + private Map getSnapshotAttributes(SnapshotInfo snapshotInfo) { + Map mapAttributes = new HashMap<>(); + + mapAttributes.put(SolidFireUtil.CloudStackSnapshotId, String.valueOf(snapshotInfo.getId())); + mapAttributes.put(SolidFireUtil.CloudStackSnapshotSize, NumberFormat.getInstance().format(snapshotInfo.getSize())); + + return mapAttributes; + } + + private Map getTemplateAttributes(TemplateInfo templateInfo) { + Map mapAttributes = new HashMap<>(); + + mapAttributes.put(SolidFireUtil.CloudStackTemplateId, String.valueOf(templateInfo.getId())); + mapAttributes.put(SolidFireUtil.CloudStackTemplateSize, NumberFormat.getInstance().format(templateInfo.getSize())); + + return mapAttributes; + } + + private SolidFireUtil.SolidFireVolume createCloneFromSnapshot(SolidFireUtil.SolidFireConnection sfConnection, long csSnapshotId, long sfAccountId) { + SnapshotDetailsVO snapshotDetails = snapshotDetailsDao.findDetail(csSnapshotId, SolidFireUtil.VOLUME_ID); + + long sfVolumeId = Long.parseLong(snapshotDetails.getValue()); + + snapshotDetails = snapshotDetailsDao.findDetail(csSnapshotId, SolidFireUtil.SNAPSHOT_ID); + + long sfSnapshotId = 
Long.parseLong(snapshotDetails.getValue()); + + SolidFireUtil.SolidFireSnapshot sfSnapshot = SolidFireUtil.getSolidFireSnapshot(sfConnection, sfVolumeId, sfSnapshotId); + + long newSfVolumeId = SolidFireUtil.createSolidFireClone(sfConnection, sfVolumeId, sfSnapshotId, sfAccountId, sfSnapshot.getName(), null); + + snapshotDetails = snapshotDetailsDao.findDetail(csSnapshotId, SolidFireUtil.STORAGE_POOL_ID); + + long storagePoolId = Long.parseLong(snapshotDetails.getValue()); + + final Iops iops = getIops(MIN_IOPS_FOR_TEMP_VOLUME, MAX_IOPS_FOR_TEMP_VOLUME, storagePoolId); + + SolidFireUtil.modifySolidFireVolume(sfConnection, newSfVolumeId, null, null, iops.getMinIops(), iops.getMaxIops(), iops.getBurstIops()); + + return SolidFireUtil.getSolidFireVolume(sfConnection, newSfVolumeId); } private void updateVolumeDetails(long volumeId, long sfVolumeSize) { - VolumeDetailVO volumeDetailVo = _volumeDetailsDao.findDetail(volumeId, SolidFireUtil.VOLUME_SIZE); + volumeDetailsDao.removeDetail(volumeId, SolidFireUtil.VOLUME_SIZE); - if (volumeDetailVo == null || volumeDetailVo.getValue() == null) { - volumeDetailVo = new VolumeDetailVO(volumeId, SolidFireUtil.VOLUME_SIZE, String.valueOf(sfVolumeSize), false); + VolumeDetailVO volumeDetailVo = new VolumeDetailVO(volumeId, SolidFireUtil.VOLUME_SIZE, String.valueOf(sfVolumeSize), false); - _volumeDetailsDao.persist(volumeDetailVo); - } + volumeDetailsDao.persist(volumeDetailVo); } @Override public void deleteAsync(DataStore dataStore, DataObject dataObject, AsyncCompletionCallback callback) { String errMsg = null; - if (dataObject.getType() == DataObjectType.VOLUME) { - try { - VolumeInfo volumeInfo = (VolumeInfo)dataObject; - long volumeId = volumeInfo.getId(); - - long storagePoolId = dataStore.getId(); - - SolidFireUtil.SolidFireConnection sfConnection = SolidFireUtil.getSolidFireConnection(storagePoolId, _storagePoolDetailsDao); - - deleteSolidFireVolume(sfConnection, volumeInfo); - - _volumeDetailsDao.removeDetails(volumeId); - - StoragePoolVO storagePool = _storagePoolDao.findById(storagePoolId); - - long usedBytes = getUsedBytes(storagePool, volumeId); - - storagePool.setUsedBytes(usedBytes < 0 ? 
0 : usedBytes); - - _storagePoolDao.update(storagePoolId, storagePool); + try { + if (dataObject.getType() == DataObjectType.VOLUME) { + deleteVolume((VolumeInfo)dataObject, dataStore.getId()); + } else if (dataObject.getType() == DataObjectType.SNAPSHOT) { + deleteSnapshot((SnapshotInfo)dataObject, dataStore.getId()); + } else if (dataObject.getType() == DataObjectType.TEMPLATE) { + deleteTemplate((TemplateInfo)dataObject, dataStore.getId()); + } else { + errMsg = "Invalid DataObjectType (" + dataObject.getType() + ") passed to deleteAsync"; } - catch (Exception ex) { - s_logger.debug(SolidFireUtil.LOG_PREFIX + "Failed to delete SolidFire volume", ex); + } + catch (Exception ex) { + errMsg = ex.getMessage(); - errMsg = ex.getMessage(); - } - } else if (dataObject.getType() == DataObjectType.SNAPSHOT) { - // should return null when no error message - errMsg = deleteSnapshot((SnapshotInfo)dataObject, dataStore.getId()); - } else { - errMsg = "Invalid DataObjectType (" + dataObject.getType() + ") passed to deleteAsync"; + LOGGER.error(errMsg); } CommandResult result = new CommandResult(); @@ -577,49 +811,67 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver { try { VolumeInfo volumeInfo = snapshotInfo.getBaseVolume(); - VolumeVO volumeVO = _volumeDao.findById(volumeInfo.getId()); + VolumeVO volumeVO = volumeDao.findById(volumeInfo.getId()); long sfVolumeId = Long.parseLong(volumeVO.getFolder()); long storagePoolId = volumeVO.getPoolId(); - SolidFireUtil.SolidFireConnection sfConnection = SolidFireUtil.getSolidFireConnection(storagePoolId, _storagePoolDetailsDao); + SolidFireUtil.SolidFireConnection sfConnection = SolidFireUtil.getSolidFireConnection(storagePoolId, storagePoolDetailsDao); SolidFireUtil.SolidFireVolume sfVolume = SolidFireUtil.getSolidFireVolume(sfConnection, sfVolumeId); - StoragePoolVO storagePool = _storagePoolDao.findById(storagePoolId); + StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId); long capacityBytes = storagePool.getCapacityBytes(); - // getUsedBytes(StoragePool) will not include the bytes of the proposed new volume because - // updateSnapshotDetails(long, long, long, long, String) has not yet been called for this new volume + // getUsedBytes(StoragePool) will not include the bytes of the proposed new volume or snapshot because + // updateSnapshotDetails has not yet been called for this new volume or snapshot long usedBytes = getUsedBytes(storagePool); long sfVolumeSize = sfVolume.getTotalSize(); usedBytes += sfVolumeSize; - // For creating a volume, we need to check to make sure a sufficient amount of space remains in the primary storage. - // For the purpose of "charging" these bytes against storage_pool.capacityBytes, we take the full size of the SolidFire volume. + // For creating a volume or a snapshot, we need to check to make sure a sufficient amount of space remains in the primary storage. + // For the purpose of "charging" these bytes against storage_pool.capacity_bytes, we take the full size of the SolidFire volume + // that is serving as the volume the snapshot is of (either a new SolidFire volume or a SolidFire snapshot). 
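The check that follows charges the full size of the source SolidFire volume against storage_pool.capacity_bytes before the backing snapshot or clone actually exists. A minimal standalone sketch of that accounting, using purely hypothetical byte values (none of them come from this change):

public final class SnapshotCapacityCheckSketch {
    public static void main(String[] args) {
        // Hypothetical figures for a storage pool and the SolidFire volume
        // that backs the CloudStack volume being snapshotted.
        long capacityBytes = 1_000L * 1024 * 1024 * 1024; // pool capacity: 1,000 GiB
        long usedBytes = 700L * 1024 * 1024 * 1024;       // bytes already accounted for
        long sfVolumeSize = 120L * 1024 * 1024 * 1024;    // full size of the source SolidFire volume

        // The snapshot is "charged" the full size of the source volume, because
        // the backing object (SolidFire snapshot or clone) can grow to that size.
        long proposedUsedBytes = usedBytes + sfVolumeSize;

        if (proposedUsedBytes > capacityBytes) {
            throw new RuntimeException("Insufficient amount of space remains in this primary storage to take a snapshot");
        }

        System.out.println("Snapshot allowed; used bytes would become " + proposedUsedBytes);
    }
}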
if (usedBytes > capacityBytes) { throw new CloudRuntimeException("Insufficient amount of space remains in this primary storage to take a snapshot"); } storagePool.setUsedBytes(usedBytes); - String volumeName = volumeInfo.getName() + "-" + snapshotInfo.getUuid(); - - long sfNewVolumeId = SolidFireUtil.createSolidFireVolume(sfConnection, volumeName, sfVolume.getAccountId(), sfVolumeSize, - sfVolume.isEnable512e(), NumberFormat.getInstance().format(volumeInfo.getSize()), sfVolume.getMinIops(), 50000, 75000); - - // Now that we have successfully created a volume, update the space usage in the storage_pool table - // (even though storage_pool.used_bytes is likely no longer in use). - _storagePoolDao.update(storagePoolId, storagePool); - - SolidFireUtil.SolidFireVolume sfNewVolume = SolidFireUtil.getSolidFireVolume(sfConnection, sfNewVolumeId); - - updateSnapshotDetails(snapshotInfo.getId(), sfNewVolumeId, storagePoolId, sfVolumeSize, sfNewVolume.getIqn()); - SnapshotObjectTO snapshotObjectTo = (SnapshotObjectTO)snapshotInfo.getTO(); - snapshotObjectTo.setPath(String.valueOf(sfNewVolumeId)); + if (shouldTakeSnapshot(snapshotInfo.getId())) { + // We are supposed to take a SolidFire snapshot to serve as the back-end for our CloudStack volume snapshot. + + String sfNewSnapshotName = volumeInfo.getName() + "-" + snapshotInfo.getUuid(); + + long sfNewSnapshotId = SolidFireUtil.createSolidFireSnapshot(sfConnection, sfVolumeId, sfNewSnapshotName, getSnapshotAttributes(snapshotInfo)); + + updateSnapshotDetails(snapshotInfo.getId(), sfVolumeId, sfNewSnapshotId, storagePoolId, sfVolumeSize); + + snapshotObjectTo.setPath("SfSnapshotId=" + sfNewSnapshotId); + } + else { + // We are supposed to create a new SolidFire volume to serve as the back-end for our CloudStack volume snapshot. + + String sfNewVolumeName = volumeInfo.getName() + "-" + snapshotInfo.getUuid(); + + final Iops iops = getIops(MIN_IOPS_FOR_SNAPSHOT_VOLUME, MAX_IOPS_FOR_SNAPSHOT_VOLUME, storagePoolId); + + long sfNewVolumeId = SolidFireUtil.createSolidFireVolume(sfConnection, sfNewVolumeName, sfVolume.getAccountId(), sfVolumeSize, + sfVolume.isEnable512e(), getSnapshotAttributes(snapshotInfo), iops.getMinIops(), iops.getMaxIops(), iops.getBurstIops()); + + SolidFireUtil.SolidFireVolume sfNewVolume = SolidFireUtil.getSolidFireVolume(sfConnection, sfNewVolumeId); + + updateSnapshotDetails(snapshotInfo.getId(), sfNewVolumeId, storagePoolId, sfVolumeSize, sfNewVolume.getIqn()); + + snapshotObjectTo.setPath("SfVolumeId=" + sfNewVolumeId); + } + + // Now that we have successfully created a volume or a snapshot, update the space usage in the cloud.storage_pool table + // (even though cloud.storage_pool.used_bytes is likely no longer in use). 
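takeSnapshot records which kind of SolidFire object backs the CloudStack volume snapshot by writing either "SfSnapshotId=<id>" or "SfVolumeId=<id>" into the snapshot's path. The helper below is purely illustrative and not part of this change; it only shows how a consumer of that path could tell the two formats apart:

public final class SnapshotPathSketch {
    // Hypothetical helper; the prefixes mirror the values written by takeSnapshot.
    private static final String SF_SNAPSHOT_PREFIX = "SfSnapshotId=";
    private static final String SF_VOLUME_PREFIX = "SfVolumeId=";

    static boolean isBackedBySolidFireSnapshot(String path) {
        return path != null && path.startsWith(SF_SNAPSHOT_PREFIX);
    }

    static long extractId(String path) {
        String prefix = isBackedBySolidFireSnapshot(path) ? SF_SNAPSHOT_PREFIX : SF_VOLUME_PREFIX;
        return Long.parseLong(path.substring(prefix.length()));
    }

    public static void main(String[] args) {
        System.out.println(extractId("SfSnapshotId=1234")); // backed by a SolidFire snapshot
        System.out.println(extractId("SfVolumeId=5678"));   // backed by a SolidFire volume
    }
}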
+ storagePoolDao.update(storagePoolId, storagePool); CreateObjectAnswer createObjectAnswer = new CreateObjectAnswer(snapshotObjectTo); @@ -628,7 +880,7 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver { result.setResult(null); } catch (Exception ex) { - s_logger.debug(SolidFireUtil.LOG_PREFIX + "Failed to take CloudStack snapshot: " + snapshotInfo.getId(), ex); + LOGGER.debug(SolidFireUtil.LOG_PREFIX + "Failed to take CloudStack snapshot: " + snapshotInfo.getId(), ex); result = new CreateCmdResult(null, new CreateObjectAnswer(ex.toString())); @@ -638,69 +890,293 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver { callback.complete(result); } - private void updateSnapshotDetails(long csSnapshotId, long sfNewVolumeId, long storagePoolId, long sfNewVolumeSize, String sfNewVolumeIqn) { + private void updateSnapshotDetails(long csSnapshotId, long sfVolumeId, long sfNewSnapshotId, long storagePoolId, long sfNewVolumeSize) { SnapshotDetailsVO snapshotDetail = new SnapshotDetailsVO(csSnapshotId, SolidFireUtil.VOLUME_ID, - String.valueOf(sfNewVolumeId), + String.valueOf(sfVolumeId), false); - _snapshotDetailsDao.persist(snapshotDetail); + snapshotDetailsDao.persist(snapshotDetail); + + snapshotDetail = new SnapshotDetailsVO(csSnapshotId, + SolidFireUtil.SNAPSHOT_ID, + String.valueOf(sfNewSnapshotId), + false); + + snapshotDetailsDao.persist(snapshotDetail); snapshotDetail = new SnapshotDetailsVO(csSnapshotId, SolidFireUtil.STORAGE_POOL_ID, String.valueOf(storagePoolId), false); - _snapshotDetailsDao.persist(snapshotDetail); + snapshotDetailsDao.persist(snapshotDetail); snapshotDetail = new SnapshotDetailsVO(csSnapshotId, SolidFireUtil.VOLUME_SIZE, String.valueOf(sfNewVolumeSize), false); - _snapshotDetailsDao.persist(snapshotDetail); + snapshotDetailsDao.persist(snapshotDetail); + } + + private void updateSnapshotDetails(long csSnapshotId, long sfNewVolumeId, long storagePoolId, long sfNewVolumeSize, String sfNewVolumeIqn) { + SnapshotDetailsVO snapshotDetail = new SnapshotDetailsVO(csSnapshotId, + SolidFireUtil.VOLUME_ID, + String.valueOf(sfNewVolumeId), + false); + + snapshotDetailsDao.persist(snapshotDetail); + + snapshotDetail = new SnapshotDetailsVO(csSnapshotId, + SolidFireUtil.STORAGE_POOL_ID, + String.valueOf(storagePoolId), + false); + + snapshotDetailsDao.persist(snapshotDetail); + + snapshotDetail = new SnapshotDetailsVO(csSnapshotId, + SolidFireUtil.VOLUME_SIZE, + String.valueOf(sfNewVolumeSize), + false); + + snapshotDetailsDao.persist(snapshotDetail); snapshotDetail = new SnapshotDetailsVO(csSnapshotId, DiskTO.IQN, sfNewVolumeIqn, false); - _snapshotDetailsDao.persist(snapshotDetail); + snapshotDetailsDao.persist(snapshotDetail); } - // return null for no error message - private String deleteSnapshot(SnapshotInfo snapshotInfo, long storagePoolId) { - String errMsg = null; + private String createVolume(VolumeInfo volumeInfo, long storagePoolId) { + verifySufficientBytesForStoragePool(volumeInfo, storagePoolId); + verifySufficientIopsForStoragePool(volumeInfo.getMinIops() != null ? 
volumeInfo.getMinIops() : getDefaultMinIops(storagePoolId), storagePoolId); - long snapshotId = snapshotInfo.getId(); + SolidFireUtil.SolidFireConnection sfConnection = SolidFireUtil.getSolidFireConnection(storagePoolId, storagePoolDetailsDao); + + long sfAccountId = getCreateSolidFireAccountId(sfConnection, volumeInfo.getAccountId(), storagePoolId); + + long csSnapshotId = getCsIdForCloning(volumeInfo.getId(), "cloneOfSnapshot"); + long csTemplateId = getCsIdForCloning(volumeInfo.getId(), "cloneOfTemplate"); + + SolidFireUtil.SolidFireVolume sfVolume; + + if (csSnapshotId > 0) { + // We are supposed to create a clone of the underlying volume or snapshot that supports the CloudStack snapshot. + sfVolume = createClone(sfConnection, csSnapshotId, volumeInfo, sfAccountId, storagePoolId, DataObjectType.SNAPSHOT); + } else if (csTemplateId > 0) { + // Clone from template. + sfVolume = createClone(sfConnection, csTemplateId, volumeInfo, sfAccountId, storagePoolId, DataObjectType.TEMPLATE); + + long volumeSize = getDataObjectSizeIncludingHypervisorSnapshotReserve(volumeInfo, storagePoolDao.findById(storagePoolId)); + + if (volumeSize > sfVolume.getTotalSize()) { + // Expand the volume to include HSR. + SolidFireUtil.modifySolidFireVolume(sfConnection, sfVolume.getId(), volumeSize, getVolumeAttributes(volumeInfo), + sfVolume.getMinIops(), sfVolume.getMaxIops(), sfVolume.getBurstIops()); + + // Get the SolidFire volume from the SAN again because we just updated its size. + sfVolume = SolidFireUtil.getSolidFireVolume(sfConnection, sfVolume.getId()); + } + } + else { + sfVolume = createSolidFireVolume(sfConnection, volumeInfo, sfAccountId); + } + + String iqn = sfVolume.getIqn(); + + VolumeVO volume = volumeDao.findById(volumeInfo.getId()); + + volume.set_iScsiName(iqn); + volume.setFolder(String.valueOf(sfVolume.getId())); + volume.setPoolType(StoragePoolType.IscsiLUN); + volume.setPoolId(storagePoolId); + + volumeDao.update(volume.getId(), volume); + + updateVolumeDetails(volume.getId(), sfVolume.getTotalSize()); + + StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId); + + long capacityBytes = storagePool.getCapacityBytes(); + // getUsedBytes(StoragePool) will include the bytes of the newly created volume because + // updateVolumeDetails(long, long) has already been called for this volume + long usedBytes = getUsedBytes(storagePool); + + storagePool.setUsedBytes(usedBytes > capacityBytes ? 
capacityBytes : usedBytes); + + storagePoolDao.update(storagePoolId, storagePool); + + return iqn; + } + + private void createTempVolume(SnapshotInfo snapshotInfo, long storagePoolId) { + long csSnapshotId = snapshotInfo.getId(); + + SnapshotDetailsVO snapshotDetails = snapshotDetailsDao.findDetail(csSnapshotId, SolidFireUtil.SNAPSHOT_ID); + + if (snapshotDetails == null || snapshotDetails.getValue() == null) { + throw new CloudRuntimeException("'createTempVolume(SnapshotInfo, long)' should not be invoked unless " + SolidFireUtil.SNAPSHOT_ID + " exists."); + } + + SolidFireUtil.SolidFireConnection sfConnection = SolidFireUtil.getSolidFireConnection(storagePoolId, storagePoolDetailsDao); + + snapshotDetails = snapshotDetailsDao.findDetail(csSnapshotId, "tempVolume"); + + if (snapshotDetails != null && snapshotDetails.getValue() != null && snapshotDetails.getValue().equalsIgnoreCase("create")) { + long sfAccountId = getCreateSolidFireAccountId(sfConnection, snapshotInfo.getAccountId(), storagePoolId); + + SolidFireUtil.SolidFireVolume sfVolume = createCloneFromSnapshot(sfConnection, csSnapshotId, sfAccountId); + + addTempVolumeId(csSnapshotId, String.valueOf(sfVolume.getId())); + + handleSnapshotDetails(csSnapshotId, DiskTO.IQN, sfVolume.getIqn()); + } + else if (snapshotDetails != null && snapshotDetails.getValue() != null && snapshotDetails.getValue().equalsIgnoreCase("delete")) { + snapshotDetails = snapshotDetailsDao.findDetail(csSnapshotId, SolidFireUtil.VOLUME_ID); + + SolidFireUtil.deleteSolidFireVolume(sfConnection, Long.parseLong(snapshotDetails.getValue())); + + removeTempVolumeId(csSnapshotId); + + snapshotDetails = snapshotDetailsDao.findDetail(csSnapshotId, DiskTO.IQN); + + snapshotDetailsDao.remove(snapshotDetails.getId()); + } + else { + throw new CloudRuntimeException("Invalid state in 'createTempVolume(SnapshotInfo, long)'"); + } + } + + private String createTemplateVolume(TemplateInfo templateInfo, long storagePoolId) { + verifySufficientBytesForStoragePool(templateInfo, storagePoolId); + + SolidFireUtil.SolidFireConnection sfConnection = SolidFireUtil.getSolidFireConnection(storagePoolId, storagePoolDetailsDao); + + long sfAccountId = getCreateSolidFireAccountId(sfConnection, templateInfo.getAccountId(), storagePoolId); + + SolidFireUtil.SolidFireVolume sfVolume = createSolidFireVolume(sfConnection, templateInfo, sfAccountId); + + String iqn = sfVolume.getIqn(); + + VMTemplateStoragePoolVO templatePoolRef = tmpltPoolDao.findByPoolTemplate(storagePoolId, templateInfo.getId()); + + templatePoolRef.setInstallPath(iqn); + templatePoolRef.setLocalDownloadPath(Long.toString(sfVolume.getId())); + templatePoolRef.setTemplateSize(sfVolume.getTotalSize()); + + tmpltPoolDao.update(templatePoolRef.getId(), templatePoolRef); + + StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId); + + long capacityBytes = storagePool.getCapacityBytes(); + // getUsedBytes(StoragePool) will include the bytes of the newly created template volume because + // _tmpltPoolDao.update(Long, VMTemplateStoragePoolVO) has already been invoked + long usedBytes = getUsedBytes(storagePool); + + storagePool.setUsedBytes(usedBytes > capacityBytes ? 
capacityBytes : usedBytes); + + storagePoolDao.update(storagePoolId, storagePool); + + return iqn; + } + + private void deleteVolume(VolumeInfo volumeInfo, long storagePoolId) { + try { + long volumeId = volumeInfo.getId(); + + SolidFireUtil.SolidFireConnection sfConnection = SolidFireUtil.getSolidFireConnection(storagePoolId, storagePoolDetailsDao); + + deleteSolidFireVolume(sfConnection, volumeInfo); + + volumeDetailsDao.removeDetails(volumeId); + + StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId); + + long usedBytes = getUsedBytes(storagePool, volumeId); + + storagePool.setUsedBytes(usedBytes < 0 ? 0 : usedBytes); + + storagePoolDao.update(storagePoolId, storagePool); + } + catch (Exception ex) { + LOGGER.debug(SolidFireUtil.LOG_PREFIX + "Failed to delete SolidFire volume. CloudStack volume ID: " + volumeInfo.getId(), ex); + + throw ex; + } + } + + private void deleteSnapshot(SnapshotInfo snapshotInfo, long storagePoolId) { + long csSnapshotId = snapshotInfo.getId(); try { - SolidFireUtil.SolidFireConnection sfConnection = SolidFireUtil.getSolidFireConnection(storagePoolId, _storagePoolDetailsDao); + SolidFireUtil.SolidFireConnection sfConnection = SolidFireUtil.getSolidFireConnection(storagePoolId, storagePoolDetailsDao); - SnapshotDetailsVO snapshotDetails = _snapshotDetailsDao.findDetail(snapshotId, SolidFireUtil.VOLUME_ID); + SnapshotDetailsVO snapshotDetails = snapshotDetailsDao.findDetail(csSnapshotId, SolidFireUtil.SNAPSHOT_ID); - long volumeId = Long.parseLong(snapshotDetails.getValue()); + if (snapshotDetails != null && snapshotDetails.getValue() != null) { + // A SolidFire snapshot is being used to support the CloudStack volume snapshot. - SolidFireUtil.deleteSolidFireVolume(sfConnection, volumeId); + long sfSnapshotId = Long.parseLong(snapshotDetails.getValue()); - _snapshotDetailsDao.removeDetails(snapshotId); + deleteSolidFireSnapshot(sfConnection, csSnapshotId, sfSnapshotId); + } + else { + // A SolidFire volume is being used to support the CloudStack volume snapshot. - StoragePoolVO storagePool = _storagePoolDao.findById(storagePoolId); + snapshotDetails = snapshotDetailsDao.findDetail(csSnapshotId, SolidFireUtil.VOLUME_ID); + + long sfVolumeId = Long.parseLong(snapshotDetails.getValue()); + + SolidFireUtil.deleteSolidFireVolume(sfConnection, sfVolumeId); + } + + snapshotDetailsDao.removeDetails(csSnapshotId); + + StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId); // getUsedBytes(StoragePool) will not include the snapshot to delete because it has already been deleted by this point long usedBytes = getUsedBytes(storagePool); storagePool.setUsedBytes(usedBytes < 0 ? 0 : usedBytes); - _storagePoolDao.update(storagePoolId, storagePool); + storagePoolDao.update(storagePoolId, storagePool); } catch (Exception ex) { - s_logger.debug(SolidFireUtil.LOG_PREFIX + "Failed to delete SolidFire volume. CloudStack snapshot ID: " + snapshotId, ex); + LOGGER.debug(SolidFireUtil.LOG_PREFIX + "Issue in 'deleteSnapshot(SnapshotInfo, long)'. 
CloudStack snapshot ID: " + csSnapshotId, ex); - errMsg = ex.getMessage(); + throw ex; } + } - return errMsg; + private void deleteTemplate(TemplateInfo template, long storagePoolId) { + try { + SolidFireUtil.SolidFireConnection sfConnection = SolidFireUtil.getSolidFireConnection(storagePoolId, storagePoolDetailsDao); + + long sfTemplateVolumeId = getVolumeIdFrom_iScsiPath(template.getInstallPath()); + + SolidFireUtil.deleteSolidFireVolume(sfConnection, sfTemplateVolumeId); + + VMTemplateStoragePoolVO templatePoolRef = tmpltPoolDao.findByPoolTemplate(storagePoolId, template.getId()); + + tmpltPoolDao.remove(templatePoolRef.getId()); + + StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId); + + // getUsedBytes(StoragePool) will not include the template to delete because the "template_spool_ref" table has already been updated by this point + long usedBytes = getUsedBytes(storagePool); + + storagePool.setUsedBytes(usedBytes < 0 ? 0 : usedBytes); + + storagePoolDao.update(storagePoolId, storagePool); + } + catch (Exception ex) { + LOGGER.debug(SolidFireUtil.LOG_PREFIX + "Failed to delete SolidFire template volume. CloudStack template ID: " + template.getId(), ex); + + throw ex; + } } @Override @@ -720,26 +1196,49 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver { long sfVolumeId = Long.parseLong(volumeInfo.getFolder()); ResizeVolumePayload payload = (ResizeVolumePayload)volumeInfo.getpayload(); - SolidFireUtil.SolidFireConnection sfConnection = SolidFireUtil.getSolidFireConnection(storagePoolId, _storagePoolDetailsDao); + SolidFireUtil.SolidFireConnection sfConnection = SolidFireUtil.getSolidFireConnection(storagePoolId, storagePoolDetailsDao); SolidFireUtil.SolidFireVolume sfVolume = SolidFireUtil.getSolidFireVolume(sfConnection, sfVolumeId); verifySufficientIopsForStoragePool(storagePoolId, volumeInfo.getId(), payload.newMinIops); + verifySufficientBytesForStoragePool(storagePoolId, volumeInfo.getId(), payload.newSize, payload.newHypervisorSnapshotReserve); - SolidFireUtil.modifySolidFireVolume(sfConnection, sfVolumeId, sfVolume.getTotalSize(), NumberFormat.getInstance().format(payload.newSize), + long sfNewVolumeSize = sfVolume.getTotalSize(); + + Integer hsr = volumeInfo.getHypervisorSnapshotReserve(); + + if (payload.newSize != null || payload.newHypervisorSnapshotReserve != null) { + if (payload.newHypervisorSnapshotReserve != null) { + if (hsr != null) { + if (payload.newHypervisorSnapshotReserve > hsr) { + hsr = payload.newHypervisorSnapshotReserve; + } + } + else { + hsr = payload.newHypervisorSnapshotReserve; + } + } + + sfNewVolumeSize = getVolumeSizeIncludingHypervisorSnapshotReserve(payload.newSize, hsr); + } + + Map mapAttributes = new HashMap<>(); + + mapAttributes.put(SolidFireUtil.CloudStackVolumeId, String.valueOf(volumeInfo.getId())); + mapAttributes.put(SolidFireUtil.CloudStackVolumeSize, NumberFormat.getInstance().format(payload.newSize)); + + SolidFireUtil.modifySolidFireVolume(sfConnection, sfVolumeId, sfNewVolumeSize, mapAttributes, payload.newMinIops, payload.newMaxIops, getDefaultBurstIops(storagePoolId, payload.newMaxIops)); - VolumeVO volume = _volumeDao.findById(volumeInfo.getId()); + VolumeVO volume = volumeDao.findById(volumeInfo.getId()); volume.setMinIops(payload.newMinIops); volume.setMaxIops(payload.newMaxIops); + volume.setHypervisorSnapshotReserve(hsr); - _volumeDao.update(volume.getId(), volume); + volumeDao.update(volume.getId(), volume); // SolidFireUtil.VOLUME_SIZE was introduced in 4.5. 
- // To be backward compatible with releases prior to 4.5, call updateVolumeDetails here. - // That way if SolidFireUtil.VOLUME_SIZE wasn't put in the volume_details table when the - // volume was initially created, it can be placed in volume_details if the volume is resized. - updateVolumeDetails(volume.getId(), sfVolume.getTotalSize()); + updateVolumeDetails(volume.getId(), sfNewVolumeSize); } else { errMsg = "Invalid DataObjectType (" + dataObject.getType() + ") passed to resize"; } @@ -751,21 +1250,140 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver { callback.complete(result); } + private void verifySufficientBytesForStoragePool(long requestedBytes, long storagePoolId) { + StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId); + + long capacityBytes = storagePool.getCapacityBytes(); + long usedBytes = getUsedBytes(storagePool); + + usedBytes += requestedBytes; + + if (usedBytes > capacityBytes) { + throw new CloudRuntimeException("Insufficient amount of space remains in this primary storage"); + } + } + + private void verifySufficientBytesForStoragePool(DataObject dataObject, long storagePoolId) { + StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId); + + long requestedBytes = getDataObjectSizeIncludingHypervisorSnapshotReserve(dataObject, storagePool); + + verifySufficientBytesForStoragePool(requestedBytes, storagePoolId); + } + + private void verifySufficientBytesForStoragePool(long storagePoolId, long volumeId, long newSize, Integer newHypervisorSnapshotReserve) { + DataStore primaryDataStore = dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary); + VolumeInfo volumeInfo = volumeFactory.getVolume(volumeId, primaryDataStore); + StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId); + long currentSizeWithHsr = getDataObjectSizeIncludingHypervisorSnapshotReserve(volumeInfo, storagePool); + + newHypervisorSnapshotReserve = newHypervisorSnapshotReserve == null ? 
LOWEST_HYPERVISOR_SNAPSHOT_RESERVE : + Math.max(newHypervisorSnapshotReserve, LOWEST_HYPERVISOR_SNAPSHOT_RESERVE); + + long newSizeWithHsr = (long)(newSize + newSize * (newHypervisorSnapshotReserve / 100f)); + + if (newSizeWithHsr < currentSizeWithHsr) { + throw new CloudRuntimeException("Storage pool " + storagePoolId + " does not support shrinking a volume."); + } + + long availableBytes = storagePool.getCapacityBytes() - getUsedBytes(storagePool); + + if ((newSizeWithHsr - currentSizeWithHsr) > availableBytes) { + throw new CloudRuntimeException("Storage pool " + storagePoolId + " does not have enough space to expand the volume."); + } + } + + private void verifySufficientIopsForStoragePool(long requestedIops, long storagePoolId) { + StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId); + + long usedIops = getUsedIops(storagePool); + long capacityIops = storagePool.getCapacityIops(); + + if (usedIops + requestedIops > capacityIops) { + throw new CloudRuntimeException("Insufficient number of IOPS available in this storage pool"); + } + } + private void verifySufficientIopsForStoragePool(long storagePoolId, long volumeId, long newMinIops) { - StoragePoolVO storagePool = _storagePoolDao.findById(storagePoolId); - VolumeVO volume = _volumeDao.findById(volumeId); + VolumeVO volume = volumeDao.findById(volumeId); long currentMinIops = volume.getMinIops(); long diffInMinIops = newMinIops - currentMinIops; // if the desire is for more IOPS if (diffInMinIops > 0) { - long usedIops = getUsedIops(storagePool); - long capacityIops = storagePool.getCapacityIops(); + verifySufficientIopsForStoragePool(diffInMinIops, storagePoolId); + } + } - if (usedIops + diffInMinIops > capacityIops) { - throw new CloudRuntimeException("Insufficient number of IOPS available in this storage pool"); + private void deleteSolidFireVolume(SolidFireUtil.SolidFireConnection sfConnection, long csVolumeId, long sfVolumeId) { + List lstSnapshots = getNonDestroyedSnapshots(csVolumeId); + + boolean deleteVolume = true; + + for (SnapshotVO snapshot : lstSnapshots) { + SnapshotDetailsVO snapshotDetails = snapshotDetailsDao.findDetail(snapshot.getId(), SolidFireUtil.SNAPSHOT_ID); + + if (snapshotDetails != null && snapshotDetails.getValue() != null) { + deleteVolume = false; + + break; + } + } + + if (deleteVolume) { + SolidFireUtil.deleteSolidFireVolume(sfConnection, sfVolumeId); + } + } + + private void deleteSolidFireSnapshot(SolidFireUtil.SolidFireConnection sfConnection, long csSnapshotId, long sfSnapshotId) { + SolidFireUtil.deleteSolidFireSnapshot(sfConnection, sfSnapshotId); + + SnapshotVO snapshot = snapshotDao.findById(csSnapshotId); + VolumeVO volume = volumeDao.findById(snapshot.getVolumeId()); + + if (volume == null) { // if the CloudStack volume has been deleted + List lstSnapshots = getNonDestroyedSnapshots(snapshot.getVolumeId()); + + List lstSnapshots2 = new ArrayList<>(); + + for (SnapshotVO snapshotVo : lstSnapshots) { + // The CloudStack volume snapshot has not yet been set to the DESTROYED state, so check to make + // sure snapshotVo.getId() != csSnapshotId when determining if any volume snapshots remain for the given CloudStack volume. + if (snapshotVo.getId() != csSnapshotId) { + SnapshotDetailsVO snapshotDetails = snapshotDetailsDao.findDetail(snapshotVo.getId(), SolidFireUtil.SNAPSHOT_ID); + + // We are only interested here in volume snapshots that make use of SolidFire snapshots (as opposed to ones + // that make use of SolidFire volumes). 
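The resize validation above combines the requested size with the hypervisor snapshot reserve (HSR): a null or too-small reserve is clamped up to LOWEST_HYPERVISOR_SNAPSHOT_RESERVE, and the size charged against the pool becomes newSize plus newSize * hsr / 100. The sketch below works that arithmetic through with an assumed minimum reserve of 10 percent; the real constant is defined elsewhere in the driver and may differ:

public final class HsrSizingSketch {
    // Assumed value, for illustration only.
    private static final int LOWEST_HYPERVISOR_SNAPSHOT_RESERVE = 10;

    static long sizeWithHsr(long newSize, Integer newHsr) {
        int hsr = newHsr == null
                ? LOWEST_HYPERVISOR_SNAPSHOT_RESERVE
                : Math.max(newHsr, LOWEST_HYPERVISOR_SNAPSHOT_RESERVE);

        // Grow the requested size by the reserve percentage, as in verifySufficientBytesForStoragePool.
        return (long) (newSize + newSize * (hsr / 100f));
    }

    public static void main(String[] args) {
        long newSize = 100L * 1024 * 1024 * 1024; // 100 GiB requested by the user

        // 100 GiB with a 25% reserve is charged as 125 GiB against the pool.
        System.out.println(sizeWithHsr(newSize, 25));

        // A null (or too-small) reserve is clamped to the minimum.
        System.out.println(sizeWithHsr(newSize, null));
    }
}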
+ if (snapshotDetails != null && snapshotDetails.getValue() != null) { + lstSnapshots2.add(snapshotVo); + } + } + } + + if (lstSnapshots2.isEmpty()) { + volume = volumeDao.findByIdIncludingRemoved(snapshot.getVolumeId()); + + SolidFireUtil.deleteSolidFireVolume(sfConnection, Long.parseLong(volume.getFolder())); } } } + + private List getNonDestroyedSnapshots(long csVolumeId) { + List lstSnapshots = snapshotDao.listByVolumeId(csVolumeId); + + if (lstSnapshots == null) { + lstSnapshots = new ArrayList<>(); + } + + List lstSnapshots2 = new ArrayList<>(); + + for (SnapshotVO snapshot : lstSnapshots) { + if (!State.Destroyed.equals(snapshot.getState())) { + lstSnapshots2.add(snapshot); + } + } + + return lstSnapshots2; + } } diff --git a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/lifecycle/SolidFirePrimaryDataStoreLifeCycle.java b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/lifecycle/SolidFirePrimaryDataStoreLifeCycle.java index f89c97a8999..c47411e39a3 100644 --- a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/lifecycle/SolidFirePrimaryDataStoreLifeCycle.java +++ b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/lifecycle/SolidFirePrimaryDataStoreLifeCycle.java @@ -33,6 +33,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCy import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters; import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.datastore.util.SolidFireUtil; import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper; @@ -49,23 +50,27 @@ import com.cloud.storage.Storage.StoragePoolType; import com.cloud.storage.dao.SnapshotDao; import com.cloud.storage.dao.SnapshotDetailsDao; import com.cloud.storage.dao.SnapshotDetailsVO; +import com.cloud.storage.dao.VMTemplatePoolDao; import com.cloud.storage.SnapshotVO; import com.cloud.storage.StorageManager; import com.cloud.storage.StoragePoolAutomation; +import com.cloud.storage.VMTemplateStoragePoolVO; import com.cloud.utils.exception.CloudRuntimeException; public class SolidFirePrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeCycle { private static final Logger s_logger = Logger.getLogger(SolidFirePrimaryDataStoreLifeCycle.class); @Inject private CapacityManager _capacityMgr; - @Inject private DataCenterDao zoneDao; - @Inject private PrimaryDataStoreDao storagePoolDao; - @Inject private PrimaryDataStoreHelper dataStoreHelper; + @Inject private DataCenterDao _zoneDao; + @Inject private PrimaryDataStoreDao _storagePoolDao; + @Inject private PrimaryDataStoreHelper _dataStoreHelper; @Inject private ResourceManager _resourceMgr; @Inject private SnapshotDao _snapshotDao; @Inject private SnapshotDetailsDao _snapshotDetailsDao; @Inject private StorageManager _storageMgr; - @Inject private StoragePoolAutomation storagePoolAutomation; + @Inject private StoragePoolAutomation _storagePoolAutomation; + @Inject private StoragePoolDetailsDao _storagePoolDetailsDao; + @Inject private VMTemplatePoolDao _tmpltPoolDao; // invoked to add primary storage that is based on the SolidFire plug-in @Override @@ -83,7 +88,7 @@ public class SolidFirePrimaryDataStoreLifeCycle implements 
PrimaryDataStoreLifeC String storageVip = SolidFireUtil.getStorageVip(url); int storagePort = SolidFireUtil.getStoragePort(url); - DataCenterVO zone = zoneDao.findById(zoneId); + DataCenterVO zone = _zoneDao.findById(zoneId); String uuid = SolidFireUtil.PROVIDER_NAME + "_" + zone.getUuid() + "_" + storageVip; @@ -179,7 +184,7 @@ public class SolidFirePrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeC details.put(SolidFireUtil.CLUSTER_DEFAULT_BURST_IOPS_PERCENT_OF_MAX_IOPS, String.valueOf(fClusterDefaultBurstIopsPercentOfMaxIops)); // this adds a row in the cloud.storage_pool table for this SolidFire cluster - return dataStoreHelper.createPrimaryDataStore(parameters); + return _dataStoreHelper.createPrimaryDataStore(parameters); } // do not implement this method for SolidFire's plug-in @@ -196,7 +201,7 @@ public class SolidFirePrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeC @Override public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType hypervisorType) { - dataStoreHelper.attachZone(dataStore); + _dataStoreHelper.attachZone(dataStore); List xenServerHosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.XenServer, scope.getScopeId()); List vmWareServerHosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.VMware, scope.getScopeId()); @@ -220,23 +225,25 @@ public class SolidFirePrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeC @Override public boolean maintain(DataStore dataStore) { - storagePoolAutomation.maintain(dataStore); - dataStoreHelper.maintain(dataStore); + _storagePoolAutomation.maintain(dataStore); + _dataStoreHelper.maintain(dataStore); return true; } @Override public boolean cancelMaintain(DataStore store) { - dataStoreHelper.cancelMaintain(store); - storagePoolAutomation.cancelMaintain(store); + _dataStoreHelper.cancelMaintain(store); + _storagePoolAutomation.cancelMaintain(store); return true; } // invoked to delete primary storage that is based on the SolidFire plug-in @Override - public boolean deleteDataStore(DataStore store) { + public boolean deleteDataStore(DataStore dataStore) { + long storagePoolId = dataStore.getId(); + List lstSnapshots = _snapshotDao.listAll(); if (lstSnapshots != null) { @@ -244,13 +251,39 @@ public class SolidFirePrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeC SnapshotDetailsVO snapshotDetails = _snapshotDetailsDao.findDetail(snapshot.getId(), SolidFireUtil.STORAGE_POOL_ID); // if this snapshot belongs to the storagePool that was passed in - if (snapshotDetails != null && snapshotDetails.getValue() != null && Long.parseLong(snapshotDetails.getValue()) == store.getId()) { + if (snapshotDetails != null && snapshotDetails.getValue() != null && Long.parseLong(snapshotDetails.getValue()) == storagePoolId) { throw new CloudRuntimeException("This primary storage cannot be deleted because it currently contains one or more snapshots."); } } } - return dataStoreHelper.deletePrimaryDataStore(store); + List lstTemplatePoolRefs = _tmpltPoolDao.listByPoolId(storagePoolId); + + if (lstTemplatePoolRefs != null) { + for (VMTemplateStoragePoolVO templatePoolRef : lstTemplatePoolRefs) { + try { + SolidFireUtil.SolidFireConnection sfConnection = SolidFireUtil.getSolidFireConnection(storagePoolId, _storagePoolDetailsDao); + long sfTemplateVolumeId = Long.parseLong(templatePoolRef.getLocalDownloadPath()); + + SolidFireUtil.deleteSolidFireVolume(sfConnection, sfTemplateVolumeId); + } + catch (Exception ex) { + s_logger.error(ex.getMessage() != null ? 
ex.getMessage() : "Error deleting SolidFire template volume"); + } + + _tmpltPoolDao.remove(templatePoolRef.getId()); + } + } + + StoragePoolVO storagePool = _storagePoolDao.findById(storagePoolId); + + storagePool.setUsedBytes(0); + + _storagePoolDao.update(storagePoolId, storagePool); + + _storagePoolDetailsDao.removeDetails(storagePoolId); + + return _dataStoreHelper.deletePrimaryDataStore(dataStore); } /* (non-Javadoc) @@ -263,7 +296,7 @@ public class SolidFirePrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeC @Override public void updateStoragePool(StoragePool storagePool, Map details) { - StoragePoolVO storagePoolVo = storagePoolDao.findById(storagePool.getId()); + StoragePoolVO storagePoolVo = _storagePoolDao.findById(storagePool.getId()); String strCapacityBytes = details.get(PrimaryDataStoreLifeCycle.CAPACITY_BYTES); Long capacityBytes = strCapacityBytes != null ? Long.parseLong(strCapacityBytes) : null; @@ -290,11 +323,11 @@ public class SolidFirePrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeC @Override public void enableStoragePool(DataStore dataStore) { - dataStoreHelper.enable(dataStore); + _dataStoreHelper.enable(dataStore); } @Override public void disableStoragePool(DataStore dataStore) { - dataStoreHelper.disable(dataStore); + _dataStoreHelper.disable(dataStore); } } diff --git a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/provider/SolidFireSharedHostListener.java b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/provider/SolidFireSharedHostListener.java index e505cd07ba5..f88041a3c49 100644 --- a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/provider/SolidFireSharedHostListener.java +++ b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/provider/SolidFireSharedHostListener.java @@ -52,24 +52,24 @@ import com.cloud.storage.dao.StoragePoolHostDao; import com.cloud.utils.exception.CloudRuntimeException; public class SolidFireSharedHostListener implements HypervisorHostListener { - private static final Logger s_logger = Logger.getLogger(SolidFireSharedHostListener.class); + private static final Logger LOGGER = Logger.getLogger(SolidFireSharedHostListener.class); - @Inject private AgentManager _agentMgr; - @Inject private AlertManager _alertMgr; - @Inject private ClusterDao _clusterDao; - @Inject private ClusterDetailsDao _clusterDetailsDao; - @Inject private DataStoreManager _dataStoreMgr; - @Inject private HostDao _hostDao; - @Inject private PrimaryDataStoreDao _storagePoolDao; - @Inject private StoragePoolHostDao _storagePoolHostDao; - @Inject private StoragePoolDetailsDao _storagePoolDetailsDao; + @Inject private AgentManager agentMgr; + @Inject private AlertManager alertMgr; + @Inject private ClusterDao clusterDao; + @Inject private ClusterDetailsDao clusterDetailsDao; + @Inject private DataStoreManager dataStoreMgr; + @Inject private HostDao hostDao; + @Inject private PrimaryDataStoreDao storagePoolDao; + @Inject private StoragePoolHostDao storagePoolHostDao; + @Inject private StoragePoolDetailsDao storagePoolDetailsDao; @Override public boolean hostAdded(long hostId) { - HostVO host = _hostDao.findById(hostId); + HostVO host = hostDao.findById(hostId); SolidFireUtil.hostAddedToOrRemovedFromCluster(hostId, host.getClusterId(), true, SolidFireUtil.SHARED_PROVIDER_NAME, - _clusterDao, _clusterDetailsDao, _storagePoolDao, _storagePoolDetailsDao, _hostDao); + clusterDao, clusterDetailsDao, storagePoolDao, storagePoolDetailsDao, hostDao); 
handleVMware(hostId, true); @@ -78,37 +78,37 @@ public class SolidFireSharedHostListener implements HypervisorHostListener { @Override public boolean hostConnect(long hostId, long storagePoolId) { - StoragePool storagePool = (StoragePool)_dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary); + StoragePool storagePool = (StoragePool) dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary); ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, storagePool); ModifyStoragePoolAnswer answer = sendModifyStoragePoolCommand(cmd, storagePool, hostId); - StoragePoolHostVO storagePoolHost = _storagePoolHostDao.findByPoolHost(storagePoolId, hostId); + StoragePoolHostVO storagePoolHost = storagePoolHostDao.findByPoolHost(storagePoolId, hostId); if (storagePoolHost != null) { storagePoolHost.setLocalPath(answer.getPoolInfo().getLocalPath().replaceAll("//", "/")); } else { storagePoolHost = new StoragePoolHostVO(storagePoolId, hostId, answer.getPoolInfo().getLocalPath().replaceAll("//", "/")); - _storagePoolHostDao.persist(storagePoolHost); + storagePoolHostDao.persist(storagePoolHost); } - StoragePoolVO storagePoolVO = _storagePoolDao.findById(storagePoolId); + StoragePoolVO storagePoolVO = storagePoolDao.findById(storagePoolId); storagePoolVO.setCapacityBytes(answer.getPoolInfo().getCapacityBytes()); storagePoolVO.setUsedBytes(answer.getPoolInfo().getCapacityBytes() - answer.getPoolInfo().getAvailableBytes()); - _storagePoolDao.update(storagePoolId, storagePoolVO); + storagePoolDao.update(storagePoolId, storagePoolVO); return true; } @Override public boolean hostDisconnected(long hostId, long storagePoolId) { - StoragePoolHostVO storagePoolHost = _storagePoolHostDao.findByPoolHost(storagePoolId, hostId); + StoragePoolHostVO storagePoolHost = storagePoolHostDao.findByPoolHost(storagePoolId, hostId); if (storagePoolHost != null) { - _storagePoolHostDao.deleteStoragePoolHostDetails(hostId, storagePoolId); + storagePoolHostDao.deleteStoragePoolHostDetails(hostId, storagePoolId); } return true; @@ -124,16 +124,16 @@ public class SolidFireSharedHostListener implements HypervisorHostListener { @Override public boolean hostRemoved(long hostId, long clusterId) { SolidFireUtil.hostAddedToOrRemovedFromCluster(hostId, clusterId, false, SolidFireUtil.SHARED_PROVIDER_NAME, - _clusterDao, _clusterDetailsDao, _storagePoolDao, _storagePoolDetailsDao, _hostDao); + clusterDao, clusterDetailsDao, storagePoolDao, storagePoolDetailsDao, hostDao); return true; } private void handleVMware(long hostId, boolean add) { - HostVO host = _hostDao.findById(hostId); + HostVO host = hostDao.findById(hostId); if (HypervisorType.VMware.equals(host.getHypervisorType())) { - List storagePools = _storagePoolDao.findPoolsByProvider(SolidFireUtil.SHARED_PROVIDER_NAME); + List storagePools = storagePoolDao.findPoolsByProvider(SolidFireUtil.SHARED_PROVIDER_NAME); if (storagePools != null && storagePools.size() > 0) { List> targets = new ArrayList<>(); @@ -142,15 +142,15 @@ public class SolidFireSharedHostListener implements HypervisorHostListener { if (storagePool.getClusterId().equals(host.getClusterId())) { long storagePoolId = storagePool.getId(); - StoragePoolDetailVO storagePoolDetail = _storagePoolDetailsDao.findDetail(storagePoolId, SolidFireUtil.IQN); + StoragePoolDetailVO storagePoolDetail = storagePoolDetailsDao.findDetail(storagePoolId, SolidFireUtil.IQN); String iqn = storagePoolDetail.getValue(); - storagePoolDetail = _storagePoolDetailsDao.findDetail(storagePoolId, SolidFireUtil.STORAGE_VIP); + 
storagePoolDetail = storagePoolDetailsDao.findDetail(storagePoolId, SolidFireUtil.STORAGE_VIP); String sVip = storagePoolDetail.getValue(); - storagePoolDetail = _storagePoolDetailsDao.findDetail(storagePoolId, SolidFireUtil.STORAGE_PORT); + storagePoolDetail = storagePoolDetailsDao.findDetail(storagePoolId, SolidFireUtil.STORAGE_PORT); String sPort = storagePoolDetail.getValue(); @@ -177,7 +177,7 @@ public class SolidFireSharedHostListener implements HypervisorHostListener { } private void sendModifyTargetsCommand(ModifyTargetsCommand cmd, long hostId) { - Answer answer = _agentMgr.easySend(hostId, cmd); + Answer answer = agentMgr.easySend(hostId, cmd); if (answer == null) { throw new CloudRuntimeException("Unable to get an answer to the modify targets command"); @@ -186,16 +186,16 @@ public class SolidFireSharedHostListener implements HypervisorHostListener { if (!answer.getResult()) { String msg = "Unable to modify targets on the following host: " + hostId; - HostVO host = _hostDao.findById(hostId); + HostVO host = hostDao.findById(hostId); - _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), msg, msg); + alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), msg, msg); throw new CloudRuntimeException(msg); } } private ModifyStoragePoolAnswer sendModifyStoragePoolCommand(ModifyStoragePoolCommand cmd, StoragePool storagePool, long hostId) { - Answer answer = _agentMgr.easySend(hostId, cmd); + Answer answer = agentMgr.easySend(hostId, cmd); if (answer == null) { throw new CloudRuntimeException("Unable to get an answer to the modify storage pool command for storage pool: " + storagePool.getId()); @@ -204,7 +204,7 @@ public class SolidFireSharedHostListener implements HypervisorHostListener { if (!answer.getResult()) { String msg = "Unable to attach storage pool " + storagePool.getId() + " to the host " + hostId; - _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, storagePool.getDataCenterId(), storagePool.getPodId(), msg, msg); + alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, storagePool.getDataCenterId(), storagePool.getPodId(), msg, msg); throw new CloudRuntimeException(msg); } @@ -212,7 +212,7 @@ public class SolidFireSharedHostListener implements HypervisorHostListener { assert (answer instanceof ModifyStoragePoolAnswer) : "ModifyStoragePoolAnswer not returned from ModifyStoragePoolCommand; Storage pool = " + storagePool.getId() + "; Host = " + hostId; - s_logger.info("Connection established between storage pool " + storagePool + " and host " + hostId); + LOGGER.info("Connection established between storage pool " + storagePool + " and host " + hostId); return (ModifyStoragePoolAnswer)answer; } diff --git a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/util/SolidFireUtil.java b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/util/SolidFireUtil.java index 7268e72914a..a9c12271206 100644 --- a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/util/SolidFireUtil.java +++ b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/util/SolidFireUtil.java @@ -30,6 +30,7 @@ import java.security.cert.X509Certificate; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; @@ -55,6 +56,7 @@ import org.apache.log4j.Logger; import com.google.gson.Gson; import 
com.google.gson.GsonBuilder; +import com.google.gson.JsonObject; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO; @@ -104,6 +106,15 @@ public class SolidFireUtil { public static final String ACCOUNT_ID = "accountId"; public static final String VOLUME_ID = "volumeId"; + public static final String TEMP_VOLUME_ID = "tempVolumeId"; + public static final String SNAPSHOT_ID = "snapshotId"; + + public static final String CloudStackVolumeId = "CloudStackVolumeId"; + public static final String CloudStackVolumeSize = "CloudStackVolumeSize"; + public static final String CloudStackSnapshotId = "CloudStackSnapshotId"; + public static final String CloudStackSnapshotSize = "CloudStackSnapshotSize"; + public static final String CloudStackTemplateId = "CloudStackTemplateId"; + public static final String CloudStackTemplateSize = "CloudStackTemplateSize"; public static final String VOLUME_SIZE = "sfVolumeSize"; @@ -562,13 +573,44 @@ public class SolidFireUtil { } public static long createSolidFireVolume(SolidFireConnection sfConnection, String strSfVolumeName, long lSfAccountId, long lTotalSize, - boolean bEnable512e, String strCloudStackVolumeSize, long minIops, long maxIops, long burstIops) + boolean bEnable512e, Map mapAttributes, long minIops, long maxIops, long burstIops) { - final Gson gson = new GsonBuilder().create(); + JsonObject volumeToCreate = new JsonObject(); - Object volumeToCreate = strCloudStackVolumeSize != null && strCloudStackVolumeSize.trim().length() > 0 ? - new VolumeToCreateWithCloudStackVolumeSize(strSfVolumeName, lSfAccountId, lTotalSize, bEnable512e, strCloudStackVolumeSize, minIops, maxIops, burstIops) : - new VolumeToCreate(strSfVolumeName, lSfAccountId, lTotalSize, bEnable512e, minIops, maxIops, burstIops); + volumeToCreate.addProperty("method", "CreateVolume"); + + JsonObject params = new JsonObject(); + + volumeToCreate.add("params", params); + + params.addProperty("name", strSfVolumeName); + params.addProperty("accountID", lSfAccountId); + params.addProperty("totalSize", lTotalSize); + params.addProperty("enable512e", bEnable512e); + + JsonObject qos = new JsonObject(); + + params.add("qos", qos); + + qos.addProperty("minIOPS", minIops); + qos.addProperty("maxIOPS", maxIops); + qos.addProperty("burstIOPS", burstIops); + + if (mapAttributes != null && mapAttributes.size() > 0) { + JsonObject attributes = new JsonObject(); + + params.add("attributes", attributes); + + Iterator> itr = mapAttributes.entrySet().iterator(); + + while (itr.hasNext()) { + Map.Entry pair = itr.next(); + + attributes.addProperty(pair.getKey(), pair.getValue()); + } + } + + final Gson gson = new GsonBuilder().create(); String strVolumeToCreateJson = gson.toJson(volumeToCreate); @@ -581,14 +623,46 @@ public class SolidFireUtil { return volumeCreateResult.result.volumeID; } - public static void modifySolidFireVolume(SolidFireConnection sfConnection, long volumeId, long totalSize, String strCloudStackVolumeSize, + public static void modifySolidFireVolume(SolidFireConnection sfConnection, long volumeId, Long totalSize, Map mapAttributes, long minIops, long maxIops, long burstIops) { - final Gson gson = new GsonBuilder().create(); + JsonObject volumeToModify = new JsonObject(); - Object volumeToModify = strCloudStackVolumeSize != null && strCloudStackVolumeSize.trim().length() > 0 ? 
- new VolumeToModifyWithCloudStackVolumeSize(volumeId, totalSize, strCloudStackVolumeSize, minIops, maxIops, burstIops) : - new VolumeToModify(volumeId, totalSize, minIops, maxIops, burstIops); + volumeToModify.addProperty("method", "ModifyVolume"); + + JsonObject params = new JsonObject(); + + volumeToModify.add("params", params); + + params.addProperty("volumeID", volumeId); + + if (totalSize != null) { + params.addProperty("totalSize", totalSize); + } + + JsonObject qos = new JsonObject(); + + params.add("qos", qos); + + qos.addProperty("minIOPS", minIops); + qos.addProperty("maxIOPS", maxIops); + qos.addProperty("burstIOPS", burstIops); + + if (mapAttributes != null && mapAttributes.size() > 0) { + JsonObject attributes = new JsonObject(); + + params.add("attributes", attributes); + + Iterator> itr = mapAttributes.entrySet().iterator(); + + while (itr.hasNext()) { + Map.Entry pair = itr.next(); + + attributes.addProperty(pair.getKey(), pair.getValue()); + } + } + + final Gson gson = new GsonBuilder().create(); String strVolumeToModifyJson = gson.toJson(volumeToModify); @@ -687,7 +761,7 @@ public class SolidFireUtil { executeJsonRpc(sfConnection, strVolumeToDeleteJson); } - public static void purgeSolidFireVolume(SolidFireConnection sfConnection, long lVolumeId) + public static void purgeSolidFireVolume(SolidFireConnection sfConnection, long lVolumeId) { final Gson gson = new GsonBuilder().create(); @@ -800,10 +874,51 @@ public class SolidFireUtil { } } - public static long createSolidFireSnapshot(SolidFireConnection sfConnection, long lVolumeId, String snapshotName) { - final Gson gson = new GsonBuilder().create(); + public static class SolidFireSnapshot { + private final long _id; + private final String _name; - SnapshotToCreate snapshotToCreate = new SnapshotToCreate(lVolumeId, snapshotName); + public SolidFireSnapshot(long id, String name) { + _id = id; + _name = name; + } + + public long getId() { + return _id; + } + + public String getName() { + return _name; + } + } + + public static long createSolidFireSnapshot(SolidFireConnection sfConnection, long lVolumeId, String snapshotName, Map mapAttributes) { + JsonObject snapshotToCreate = new JsonObject(); + + snapshotToCreate.addProperty("method", "CreateSnapshot"); + + JsonObject params = new JsonObject(); + + snapshotToCreate.add("params", params); + + params.addProperty("volumeID", lVolumeId); + params.addProperty("name", snapshotName); + + if (mapAttributes != null && mapAttributes.size() > 0) { + JsonObject attributes = new JsonObject(); + + params.add("attributes", attributes); + + Iterator> itr = mapAttributes.entrySet().iterator(); + + while (itr.hasNext()) { + Map.Entry pair = itr.next(); + + attributes.addProperty(pair.getKey(), pair.getValue()); + } + } + + final Gson gson = new GsonBuilder().create(); String strSnapshotToCreateJson = gson.toJson(snapshotToCreate); @@ -816,6 +931,38 @@ public class SolidFireUtil { return snapshotCreateResult.result.snapshotID; } + public static SolidFireSnapshot getSolidFireSnapshot(SolidFireConnection sfConnection, long lVolumeId, long lSnapshotId) { + final Gson gson = new GsonBuilder().create(); + + SnapshotsToGet snapshotsToGet = new SnapshotsToGet(lVolumeId); + + String strSnapshotsToGetJson = gson.toJson(snapshotsToGet); + + String strSnapshotsGetResultJson = executeJsonRpc(sfConnection, strSnapshotsToGetJson); + + SnapshotsGetResult snapshotsGetResult = gson.fromJson(strSnapshotsGetResultJson, SnapshotsGetResult.class); + + verifyResult(snapshotsGetResult.result, 
strSnapshotsGetResultJson, gson); + + String snapshotName = null; + + if (snapshotsGetResult.result.snapshots != null) { + for (SnapshotsGetResult.Result.Snapshot snapshot : snapshotsGetResult.result.snapshots) { + if (snapshot.snapshotID == lSnapshotId) { + snapshotName = snapshot.name; + + break; + } + } + } + + if (snapshotName == null) { + throw new CloudRuntimeException("Could not find SolidFire snapshot ID: " + lSnapshotId + " for the following SolidFire volume ID: " + lVolumeId); + } + + return new SolidFireSnapshot(lSnapshotId, snapshotName); + } + public static void deleteSolidFireSnapshot(SolidFireConnection sfConnection, long lSnapshotId) { final Gson gson = new GsonBuilder().create(); @@ -841,10 +988,40 @@ public class SolidFireUtil { verifyResult(rollbackInitiatedResult.result, strRollbackInitiatedResultJson, gson); } - public static long createSolidFireClone(SolidFireConnection sfConnection, long lVolumeId, long lSnapshotId, String cloneName) { - final Gson gson = new GsonBuilder().create(); + public static long createSolidFireClone(SolidFireConnection sfConnection, long lVolumeId, long lSnapshotId, long sfAccountId, + String cloneName, Map mapAttributes) { + JsonObject cloneToCreate = new JsonObject(); - CloneToCreate cloneToCreate = new CloneToCreate(lVolumeId, lSnapshotId, cloneName); + cloneToCreate.addProperty("method", "CloneVolume"); + + JsonObject params = new JsonObject(); + + cloneToCreate.add("params", params); + + params.addProperty("volumeID", lVolumeId); + + if (lSnapshotId > 0) { + params.addProperty("snapshotID", lSnapshotId); + } + + params.addProperty("newAccountID", sfAccountId); + params.addProperty("name", cloneName); + + if (mapAttributes != null && mapAttributes.size() > 0) { + JsonObject attributes = new JsonObject(); + + params.add("attributes", attributes); + + Iterator> itr = mapAttributes.entrySet().iterator(); + + while (itr.hasNext()) { + Map.Entry pair = itr.next(); + + attributes.addProperty(pair.getKey(), pair.getValue()); + } + } + + final Gson gson = new GsonBuilder().create(); String strCloneToCreateJson = gson.toJson(cloneToCreate); @@ -854,7 +1031,33 @@ public class SolidFireUtil { verifyResult(cloneCreateResult.result, strCloneCreateResultJson, gson); - return cloneCreateResult.result.cloneID; + // Clone is an async operation. Poll until we get data. 
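CloneVolume returns an asyncHandle rather than a finished volume, so the loop that follows issues GetAsyncResult every half second until the job reports "complete". The sketch below shows the same pattern as a standalone helper, with an explicit timeout added only for illustration; the JSON-RPC transport is abstracted as a Function<String, String> because the real executeJsonRpc(...) requires a live SolidFireConnection:

import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import com.google.gson.JsonObject;
import java.util.function.Function;

public final class AsyncClonePollSketch {
    private static final class AsyncJobResult {
        private AsyncResult result;

        private static final class AsyncResult {
            private String status; // e.g. "running" or "complete"
        }
    }

    static void waitForAsyncJob(Function<String, String> jsonRpc, long asyncHandle, long timeoutMillis)
            throws InterruptedException {
        // Build the GetAsyncResult request the same way the driver builds its JSON-RPC payloads.
        JsonObject request = new JsonObject();
        request.addProperty("method", "GetAsyncResult");

        JsonObject params = new JsonObject();
        params.addProperty("asyncHandle", asyncHandle);
        request.add("params", params);

        Gson gson = new GsonBuilder().create();
        String requestJson = gson.toJson(request);

        long deadline = System.currentTimeMillis() + timeoutMillis;

        while (true) {
            AsyncJobResult result = gson.fromJson(jsonRpc.apply(requestJson), AsyncJobResult.class);

            if ("complete".equals(result.result.status)) {
                return;
            }

            if (System.currentTimeMillis() > deadline) {
                throw new RuntimeException("Timed out waiting for SolidFire async job " + asyncHandle);
            }

            Thread.sleep(500); // poll every half second, as in the driver
        }
    }

    public static void main(String[] args) throws InterruptedException {
        // Fake transport that reports the job as complete on the first poll.
        waitForAsyncJob(request -> "{\"result\":{\"status\":\"complete\"}}", 42L, 10_000L);

        System.out.println("Async job finished");
    }
}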
+ + AsyncJobToPoll asyncJobToPoll = new AsyncJobToPoll(cloneCreateResult.result.asyncHandle); + + String strAsyncJobToPollJson = gson.toJson(asyncJobToPoll); + + do { + String strAsyncJobResultJson = executeJsonRpc(sfConnection, strAsyncJobToPollJson); + + AsyncJobResult asyncJobResult = gson.fromJson(strAsyncJobResultJson, AsyncJobResult.class); + + verifyResult(asyncJobResult.result, strAsyncJobResultJson, gson); + + if (asyncJobResult.result.status.equals("complete")) { + break; + } + + try { + Thread.sleep(500); // sleep for 1/2 of a second + } + catch (Exception ex) { + // ignore + } + } + while (true); + + return cloneCreateResult.result.volumeID; } public static long createSolidFireAccount(SolidFireConnection sfConnection, String strAccountName) @@ -1134,189 +1337,6 @@ public class SolidFireUtil { } } - @SuppressWarnings("unused") - private static final class VolumeToCreateWithCloudStackVolumeSize { - private final String method = "CreateVolume"; - private final VolumeToCreateParams params; - - private VolumeToCreateWithCloudStackVolumeSize(final String strVolumeName, final long lAccountId, final long lTotalSize, - final boolean bEnable512e, final String strCloudStackVolumeSize, final long lMinIOPS, final long lMaxIOPS, final long lBurstIOPS) { - params = new VolumeToCreateParams(strVolumeName, lAccountId, lTotalSize, bEnable512e, strCloudStackVolumeSize, lMinIOPS, lMaxIOPS, lBurstIOPS); - } - - private static final class VolumeToCreateParams { - private final String name; - private final long accountID; - private final long totalSize; - private final boolean enable512e; - private final VolumeToCreateParamsAttributes attributes; - private final VolumeToCreateParamsQoS qos; - - private VolumeToCreateParams(final String strVolumeName, final long lAccountId, final long lTotalSize, final boolean bEnable512e, - final String strCloudStackVolumeSize, final long lMinIOPS, final long lMaxIOPS, final long lBurstIOPS) { - name = strVolumeName; - accountID = lAccountId; - totalSize = lTotalSize; - enable512e = bEnable512e; - - attributes = new VolumeToCreateParamsAttributes(strCloudStackVolumeSize); - qos = new VolumeToCreateParamsQoS(lMinIOPS, lMaxIOPS, lBurstIOPS); - } - - private static final class VolumeToCreateParamsAttributes { - private final String CloudStackVolumeSize; - - private VolumeToCreateParamsAttributes(final String strCloudStackVolumeSize) { - CloudStackVolumeSize = strCloudStackVolumeSize; - } - } - - private static final class VolumeToCreateParamsQoS { - private final long minIOPS; - private final long maxIOPS; - private final long burstIOPS; - - private VolumeToCreateParamsQoS(final long lMinIOPS, final long lMaxIOPS, final long lBurstIOPS) { - minIOPS = lMinIOPS; - maxIOPS = lMaxIOPS; - burstIOPS = lBurstIOPS; - } - } - } - } - - @SuppressWarnings("unused") - private static final class VolumeToCreate { - private final String method = "CreateVolume"; - private final VolumeToCreateParams params; - - private VolumeToCreate(final String strVolumeName, final long lAccountId, final long lTotalSize, final boolean bEnable512e, - final long lMinIOPS, final long lMaxIOPS, final long lBurstIOPS) { - params = new VolumeToCreateParams(strVolumeName, lAccountId, lTotalSize, bEnable512e, lMinIOPS, lMaxIOPS, lBurstIOPS); - } - - private static final class VolumeToCreateParams { - private final String name; - private final long accountID; - private final long totalSize; - private final boolean enable512e; - private final VolumeToCreateParamsQoS qos; - - private VolumeToCreateParams(final 
String strVolumeName, final long lAccountId, final long lTotalSize, final boolean bEnable512e, - final long lMinIOPS, final long lMaxIOPS, final long lBurstIOPS) { - name = strVolumeName; - accountID = lAccountId; - totalSize = lTotalSize; - enable512e = bEnable512e; - - qos = new VolumeToCreateParamsQoS(lMinIOPS, lMaxIOPS, lBurstIOPS); - } - - private static final class VolumeToCreateParamsQoS { - private final long minIOPS; - private final long maxIOPS; - private final long burstIOPS; - - private VolumeToCreateParamsQoS(final long lMinIOPS, final long lMaxIOPS, final long lBurstIOPS) { - minIOPS = lMinIOPS; - maxIOPS = lMaxIOPS; - burstIOPS = lBurstIOPS; - } - } - } - } - - @SuppressWarnings("unused") - private static final class VolumeToModifyWithCloudStackVolumeSize - { - private final String method = "ModifyVolume"; - private final VolumeToModifyParams params; - - private VolumeToModifyWithCloudStackVolumeSize(final long lVolumeId, final long lTotalSize, final String strCloudStackVolumeSize, - final long lMinIOPS, final long lMaxIOPS, final long lBurstIOPS) - { - params = new VolumeToModifyParams(lVolumeId, lTotalSize, strCloudStackVolumeSize, lMinIOPS, lMaxIOPS, lBurstIOPS); - } - - private static final class VolumeToModifyParams - { - private final long volumeID; - private final long totalSize; - private final VolumeToModifyParamsAttributes attributes; - private final VolumeToModifyParamsQoS qos; - - private VolumeToModifyParams(final long lVolumeId, final long lTotalSize, String strCloudStackVolumeSize, final long lMinIOPS, final long lMaxIOPS, final long lBurstIOPS) - { - volumeID = lVolumeId; - - totalSize = lTotalSize; - - attributes = new VolumeToModifyParamsAttributes(strCloudStackVolumeSize); - qos = new VolumeToModifyParamsQoS(lMinIOPS, lMaxIOPS, lBurstIOPS); - } - } - - private static final class VolumeToModifyParamsAttributes { - private final String CloudStackVolumeSize; - - private VolumeToModifyParamsAttributes(final String strCloudStackVolumeSize) { - CloudStackVolumeSize = strCloudStackVolumeSize; - } - } - - private static final class VolumeToModifyParamsQoS { - private final long minIOPS; - private final long maxIOPS; - private final long burstIOPS; - - private VolumeToModifyParamsQoS(final long lMinIOPS, final long lMaxIOPS, final long lBurstIOPS) { - minIOPS = lMinIOPS; - maxIOPS = lMaxIOPS; - burstIOPS = lBurstIOPS; - } - } - } - - @SuppressWarnings("unused") - private static final class VolumeToModify - { - private final String method = "ModifyVolume"; - private final VolumeToModifyParams params; - - private VolumeToModify(final long lVolumeId, final long lTotalSize, final long lMinIOPS, final long lMaxIOPS, final long lBurstIOPS) - { - params = new VolumeToModifyParams(lVolumeId, lTotalSize, lMinIOPS, lMaxIOPS, lBurstIOPS); - } - - private static final class VolumeToModifyParams - { - private final long volumeID; - private final long totalSize; - private final VolumeToModifyParamsQoS qos; - - private VolumeToModifyParams(final long lVolumeId, final long lTotalSize, final long lMinIOPS, final long lMaxIOPS, final long lBurstIOPS) - { - volumeID = lVolumeId; - - totalSize = lTotalSize; - - qos = new VolumeToModifyParamsQoS(lMinIOPS, lMaxIOPS, lBurstIOPS); - } - } - - private static final class VolumeToModifyParamsQoS { - private final long minIOPS; - private final long maxIOPS; - private final long burstIOPS; - - private VolumeToModifyParamsQoS(final long lMinIOPS, final long lMaxIOPS, final long lBurstIOPS) { - minIOPS = lMinIOPS; - maxIOPS = lMaxIOPS; - 
burstIOPS = lBurstIOPS; - } - } - } - @SuppressWarnings("unused") private static final class VolumeToGet { @@ -1407,21 +1427,20 @@ public class SolidFireUtil { } @SuppressWarnings("unused") - private static final class SnapshotToCreate { - private final String method = "CreateSnapshot"; - private final SnapshotToCreateParams params; + private static final class SnapshotsToGet + { + private final String method = "ListSnapshots"; + private final SnapshotsToGetParams params; - private SnapshotToCreate(final long lVolumeId, final String snapshotName) { - params = new SnapshotToCreateParams(lVolumeId, snapshotName); + private SnapshotsToGet(final long lVolumeId) { + params = new SnapshotsToGetParams(lVolumeId); } - private static final class SnapshotToCreateParams { + private static final class SnapshotsToGetParams { private final long volumeID; - private final String name; - private SnapshotToCreateParams(final long lVolumeId, final String snapshotName) { + private SnapshotsToGetParams(final long lVolumeId) { volumeID = lVolumeId; - name = snapshotName; } } } @@ -1465,28 +1484,6 @@ public class SolidFireUtil { } } - @SuppressWarnings("unused") - private static final class CloneToCreate { - private final String method = "CloneVolume"; - private final CloneToCreateParams params; - - private CloneToCreate(final long lVolumeId, final long lSnapshotId, final String cloneName) { - params = new CloneToCreateParams(lVolumeId, lSnapshotId, cloneName); - } - - private static final class CloneToCreateParams { - private final long volumeID; - private final long snapshotID; - private final String name; - - private CloneToCreateParams(final long lVolumeId, final long lSnapshotId, final String cloneName) { - volumeID = lVolumeId; - snapshotID = lSnapshotId; - name = cloneName; - } - } - } - @SuppressWarnings("unused") private static final class AccountToAdd { @@ -1680,6 +1677,28 @@ public class SolidFireUtil { } } + @SuppressWarnings("unused") + private static final class AsyncJobToPoll + { + private final String method = "GetAsyncResult"; + private final AsyncJobToPollParams params; + + private AsyncJobToPoll(final long asyncHandle) + { + params = new AsyncJobToPollParams(asyncHandle); + } + + private static final class AsyncJobToPollParams + { + private final long asyncHandle; + + private AsyncJobToPollParams(final long asyncHandle) + { + this.asyncHandle = asyncHandle; + } + } + } + private static final class VolumeCreateResult { private Result result; @@ -1721,6 +1740,19 @@ public class SolidFireUtil { } } + private static final class SnapshotsGetResult { + private Result result; + + private static final class Result { + private Snapshot[] snapshots; + + private static final class Snapshot { + private long snapshotID; + private String name; + } + } + } + @SuppressWarnings("unused") private static final class RollbackInitiatedResult { private Result result; @@ -1734,7 +1766,8 @@ public class SolidFireUtil { private Result result; private static final class Result { - private long cloneID; + private long volumeID; + private long asyncHandle; } } @@ -1786,6 +1819,15 @@ public class SolidFireUtil { } } + private static final class AsyncJobResult { + private AsyncResult result; + + private static final class AsyncResult + { + private String status; + } + } + private static final class JsonError { private Error error; diff --git a/server/src/com/cloud/capacity/CapacityManagerImpl.java b/server/src/com/cloud/capacity/CapacityManagerImpl.java index 13794c76361..d0ae3e99695 100644 --- 
a/server/src/com/cloud/capacity/CapacityManagerImpl.java +++ b/server/src/com/cloud/capacity/CapacityManagerImpl.java @@ -549,28 +549,35 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, return getUsedBytes(pool); } else { - // Get size for all the non-destroyed volumes + // Get size for all the non-destroyed volumes. Pair sizes = _volumeDao.getNonDestroyedCountAndTotalByPool(pool.getId()); totalAllocatedSize = sizes.second() + sizes.first() * _extraBytesPerVolume; } - // Get size for VM Snapshots - totalAllocatedSize = totalAllocatedSize + _volumeDao.getVMSnapshotSizeByPool(pool.getId()); + // Get size for VM Snapshots. + totalAllocatedSize += _volumeDao.getVMSnapshotSizeByPool(pool.getId()); - // Iterate through all templates on this storage pool - boolean tmpinstalled = false; - List templatePoolVOs; - templatePoolVOs = _templatePoolDao.listByPoolId(pool.getId()); + boolean tmpInstalled = false; + // Iterate through all templates on this storage pool. + List templatePoolVOs = _templatePoolDao.listByPoolId(pool.getId()); for (VMTemplateStoragePoolVO templatePoolVO : templatePoolVOs) { - if ((templateForVmCreation != null) && !tmpinstalled && (templatePoolVO.getTemplateId() == templateForVmCreation.getId())) { - tmpinstalled = true; + if ((templateForVmCreation != null) && !tmpInstalled && (templatePoolVO.getTemplateId() == templateForVmCreation.getId())) { + tmpInstalled = true; } + long templateSize = templatePoolVO.getTemplateSize(); + totalAllocatedSize += templateSize + _extraBytesPerVolume; } + if ((templateForVmCreation != null) && !tmpInstalled) { + long templateForVmCreationSize = templateForVmCreation.getSize() != null ? templateForVmCreation.getSize() : 0; + + totalAllocatedSize += templateForVmCreationSize + _extraBytesPerVolume; + } + return totalAllocatedSize; } diff --git a/server/src/com/cloud/deploy/DeploymentPlanningManagerImpl.java b/server/src/com/cloud/deploy/DeploymentPlanningManagerImpl.java index 2b3358aee8e..ef0ad19079f 100644 --- a/server/src/com/cloud/deploy/DeploymentPlanningManagerImpl.java +++ b/server/src/com/cloud/deploy/DeploymentPlanningManagerImpl.java @@ -1234,7 +1234,8 @@ StateListener { requestVolumes = new ArrayList(); requestVolumes.add(vol); - if (!_storageMgr.storagePoolHasEnoughSpace(requestVolumes, potentialSPool)) + if (!_storageMgr.storagePoolHasEnoughIops(requestVolumes, potentialSPool) || + !_storageMgr.storagePoolHasEnoughSpace(requestVolumes, potentialSPool, potentialHost.getClusterId())) continue; volumeAllocationMap.put(potentialSPool, requestVolumes); } diff --git a/server/src/com/cloud/hypervisor/CloudZonesStartupProcessor.java b/server/src/com/cloud/hypervisor/CloudZonesStartupProcessor.java index 4bdb2bc1d47..2aa9b04b2ae 100644 --- a/server/src/com/cloud/hypervisor/CloudZonesStartupProcessor.java +++ b/server/src/com/cloud/hypervisor/CloudZonesStartupProcessor.java @@ -44,9 +44,11 @@ import com.cloud.dc.dao.DataCenterDao; import com.cloud.dc.dao.DataCenterDetailsDao; import com.cloud.dc.dao.HostPodDao; import com.cloud.exception.ConnectionException; +import com.cloud.host.DetailVO; import com.cloud.host.Host; import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; +import com.cloud.host.dao.HostDetailsDao; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.utils.component.AdapterBase; import com.cloud.utils.exception.CloudRuntimeException; @@ -69,6 +71,8 @@ public class CloudZonesStartupProcessor extends AdapterBase implements StartupCo @Inject HostDao _hostDao = null; 
@Inject + private HostDetailsDao hostDetailsDao; + @Inject HostPodDao _podDao = null; @Inject DataCenterDetailsDao _zoneDetailsDao = null; @@ -319,6 +323,25 @@ public class CloudZonesStartupProcessor extends AdapterBase implements StartupCo host.setHypervisorType(hyType); host.setHypervisorVersion(scc.getHypervisorVersion()); + updateHostDetails(host, scc); + } + + private void updateHostDetails(HostVO host, StartupRoutingCommand startupRoutingCmd) { + final String name = "supportsResign"; + final String value = String.valueOf(startupRoutingCmd.getSupportsClonedVolumes()); + + DetailVO hostDetail = hostDetailsDao.findDetail(host.getId(), name); + + if (hostDetail != null) { + hostDetail.setValue(value); + + hostDetailsDao.update(hostDetail.getId(), hostDetail); + } + else { + hostDetail = new DetailVO(host.getId(), name, value); + + hostDetailsDao.persist(hostDetail); + } } private boolean checkCIDR(Host.Type type, HostPodVO pod, String serverPrivateIP, String serverPrivateNetmask) { diff --git a/server/src/com/cloud/resource/ResourceManagerImpl.java b/server/src/com/cloud/resource/ResourceManagerImpl.java index d6333b96fe0..89893a2f806 100644 --- a/server/src/com/cloud/resource/ResourceManagerImpl.java +++ b/server/src/com/cloud/resource/ResourceManagerImpl.java @@ -1734,6 +1734,12 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, _hostDao.update(host.getId(), host); } + if (startup instanceof StartupRoutingCommand) { + final StartupRoutingCommand ssCmd = (StartupRoutingCommand)startup; + + updateSupportsClonedVolumes(host, ssCmd.getSupportsClonedVolumes()); + } + try { resourceStateTransitTo(host, ResourceState.Event.InternalCreated, _nodeId); /* Agent goes to Connecting status */ @@ -1751,6 +1757,64 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, return host; } + private void updateSupportsClonedVolumes(HostVO host, boolean supportsClonedVolumes) { + final String name = "supportsResign"; + + DetailVO hostDetail = _hostDetailsDao.findDetail(host.getId(), name); + + if (hostDetail != null) { + if (supportsClonedVolumes) { + hostDetail.setValue(Boolean.TRUE.toString()); + + _hostDetailsDao.update(hostDetail.getId(), hostDetail); + } + else { + _hostDetailsDao.remove(hostDetail.getId()); + } + } + else { + if (supportsClonedVolumes) { + hostDetail = new DetailVO(host.getId(), name, Boolean.TRUE.toString()); + + _hostDetailsDao.persist(hostDetail); + } + } + + boolean clusterSupportsResigning = true; + + List hostVOs = _hostDao.findByClusterId(host.getClusterId()); + + for (HostVO hostVO : hostVOs) { + DetailVO hostDetailVO = _hostDetailsDao.findDetail(hostVO.getId(), name); + + if (hostDetailVO == null || Boolean.parseBoolean(hostDetailVO.getValue()) == false) { + clusterSupportsResigning = false; + + break; + } + } + + ClusterDetailsVO clusterDetailsVO = _clusterDetailsDao.findDetail(host.getClusterId(), name); + + if (clusterDetailsVO != null) { + if (clusterSupportsResigning) { + clusterDetailsVO.setValue(Boolean.TRUE.toString()); + + _clusterDetailsDao.update(clusterDetailsVO.getId(), clusterDetailsVO); + } + else { + _clusterDetailsDao.remove(clusterDetailsVO.getId()); + } + } + else { + if (clusterSupportsResigning) { + clusterDetailsVO = new ClusterDetailsVO(host.getClusterId(), name, Boolean.TRUE.toString()); + + _clusterDetailsDao.persist(clusterDetailsVO); + } + } + } + private boolean isFirstHostInCluster(final HostVO host) { boolean isFirstHost = true; if (host.getClusterId() != null) { diff --git 
a/server/src/com/cloud/storage/ResizeVolumePayload.java b/server/src/com/cloud/storage/ResizeVolumePayload.java index 7a927b2179e..9e4c3ec528c 100644 --- a/server/src/com/cloud/storage/ResizeVolumePayload.java +++ b/server/src/com/cloud/storage/ResizeVolumePayload.java @@ -21,16 +21,21 @@ public class ResizeVolumePayload { public final Long newSize; public final Long newMinIops; public final Long newMaxIops; + public final Integer newHypervisorSnapshotReserve; public final boolean shrinkOk; public final String instanceName; public final long[] hosts; + public final boolean isManaged; - public ResizeVolumePayload(Long newSize, Long newMinIops, Long newMaxIops, boolean shrinkOk, String instanceName, long[] hosts) { + public ResizeVolumePayload(Long newSize, Long newMinIops, Long newMaxIops, Integer newHypervisorSnapshotReserve, boolean shrinkOk, + String instanceName, long[] hosts, boolean isManaged) { this.newSize = newSize; this.newMinIops = newMinIops; this.newMaxIops = newMaxIops; + this.newHypervisorSnapshotReserve = newHypervisorSnapshotReserve; this.shrinkOk = shrinkOk; this.instanceName = instanceName; this.hosts = hosts; + this.isManaged = isManaged; } } diff --git a/server/src/com/cloud/storage/StorageManager.java b/server/src/com/cloud/storage/StorageManager.java index a399a083f7c..0bf00110863 100644 --- a/server/src/com/cloud/storage/StorageManager.java +++ b/server/src/com/cloud/storage/StorageManager.java @@ -106,6 +106,30 @@ public interface StorageManager extends StorageService { boolean storagePoolHasEnoughSpace(List volume, StoragePool pool); + /** + * This comment is relevant to managed storage only. + * + * Long clusterId = only used for managed storage + * + * Some managed storage can be more efficient at handling VM templates (via cloning) if it knows the capabilities of the compute cluster it is dealing with. + * If the compute cluster supports UUID resigning and the storage system can clone a volume from a volume, then this determines how much more space a + * new root volume (that makes use of a template) will take up on the storage system. + * + * For example, if a storage system can clone a volume from a volume and the compute cluster supports UUID resigning (relevant for hypervisors like + * XenServer and ESXi that put virtual disks in clustered file systems), then the storage system will need to determine whether it already has a copy of + * the template or whether it needs to create one first before cloning the template to a new volume to be used for the new root disk (assuming the root + * disk is being deployed from a template). If the template doesn't already exist on the storage system, then you need to take into consideration the space + * required for that template (stored in one volume) and the space required for a new volume created from that template volume (for your new root volume). + * + * If UUID resigning is not available in the compute cluster or the storage system doesn't support cloning a volume from a volume, then for each new + * root disk that uses a template, CloudStack will have the template copied down to a newly created volume on the storage system (i.e. there is no need + * to take into consideration the possible need to first create a volume on the storage system for a template that will be used for the root disk + * via cloning). + * + * Cloning volumes on the back-end instead of copying down a new template for each new volume helps to alleviate load on the hypervisors.
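+ * + * As a rough, hypothetical illustration of the accounting described above: for a 50 GB template that is not yet stored on the storage system and a 50 GB root volume cloned from it, roughly 50 GB (the template volume) + 50 GB (the clone) must be accounted for; if the template volume is already present on the storage system, only the 50 GB for the clone is counted.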
+ */ + boolean storagePoolHasEnoughSpace(List volume, StoragePool pool, Long clusterId); + boolean registerHostListener(String providerUuid, HypervisorHostListener listener); void connectHostToSharedPool(long hostId, long poolId) throws StorageUnavailableException, StorageConflictException; diff --git a/server/src/com/cloud/storage/StorageManagerImpl.java b/server/src/com/cloud/storage/StorageManagerImpl.java index 0509abc87c6..0c56d2d5abd 100644 --- a/server/src/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/com/cloud/storage/StorageManagerImpl.java @@ -70,6 +70,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCy import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; import org.apache.cloudstack.engine.subsystem.api.storage.TemplateDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; import org.apache.cloudstack.engine.subsystem.api.storage.TemplateService; import org.apache.cloudstack.engine.subsystem.api.storage.TemplateService.TemplateApiResult; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; @@ -1668,9 +1669,9 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C return false; } - // Only IOPS guaranteed primary storage like SolidFire is using/setting IOPS. + // Only IOPS-guaranteed primary storage like SolidFire is using/setting IOPS. // This check returns true for storage that does not specify IOPS. - if (pool.getCapacityIops() == null ) { + if (pool.getCapacityIops() == null) { s_logger.info("Storage pool " + pool.getName() + " (" + pool.getId() + ") does not supply IOPS capacity, assuming enough capacity"); return true; @@ -1696,6 +1697,11 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C @Override public boolean storagePoolHasEnoughSpace(List volumes, StoragePool pool) { + return storagePoolHasEnoughSpace(volumes, pool, null); + } + + @Override + public boolean storagePoolHasEnoughSpace(List volumes, StoragePool pool, Long clusterId) { if (volumes == null || volumes.isEmpty()) { return false; } @@ -1704,10 +1710,11 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C return false; } - // allocated space includes template of specified volume + // allocated space includes templates StoragePoolVO poolVO = _storagePoolDao.findById(pool.getId()); - long allocatedSizeWithtemplate = _capacityMgr.getAllocatedPoolCapacity(poolVO, null); + long allocatedSizeWithTemplate = _capacityMgr.getAllocatedPoolCapacity(poolVO, null); long totalAskingSize = 0; + for (Volume volume : volumes) { // refreshing the volume from the DB to get latest hv_ss_reserve (hypervisor snapshot reserve) field // I could have just assigned this to "volume", but decided to make a new variable for it so that it @@ -1718,18 +1725,37 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C // update the volume's hv_ss_reserve (hypervisor snapshot reserve) from a disk offering (used for managed storage) volService.updateHypervisorSnapshotReserveForVolume(getDiskOfferingVO(volumeVO), volumeVO.getId(), getHypervisorType(volumeVO)); - // hv_ss_reserve field might have been updated; refresh from DB to make use of it in getVolumeSizeIncludingHypervisorSnapshotReserve + // hv_ss_reserve field might have been updated; refresh from DB to make use of it in getDataObjectSizeIncludingHypervisorSnapshotReserve 
volumeVO = _volumeDao.findById(volume.getId()); } - if (volumeVO.getTemplateId() != null) { - VMTemplateVO tmpl = _templateDao.findByIdIncludingRemoved(volumeVO.getTemplateId()); - if (tmpl != null && tmpl.getFormat() != ImageFormat.ISO) { - allocatedSizeWithtemplate = _capacityMgr.getAllocatedPoolCapacity(poolVO, tmpl); + // this if statement should resolve to true at most once per execution of the for loop its contained within (for a root disk that is + // to leverage a template) + if (volume.getTemplateId() != null) { + VMTemplateVO tmpl = _templateDao.findByIdIncludingRemoved(volume.getTemplateId()); + + if (tmpl != null && !ImageFormat.ISO.equals(tmpl.getFormat())) { + allocatedSizeWithTemplate = _capacityMgr.getAllocatedPoolCapacity(poolVO, tmpl); } } + if (volumeVO.getState() != Volume.State.Ready) { - totalAskingSize = totalAskingSize + getVolumeSizeIncludingHypervisorSnapshotReserve(volumeVO, pool); + totalAskingSize += getDataObjectSizeIncludingHypervisorSnapshotReserve(volumeVO, pool); + + if (ScopeType.ZONE.equals(poolVO.getScope()) && volumeVO.getTemplateId() != null) { + VMTemplateVO tmpl = _templateDao.findByIdIncludingRemoved(volumeVO.getTemplateId()); + + if (tmpl != null && !ImageFormat.ISO.equals(tmpl.getFormat())) { + // Storage plug-ins for zone-wide primary storage can be designed in such a way as to store a template on the + // primary storage once and make use of it in different clusters (via cloning). + // This next call leads to CloudStack asking how many more bytes it will need for the template (if the template is + // already stored on the primary storage, then the answer is 0). + + if (clusterId != null && _clusterDao.getSupportsResigning(clusterId)) { + totalAskingSize += getBytesRequiredForTemplate(tmpl, pool); + } + } + } } } @@ -1749,11 +1775,11 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C double storageAllocatedThreshold = CapacityManager.StorageAllocatedCapacityDisableThreshold.valueIn(pool.getDataCenterId()); if (s_logger.isDebugEnabled()) { s_logger.debug("Checking pool: " + pool.getId() + " for volume allocation " + volumes.toString() + ", maxSize : " + totalOverProvCapacity + - ", totalAllocatedSize : " + allocatedSizeWithtemplate + ", askingSize : " + totalAskingSize + ", allocated disable threshold: " + + ", totalAllocatedSize : " + allocatedSizeWithTemplate + ", askingSize : " + totalAskingSize + ", allocated disable threshold: " + storageAllocatedThreshold); } - double usedPercentage = (allocatedSizeWithtemplate + totalAskingSize) / (double)(totalOverProvCapacity); + double usedPercentage = (allocatedSizeWithTemplate + totalAskingSize) / (double)(totalOverProvCapacity); if (usedPercentage > storageAllocatedThreshold) { if (s_logger.isDebugEnabled()) { s_logger.debug("Insufficient un-allocated capacity on: " + pool.getId() + " for volume allocation: " + volumes.toString() + @@ -1763,10 +1789,10 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C return false; } - if (totalOverProvCapacity < (allocatedSizeWithtemplate + totalAskingSize)) { + if (totalOverProvCapacity < (allocatedSizeWithTemplate + totalAskingSize)) { if (s_logger.isDebugEnabled()) { s_logger.debug("Insufficient un-allocated capacity on: " + pool.getId() + " for volume allocation: " + volumes.toString() + - ", not enough storage, maxSize : " + totalOverProvCapacity + ", totalAllocatedSize : " + allocatedSizeWithtemplate + ", askingSize : " + + ", not enough storage, maxSize : " + totalOverProvCapacity + ", 
totalAllocatedSize : " + allocatedSizeWithTemplate + ", askingSize : " + totalAskingSize); } return false; @@ -1792,19 +1818,36 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C return null; } - private long getVolumeSizeIncludingHypervisorSnapshotReserve(Volume volume, StoragePool pool) { + private long getDataObjectSizeIncludingHypervisorSnapshotReserve(Volume volume, StoragePool pool) { DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(pool.getStorageProviderName()); DataStoreDriver storeDriver = storeProvider.getDataStoreDriver(); if (storeDriver instanceof PrimaryDataStoreDriver) { PrimaryDataStoreDriver primaryStoreDriver = (PrimaryDataStoreDriver)storeDriver; - return primaryStoreDriver.getVolumeSizeIncludingHypervisorSnapshotReserve(volume, pool); + VolumeInfo volumeInfo = volFactory.getVolume(volume.getId()); + + return primaryStoreDriver.getDataObjectSizeIncludingHypervisorSnapshotReserve(volumeInfo, pool); } return volume.getSize(); } + private long getBytesRequiredForTemplate(VMTemplateVO tmpl, StoragePool pool) { + DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(pool.getStorageProviderName()); + DataStoreDriver storeDriver = storeProvider.getDataStoreDriver(); + + if (storeDriver instanceof PrimaryDataStoreDriver) { + PrimaryDataStoreDriver primaryStoreDriver = (PrimaryDataStoreDriver)storeDriver; + + TemplateInfo templateInfo = tmplFactory.getReadyTemplateOnImageStore(tmpl.getId(), pool.getDataCenterId()); + + return primaryStoreDriver.getBytesRequiredForTemplate(templateInfo, pool); + } + + return tmpl.getSize(); + } + @Override public void createCapacityEntry(long poolId) { StoragePoolVO storage = _storagePoolDao.findById(poolId); diff --git a/server/src/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/com/cloud/storage/VolumeApiServiceImpl.java index 4c3de3eabce..395b00c13c9 100644 --- a/server/src/com/cloud/storage/VolumeApiServiceImpl.java +++ b/server/src/com/cloud/storage/VolumeApiServiceImpl.java @@ -839,6 +839,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic Long newSize = null; Long newMinIops = null; Long newMaxIops = null; + Integer newHypervisorSnapshotReserve = null; boolean shrinkOk = cmd.getShrinkOk(); VolumeVO volume = _volsDao.findById(cmd.getEntityId()); @@ -881,6 +882,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic // if we are to use the existing disk offering if (newDiskOffering == null) { newSize = cmd.getSize(); + newHypervisorSnapshotReserve = volume.getHypervisorSnapshotReserve(); // if the caller is looking to change the size of the volume if (newSize != null) { @@ -939,10 +941,6 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic throw new InvalidParameterValueException("There are no tags on the current disk offering. 
The new disk offering needs to have no tags, as well."); } - if (!areIntegersEqual(diskOffering.getHypervisorSnapshotReserve(), newDiskOffering.getHypervisorSnapshotReserve())) { - throw new InvalidParameterValueException("The hypervisor snapshot reverse on the new and old disk offerings must be equal."); - } - if (newDiskOffering.getDomainId() != null) { // not a public offering; check access _configMgr.checkDiskOfferingAccess(CallContext.current().getCallingAccount(), newDiskOffering); @@ -975,6 +973,9 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic newMinIops = newDiskOffering.getMinIops(); newMaxIops = newDiskOffering.getMaxIops(); } + + // if the hypervisor snapshot reserve value is null, it must remain null (currently only KVM uses null and null is all KVM uses for a value here) + newHypervisorSnapshotReserve = volume.getHypervisorSnapshotReserve() != null ? newDiskOffering.getHypervisorSnapshotReserve() : null; } long currentSize = volume.getSize(); @@ -1013,6 +1014,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic volume.setSize(newSize); volume.setMinIops(newMinIops); volume.setMaxIops(newMaxIops); + volume.setHypervisorSnapshotReserve(newHypervisorSnapshotReserve); if (newDiskOffering != null) { volume.setDiskOfferingId(cmd.getNewDiskOfferingId()); @@ -1038,13 +1040,13 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic try { return orchestrateResizeVolume(volume.getId(), currentSize, newSize, newMinIops, newMaxIops, - newDiskOffering != null ? cmd.getNewDiskOfferingId() : null, shrinkOk); + newHypervisorSnapshotReserve, newDiskOffering != null ? cmd.getNewDiskOfferingId() : null, shrinkOk); } finally { _workJobDao.expunge(placeHolder.getId()); } } else { Outcome outcome = resizeVolumeThroughJobQueue(userVm.getId(), volume.getId(), currentSize, newSize, newMinIops, newMaxIops, - newDiskOffering != null ? cmd.getNewDiskOfferingId() : null, shrinkOk); + newHypervisorSnapshotReserve, newDiskOffering != null ? cmd.getNewDiskOfferingId() : null, shrinkOk); try { outcome.get(); @@ -1079,19 +1081,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic } return orchestrateResizeVolume(volume.getId(), currentSize, newSize, newMinIops, newMaxIops, - newDiskOffering != null ? cmd.getNewDiskOfferingId() : null, shrinkOk); - } - - private static boolean areIntegersEqual(Integer i1, Integer i2) { - if (i1 == null) { - i1 = 0; - } - - if (i2 == null) { - i2 = 0; - } - - return i1.equals(i2); + newHypervisorSnapshotReserve, newDiskOffering != null ? 
cmd.getNewDiskOfferingId() : null, shrinkOk); } private void validateIops(Long minIops, Long maxIops) { @@ -1106,9 +1096,12 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic } } - private VolumeVO orchestrateResizeVolume(long volumeId, long currentSize, long newSize, Long newMinIops, Long newMaxIops, Long newDiskOfferingId, boolean shrinkOk) { + private VolumeVO orchestrateResizeVolume(long volumeId, long currentSize, long newSize, Long newMinIops, Long newMaxIops, + Integer newHypervisorSnapshotReserve, Long newDiskOfferingId, boolean shrinkOk) { VolumeVO volume = _volsDao.findById(volumeId); UserVmVO userVm = _userVmDao.findById(volume.getInstanceId()); + StoragePoolVO storagePool = _storagePoolDao.findById(volume.getPoolId()); + boolean isManaged = storagePool.isManaged(); /* * get a list of hosts to send the commands to, try the system the * associated vm is running on first, then the last known place it ran. @@ -1127,8 +1120,6 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic final String errorMsg = "The VM must be stopped or the disk detached in order to resize with the XenServer Hypervisor."; - StoragePoolVO storagePool = _storagePoolDao.findById(volume.getPoolId()); - if (storagePool.isManaged() && storagePool.getHypervisor() == HypervisorType.Any && hosts != null && hosts.length > 0) { HostVO host = _hostDao.findById(hosts[0]); @@ -1143,13 +1134,20 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic } } - ResizeVolumePayload payload = new ResizeVolumePayload(newSize, newMinIops, newMaxIops, shrinkOk, instanceName, hosts); + ResizeVolumePayload payload = new ResizeVolumePayload(newSize, newMinIops, newMaxIops, newHypervisorSnapshotReserve, + shrinkOk, instanceName, hosts, isManaged); try { VolumeInfo vol = volFactory.getVolume(volume.getId()); vol.addPayload(payload); - StoragePoolVO storagePool = _storagePoolDao.findById(vol.getPoolId()); + // this call to resize has a different impact depending on whether the + // underlying primary storage is managed or not + // if managed, this is the chance for the plug-in to change IOPS value, if applicable + // if not managed, this is the chance for the plug-in to talk to the hypervisor layer + // to change the size of the disk + AsyncCallFuture future = volService.resize(vol); + VolumeApiResult result = future.get(); // managed storage is designed in such a way that the storage plug-in does not // talk to the hypervisor layer; as such, if the storage is managed and the @@ -1165,14 +1163,6 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic _volsDao.update(volume.getId(), volume); } - // this call to resize has a different impact depending on whether the - // underlying primary storage is managed or not - // if managed, this is the chance for the plug-in to change IOPS value, if applicable - // if not managed, this is the chance for the plug-in to talk to the hypervisor layer - // to change the size of the disk - AsyncCallFuture future = volService.resize(vol); - VolumeApiResult result = future.get(); - if (result.isFailed()) { s_logger.warn("Failed to resize the volume " + volume); String details = ""; @@ -2758,9 +2748,9 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic return new VmJobVolumeOutcome(workJob, volumeId); } - public Outcome resizeVolumeThroughJobQueue(final Long vmId, final long volumeId, - final long currentSize, final long newSize, final Long newMinIops, final 
Long newMaxIops, final Long newServiceOfferingId, final boolean shrinkOk) { - + public Outcome resizeVolumeThroughJobQueue(final Long vmId, final long volumeId, final long currentSize, final long newSize, + final Long newMinIops, final Long newMaxIops, final Integer newHypervisorSnapshotReserve, + final Long newServiceOfferingId, final boolean shrinkOk) { final CallContext context = CallContext.current(); final User callingUser = context.getCallingUser(); final Account callingAccount = context.getCallingAccount(); @@ -2781,7 +2771,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic // save work context info (there are some duplications) VmWorkResizeVolume workInfo = new VmWorkResizeVolume(callingUser.getId(), callingAccount.getId(), vm.getId(), - VolumeApiServiceImpl.VM_WORK_JOB_HANDLER, volumeId, currentSize, newSize, newMinIops, newMaxIops, newServiceOfferingId, shrinkOk); + VolumeApiServiceImpl.VM_WORK_JOB_HANDLER, volumeId, currentSize, newSize, newMinIops, newMaxIops, newHypervisorSnapshotReserve, newServiceOfferingId, shrinkOk); workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); _jobMgr.submitAsyncJob(workJob, VmWorkConstants.VM_WORK_QUEUE, vm.getId()); @@ -2915,7 +2905,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic @ReflectionUse private Pair orchestrateResizeVolume(VmWorkResizeVolume work) throws Exception { Volume vol = orchestrateResizeVolume(work.getVolumeId(), work.getCurrentSize(), work.getNewSize(), work.getNewMinIops(), work.getNewMaxIops(), - work.getNewServiceOfferingId(), work.isShrinkOk()); + work.getNewHypervisorSnapshotReserve(), work.getNewServiceOfferingId(), work.isShrinkOk()); return new Pair(JobInfo.Status.SUCCEEDED, _jobMgr.marshallResultObject(new Long(vol.getId()))); } diff --git a/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java b/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java index 757ab6110cd..16762c50137 100644 --- a/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java +++ b/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java @@ -35,6 +35,7 @@ import org.apache.cloudstack.api.command.user.snapshot.ListSnapshotsCmd; import org.apache.cloudstack.api.command.user.snapshot.UpdateSnapshotPolicyCmd; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreCapabilities; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector; @@ -1013,9 +1014,14 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement try { postCreateSnapshot(volume.getId(), snapshotId, payload.getSnapshotPolicyId()); - SnapshotDataStoreVO snapshotStoreRef = _snapshotStoreDao.findBySnapshot(snapshotId, DataStoreRole.Image); + + DataStoreRole dataStoreRole = getDataStoreRole(snapshot, _snapshotStoreDao, dataStoreMgr); + + SnapshotDataStoreVO snapshotStoreRef = _snapshotStoreDao.findBySnapshot(snapshotId, dataStoreRole); + UsageEventUtils.publishUsageEvent(EventTypes.EVENT_SNAPSHOT_CREATE, snapshot.getAccountId(), snapshot.getDataCenterId(), snapshotId, snapshot.getName(), null, null, snapshotStoreRef.getPhysicalSize(), volume.getSize(), snapshot.getClass().getName(), snapshot.getUuid()); + // Correct the resource count of snapshot in case of delta 
snapshots. _resourceLimitMgr.decrementResourceCount(snapshotOwner.getId(), ResourceType.secondary_storage, new Long(volume.getSize() - snapshotStoreRef.getPhysicalSize())); } catch (Exception e) { @@ -1030,6 +1036,30 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement return snapshot; } + private static DataStoreRole getDataStoreRole(Snapshot snapshot, SnapshotDataStoreDao snapshotStoreDao, DataStoreManager dataStoreMgr) { + SnapshotDataStoreVO snapshotStore = snapshotStoreDao.findBySnapshot(snapshot.getId(), DataStoreRole.Primary); + + if (snapshotStore == null) { + return DataStoreRole.Image; + } + + long storagePoolId = snapshotStore.getDataStoreId(); + DataStore dataStore = dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary); + + Map mapCapabilities = dataStore.getDriver().getCapabilities(); + + if (mapCapabilities != null) { + String value = mapCapabilities.get(DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT.toString()); + Boolean supportsStorageSystemSnapshots = new Boolean(value); + + if (supportsStorageSystemSnapshots) { + return DataStoreRole.Primary; + } + } + + return DataStoreRole.Image; + } + @Override public boolean configure(String name, Map params) throws ConfigurationException { diff --git a/server/src/com/cloud/template/TemplateManagerImpl.java b/server/src/com/cloud/template/TemplateManagerImpl.java index 4b27c8b326c..ceee61604c8 100644 --- a/server/src/com/cloud/template/TemplateManagerImpl.java +++ b/server/src/com/cloud/template/TemplateManagerImpl.java @@ -38,7 +38,7 @@ import com.google.gson.Gson; import com.google.gson.GsonBuilder; import org.apache.cloudstack.api.command.user.template.GetUploadParamsForTemplateCmd; -import org.apache.cloudstack.api.response.GetUploadParamsResponse; +import org.apache.cloudstack.framework.async.AsyncCallFuture; import org.apache.cloudstack.storage.command.TemplateOrVolumePostUploadCommand; import org.apache.cloudstack.utils.imagestore.ImageStoreUtil; import org.apache.commons.collections.CollectionUtils; @@ -62,12 +62,14 @@ import org.apache.cloudstack.api.command.user.template.ListTemplatePermissionsCm import org.apache.cloudstack.api.command.user.template.RegisterTemplateCmd; import org.apache.cloudstack.api.command.user.template.UpdateTemplateCmd; import org.apache.cloudstack.api.command.user.template.UpdateTemplatePermissionsCmd; +import org.apache.cloudstack.api.response.GetUploadParamsResponse; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStore; import org.apache.cloudstack.engine.subsystem.api.storage.Scope; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; @@ -79,7 +81,6 @@ import org.apache.cloudstack.engine.subsystem.api.storage.TemplateService.Templa import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; -import org.apache.cloudstack.framework.async.AsyncCallFuture; import 
org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.Configurable; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; @@ -689,7 +690,9 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, } try { + templateStoragePoolRef.setTemplateSize(0); templateStoragePoolRef.setDownloadState(VMTemplateStorageResourceAssoc.Status.NOT_DOWNLOADED); + _tmpltPoolDao.update(templateStoragePoolRefId, templateStoragePoolRef); } finally { _tmpltPoolDao.releaseFromLockTable(templateStoragePoolRefId); @@ -873,41 +876,55 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, @Override @DB public void evictTemplateFromStoragePool(VMTemplateStoragePoolVO templatePoolVO) { - //Need to hold the lock, otherwise, another thread may create a volume from the template at the same time. - //Assumption here is that, we will hold the same lock during create volume from template + // Need to hold the lock; otherwise, another thread may create a volume from the template at the same time. + // Assumption here is that we will hold the same lock during create volume from template. VMTemplateStoragePoolVO templatePoolRef = _tmpltPoolDao.acquireInLockTable(templatePoolVO.getId()); + if (templatePoolRef == null) { - s_logger.debug("can't aquire the lock for template pool ref:" + templatePoolVO.getId()); + s_logger.debug("Can't acquire the lock for template pool ref: " + templatePoolVO.getId()); + return; } - try { - StoragePool pool = (StoragePool)_dataStoreMgr.getPrimaryDataStore(templatePoolVO.getPoolId()); - VMTemplateVO template = _tmpltDao.findByIdIncludingRemoved(templatePoolVO.getTemplateId()); + PrimaryDataStore pool = (PrimaryDataStore)_dataStoreMgr.getPrimaryDataStore(templatePoolVO.getPoolId()); + TemplateInfo template = _tmplFactory.getTemplate(templatePoolRef.getTemplateId(), pool); + try { if (s_logger.isDebugEnabled()) { s_logger.debug("Evicting " + templatePoolVO); } - DestroyCommand cmd = new DestroyCommand(pool, templatePoolVO); - try { + if (pool.isManaged()) { + // For managed store, just delete the template volume. + AsyncCallFuture future = _tmpltSvr.deleteTemplateOnPrimary(template, pool); + TemplateApiResult result = future.get(); + + if (result.isFailed()) { + s_logger.debug("Failed to delete template " + template.getId() + " from storage pool " + pool.getId()); + } else { + // Remove the templatePoolVO. + if (_tmpltPoolDao.remove(templatePoolVO.getId())) { + s_logger.debug("Successfully evicted template " + template.getName() + " from storage pool " + pool.getName()); + } + } + } else { + DestroyCommand cmd = new DestroyCommand(pool, templatePoolVO); Answer answer = _storageMgr.sendToPool(pool, cmd); if (answer != null && answer.getResult()) { - // Remove the templatePoolVO + // Remove the templatePoolVO. if (_tmpltPoolDao.remove(templatePoolVO.getId())) { - s_logger.debug("Successfully evicted template: " + template.getName() + " from storage pool: " + pool.getName()); + s_logger.debug("Successfully evicted template " + template.getName() + " from storage pool " + pool.getName()); } } else { - s_logger.info("Will retry evicte template: " + template.getName() + " from storage pool: " + pool.getName()); + s_logger.info("Will retry evict template " + template.getName() + " from storage pool " + pool.getName()); } - } catch (StorageUnavailableException e) { - s_logger.info("Storage is unavailable currently. 
Will retry evicte template: " + template.getName() + " from storage pool: " + pool.getName()); } + } catch (StorageUnavailableException | InterruptedException | ExecutionException e) { + s_logger.info("Storage is unavailable currently. Will retry evict template " + template.getName() + " from storage pool " + pool.getName()); } finally { _tmpltPoolDao.releaseFromLockTable(templatePoolRef.getId()); } - } @Override @@ -1482,14 +1499,17 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, future = _tmpltSvr.createTemplateFromSnapshotAsync(snapInfo, tmplInfo, store); } else if (volumeId != null) { VolumeInfo volInfo = _volFactory.getVolume(volumeId); + future = _tmpltSvr.createTemplateFromVolumeAsync(volInfo, tmplInfo, store); } else { throw new CloudRuntimeException("Creating private Template need to specify snapshotId or volumeId"); } CommandResult result = null; + try { result = future.get(); + if (result.isFailed()) { privateTemplate = null; s_logger.debug("Failed to create template" + result.getResult()); diff --git a/ui/scripts/storage.js b/ui/scripts/storage.js index 39dfccf2ebe..29cd4c1cdc3 100644 --- a/ui/scripts/storage.js +++ b/ui/scripts/storage.js @@ -1550,6 +1550,8 @@ preFilter: function(args) { if (args.context.volumes != null && args.context.volumes[0].type == 'ROOT') { args.$form.find('.form-item[rel=newdiskoffering]').hide(); + + selectedDiskOfferingObj = null; } else { args.$form.find('.form-item[rel=newsize]').hide(); } diff --git a/utils/src/main/java/com/cloud/utils/Utils.java b/utils/src/main/java/com/cloud/utils/Utils.java new file mode 100644 index 00000000000..53f0a8059cd --- /dev/null +++ b/utils/src/main/java/com/cloud/utils/Utils.java @@ -0,0 +1,38 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// + +package com.cloud.utils; + +import java.util.Map; + +import com.google.common.base.Predicate; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Maps; + +public class Utils { + public static Map getImmutableMap(Map map) { + Map filteredMap = Maps.filterValues(map, new Predicate() { + public boolean apply(final V input) { + return input != null; + } + }); + + return ImmutableMap.builder().putAll(filteredMap).build(); + } +}
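A minimal usage sketch for the new com.cloud.utils.Utils.getImmutableMap helper above (assuming the method carries the usual generic signature; the map keys and values here are hypothetical and only illustrate the null-filtering behavior): entries whose value is null are dropped before the Guava ImmutableMap is built, since ImmutableMap.Builder rejects null values.

    import java.util.HashMap;
    import java.util.Map;

    import com.cloud.utils.Utils;

    public class UtilsUsageSketch {
        public static void main(String[] args) {
            Map<String, String> attributes = new HashMap<>();

            attributes.put("cloudstackVolumeId", "42");   // hypothetical key and value, for illustration only
            attributes.put("cloudstackSnapshotId", null); // dropped by the null-value filter

            // Contains only the "cloudstackVolumeId" entry; passing the null value straight to ImmutableMap would throw.
            Map<String, String> immutable = Utils.getImmutableMap(attributes);

            System.out.println(immutable);
        }
    }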