Support for backend snapshots with XenServer

This commit is contained in:
Mike Tutkowski 2015-11-16 12:18:25 -07:00
parent 2b4b8aa40c
commit 2bd035d199
82 changed files with 3935 additions and 1254 deletions

View File

@ -35,7 +35,7 @@ public class StartupRoutingCommand extends StartupCommand {
long memory;
long dom0MinMemory;
boolean poolSync;
private boolean supportsClonedVolumes;
String caps;
String pool;
@ -180,4 +180,12 @@ public class StartupRoutingCommand extends StartupCommand {
public void setGpuGroupDetails(HashMap<String, HashMap<String, VgpuTypesInfo>> groupDetails) {
this.groupDetails = groupDetails;
}
/**
 * Reports the value of the {@code supportsClonedVolumes} flag for this host.
 */
public boolean getSupportsClonedVolumes() {
    return this.supportsClonedVolumes;
}
/**
 * Sets the {@code supportsClonedVolumes} flag for this host.
 *
 * @param supportsClonedVolumes new value for the flag
 */
public void setSupportsClonedVolumes(boolean supportsClonedVolumes) {
    this.supportsClonedVolumes = supportsClonedVolumes;
}
}

View File

@ -25,22 +25,34 @@ import com.cloud.agent.api.to.StorageFilerTO;
public class ResizeVolumeCommand extends Command {
private String path;
private StorageFilerTO pool;
private String vmInstance;
private Long newSize;
private Long currentSize;
private Long newSize;
private boolean shrinkOk;
private String vmInstance;
/* For managed storage */
private boolean managed;
private String iScsiName;
// No-arg constructor — presumably required for command (de)serialization; TODO confirm.
protected ResizeVolumeCommand() {
}
/**
 * Creates a resize command for non-managed storage ({@code managed} defaults to false).
 *
 * @param path        volume path on the storage pool
 * @param pool        target storage pool
 * @param currentSize current volume size in bytes
 * @param newSize     requested volume size in bytes
 * @param shrinkOk    whether shrinking the volume is permitted
 * @param vmInstance  name of the VM instance the volume belongs to (may be null)
 */
public ResizeVolumeCommand(String path, StorageFilerTO pool, Long currentSize, Long newSize, boolean shrinkOk, String vmInstance) {
    this.path = path;
    this.pool = pool;
    this.vmInstance = vmInstance; // assigned exactly once (the original had a duplicate assignment)
    this.currentSize = currentSize;
    this.newSize = newSize;
    this.shrinkOk = shrinkOk;
    this.managed = false;
}
/**
 * Creates a resize command for managed storage.
 *
 * @param isManaged whether the storage pool is managed (cloud.storage_pool.managed = true)
 * @param iScsiName iSCSI name used to address the managed volume
 */
public ResizeVolumeCommand(String path, StorageFilerTO pool, Long currentSize, Long newSize, boolean shrinkOk, String vmInstance,
        boolean isManaged, String iScsiName) {
    this(path, pool, currentSize, newSize, shrinkOk, vmInstance);

    this.managed = isManaged;
    this.iScsiName = iScsiName;
}
public String getPath() {
@ -55,22 +67,20 @@ public class ResizeVolumeCommand extends Command {
return pool;
}
/**
 * @return the current volume size in bytes; NOTE(review): unboxes a {@code Long} field —
 *         will NPE if the field was never set; TODO confirm callers always populate it
 */
public long getCurrentSize() {
    return currentSize;
}

/**
 * @return the requested (new) volume size in bytes; same unboxing caveat as getCurrentSize()
 */
public long getNewSize() {
    return newSize;
}
/**
 * @return whether shrinking the volume is permitted
 */
public boolean getShrinkOk() {
    return shrinkOk;
}
/**
 * @return the name of the VM instance associated with this resize (may be null)
 */
public String getInstanceName() {
    return this.vmInstance;
}
/**
 * @return true if this command targets managed storage (cloud.storage_pool.managed = true)
 */
public boolean isManaged() {
    return managed;
}
// NOTE(review): nonstandard accessor name (underscore); kept as-is for caller compatibility.
public String get_iScsiName() {
    return iScsiName;
}
/**
* {@inheritDoc}
*/
@ -78,5 +88,4 @@ public class ResizeVolumeCommand extends Command {
// Always false: this command does not request in-sequence execution on the agent.
public boolean executeInSequence() {
    return false;
}
}

View File

@ -26,6 +26,7 @@ import org.apache.cloudstack.storage.command.DeleteCommand;
import org.apache.cloudstack.storage.command.DettachCommand;
import org.apache.cloudstack.storage.command.ForgetObjectCmd;
import org.apache.cloudstack.storage.command.IntroduceObjectCmd;
import org.apache.cloudstack.storage.command.ResignatureCommand;
import org.apache.cloudstack.storage.command.SnapshotAndCopyCommand;
import com.cloud.agent.api.Answer;
@ -68,4 +69,6 @@ public interface StorageProcessor {
public Answer forgetObject(ForgetObjectCmd cmd);
public Answer snapshotAndCopy(SnapshotAndCopyCommand cmd);
public Answer resignature(ResignatureCommand cmd);
}

View File

@ -28,6 +28,7 @@ import org.apache.cloudstack.storage.command.CreateObjectCommand;
import org.apache.cloudstack.storage.command.DeleteCommand;
import org.apache.cloudstack.storage.command.DettachCommand;
import org.apache.cloudstack.storage.command.IntroduceObjectCmd;
import org.apache.cloudstack.storage.command.ResignatureCommand;
import org.apache.cloudstack.storage.command.SnapshotAndCopyCommand;
import org.apache.cloudstack.storage.command.StorageSubSystemCommand;
@ -64,6 +65,8 @@ public class StorageSubsystemCommandHandlerBase implements StorageSubsystemComma
return processor.introduceObject((IntroduceObjectCmd)command);
} else if (command instanceof SnapshotAndCopyCommand) {
return processor.snapshotAndCopy((SnapshotAndCopyCommand)command);
} else if (command instanceof ResignatureCommand) {
return processor.resignature((ResignatureCommand)command);
}
return new Answer((Command)command, false, "not implemented yet");

View File

@ -0,0 +1,60 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package org.apache.cloudstack.storage.command;
import com.cloud.agent.api.Answer;
import com.cloud.storage.Storage.ImageFormat;
/**
 * Answer to a resignature request. On success it carries the size, path and
 * image format of the resulting volume; the single-argument constructor builds
 * a failure answer carrying only an error message.
 */
public class ResignatureAnswer extends Answer {
    private long size;
    private String path;
    private ImageFormat format;

    /** Success answer; populate via the setters. */
    public ResignatureAnswer() {
    }

    /** Failure answer carrying {@code errMsg}. */
    public ResignatureAnswer(String errMsg) {
        super(null, false, errMsg);
    }

    public long getSize() {
        return size;
    }

    public void setSize(long size) {
        this.size = size;
    }

    public String getPath() {
        return path;
    }

    public void setPath(String path) {
        this.path = path;
    }

    public ImageFormat getFormat() {
        return format;
    }

    public void setFormat(ImageFormat format) {
        this.format = format;
    }
}

View File

@ -0,0 +1,48 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package org.apache.cloudstack.storage.command;
import com.cloud.utils.Utils;
import java.util.Map;
/**
 * Storage-subsystem command carrying the details needed for a resignature
 * operation. The details map is snapshotted immutably at construction time.
 */
public final class ResignatureCommand extends StorageSubSystemCommand {
    private final Map<String, String> details;
    private boolean executeInSequence = true;

    public ResignatureCommand(final Map<String, String> details) {
        // Defensive, immutable copy of the caller-supplied map.
        this.details = Utils.getImmutableMap(details);
    }

    public Map<String, String> getDetails() {
        return details;
    }

    @Override
    public boolean executeInSequence() {
        return executeInSequence;
    }

    @Override
    public void setExecuteInSequence(final boolean executeInSequence) {
        this.executeInSequence = executeInSequence;
    }
}

View File

@ -37,6 +37,7 @@ public class PrimaryDataStoreTO implements DataStoreTO {
public static final String CHAP_INITIATOR_SECRET = PrimaryDataStore.CHAP_INITIATOR_SECRET;
public static final String CHAP_TARGET_USERNAME = PrimaryDataStore.CHAP_TARGET_USERNAME;
public static final String CHAP_TARGET_SECRET = PrimaryDataStore.CHAP_TARGET_SECRET;
public static final String REMOVE_AFTER_COPY = PrimaryDataStore.REMOVE_AFTER_COPY;
public static final String VOLUME_SIZE = PrimaryDataStore.VOLUME_SIZE;
private final String uuid;

View File

@ -18,7 +18,23 @@
*/
package org.apache.cloudstack.engine.subsystem.api.storage;
/**
 * Enumerates different capabilities storage drivers may have.
 */
public enum DataStoreCapabilities {
    /**
     * indicates to the snapshot strategy that the VM should be quiesced when taking a volume snapshot
     */
    VOLUME_SNAPSHOT_QUIESCEVM,
    /**
     * indicates that this driver takes CloudStack volume snapshots on its own system (as either back-end snapshots or back-end clones)
     */
    STORAGE_SYSTEM_SNAPSHOT,
    /**
     * indicates that this driver supports the "cloneOfSnapshot" property of cloud.snapshot_details (for creating a back-end volume
     * from a back-end snapshot or a back-end clone) and that it supports the invocation of the createAsync method where a SnapshotInfo is passed in while using
     * the "tempVolume" property of snapshot_details
     */
    CAN_CREATE_VOLUME_FROM_SNAPSHOT,
    /**
     * indicates that this driver supports the "cloneOfSnapshot" property of cloud.snapshot_details (for creating a volume from a volume)
     */
    CAN_CREATE_VOLUME_FROM_VOLUME
}

View File

@ -23,6 +23,8 @@ import java.util.List;
import org.apache.cloudstack.engine.subsystem.api.storage.disktype.DiskFormat;
public interface PrimaryDataStore extends DataStore, PrimaryDataStoreInfo {
DataObject create(DataObject dataObject, boolean createEntryInTempSpoolRef);
VolumeInfo getVolume(long id);
List<VolumeInfo> getVolumes();

View File

@ -23,27 +23,38 @@ import org.apache.cloudstack.storage.command.CommandResult;
import com.cloud.host.Host;
import com.cloud.storage.StoragePool;
import com.cloud.storage.Volume;
public interface PrimaryDataStoreDriver extends DataStoreDriver {
ChapInfo getChapInfo(VolumeInfo volumeInfo);
ChapInfo getChapInfo(DataObject dataObject);
boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore);
void revokeAccess(DataObject dataObject, Host host, DataStore dataStore);
// intended for managed storage (cloud.storage_pool.managed = true)
// if not managed, return volume.getSize()
long getVolumeSizeIncludingHypervisorSnapshotReserve(Volume volume, StoragePool storagePool);
/**
* intended for managed storage (cloud.storage_pool.managed = true)
* if not managed, return volume.getSize()
*/
long getDataObjectSizeIncludingHypervisorSnapshotReserve(DataObject dataObject, StoragePool storagePool);
// intended for managed storage (cloud.storage_pool.managed = true)
// if managed storage, return the total number of bytes currently in use for the storage pool in question
// if not managed storage, return 0
/**
* intended for zone-wide primary storage that is capable of storing a template once and using it in multiple clusters
* if not this kind of storage, return 0
*/
long getBytesRequiredForTemplate(TemplateInfo templateInfo, StoragePool storagePool);
/**
* intended for managed storage (cloud.storage_pool.managed = true)
* if managed storage, return the total number of bytes currently in use for the storage pool in question
* if not managed storage, return 0
*/
long getUsedBytes(StoragePool storagePool);
// intended for managed storage (cloud.storage_pool.managed = true)
// if managed storage, return the total number of IOPS currently in use for the storage pool in question
// if not managed storage, return 0
/**
* intended for managed storage (cloud.storage_pool.managed = true)
* if managed storage, return the total number of IOPS currently in use for the storage pool in question
* if not managed storage, return 0
*/
long getUsedIops(StoragePool storagePool);
void takeSnapshot(SnapshotInfo snapshot, AsyncCompletionCallback<CreateCmdResult> callback);

View File

@ -36,6 +36,7 @@ public interface PrimaryDataStoreInfo extends StoragePool {
static final String CHAP_INITIATOR_SECRET = "chapInitiatorSecret";
static final String CHAP_TARGET_USERNAME = "chapTargetUsername";
static final String CHAP_TARGET_SECRET = "chapTargetSecret";
static final String REMOVE_AFTER_COPY = "removeAfterCopy";
static final String VOLUME_SIZE = "volumeSize";
boolean isHypervisorSupported(HypervisorType hypervisor);

View File

@ -32,6 +32,7 @@ public interface TemplateService {
public TemplateApiResult(TemplateInfo template) {
super();
this.template = template;
}
@ -52,6 +53,8 @@ public interface TemplateService {
AsyncCallFuture<TemplateApiResult> prepareTemplateOnPrimary(TemplateInfo srcTemplate, StoragePool pool);
AsyncCallFuture<TemplateApiResult> deleteTemplateOnPrimary(TemplateInfo template, StoragePool pool);
void syncTemplateToRegionStore(long templateId, DataStore store);
void handleSysTemplateDownload(HypervisorType hostHyper, Long dcId);

View File

@ -45,7 +45,7 @@ public interface VolumeService {
}
}
ChapInfo getChapInfo(VolumeInfo volumeInfo, DataStore dataStore);
ChapInfo getChapInfo(DataObject dataObject, DataStore dataStore);
boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore);
@ -81,7 +81,7 @@ public interface VolumeService {
VolumeEntity getVolumeEntity(long volumeId);
AsyncCallFuture<VolumeApiResult> createManagedStorageAndVolumeFromTemplateAsync(VolumeInfo volumeInfo, long destDataStoreId,
AsyncCallFuture<VolumeApiResult> createManagedStorageVolumeFromTemplateAsync(VolumeInfo volumeInfo, long destDataStoreId,
TemplateInfo srcTemplateInfo, long destHostId);
AsyncCallFuture<VolumeApiResult> createVolumeFromTemplateAsync(VolumeInfo volume, long dataStoreId,

View File

@ -24,12 +24,12 @@ public class VmWorkResizeVolume extends VmWork {
private long newSize;
private Long newMinIops;
private Long newMaxIops;
private Integer newHypervisorSnapshotReserve;
private Long newServiceOfferingId;
private boolean shrinkOk;
public VmWorkResizeVolume(long userId, long accountId, long vmId, String handlerName,
long volumeId, long currentSize, long newSize, Long newMinIops, Long newMaxIops, Long newServiceOfferingId, boolean shrinkOk) {
public VmWorkResizeVolume(long userId, long accountId, long vmId, String handlerName, long volumeId, long currentSize, long newSize,
Long newMinIops, Long newMaxIops, Integer newHypervisorSnapshotReserve, Long newServiceOfferingId, boolean shrinkOk) {
super(userId, accountId, vmId, handlerName);
this.volumeId = volumeId;
@ -37,6 +37,7 @@ public class VmWorkResizeVolume extends VmWork {
this.newSize = newSize;
this.newMinIops = newMinIops;
this.newMaxIops = newMaxIops;
this.newHypervisorSnapshotReserve = newHypervisorSnapshotReserve;
this.newServiceOfferingId = newServiceOfferingId;
this.shrinkOk = shrinkOk;
}
@ -68,4 +69,6 @@ public class VmWorkResizeVolume extends VmWork {
public boolean isShrinkOk() {
return shrinkOk;
}
public Integer getNewHypervisorSnapshotReserve() { return newHypervisorSnapshotReserve; }
}

View File

@ -1242,10 +1242,11 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
future = volService.createVolumeAsync(volume, destPool);
} else {
TemplateInfo templ = tmplFactory.getReadyTemplateOnImageStore(templateId, dest.getDataCenter().getId());
if (templ == null) {
s_logger.debug("can't find ready template: " + templateId + " for data center " + dest.getDataCenter().getId());
throw new CloudRuntimeException("can't find ready template: " + templateId + " for data center " + dest.getDataCenter().getId());
}
@ -1260,13 +1261,13 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
long hostId = vm.getVirtualMachine().getHostId();
future = volService.createManagedStorageAndVolumeFromTemplateAsync(volume, destPool.getId(), templ, hostId);
future = volService.createManagedStorageVolumeFromTemplateAsync(volume, destPool.getId(), templ, hostId);
}
else {
future = volService.createVolumeFromTemplateAsync(volume, destPool.getId(), templ);
}
}
VolumeApiResult result = null;
VolumeApiResult result;
try {
result = future.get();
if (result.isFailed()) {
@ -1290,10 +1291,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
newVol = _volsDao.findById(newVol.getId());
break; //break out of template-redeploy retry loop
} catch (InterruptedException e) {
s_logger.error("Unable to create " + newVol, e);
throw new StorageUnavailableException("Unable to create " + newVol + ":" + e.toString(), destPool.getId());
} catch (ExecutionException e) {
} catch (InterruptedException | ExecutionException e) {
s_logger.error("Unable to create " + newVol, e);
throw new StorageUnavailableException("Unable to create " + newVol + ":" + e.toString(), destPool.getId());
}

View File

@ -45,4 +45,6 @@ public interface ClusterDao extends GenericDao<ClusterVO, Long> {
List<ClusterVO> listClustersByDcId(long zoneId);
List<Long> listAllCusters(long zoneId);
boolean computeWhetherClusterSupportsResigning(long clusterId);
}

View File

@ -30,6 +30,9 @@ import org.springframework.stereotype.Component;
import com.cloud.dc.ClusterVO;
import com.cloud.dc.HostPodVO;
import com.cloud.host.HostVO;
import com.cloud.host.dao.HostDao;
import com.cloud.host.dao.HostDetailsDao;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.org.Grouping;
import com.cloud.utils.db.GenericDaoBase;
@ -57,7 +60,11 @@ public class ClusterDaoImpl extends GenericDaoBase<ClusterVO, Long> implements C
private static final String GET_POD_CLUSTER_MAP_PREFIX = "SELECT pod_id, id FROM cloud.cluster WHERE cluster.id IN( ";
private static final String GET_POD_CLUSTER_MAP_SUFFIX = " )";
@Inject
protected HostPodDao _hostPodDao;
private HostDao hostDao;
@Inject
private HostDetailsDao hostDetailsDao;
@Inject
protected HostPodDao hostPodDao;
public ClusterDaoImpl() {
super();
@ -214,7 +221,7 @@ public class ClusterDaoImpl extends GenericDaoBase<ClusterVO, Long> implements C
@Override
public List<Long> listClustersWithDisabledPods(long zoneId) {
GenericSearchBuilder<HostPodVO, Long> disabledPodIdSearch = _hostPodDao.createSearchBuilder(Long.class);
GenericSearchBuilder<HostPodVO, Long> disabledPodIdSearch = hostPodDao.createSearchBuilder(Long.class);
disabledPodIdSearch.selectFields(disabledPodIdSearch.entity().getId());
disabledPodIdSearch.and("dataCenterId", disabledPodIdSearch.entity().getDataCenterId(), Op.EQ);
disabledPodIdSearch.and("allocationState", disabledPodIdSearch.entity().getAllocationState(), Op.EQ);
@ -260,4 +267,35 @@ public class ClusterDaoImpl extends GenericDaoBase<ClusterVO, Long> implements C
sc.setParameters("dataCenterId", zoneId);
return customSearch(sc, null);
}
@Override
public boolean computeWhetherClusterSupportsResigning(long clusterId) {
    ClusterVO cluster = findById(clusterId);

    // Only an existing, Enabled cluster can support resigning.
    if (cluster == null || cluster.getAllocationState() != Grouping.AllocationState.Enabled) {
        return false;
    }

    List<HostVO> hosts = hostDao.findByClusterId(clusterId);

    // A cluster with no hosts cannot resign anything (the previous code returned true
    // vacuously for an empty host list).
    if (hosts == null || hosts.isEmpty()) {
        return false;
    }

    // "supportsResign" host details, keyed by host id — fetched once for all hosts.
    Map<Long, String> mapSupportsResign = hostDetailsDao.findDetails("supportsResign");

    // Every host in the cluster must advertise supportsResign = true.
    for (HostVO host : hosts) {
        if (host == null) {
            return false;
        }

        if (!Boolean.parseBoolean(mapSupportsResign.get(host.getId()))) {
            return false;
        }
    }

    return true;
}
}

View File

@ -23,6 +23,7 @@ import com.cloud.host.Host;
import com.cloud.host.Host.Type;
import com.cloud.host.HostVO;
import com.cloud.host.Status;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.info.RunningHostCountInfo;
import com.cloud.resource.ResourceState;
import com.cloud.utils.db.GenericDao;
@ -89,6 +90,8 @@ public interface HostDao extends GenericDao<HostVO, Long>, StateDao<Status, Stat
List<HostVO> listByDataCenterId(long id);
List<HostVO> listByDataCenterIdAndHypervisorType(long zoneId, Hypervisor.HypervisorType hypervisorType);
List<Long> listAllHosts(long zoneId);
List<HostVO> listAllHostsByType(Host.Type type);

View File

@ -47,7 +47,9 @@ import com.cloud.host.HostTagVO;
import com.cloud.host.HostVO;
import com.cloud.host.Status;
import com.cloud.host.Status.Event;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.info.RunningHostCountInfo;
import com.cloud.org.Grouping;
import com.cloud.org.Managed;
import com.cloud.resource.ResourceState;
import com.cloud.utils.DateUtil;
@ -421,6 +423,37 @@ public class HostDaoImpl extends GenericDaoBase<HostVO, Long> implements HostDao
return listBy(sc);
}
// Lists Routing hosts in the given zone that are Up, resource-state Enabled, and belong
// to an Enabled cluster of the given hypervisor type.
@Override
public List<HostVO> listByDataCenterIdAndHypervisorType(long zoneId, Hypervisor.HypervisorType hypervisorType) {
// Inner search over clusters: restrict by allocation state and hypervisor type.
SearchBuilder<ClusterVO> clusterSearch = _clusterDao.createSearchBuilder();
clusterSearch.and("allocationState", clusterSearch.entity().getAllocationState(), SearchCriteria.Op.EQ);
clusterSearch.and("hypervisorType", clusterSearch.entity().getHypervisorType(), SearchCriteria.Op.EQ);
// Outer search over hosts, inner-joined to the cluster search on cluster id.
SearchBuilder<HostVO> hostSearch = createSearchBuilder();
hostSearch.and("dc", hostSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ);
hostSearch.and("type", hostSearch.entity().getType(), Op.EQ);
hostSearch.and("status", hostSearch.entity().getStatus(), Op.EQ);
hostSearch.and("resourceState", hostSearch.entity().getResourceState(), Op.EQ);
hostSearch.join("clusterSearch", clusterSearch, hostSearch.entity().getClusterId(), clusterSearch.entity().getId(), JoinBuilder.JoinType.INNER);
hostSearch.done();
SearchCriteria<HostVO> sc = hostSearch.create();
// Fixed filters: only Routing hosts that are Up and Enabled are candidates.
sc.setParameters("dc", zoneId);
sc.setParameters("type", Host.Type.Routing);
sc.setParameters("status", Status.Up);
sc.setParameters("resourceState", ResourceState.Enabled);
sc.setJoinParameters("clusterSearch", "allocationState", Grouping.AllocationState.Enabled);
sc.setJoinParameters("clusterSearch", "hypervisorType", hypervisorType.toString());
return listBy(sc);
}
@Override
public HostVO findByGuid(String guid) {
SearchCriteria<HostVO> sc = GuidSearch.create("guid", guid);

View File

@ -24,6 +24,8 @@ import com.cloud.utils.db.GenericDao;
public interface HostDetailsDao extends GenericDao<DetailVO, Long> {
Map<String, String> findDetails(long hostId);
Map<Long, String> findDetails(String name);
void persist(long hostId, Map<String, String> details);
DetailVO findDetail(long hostId, String name);

View File

@ -37,6 +37,7 @@ import com.cloud.utils.exception.CloudRuntimeException;
public class HostDetailsDaoImpl extends GenericDaoBase<DetailVO, Long> implements HostDetailsDao {
protected final SearchBuilder<DetailVO> HostSearch;
protected final SearchBuilder<DetailVO> DetailSearch;
protected final SearchBuilder<DetailVO> NameSearch;
public HostDetailsDaoImpl() {
HostSearch = createSearchBuilder();
@ -47,6 +48,10 @@ public class HostDetailsDaoImpl extends GenericDaoBase<DetailVO, Long> implement
DetailSearch.and("hostId", DetailSearch.entity().getHostId(), SearchCriteria.Op.EQ);
DetailSearch.and("name", DetailSearch.entity().getName(), SearchCriteria.Op.EQ);
DetailSearch.done();
NameSearch = createSearchBuilder();
NameSearch.and("name", NameSearch.entity().getName(), SearchCriteria.Op.EQ);
NameSearch.done();
}
@Override
@ -65,10 +70,13 @@ public class HostDetailsDaoImpl extends GenericDaoBase<DetailVO, Long> implement
@Override
public Map<String, String> findDetails(long hostId) {
SearchCriteria<DetailVO> sc = HostSearch.create();
sc.setParameters("hostId", hostId);
List<DetailVO> results = search(sc, null);
Map<String, String> details = new HashMap<String, String>(results.size());
for (DetailVO result : results) {
if ("password".equals(result.getName())) {
details.put(result.getName(), DBEncryptionUtil.decrypt(result.getValue()));
@ -76,6 +84,28 @@ public class HostDetailsDaoImpl extends GenericDaoBase<DetailVO, Long> implement
details.put(result.getName(), result.getValue());
}
}
return details;
}
/**
 * Finds the value of the named host detail for every host that has it.
 *
 * @param name detail name to look up
 * @return map of host id to detail value; "password" values are decrypted
 */
@Override
public Map<Long, String> findDetails(String name) {
    SearchCriteria<DetailVO> criteria = NameSearch.create();
    criteria.setParameters("name", name);

    Map<Long, String> detailsByHost = new HashMap<>();

    for (DetailVO row : search(criteria, null)) {
        // Password values are stored encrypted; decrypt before returning.
        String value = "password".equals(row.getName()) ? DBEncryptionUtil.decrypt(row.getValue()) : row.getValue();

        detailsByHost.put(row.getHostId(), value);
    }

    return detailsByHost;
}

View File

@ -118,9 +118,9 @@ public class VMTemplatePoolDaoImpl extends GenericDaoBase<VMTemplateStoragePoolV
}
@Override
public VMTemplateStoragePoolVO findByPoolTemplate(long hostId, long templateId) {
public VMTemplateStoragePoolVO findByPoolTemplate(long poolId, long templateId) {
SearchCriteria<VMTemplateStoragePoolVO> sc = PoolTemplateSearch.create();
sc.setParameters("pool_id", hostId);
sc.setParameters("pool_id", poolId);
sc.setParameters("template_id", templateId);
return findOneIncludingRemovedBy(sc);
}

View File

@ -18,18 +18,27 @@
*/
package org.apache.cloudstack.storage.motion;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.concurrent.ExecutionException;
import javax.inject.Inject;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.exception.AgentUnavailableException;
import com.cloud.exception.OperationTimedoutException;
import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
import org.apache.cloudstack.engine.subsystem.api.storage.DataMotionStrategy;
import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreCapabilities;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority;
@ -43,8 +52,12 @@ import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
import org.apache.cloudstack.storage.command.CopyCmdAnswer;
import org.apache.cloudstack.storage.command.CopyCommand;
import org.apache.cloudstack.storage.command.ResignatureAnswer;
import org.apache.cloudstack.storage.command.ResignatureCommand;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.to.VolumeObjectTO;
import org.apache.commons.lang.StringUtils;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
@ -55,65 +68,98 @@ import com.cloud.configuration.Config;
import com.cloud.host.Host;
import com.cloud.host.HostVO;
import com.cloud.host.dao.HostDao;
import com.cloud.host.dao.HostDetailsDao;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.org.Cluster;
import com.cloud.org.Grouping.AllocationState;
import com.cloud.resource.ResourceState;
import com.cloud.server.ManagementService;
import com.cloud.storage.DataStoreRole;
import com.cloud.storage.DiskOfferingVO;
import com.cloud.storage.SnapshotVO;
import com.cloud.storage.Storage.ImageFormat;
import com.cloud.storage.VolumeDetailVO;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.DiskOfferingDao;
import com.cloud.storage.dao.SnapshotDao;
import com.cloud.storage.dao.SnapshotDetailsDao;
import com.cloud.storage.dao.SnapshotDetailsVO;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.storage.dao.VolumeDetailsDao;
import com.cloud.utils.NumbersUtil;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.vm.VirtualMachineManager;
import com.google.common.base.Preconditions;
@Component
public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
private static final Logger s_logger = Logger.getLogger(StorageSystemDataMotionStrategy.class);
private static final Logger LOGGER = Logger.getLogger(StorageSystemDataMotionStrategy.class);
private static final Random RANDOM = new Random(System.nanoTime());
@Inject private AgentManager _agentMgr;
@Inject private ConfigurationDao _configDao;
@Inject private DataStoreManager dataStoreMgr;
@Inject private DiskOfferingDao _diskOfferingDao;
@Inject private ClusterDao clusterDao;
@Inject private HostDao _hostDao;
@Inject private HostDetailsDao hostDetailsDao;
@Inject private ManagementService _mgr;
@Inject private PrimaryDataStoreDao _storagePoolDao;
@Inject private SnapshotDao _snapshotDao;
@Inject private SnapshotDetailsDao _snapshotDetailsDao;
@Inject private VolumeDao _volumeDao;
@Inject private VolumeDataFactory _volumeDataFactory;
@Inject private VolumeDetailsDao volumeDetailsDao;
@Inject private VolumeService _volumeService;
@Override
public StrategyPriority canHandle(DataObject srcData, DataObject destData) {
if (srcData instanceof SnapshotInfo) {
if (canHandle(srcData.getDataStore()) || canHandle(destData.getDataStore())) {
if (canHandle(srcData) || canHandle(destData)) {
return StrategyPriority.HIGHEST;
}
}
if (srcData instanceof TemplateInfo && destData instanceof VolumeInfo &&
(srcData.getDataStore().getId() == destData.getDataStore().getId()) &&
(canHandle(srcData) || canHandle(destData))) {
// Both source and dest are on the same storage, so just clone them.
return StrategyPriority.HIGHEST;
}
return StrategyPriority.CANT_HANDLE;
}
private boolean canHandle(DataStore dataStore) {
private boolean canHandle(DataObject dataObject) {
Preconditions.checkArgument(dataObject != null, "Passing 'null' to dataObject of canHandle(DataObject) is not supported.");
DataStore dataStore = dataObject.getDataStore();
if (dataStore.getRole() == DataStoreRole.Primary) {
Map<String, String> mapCapabilities = dataStore.getDriver().getCapabilities();
if (mapCapabilities != null) {
if (mapCapabilities == null) {
return false;
}
if (dataObject instanceof VolumeInfo || dataObject instanceof SnapshotInfo) {
String value = mapCapabilities.get(DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT.toString());
Boolean supportsStorageSystemSnapshots = new Boolean(value);
Boolean supportsStorageSystemSnapshots = Boolean.valueOf(value);
if (supportsStorageSystemSnapshots) {
s_logger.info("Using 'StorageSystemDataMotionStrategy'");
LOGGER.info("Using 'StorageSystemDataMotionStrategy' (dataObject is a volume or snapshot and the storage system supports snapshots)");
return true;
}
} else if (dataObject instanceof TemplateInfo) {
// If the storage system can clone volumes, we can cache templates on it.
String value = mapCapabilities.get(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_VOLUME.toString());
Boolean canCloneVolume = Boolean.valueOf(value);
if (canCloneVolume) {
LOGGER.info("Using 'StorageSystemDataMotionStrategy' (dataObject is a template and the storage system can create a volume from a volume)");
return true;
}
}
}
@ -132,36 +178,92 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
validate(snapshotInfo);
boolean canHandleSrc = canHandle(srcData.getDataStore());
boolean canHandleSrc = canHandle(srcData);
if (canHandleSrc && destData instanceof TemplateInfo &&
(destData.getDataStore().getRole() == DataStoreRole.Image || destData.getDataStore().getRole() == DataStoreRole.ImageCache)) {
handleCreateTemplateFromSnapshot(snapshotInfo, (TemplateInfo)destData, callback);
return;
}
if (destData instanceof VolumeInfo) {
VolumeInfo volumeInfo = (VolumeInfo)destData;
boolean canHandleDest = canHandle(destData.getDataStore());
boolean canHandleDest = canHandle(destData);
if (canHandleSrc && canHandleDest) {
handleCreateVolumeFromSnapshotBothOnStorageSystem(snapshotInfo, volumeInfo, callback);
return;
if (snapshotInfo.getDataStore().getId() == volumeInfo.getDataStore().getId()) {
handleCreateVolumeFromSnapshotBothOnStorageSystem(snapshotInfo, volumeInfo, callback);
return;
}
else {
String errMsg = "This operation is not supported (DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT " +
"not supported by source or destination storage plug-in). " + getSrcDestDataStoreMsg(srcData, destData);
LOGGER.warn(errMsg);
throw new UnsupportedOperationException(errMsg);
}
}
if (canHandleSrc) {
throw new UnsupportedOperationException("This operation is not supported (DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT " +
"not supported by destination storage plug-in).");
String errMsg = "This operation is not supported (DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT " +
"not supported by destination storage plug-in). " + getDestDataStoreMsg(destData);
LOGGER.warn(errMsg);
throw new UnsupportedOperationException(errMsg);
}
if (canHandleDest) {
throw new UnsupportedOperationException("This operation is not supported (DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT " +
"not supported by source storage plug-in).");
String errMsg = "This operation is not supported (DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT " +
"not supported by source storage plug-in). " + getSrcDataStoreMsg(srcData);
LOGGER.warn(errMsg);
throw new UnsupportedOperationException(errMsg);
}
}
} else if (srcData instanceof TemplateInfo && destData instanceof VolumeInfo) {
boolean canHandleSrc = canHandle(srcData);
if (!canHandleSrc) {
String errMsg = "This operation is not supported (DataStoreCapabilities.STORAGE_CAN_CREATE_VOLUME_FROM_VOLUME " +
"not supported by destination storage plug-in). " + getDestDataStoreMsg(destData);
LOGGER.warn(errMsg);
throw new UnsupportedOperationException(errMsg);
}
handleCreateVolumeFromTemplateBothOnStorageSystem((TemplateInfo)srcData, (VolumeInfo)destData, callback);
return;
}
throw new UnsupportedOperationException("This operation is not supported.");
}
/**
 * Builds a human-readable fragment naming both the source and the destination data store,
 * for inclusion in "operation not supported" error messages.
 */
private String getSrcDestDataStoreMsg(DataObject srcData, DataObject destData) {
    Preconditions.checkArgument(srcData != null, "Passing 'null' to srcData of getSrcDestDataStoreMsg(DataObject, DataObject) is not supported.");
    Preconditions.checkArgument(destData != null, "Passing 'null' to destData of getSrcDestDataStoreMsg(DataObject, DataObject) is not supported.");

    String srcStoreName = srcData.getDataStore().getName();
    String destStoreName = destData.getDataStore().getName();

    return "Source data store = " + srcStoreName + "; " + "Destination data store = " + destStoreName + ".";
}
/**
 * Builds a human-readable fragment naming the source data store, for error messages.
 */
private String getSrcDataStoreMsg(DataObject srcData) {
    Preconditions.checkArgument(srcData != null, "Passing 'null' to srcData of getSrcDataStoreMsg(DataObject) is not supported.");

    String srcStoreName = srcData.getDataStore().getName();

    return "Source data store = " + srcStoreName + ".";
}
/**
 * Builds a human-readable fragment naming the destination data store, for error messages.
 */
private String getDestDataStoreMsg(DataObject destData) {
    Preconditions.checkArgument(destData != null, "Passing 'null' to destData of getDestDataStoreMsg(DataObject) is not supported.");

    String destStoreName = destData.getDataStore().getName();

    return "Destination data store = " + destStoreName + ".";
}
private void validate(SnapshotInfo snapshotInfo) {
long volumeId = snapshotInfo.getVolumeId();
@ -172,7 +274,13 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
}
}
private Void handleCreateTemplateFromSnapshot(SnapshotInfo snapshotInfo, TemplateInfo templateInfo, AsyncCompletionCallback<CopyCommandResult> callback) {
/**
 * Whether this snapshot was taken as a storage-system ("backend") snapshot, as recorded in
 * the snapshot's "takeSnapshot" detail property.
 */
private boolean usingBackendSnapshotFor(SnapshotInfo snapshotInfo) {
    // parseBoolean is null-safe: a missing property reads as false.
    return Boolean.parseBoolean(getProperty(snapshotInfo.getId(), "takeSnapshot"));
}
private void handleCreateTemplateFromSnapshot(SnapshotInfo snapshotInfo, TemplateInfo templateInfo, AsyncCompletionCallback<CopyCommandResult> callback) {
try {
snapshotInfo.processEvent(Event.CopyingRequested);
}
@ -180,57 +288,168 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
throw new CloudRuntimeException("This snapshot is not currently in a state where it can be used to create a template.");
}
HostVO hostVO = getHost(snapshotInfo.getDataStore().getId());
DataStore srcDataStore = snapshotInfo.getDataStore();
HostVO hostVO = getHost(snapshotInfo);
String value = _configDao.getValue(Config.PrimaryStorageDownloadWait.toString());
int primaryStorageDownloadWait = NumbersUtil.parseInt(value, Integer.parseInt(Config.PrimaryStorageDownloadWait.getDefaultValue()));
CopyCommand copyCommand = new CopyCommand(snapshotInfo.getTO(), templateInfo.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value());
boolean usingBackendSnapshot = usingBackendSnapshotFor(snapshotInfo);
boolean computeClusterSupportsResign = clusterDao.computeWhetherClusterSupportsResigning(hostVO.getClusterId());
String errMsg = null;
if (usingBackendSnapshot && !computeClusterSupportsResign) {
String noSupportForResignErrMsg = "Unable to locate an applicable host with which to perform a resignature operation : Cluster ID = " + hostVO.getClusterId();
CopyCmdAnswer copyCmdAnswer = null;
LOGGER.warn(noSupportForResignErrMsg);
throw new CloudRuntimeException(noSupportForResignErrMsg);
}
try {
_volumeService.grantAccess(snapshotInfo, hostVO, srcDataStore);
if (usingBackendSnapshot) {
createVolumeFromSnapshot(hostVO, snapshotInfo, true);
}
Map<String, String> srcDetails = getSnapshotDetails(_storagePoolDao.findById(srcDataStore.getId()), snapshotInfo);
DataStore srcDataStore = snapshotInfo.getDataStore();
copyCommand.setOptions(srcDetails);
String value = _configDao.getValue(Config.PrimaryStorageDownloadWait.toString());
int primaryStorageDownloadWait = NumbersUtil.parseInt(value, Integer.parseInt(Config.PrimaryStorageDownloadWait.getDefaultValue()));
CopyCommand copyCommand = new CopyCommand(snapshotInfo.getTO(), templateInfo.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value());
copyCmdAnswer = (CopyCmdAnswer)_agentMgr.send(hostVO.getId(), copyCommand);
}
catch (Exception ex) {
throw new CloudRuntimeException(ex.getMessage());
String errMsg = null;
CopyCmdAnswer copyCmdAnswer = null;
try {
// If we are using a back-end snapshot, then we should still have access to it from the hosts in the cluster that hostVO is in
// (because we passed in true as the third parameter to createVolumeFromSnapshot above).
if (usingBackendSnapshot == false) {
_volumeService.grantAccess(snapshotInfo, hostVO, srcDataStore);
}
Map<String, String> srcDetails = getSnapshotDetails(snapshotInfo);
copyCommand.setOptions(srcDetails);
copyCmdAnswer = (CopyCmdAnswer)_agentMgr.send(hostVO.getId(), copyCommand);
}
catch (CloudRuntimeException | AgentUnavailableException | OperationTimedoutException ex) {
String msg = "Failed to create template from snapshot (Snapshot ID = " + snapshotInfo.getId() + ") : ";
LOGGER.warn(msg, ex);
throw new CloudRuntimeException(msg + ex.getMessage());
}
finally {
try {
_volumeService.revokeAccess(snapshotInfo, hostVO, srcDataStore);
}
catch (Exception ex) {
LOGGER.warn("Error revoking access to snapshot (Snapshot ID = " + snapshotInfo.getId() + "): " + ex.getMessage(), ex);
}
if (copyCmdAnswer == null || !copyCmdAnswer.getResult()) {
if (copyCmdAnswer != null && !StringUtils.isEmpty(copyCmdAnswer.getDetails())) {
errMsg = copyCmdAnswer.getDetails();
}
else {
errMsg = "Unable to create template from snapshot";
}
}
try {
if (StringUtils.isEmpty(errMsg)) {
snapshotInfo.processEvent(Event.OperationSuccessed);
}
else {
snapshotInfo.processEvent(Event.OperationFailed);
}
}
catch (Exception ex) {
LOGGER.warn("Error processing snapshot event: " + ex.getMessage(), ex);
}
}
CopyCommandResult result = new CopyCommandResult(null, copyCmdAnswer);
result.setResult(errMsg);
callback.complete(result);
}
finally {
try {
_volumeService.revokeAccess(snapshotInfo, hostVO, srcDataStore);
if (usingBackendSnapshot) {
deleteVolumeFromSnapshot(snapshotInfo);
}
catch (Exception ex) {
s_logger.debug(ex.getMessage(), ex);
}
}
/**
* Clones a template present on the storage to a new volume and resignatures it.
*
* @param templateInfo source template
* @param volumeInfo destination ROOT volume
* @param callback for async
*/
private void handleCreateVolumeFromTemplateBothOnStorageSystem(TemplateInfo templateInfo, VolumeInfo volumeInfo, AsyncCompletionCallback<CopyCommandResult> callback) {
Preconditions.checkArgument(templateInfo != null, "Passing 'null' to templateInfo of handleCreateVolumeFromTemplateBothOnStorageSystem is not supported.");
Preconditions.checkArgument(volumeInfo != null, "Passing 'null' to volumeInfo of handleCreateVolumeFromTemplateBothOnStorageSystem is not supported.");
CopyCmdAnswer copyCmdAnswer = null;
String errMsg = null;
HostVO hostVO = getHost(volumeInfo.getDataCenterId(), true);
if (hostVO == null) {
throw new CloudRuntimeException("Unable to locate a host capable of resigning in the zone with the following ID: " + volumeInfo.getDataCenterId());
}
boolean computeClusterSupportsResign = clusterDao.computeWhetherClusterSupportsResigning(hostVO.getClusterId());
if (!computeClusterSupportsResign) {
String noSupportForResignErrMsg = "Unable to locate an applicable host with which to perform a resignature operation : Cluster ID = " + hostVO.getClusterId();
LOGGER.warn(noSupportForResignErrMsg);
throw new CloudRuntimeException(noSupportForResignErrMsg);
}
try {
VolumeDetailVO volumeDetail = new VolumeDetailVO(volumeInfo.getId(),
"cloneOfTemplate",
String.valueOf(templateInfo.getId()),
false);
volumeDetail = volumeDetailsDao.persist(volumeDetail);
AsyncCallFuture<VolumeApiResult> future = _volumeService.createVolumeAsync(volumeInfo, volumeInfo.getDataStore());
VolumeApiResult result = future.get();
if (volumeDetail != null) {
volumeDetailsDao.remove(volumeDetail.getId());
}
if (result.isFailed()) {
LOGGER.warn("Failed to create a volume: " + result.getResult());
throw new CloudRuntimeException(result.getResult());
}
volumeInfo = _volumeDataFactory.getVolume(volumeInfo.getId(), volumeInfo.getDataStore());
volumeInfo.processEvent(Event.MigrationRequested);
volumeInfo = _volumeDataFactory.getVolume(volumeInfo.getId(), volumeInfo.getDataStore());
copyCmdAnswer = performResignature(volumeInfo, hostVO);
if (copyCmdAnswer == null || !copyCmdAnswer.getResult()) {
if (copyCmdAnswer != null && copyCmdAnswer.getDetails() != null && !copyCmdAnswer.getDetails().isEmpty()) {
errMsg = copyCmdAnswer.getDetails();
if (copyCmdAnswer != null && !StringUtils.isEmpty(copyCmdAnswer.getDetails())) {
throw new CloudRuntimeException(copyCmdAnswer.getDetails());
}
else {
errMsg = "Unable to perform host-side operation";
throw new CloudRuntimeException("Unable to create a volume from a template");
}
}
} catch (InterruptedException | ExecutionException ex) {
volumeInfo.getDataStore().getDriver().deleteAsync(volumeInfo.getDataStore(), volumeInfo, null);
try {
if (errMsg == null) {
snapshotInfo.processEvent(Event.OperationSuccessed);
}
else {
snapshotInfo.processEvent(Event.OperationFailed);
}
}
catch (Exception ex) {
s_logger.debug(ex.getMessage(), ex);
}
throw new CloudRuntimeException("Create volume from template (ID = " + templateInfo.getId() + ") failed " + ex.getMessage());
}
CopyCommandResult result = new CopyCommandResult(null, copyCmdAnswer);
@ -238,12 +457,40 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
result.setResult(errMsg);
callback.complete(result);
return null;
}
private Void handleCreateVolumeFromSnapshotBothOnStorageSystem(SnapshotInfo snapshotInfo, VolumeInfo volumeInfo, AsyncCompletionCallback<CopyCommandResult> callback) {
private void handleCreateVolumeFromSnapshotBothOnStorageSystem(SnapshotInfo snapshotInfo, VolumeInfo volumeInfo, AsyncCompletionCallback<CopyCommandResult> callback) {
CopyCmdAnswer copyCmdAnswer = null;
String errMsg = null;
try {
HostVO hostVO = getHost(snapshotInfo);
boolean usingBackendSnapshot = usingBackendSnapshotFor(snapshotInfo);
boolean computeClusterSupportsResign = clusterDao.computeWhetherClusterSupportsResigning(hostVO.getClusterId());
if (usingBackendSnapshot && !computeClusterSupportsResign) {
String noSupportForResignErrMsg = "Unable to locate an applicable host with which to perform a resignature operation : Cluster ID = " + hostVO.getClusterId();
LOGGER.warn(noSupportForResignErrMsg);
throw new CloudRuntimeException(noSupportForResignErrMsg);
}
boolean canStorageSystemCreateVolumeFromVolume = canStorageSystemCreateVolumeFromVolume(snapshotInfo);
boolean useCloning = usingBackendSnapshot || (canStorageSystemCreateVolumeFromVolume && computeClusterSupportsResign);
VolumeDetailVO volumeDetail = null;
if (useCloning) {
volumeDetail = new VolumeDetailVO(volumeInfo.getId(),
"cloneOfSnapshot",
String.valueOf(snapshotInfo.getId()),
false);
volumeDetail = volumeDetailsDao.persist(volumeDetail);
}
// at this point, the snapshotInfo and volumeInfo should have the same disk offering ID (so either one should be OK to get a DiskOfferingVO instance)
DiskOfferingVO diskOffering = _diskOfferingDao.findByIdIncludingRemoved(volumeInfo.getDiskOfferingId());
SnapshotVO snapshot = _snapshotDao.findById(snapshotInfo.getId());
@ -255,72 +502,44 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
VolumeApiResult result = future.get();
if (volumeDetail != null) {
volumeDetailsDao.remove(volumeDetail.getId());
}
if (result.isFailed()) {
s_logger.debug("Failed to create a volume: " + result.getResult());
LOGGER.warn("Failed to create a volume: " + result.getResult());
throw new CloudRuntimeException(result.getResult());
}
}
catch (Exception ex) {
throw new CloudRuntimeException(ex.getMessage());
}
volumeInfo = _volumeDataFactory.getVolume(volumeInfo.getId(), volumeInfo.getDataStore());
volumeInfo = _volumeDataFactory.getVolume(volumeInfo.getId(), volumeInfo.getDataStore());
volumeInfo.processEvent(Event.MigrationRequested);
volumeInfo.processEvent(Event.MigrationRequested);
volumeInfo = _volumeDataFactory.getVolume(volumeInfo.getId(), volumeInfo.getDataStore());
volumeInfo = _volumeDataFactory.getVolume(volumeInfo.getId(), volumeInfo.getDataStore());
HostVO hostVO = getHost(snapshotInfo.getDataStore().getId());
String value = _configDao.getValue(Config.PrimaryStorageDownloadWait.toString());
int primaryStorageDownloadWait = NumbersUtil.parseInt(value, Integer.parseInt(Config.PrimaryStorageDownloadWait.getDefaultValue()));
CopyCommand copyCommand = new CopyCommand(snapshotInfo.getTO(), volumeInfo.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value());
CopyCmdAnswer copyCmdAnswer = null;
try {
_volumeService.grantAccess(snapshotInfo, hostVO, snapshotInfo.getDataStore());
_volumeService.grantAccess(volumeInfo, hostVO, volumeInfo.getDataStore());
Map<String, String> srcDetails = getSnapshotDetails(_storagePoolDao.findById(snapshotInfo.getDataStore().getId()), snapshotInfo);
copyCommand.setOptions(srcDetails);
Map<String, String> destDetails = getVolumeDetails(volumeInfo);
copyCommand.setOptions2(destDetails);
copyCmdAnswer = (CopyCmdAnswer)_agentMgr.send(hostVO.getId(), copyCommand);
}
catch (Exception ex) {
throw new CloudRuntimeException(ex.getMessage());
}
finally {
try {
_volumeService.revokeAccess(snapshotInfo, hostVO, snapshotInfo.getDataStore());
}
catch (Exception ex) {
s_logger.debug(ex.getMessage(), ex);
}
try {
_volumeService.revokeAccess(volumeInfo, hostVO, volumeInfo.getDataStore());
}
catch (Exception ex) {
s_logger.debug(ex.getMessage(), ex);
}
}
String errMsg = null;
if (copyCmdAnswer == null || !copyCmdAnswer.getResult()) {
if (copyCmdAnswer != null && copyCmdAnswer.getDetails() != null && !copyCmdAnswer.getDetails().isEmpty()) {
errMsg = copyCmdAnswer.getDetails();
if (useCloning) {
copyCmdAnswer = performResignature(volumeInfo, hostVO);
}
else {
errMsg = "Unable to perform host-side operation";
// asking for a XenServer host here so we don't always prefer to use XenServer hosts that support resigning
// even when we don't need those hosts to do this kind of copy work
hostVO = getHost(snapshotInfo.getDataCenterId(), false);
copyCmdAnswer = performCopyOfVdi(volumeInfo, snapshotInfo, hostVO);
}
if (copyCmdAnswer == null || !copyCmdAnswer.getResult()) {
if (copyCmdAnswer != null && !StringUtils.isEmpty(copyCmdAnswer.getDetails())) {
errMsg = copyCmdAnswer.getDetails();
}
else {
errMsg = "Unable to create volume from snapshot";
}
}
}
catch (Exception ex) {
errMsg = ex.getMessage() != null ? ex.getMessage() : "Copy operation failed in 'StorageSystemDataMotionStrategy.handleCreateVolumeFromSnapshotBothOnStorageSystem'";
}
CopyCommandResult result = new CopyCommandResult(null, copyCmdAnswer);
@ -328,26 +547,78 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
result.setResult(errMsg);
callback.complete(result);
return null;
}
private Map<String, String> getSnapshotDetails(StoragePoolVO storagePoolVO, SnapshotInfo snapshotInfo) {
Map<String, String> details = new HashMap<String, String>();
/**
* If the underlying storage system is making use of read-only snapshots, this gives the storage system the opportunity to
* create a volume from the snapshot so that we can copy the VHD file that should be inside of the snapshot to secondary storage.
*
* The resultant volume must be writable because we need to resign the SR and the VDI that should be inside of it before we copy
* the VHD file to secondary storage.
*
* If the storage system is using writable snapshots, then nothing need be done by that storage system here because we can just
* resign the SR and the VDI that should be inside of the snapshot before copying the VHD file to secondary storage.
*/
private void createVolumeFromSnapshot(HostVO hostVO, SnapshotInfo snapshotInfo, boolean keepGrantedAccess) {
    // Mark the snapshot (via snapshot_details) so the storage driver interprets the following
    // createAsync call as "create a temporary, writable volume from this snapshot".
    SnapshotDetailsVO snapshotDetails = handleSnapshotDetails(snapshotInfo.getId(), "tempVolume", "create");

    try {
        snapshotInfo.getDataStore().getDriver().createAsync(snapshotInfo.getDataStore(), snapshotInfo, null);
    }
    finally {
        // Always remove the marker detail, even if the driver call fails.
        _snapshotDetailsDao.remove(snapshotDetails.getId());
    }

    // Resign the SR/VDI on the host; keepGrantedAccess controls whether host access is retained afterwards.
    CopyCmdAnswer copyCmdAnswer = performResignature(snapshotInfo, hostVO, keepGrantedAccess);

    if (copyCmdAnswer == null || !copyCmdAnswer.getResult()) {
        if (copyCmdAnswer != null && !StringUtils.isEmpty(copyCmdAnswer.getDetails())) {
            throw new CloudRuntimeException(copyCmdAnswer.getDetails());
        }
        else {
            throw new CloudRuntimeException("Unable to create volume from snapshot");
        }
    }
}
details.put(DiskTO.CHAP_INITIATOR_USERNAME, getProperty(snapshotId, DiskTO.CHAP_INITIATOR_USERNAME));
details.put(DiskTO.CHAP_INITIATOR_SECRET, getProperty(snapshotId, DiskTO.CHAP_INITIATOR_SECRET));
details.put(DiskTO.CHAP_TARGET_USERNAME, getProperty(snapshotId, DiskTO.CHAP_TARGET_USERNAME));
details.put(DiskTO.CHAP_TARGET_SECRET, getProperty(snapshotId, DiskTO.CHAP_TARGET_SECRET));
/**
* If the underlying storage system needed to create a volume from a snapshot for createVolumeFromSnapshot(HostVO, SnapshotInfo), then
* this is its opportunity to delete that temporary volume and restore properties in snapshot_details to the way they were before the
* invocation of createVolumeFromSnapshot(HostVO, SnapshotInfo).
*/
private void deleteVolumeFromSnapshot(SnapshotInfo snapshotInfo) {
    // Mark the snapshot (via snapshot_details) so the storage driver interprets the following
    // createAsync call as "delete the temporary volume previously created from this snapshot".
    // NOTE(review): createAsync being used as the delete trigger looks intentional (the "tempVolume"="delete"
    // detail drives the driver's behavior) — confirm against the storage plug-in.
    SnapshotDetailsVO snapshotDetails = handleSnapshotDetails(snapshotInfo.getId(), "tempVolume", "delete");

    try {
        snapshotInfo.getDataStore().getDriver().createAsync(snapshotInfo.getDataStore(), snapshotInfo, null);
    }
    finally {
        // Always remove the marker detail, even if the driver call fails.
        _snapshotDetailsDao.remove(snapshotDetails.getId());
    }
}
/**
 * Replaces any existing snapshot detail of the given name with a new (non-display) name/value
 * pair and returns the persisted row.
 */
private SnapshotDetailsVO handleSnapshotDetails(long csSnapshotId, String name, String value) {
    // Drop any stale detail first so the new value is the only one present.
    _snapshotDetailsDao.removeDetail(csSnapshotId, name);

    return _snapshotDetailsDao.persist(new SnapshotDetailsVO(csSnapshotId, name, value, false));
}
/**
 * Whether the primary storage backing this snapshot advertises the
 * DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_VOLUME capability (i.e. the storage system
 * can clone a volume directly).
 */
private boolean canStorageSystemCreateVolumeFromVolume(SnapshotInfo snapshotInfo) {
    DataStore dataStore = dataStoreMgr.getDataStore(snapshotInfo.getDataStore().getId(), DataStoreRole.Primary);

    Map<String, String> mapCapabilities = dataStore.getDriver().getCapabilities();

    if (mapCapabilities == null) {
        return false;
    }

    // Boolean.parseBoolean is null-safe and avoids the needless boxing of Boolean.valueOf.
    return Boolean.parseBoolean(mapCapabilities.get(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_VOLUME.toString()));
}
private String getProperty(long snapshotId, String property) {
@ -361,59 +632,209 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
}
/**
 * Collects the connection details (storage host/port, IQN, and optional CHAP credentials)
 * needed by a host to attach this volume's backing LUN.
 */
private Map<String, String> getVolumeDetails(VolumeInfo volumeInfo) {
    Map<String, String> volumeDetails = new HashMap<String, String>();

    VolumeVO volumeVO = _volumeDao.findById(volumeInfo.getId());

    long storagePoolId = volumeVO.getPoolId();
    StoragePoolVO storagePoolVO = _storagePoolDao.findById(storagePoolId);

    volumeDetails.put(DiskTO.STORAGE_HOST, storagePoolVO.getHostAddress());
    volumeDetails.put(DiskTO.STORAGE_PORT, String.valueOf(storagePoolVO.getPort()));
    volumeDetails.put(DiskTO.IQN, volumeVO.get_iScsiName());

    ChapInfo chapInfo = _volumeService.getChapInfo(volumeInfo, volumeInfo.getDataStore());

    // CHAP is optional: only include credentials when the storage system uses them.
    if (chapInfo != null) {
        volumeDetails.put(DiskTO.CHAP_INITIATOR_USERNAME, chapInfo.getInitiatorUsername());
        volumeDetails.put(DiskTO.CHAP_INITIATOR_SECRET, chapInfo.getInitiatorSecret());
        volumeDetails.put(DiskTO.CHAP_TARGET_USERNAME, chapInfo.getTargetUsername());
        volumeDetails.put(DiskTO.CHAP_TARGET_SECRET, chapInfo.getTargetSecret());
    }

    return volumeDetails;
}
public HostVO getHost(long dataStoreId) {
StoragePoolVO storagePoolVO = _storagePoolDao.findById(dataStoreId);
private Map<String, String> getSnapshotDetails(SnapshotInfo snapshotInfo) {
Map<String, String> snapshotDetails = new HashMap<String, String>();
List<? extends Cluster> clusters = _mgr.searchForClusters(storagePoolVO.getDataCenterId(), new Long(0), Long.MAX_VALUE, HypervisorType.XenServer.toString());
long storagePoolId = snapshotInfo.getDataStore().getId();
StoragePoolVO storagePoolVO = _storagePoolDao.findById(storagePoolId);
if (clusters == null) {
throw new CloudRuntimeException("Unable to locate an applicable cluster");
}
snapshotDetails.put(DiskTO.STORAGE_HOST, storagePoolVO.getHostAddress());
snapshotDetails.put(DiskTO.STORAGE_PORT, String.valueOf(storagePoolVO.getPort()));
for (Cluster cluster : clusters) {
if (cluster.getAllocationState() == AllocationState.Enabled) {
List<HostVO> hosts = _hostDao.findByClusterId(cluster.getId());
long snapshotId = snapshotInfo.getId();
if (hosts != null) {
for (HostVO host : hosts) {
if (host.getResourceState() == ResourceState.Enabled) {
return host;
}
}
}
snapshotDetails.put(DiskTO.IQN, getProperty(snapshotId, DiskTO.IQN));
snapshotDetails.put(DiskTO.CHAP_INITIATOR_USERNAME, getProperty(snapshotId, DiskTO.CHAP_INITIATOR_USERNAME));
snapshotDetails.put(DiskTO.CHAP_INITIATOR_SECRET, getProperty(snapshotId, DiskTO.CHAP_INITIATOR_SECRET));
snapshotDetails.put(DiskTO.CHAP_TARGET_USERNAME, getProperty(snapshotId, DiskTO.CHAP_TARGET_USERNAME));
snapshotDetails.put(DiskTO.CHAP_TARGET_SECRET, getProperty(snapshotId, DiskTO.CHAP_TARGET_SECRET));
return snapshotDetails;
}
/**
 * Finds a XenServer host in the snapshot's zone, preferring one in a cluster that supports
 * UUID resigning and falling back to any XenServer host if none does.
 *
 * @throws CloudRuntimeException if no applicable host exists in the zone
 */
private HostVO getHost(SnapshotInfo snapshotInfo) {
    HostVO hostVO = getHost(snapshotInfo.getDataCenterId(), true);

    if (hostVO == null) {
        // No resign-capable cluster available; any XenServer host will do.
        hostVO = getHost(snapshotInfo.getDataCenterId(), false);

        if (hostVO == null) {
            throw new CloudRuntimeException("Unable to locate an applicable host in data center with ID = " + snapshotInfo.getDataCenterId());
        }
    }

    return hostVO;
}
/**
 * Picks a random XenServer host in the given zone. When computeClusterMustSupportResign is
 * true, only hosts whose cluster supports UUID resigning are eligible; returns null when no
 * host qualifies.
 */
private HostVO getHost(Long zoneId, boolean computeClusterMustSupportResign) {
    Preconditions.checkArgument(zoneId != null, "Zone ID cannot be null.");

    List<HostVO> candidateHosts = _hostDao.listByDataCenterIdAndHypervisorType(zoneId, HypervisorType.XenServer);

    if (candidateHosts == null) {
        return null;
    }

    // Randomize so repeated calls spread work across hosts.
    Collections.shuffle(candidateHosts, RANDOM);

    if (!computeClusterMustSupportResign) {
        return candidateHosts.isEmpty() ? null : candidateHosts.get(0);
    }

    // Remember clusters already known not to support resigning so each cluster is checked at most once.
    List<Long> rejectedClusters = new ArrayList<>();

    for (HostVO candidate : candidateHosts) {
        long clusterId = candidate.getClusterId();

        if (rejectedClusters.contains(clusterId)) {
            continue;
        }

        if (clusterDao.computeWhetherClusterSupportsResigning(clusterId)) {
            return candidate;
        }

        rejectedClusters.add(clusterId);
    }

    return null;
}
/**
 * Multi-volume VM migration copy is not supported by this strategy; completes the callback
 * with a failure result.
 */
@Override
public void copyAsync(Map<VolumeInfo, DataStore> volumeMap, VirtualMachineTO vmTo, Host srcHost, Host destHost, AsyncCompletionCallback<CopyCommandResult> callback) {
    CopyCommandResult copyCommandResult = new CopyCommandResult(null, null);

    copyCommandResult.setResult("Unsupported operation requested for copying data.");

    callback.complete(copyCommandResult);
}
/**
 * Dispatches to the volume- or snapshot-specific detail collector for the given data object.
 *
 * @throws CloudRuntimeException if dataObj is neither a VolumeInfo nor a SnapshotInfo
 */
private Map<String, String> getDetails(DataObject dataObj) {
    if (dataObj instanceof VolumeInfo) {
        return getVolumeDetails((VolumeInfo)dataObj);
    }

    if (dataObj instanceof SnapshotInfo) {
        return getSnapshotDetails((SnapshotInfo)dataObj);
    }

    throw new CloudRuntimeException("'dataObj' must be of type 'VolumeInfo' or 'SnapshotInfo'.");
}
// Convenience overload: resign without keeping host access to the DataObject afterwards.
private CopyCmdAnswer performResignature(DataObject dataObj, HostVO hostVO) {
    return performResignature(dataObj, hostVO, false);
}
/**
 * Grants the host access to the data object's backing LUN, sends a ResignatureCommand to the
 * host, and converts the answer into a CopyCmdAnswer carrying the resigned volume's size,
 * path, and format.
 *
 * @param dataObj           volume or snapshot whose SR/VDI should be resigned
 * @param hostVO            host that performs the resignature operation
 * @param keepGrantedAccess when true and the operation succeeds, host access is NOT revoked
 *                          (the caller is then responsible for revoking it later)
 * @throws CloudRuntimeException if sending the command fails or the host reports failure
 */
private CopyCmdAnswer performResignature(DataObject dataObj, HostVO hostVO, boolean keepGrantedAccess) {
    long storagePoolId = dataObj.getDataStore().getId();
    DataStore dataStore = dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary);

    // Connection details (host/port, IQN, CHAP) for the volume or snapshot being resigned.
    Map<String, String> details = getDetails(dataObj);

    ResignatureCommand command = new ResignatureCommand(details);

    ResignatureAnswer answer = null;

    try {
        _volumeService.grantAccess(dataObj, hostVO, dataStore);

        answer = (ResignatureAnswer)_agentMgr.send(hostVO.getId(), command);
    }
    catch (CloudRuntimeException | AgentUnavailableException | OperationTimedoutException ex) {
        // On failure, always revoke access regardless of what the caller asked for.
        keepGrantedAccess = false;

        String msg = "Failed to resign the DataObject with the following ID: " + dataObj.getId();

        LOGGER.warn(msg, ex);

        throw new CloudRuntimeException(msg + ex.getMessage());
    }
    finally {
        if (keepGrantedAccess == false) {
            _volumeService.revokeAccess(dataObj, hostVO, dataStore);
        }
    }

    if (answer == null || !answer.getResult()) {
        final String errMsg;

        if (answer != null && answer.getDetails() != null && !answer.getDetails().isEmpty()) {
            errMsg = answer.getDetails();
        }
        else {
            errMsg = "Unable to perform resignature operation in 'StorageSystemDataMotionStrategy.performResignature'";
        }

        throw new CloudRuntimeException(errMsg);
    }

    // Package the resigned volume's properties in the answer format the copy framework expects.
    VolumeObjectTO newVolume = new VolumeObjectTO();

    newVolume.setSize(answer.getSize());
    newVolume.setPath(answer.getPath());
    newVolume.setFormat(answer.getFormat());

    return new CopyCmdAnswer(newVolume);
}
/**
 * Copies the contents of a snapshot's VDI to a new volume via a host-side CopyCommand (used
 * when the storage system cannot clone the snapshot directly).
 *
 * @throws CloudRuntimeException if sending the command to the host fails
 */
private CopyCmdAnswer performCopyOfVdi(VolumeInfo volumeInfo, SnapshotInfo snapshotInfo, HostVO hostVO) {
    String value = _configDao.getValue(Config.PrimaryStorageDownloadWait.toString());
    int primaryStorageDownloadWait = NumbersUtil.parseInt(value, Integer.parseInt(Config.PrimaryStorageDownloadWait.getDefaultValue()));

    CopyCommand copyCommand = new CopyCommand(snapshotInfo.getTO(), volumeInfo.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value());

    CopyCmdAnswer copyCmdAnswer = null;

    try {
        // The host needs access to both the source snapshot and the destination volume.
        _volumeService.grantAccess(snapshotInfo, hostVO, snapshotInfo.getDataStore());
        _volumeService.grantAccess(volumeInfo, hostVO, volumeInfo.getDataStore());

        Map<String, String> srcDetails = getSnapshotDetails(snapshotInfo);

        copyCommand.setOptions(srcDetails);

        Map<String, String> destDetails = getVolumeDetails(volumeInfo);

        copyCommand.setOptions2(destDetails);

        copyCmdAnswer = (CopyCmdAnswer)_agentMgr.send(hostVO.getId(), copyCommand);
    }
    catch (CloudRuntimeException | AgentUnavailableException | OperationTimedoutException ex) {
        String msg = "Failed to perform VDI copy : ";

        LOGGER.warn(msg, ex);

        throw new CloudRuntimeException(msg + ex.getMessage());
    }
    finally {
        // Revoke each grant independently (matching the revoke handling used elsewhere in this class)
        // so a failure revoking the snapshot does not skip the volume revoke or mask the original exception.
        try {
            _volumeService.revokeAccess(snapshotInfo, hostVO, snapshotInfo.getDataStore());
        }
        catch (Exception ex) {
            LOGGER.warn("Error revoking access to snapshot (Snapshot ID = " + snapshotInfo.getId() + "): " + ex.getMessage(), ex);
        }

        try {
            _volumeService.revokeAccess(volumeInfo, hostVO, volumeInfo.getDataStore());
        }
        catch (Exception ex) {
            LOGGER.warn("Error revoking access to volume (Volume ID = " + volumeInfo.getId() + "): " + ex.getMessage(), ex);
        }
    }

    return copyCmdAnswer;
}
}

View File

@ -918,6 +918,25 @@ public class TemplateServiceImpl implements TemplateService {
return copyAsync(srcTemplate, srcTemplate, (DataStore)pool);
}
/**
 * Asynchronously deletes a template's copy on the given primary storage pool via the pool's
 * storage driver; the returned future completes when the driver's callback fires.
 */
@Override
public AsyncCallFuture<TemplateApiResult> deleteTemplateOnPrimary(TemplateInfo template, StoragePool pool) {
    // Resolve the template as it exists on this particular primary store.
    TemplateObject templateOnPool = (TemplateObject)_templateFactory.getTemplate(template.getId(), (DataStore)pool);

    // Move the template's per-store state machine into the destroy-requested state before deleting.
    templateOnPool.processEvent(ObjectInDataStoreStateMachine.Event.DestroyRequested);

    AsyncCallFuture<TemplateApiResult> future = new AsyncCallFuture<>();
    TemplateOpContext<TemplateApiResult> context = new TemplateOpContext<>(null, templateOnPool, future);

    AsyncCallbackDispatcher<TemplateServiceImpl, CommandResult> caller = AsyncCallbackDispatcher.create(this);

    caller.setCallback(caller.getTarget().deleteTemplateCallback(null, null)).setContext(context);

    DataStore primaryStore = _storeMgr.getPrimaryDataStore(pool.getId());

    primaryStore.getDriver().deleteAsync(primaryStore, templateOnPool, caller);

    return future;
}
protected Void copyTemplateCallBack(AsyncCallbackDispatcher<TemplateServiceImpl, CopyCommandResult> callback, TemplateOpContext<TemplateApiResult> context) {
TemplateInfo destTemplate = context.getTemplate();
CopyCommandResult result = callback.getResult();

View File

@ -28,7 +28,6 @@ import org.apache.log4j.Logger;
import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
import org.apache.cloudstack.storage.command.CopyCmdAnswer;
import org.apache.cloudstack.storage.datastore.ObjectInDataStoreManager;
@ -54,6 +53,9 @@ import com.cloud.utils.component.ComponentContext;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.fsm.NoTransitionException;
import com.google.common.base.Strings;
@SuppressWarnings("serial")
public class TemplateObject implements TemplateInfo {
private static final Logger s_logger = Logger.getLogger(TemplateObject.class);
private VMTemplateVO imageVO;
@ -189,12 +191,15 @@ public class TemplateObject implements TemplateInfo {
TemplateObjectTO newTemplate = (TemplateObjectTO)cpyAnswer.getNewData();
VMTemplateStoragePoolVO templatePoolRef = templatePoolDao.findByPoolTemplate(getDataStore().getId(), getId());
templatePoolRef.setDownloadPercent(100);
if (newTemplate.getSize() != null) {
templatePoolRef.setTemplateSize(newTemplate.getSize());
}
setTemplateSizeIfNeeded(newTemplate, templatePoolRef);
templatePoolRef.setDownloadState(Status.DOWNLOADED);
templatePoolRef.setLocalDownloadPath(newTemplate.getPath());
templatePoolRef.setInstallPath(newTemplate.getPath());
setDownloadPathIfNeeded(newTemplate, templatePoolRef);
setInstallPathIfNeeded(newTemplate, templatePoolRef);
templatePoolDao.update(templatePoolRef.getId(), templatePoolRef);
}
} else if (getDataStore().getRole() == DataStoreRole.Image || getDataStore().getRole() == DataStoreRole.ImageCache) {
@ -243,6 +248,33 @@ public class TemplateObject implements TemplateInfo {
}
}
/**
 * In the case of managed storage, the install path may already be specified (by the storage plug-in), so do not overwrite it.
 */
private void setInstallPathIfNeeded(TemplateObjectTO template, VMTemplateStoragePoolVO templatePoolRef) {
    if (!Strings.isNullOrEmpty(templatePoolRef.getInstallPath())) {
        return; // a storage plug-in already set it
    }

    templatePoolRef.setInstallPath(template.getPath());
}
/**
 * In the case of managed storage, the local download path may already be specified (by the storage plug-in), so do not overwrite it.
 */
private void setDownloadPathIfNeeded(TemplateObjectTO template, VMTemplateStoragePoolVO templatePoolRef) {
    if (!Strings.isNullOrEmpty(templatePoolRef.getLocalDownloadPath())) {
        return; // a storage plug-in already set it
    }

    templatePoolRef.setLocalDownloadPath(template.getPath());
}
/**
 * In the case of managed storage, the template size may already be specified (by the storage plug-in), so do not overwrite it.
 */
private void setTemplateSizeIfNeeded(TemplateObjectTO template, VMTemplateStoragePoolVO templatePoolRef) {
    boolean sizeNotYetRecorded = templatePoolRef.getTemplateSize() == 0;

    if (sizeNotYetRecorded && template.getSize() != null) {
        templatePoolRef.setTemplateSize(template.getSize());
    }
}
@Override
public void incRefCount() {
if (dataStore == null) {
@ -299,28 +331,17 @@ public class TemplateObject implements TemplateInfo {
@Override
public String getInstallPath() {
if (installPath != null)
if (installPath != null) {
return installPath;
}
if (dataStore == null) {
return null;
}
// managed primary data stores should not have an install path
if (dataStore instanceof PrimaryDataStore) {
PrimaryDataStore primaryDataStore = (PrimaryDataStore)dataStore;
Map<String, String> details = primaryDataStore.getDetails();
boolean managed = details != null && Boolean.parseBoolean(details.get(PrimaryDataStore.MANAGED));
if (managed) {
return null;
}
}
DataObjectInStore obj = objectInStoreMgr.findObject(this, dataStore);
return obj.getInstallPath();
return obj != null ? obj.getInstallPath() : null;
}
public void setInstallPath(String installPath) {
@ -435,7 +456,7 @@ public class TemplateObject implements TemplateInfo {
}
@Override
public Map getDetails() {
public Map<String, String> getDetails() {
return imageVO.getDetails();
}

View File

@ -28,6 +28,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
import org.apache.cloudstack.storage.command.CommandResult;
@ -44,8 +45,8 @@ public class FakePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
boolean snapshotResult = true;
@Override
public ChapInfo getChapInfo(VolumeInfo volumeInfo) {
return null; // To change body of implemented methods, use File | Settings | File Templates.
public ChapInfo getChapInfo(DataObject dataObject) {
return null;
}
@Override
@ -65,8 +66,13 @@ public class FakePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
}
@Override
public long getVolumeSizeIncludingHypervisorSnapshotReserve(Volume volume, StoragePool pool) {
return volume.getSize();
public long getDataObjectSizeIncludingHypervisorSnapshotReserve(DataObject dataObject, StoragePool pool) {
return dataObject.getSize();
}
@Override
public long getBytesRequiredForTemplate(TemplateInfo templateInfo, StoragePool storagePool) {
return 0L;
}
@Override
@ -90,23 +96,21 @@ public class FakePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
}
@Override
public void revertSnapshot(SnapshotInfo snapshot, AsyncCompletionCallback<CommandResult> callback) {
//To change body of implemented methods use File | Settings | File Templates.
public void revertSnapshot(SnapshotInfo snapshotOnImageStore, SnapshotInfo snapshotOnPrimaryStore, AsyncCompletionCallback<CommandResult> callback) {
}
@Override
public DataTO getTO(DataObject data) {
return null; //To change body of implemented methods use File | Settings | File Templates.
return null;
}
@Override
public DataStoreTO getStoreTO(DataStore store) {
return null; //To change body of implemented methods use File | Settings | File Templates.
return null;
}
@Override
public void createAsync(DataStore store, DataObject data, AsyncCompletionCallback<CreateCmdResult> callback) {
//To change body of implemented methods use File | Settings | File Templates.
}
@Override
@ -119,22 +123,19 @@ public class FakePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
@Override
public void copyAsync(DataObject srcdata, DataObject destData, AsyncCompletionCallback<CopyCommandResult> callback) {
//To change body of implemented methods use File | Settings | File Templates.
}
@Override
public boolean canCopy(DataObject srcData, DataObject destData) {
return false; //To change body of implemented methods use File | Settings | File Templates.
return false;
}
@Override
public void resize(DataObject data, AsyncCompletionCallback<CreateCmdResult> callback) {
//To change body of implemented methods use File | Settings | File Templates.
}
@Override
public Map<String, String> getCapabilities() {
// TODO Auto-generated method stub
return null;
}
}

View File

@ -16,14 +16,16 @@
// under the License.
package org.apache.cloudstack.storage.snapshot;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;
import javax.inject.Inject;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreCapabilities;
@ -38,11 +40,15 @@ import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService;
import org.apache.cloudstack.storage.command.SnapshotAndCopyAnswer;
import org.apache.cloudstack.storage.command.SnapshotAndCopyCommand;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.springframework.stereotype.Component;
import com.google.common.base.Optional;
import com.cloud.agent.AgentManager;
import com.cloud.agent.api.to.DiskTO;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.exception.InvalidParameterValueException;
import com.cloud.host.HostVO;
import com.cloud.host.dao.HostDao;
@ -71,18 +77,18 @@ import com.cloud.vm.dao.VMInstanceDao;
public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase {
private static final Logger s_logger = Logger.getLogger(StorageSystemSnapshotStrategy.class);
@Inject private AgentManager _agentMgr;
@Inject private DataStoreManager _dataStoreMgr;
@Inject private HostDao _hostDao;
@Inject private ManagementService _mgr;
@Inject private PrimaryDataStoreDao _storagePoolDao;
@Inject private SnapshotDao _snapshotDao;
@Inject private SnapshotDataFactory _snapshotDataFactory;
@Inject private SnapshotDataStoreDao _snapshotStoreDao;
@Inject private SnapshotDetailsDao _snapshotDetailsDao;
@Inject private VMInstanceDao _vmInstanceDao;
@Inject private VolumeDao _volumeDao;
@Inject private VolumeService _volService;
@Inject private AgentManager agentMgr;
@Inject private ClusterDao clusterDao;
@Inject private DataStoreManager dataStoreMgr;
@Inject private HostDao hostDao;
@Inject private ManagementService mgr;
@Inject private PrimaryDataStoreDao storagePoolDao;
@Inject private SnapshotDao snapshotDao;
@Inject private SnapshotDataFactory snapshotDataFactory;
@Inject private SnapshotDetailsDao snapshotDetailsDao;
@Inject private VMInstanceDao vmInstanceDao;
@Inject private VolumeDao volumeDao;
@Inject private VolumeService volService;
@Override
public SnapshotInfo backupSnapshot(SnapshotInfo snapshotInfo) {
@ -91,14 +97,14 @@ public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase {
@Override
public boolean deleteSnapshot(Long snapshotId) {
SnapshotVO snapshotVO = _snapshotDao.findById(snapshotId);
SnapshotVO snapshotVO = snapshotDao.findById(snapshotId);
if (Snapshot.State.Destroyed.equals(snapshotVO.getState())) {
return true;
}
if (Snapshot.State.Error.equals(snapshotVO.getState())) {
_snapshotDao.remove(snapshotId);
snapshotDao.remove(snapshotId);
return true;
}
@ -107,12 +113,12 @@ public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase {
throw new InvalidParameterValueException("Unable to delete snapshotshot " + snapshotId + " because it is in the following state: " + snapshotVO.getState());
}
SnapshotObject snapshotObj = (SnapshotObject)_snapshotDataFactory.getSnapshot(snapshotId, DataStoreRole.Primary);
SnapshotObject snapshotObj = (SnapshotObject)snapshotDataFactory.getSnapshot(snapshotId, DataStoreRole.Primary);
if (snapshotObj == null) {
s_logger.debug("Can't find snapshot; deleting it in DB");
_snapshotDao.remove(snapshotId);
snapshotDao.remove(snapshotId);
return true;
}
@ -165,7 +171,7 @@ public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase {
throw new CloudRuntimeException("Only the " + ImageFormat.VHD.toString() + " image type is currently supported.");
}
SnapshotVO snapshotVO = _snapshotDao.acquireInLockTable(snapshotInfo.getId());
SnapshotVO snapshotVO = snapshotDao.acquireInLockTable(snapshotInfo.getId());
if (snapshotVO == null) {
throw new CloudRuntimeException("Failed to acquire lock on the following snapshot: " + snapshotInfo.getId());
@ -176,7 +182,24 @@ public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase {
try {
volumeInfo.stateTransit(Volume.Event.SnapshotRequested);
// tell the storage driver to create a back-end volume (eventually used to create a new SR on and to copy the VM snapshot VDI to)
// only XenServer is currently supported
HostVO hostVO = getHost(volumeInfo.getId());
boolean canStorageSystemCreateVolumeFromSnapshot = canStorageSystemCreateVolumeFromSnapshot(volumeInfo.getPoolId());
boolean computeClusterSupportsResign = clusterDao.computeWhetherClusterSupportsResigning(hostVO.getClusterId());
// if canStorageSystemCreateVolumeFromSnapshot && computeClusterSupportsResign, then take a back-end snapshot or create a back-end clone;
// else, just create a new back-end volume (eventually used to create a new SR on and to copy a VDI to)
if (canStorageSystemCreateVolumeFromSnapshot && computeClusterSupportsResign) {
SnapshotDetailsVO snapshotDetail = new SnapshotDetailsVO(snapshotInfo.getId(),
"takeSnapshot",
Boolean.TRUE.toString(),
false);
snapshotDetailsDao.persist(snapshotDetail);
}
result = snapshotSvr.takeSnapshot(snapshotInfo);
if (result.isFailed()) {
@ -185,9 +208,9 @@ public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase {
throw new CloudRuntimeException(result.getResult());
}
// send a command to XenServer to create a VM snapshot on the applicable SR (get back the VDI UUID of the VM snapshot)
performSnapshotAndCopyOnHostSide(volumeInfo, snapshotInfo);
if (!canStorageSystemCreateVolumeFromSnapshot || !computeClusterSupportsResign) {
performSnapshotAndCopyOnHostSide(volumeInfo, snapshotInfo);
}
markAsBackedUp((SnapshotObject)result.getSnashot());
}
@ -199,19 +222,35 @@ public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase {
volumeInfo.stateTransit(Volume.Event.OperationFailed);
}
_snapshotDao.releaseFromLockTable(snapshotInfo.getId());
snapshotDao.releaseFromLockTable(snapshotInfo.getId());
}
return snapshotInfo;
}
private boolean canStorageSystemCreateVolumeFromSnapshot(long storagePoolId) {
boolean supportsCloningVolumeFromSnapshot = false;
DataStore dataStore = dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary);
Map<String, String> mapCapabilities = dataStore.getDriver().getCapabilities();
if (mapCapabilities != null) {
String value = mapCapabilities.get(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_SNAPSHOT.toString());
supportsCloningVolumeFromSnapshot = Boolean.valueOf(value);
}
return supportsCloningVolumeFromSnapshot;
}
private void performSnapshotAndCopyOnHostSide(VolumeInfo volumeInfo, SnapshotInfo snapshotInfo) {
Map<String, String> sourceDetails = null;
VolumeVO volumeVO = _volumeDao.findById(volumeInfo.getId());
VolumeVO volumeVO = volumeDao.findById(volumeInfo.getId());
Long vmInstanceId = volumeVO.getInstanceId();
VMInstanceVO vmInstanceVO = _vmInstanceDao.findById(vmInstanceId);
VMInstanceVO vmInstanceVO = vmInstanceDao.findById(vmInstanceId);
Long hostId = null;
@ -233,11 +272,30 @@ public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase {
sourceDetails = getSourceDetails(volumeInfo);
}
HostVO hostVO = getHost(hostId, volumeVO);
HostVO hostVO = null;
if (hostId != null) {
hostVO = hostDao.findById(hostId);
}
else {
Optional<HostVO> optHostVO = getHost(volumeInfo.getDataCenterId(), false);
if (optHostVO.isPresent()) {
hostVO = optHostVO.get();
}
}
if (hostVO == null) {
final String errMsg = "Unable to locate an applicable host";
s_logger.error("performSnapshotAndCopyOnHostSide: " + errMsg);
throw new CloudRuntimeException(errMsg);
}
long storagePoolId = volumeVO.getPoolId();
StoragePoolVO storagePoolVO = _storagePoolDao.findById(storagePoolId);
DataStore dataStore = _dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary);
StoragePoolVO storagePoolVO = storagePoolDao.findById(storagePoolId);
DataStore dataStore = dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary);
Map<String, String> destDetails = getDestDetails(storagePoolVO, snapshotInfo);
@ -248,23 +306,23 @@ public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase {
try {
// if sourceDetails != null, we need to connect the host(s) to the volume
if (sourceDetails != null) {
_volService.grantAccess(volumeInfo, hostVO, dataStore);
volService.grantAccess(volumeInfo, hostVO, dataStore);
}
_volService.grantAccess(snapshotInfo, hostVO, dataStore);
volService.grantAccess(snapshotInfo, hostVO, dataStore);
snapshotAndCopyAnswer = (SnapshotAndCopyAnswer)_agentMgr.send(hostVO.getId(), snapshotAndCopyCommand);
snapshotAndCopyAnswer = (SnapshotAndCopyAnswer)agentMgr.send(hostVO.getId(), snapshotAndCopyCommand);
}
catch (Exception ex) {
throw new CloudRuntimeException(ex.getMessage());
}
finally {
try {
_volService.revokeAccess(snapshotInfo, hostVO, dataStore);
volService.revokeAccess(snapshotInfo, hostVO, dataStore);
// if sourceDetails != null, we need to disconnect the host(s) from the volume
if (sourceDetails != null) {
_volService.revokeAccess(volumeInfo, hostVO, dataStore);
volService.revokeAccess(volumeInfo, hostVO, dataStore);
}
}
catch (Exception ex) {
@ -292,22 +350,22 @@ public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase {
path,
false);
_snapshotDetailsDao.persist(snapshotDetail);
snapshotDetailsDao.persist(snapshotDetail);
}
private Map<String, String> getSourceDetails(VolumeInfo volumeInfo) {
Map<String, String> sourceDetails = new HashMap<String, String>();
Map<String, String> sourceDetails = new HashMap<>();
VolumeVO volumeVO = _volumeDao.findById(volumeInfo.getId());
VolumeVO volumeVO = volumeDao.findById(volumeInfo.getId());
long storagePoolId = volumeVO.getPoolId();
StoragePoolVO storagePoolVO = _storagePoolDao.findById(storagePoolId);
StoragePoolVO storagePoolVO = storagePoolDao.findById(storagePoolId);
sourceDetails.put(DiskTO.STORAGE_HOST, storagePoolVO.getHostAddress());
sourceDetails.put(DiskTO.STORAGE_PORT, String.valueOf(storagePoolVO.getPort()));
sourceDetails.put(DiskTO.IQN, volumeVO.get_iScsiName());
ChapInfo chapInfo = _volService.getChapInfo(volumeInfo, volumeInfo.getDataStore());
ChapInfo chapInfo = volService.getChapInfo(volumeInfo, volumeInfo.getDataStore());
if (chapInfo != null) {
sourceDetails.put(DiskTO.CHAP_INITIATOR_USERNAME, chapInfo.getInitiatorUsername());
@ -320,7 +378,7 @@ public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase {
}
private Map<String, String> getDestDetails(StoragePoolVO storagePoolVO, SnapshotInfo snapshotInfo) {
Map<String, String> destDetails = new HashMap<String, String>();
Map<String, String> destDetails = new HashMap<>();
destDetails.put(DiskTO.STORAGE_HOST, storagePoolVO.getHostAddress());
destDetails.put(DiskTO.STORAGE_PORT, String.valueOf(storagePoolVO.getPort()));
@ -338,7 +396,7 @@ public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase {
}
private String getProperty(long snapshotId, String property) {
SnapshotDetailsVO snapshotDetails = _snapshotDetailsDao.findDetail(snapshotId, property);
SnapshotDetailsVO snapshotDetails = snapshotDetailsDao.findDetail(snapshotId, property);
if (snapshotDetails != null) {
return snapshotDetails.getValue();
@ -347,38 +405,87 @@ public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase {
return null;
}
private HostVO getHost(Long hostId, VolumeVO volumeVO) {
HostVO hostVO = _hostDao.findById(hostId);
private HostVO getHost(long volumeId) {
VolumeVO volumeVO = volumeDao.findById(volumeId);
Long vmInstanceId = volumeVO.getInstanceId();
VMInstanceVO vmInstanceVO = vmInstanceDao.findById(vmInstanceId);
Long hostId = null;
// if the volume to snapshot is associated with a VM
if (vmInstanceVO != null) {
hostId = vmInstanceVO.getHostId();
// if the VM is not associated with a host
if (hostId == null) {
hostId = vmInstanceVO.getLastHostId();
}
}
return getHost(volumeVO.getDataCenterId(), hostId);
}
private HostVO getHost(long zoneId, Long hostId) {
Optional<HostVO> optHostVO = getHost(zoneId, true);
if (optHostVO.isPresent()) {
return optHostVO.get();
}
HostVO hostVO = hostDao.findById(hostId);
if (hostVO != null) {
return hostVO;
}
// pick a host in any XenServer cluster that's in the applicable zone
optHostVO = getHost(zoneId, false);
long zoneId = volumeVO.getDataCenterId();
List<? extends Cluster> clusters = _mgr.searchForClusters(zoneId, new Long(0), Long.MAX_VALUE, HypervisorType.XenServer.toString());
if (clusters == null) {
throw new CloudRuntimeException("Unable to locate an applicable cluster");
if (optHostVO.isPresent()) {
return optHostVO.get();
}
throw new CloudRuntimeException("Unable to locate an applicable host");
}
private Optional<HostVO> getHost(long zoneId, boolean computeClusterMustSupportResign) {
List<? extends Cluster> clusters = mgr.searchForClusters(zoneId, 0L, Long.MAX_VALUE, HypervisorType.XenServer.toString());
if (clusters == null) {
clusters = new ArrayList<>();
}
Collections.shuffle(clusters, new Random(System.nanoTime()));
clusters:
for (Cluster cluster : clusters) {
if (cluster.getAllocationState() == AllocationState.Enabled) {
List<HostVO> hosts = _hostDao.findByClusterId(cluster.getId());
List<HostVO> hosts = hostDao.findByClusterId(cluster.getId());
if (hosts != null) {
Collections.shuffle(hosts, new Random(System.nanoTime()));
for (HostVO host : hosts) {
if (host.getResourceState() == ResourceState.Enabled) {
return host;
if (computeClusterMustSupportResign) {
if (clusterDao.computeWhetherClusterSupportsResigning(cluster.getId())) {
return Optional.of(host);
}
else {
// no other host in the cluster in question should be able to satisfy our requirements here, so move on to the next cluster
continue clusters;
}
}
else {
return Optional.of(host);
}
}
}
}
}
}
throw new CloudRuntimeException("Unable to locate an applicable cluster");
return Optional.absent();
}
private void markAsBackedUp(SnapshotObject snapshotObj) {
@ -406,18 +513,18 @@ public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase {
long volumeId = snapshot.getVolumeId();
VolumeVO volumeVO = _volumeDao.findByIdIncludingRemoved(volumeId);
VolumeVO volumeVO = volumeDao.findByIdIncludingRemoved(volumeId);
long storagePoolId = volumeVO.getPoolId();
DataStore dataStore = _dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary);
DataStore dataStore = dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary);
if (dataStore != null) {
Map<String, String> mapCapabilities = dataStore.getDriver().getCapabilities();
if (mapCapabilities != null) {
String value = mapCapabilities.get(DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT.toString());
Boolean supportsStorageSystemSnapshots = new Boolean(value);
Boolean supportsStorageSystemSnapshots = Boolean.valueOf(value);
if (supportsStorageSystemSnapshots) {
return StrategyPriority.HIGHEST;

View File

@ -78,6 +78,8 @@ public class XenserverSnapshotStrategy extends SnapshotStrategyBase {
VolumeDao volumeDao;
@Inject
SnapshotDataFactory snapshotDataFactory;
@Inject
private SnapshotDao _snapshotDao;
@Override
public SnapshotInfo backupSnapshot(SnapshotInfo snapshot) {
@ -289,7 +291,7 @@ public class XenserverSnapshotStrategy extends SnapshotStrategyBase {
@Override
public boolean revertSnapshot(SnapshotInfo snapshot) {
if (canHandle(snapshot,SnapshotOperation.REVERT) == StrategyPriority.CANT_HANDLE) {
throw new UnsupportedOperationException("Reverting not supported. Create a template or volume based on the snapshot instead.");
throw new CloudRuntimeException("Reverting not supported. Create a template or volume based on the snapshot instead.");
}
SnapshotVO snapshotVO = snapshotDao.acquireInLockTable(snapshot.getId());

View File

@ -223,7 +223,7 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
Volume volume = _volumeDao.findById(dskCh.getVolumeId());
List<Volume> requestVolumes = new ArrayList<Volume>();
requestVolumes.add(volume);
return storageMgr.storagePoolHasEnoughIops(requestVolumes, pool) && storageMgr.storagePoolHasEnoughSpace(requestVolumes, pool);
return storageMgr.storagePoolHasEnoughIops(requestVolumes, pool) && storageMgr.storagePoolHasEnoughSpace(requestVolumes, pool, plan.getClusterId());
}
/*

View File

@ -25,6 +25,11 @@
<artifactId>cloud-engine-storage</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-engine-storage-image</artifactId>
<version>${project.version}</version>
</dependency>
</dependencies>
<build>
<plugins>

View File

@ -26,6 +26,7 @@ import javax.inject.Inject;
import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreCapabilities;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider;
import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
@ -65,6 +66,7 @@ import com.cloud.utils.db.GlobalLock;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.storage.encoding.EncodingType;
@SuppressWarnings("serial")
public class PrimaryDataStoreImpl implements PrimaryDataStore {
private static final Logger s_logger = Logger.getLogger(PrimaryDataStoreImpl.class);
@ -239,10 +241,34 @@ public class PrimaryDataStoreImpl implements PrimaryDataStore {
return pdsv.isManaged();
}
private boolean canCloneVolume() {
return Boolean.valueOf(getDriver().getCapabilities().get(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_VOLUME.toString()));
}
/**
* The parameter createEntryInTempSpoolRef in the overloaded create(DataObject, boolean) method only applies to managed storage. We pass
* in "true" here.
*
* In the case of managed storage that can create a volume from a volume (clone), if the DataObject passed in is a TemplateInfo,
* we do want to create an entry in the cloud.template_spool_ref table (so that multiple uses of the template can be leveraged from
* the one copy on managed storage).
*
* In cases where UUID resigning is not available, then the code calling "create" should invoke the overloaded "create" method whose second
* parameter is a boolean. This code can pass in "false" so that an entry in the cloud.template_spool_ref table is not created (no template to share
* on the primary storage).
*/
@Override
public DataObject create(DataObject obj) {
public DataObject create(DataObject dataObject) {
return create(dataObject, true);
}
/**
* Please read the comment for the create(DataObject) method if you are planning on passing in "false" for createEntryInTempSpoolRef.
*/
@Override
public DataObject create(DataObject obj, boolean createEntryInTempSpoolRef) {
// create template on primary storage
if (obj.getType() == DataObjectType.TEMPLATE && !isManaged()) {
if (obj.getType() == DataObjectType.TEMPLATE && (!isManaged() || (createEntryInTempSpoolRef && canCloneVolume()))) {
try {
String templateIdPoolIdString = "templateId:" + obj.getId() + "poolId:" + getId();
VMTemplateStoragePoolVO templateStoragePoolRef;

View File

@ -19,14 +19,21 @@
package org.apache.cloudstack.storage.volume;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;
import javax.inject.Inject;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.offering.DiskOffering;
import com.cloud.org.Cluster;
import com.cloud.org.Grouping.AllocationState;
import com.cloud.resource.ResourceState;
import com.cloud.server.ManagementService;
import com.cloud.storage.RegisterVolumePayload;
import com.cloud.utils.Pair;
import org.apache.cloudstack.engine.cloud.entity.api.VolumeEntity;
@ -36,6 +43,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult;
import org.apache.cloudstack.engine.subsystem.api.storage.DataMotionService;
import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreCapabilities;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver;
import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint;
import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector;
@ -59,8 +67,10 @@ import org.apache.cloudstack.storage.command.CommandResult;
import org.apache.cloudstack.storage.command.CopyCmdAnswer;
import org.apache.cloudstack.storage.command.DeleteCommand;
import org.apache.cloudstack.storage.datastore.PrimaryDataStoreProviderManager;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO;
import org.apache.cloudstack.storage.image.store.TemplateObject;
import org.apache.cloudstack.storage.to.TemplateObjectTO;
import org.apache.cloudstack.storage.to.VolumeObjectTO;
import org.apache.log4j.Logger;
@ -80,7 +90,9 @@ import com.cloud.event.UsageEventUtils;
import com.cloud.exception.ConcurrentOperationException;
import com.cloud.exception.ResourceAllocationException;
import com.cloud.host.Host;
import com.cloud.host.HostVO;
import com.cloud.host.dao.HostDao;
import com.cloud.host.dao.HostDetailsDao;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.storage.DataStoreRole;
import com.cloud.storage.ScopeType;
@ -134,6 +146,14 @@ public class VolumeServiceImpl implements VolumeService {
EndPointSelector _epSelector;
@Inject
HostDao _hostDao;
@Inject
private PrimaryDataStoreDao storagePoolDao;
@Inject
private HostDetailsDao hostDetailsDao;
@Inject
private ManagementService mgr;
@Inject
private ClusterDao clusterDao;
public VolumeServiceImpl() {
}
@ -160,11 +180,11 @@ public class VolumeServiceImpl implements VolumeService {
}
@Override
public ChapInfo getChapInfo(VolumeInfo volumeInfo, DataStore dataStore) {
public ChapInfo getChapInfo(DataObject dataObject, DataStore dataStore) {
DataStoreDriver dataStoreDriver = dataStore.getDriver();
if (dataStoreDriver instanceof PrimaryDataStoreDriver) {
return ((PrimaryDataStoreDriver)dataStoreDriver).getChapInfo(volumeInfo);
return ((PrimaryDataStoreDriver)dataStoreDriver).getChapInfo(dataObject);
}
return null;
@ -554,6 +574,49 @@ public class VolumeServiceImpl implements VolumeService {
return null;
}
protected Void createManagedTemplateImageCallback(AsyncCallbackDispatcher<VolumeServiceImpl, CreateCmdResult> callback, CreateVolumeContext<CreateCmdResult> context) {
CreateCmdResult result = callback.getResult();
VolumeApiResult res = new VolumeApiResult(null);
res.setResult(result.getResult());
AsyncCallFuture<VolumeApiResult> future = context.getFuture();
DataObject templateOnPrimaryStoreObj = context.getVolume();
if (result.isSuccess()) {
((TemplateObject)templateOnPrimaryStoreObj).setInstallPath(result.getPath());
templateOnPrimaryStoreObj.processEvent(Event.OperationSuccessed, result.getAnswer());
}
else {
templateOnPrimaryStoreObj.processEvent(Event.OperationFailed);
}
future.complete(res);
return null;
}
protected Void copyManagedTemplateCallback(AsyncCallbackDispatcher<VolumeServiceImpl, CopyCommandResult> callback, CreateBaseImageContext<VolumeApiResult> context) {
CopyCommandResult result = callback.getResult();
VolumeApiResult res = new VolumeApiResult(context.getVolume());
res.setResult(result.getResult());
AsyncCallFuture<VolumeApiResult> future = context.getFuture();
DataObject templateOnPrimaryStoreObj = context.destObj;
if (result.isSuccess()) {
templateOnPrimaryStoreObj.processEvent(Event.OperationSuccessed, result.getAnswer());
}
else {
templateOnPrimaryStoreObj.processEvent(Event.OperationFailed);
}
future.complete(res);
return null;
}
@DB
protected Void copyBaseImageCallback(AsyncCallbackDispatcher<VolumeServiceImpl, CopyCommandResult> callback, CreateBaseImageContext<VolumeApiResult> context) {
CopyCommandResult result = callback.getResult();
@ -636,8 +699,10 @@ public class VolumeServiceImpl implements VolumeService {
if (templatePoolRef == null) {
s_logger.warn("Reset Template State On Pool failed - unable to lock TemplatePoolRef " + templatePoolRefId);
} else {
templatePoolRef.setTemplateSize(0);
templatePoolRef.setDownloadState(VMTemplateStorageResourceAssoc.Status.NOT_DOWNLOADED);
templatePoolRef.setState(ObjectInDataStoreStateMachine.State.Allocated);
_tmpltPoolDao.update(templatePoolRefId, templatePoolRef);
}
}finally {
@ -653,50 +718,132 @@ public class VolumeServiceImpl implements VolumeService {
return null;
}
@Override
public AsyncCallFuture<VolumeApiResult> createManagedStorageAndVolumeFromTemplateAsync(VolumeInfo volumeInfo, long destDataStoreId,
TemplateInfo srcTemplateInfo, long destHostId) {
PrimaryDataStore destPrimaryDataStore = dataStoreMgr.getPrimaryDataStore(destDataStoreId);
TemplateInfo destTemplateInfo = (TemplateInfo)destPrimaryDataStore.create(srcTemplateInfo);
Host destHost = _hostDao.findById(destHostId);
/**
* Creates a template volume on managed storage, which will be used for creating ROOT volumes by cloning.
*
* @param srcTemplateInfo Source template on secondary storage
* @param destPrimaryDataStore Managed storage on which we need to create the volume
*/
private TemplateInfo createManagedTemplateVolume(TemplateInfo srcTemplateInfo, PrimaryDataStore destPrimaryDataStore) {
// create a template volume on primary storage
AsyncCallFuture<VolumeApiResult> createTemplateFuture = new AsyncCallFuture<>();
TemplateInfo templateOnPrimary = (TemplateInfo)destPrimaryDataStore.create(srcTemplateInfo);
if (destHost == null) {
throw new CloudRuntimeException("Destinatin host should not be null.");
VMTemplateStoragePoolVO templatePoolRef = _tmpltPoolDao.findByPoolTemplate(destPrimaryDataStore.getId(), templateOnPrimary.getId());
if (templatePoolRef == null) {
throw new CloudRuntimeException("Failed to find template " + srcTemplateInfo.getUniqueName() + " in storage pool " + destPrimaryDataStore.getId());
}
AsyncCallFuture<VolumeApiResult> future = new AsyncCallFuture<VolumeApiResult>();
// At this point, we have an entry in the DB that points to our cached template.
// We need to lock it as there may be other VMs that may get started using the same template.
// We want to avoid having to create multiple cache copies of the same template.
int storagePoolMaxWaitSeconds = NumbersUtil.parseInt(configDao.getValue(Config.StoragePoolMaxWaitSeconds.key()), 3600);
long templatePoolRefId = templatePoolRef.getId();
templatePoolRef = _tmpltPoolDao.acquireInLockTable(templatePoolRefId, storagePoolMaxWaitSeconds);
if (templatePoolRef == null) {
throw new CloudRuntimeException("Unable to acquire lock on VMTemplateStoragePool: " + templatePoolRefId);
}
// Template already exists
if (templatePoolRef.getState() == ObjectInDataStoreStateMachine.State.Ready) {
_tmpltPoolDao.releaseFromLockTable(templatePoolRefId);
return templateOnPrimary;
}
try {
// must call driver to have a volume created
AsyncCallFuture<VolumeApiResult> createVolumeFuture = createVolumeAsync(volumeInfo, destPrimaryDataStore);
// create a cache volume on the back-end
VolumeApiResult createVolumeResult = createVolumeFuture.get();
templateOnPrimary.processEvent(Event.CreateOnlyRequested);
if (createVolumeResult.isFailed()) {
throw new CloudRuntimeException("Creation of a volume failed: " + createVolumeResult.getResult());
CreateVolumeContext<CreateCmdResult> createContext = new CreateVolumeContext<>(null, templateOnPrimary, createTemplateFuture);
AsyncCallbackDispatcher<VolumeServiceImpl, CreateCmdResult> createCaller = AsyncCallbackDispatcher.create(this);
createCaller.setCallback(createCaller.getTarget().createManagedTemplateImageCallback(null, null)).setContext(createContext);
destPrimaryDataStore.getDriver().createAsync(destPrimaryDataStore, templateOnPrimary, createCaller);
VolumeApiResult result = createTemplateFuture.get();
if (result.isFailed()) {
String errMesg = result.getResult();
throw new CloudRuntimeException("Unable to create template " + templateOnPrimary.getId() +
" on primary storage " + destPrimaryDataStore.getId() + ":" + errMesg);
}
} catch (Throwable e) {
s_logger.debug("Failed to create template volume on storage", e);
// refresh the volume from the DB
volumeInfo = volFactory.getVolume(volumeInfo.getId(), destPrimaryDataStore);
templateOnPrimary.processEvent(Event.OperationFailed);
grantAccess(volumeInfo, destHost, destPrimaryDataStore);
throw new CloudRuntimeException(e.getMessage());
}
finally {
_tmpltPoolDao.releaseFromLockTable(templatePoolRefId);
}
ManagedCreateBaseImageContext<CreateCmdResult> context = new ManagedCreateBaseImageContext<CreateCmdResult>(null, volumeInfo,
destPrimaryDataStore, srcTemplateInfo, future);
AsyncCallbackDispatcher<VolumeServiceImpl, CopyCommandResult> caller = AsyncCallbackDispatcher.create(this);
caller.setCallback(caller.getTarget().managedCopyBaseImageCallback(null, null)).setContext(context);
return templateOnPrimary;
}
/**
* This function copies a template from secondary storage to a template volume
* created on managed storage. This template volume will be used as a cache.
* Instead of copying the template to a ROOT volume every time, a clone is performed instead.
*
* @param srcTemplateInfo Source from which to copy the template
* @param templateOnPrimary Dest to copy to
* @param templatePoolRef Template reference on primary storage (entry in the template_spool_ref)
* @param destPrimaryDataStore The managed primary storage
* @param destHost The host that we will use for the copy
*/
private void copyTemplateToManagedTemplateVolume(TemplateInfo srcTemplateInfo, TemplateInfo templateOnPrimary, VMTemplateStoragePoolVO templatePoolRef,
PrimaryDataStore destPrimaryDataStore, Host destHost)
{
AsyncCallFuture<VolumeApiResult> copyTemplateFuture = new AsyncCallFuture<>();
int storagePoolMaxWaitSeconds = NumbersUtil.parseInt(configDao.getValue(Config.StoragePoolMaxWaitSeconds.key()), 3600);
long templatePoolRefId = templatePoolRef.getId();
templatePoolRef = _tmpltPoolDao.acquireInLockTable(templatePoolRefId, storagePoolMaxWaitSeconds);
if (templatePoolRef == null) {
throw new CloudRuntimeException("Unable to acquire lock on VMTemplateStoragePool: " + templatePoolRefId);
}
if (templatePoolRef.getDownloadState() == Status.DOWNLOADED) {
// There can be cases where we acquired the lock, but the template
// was already copied by a previous thread. Just return in that case.
s_logger.debug("Template already downloaded, nothing to do");
return;
}
try {
// copy the template from sec storage to the created volume
CreateBaseImageContext<CreateCmdResult> copyContext = new CreateBaseImageContext<>(
null, null, destPrimaryDataStore, srcTemplateInfo,
copyTemplateFuture, templateOnPrimary, templatePoolRefId
);
AsyncCallbackDispatcher<VolumeServiceImpl, CopyCommandResult> copyCaller = AsyncCallbackDispatcher.create(this);
copyCaller.setCallback(copyCaller.getTarget().copyManagedTemplateCallback(null, null)).setContext(copyContext);
// Populate details which will be later read by the storage subsystem.
Map<String, String> details = new HashMap<String, String>();
details.put(PrimaryDataStore.MANAGED, Boolean.TRUE.toString());
details.put(PrimaryDataStore.STORAGE_HOST, destPrimaryDataStore.getHostAddress());
details.put(PrimaryDataStore.STORAGE_PORT, String.valueOf(destPrimaryDataStore.getPort()));
// for managed storage, the storage repository (XenServer) or datastore (ESX) name is based off of the iScsiName property of a volume
details.put(PrimaryDataStore.MANAGED_STORE_TARGET, volumeInfo.get_iScsiName());
details.put(PrimaryDataStore.MANAGED_STORE_TARGET_ROOT_VOLUME, volumeInfo.getName());
details.put(PrimaryDataStore.VOLUME_SIZE, String.valueOf(volumeInfo.getSize()));
details.put(PrimaryDataStore.MANAGED_STORE_TARGET, ((TemplateObject)templateOnPrimary).getInstallPath());
details.put(PrimaryDataStore.MANAGED_STORE_TARGET_ROOT_VOLUME, srcTemplateInfo.getUniqueName());
details.put(PrimaryDataStore.REMOVE_AFTER_COPY, Boolean.TRUE.toString());
details.put(PrimaryDataStore.VOLUME_SIZE, String.valueOf(templateOnPrimary.getSize()));
ChapInfo chapInfo = getChapInfo(volumeInfo, destPrimaryDataStore);
ChapInfo chapInfo = getChapInfo(templateOnPrimary, destPrimaryDataStore);
if (chapInfo != null) {
details.put(PrimaryDataStore.CHAP_INITIATOR_USERNAME, chapInfo.getInitiatorUsername());
@ -705,17 +852,142 @@ public class VolumeServiceImpl implements VolumeService {
details.put(PrimaryDataStore.CHAP_TARGET_SECRET, chapInfo.getTargetSecret());
}
templateOnPrimary.processEvent(Event.CopyingRequested);
destPrimaryDataStore.setDetails(details);
motionSrv.copyAsync(srcTemplateInfo, destTemplateInfo, destHost, caller);
grantAccess(templateOnPrimary, destHost, destPrimaryDataStore);
VolumeApiResult result = null;
try {
motionSrv.copyAsync(srcTemplateInfo, templateOnPrimary, destHost, copyCaller);
result = copyTemplateFuture.get();
}
finally {
revokeAccess(templateOnPrimary, destHost, destPrimaryDataStore);
}
if (result.isFailed()) {
throw new CloudRuntimeException("Failed to copy template " + templateOnPrimary.getId() +
" to primary storage " + destPrimaryDataStore.getId() + ": " + result.getResult());
// XXX: I find it is useful to destroy the volume on primary storage instead of another thread trying the copy again because I've seen
// something weird happens to the volume (XenServer creates an SR, but the VDI copy can fail).
// For now, I just retry the copy.
}
}
catch (Throwable t) {
catch (Throwable e) {
s_logger.debug("Failed to create a template on primary storage", e);
templateOnPrimary.processEvent(Event.OperationFailed);
throw new CloudRuntimeException(e.getMessage());
}
finally {
_tmpltPoolDao.releaseFromLockTable(templatePoolRefId);
}
}
/**
 * Clones the template volume on managed storage to the ROOT volume.
 *
 * @param volumeInfo ROOT volume to create
 * @param templateOnPrimary Template from which to clone the ROOT volume
 * @param destPrimaryDataStore Primary storage of the volume
 * @param future For async
 */
private void createManagedVolumeCloneTemplateAsync(VolumeInfo volumeInfo, TemplateInfo templateOnPrimary, PrimaryDataStore destPrimaryDataStore,
        AsyncCallFuture<VolumeApiResult> future) {
    VMTemplateStoragePoolVO templatePoolRef = _tmpltPoolDao.findByPoolTemplate(destPrimaryDataStore.getId(), templateOnPrimary.getId());

    if (templatePoolRef == null) {
        throw new CloudRuntimeException("Failed to find template " + templateOnPrimary.getUniqueName() + " in storage pool " + destPrimaryDataStore.getId());
    }

    // XXX: not sure if this is the right thing to do here. We can always fall back to the "copy from sec storage" path.
    if (templatePoolRef.getDownloadState() == Status.NOT_DOWNLOADED) {
        throw new CloudRuntimeException("Template " + templateOnPrimary.getUniqueName() + " has not been downloaded to primary storage.");
    }

    try {
        volumeInfo.processEvent(Event.CreateOnlyRequested);

        CreateVolumeFromBaseImageContext<VolumeApiResult> context =
                new CreateVolumeFromBaseImageContext<>(null, volumeInfo, destPrimaryDataStore, templateOnPrimary, future, null);

        AsyncCallbackDispatcher<VolumeServiceImpl, CopyCommandResult> caller = AsyncCallbackDispatcher.create(this);

        caller.setCallback(caller.getTarget().createVolumeFromBaseImageCallBack(null, null));
        caller.setContext(context);

        motionSrv.copyAsync(templateOnPrimary, volumeInfo, caller);
    } catch (Throwable e) {
        s_logger.debug("Failed to clone template on primary storage", e);

        volumeInfo.processEvent(Event.OperationFailed);

        // Chain the original throwable so the root-cause stack trace is preserved
        // (previously only e.getMessage() was propagated, losing the cause).
        throw new CloudRuntimeException(e.getMessage(), e);
    }
}
private void createManagedVolumeCopyTemplateAsync(VolumeInfo volumeInfo, PrimaryDataStore primaryDataStore, TemplateInfo srcTemplateInfo, Host destHost,
AsyncCallFuture<VolumeApiResult> future) {
try {
// Create a volume on managed storage.
TemplateInfo destTemplateInfo = (TemplateInfo)primaryDataStore.create(srcTemplateInfo, false);
AsyncCallFuture<VolumeApiResult> createVolumeFuture = createVolumeAsync(volumeInfo, primaryDataStore);
VolumeApiResult createVolumeResult = createVolumeFuture.get();
if (createVolumeResult.isFailed()) {
throw new CloudRuntimeException("Creation of a volume failed: " + createVolumeResult.getResult());
}
// Refresh the volume info from the DB.
volumeInfo = volFactory.getVolume(volumeInfo.getId(), primaryDataStore);
ManagedCreateBaseImageContext<CreateCmdResult> context = new ManagedCreateBaseImageContext<CreateCmdResult>(null, volumeInfo,
primaryDataStore, srcTemplateInfo, future);
AsyncCallbackDispatcher<VolumeServiceImpl, CopyCommandResult> caller = AsyncCallbackDispatcher.create(this);
caller.setCallback(caller.getTarget().managedCopyBaseImageCallback(null, null)).setContext(context);
Map<String, String> details = new HashMap<String, String>();
details.put(PrimaryDataStore.MANAGED, Boolean.TRUE.toString());
details.put(PrimaryDataStore.STORAGE_HOST, primaryDataStore.getHostAddress());
details.put(PrimaryDataStore.STORAGE_PORT, String.valueOf(primaryDataStore.getPort()));
// for managed storage, the storage repository (XenServer) or datastore (ESX) name is based off of the iScsiName property of a volume
details.put(PrimaryDataStore.MANAGED_STORE_TARGET, volumeInfo.get_iScsiName());
details.put(PrimaryDataStore.MANAGED_STORE_TARGET_ROOT_VOLUME, volumeInfo.getName());
details.put(PrimaryDataStore.VOLUME_SIZE, String.valueOf(volumeInfo.getSize()));
ChapInfo chapInfo = getChapInfo(volumeInfo, primaryDataStore);
if (chapInfo != null) {
details.put(PrimaryDataStore.CHAP_INITIATOR_USERNAME, chapInfo.getInitiatorUsername());
details.put(PrimaryDataStore.CHAP_INITIATOR_SECRET, chapInfo.getInitiatorSecret());
details.put(PrimaryDataStore.CHAP_TARGET_USERNAME, chapInfo.getTargetUsername());
details.put(PrimaryDataStore.CHAP_TARGET_SECRET, chapInfo.getTargetSecret());
}
primaryDataStore.setDetails(details);
grantAccess(volumeInfo, destHost, primaryDataStore);
try {
motionSrv.copyAsync(srcTemplateInfo, destTemplateInfo, destHost, caller);
}
finally {
revokeAccess(volumeInfo, destHost, primaryDataStore);
}
} catch (Throwable t) {
String errMsg = t.toString();
volumeInfo.processEvent(Event.DestroyRequested);
revokeAccess(volumeInfo, destHost, destPrimaryDataStore);
try {
AsyncCallFuture<VolumeApiResult> expungeVolumeFuture = expungeVolumeAsync(volumeInfo);
@ -735,10 +1007,112 @@ public class VolumeServiceImpl implements VolumeService {
future.complete(result);
}
}
/**
 * Creates a volume on managed storage from the given template. If the storage driver can
 * clone volumes and the compute side supports UUID resigning, a cached template volume is
 * (created if necessary and) cloned; otherwise the template is copied down the normal path.
 *
 * @param volumeInfo volume to create
 * @param destDataStoreId ID of the managed primary data store
 * @param srcTemplateInfo template to create the volume from
 * @param destHostId ID of the host used for the copy
 * @return future tracking the async volume creation
 */
@Override
public AsyncCallFuture<VolumeApiResult> createManagedStorageVolumeFromTemplateAsync(VolumeInfo volumeInfo, long destDataStoreId,
        TemplateInfo srcTemplateInfo, long destHostId) {
    PrimaryDataStore destPrimaryDataStore = dataStoreMgr.getPrimaryDataStore(destDataStoreId);
    Host destHost = _hostDao.findById(destHostId);

    if (destHost == null) {
        throw new CloudRuntimeException("Destination host should not be null.");
    }

    // Boolean.parseBoolean tolerates a missing (null) capability entry and avoids the
    // deprecated Boolean(String) constructor / unnecessary boxing.
    boolean storageCanCloneVolume = Boolean.parseBoolean(
        destPrimaryDataStore.getDriver().getCapabilities().get(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_VOLUME.toString())
    );

    boolean computeZoneSupportsResign = computeZoneSupportsResign(destHost.getDataCenterId(), destHost.getHypervisorType());

    AsyncCallFuture<VolumeApiResult> future = new AsyncCallFuture<>();

    if (storageCanCloneVolume && computeZoneSupportsResign) {
        s_logger.debug("Storage " + destDataStoreId + " can support cloning using a cached template and host cluster can perform UUID resigning.");

        TemplateInfo templateOnPrimary = destPrimaryDataStore.getTemplate(srcTemplateInfo.getId());

        if (templateOnPrimary == null) {
            templateOnPrimary = createManagedTemplateVolume(srcTemplateInfo, destPrimaryDataStore);

            if (templateOnPrimary == null) {
                throw new CloudRuntimeException("Failed to create template " + srcTemplateInfo.getUniqueName() + " on primary storage: " + destDataStoreId);
            }
        }

        // Copy the template to the template volume.
        VMTemplateStoragePoolVO templatePoolRef = _tmpltPoolDao.findByPoolTemplate(destPrimaryDataStore.getId(), templateOnPrimary.getId());

        if (templatePoolRef == null) {
            throw new CloudRuntimeException("Failed to find template " +
                    srcTemplateInfo.getUniqueName() + " in storage pool " +
                    destPrimaryDataStore.getId()
            );
        }

        if (templatePoolRef.getDownloadState() == Status.NOT_DOWNLOADED) {
            copyTemplateToManagedTemplateVolume(srcTemplateInfo, templateOnPrimary, templatePoolRef, destPrimaryDataStore, destHost);
        }

        // We have a template on primary storage. Clone it to new volume.
        s_logger.debug("Creating a clone from template on primary storage " + destDataStoreId);

        createManagedVolumeCloneTemplateAsync(volumeInfo, templateOnPrimary, destPrimaryDataStore, future);
    } else {
        s_logger.debug("Primary storage does not support cloning or no support for UUID resigning on the host side; copying the template normally");

        createManagedVolumeCopyTemplateAsync(volumeInfo, destPrimaryDataStore, srcTemplateInfo, destHost, future);
    }

    return future;
}
/**
 * Returns true if the zone contains at least one enabled host of the given hypervisor type
 * whose cluster supports UUID resigning.
 */
private boolean computeZoneSupportsResign(long zoneId, HypervisorType hypervisorType) {
    HostVO hostInResignCapableCluster = getHost(zoneId, hypervisorType, true);

    return hostInResignCapableCluster != null;
}
/**
 * Picks a random enabled host of the given hypervisor type from a random enabled cluster in
 * the zone, or null if none qualifies.
 *
 * @param zoneId zone to search (must not be null)
 * @param hypervisorType hypervisor type the clusters must run
 * @param computeClusterMustSupportResign when true, only hosts in clusters that support UUID
 *                                        resigning are eligible
 * @return a qualifying host, or null if none was found
 * @throws CloudRuntimeException if zoneId is null
 */
private HostVO getHost(Long zoneId, HypervisorType hypervisorType, boolean computeClusterMustSupportResign) {
    if (zoneId == null) {
        throw new CloudRuntimeException("Zone ID cannot be null.");
    }

    // Long.valueOf avoids the deprecated Long(long) constructor.
    List<? extends Cluster> clusters = mgr.searchForClusters(zoneId, Long.valueOf(0), Long.MAX_VALUE, hypervisorType.toString());

    if (clusters == null) {
        clusters = new ArrayList<>();
    }

    // Single RNG reused for both shuffles; shuffling spreads the selection across
    // clusters and hosts instead of always returning the first match.
    Random random = new Random(System.nanoTime());

    Collections.shuffle(clusters, random);

    clusters:
    for (Cluster cluster : clusters) {
        if (cluster.getAllocationState() != AllocationState.Enabled) {
            continue;
        }

        List<HostVO> hosts = _hostDao.findByClusterId(cluster.getId());

        if (hosts == null) {
            continue;
        }

        Collections.shuffle(hosts, random);

        for (HostVO host : hosts) {
            if (host.getResourceState() != ResourceState.Enabled) {
                continue;
            }

            if (!computeClusterMustSupportResign) {
                return host;
            }

            if (clusterDao.computeWhetherClusterSupportsResigning(cluster.getId())) {
                return host;
            }

            // no other host in the cluster in question should be able to satisfy our requirements here, so move on to the next cluster
            continue clusters;
        }
    }

    return null;
}
@DB
@Override
public AsyncCallFuture<VolumeApiResult> createVolumeFromTemplateAsync(VolumeInfo volume, long dataStoreId, TemplateInfo template) {
@ -1332,7 +1706,8 @@ public class VolumeServiceImpl implements VolumeService {
if (ep != null) {
VolumeVO volume = volDao.findById(volumeId);
PrimaryDataStore primaryDataStore = this.dataStoreMgr.getPrimaryDataStore(volume.getPoolId());
ResizeVolumeCommand resizeCmd = new ResizeVolumeCommand(volume.getPath(), new StorageFilerTO(primaryDataStore), volume.getSize(), newSize, true, instanceName);
ResizeVolumeCommand resizeCmd = new ResizeVolumeCommand(volume.getPath(), new StorageFilerTO(primaryDataStore),
volume.getSize(), newSize, true, instanceName, primaryDataStore.isManaged(), volume.get_iScsiName());
answer = ep.sendMessage(resizeCmd);
} else {

View File

@ -19,7 +19,7 @@
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<artifactId>cloud-plugin-api-solidfire-intg-test</artifactId>
<name>Apache CloudStack Plugin - API SolidFire</name>
<name>Apache CloudStack Plugin - API SolidFire Integration Testing</name>
<parent>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloudstack-plugins</artifactId>

View File

@ -27,6 +27,8 @@
http://www.springframework.org/schema/context/spring-context-3.0.xsd"
>
<bean id="apiSolidFireServiceImpl" class="org.apache.cloudstack.solidfire.ApiSolidFireServiceImpl"/>
<bean id="sfIntgTestUtil" class="org.apache.cloudstack.util.solidfire.SolidFireIntegrationTestUtil"/>
<bean id="solidFireIntegrationTestManagerImpl" class="org.apache.cloudstack.solidfire.SolidFireIntegrationTestManagerImpl"/>
<bean id="apiSolidFireIntegrationTestServiceImpl" class="org.apache.cloudstack.api.solidfire.ApiSolidFireIntegrationTestServiceImpl" />
</beans>

View File

@ -0,0 +1,67 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api.command.admin.solidfire;
import javax.inject.Inject;
import org.apache.log4j.Logger;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.BaseCmd;
import org.apache.cloudstack.api.Parameter;
import org.apache.cloudstack.api.response.solidfire.ApiPathForVolumeResponse;
import org.apache.cloudstack.util.solidfire.SolidFireIntegrationTestUtil;
@APICommand(name = "getPathForVolume", responseObject = ApiPathForVolumeResponse.class, description = "Get the path associated with the provided volume UUID",
        requestHasSensitiveInfo = false, responseHasSensitiveInfo = false)
public class GetPathForVolumeCmd extends BaseCmd {
    private static final Logger LOGGER = Logger.getLogger(GetPathForVolumeCmd.class.getName());
    private static final String NAME = "getpathforvolumeresponse";

    // CloudStack volume UUID supplied by the API caller.
    @Parameter(name = ApiConstants.VOLUME_ID, type = CommandType.STRING, description = "CloudStack Volume UUID", required = true)
    private String _volumeUuid;

    @Inject private SolidFireIntegrationTestUtil _util;

    /////////////////////////////////////////////////////
    /////////////// API Implementation///////////////////
    /////////////////////////////////////////////////////

    @Override
    public String getCommandName() {
        return NAME;
    }

    // Events raised by this command are tracked against the volume owner's account.
    @Override
    public long getEntityOwnerId() {
        return _util.getAccountIdForVolumeUuid(_volumeUuid);
    }

    @Override
    public void execute() {
        // Derive the class name instead of hard-coding it: the previous literal referenced
        // a non-existent "GetPathForVolumeIdCmd" and would drift again after a rename.
        LOGGER.info("'" + GetPathForVolumeCmd.class.getSimpleName() + ".execute' method invoked");

        String pathForVolume = _util.getPathForVolumeUuid(_volumeUuid);

        ApiPathForVolumeResponse response = new ApiPathForVolumeResponse(pathForVolume);

        response.setResponseName(getCommandName());
        response.setObjectName("apipathforvolume");

        setResponseObject(response);
    }
}

View File

@ -14,10 +14,7 @@
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api.command.user.solidfire;
import com.cloud.user.Account;
import com.cloud.user.dao.AccountDao;
package org.apache.cloudstack.api.command.admin.solidfire;
import javax.inject.Inject;
@ -27,26 +24,23 @@ import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.BaseCmd;
import org.apache.cloudstack.api.Parameter;
import org.apache.cloudstack.api.response.ApiSolidFireAccountIdResponse;
import org.apache.cloudstack.context.CallContext;
import org.apache.cloudstack.solidfire.ApiSolidFireService;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.api.response.solidfire.ApiSolidFireAccountIdResponse;
import org.apache.cloudstack.solidfire.SolidFireIntegrationTestManager;
import org.apache.cloudstack.util.solidfire.SolidFireIntegrationTestUtil;
@APICommand(name = "getSolidFireAccountId", responseObject = ApiSolidFireAccountIdResponse.class, description = "Get SolidFire Account ID",
requestHasSensitiveInfo = false, responseHasSensitiveInfo = false)
public class GetSolidFireAccountIdCmd extends BaseCmd {
private static final Logger s_logger = Logger.getLogger(GetSolidFireAccountIdCmd.class.getName());
private static final String s_name = "getsolidfireaccountidresponse";
private static final Logger LOGGER = Logger.getLogger(GetSolidFireAccountIdCmd.class.getName());
private static final String NAME = "getsolidfireaccountidresponse";
@Parameter(name = ApiConstants.ACCOUNT_ID, type = CommandType.STRING, description = "CloudStack Account UUID", required = true)
private String accountUuid;
private String csAccountUuid;
@Parameter(name = ApiConstants.STORAGE_ID, type = CommandType.STRING, description = "Storage Pool UUID", required = true)
private String storagePoolUuid;
@Inject private ApiSolidFireService _apiSolidFireService;
@Inject private AccountDao _accountDao;
@Inject private PrimaryDataStoreDao _storagePoolDao;
@Inject private SolidFireIntegrationTestManager manager;
@Inject private SolidFireIntegrationTestUtil util;
/////////////////////////////////////////////////////
/////////////// API Implementation///////////////////
@ -54,26 +48,21 @@ public class GetSolidFireAccountIdCmd extends BaseCmd {
@Override
public String getCommandName() {
return s_name;
return NAME;
}
@Override
public long getEntityOwnerId() {
Account account = CallContext.current().getCallingAccount();
if (account != null) {
return account.getId();
}
return Account.ACCOUNT_ID_SYSTEM; // no account info given, parent this command to SYSTEM so ERROR events are tracked
return util.getAccountIdForAccountUuid(csAccountUuid);
}
@Override
public void execute() {
Account account = _accountDao.findByUuid(accountUuid);
StoragePoolVO storagePool = _storagePoolDao.findByUuid(storagePoolUuid);
LOGGER.info("'GetSolidFireAccountIdCmd.execute' method invoked");
ApiSolidFireAccountIdResponse response = _apiSolidFireService.getSolidFireAccountId(account.getId(), storagePool.getId());
long sfAccountId = manager.getSolidFireAccountId(csAccountUuid, storagePoolUuid);
ApiSolidFireAccountIdResponse response = new ApiSolidFireAccountIdResponse(sfAccountId);
response.setResponseName(getCommandName());
response.setObjectName("apisolidfireaccountid");

View File

@ -14,12 +14,9 @@
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api.command.user.solidfire;
package org.apache.cloudstack.api.command.admin.solidfire;
import com.cloud.user.Account;
import com.cloud.org.Cluster;
import com.cloud.storage.StoragePool;
import com.cloud.dc.dao.ClusterDao;
import javax.inject.Inject;
@ -29,25 +26,24 @@ import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.BaseCmd;
import org.apache.cloudstack.api.Parameter;
import org.apache.cloudstack.api.response.ApiSolidFireVolumeAccessGroupIdResponse;
import org.apache.cloudstack.api.response.solidfire.ApiSolidFireVolumeAccessGroupIdResponse;
import org.apache.cloudstack.context.CallContext;
import org.apache.cloudstack.solidfire.ApiSolidFireService;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.solidfire.SolidFireIntegrationTestManager;
import org.apache.cloudstack.util.solidfire.SolidFireIntegrationTestUtil;
@APICommand(name = "getSolidFireVolumeAccessGroupId", responseObject = ApiSolidFireVolumeAccessGroupIdResponse.class, description = "Get the SF Volume Access Group ID",
requestHasSensitiveInfo = false, responseHasSensitiveInfo = false)
public class GetSolidFireVolumeAccessGroupIdCmd extends BaseCmd {
private static final Logger s_logger = Logger.getLogger(GetSolidFireVolumeAccessGroupIdCmd.class.getName());
private static final String s_name = "getsolidfirevolumeaccessgroupidresponse";
private static final Logger LOGGER = Logger.getLogger(GetSolidFireVolumeAccessGroupIdCmd.class.getName());
private static final String NAME = "getsolidfirevolumeaccessgroupidresponse";
@Parameter(name = ApiConstants.CLUSTER_ID, type = CommandType.STRING, description = "Cluster UUID", required = true)
private String clusterUuid;
@Parameter(name = ApiConstants.STORAGE_ID, type = CommandType.STRING, description = "Storage Pool UUID", required = true)
private String storagePoolUuid;
@Inject private ApiSolidFireService _apiSolidFireService;
@Inject private ClusterDao _clusterDao;
@Inject private PrimaryDataStoreDao _storagePoolDao;
@Inject private SolidFireIntegrationTestManager manager;
@Inject private SolidFireIntegrationTestUtil util;
/////////////////////////////////////////////////////
/////////////// API Implementation///////////////////
@ -55,7 +51,7 @@ public class GetSolidFireVolumeAccessGroupIdCmd extends BaseCmd {
@Override
public String getCommandName() {
return s_name;
return NAME;
}
@Override
@ -71,10 +67,11 @@ public class GetSolidFireVolumeAccessGroupIdCmd extends BaseCmd {
@Override
public void execute() {
Cluster cluster = _clusterDao.findByUuid(clusterUuid);
StoragePool storagePool = _storagePoolDao.findByUuid(storagePoolUuid);
LOGGER.info("'GetSolidFireVolumeAccessGroupIdCmd.execute' method invoked");
ApiSolidFireVolumeAccessGroupIdResponse response = _apiSolidFireService.getSolidFireVolumeAccessGroupId(cluster.getId(), storagePool.getId());
long sfVagId = manager.getSolidFireVolumeAccessGroupId(clusterUuid, storagePoolUuid);
ApiSolidFireVolumeAccessGroupIdResponse response = new ApiSolidFireVolumeAccessGroupIdResponse(sfVagId);
response.setResponseName(getCommandName());
response.setObjectName("apisolidfirevolumeaccessgroupid");

View File

@ -14,12 +14,7 @@
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api.command.user.solidfire;
import com.cloud.storage.Volume;
import com.cloud.user.Account;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.storage.StoragePool;
package org.apache.cloudstack.api.command.admin.solidfire;
import javax.inject.Inject;
@ -29,25 +24,21 @@ import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.BaseCmd;
import org.apache.cloudstack.api.Parameter;
import org.apache.cloudstack.api.response.ApiSolidFireVolumeSizeResponse;
import org.apache.cloudstack.context.CallContext;
import org.apache.cloudstack.solidfire.ApiSolidFireService;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.api.response.solidfire.ApiSolidFireVolumeSizeResponse;
import org.apache.cloudstack.solidfire.SolidFireIntegrationTestManager;
import org.apache.cloudstack.util.solidfire.SolidFireIntegrationTestUtil;
@APICommand(name = "getSolidFireVolumeSize", responseObject = ApiSolidFireVolumeSizeResponse.class, description = "Get the SF volume size including Hypervisor Snapshot Reserve",
requestHasSensitiveInfo = false, responseHasSensitiveInfo = false)
public class GetSolidFireVolumeSizeCmd extends BaseCmd {
private static final Logger s_logger = Logger.getLogger(GetSolidFireVolumeSizeCmd.class.getName());
private static final String s_name = "getsolidfirevolumesizeresponse";
private static final Logger LOGGER = Logger.getLogger(GetSolidFireVolumeSizeCmd.class.getName());
private static final String NAME = "getsolidfirevolumesizeresponse";
@Parameter(name = ApiConstants.VOLUME_ID, type = CommandType.STRING, description = "Volume UUID", required = true)
private String volumeUuid;
@Parameter(name = ApiConstants.STORAGE_ID, type = CommandType.STRING, description = "Storage Pool UUID", required = true)
private String storagePoolUuid;
@Inject private ApiSolidFireService _apiSolidFireService;
@Inject private VolumeDao _volumeDao;
@Inject private PrimaryDataStoreDao _storagePoolDao;
@Inject private SolidFireIntegrationTestManager manager;
@Inject private SolidFireIntegrationTestUtil util;
/////////////////////////////////////////////////////
/////////////// API Implementation///////////////////
@ -55,26 +46,21 @@ public class GetSolidFireVolumeSizeCmd extends BaseCmd {
@Override
public String getCommandName() {
return s_name;
return NAME;
}
@Override
public long getEntityOwnerId() {
Account account = CallContext.current().getCallingAccount();
if (account != null) {
return account.getId();
}
return Account.ACCOUNT_ID_SYSTEM; // no account info given, parent this command to SYSTEM so ERROR events are tracked
return util.getAccountIdForVolumeUuid(volumeUuid);
}
@Override
public void execute() {
Volume volume = _volumeDao.findByUuid(volumeUuid);
StoragePool storagePool = _storagePoolDao.findByUuid(storagePoolUuid);
LOGGER.info("'GetSolidFireVolumeSizeCmd.execute' method invoked");
ApiSolidFireVolumeSizeResponse response = _apiSolidFireService.getSolidFireVolumeSize(volume, storagePool);
long sfVolumeSize = manager.getSolidFireVolumeSize(volumeUuid);
ApiSolidFireVolumeSizeResponse response = new ApiSolidFireVolumeSizeResponse(sfVolumeSize);
response.setResponseName(getCommandName());
response.setObjectName("apisolidfirevolumesize");

View File

@ -0,0 +1,73 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api.command.admin.solidfire;
import java.util.List;
import javax.inject.Inject;
import org.apache.log4j.Logger;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.BaseCmd;
import org.apache.cloudstack.api.Parameter;
import org.apache.cloudstack.api.response.ListResponse;
import org.apache.cloudstack.api.response.solidfire.ApiVolumeSnapshotDetailsResponse;
import org.apache.cloudstack.api.response.solidfire.ApiVolumeiScsiNameResponse;
import org.apache.cloudstack.util.solidfire.SolidFireIntegrationTestUtil;
// NOTE(review): responseObject references ApiVolumeiScsiNameResponse while execute() emits
// ApiVolumeSnapshotDetailsResponse entries — possibly a copy/paste; confirm before changing.
@APICommand(name = "getVolumeSnapshotDetails", responseObject = ApiVolumeiScsiNameResponse.class, description = "Get Volume Snapshot Details",
        requestHasSensitiveInfo = false, responseHasSensitiveInfo = false)
public class GetVolumeSnapshotDetailsCmd extends BaseCmd {
    private static final Logger LOGGER = Logger.getLogger(GetVolumeSnapshotDetailsCmd.class.getName());
    private static final String NAME = "getvolumesnapshotdetailsresponse";

    // CloudStack snapshot UUID supplied by the API caller.
    @Parameter(name = ApiConstants.SNAPSHOT_ID, type = CommandType.STRING, description = "CloudStack Snapshot UUID", required = true)
    private String snapshotUuid;

    @Inject private SolidFireIntegrationTestUtil util;

    /////////////////////////////////////////////////////
    /////////////// API Implementation///////////////////
    /////////////////////////////////////////////////////

    // Name under which the response envelope is returned to API clients.
    @Override
    public String getCommandName() {
        return NAME;
    }

    // Events raised by this command are tracked against the snapshot owner's account.
    @Override
    public long getEntityOwnerId() {
        return util.getAccountIdForSnapshotUuid(snapshotUuid);
    }

    // Looks up the detail rows for the snapshot and wraps them in a list response.
    @Override
    public void execute() {
        LOGGER.info("'" + GetVolumeSnapshotDetailsCmd.class.getSimpleName() + ".execute' method invoked");

        List<ApiVolumeSnapshotDetailsResponse> detailResponses = util.getSnapshotDetails(snapshotUuid);

        ListResponse<ApiVolumeSnapshotDetailsResponse> response = new ListResponse<>();

        response.setResponses(detailResponses);
        response.setResponseName(getCommandName());
        response.setObjectName("apivolumesnapshotdetails");

        setResponseObject(response);
    }
}

View File

@ -14,11 +14,7 @@
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api.command.user.solidfire;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.storage.Volume;
import com.cloud.user.Account;
package org.apache.cloudstack.api.command.admin.solidfire;
import javax.inject.Inject;
@ -27,22 +23,20 @@ import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.BaseCmd;
import org.apache.cloudstack.api.Parameter;
import org.apache.cloudstack.api.response.ApiSolidFireVolumeIscsiNameResponse;
import org.apache.cloudstack.context.CallContext;
import org.apache.cloudstack.solidfire.ApiSolidFireService;
import org.apache.cloudstack.api.response.solidfire.ApiVolumeiScsiNameResponse;
import org.apache.cloudstack.util.solidfire.SolidFireIntegrationTestUtil;
@APICommand(name = "getSolidFireVolumeIscsiName", responseObject = ApiSolidFireVolumeIscsiNameResponse.class, description = "Get SolidFire Volume's Iscsi Name",
@APICommand(name = "getVolumeiScsiName", responseObject = ApiVolumeiScsiNameResponse.class, description = "Get Volume's iSCSI Name",
requestHasSensitiveInfo = false, responseHasSensitiveInfo = false)
public class GetSolidFireVolumeIscsiNameCmd extends BaseCmd {
private static final Logger s_logger = Logger.getLogger(GetSolidFireVolumeIscsiNameCmd.class.getName());
private static final String s_name = "getsolidfirevolumeiscsinameresponse";
public class GetVolumeiScsiNameCmd extends BaseCmd {
private static final Logger LOGGER = Logger.getLogger(GetVolumeiScsiNameCmd.class.getName());
private static final String NAME = "getvolumeiscsinameresponse";
@Parameter(name = ApiConstants.VOLUME_ID, type = CommandType.STRING, description = "CloudStack Volume UUID", required = true)
private String volumeUuid;
@Inject private ApiSolidFireService _apiSolidFireService;
@Inject private VolumeDao _volumeDao;
@Inject private SolidFireIntegrationTestUtil _util;
/////////////////////////////////////////////////////
/////////////// API Implementation///////////////////
@ -50,28 +44,24 @@ public class GetSolidFireVolumeIscsiNameCmd extends BaseCmd {
@Override
public String getCommandName() {
return s_name;
return NAME;
}
@Override
public long getEntityOwnerId() {
Account account = CallContext.current().getCallingAccount();
if (account != null) {
return account.getId();
}
return Account.ACCOUNT_ID_SYSTEM; // no account info given, parent this command to SYSTEM so ERROR events are tracked
return _util.getAccountIdForVolumeUuid(volumeUuid);
}
@Override
public void execute() {
Volume volume = _volumeDao.findByUuid(volumeUuid);
LOGGER.info("'GetVolumeiScsiNameCmd.execute' method invoked");
ApiSolidFireVolumeIscsiNameResponse response = _apiSolidFireService.getSolidFireVolumeIscsiName(volume);
String volume_iScsiName = _util.getVolume_iScsiName(volumeUuid);
ApiVolumeiScsiNameResponse response = new ApiVolumeiScsiNameResponse(volume_iScsiName);
response.setResponseName(getCommandName());
response.setObjectName("apisolidfirevolumeiscsiname");
response.setObjectName("apivolumeiscsiname");
this.setResponseObject(response);
}

View File

@ -0,0 +1,33 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api.response.solidfire;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.BaseResponse;
import com.cloud.serializer.Param;
import com.google.gson.annotations.SerializedName;
/**
 * API response carrying the storage "path" field of a CloudStack volume.
 * Used by the SolidFire integration-test API commands.
 */
public class ApiPathForVolumeResponse extends BaseResponse {
    @SerializedName(ApiConstants.PATH)
    @Param(description = "The path field for the volume")
    private String path; // serialized directly by Gson; no accessor required
    public ApiPathForVolumeResponse(String path) {
        this.path = path;
    }
}

View File

@ -14,7 +14,7 @@
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api.response;
package org.apache.cloudstack.api.response.solidfire;
import com.cloud.serializer.Param;
@ -30,8 +30,4 @@ public class ApiSolidFireAccountIdResponse extends BaseResponse {
public ApiSolidFireAccountIdResponse(long sfAccountId) {
solidFireAccountId = sfAccountId;
}
public long getSolidFireAccountId() {
return solidFireAccountId;
}
}

View File

@ -14,7 +14,7 @@
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api.response;
package org.apache.cloudstack.api.response.solidfire;
import com.cloud.serializer.Param;
@ -30,8 +30,4 @@ public class ApiSolidFireVolumeAccessGroupIdResponse extends BaseResponse {
public ApiSolidFireVolumeAccessGroupIdResponse(long sfVolumeAccessGroupId) {
solidFireVolumeAccessGroupId = sfVolumeAccessGroupId;
}
public long getSolidFireAccessGroupId() {
return solidFireVolumeAccessGroupId;
}
}

View File

@ -14,7 +14,7 @@
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api.response;
package org.apache.cloudstack.api.response.solidfire;
import com.cloud.serializer.Param;
@ -30,8 +30,4 @@ public class ApiSolidFireVolumeSizeResponse extends BaseResponse {
public ApiSolidFireVolumeSizeResponse(long sfVolumeSize) {
solidFireVolumeSize = sfVolumeSize;
}
public long getSolidFireVolumeSize() {
return solidFireVolumeSize;
}
}

View File

@ -0,0 +1,43 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api.response.solidfire;
import com.cloud.serializer.Param;
import com.google.gson.annotations.SerializedName;
import org.apache.cloudstack.api.BaseResponse;
/**
 * API response describing a single snapshot-details row (name/value pair)
 * attached to a CloudStack volume snapshot. Fields are serialized by Gson.
 */
public class ApiVolumeSnapshotDetailsResponse extends BaseResponse {
    @SerializedName("volumeSnapshotId")
    @Param(description = "CloudStack Volume Snapshot ID")
    private long volumeSnapshotId; // resource ID of the owning snapshot detail row
    @SerializedName("snapshotDetailsName")
    @Param(description = "Snapshot Details Name")
    private String volumeSnapshotDetailsName;
    @SerializedName("snapshotDetailsValue")
    @Param(description = "Snapshot Details Value")
    private String volumeSnapshotDetailsValue;
    public ApiVolumeSnapshotDetailsResponse(long volumeSnapshotId, String volumeSnapshotDetailsName, String volumeSnapshotDetailsValue) {
        this.volumeSnapshotId = volumeSnapshotId;
        this.volumeSnapshotDetailsName = volumeSnapshotDetailsName;
        this.volumeSnapshotDetailsValue = volumeSnapshotDetailsValue;
    }
}

View File

@ -14,7 +14,7 @@
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api.response;
package org.apache.cloudstack.api.response.solidfire;
import com.cloud.serializer.Param;
@ -22,16 +22,12 @@ import com.google.gson.annotations.SerializedName;
import org.apache.cloudstack.api.BaseResponse;
public class ApiSolidFireVolumeIscsiNameResponse extends BaseResponse {
@SerializedName("solidFireVolumeIscsiName")
@Param(description = "SolidFire Volume Iscsi Name")
private String solidFireVolumeIscsiName;
public class ApiVolumeiScsiNameResponse extends BaseResponse {
@SerializedName("volumeiScsiName")
@Param(description = "Volume iSCSI Name")
private String volumeiScsiName;
public ApiSolidFireVolumeIscsiNameResponse(String sfVolumeIscsiName) {
solidFireVolumeIscsiName = sfVolumeIscsiName;
public ApiVolumeiScsiNameResponse(String volumeiScsiName) {
this.volumeiScsiName = volumeiScsiName;
}
public String getSolidFireVolumeIscsiName() {
return solidFireVolumeIscsiName;
}
}
}

View File

@ -0,0 +1,22 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api.solidfire;
import com.cloud.utils.component.PluggableService;
/**
 * Marker service interface through which the SolidFire integration-test API
 * command classes are registered (via {@link PluggableService#getCommands()}).
 */
public interface ApiSolidFireIntegrationTestService extends PluggableService {
}

View File

@ -0,0 +1,48 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api.solidfire;
import java.util.List;
import java.util.ArrayList;
import org.apache.cloudstack.api.command.admin.solidfire.GetPathForVolumeCmd;
// import org.apache.log4j.Logger;
import org.apache.cloudstack.api.command.admin.solidfire.GetSolidFireAccountIdCmd;
import org.apache.cloudstack.api.command.admin.solidfire.GetSolidFireVolumeAccessGroupIdCmd;
import org.apache.cloudstack.api.command.admin.solidfire.GetVolumeSnapshotDetailsCmd;
import org.apache.cloudstack.api.command.admin.solidfire.GetVolumeiScsiNameCmd;
import org.apache.cloudstack.api.command.admin.solidfire.GetSolidFireVolumeSizeCmd;
import org.springframework.stereotype.Component;
import com.cloud.utils.component.AdapterBase;
@Component
public class ApiSolidFireIntegrationTestServiceImpl extends AdapterBase implements ApiSolidFireIntegrationTestService {
    /**
     * Registers the SolidFire integration-test API command classes with the
     * management server's command dispatcher.
     */
    @Override
    public List<Class<?>> getCommands() {
        List<Class<?>> commands = new ArrayList<>();

        commands.add(GetPathForVolumeCmd.class);
        commands.add(GetSolidFireAccountIdCmd.class);
        commands.add(GetSolidFireVolumeAccessGroupIdCmd.class);
        commands.add(GetVolumeiScsiNameCmd.class);
        commands.add(GetSolidFireVolumeSizeCmd.class);
        commands.add(GetVolumeSnapshotDetailsCmd.class);

        return commands;
    }
}

View File

@ -1,37 +0,0 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.solidfire;
import com.cloud.utils.component.PluggableService;
import com.cloud.storage.Volume;
import com.cloud.storage.StoragePool;
import org.apache.cloudstack.api.response.ApiSolidFireAccountIdResponse;
import org.apache.cloudstack.api.response.ApiSolidFireVolumeSizeResponse;
import org.apache.cloudstack.api.response.ApiSolidFireVolumeAccessGroupIdResponse;
import org.apache.cloudstack.api.response.ApiSolidFireVolumeIscsiNameResponse;
/**
 * API surface for SolidFire integration tests: exposes SolidFire-side
 * identifiers (account ID, volume size, volume access group ID, iSCSI name)
 * for the corresponding CloudStack entities.
 */
public interface ApiSolidFireService extends PluggableService {
    public ApiSolidFireAccountIdResponse getSolidFireAccountId(Long csAccountId, Long storagePoolId);
    public ApiSolidFireVolumeSizeResponse getSolidFireVolumeSize(Volume volume, StoragePool storagePool);
    public ApiSolidFireVolumeAccessGroupIdResponse getSolidFireVolumeAccessGroupId(Long csClusterId, Long storagePoolId);
    public ApiSolidFireVolumeIscsiNameResponse getSolidFireVolumeIscsiName(Volume volume);
}

View File

@ -1,126 +0,0 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.solidfire;
import java.util.Map;
import java.util.List;
import java.util.ArrayList;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
// import org.apache.log4j.Logger;
import org.apache.cloudstack.acl.APIChecker;
import org.apache.cloudstack.storage.datastore.util.SolidFireUtil;
import org.apache.cloudstack.api.command.user.solidfire.GetSolidFireAccountIdCmd;
import org.apache.cloudstack.api.command.user.solidfire.GetSolidFireVolumeAccessGroupIdCmd;
import org.apache.cloudstack.api.command.user.solidfire.GetSolidFireVolumeIscsiNameCmd;
import org.apache.cloudstack.api.command.user.solidfire.GetSolidFireVolumeSizeCmd;
import org.apache.cloudstack.api.response.ApiSolidFireAccountIdResponse;
import org.apache.cloudstack.api.response.ApiSolidFireVolumeAccessGroupIdResponse;
import org.apache.cloudstack.api.response.ApiSolidFireVolumeIscsiNameResponse;
import org.apache.cloudstack.api.response.ApiSolidFireVolumeSizeResponse;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
import org.springframework.stereotype.Component;
import com.cloud.dc.ClusterDetailsDao;
import com.cloud.dc.ClusterDetailsVO;
import com.cloud.exception.InvalidParameterValueException;
import com.cloud.exception.PermissionDeniedException;
import com.cloud.storage.StoragePool;
import com.cloud.storage.Volume;
import com.cloud.user.AccountDetailsDao;
import com.cloud.user.AccountDetailVO;
import com.cloud.user.User;
import com.cloud.utils.component.AdapterBase;
@Component
public class ApiSolidFireServiceImpl extends AdapterBase implements APIChecker, ApiSolidFireService {
    // private static final Logger s_logger = Logger.getLogger(ApiSolidFireServiceImpl.class);
    @Inject private AccountDetailsDao _accountDetailsDao;
    @Inject private DataStoreProviderManager _dataStoreProviderMgr;
    @Inject private ClusterDetailsDao _clusterDetailsDao;

    @Override
    public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
        super.configure(name, params);
        return true;
    }

    /**
     * Returns the SolidFire account ID recorded (as an account detail) for the
     * given CloudStack account on the given storage pool.
     *
     * @throws InvalidParameterValueException if no SolidFire account ID is recorded
     */
    @Override
    public ApiSolidFireAccountIdResponse getSolidFireAccountId(Long csAccountId, Long storagePoolId) {
        AccountDetailVO accountDetail = _accountDetailsDao.findDetail(csAccountId, SolidFireUtil.getAccountKey(storagePoolId));
        if (accountDetail == null || accountDetail.getValue() == null) {
            // previously an absent detail row fell through to a bare NPE
            throw new InvalidParameterValueException("Unable to locate a SolidFire account ID for CloudStack account " + csAccountId);
        }
        return new ApiSolidFireAccountIdResponse(Long.parseLong(accountDetail.getValue()));
    }

    /**
     * Computes the volume's size on the SolidFire back end, including the
     * hypervisor snapshot reserve, via the pool's primary data store driver.
     *
     * @throws InvalidParameterValueException if the pool's driver is not a primary data store driver
     */
    @Override
    public ApiSolidFireVolumeSizeResponse getSolidFireVolumeSize(Volume volume, StoragePool storagePool) {
        DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(storagePool.getStorageProviderName());
        DataStoreDriver storeDriver = storeProvider.getDataStoreDriver();
        if (!(storeDriver instanceof PrimaryDataStoreDriver)) {
            // the original left primaryStoreDriver null on this path and NPE'd on the call below
            throw new InvalidParameterValueException("Invalid Storage Driver Type");
        }
        PrimaryDataStoreDriver primaryStoreDriver = (PrimaryDataStoreDriver)storeDriver;
        return new ApiSolidFireVolumeSizeResponse(primaryStoreDriver.getVolumeSizeIncludingHypervisorSnapshotReserve(volume, storagePool));
    }

    /**
     * Returns the SolidFire volume access group (VAG) ID recorded (as a cluster
     * detail) for the given CloudStack cluster on the given storage pool.
     *
     * @throws InvalidParameterValueException if no VAG ID is recorded
     */
    @Override
    public ApiSolidFireVolumeAccessGroupIdResponse getSolidFireVolumeAccessGroupId(Long csClusterId, Long storagePoolId) {
        ClusterDetailsVO clusterDetails = _clusterDetailsDao.findDetail(csClusterId, SolidFireUtil.getVagKey(storagePoolId));
        if (clusterDetails == null || clusterDetails.getValue() == null) {
            throw new InvalidParameterValueException("Unable to locate a SolidFire VAG ID for CloudStack cluster " + csClusterId);
        }
        return new ApiSolidFireVolumeAccessGroupIdResponse(Long.parseLong(clusterDetails.getValue()));
    }

    /** Returns the iSCSI name (IQN) of the given volume. */
    @Override
    public ApiSolidFireVolumeIscsiNameResponse getSolidFireVolumeIscsiName(Volume volume) {
        return new ApiSolidFireVolumeIscsiNameResponse(volume.get_iScsiName());
    }

    /** Integration-test commands are intentionally not access-restricted. */
    @Override
    public boolean checkAccess(User user, String apiCommandName) throws PermissionDeniedException {
        return true;
    }

    /** Registers this plugin's API command classes. */
    @Override
    public List<Class<?>> getCommands() {
        List<Class<?>> cmdList = new ArrayList<>();
        cmdList.add(GetSolidFireAccountIdCmd.class);
        cmdList.add(GetSolidFireVolumeSizeCmd.class);
        cmdList.add(GetSolidFireVolumeAccessGroupIdCmd.class);
        cmdList.add(GetSolidFireVolumeIscsiNameCmd.class);
        return cmdList;
    }
}

View File

@ -0,0 +1,23 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.solidfire;
/**
 * Manager used by the SolidFire integration-test API commands to resolve
 * SolidFire-side identifiers (account ID, VAG ID, volume size) from
 * CloudStack entity UUIDs.
 */
public interface SolidFireIntegrationTestManager {
    long getSolidFireAccountId(String csAccountUuid, String storagePoolUuid);
    long getSolidFireVolumeAccessGroupId(String csClusterUuid, String storagePoolUuid);
    long getSolidFireVolumeSize(String volumeUuid);
}

View File

@ -0,0 +1,78 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.solidfire;
import javax.inject.Inject;
import org.apache.cloudstack.storage.datastore.util.SolidFireUtil;
import org.apache.cloudstack.util.solidfire.SolidFireIntegrationTestUtil;
import org.springframework.stereotype.Component;
import com.cloud.dc.ClusterDetailsDao;
import com.cloud.dc.ClusterDetailsVO;
import com.cloud.storage.VolumeDetailVO;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.storage.dao.VolumeDetailsDao;
import com.cloud.user.AccountDetailsDao;
import com.cloud.user.AccountDetailVO;
import com.cloud.utils.exception.CloudRuntimeException;
@Component
public class SolidFireIntegrationTestManagerImpl implements SolidFireIntegrationTestManager {
    @Inject private AccountDetailsDao accountDetailsDao;
    @Inject private ClusterDetailsDao clusterDetailsDao;
    @Inject private SolidFireIntegrationTestUtil util;
    @Inject private VolumeDao volumeDao;
    @Inject private VolumeDetailsDao volumeDetailsDao;

    /**
     * Resolves the SolidFire account ID recorded (as an account detail) for the
     * CloudStack account on the given storage pool.
     *
     * @throws CloudRuntimeException if no SolidFire account ID has been recorded
     */
    @Override
    public long getSolidFireAccountId(String csAccountUuid, String storagePoolUuid) {
        long csAccountId = util.getAccountIdForAccountUuid(csAccountUuid);
        long storagePoolId = util.getStoragePoolIdForStoragePoolUuid(storagePoolUuid);
        AccountDetailVO accountDetail = accountDetailsDao.findDetail(csAccountId, SolidFireUtil.getAccountKey(storagePoolId));
        if (accountDetail == null || accountDetail.getValue() == null) {
            // previously an absent detail row produced a bare NPE
            throw new CloudRuntimeException("Unable to determine the SolidFire account ID for account UUID " + csAccountUuid);
        }
        return Long.parseLong(accountDetail.getValue());
    }

    /**
     * Resolves the SolidFire volume access group (VAG) ID recorded (as a cluster
     * detail) for the CloudStack cluster on the given storage pool.
     *
     * @throws CloudRuntimeException if no VAG ID has been recorded
     */
    @Override
    public long getSolidFireVolumeAccessGroupId(String csClusterUuid, String storagePoolUuid) {
        long csClusterId = util.getClusterIdForClusterUuid(csClusterUuid);
        long storagePoolId = util.getStoragePoolIdForStoragePoolUuid(storagePoolUuid);
        ClusterDetailsVO clusterDetails = clusterDetailsDao.findDetail(csClusterId, SolidFireUtil.getVagKey(storagePoolId));
        if (clusterDetails == null || clusterDetails.getValue() == null) {
            // previously an absent detail row produced a bare NPE
            throw new CloudRuntimeException("Unable to determine the SolidFire VAG ID for cluster UUID " + csClusterUuid);
        }
        return Long.parseLong(clusterDetails.getValue());
    }

    /**
     * Looks up the SolidFire-side size of the volume from its volume-details row.
     *
     * @throws CloudRuntimeException if the volume or its size detail cannot be found
     */
    @Override
    public long getSolidFireVolumeSize(String volumeUuid) {
        VolumeVO volume = volumeDao.findByUuid(volumeUuid);
        if (volume == null) {
            throw new CloudRuntimeException("Unable to locate a volume with UUID " + volumeUuid);
        }
        VolumeDetailVO volumeDetail = volumeDetailsDao.findDetail(volume.getId(), SolidFireUtil.VOLUME_SIZE);
        if (volumeDetail != null && volumeDetail.getValue() != null) {
            return Long.parseLong(volumeDetail.getValue());
        }
        throw new CloudRuntimeException("Unable to determine the size of the SolidFire volume");
    }
}

View File

@ -0,0 +1,112 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.util.solidfire;
import java.util.ArrayList;
import java.util.List;
import javax.inject.Inject;
import org.apache.cloudstack.api.response.solidfire.ApiVolumeSnapshotDetailsResponse;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import com.cloud.dc.ClusterVO;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.storage.SnapshotVO;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.SnapshotDao;
import com.cloud.storage.dao.SnapshotDetailsDao;
import com.cloud.storage.dao.SnapshotDetailsVO;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.user.Account;
import com.cloud.user.dao.AccountDao;
/**
 * Lookup helpers shared by the SolidFire integration-test API commands:
 * resolves CloudStack entity UUIDs to internal IDs and volume/snapshot fields.
 */
public class SolidFireIntegrationTestUtil {
    @Inject private AccountDao accountDao;
    @Inject private ClusterDao clusterDao;
    @Inject private PrimaryDataStoreDao storagePoolDao;
    @Inject private SnapshotDao snapshotDao;
    @Inject private SnapshotDetailsDao snapshotDetailsDao;
    @Inject private VolumeDao volumeDao;

    // instantiated by the container only; dependencies arrive via field injection
    private SolidFireIntegrationTestUtil() {}

    /** Internal account ID for the given account UUID. */
    public long getAccountIdForAccountUuid(String accountUuid) {
        final Account account = accountDao.findByUuid(accountUuid);

        return account.getAccountId();
    }

    /** Owning account ID of the volume with the given UUID. */
    public long getAccountIdForVolumeUuid(String volumeUuid) {
        final VolumeVO volume = volumeDao.findByUuid(volumeUuid);

        return volume.getAccountId();
    }

    /** Owning account ID of the snapshot with the given UUID. */
    public long getAccountIdForSnapshotUuid(String snapshotUuid) {
        final SnapshotVO snapshot = snapshotDao.findByUuid(snapshotUuid);

        return snapshot.getAccountId();
    }

    /** Internal cluster ID for the given cluster UUID. */
    public long getClusterIdForClusterUuid(String clusterUuid) {
        final ClusterVO cluster = clusterDao.findByUuid(clusterUuid);

        return cluster.getId();
    }

    /** Internal storage pool ID for the given pool UUID. */
    public long getStoragePoolIdForStoragePoolUuid(String storagePoolUuid) {
        final StoragePoolVO storagePool = storagePoolDao.findByUuid(storagePoolUuid);

        return storagePool.getId();
    }

    /** Storage path of the volume with the given UUID. */
    public String getPathForVolumeUuid(String volumeUuid) {
        final VolumeVO volume = volumeDao.findByUuid(volumeUuid);

        return volume.getPath();
    }

    /** iSCSI name (IQN) of the volume with the given UUID. */
    public String getVolume_iScsiName(String volumeUuid) {
        final VolumeVO volume = volumeDao.findByUuid(volumeUuid);

        return volume.get_iScsiName();
    }

    /**
     * All detail rows recorded for the snapshot with the given UUID, converted
     * to API response objects. Returns an empty list when no details exist.
     */
    public List<ApiVolumeSnapshotDetailsResponse> getSnapshotDetails(String snapshotUuid) {
        final SnapshotVO snapshot = snapshotDao.findByUuid(snapshotUuid);
        final List<ApiVolumeSnapshotDetailsResponse> responses = new ArrayList<>();

        final List<SnapshotDetailsVO> snapshotDetails = snapshotDetailsDao.listDetails(snapshot.getId());

        if (snapshotDetails == null) {
            return responses;
        }

        for (final SnapshotDetailsVO detail : snapshotDetails) {
            responses.add(new ApiVolumeSnapshotDetailsResponse(detail.getResourceId(), detail.getName(), detail.getValue()));
        }

        return responses;
    }
}

View File

@ -47,6 +47,8 @@ import org.apache.cloudstack.storage.command.DettachAnswer;
import org.apache.cloudstack.storage.command.DettachCommand;
import org.apache.cloudstack.storage.command.ForgetObjectCmd;
import org.apache.cloudstack.storage.command.IntroduceObjectCmd;
import org.apache.cloudstack.storage.command.ResignatureAnswer;
import org.apache.cloudstack.storage.command.ResignatureCommand;
import org.apache.cloudstack.storage.command.SnapshotAndCopyAnswer;
import org.apache.cloudstack.storage.command.SnapshotAndCopyCommand;
import org.apache.cloudstack.storage.to.PrimaryDataStoreTO;
@ -147,6 +149,13 @@ public class KVMStorageProcessor implements StorageProcessor {
return new SnapshotAndCopyAnswer();
}
@Override
public ResignatureAnswer resignature(final ResignatureCommand cmd) {
s_logger.info("'ResignatureAnswer resignature(ResignatureCommand)' not currently used for KVMStorageProcessor");
return new ResignatureAnswer();
}
@Override
public Answer copyTemplateToPrimaryStorage(final CopyCommand cmd) {
final DataTO srcData = cmd.getSrcTO();

View File

@ -31,6 +31,8 @@ import org.apache.cloudstack.storage.command.DeleteCommand;
import org.apache.cloudstack.storage.command.DettachCommand;
import org.apache.cloudstack.storage.command.ForgetObjectCmd;
import org.apache.cloudstack.storage.command.IntroduceObjectCmd;
import org.apache.cloudstack.storage.command.ResignatureAnswer;
import org.apache.cloudstack.storage.command.ResignatureCommand;
import org.apache.cloudstack.storage.command.SnapshotAndCopyAnswer;
import org.apache.cloudstack.storage.command.SnapshotAndCopyCommand;
import org.apache.cloudstack.storage.to.SnapshotObjectTO;
@ -805,9 +807,17 @@ public class Ovm3StorageProcessor implements StorageProcessor {
* iSCSI?
*/
@Override
public Answer snapshotAndCopy(SnapshotAndCopyCommand cmd) {
LOGGER.debug("execute snapshotAndCopy: "+ cmd.getClass());
return new SnapshotAndCopyAnswer("not implemented yet");
public SnapshotAndCopyAnswer snapshotAndCopy(SnapshotAndCopyCommand cmd) {
LOGGER.info("'SnapshotAndCopyAnswer snapshotAndCopy(SnapshotAndCopyCommand)' not currently used for Ovm3StorageProcessor");
return new SnapshotAndCopyAnswer("Not implemented");
}
@Override
public ResignatureAnswer resignature(final ResignatureCommand cmd) {
LOGGER.info("'ResignatureAnswer resignature(ResignatureCommand)' not currently used for Ovm3StorageProcessor");
return new ResignatureAnswer("Not implemented");
}
/**

View File

@ -35,6 +35,8 @@ import org.apache.cloudstack.storage.command.DettachAnswer;
import org.apache.cloudstack.storage.command.DettachCommand;
import org.apache.cloudstack.storage.command.ForgetObjectCmd;
import org.apache.cloudstack.storage.command.IntroduceObjectCmd;
import org.apache.cloudstack.storage.command.ResignatureAnswer;
import org.apache.cloudstack.storage.command.ResignatureCommand;
import org.apache.cloudstack.storage.command.SnapshotAndCopyAnswer;
import org.apache.cloudstack.storage.command.SnapshotAndCopyCommand;
import org.apache.cloudstack.storage.to.SnapshotObjectTO;
@ -66,6 +68,13 @@ public class SimulatorStorageProcessor implements StorageProcessor {
return new SnapshotAndCopyAnswer();
}
@Override
public ResignatureAnswer resignature(ResignatureCommand cmd) {
s_logger.info("'ResignatureAnswer resignature(ResignatureCommand)' not currently used for SimulatorStorageProcessor");
return new ResignatureAnswer();
}
@Override
public Answer copyTemplateToPrimaryStorage(CopyCommand cmd) {
TemplateObjectTO template = new TemplateObjectTO();

View File

@ -62,6 +62,8 @@ import org.apache.cloudstack.storage.command.DeleteCommand;
import org.apache.cloudstack.storage.command.DettachCommand;
import org.apache.cloudstack.storage.command.ForgetObjectCmd;
import org.apache.cloudstack.storage.command.IntroduceObjectCmd;
import org.apache.cloudstack.storage.command.ResignatureAnswer;
import org.apache.cloudstack.storage.command.ResignatureCommand;
import org.apache.cloudstack.storage.command.SnapshotAndCopyAnswer;
import org.apache.cloudstack.storage.command.SnapshotAndCopyCommand;
import org.apache.cloudstack.storage.to.PrimaryDataStoreTO;
@ -144,6 +146,13 @@ public class VmwareStorageProcessor implements StorageProcessor {
return new SnapshotAndCopyAnswer();
}
@Override
public ResignatureAnswer resignature(ResignatureCommand cmd) {
s_logger.info("'ResignatureAnswer resignature(ResignatureCommand)' not currently used for VmwareStorageProcessor");
return new ResignatureAnswer();
}
private String getOVFFilePath(String srcOVAFileName) {
File file = new File(srcOVAFileName);
assert (_storage != null);

View File

@ -37,6 +37,7 @@ import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Properties;
import java.util.Queue;
import java.util.Random;
@ -164,9 +165,16 @@ import com.xensource.xenapi.XenAPIObject;
*
*/
public abstract class CitrixResourceBase implements ServerResource, HypervisorResource, VirtualRouterDeployer {
/**
* used to describe what type of resource a storage device is of
*/
public enum SRType {
EXT, FILE, ISCSI, ISO, LVM, LVMOHBA, LVMOISCSI, NFS;
EXT, FILE, ISCSI, ISO, LVM, LVMOHBA, LVMOISCSI,
/**
* used for resigning metadata (like SR UUID and VDI UUID when a
* particular storage manager is installed on a XenServer host (for back-end snapshots to work))
*/
RELVMOISCSI, NFS;
String _str;
@ -1794,10 +1802,26 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
cmd.setPod(_pod);
cmd.setVersion(CitrixResourceBase.class.getPackage().getImplementationVersion());
try {
final String cmdLine = "xe sm-list | grep \"resigning of duplicates\"";
final XenServerUtilitiesHelper xenServerUtilitiesHelper = getXenServerUtilitiesHelper();
Pair<Boolean, String> result = xenServerUtilitiesHelper.executeSshWrapper(_host.getIp(), 22, _username, null, getPwdFromQueue(), cmdLine);
boolean supportsClonedVolumes = result != null && result.first() != null && result.first() &&
result.second() != null && result.second().length() > 0;
cmd.setSupportsClonedVolumes(supportsClonedVolumes);
} catch (NumberFormatException ex) {
s_logger.warn("Issue sending 'xe sm-list' via SSH to XenServer host: " + ex.getMessage());
}
} catch (final XmlRpcException e) {
throw new CloudRuntimeException("XML RPC Exception" + e.getMessage(), e);
throw new CloudRuntimeException("XML RPC Exception: " + e.getMessage(), e);
} catch (final XenAPIException e) {
throw new CloudRuntimeException("XenAPIException" + e.toString(), e);
throw new CloudRuntimeException("XenAPIException: " + e.toString(), e);
} catch (final Exception e) {
throw new CloudRuntimeException("Exception: " + e.toString(), e);
}
}
@ -2264,6 +2288,11 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
public SR getIscsiSR(final Connection conn, final String srNameLabel, final String target, String path, final String chapInitiatorUsername,
final String chapInitiatorPassword, final boolean ignoreIntroduceException) {
return getIscsiSR(conn, srNameLabel, target, path, chapInitiatorUsername, chapInitiatorPassword, false, ignoreIntroduceException);
}
public SR getIscsiSR(final Connection conn, final String srNameLabel, final String target, String path, final String chapInitiatorUsername,
final String chapInitiatorPassword, final boolean resignature, final boolean ignoreIntroduceException) {
synchronized (srNameLabel.intern()) {
final Map<String, String> deviceConfig = new HashMap<String, String>();
try {
@ -2353,17 +2382,52 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
throw new CloudRuntimeException(msg, e);
}
}
deviceConfig.put("SCSIid", scsiid);
final String result = SR.probe(conn, host, deviceConfig, type, smConfig);
String result = SR.probe(conn, host, deviceConfig, type, smConfig);
String pooluuid = null;
if (result.indexOf("<UUID>") != -1) {
pooluuid = result.substring(result.indexOf("<UUID>") + 6, result.indexOf("</UUID>")).trim();
}
if (pooluuid == null || pooluuid.length() != 36) {
sr = SR.create(conn, host, deviceConfig, new Long(0), srNameLabel, srNameLabel, type, "user", true, smConfig);
} else {
}
else {
if (resignature) {
try {
SR.create(conn, host, deviceConfig, new Long(0), srNameLabel, srNameLabel, SRType.RELVMOISCSI.toString(), "user", true, smConfig);
// The successful outcome of SR.create (right above) is to throw an exception of type XenAPIException (with expected
// toString() text) after resigning the metadata (we indicated to perform a resign by passing in SRType.RELVMOISCSI.toString()).
// That being the case, if this CloudRuntimeException statement is executed, there appears to have been some kind
// of failure in the execution of the above SR.create (resign) method.
throw new CloudRuntimeException("Problem resigning the metadata");
}
catch (XenAPIException ex) {
String msg = ex.toString();
if (!msg.contains("successfully resigned")) {
throw ex;
}
result = SR.probe(conn, host, deviceConfig, type, smConfig);
pooluuid = null;
if (result.indexOf("<UUID>") != -1) {
pooluuid = result.substring(result.indexOf("<UUID>") + 6, result.indexOf("</UUID>")).trim();
}
if (pooluuid == null || pooluuid.length() != 36) {
throw new CloudRuntimeException("Non-existent or invalid SR UUID");
}
}
}
try {
sr = SR.introduce(conn, pooluuid, srNameLabel, srNameLabel, type, "user", true, smConfig);
} catch (final XenAPIException ex) {
@ -2375,11 +2439,15 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
}
final Set<Host> setHosts = Host.getAll(conn);
if (setHosts == null) {
final String msg = "Unable to create Iscsi SR " + deviceConfig + " due to hosts not available.";
final String msg = "Unable to create iSCSI SR " + deviceConfig + " due to hosts not available.";
s_logger.warn(msg);
throw new CloudRuntimeException(msg);
}
for (final Host currentHost : setHosts) {
final PBD.Record rec = new PBD.Record();
@ -2392,7 +2460,9 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
pbd.plug(conn);
}
}
sr.scan(conn);
return sr;
} catch (final XenAPIException e) {
final String msg = "Unable to create Iscsi SR " + deviceConfig + " due to " + e.toString();
@ -3969,11 +4039,9 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
}
}
// the idea here is to see if the DiskTO in question is from managed storage
// and
// does not yet have an SR
// if no SR, create it and create a VDI in it
public VDI prepareManagedDisk(final Connection conn, final DiskTO disk, final String vmName) throws Exception {
// The idea here is to see if the DiskTO in question is from managed storage and does not yet have an SR.
// If no SR, create it and create a VDI in it.
public VDI prepareManagedDisk(final Connection conn, final DiskTO disk, final long vmId, final String vmName) throws Exception {
final Map<String, String> details = disk.getDetails();
if (details == null) {
@ -3994,7 +4062,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
return null;
}
final String vdiNameLabel = vmName + "-DATA";
final String vdiNameLabel = Volume.Type.ROOT.equals(disk.getType()) ? ("ROOT-" + vmId) : (vmName + "-DATA");
return prepareManagedStorage(conn, details, null, vdiNameLabel);
}
@ -4024,19 +4092,25 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
VDI vdi = getVDIbyUuid(conn, path, false);
final Long volumeSize = Long.parseLong(details.get(DiskTO.VOLUME_SIZE));
Set<VDI> vdisInSr = sr.getVDIs(conn);
// If a VDI already exists in the SR (in case we cloned from a template cache), use that.
if (vdisInSr.size() == 1) {
vdi = vdisInSr.iterator().next();
}
if (vdi == null) {
vdi = createVdi(sr, vdiNameLabel, volumeSize);
} else {
// if VDI is not null, it must have already been created, so check
// whether a resize of the volume was performed
// if true, resize the VDI to the volume size
// If vdi is not null, it must have already been created, so check whether a resize of the volume was performed.
// If true, resize the VDI to the volume size.
s_logger.info("checking for the resize of the datadisk");
s_logger.info("Checking for the resize of the datadisk");
final long vdiVirtualSize = vdi.getVirtualSize(conn);
if (vdiVirtualSize != volumeSize) {
s_logger.info("resizing the data disk (vdi) from vdiVirtualsize: " + vdiVirtualSize + " to volumeSize: " + volumeSize);
s_logger.info("Resizing the data disk (VDI) from vdiVirtualSize: " + vdiVirtualSize + " to volumeSize: " + volumeSize);
try {
vdi.resize(conn, volumeSize);
@ -4044,6 +4118,15 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
s_logger.warn("Unable to resize volume", e);
}
}
// change the name-label in case of a cloned VDI
if (!Objects.equals(vdi.getNameLabel(conn), vdiNameLabel)) {
try {
vdi.setNameLabel(conn, vdiNameLabel);
} catch (final Exception e) {
s_logger.warn("Unable to rename volume", e);
}
}
}
return vdi;

View File

@ -44,6 +44,8 @@ import org.apache.cloudstack.storage.command.DettachCommand;
import org.apache.cloudstack.storage.command.ForgetObjectCmd;
import org.apache.cloudstack.storage.command.IntroduceObjectAnswer;
import org.apache.cloudstack.storage.command.IntroduceObjectCmd;
import org.apache.cloudstack.storage.command.ResignatureAnswer;
import org.apache.cloudstack.storage.command.ResignatureCommand;
import org.apache.cloudstack.storage.command.SnapshotAndCopyAnswer;
import org.apache.cloudstack.storage.command.SnapshotAndCopyCommand;
import org.apache.cloudstack.storage.datastore.protocol.DataStoreProtocol;
@ -159,6 +161,50 @@ public class XenServerStorageProcessor implements StorageProcessor {
}
}
@Override
public ResignatureAnswer resignature(final ResignatureCommand cmd) {
SR newSr = null;
final Connection conn = hypervisorResource.getConnection();
try {
final Map<String, String> details = cmd.getDetails();
final String iScsiName = details.get(DiskTO.IQN);
final String storageHost = details.get(DiskTO.STORAGE_HOST);
final String chapInitiatorUsername = details.get(DiskTO.CHAP_INITIATOR_USERNAME);
final String chapInitiatorSecret = details.get(DiskTO.CHAP_INITIATOR_SECRET);
newSr = hypervisorResource.getIscsiSR(conn, iScsiName, storageHost, iScsiName, chapInitiatorUsername, chapInitiatorSecret, true, false);
Set<VDI> vdis = newSr.getVDIs(conn);
if (vdis.size() != 1) {
throw new RuntimeException("There were " + vdis.size() + " VDIs in the SR.");
}
VDI vdi = vdis.iterator().next();
final ResignatureAnswer resignatureAnswer = new ResignatureAnswer();
resignatureAnswer.setSize(vdi.getVirtualSize(conn));
resignatureAnswer.setPath(vdi.getUuid(conn));
resignatureAnswer.setFormat(ImageFormat.VHD);
return resignatureAnswer;
}
catch (final Exception ex) {
s_logger.warn("Failed to resignature: " + ex.toString(), ex);
return new ResignatureAnswer(ex.getMessage());
}
finally {
if (newSr != null) {
hypervisorResource.removeSR(conn, newSr);
}
}
}
@Override
public AttachAnswer attachIso(final AttachCommand cmd) {
final DiskTO disk = cmd.getDisk();
@ -763,6 +809,9 @@ public class XenServerStorageProcessor implements StorageProcessor {
final DataTO destDataTo = cmd.getDestTO();
final int wait = cmd.getWait();
final DataStoreTO srcDataStoreTo = srcDataTo.getDataStore();
final Connection conn = hypervisorResource.getConnection();
SR sr = null;
boolean removeSrAfterCopy = false;
try {
if (srcDataStoreTo instanceof NfsTO && srcDataTo.getObjectType() == DataObjectType.TEMPLATE) {
@ -796,14 +845,11 @@ public class XenServerStorageProcessor implements StorageProcessor {
managedStoragePoolRootVolumeSize = details.get(PrimaryDataStoreTO.VOLUME_SIZE);
chapInitiatorUsername = details.get(PrimaryDataStoreTO.CHAP_INITIATOR_USERNAME);
chapInitiatorSecret = details.get(PrimaryDataStoreTO.CHAP_INITIATOR_SECRET);
removeSrAfterCopy = Boolean.parseBoolean(details.get(PrimaryDataStoreTO.REMOVE_AFTER_COPY));
}
}
}
final Connection conn = hypervisorResource.getConnection();
final SR sr;
if (managed) {
final Map<String, String> details = new HashMap<String, String>();
@ -861,9 +907,11 @@ public class XenServerStorageProcessor implements StorageProcessor {
newVol.setUuid(uuidToReturn);
newVol.setPath(uuidToReturn);
if (physicalSize != null) {
newVol.setSize(physicalSize);
}
newVol.setFormat(ImageFormat.VHD);
return new CopyCmdAnswer(newVol);
@ -875,6 +923,11 @@ public class XenServerStorageProcessor implements StorageProcessor {
return new CopyCmdAnswer(msg);
}
finally {
if (removeSrAfterCopy && sr != null) {
hypervisorResource.removeSR(conn, sr);
}
}
return new CopyCmdAnswer("not implemented yet");
}

View File

@ -171,6 +171,8 @@ public class Xenserver625StorageProcessor extends XenServerStorageProcessor {
final DataStoreTO srcStore = srcData.getDataStore();
final Connection conn = hypervisorResource.getConnection();
SR srcSr = null;
SR destSr = null;
boolean removeSrAfterCopy = false;
Task task = null;
try {
@ -198,7 +200,8 @@ public class Xenserver625StorageProcessor extends XenServerStorageProcessor {
final Set<VDI> setVdis = srcSr.getVDIs(conn);
if (setVdis.size() != 1) {
return new CopyCmdAnswer("Expected 1 VDI template but found " + setVdis.size() + " VDI template(s) on: " + uri.getHost() + ":" + uri.getPath() + "/" + volumeDirectory);
return new CopyCmdAnswer("Expected 1 VDI template, but found " + setVdis.size() + " VDI templates on: " +
uri.getHost() + ":" + uri.getPath() + "/" + volumeDirectory);
}
final VDI srcVdi = setVdis.iterator().next();
@ -225,11 +228,10 @@ public class Xenserver625StorageProcessor extends XenServerStorageProcessor {
managedStoragePoolRootVolumeSize = details.get(PrimaryDataStoreTO.VOLUME_SIZE);
chapInitiatorUsername = details.get(PrimaryDataStoreTO.CHAP_INITIATOR_USERNAME);
chapInitiatorSecret = details.get(PrimaryDataStoreTO.CHAP_INITIATOR_SECRET);
removeSrAfterCopy = Boolean.parseBoolean(details.get(PrimaryDataStoreTO.REMOVE_AFTER_COPY));
}
}
final SR destSr;
if (managed) {
details = new HashMap<String, String>();
@ -291,9 +293,11 @@ public class Xenserver625StorageProcessor extends XenServerStorageProcessor {
newVol.setUuid(uuidToReturn);
newVol.setPath(uuidToReturn);
if (physicalSize != null) {
newVol.setSize(physicalSize);
}
newVol.setFormat(Storage.ImageFormat.VHD);
return new CopyCmdAnswer(newVol);
@ -316,6 +320,10 @@ public class Xenserver625StorageProcessor extends XenServerStorageProcessor {
if (srcSr != null) {
hypervisorResource.removeSR(conn, srcSr);
}
if (removeSrAfterCopy && destSr != null) {
hypervisorResource.removeSR(conn, destSr);
}
}
return new CopyCmdAnswer("not implemented yet");

View File

@ -27,28 +27,80 @@ import com.cloud.agent.api.storage.ResizeVolumeCommand;
import com.cloud.hypervisor.xenserver.resource.CitrixResourceBase;
import com.cloud.resource.CommandWrapper;
import com.cloud.resource.ResourceWrapper;
import com.cloud.utils.exception.CloudRuntimeException;
import com.xensource.xenapi.Connection;
import com.xensource.xenapi.PBD;
import com.xensource.xenapi.SR;
import com.xensource.xenapi.VDI;
import java.util.HashSet;
import java.util.Set;
@ResourceWrapper(handles = ResizeVolumeCommand.class)
public final class CitrixResizeVolumeCommandWrapper extends CommandWrapper<ResizeVolumeCommand, Answer, CitrixResourceBase> {
private static final Logger s_logger = Logger.getLogger(CitrixResizeVolumeCommandWrapper.class);
@Override
public Answer execute(final ResizeVolumeCommand command, final CitrixResourceBase citrixResourceBase) {
final Connection conn = citrixResourceBase.getConnection();
final String volid = command.getPath();
final long newSize = command.getNewSize();
Connection conn = citrixResourceBase.getConnection();
String volId = command.getPath();
long newSize = command.getNewSize();
try {
final VDI vdi = citrixResourceBase.getVDIbyUuid(conn, volid);
if (command.isManaged()) {
resizeSr(conn, command);
}
VDI vdi = citrixResourceBase.getVDIbyUuid(conn, volId);
vdi.resize(conn, newSize);
return new ResizeVolumeAnswer(command, true, "success", newSize);
} catch (final Exception e) {
s_logger.warn("Unable to resize volume", e);
final String error = "failed to resize volume:" + e;
} catch (Exception ex) {
s_logger.warn("Unable to resize volume", ex);
String error = "Failed to resize volume: " + ex;
return new ResizeVolumeAnswer(command, false, error);
}
}
}
private void resizeSr(Connection conn, ResizeVolumeCommand command) {
// If this is managed storage, re-size the SR, too.
// The logical unit/volume has already been re-sized, so the SR needs to fill up the new space.
String iScsiName = command.get_iScsiName();
try {
Set<SR> srs = SR.getByNameLabel(conn, iScsiName);
Set<PBD> allPbds = new HashSet<>();
for (SR sr : srs) {
if (!CitrixResourceBase.SRType.LVMOISCSI.equals(sr.getType(conn))) {
continue;
}
Set<PBD> pbds = sr.getPBDs(conn);
if (pbds.size() <= 0) {
s_logger.debug("No PBDs found for the following SR: " + sr.getNameLabel(conn));
}
allPbds.addAll(pbds);
}
for (PBD pbd: allPbds) {
PBD.Record pbdr = pbd.getRecord(conn);
if (pbdr.currentlyAttached) {
pbd.unplug(conn);
pbd.plug(conn);
}
}
}
catch (Throwable ex) {
throw new CloudRuntimeException("Unable to resize volume: " + ex.getMessage());
}
}
}

View File

@ -108,11 +108,13 @@ public final class CitrixStartCommandWrapper extends CommandWrapper<StartCommand
}
index++;
}
for (DiskTO disk : disks) {
final VDI newVdi = citrixResourceBase.prepareManagedDisk(conn, disk, vmName);
final VDI newVdi = citrixResourceBase.prepareManagedDisk(conn, disk, vmSpec.getId(), vmSpec.getName());
if (newVdi != null) {
final String path = newVdi.getUuid(conn);
iqnToPath.put(disk.getDetails().get(DiskTO.IQN), path);
}

View File

@ -53,7 +53,6 @@ import com.cloud.storage.ResizeVolumePayload;
import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.storage.StorageManager;
import com.cloud.storage.StoragePool;
import com.cloud.storage.Volume;
import com.cloud.storage.VolumeDetailVO;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.DiskOfferingDao;
@ -110,7 +109,7 @@ public class ElastistorPrimaryDataStoreDriver extends CloudStackPrimaryDataStore
String volumeName = volumeInfo.getName();
Long Iops = volumeInfo.getMaxIops();
// quota size of the cloudbyte volume will be increased with the given HypervisorSnapshotReserve
Long quotaSize = getVolumeSizeIncludingHypervisorSnapshotReserve(volumeInfo, _storagePoolDao.findById(storagePoolId));
Long quotaSize = getDataObjectSizeIncludingHypervisorSnapshotReserve(volumeInfo, _storagePoolDao.findById(storagePoolId));
StoragePoolVO storagePool = _storagePoolDao.findById(dataStore.getId());
VolumeVO volume = _volumeDao.findById(volumeInfo.getId());
@ -337,7 +336,8 @@ public class ElastistorPrimaryDataStoreDriver extends CloudStackPrimaryDataStore
}
@Override
public long getVolumeSizeIncludingHypervisorSnapshotReserve(Volume volume, StoragePool pool) {
public long getDataObjectSizeIncludingHypervisorSnapshotReserve(DataObject dataObject, StoragePool pool) {
VolumeInfo volume = (VolumeInfo)dataObject;
long volumeSize = volume.getSize();
Integer hypervisorSnapshotReserve = volume.getHypervisorSnapshotReserve();
@ -353,7 +353,7 @@ public class ElastistorPrimaryDataStoreDriver extends CloudStackPrimaryDataStore
}
@Override
public ChapInfo getChapInfo(VolumeInfo volumeInfo) {
public ChapInfo getChapInfo(DataObject dataObject) {
return null;
}

View File

@ -38,6 +38,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.StorageAction;
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateDataFactory;
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
@ -70,7 +71,6 @@ import com.cloud.storage.ResizeVolumePayload;
import com.cloud.storage.Storage;
import com.cloud.storage.StorageManager;
import com.cloud.storage.StoragePool;
import com.cloud.storage.Volume;
import com.cloud.storage.dao.DiskOfferingDao;
import com.cloud.storage.dao.SnapshotDao;
import com.cloud.storage.dao.VMTemplateDao;
@ -145,7 +145,7 @@ public class CloudStackPrimaryDataStoreDriverImpl implements PrimaryDataStoreDri
}
@Override
public ChapInfo getChapInfo(VolumeInfo volumeInfo) {
public ChapInfo getChapInfo(DataObject dataObject) {
return null;
}
@ -169,8 +169,13 @@ public class CloudStackPrimaryDataStoreDriverImpl implements PrimaryDataStoreDri
}
@Override
public long getVolumeSizeIncludingHypervisorSnapshotReserve(Volume volume, StoragePool pool) {
return volume.getSize();
public long getDataObjectSizeIncludingHypervisorSnapshotReserve(DataObject dataObject, StoragePool pool) {
return dataObject.getSize();
}
@Override
public long getBytesRequiredForTemplate(TemplateInfo templateInfo, StoragePool storagePool) {
return 0;
}
@Override

View File

@ -31,6 +31,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
import org.apache.cloudstack.storage.command.CommandResult;
@ -49,7 +50,6 @@ import com.cloud.agent.api.to.DataTO;
import com.cloud.host.Host;
import com.cloud.storage.Storage;
import com.cloud.storage.StoragePool;
import com.cloud.storage.Volume;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.user.dao.AccountDao;
@ -78,8 +78,13 @@ public class NexentaPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
}
@Override
public long getVolumeSizeIncludingHypervisorSnapshotReserve(Volume volume, StoragePool pool) {
return 0; //To change body of implemented methods use File | Settings | File Templates.
public long getDataObjectSizeIncludingHypervisorSnapshotReserve(DataObject dataObject, StoragePool pool) {
return 0;
}
@Override
public long getBytesRequiredForTemplate(TemplateInfo templateInfo, StoragePool storagePool) {
return 0;
}
@Inject
@ -97,7 +102,7 @@ public class NexentaPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
}
@Override
public ChapInfo getChapInfo(VolumeInfo volumeInfo) {
public ChapInfo getChapInfo(DataObject dataObject) {
return null;
}

View File

@ -31,7 +31,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint;
import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher;
import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
import org.apache.cloudstack.framework.async.AsyncRpcContext;
@ -44,7 +44,6 @@ import com.cloud.agent.api.to.DataStoreTO;
import com.cloud.agent.api.to.DataTO;
import com.cloud.host.Host;
import com.cloud.storage.StoragePool;
import com.cloud.storage.Volume;
import com.cloud.storage.dao.StoragePoolHostDao;
import com.cloud.utils.exception.CloudRuntimeException;
@ -77,7 +76,7 @@ public class SamplePrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
}
@Override
public ChapInfo getChapInfo(VolumeInfo volumeInfo) {
public ChapInfo getChapInfo(DataObject dataObject) {
return null;
}
@ -98,16 +97,18 @@ public class SamplePrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
}
@Override
public long getVolumeSizeIncludingHypervisorSnapshotReserve(Volume volume, StoragePool pool) {
return volume.getSize();
public long getDataObjectSizeIncludingHypervisorSnapshotReserve(DataObject dataObject, StoragePool pool) {
return dataObject.getSize();
}
@Override
public long getBytesRequiredForTemplate(TemplateInfo templateInfo, StoragePool storagePool) {
return 0;
}
private class CreateVolumeContext<T> extends AsyncRpcContext<T> {
private final DataObject volume;
public CreateVolumeContext(AsyncCompletionCallback<T> callback, DataObject volume) {
super(callback);
this.volume = volume;
}
}

View File

@ -33,6 +33,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCy
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters;
import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.datastore.util.SolidFireUtil;
import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper;
@ -49,23 +50,27 @@ import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.storage.dao.SnapshotDao;
import com.cloud.storage.dao.SnapshotDetailsDao;
import com.cloud.storage.dao.SnapshotDetailsVO;
import com.cloud.storage.dao.VMTemplatePoolDao;
import com.cloud.storage.SnapshotVO;
import com.cloud.storage.StorageManager;
import com.cloud.storage.StoragePoolAutomation;
import com.cloud.storage.VMTemplateStoragePoolVO;
import com.cloud.utils.exception.CloudRuntimeException;
public class SolidFirePrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeCycle {
private static final Logger s_logger = Logger.getLogger(SolidFirePrimaryDataStoreLifeCycle.class);
@Inject private CapacityManager _capacityMgr;
@Inject private DataCenterDao zoneDao;
@Inject private PrimaryDataStoreDao storagePoolDao;
@Inject private PrimaryDataStoreHelper dataStoreHelper;
@Inject private DataCenterDao _zoneDao;
@Inject private PrimaryDataStoreDao _storagePoolDao;
@Inject private PrimaryDataStoreHelper _dataStoreHelper;
@Inject private ResourceManager _resourceMgr;
@Inject private SnapshotDao _snapshotDao;
@Inject private SnapshotDetailsDao _snapshotDetailsDao;
@Inject private StorageManager _storageMgr;
@Inject private StoragePoolAutomation storagePoolAutomation;
@Inject private StoragePoolAutomation _storagePoolAutomation;
@Inject private StoragePoolDetailsDao _storagePoolDetailsDao;
@Inject private VMTemplatePoolDao _tmpltPoolDao;
// invoked to add primary storage that is based on the SolidFire plug-in
@Override
@ -83,7 +88,7 @@ public class SolidFirePrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeC
String storageVip = SolidFireUtil.getStorageVip(url);
int storagePort = SolidFireUtil.getStoragePort(url);
DataCenterVO zone = zoneDao.findById(zoneId);
DataCenterVO zone = _zoneDao.findById(zoneId);
String uuid = SolidFireUtil.PROVIDER_NAME + "_" + zone.getUuid() + "_" + storageVip;
@ -179,7 +184,7 @@ public class SolidFirePrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeC
details.put(SolidFireUtil.CLUSTER_DEFAULT_BURST_IOPS_PERCENT_OF_MAX_IOPS, String.valueOf(fClusterDefaultBurstIopsPercentOfMaxIops));
// this adds a row in the cloud.storage_pool table for this SolidFire cluster
return dataStoreHelper.createPrimaryDataStore(parameters);
return _dataStoreHelper.createPrimaryDataStore(parameters);
}
// do not implement this method for SolidFire's plug-in
@ -196,7 +201,7 @@ public class SolidFirePrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeC
@Override
public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType hypervisorType) {
dataStoreHelper.attachZone(dataStore);
_dataStoreHelper.attachZone(dataStore);
List<HostVO> xenServerHosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.XenServer, scope.getScopeId());
List<HostVO> vmWareServerHosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.VMware, scope.getScopeId());
@ -220,23 +225,25 @@ public class SolidFirePrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeC
@Override
public boolean maintain(DataStore dataStore) {
storagePoolAutomation.maintain(dataStore);
dataStoreHelper.maintain(dataStore);
_storagePoolAutomation.maintain(dataStore);
_dataStoreHelper.maintain(dataStore);
return true;
}
@Override
public boolean cancelMaintain(DataStore store) {
dataStoreHelper.cancelMaintain(store);
storagePoolAutomation.cancelMaintain(store);
_dataStoreHelper.cancelMaintain(store);
_storagePoolAutomation.cancelMaintain(store);
return true;
}
// invoked to delete primary storage that is based on the SolidFire plug-in
@Override
public boolean deleteDataStore(DataStore store) {
public boolean deleteDataStore(DataStore dataStore) {
long storagePoolId = dataStore.getId();
List<SnapshotVO> lstSnapshots = _snapshotDao.listAll();
if (lstSnapshots != null) {
@ -244,13 +251,39 @@ public class SolidFirePrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeC
SnapshotDetailsVO snapshotDetails = _snapshotDetailsDao.findDetail(snapshot.getId(), SolidFireUtil.STORAGE_POOL_ID);
// if this snapshot belongs to the storagePool that was passed in
if (snapshotDetails != null && snapshotDetails.getValue() != null && Long.parseLong(snapshotDetails.getValue()) == store.getId()) {
if (snapshotDetails != null && snapshotDetails.getValue() != null && Long.parseLong(snapshotDetails.getValue()) == storagePoolId) {
throw new CloudRuntimeException("This primary storage cannot be deleted because it currently contains one or more snapshots.");
}
}
}
return dataStoreHelper.deletePrimaryDataStore(store);
List<VMTemplateStoragePoolVO> lstTemplatePoolRefs = _tmpltPoolDao.listByPoolId(storagePoolId);
if (lstTemplatePoolRefs != null) {
for (VMTemplateStoragePoolVO templatePoolRef : lstTemplatePoolRefs) {
try {
SolidFireUtil.SolidFireConnection sfConnection = SolidFireUtil.getSolidFireConnection(storagePoolId, _storagePoolDetailsDao);
long sfTemplateVolumeId = Long.parseLong(templatePoolRef.getLocalDownloadPath());
SolidFireUtil.deleteSolidFireVolume(sfConnection, sfTemplateVolumeId);
}
catch (Exception ex) {
s_logger.error(ex.getMessage() != null ? ex.getMessage() : "Error deleting SolidFire template volume");
}
_tmpltPoolDao.remove(templatePoolRef.getId());
}
}
StoragePoolVO storagePool = _storagePoolDao.findById(storagePoolId);
storagePool.setUsedBytes(0);
_storagePoolDao.update(storagePoolId, storagePool);
_storagePoolDetailsDao.removeDetails(storagePoolId);
return _dataStoreHelper.deletePrimaryDataStore(dataStore);
}
/* (non-Javadoc)
@ -263,7 +296,7 @@ public class SolidFirePrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeC
@Override
public void updateStoragePool(StoragePool storagePool, Map<String, String> details) {
StoragePoolVO storagePoolVo = storagePoolDao.findById(storagePool.getId());
StoragePoolVO storagePoolVo = _storagePoolDao.findById(storagePool.getId());
String strCapacityBytes = details.get(PrimaryDataStoreLifeCycle.CAPACITY_BYTES);
Long capacityBytes = strCapacityBytes != null ? Long.parseLong(strCapacityBytes) : null;
@ -290,11 +323,11 @@ public class SolidFirePrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeC
@Override
public void enableStoragePool(DataStore dataStore) {
dataStoreHelper.enable(dataStore);
_dataStoreHelper.enable(dataStore);
}
@Override
public void disableStoragePool(DataStore dataStore) {
dataStoreHelper.disable(dataStore);
_dataStoreHelper.disable(dataStore);
}
}

View File

@ -52,24 +52,24 @@ import com.cloud.storage.dao.StoragePoolHostDao;
import com.cloud.utils.exception.CloudRuntimeException;
public class SolidFireSharedHostListener implements HypervisorHostListener {
private static final Logger s_logger = Logger.getLogger(SolidFireSharedHostListener.class);
private static final Logger LOGGER = Logger.getLogger(SolidFireSharedHostListener.class);
@Inject private AgentManager _agentMgr;
@Inject private AlertManager _alertMgr;
@Inject private ClusterDao _clusterDao;
@Inject private ClusterDetailsDao _clusterDetailsDao;
@Inject private DataStoreManager _dataStoreMgr;
@Inject private HostDao _hostDao;
@Inject private PrimaryDataStoreDao _storagePoolDao;
@Inject private StoragePoolHostDao _storagePoolHostDao;
@Inject private StoragePoolDetailsDao _storagePoolDetailsDao;
@Inject private AgentManager agentMgr;
@Inject private AlertManager alertMgr;
@Inject private ClusterDao clusterDao;
@Inject private ClusterDetailsDao clusterDetailsDao;
@Inject private DataStoreManager dataStoreMgr;
@Inject private HostDao hostDao;
@Inject private PrimaryDataStoreDao storagePoolDao;
@Inject private StoragePoolHostDao storagePoolHostDao;
@Inject private StoragePoolDetailsDao storagePoolDetailsDao;
@Override
public boolean hostAdded(long hostId) {
HostVO host = _hostDao.findById(hostId);
HostVO host = hostDao.findById(hostId);
SolidFireUtil.hostAddedToOrRemovedFromCluster(hostId, host.getClusterId(), true, SolidFireUtil.SHARED_PROVIDER_NAME,
_clusterDao, _clusterDetailsDao, _storagePoolDao, _storagePoolDetailsDao, _hostDao);
clusterDao, clusterDetailsDao, storagePoolDao, storagePoolDetailsDao, hostDao);
handleVMware(hostId, true);
@ -78,37 +78,37 @@ public class SolidFireSharedHostListener implements HypervisorHostListener {
@Override
public boolean hostConnect(long hostId, long storagePoolId) {
StoragePool storagePool = (StoragePool)_dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary);
StoragePool storagePool = (StoragePool) dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary);
ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, storagePool);
ModifyStoragePoolAnswer answer = sendModifyStoragePoolCommand(cmd, storagePool, hostId);
StoragePoolHostVO storagePoolHost = _storagePoolHostDao.findByPoolHost(storagePoolId, hostId);
StoragePoolHostVO storagePoolHost = storagePoolHostDao.findByPoolHost(storagePoolId, hostId);
if (storagePoolHost != null) {
storagePoolHost.setLocalPath(answer.getPoolInfo().getLocalPath().replaceAll("//", "/"));
} else {
storagePoolHost = new StoragePoolHostVO(storagePoolId, hostId, answer.getPoolInfo().getLocalPath().replaceAll("//", "/"));
_storagePoolHostDao.persist(storagePoolHost);
storagePoolHostDao.persist(storagePoolHost);
}
StoragePoolVO storagePoolVO = _storagePoolDao.findById(storagePoolId);
StoragePoolVO storagePoolVO = storagePoolDao.findById(storagePoolId);
storagePoolVO.setCapacityBytes(answer.getPoolInfo().getCapacityBytes());
storagePoolVO.setUsedBytes(answer.getPoolInfo().getCapacityBytes() - answer.getPoolInfo().getAvailableBytes());
_storagePoolDao.update(storagePoolId, storagePoolVO);
storagePoolDao.update(storagePoolId, storagePoolVO);
return true;
}
@Override
public boolean hostDisconnected(long hostId, long storagePoolId) {
StoragePoolHostVO storagePoolHost = _storagePoolHostDao.findByPoolHost(storagePoolId, hostId);
StoragePoolHostVO storagePoolHost = storagePoolHostDao.findByPoolHost(storagePoolId, hostId);
if (storagePoolHost != null) {
_storagePoolHostDao.deleteStoragePoolHostDetails(hostId, storagePoolId);
storagePoolHostDao.deleteStoragePoolHostDetails(hostId, storagePoolId);
}
return true;
@ -124,16 +124,16 @@ public class SolidFireSharedHostListener implements HypervisorHostListener {
@Override
public boolean hostRemoved(long hostId, long clusterId) {
SolidFireUtil.hostAddedToOrRemovedFromCluster(hostId, clusterId, false, SolidFireUtil.SHARED_PROVIDER_NAME,
_clusterDao, _clusterDetailsDao, _storagePoolDao, _storagePoolDetailsDao, _hostDao);
clusterDao, clusterDetailsDao, storagePoolDao, storagePoolDetailsDao, hostDao);
return true;
}
private void handleVMware(long hostId, boolean add) {
HostVO host = _hostDao.findById(hostId);
HostVO host = hostDao.findById(hostId);
if (HypervisorType.VMware.equals(host.getHypervisorType())) {
List<StoragePoolVO> storagePools = _storagePoolDao.findPoolsByProvider(SolidFireUtil.SHARED_PROVIDER_NAME);
List<StoragePoolVO> storagePools = storagePoolDao.findPoolsByProvider(SolidFireUtil.SHARED_PROVIDER_NAME);
if (storagePools != null && storagePools.size() > 0) {
List<Map<String, String>> targets = new ArrayList<>();
@ -142,15 +142,15 @@ public class SolidFireSharedHostListener implements HypervisorHostListener {
if (storagePool.getClusterId().equals(host.getClusterId())) {
long storagePoolId = storagePool.getId();
StoragePoolDetailVO storagePoolDetail = _storagePoolDetailsDao.findDetail(storagePoolId, SolidFireUtil.IQN);
StoragePoolDetailVO storagePoolDetail = storagePoolDetailsDao.findDetail(storagePoolId, SolidFireUtil.IQN);
String iqn = storagePoolDetail.getValue();
storagePoolDetail = _storagePoolDetailsDao.findDetail(storagePoolId, SolidFireUtil.STORAGE_VIP);
storagePoolDetail = storagePoolDetailsDao.findDetail(storagePoolId, SolidFireUtil.STORAGE_VIP);
String sVip = storagePoolDetail.getValue();
storagePoolDetail = _storagePoolDetailsDao.findDetail(storagePoolId, SolidFireUtil.STORAGE_PORT);
storagePoolDetail = storagePoolDetailsDao.findDetail(storagePoolId, SolidFireUtil.STORAGE_PORT);
String sPort = storagePoolDetail.getValue();
@ -177,7 +177,7 @@ public class SolidFireSharedHostListener implements HypervisorHostListener {
}
private void sendModifyTargetsCommand(ModifyTargetsCommand cmd, long hostId) {
Answer answer = _agentMgr.easySend(hostId, cmd);
Answer answer = agentMgr.easySend(hostId, cmd);
if (answer == null) {
throw new CloudRuntimeException("Unable to get an answer to the modify targets command");
@ -186,16 +186,16 @@ public class SolidFireSharedHostListener implements HypervisorHostListener {
if (!answer.getResult()) {
String msg = "Unable to modify targets on the following host: " + hostId;
HostVO host = _hostDao.findById(hostId);
HostVO host = hostDao.findById(hostId);
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), msg, msg);
alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), msg, msg);
throw new CloudRuntimeException(msg);
}
}
private ModifyStoragePoolAnswer sendModifyStoragePoolCommand(ModifyStoragePoolCommand cmd, StoragePool storagePool, long hostId) {
Answer answer = _agentMgr.easySend(hostId, cmd);
Answer answer = agentMgr.easySend(hostId, cmd);
if (answer == null) {
throw new CloudRuntimeException("Unable to get an answer to the modify storage pool command for storage pool: " + storagePool.getId());
@ -204,7 +204,7 @@ public class SolidFireSharedHostListener implements HypervisorHostListener {
if (!answer.getResult()) {
String msg = "Unable to attach storage pool " + storagePool.getId() + " to the host " + hostId;
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, storagePool.getDataCenterId(), storagePool.getPodId(), msg, msg);
alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, storagePool.getDataCenterId(), storagePool.getPodId(), msg, msg);
throw new CloudRuntimeException(msg);
}
@ -212,7 +212,7 @@ public class SolidFireSharedHostListener implements HypervisorHostListener {
assert (answer instanceof ModifyStoragePoolAnswer) : "ModifyStoragePoolAnswer not returned from ModifyStoragePoolCommand; Storage pool = " +
storagePool.getId() + "; Host = " + hostId;
s_logger.info("Connection established between storage pool " + storagePool + " and host " + hostId);
LOGGER.info("Connection established between storage pool " + storagePool + " and host " + hostId);
return (ModifyStoragePoolAnswer)answer;
}

View File

@ -30,6 +30,7 @@ import java.security.cert.X509Certificate;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
@ -55,6 +56,7 @@ import org.apache.log4j.Logger;
import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import com.google.gson.JsonObject;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO;
@ -104,6 +106,15 @@ public class SolidFireUtil {
public static final String ACCOUNT_ID = "accountId";
public static final String VOLUME_ID = "volumeId";
public static final String TEMP_VOLUME_ID = "tempVolumeId";
public static final String SNAPSHOT_ID = "snapshotId";
public static final String CloudStackVolumeId = "CloudStackVolumeId";
public static final String CloudStackVolumeSize = "CloudStackVolumeSize";
public static final String CloudStackSnapshotId = "CloudStackSnapshotId";
public static final String CloudStackSnapshotSize = "CloudStackSnapshotSize";
public static final String CloudStackTemplateId = "CloudStackTemplateId";
public static final String CloudStackTemplateSize = "CloudStackTemplateSize";
public static final String VOLUME_SIZE = "sfVolumeSize";
@ -562,13 +573,44 @@ public class SolidFireUtil {
}
public static long createSolidFireVolume(SolidFireConnection sfConnection, String strSfVolumeName, long lSfAccountId, long lTotalSize,
boolean bEnable512e, String strCloudStackVolumeSize, long minIops, long maxIops, long burstIops)
boolean bEnable512e, Map<String, String> mapAttributes, long minIops, long maxIops, long burstIops)
{
final Gson gson = new GsonBuilder().create();
JsonObject volumeToCreate = new JsonObject();
Object volumeToCreate = strCloudStackVolumeSize != null && strCloudStackVolumeSize.trim().length() > 0 ?
new VolumeToCreateWithCloudStackVolumeSize(strSfVolumeName, lSfAccountId, lTotalSize, bEnable512e, strCloudStackVolumeSize, minIops, maxIops, burstIops) :
new VolumeToCreate(strSfVolumeName, lSfAccountId, lTotalSize, bEnable512e, minIops, maxIops, burstIops);
volumeToCreate.addProperty("method", "CreateVolume");
JsonObject params = new JsonObject();
volumeToCreate.add("params", params);
params.addProperty("name", strSfVolumeName);
params.addProperty("accountID", lSfAccountId);
params.addProperty("totalSize", lTotalSize);
params.addProperty("enable512e", bEnable512e);
JsonObject qos = new JsonObject();
params.add("qos", qos);
qos.addProperty("minIOPS", minIops);
qos.addProperty("maxIOPS", maxIops);
qos.addProperty("burstIOPS", burstIops);
if (mapAttributes != null && mapAttributes.size() > 0) {
JsonObject attributes = new JsonObject();
params.add("attributes", attributes);
Iterator<Map.Entry<String, String>> itr = mapAttributes.entrySet().iterator();
while (itr.hasNext()) {
Map.Entry<String, String> pair = itr.next();
attributes.addProperty(pair.getKey(), pair.getValue());
}
}
final Gson gson = new GsonBuilder().create();
String strVolumeToCreateJson = gson.toJson(volumeToCreate);
@ -581,14 +623,46 @@ public class SolidFireUtil {
return volumeCreateResult.result.volumeID;
}
public static void modifySolidFireVolume(SolidFireConnection sfConnection, long volumeId, long totalSize, String strCloudStackVolumeSize,
public static void modifySolidFireVolume(SolidFireConnection sfConnection, long volumeId, Long totalSize, Map<String, String> mapAttributes,
long minIops, long maxIops, long burstIops)
{
final Gson gson = new GsonBuilder().create();
JsonObject volumeToModify = new JsonObject();
Object volumeToModify = strCloudStackVolumeSize != null && strCloudStackVolumeSize.trim().length() > 0 ?
new VolumeToModifyWithCloudStackVolumeSize(volumeId, totalSize, strCloudStackVolumeSize, minIops, maxIops, burstIops) :
new VolumeToModify(volumeId, totalSize, minIops, maxIops, burstIops);
volumeToModify.addProperty("method", "ModifyVolume");
JsonObject params = new JsonObject();
volumeToModify.add("params", params);
params.addProperty("volumeID", volumeId);
if (totalSize != null) {
params.addProperty("totalSize", totalSize);
}
JsonObject qos = new JsonObject();
params.add("qos", qos);
qos.addProperty("minIOPS", minIops);
qos.addProperty("maxIOPS", maxIops);
qos.addProperty("burstIOPS", burstIops);
if (mapAttributes != null && mapAttributes.size() > 0) {
JsonObject attributes = new JsonObject();
params.add("attributes", attributes);
Iterator<Map.Entry<String, String>> itr = mapAttributes.entrySet().iterator();
while (itr.hasNext()) {
Map.Entry<String, String> pair = itr.next();
attributes.addProperty(pair.getKey(), pair.getValue());
}
}
final Gson gson = new GsonBuilder().create();
String strVolumeToModifyJson = gson.toJson(volumeToModify);
@ -687,7 +761,7 @@ public class SolidFireUtil {
executeJsonRpc(sfConnection, strVolumeToDeleteJson);
}
public static void purgeSolidFireVolume(SolidFireConnection sfConnection, long lVolumeId)
public static void purgeSolidFireVolume(SolidFireConnection sfConnection, long lVolumeId)
{
final Gson gson = new GsonBuilder().create();
@ -800,10 +874,51 @@ public class SolidFireUtil {
}
}
public static long createSolidFireSnapshot(SolidFireConnection sfConnection, long lVolumeId, String snapshotName) {
final Gson gson = new GsonBuilder().create();
public static class SolidFireSnapshot {
private final long _id;
private final String _name;
SnapshotToCreate snapshotToCreate = new SnapshotToCreate(lVolumeId, snapshotName);
public SolidFireSnapshot(long id, String name) {
_id = id;
_name = name;
}
public long getId() {
return _id;
}
public String getName() {
return _name;
}
}
public static long createSolidFireSnapshot(SolidFireConnection sfConnection, long lVolumeId, String snapshotName, Map<String, String> mapAttributes) {
JsonObject snapshotToCreate = new JsonObject();
snapshotToCreate.addProperty("method", "CreateSnapshot");
JsonObject params = new JsonObject();
snapshotToCreate.add("params", params);
params.addProperty("volumeID", lVolumeId);
params.addProperty("name", snapshotName);
if (mapAttributes != null && mapAttributes.size() > 0) {
JsonObject attributes = new JsonObject();
params.add("attributes", attributes);
Iterator<Map.Entry<String, String>> itr = mapAttributes.entrySet().iterator();
while (itr.hasNext()) {
Map.Entry<String, String> pair = itr.next();
attributes.addProperty(pair.getKey(), pair.getValue());
}
}
final Gson gson = new GsonBuilder().create();
String strSnapshotToCreateJson = gson.toJson(snapshotToCreate);
@ -816,6 +931,38 @@ public class SolidFireUtil {
return snapshotCreateResult.result.snapshotID;
}
public static SolidFireSnapshot getSolidFireSnapshot(SolidFireConnection sfConnection, long lVolumeId, long lSnapshotId) {
final Gson gson = new GsonBuilder().create();
SnapshotsToGet snapshotsToGet = new SnapshotsToGet(lVolumeId);
String strSnapshotsToGetJson = gson.toJson(snapshotsToGet);
String strSnapshotsGetResultJson = executeJsonRpc(sfConnection, strSnapshotsToGetJson);
SnapshotsGetResult snapshotsGetResult = gson.fromJson(strSnapshotsGetResultJson, SnapshotsGetResult.class);
verifyResult(snapshotsGetResult.result, strSnapshotsGetResultJson, gson);
String snapshotName = null;
if (snapshotsGetResult.result.snapshots != null) {
for (SnapshotsGetResult.Result.Snapshot snapshot : snapshotsGetResult.result.snapshots) {
if (snapshot.snapshotID == lSnapshotId) {
snapshotName = snapshot.name;
break;
}
}
}
if (snapshotName == null) {
throw new CloudRuntimeException("Could not find SolidFire snapshot ID: " + lSnapshotId + " for the following SolidFire volume ID: " + lVolumeId);
}
return new SolidFireSnapshot(lSnapshotId, snapshotName);
}
public static void deleteSolidFireSnapshot(SolidFireConnection sfConnection, long lSnapshotId)
{
final Gson gson = new GsonBuilder().create();
@ -841,10 +988,40 @@ public class SolidFireUtil {
verifyResult(rollbackInitiatedResult.result, strRollbackInitiatedResultJson, gson);
}
public static long createSolidFireClone(SolidFireConnection sfConnection, long lVolumeId, long lSnapshotId, String cloneName) {
final Gson gson = new GsonBuilder().create();
public static long createSolidFireClone(SolidFireConnection sfConnection, long lVolumeId, long lSnapshotId, long sfAccountId,
String cloneName, Map<String, String> mapAttributes) {
JsonObject cloneToCreate = new JsonObject();
CloneToCreate cloneToCreate = new CloneToCreate(lVolumeId, lSnapshotId, cloneName);
cloneToCreate.addProperty("method", "CloneVolume");
JsonObject params = new JsonObject();
cloneToCreate.add("params", params);
params.addProperty("volumeID", lVolumeId);
if (lSnapshotId > 0) {
params.addProperty("snapshotID", lSnapshotId);
}
params.addProperty("newAccountID", sfAccountId);
params.addProperty("name", cloneName);
if (mapAttributes != null && mapAttributes.size() > 0) {
JsonObject attributes = new JsonObject();
params.add("attributes", attributes);
Iterator<Map.Entry<String, String>> itr = mapAttributes.entrySet().iterator();
while (itr.hasNext()) {
Map.Entry<String, String> pair = itr.next();
attributes.addProperty(pair.getKey(), pair.getValue());
}
}
final Gson gson = new GsonBuilder().create();
String strCloneToCreateJson = gson.toJson(cloneToCreate);
@ -854,7 +1031,33 @@ public class SolidFireUtil {
verifyResult(cloneCreateResult.result, strCloneCreateResultJson, gson);
return cloneCreateResult.result.cloneID;
// Clone is an async operation. Poll until we get data.
AsyncJobToPoll asyncJobToPoll = new AsyncJobToPoll(cloneCreateResult.result.asyncHandle);
String strAsyncJobToPollJson = gson.toJson(asyncJobToPoll);
do {
String strAsyncJobResultJson = executeJsonRpc(sfConnection, strAsyncJobToPollJson);
AsyncJobResult asyncJobResult = gson.fromJson(strAsyncJobResultJson, AsyncJobResult.class);
verifyResult(asyncJobResult.result, strAsyncJobResultJson, gson);
if (asyncJobResult.result.status.equals("complete")) {
break;
}
try {
Thread.sleep(500); // sleep for 1/2 of a second
}
catch (Exception ex) {
// ignore
}
}
while (true);
return cloneCreateResult.result.volumeID;
}
public static long createSolidFireAccount(SolidFireConnection sfConnection, String strAccountName)
@ -1134,189 +1337,6 @@ public class SolidFireUtil {
}
}
@SuppressWarnings("unused")
private static final class VolumeToCreateWithCloudStackVolumeSize {
private final String method = "CreateVolume";
private final VolumeToCreateParams params;
private VolumeToCreateWithCloudStackVolumeSize(final String strVolumeName, final long lAccountId, final long lTotalSize,
final boolean bEnable512e, final String strCloudStackVolumeSize, final long lMinIOPS, final long lMaxIOPS, final long lBurstIOPS) {
params = new VolumeToCreateParams(strVolumeName, lAccountId, lTotalSize, bEnable512e, strCloudStackVolumeSize, lMinIOPS, lMaxIOPS, lBurstIOPS);
}
private static final class VolumeToCreateParams {
private final String name;
private final long accountID;
private final long totalSize;
private final boolean enable512e;
private final VolumeToCreateParamsAttributes attributes;
private final VolumeToCreateParamsQoS qos;
private VolumeToCreateParams(final String strVolumeName, final long lAccountId, final long lTotalSize, final boolean bEnable512e,
final String strCloudStackVolumeSize, final long lMinIOPS, final long lMaxIOPS, final long lBurstIOPS) {
name = strVolumeName;
accountID = lAccountId;
totalSize = lTotalSize;
enable512e = bEnable512e;
attributes = new VolumeToCreateParamsAttributes(strCloudStackVolumeSize);
qos = new VolumeToCreateParamsQoS(lMinIOPS, lMaxIOPS, lBurstIOPS);
}
private static final class VolumeToCreateParamsAttributes {
private final String CloudStackVolumeSize;
private VolumeToCreateParamsAttributes(final String strCloudStackVolumeSize) {
CloudStackVolumeSize = strCloudStackVolumeSize;
}
}
private static final class VolumeToCreateParamsQoS {
private final long minIOPS;
private final long maxIOPS;
private final long burstIOPS;
private VolumeToCreateParamsQoS(final long lMinIOPS, final long lMaxIOPS, final long lBurstIOPS) {
minIOPS = lMinIOPS;
maxIOPS = lMaxIOPS;
burstIOPS = lBurstIOPS;
}
}
}
}
@SuppressWarnings("unused")
private static final class VolumeToCreate {
private final String method = "CreateVolume";
private final VolumeToCreateParams params;
private VolumeToCreate(final String strVolumeName, final long lAccountId, final long lTotalSize, final boolean bEnable512e,
final long lMinIOPS, final long lMaxIOPS, final long lBurstIOPS) {
params = new VolumeToCreateParams(strVolumeName, lAccountId, lTotalSize, bEnable512e, lMinIOPS, lMaxIOPS, lBurstIOPS);
}
private static final class VolumeToCreateParams {
private final String name;
private final long accountID;
private final long totalSize;
private final boolean enable512e;
private final VolumeToCreateParamsQoS qos;
private VolumeToCreateParams(final String strVolumeName, final long lAccountId, final long lTotalSize, final boolean bEnable512e,
final long lMinIOPS, final long lMaxIOPS, final long lBurstIOPS) {
name = strVolumeName;
accountID = lAccountId;
totalSize = lTotalSize;
enable512e = bEnable512e;
qos = new VolumeToCreateParamsQoS(lMinIOPS, lMaxIOPS, lBurstIOPS);
}
private static final class VolumeToCreateParamsQoS {
private final long minIOPS;
private final long maxIOPS;
private final long burstIOPS;
private VolumeToCreateParamsQoS(final long lMinIOPS, final long lMaxIOPS, final long lBurstIOPS) {
minIOPS = lMinIOPS;
maxIOPS = lMaxIOPS;
burstIOPS = lBurstIOPS;
}
}
}
}
@SuppressWarnings("unused")
private static final class VolumeToModifyWithCloudStackVolumeSize
{
private final String method = "ModifyVolume";
private final VolumeToModifyParams params;
private VolumeToModifyWithCloudStackVolumeSize(final long lVolumeId, final long lTotalSize, final String strCloudStackVolumeSize,
final long lMinIOPS, final long lMaxIOPS, final long lBurstIOPS)
{
params = new VolumeToModifyParams(lVolumeId, lTotalSize, strCloudStackVolumeSize, lMinIOPS, lMaxIOPS, lBurstIOPS);
}
private static final class VolumeToModifyParams
{
private final long volumeID;
private final long totalSize;
private final VolumeToModifyParamsAttributes attributes;
private final VolumeToModifyParamsQoS qos;
private VolumeToModifyParams(final long lVolumeId, final long lTotalSize, String strCloudStackVolumeSize, final long lMinIOPS, final long lMaxIOPS, final long lBurstIOPS)
{
volumeID = lVolumeId;
totalSize = lTotalSize;
attributes = new VolumeToModifyParamsAttributes(strCloudStackVolumeSize);
qos = new VolumeToModifyParamsQoS(lMinIOPS, lMaxIOPS, lBurstIOPS);
}
}
private static final class VolumeToModifyParamsAttributes {
private final String CloudStackVolumeSize;
private VolumeToModifyParamsAttributes(final String strCloudStackVolumeSize) {
CloudStackVolumeSize = strCloudStackVolumeSize;
}
}
private static final class VolumeToModifyParamsQoS {
private final long minIOPS;
private final long maxIOPS;
private final long burstIOPS;
private VolumeToModifyParamsQoS(final long lMinIOPS, final long lMaxIOPS, final long lBurstIOPS) {
minIOPS = lMinIOPS;
maxIOPS = lMaxIOPS;
burstIOPS = lBurstIOPS;
}
}
}
@SuppressWarnings("unused")
private static final class VolumeToModify
{
private final String method = "ModifyVolume";
private final VolumeToModifyParams params;
private VolumeToModify(final long lVolumeId, final long lTotalSize, final long lMinIOPS, final long lMaxIOPS, final long lBurstIOPS)
{
params = new VolumeToModifyParams(lVolumeId, lTotalSize, lMinIOPS, lMaxIOPS, lBurstIOPS);
}
private static final class VolumeToModifyParams
{
private final long volumeID;
private final long totalSize;
private final VolumeToModifyParamsQoS qos;
private VolumeToModifyParams(final long lVolumeId, final long lTotalSize, final long lMinIOPS, final long lMaxIOPS, final long lBurstIOPS)
{
volumeID = lVolumeId;
totalSize = lTotalSize;
qos = new VolumeToModifyParamsQoS(lMinIOPS, lMaxIOPS, lBurstIOPS);
}
}
private static final class VolumeToModifyParamsQoS {
private final long minIOPS;
private final long maxIOPS;
private final long burstIOPS;
private VolumeToModifyParamsQoS(final long lMinIOPS, final long lMaxIOPS, final long lBurstIOPS) {
minIOPS = lMinIOPS;
maxIOPS = lMaxIOPS;
burstIOPS = lBurstIOPS;
}
}
}
@SuppressWarnings("unused")
private static final class VolumeToGet
{
@ -1407,21 +1427,20 @@ public class SolidFireUtil {
}
@SuppressWarnings("unused")
private static final class SnapshotToCreate {
private final String method = "CreateSnapshot";
private final SnapshotToCreateParams params;
private static final class SnapshotsToGet
{
private final String method = "ListSnapshots";
private final SnapshotsToGetParams params;
private SnapshotToCreate(final long lVolumeId, final String snapshotName) {
params = new SnapshotToCreateParams(lVolumeId, snapshotName);
private SnapshotsToGet(final long lVolumeId) {
params = new SnapshotsToGetParams(lVolumeId);
}
private static final class SnapshotToCreateParams {
private static final class SnapshotsToGetParams {
private final long volumeID;
private final String name;
private SnapshotToCreateParams(final long lVolumeId, final String snapshotName) {
private SnapshotsToGetParams(final long lVolumeId) {
volumeID = lVolumeId;
name = snapshotName;
}
}
}
@ -1465,28 +1484,6 @@ public class SolidFireUtil {
}
}
@SuppressWarnings("unused")
private static final class CloneToCreate {
private final String method = "CloneVolume";
private final CloneToCreateParams params;
private CloneToCreate(final long lVolumeId, final long lSnapshotId, final String cloneName) {
params = new CloneToCreateParams(lVolumeId, lSnapshotId, cloneName);
}
private static final class CloneToCreateParams {
private final long volumeID;
private final long snapshotID;
private final String name;
private CloneToCreateParams(final long lVolumeId, final long lSnapshotId, final String cloneName) {
volumeID = lVolumeId;
snapshotID = lSnapshotId;
name = cloneName;
}
}
}
@SuppressWarnings("unused")
private static final class AccountToAdd
{
@ -1680,6 +1677,28 @@ public class SolidFireUtil {
}
}
@SuppressWarnings("unused")
private static final class AsyncJobToPoll
{
private final String method = "GetAsyncResult";
private final AsyncJobToPollParams params;
private AsyncJobToPoll(final long asyncHandle)
{
params = new AsyncJobToPollParams(asyncHandle);
}
private static final class AsyncJobToPollParams
{
private final long asyncHandle;
private AsyncJobToPollParams(final long asyncHandle)
{
this.asyncHandle = asyncHandle;
}
}
}
private static final class VolumeCreateResult {
private Result result;
@ -1721,6 +1740,19 @@ public class SolidFireUtil {
}
}
private static final class SnapshotsGetResult {
private Result result;
private static final class Result {
private Snapshot[] snapshots;
private static final class Snapshot {
private long snapshotID;
private String name;
}
}
}
@SuppressWarnings("unused")
private static final class RollbackInitiatedResult {
private Result result;
@ -1734,7 +1766,8 @@ public class SolidFireUtil {
private Result result;
private static final class Result {
private long cloneID;
private long volumeID;
private long asyncHandle;
}
}
@ -1786,6 +1819,15 @@ public class SolidFireUtil {
}
}
private static final class AsyncJobResult {
private AsyncResult result;
private static final class AsyncResult
{
private String status;
}
}
private static final class JsonError
{
private Error error;

View File

@ -549,28 +549,35 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager,
return getUsedBytes(pool);
}
else {
// Get size for all the non-destroyed volumes
// Get size for all the non-destroyed volumes.
Pair<Long, Long> sizes = _volumeDao.getNonDestroyedCountAndTotalByPool(pool.getId());
totalAllocatedSize = sizes.second() + sizes.first() * _extraBytesPerVolume;
}
// Get size for VM Snapshots
totalAllocatedSize = totalAllocatedSize + _volumeDao.getVMSnapshotSizeByPool(pool.getId());
// Get size for VM Snapshots.
totalAllocatedSize += _volumeDao.getVMSnapshotSizeByPool(pool.getId());
// Iterate through all templates on this storage pool
boolean tmpinstalled = false;
List<VMTemplateStoragePoolVO> templatePoolVOs;
templatePoolVOs = _templatePoolDao.listByPoolId(pool.getId());
boolean tmpInstalled = false;
// Iterate through all templates on this storage pool.
List<VMTemplateStoragePoolVO> templatePoolVOs = _templatePoolDao.listByPoolId(pool.getId());
for (VMTemplateStoragePoolVO templatePoolVO : templatePoolVOs) {
if ((templateForVmCreation != null) && !tmpinstalled && (templatePoolVO.getTemplateId() == templateForVmCreation.getId())) {
tmpinstalled = true;
if ((templateForVmCreation != null) && !tmpInstalled && (templatePoolVO.getTemplateId() == templateForVmCreation.getId())) {
tmpInstalled = true;
}
long templateSize = templatePoolVO.getTemplateSize();
totalAllocatedSize += templateSize + _extraBytesPerVolume;
}
if ((templateForVmCreation != null) && !tmpInstalled) {
long templateForVmCreationSize = templateForVmCreation.getSize() != null ? templateForVmCreation.getSize() : 0;
totalAllocatedSize += templateForVmCreationSize + _extraBytesPerVolume;
}
return totalAllocatedSize;
}

View File

@ -1234,7 +1234,8 @@ StateListener<State, VirtualMachine.Event, VirtualMachine> {
requestVolumes = new ArrayList<Volume>();
requestVolumes.add(vol);
if (!_storageMgr.storagePoolHasEnoughSpace(requestVolumes, potentialSPool))
if (!_storageMgr.storagePoolHasEnoughIops(requestVolumes, potentialSPool) ||
!_storageMgr.storagePoolHasEnoughSpace(requestVolumes, potentialSPool, potentialHost.getClusterId()))
continue;
volumeAllocationMap.put(potentialSPool, requestVolumes);
}

View File

@ -44,9 +44,11 @@ import com.cloud.dc.dao.DataCenterDao;
import com.cloud.dc.dao.DataCenterDetailsDao;
import com.cloud.dc.dao.HostPodDao;
import com.cloud.exception.ConnectionException;
import com.cloud.host.DetailVO;
import com.cloud.host.Host;
import com.cloud.host.HostVO;
import com.cloud.host.dao.HostDao;
import com.cloud.host.dao.HostDetailsDao;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.utils.component.AdapterBase;
import com.cloud.utils.exception.CloudRuntimeException;
@ -69,6 +71,8 @@ public class CloudZonesStartupProcessor extends AdapterBase implements StartupCo
@Inject
HostDao _hostDao = null;
@Inject
private HostDetailsDao hostDetailsDao;
@Inject
HostPodDao _podDao = null;
@Inject
DataCenterDetailsDao _zoneDetailsDao = null;
@ -319,6 +323,25 @@ public class CloudZonesStartupProcessor extends AdapterBase implements StartupCo
host.setHypervisorType(hyType);
host.setHypervisorVersion(scc.getHypervisorVersion());
updateHostDetails(host, scc);
}
private void updateHostDetails(HostVO host, StartupRoutingCommand startupRoutingCmd) {
final String name = "supportsResign";
final String value = String.valueOf(startupRoutingCmd.getSupportsClonedVolumes());
DetailVO hostDetail = hostDetailsDao.findDetail(host.getId(), name);
if (hostDetail != null) {
hostDetail.setValue(value);
hostDetailsDao.update(hostDetail.getId(), hostDetail);
}
else {
hostDetail = new DetailVO(host.getId(), name, value);
hostDetailsDao.persist(hostDetail);
}
}
private boolean checkCIDR(Host.Type type, HostPodVO pod, String serverPrivateIP, String serverPrivateNetmask) {

View File

@ -1733,6 +1733,12 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager,
_hostDao.update(host.getId(), host);
}
if (startup instanceof StartupRoutingCommand) {
final StartupRoutingCommand ssCmd = (StartupRoutingCommand)startup;
updateHostDetails(host, ssCmd);
}
try {
resourceStateTransitTo(host, ResourceState.Event.InternalCreated, _nodeId);
/* Agent goes to Connecting status */
@ -1750,6 +1756,24 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager,
return host;
}
private void updateHostDetails(HostVO host, StartupRoutingCommand startupRoutingCmd) {
final String name = "supportsResign";
final String value = String.valueOf(startupRoutingCmd.getSupportsClonedVolumes());
DetailVO hostDetail = _hostDetailsDao.findDetail(host.getId(), name);
if (hostDetail != null) {
hostDetail.setValue(value);
_hostDetailsDao.update(hostDetail.getId(), hostDetail);
}
else {
hostDetail = new DetailVO(host.getId(), name, value);
_hostDetailsDao.persist(hostDetail);
}
}
private boolean isFirstHostInCluster(final HostVO host) {
boolean isFirstHost = true;
if (host.getClusterId() != null) {

View File

@ -21,16 +21,21 @@ public class ResizeVolumePayload {
public final Long newSize;
public final Long newMinIops;
public final Long newMaxIops;
public final Integer newHypervisorSnapshotReserve;
public final boolean shrinkOk;
public final String instanceName;
public final long[] hosts;
public final boolean isManaged;
public ResizeVolumePayload(Long newSize, Long newMinIops, Long newMaxIops, boolean shrinkOk, String instanceName, long[] hosts) {
public ResizeVolumePayload(Long newSize, Long newMinIops, Long newMaxIops, Integer newHypervisorSnapshotReserve, boolean shrinkOk,
String instanceName, long[] hosts, boolean isManaged) {
this.newSize = newSize;
this.newMinIops = newMinIops;
this.newMaxIops = newMaxIops;
this.newHypervisorSnapshotReserve = newHypervisorSnapshotReserve;
this.shrinkOk = shrinkOk;
this.instanceName = instanceName;
this.hosts = hosts;
this.isManaged = isManaged;
}
}

View File

@ -106,6 +106,30 @@ public interface StorageManager extends StorageService {
boolean storagePoolHasEnoughSpace(List<Volume> volume, StoragePool pool);
/**
* This comment is relevant to managed storage only.
*
* Long clusterId = only used for managed storage
*
* Some managed storage can be more efficient handling VM templates (via cloning) if it knows the capabilities of the compute cluster it is dealing with.
* If the compute cluster supports UUID resigning and the storage system can clone a volume from a volume, then this determines how much more space a
* new root volume (that makes use of a template) will take up on the storage system.
*
* For example, if a storage system can clone a volume from a volume and the compute cluster supports UUID resigning (relevant for hypervisors like
* XenServer and ESXi that put virtual disks in clustered file systems), then the storage system will need to determine if it already has a copy of
* the template or if it will need to create one first before cloning the template to a new volume to be used for the new root disk (assuming the root
disk is being deployed from a template). If the template doesn't already exist on the storage system, then you need to take into consideration space
* required for that template (stored in one volume) and space required for a new volume created from that template volume (for your new root volume).
*
* If UUID resigning is not available in the compute cluster or the storage system doesn't support cloning a volume from a volume, then for each new
* root disk that uses a template, CloudStack will have the template be copied down to a newly created volume on the storage system (i.e. no need
* to take into consideration the possible need to first create a volume on the storage system for a template that will be used for the root disk
* via cloning).
*
* Cloning volumes on the back-end instead of copying down a new template for each new volume helps to alleviate load on the hypervisors.
*/
boolean storagePoolHasEnoughSpace(List<Volume> volume, StoragePool pool, Long clusterId);
boolean registerHostListener(String providerUuid, HypervisorHostListener listener);
void connectHostToSharedPool(long hostId, long poolId) throws StorageUnavailableException, StorageConflictException;

View File

@ -70,6 +70,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCy
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateDataFactory;
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateService;
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateService.TemplateApiResult;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory;
@ -1668,9 +1669,9 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
return false;
}
// Only IOPS guaranteed primary storage like SolidFire is using/setting IOPS.
// Only IOPS-guaranteed primary storage like SolidFire is using/setting IOPS.
// This check returns true for storage that does not specify IOPS.
if (pool.getCapacityIops() == null ) {
if (pool.getCapacityIops() == null) {
s_logger.info("Storage pool " + pool.getName() + " (" + pool.getId() + ") does not supply IOPS capacity, assuming enough capacity");
return true;
@ -1696,6 +1697,11 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
@Override
public boolean storagePoolHasEnoughSpace(List<Volume> volumes, StoragePool pool) {
return storagePoolHasEnoughSpace(volumes, pool, null);
}
@Override
public boolean storagePoolHasEnoughSpace(List<Volume> volumes, StoragePool pool, Long clusterId) {
if (volumes == null || volumes.isEmpty()) {
return false;
}
@ -1704,10 +1710,11 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
return false;
}
// allocated space includes template of specified volume
// allocated space includes templates
StoragePoolVO poolVO = _storagePoolDao.findById(pool.getId());
long allocatedSizeWithtemplate = _capacityMgr.getAllocatedPoolCapacity(poolVO, null);
long allocatedSizeWithTemplate = _capacityMgr.getAllocatedPoolCapacity(poolVO, null);
long totalAskingSize = 0;
for (Volume volume : volumes) {
// refreshing the volume from the DB to get latest hv_ss_reserve (hypervisor snapshot reserve) field
// I could have just assigned this to "volume", but decided to make a new variable for it so that it
@ -1718,18 +1725,37 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
// update the volume's hv_ss_reserve (hypervisor snapshot reserve) from a disk offering (used for managed storage)
volService.updateHypervisorSnapshotReserveForVolume(getDiskOfferingVO(volumeVO), volumeVO.getId(), getHypervisorType(volumeVO));
// hv_ss_reserve field might have been updated; refresh from DB to make use of it in getVolumeSizeIncludingHypervisorSnapshotReserve
// hv_ss_reserve field might have been updated; refresh from DB to make use of it in getDataObjectSizeIncludingHypervisorSnapshotReserve
volumeVO = _volumeDao.findById(volume.getId());
}
if (volumeVO.getTemplateId() != null) {
VMTemplateVO tmpl = _templateDao.findByIdIncludingRemoved(volumeVO.getTemplateId());
if (tmpl != null && tmpl.getFormat() != ImageFormat.ISO) {
allocatedSizeWithtemplate = _capacityMgr.getAllocatedPoolCapacity(poolVO, tmpl);
// this if statement should resolve to true at most once per execution of the for loop its contained within (for a root disk that is
// to leverage a template)
if (volume.getTemplateId() != null) {
VMTemplateVO tmpl = _templateDao.findByIdIncludingRemoved(volume.getTemplateId());
if (tmpl != null && !ImageFormat.ISO.equals(tmpl.getFormat())) {
allocatedSizeWithTemplate = _capacityMgr.getAllocatedPoolCapacity(poolVO, tmpl);
}
}
if (volumeVO.getState() != Volume.State.Ready) {
totalAskingSize = totalAskingSize + getVolumeSizeIncludingHypervisorSnapshotReserve(volumeVO, pool);
totalAskingSize += getDataObjectSizeIncludingHypervisorSnapshotReserve(volumeVO, pool);
if (ScopeType.ZONE.equals(poolVO.getScope()) && volumeVO.getTemplateId() != null) {
VMTemplateVO tmpl = _templateDao.findByIdIncludingRemoved(volumeVO.getTemplateId());
if (tmpl != null && !ImageFormat.ISO.equals(tmpl.getFormat())) {
// Storage plug-ins for zone-wide primary storage can be designed in such a way as to store a template on the
// primary storage once and make use of it in different clusters (via cloning).
// This next call leads to CloudStack asking how many more bytes it will need for the template (if the template is
// already stored on the primary storage, then the answer is 0).
if (clusterId != null && _clusterDao.computeWhetherClusterSupportsResigning(clusterId)) {
totalAskingSize += getBytesRequiredForTemplate(tmpl, pool);
}
}
}
}
}
@ -1749,11 +1775,11 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
double storageAllocatedThreshold = CapacityManager.StorageAllocatedCapacityDisableThreshold.valueIn(pool.getDataCenterId());
if (s_logger.isDebugEnabled()) {
s_logger.debug("Checking pool: " + pool.getId() + " for volume allocation " + volumes.toString() + ", maxSize : " + totalOverProvCapacity +
", totalAllocatedSize : " + allocatedSizeWithtemplate + ", askingSize : " + totalAskingSize + ", allocated disable threshold: " +
", totalAllocatedSize : " + allocatedSizeWithTemplate + ", askingSize : " + totalAskingSize + ", allocated disable threshold: " +
storageAllocatedThreshold);
}
double usedPercentage = (allocatedSizeWithtemplate + totalAskingSize) / (double)(totalOverProvCapacity);
double usedPercentage = (allocatedSizeWithTemplate + totalAskingSize) / (double)(totalOverProvCapacity);
if (usedPercentage > storageAllocatedThreshold) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Insufficient un-allocated capacity on: " + pool.getId() + " for volume allocation: " + volumes.toString() +
@ -1763,10 +1789,10 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
return false;
}
if (totalOverProvCapacity < (allocatedSizeWithtemplate + totalAskingSize)) {
if (totalOverProvCapacity < (allocatedSizeWithTemplate + totalAskingSize)) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Insufficient un-allocated capacity on: " + pool.getId() + " for volume allocation: " + volumes.toString() +
", not enough storage, maxSize : " + totalOverProvCapacity + ", totalAllocatedSize : " + allocatedSizeWithtemplate + ", askingSize : " +
", not enough storage, maxSize : " + totalOverProvCapacity + ", totalAllocatedSize : " + allocatedSizeWithTemplate + ", askingSize : " +
totalAskingSize);
}
return false;
@ -1792,19 +1818,36 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
return null;
}
private long getVolumeSizeIncludingHypervisorSnapshotReserve(Volume volume, StoragePool pool) {
private long getDataObjectSizeIncludingHypervisorSnapshotReserve(Volume volume, StoragePool pool) {
DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(pool.getStorageProviderName());
DataStoreDriver storeDriver = storeProvider.getDataStoreDriver();
if (storeDriver instanceof PrimaryDataStoreDriver) {
PrimaryDataStoreDriver primaryStoreDriver = (PrimaryDataStoreDriver)storeDriver;
return primaryStoreDriver.getVolumeSizeIncludingHypervisorSnapshotReserve(volume, pool);
VolumeInfo volumeInfo = volFactory.getVolume(volume.getId());
return primaryStoreDriver.getDataObjectSizeIncludingHypervisorSnapshotReserve(volumeInfo, pool);
}
return volume.getSize();
}
private long getBytesRequiredForTemplate(VMTemplateVO tmpl, StoragePool pool) {
DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(pool.getStorageProviderName());
DataStoreDriver storeDriver = storeProvider.getDataStoreDriver();
if (storeDriver instanceof PrimaryDataStoreDriver) {
PrimaryDataStoreDriver primaryStoreDriver = (PrimaryDataStoreDriver)storeDriver;
TemplateInfo templateInfo = tmplFactory.getReadyTemplateOnImageStore(tmpl.getId(), pool.getDataCenterId());
return primaryStoreDriver.getBytesRequiredForTemplate(templateInfo, pool);
}
return tmpl.getSize();
}
@Override
public void createCapacityEntry(long poolId) {
StoragePoolVO storage = _storagePoolDao.findById(poolId);

View File

@ -839,6 +839,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
Long newSize = null;
Long newMinIops = null;
Long newMaxIops = null;
Integer newHypervisorSnapshotReserve = null;
boolean shrinkOk = cmd.getShrinkOk();
VolumeVO volume = _volsDao.findById(cmd.getEntityId());
@ -881,6 +882,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
// if we are to use the existing disk offering
if (newDiskOffering == null) {
newSize = cmd.getSize();
newHypervisorSnapshotReserve = volume.getHypervisorSnapshotReserve();
// if the caller is looking to change the size of the volume
if (newSize != null) {
@ -939,10 +941,6 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
throw new InvalidParameterValueException("There are no tags on the current disk offering. The new disk offering needs to have no tags, as well.");
}
if (!areIntegersEqual(diskOffering.getHypervisorSnapshotReserve(), newDiskOffering.getHypervisorSnapshotReserve())) {
throw new InvalidParameterValueException("The hypervisor snapshot reverse on the new and old disk offerings must be equal.");
}
if (newDiskOffering.getDomainId() != null) {
// not a public offering; check access
_configMgr.checkDiskOfferingAccess(CallContext.current().getCallingAccount(), newDiskOffering);
@ -975,6 +973,9 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
newMinIops = newDiskOffering.getMinIops();
newMaxIops = newDiskOffering.getMaxIops();
}
// if the hypervisor snapshot reserve value is null, it must remain null (currently only KVM uses null and null is all KVM uses for a value here)
newHypervisorSnapshotReserve = volume.getHypervisorSnapshotReserve() != null ? newDiskOffering.getHypervisorSnapshotReserve() : null;
}
long currentSize = volume.getSize();
@ -1013,6 +1014,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
volume.setSize(newSize);
volume.setMinIops(newMinIops);
volume.setMaxIops(newMaxIops);
volume.setHypervisorSnapshotReserve(newHypervisorSnapshotReserve);
if (newDiskOffering != null) {
volume.setDiskOfferingId(cmd.getNewDiskOfferingId());
@ -1038,13 +1040,13 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
try {
return orchestrateResizeVolume(volume.getId(), currentSize, newSize, newMinIops, newMaxIops,
newDiskOffering != null ? cmd.getNewDiskOfferingId() : null, shrinkOk);
newHypervisorSnapshotReserve, newDiskOffering != null ? cmd.getNewDiskOfferingId() : null, shrinkOk);
} finally {
_workJobDao.expunge(placeHolder.getId());
}
} else {
Outcome<Volume> outcome = resizeVolumeThroughJobQueue(userVm.getId(), volume.getId(), currentSize, newSize, newMinIops, newMaxIops,
newDiskOffering != null ? cmd.getNewDiskOfferingId() : null, shrinkOk);
newHypervisorSnapshotReserve, newDiskOffering != null ? cmd.getNewDiskOfferingId() : null, shrinkOk);
try {
outcome.get();
@ -1079,19 +1081,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
}
return orchestrateResizeVolume(volume.getId(), currentSize, newSize, newMinIops, newMaxIops,
newDiskOffering != null ? cmd.getNewDiskOfferingId() : null, shrinkOk);
}
private static boolean areIntegersEqual(Integer i1, Integer i2) {
if (i1 == null) {
i1 = 0;
}
if (i2 == null) {
i2 = 0;
}
return i1.equals(i2);
newHypervisorSnapshotReserve, newDiskOffering != null ? cmd.getNewDiskOfferingId() : null, shrinkOk);
}
private void validateIops(Long minIops, Long maxIops) {
@ -1106,9 +1096,12 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
}
}
private VolumeVO orchestrateResizeVolume(long volumeId, long currentSize, long newSize, Long newMinIops, Long newMaxIops, Long newDiskOfferingId, boolean shrinkOk) {
private VolumeVO orchestrateResizeVolume(long volumeId, long currentSize, long newSize, Long newMinIops, Long newMaxIops,
Integer newHypervisorSnapshotReserve, Long newDiskOfferingId, boolean shrinkOk) {
VolumeVO volume = _volsDao.findById(volumeId);
UserVmVO userVm = _userVmDao.findById(volume.getInstanceId());
StoragePoolVO storagePool = _storagePoolDao.findById(volume.getPoolId());
boolean isManaged = storagePool.isManaged();
/*
* get a list of hosts to send the commands to, try the system the
* associated vm is running on first, then the last known place it ran.
@ -1127,8 +1120,6 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
final String errorMsg = "The VM must be stopped or the disk detached in order to resize with the XenServer Hypervisor.";
StoragePoolVO storagePool = _storagePoolDao.findById(volume.getPoolId());
if (storagePool.isManaged() && storagePool.getHypervisor() == HypervisorType.Any && hosts != null && hosts.length > 0) {
HostVO host = _hostDao.findById(hosts[0]);
@ -1143,13 +1134,20 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
}
}
ResizeVolumePayload payload = new ResizeVolumePayload(newSize, newMinIops, newMaxIops, shrinkOk, instanceName, hosts);
ResizeVolumePayload payload = new ResizeVolumePayload(newSize, newMinIops, newMaxIops, newHypervisorSnapshotReserve,
shrinkOk, instanceName, hosts, isManaged);
try {
VolumeInfo vol = volFactory.getVolume(volume.getId());
vol.addPayload(payload);
StoragePoolVO storagePool = _storagePoolDao.findById(vol.getPoolId());
// this call to resize has a different impact depending on whether the
// underlying primary storage is managed or not
// if managed, this is the chance for the plug-in to change IOPS value, if applicable
// if not managed, this is the chance for the plug-in to talk to the hypervisor layer
// to change the size of the disk
AsyncCallFuture<VolumeApiResult> future = volService.resize(vol);
VolumeApiResult result = future.get();
// managed storage is designed in such a way that the storage plug-in does not
// talk to the hypervisor layer; as such, if the storage is managed and the
@ -1165,14 +1163,6 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
_volsDao.update(volume.getId(), volume);
}
// this call to resize has a different impact depending on whether the
// underlying primary storage is managed or not
// if managed, this is the chance for the plug-in to change IOPS value, if applicable
// if not managed, this is the chance for the plug-in to talk to the hypervisor layer
// to change the size of the disk
AsyncCallFuture<VolumeApiResult> future = volService.resize(vol);
VolumeApiResult result = future.get();
if (result.isFailed()) {
s_logger.warn("Failed to resize the volume " + volume);
String details = "";
@ -2758,9 +2748,9 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
return new VmJobVolumeOutcome(workJob, volumeId);
}
public Outcome<Volume> resizeVolumeThroughJobQueue(final Long vmId, final long volumeId,
final long currentSize, final long newSize, final Long newMinIops, final Long newMaxIops, final Long newServiceOfferingId, final boolean shrinkOk) {
public Outcome<Volume> resizeVolumeThroughJobQueue(final Long vmId, final long volumeId, final long currentSize, final long newSize,
final Long newMinIops, final Long newMaxIops, final Integer newHypervisorSnapshotReserve,
final Long newServiceOfferingId, final boolean shrinkOk) {
final CallContext context = CallContext.current();
final User callingUser = context.getCallingUser();
final Account callingAccount = context.getCallingAccount();
@ -2781,7 +2771,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
// save work context info (there are some duplications)
VmWorkResizeVolume workInfo = new VmWorkResizeVolume(callingUser.getId(), callingAccount.getId(), vm.getId(),
VolumeApiServiceImpl.VM_WORK_JOB_HANDLER, volumeId, currentSize, newSize, newMinIops, newMaxIops, newServiceOfferingId, shrinkOk);
VolumeApiServiceImpl.VM_WORK_JOB_HANDLER, volumeId, currentSize, newSize, newMinIops, newMaxIops, newHypervisorSnapshotReserve, newServiceOfferingId, shrinkOk);
workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo));
_jobMgr.submitAsyncJob(workJob, VmWorkConstants.VM_WORK_QUEUE, vm.getId());
@ -2915,7 +2905,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
@ReflectionUse
private Pair<JobInfo.Status, String> orchestrateResizeVolume(VmWorkResizeVolume work) throws Exception {
Volume vol = orchestrateResizeVolume(work.getVolumeId(), work.getCurrentSize(), work.getNewSize(), work.getNewMinIops(), work.getNewMaxIops(),
work.getNewServiceOfferingId(), work.isShrinkOk());
work.getNewHypervisorSnapshotReserve(), work.getNewServiceOfferingId(), work.isShrinkOk());
return new Pair<JobInfo.Status, String>(JobInfo.Status.SUCCEEDED,
_jobMgr.marshallResultObject(new Long(vol.getId())));
}

View File

@ -35,6 +35,7 @@ import org.apache.cloudstack.api.command.user.snapshot.ListSnapshotsCmd;
import org.apache.cloudstack.api.command.user.snapshot.UpdateSnapshotPolicyCmd;
import org.apache.cloudstack.context.CallContext;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreCapabilities;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint;
import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector;
@ -1013,9 +1014,14 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement
try {
postCreateSnapshot(volume.getId(), snapshotId, payload.getSnapshotPolicyId());
SnapshotDataStoreVO snapshotStoreRef = _snapshotStoreDao.findBySnapshot(snapshotId, DataStoreRole.Image);
DataStoreRole dataStoreRole = getDataStoreRole(snapshot, _snapshotStoreDao, dataStoreMgr);
SnapshotDataStoreVO snapshotStoreRef = _snapshotStoreDao.findBySnapshot(snapshotId, dataStoreRole);
UsageEventUtils.publishUsageEvent(EventTypes.EVENT_SNAPSHOT_CREATE, snapshot.getAccountId(), snapshot.getDataCenterId(), snapshotId, snapshot.getName(),
null, null, snapshotStoreRef.getPhysicalSize(), volume.getSize(), snapshot.getClass().getName(), snapshot.getUuid());
// Correct the resource count of snapshot in case of delta snapshots.
_resourceLimitMgr.decrementResourceCount(snapshotOwner.getId(), ResourceType.secondary_storage, new Long(volume.getSize() - snapshotStoreRef.getPhysicalSize()));
} catch (Exception e) {
@ -1030,6 +1036,30 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement
return snapshot;
}
/**
 * Determines which data-store role holds the usable copy of the given snapshot.
 *
 * If the snapshot has no primary-storage reference, it can only live on secondary
 * (image) storage. If it does have one, the role is Primary only when the backing
 * storage driver advertises the STORAGE_SYSTEM_SNAPSHOT capability (i.e. the
 * storage system keeps back-end snapshots itself); otherwise Image is returned.
 */
private static DataStoreRole getDataStoreRole(Snapshot snapshot, SnapshotDataStoreDao snapshotStoreDao, DataStoreManager dataStoreMgr) {
    SnapshotDataStoreVO snapshotStore = snapshotStoreDao.findBySnapshot(snapshot.getId(), DataStoreRole.Primary);

    if (snapshotStore == null) {
        return DataStoreRole.Image;
    }

    long storagePoolId = snapshotStore.getDataStoreId();
    DataStore dataStore = dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary);

    Map<String, String> mapCapabilities = dataStore.getDriver().getCapabilities();

    if (mapCapabilities != null) {
        String value = mapCapabilities.get(DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT.toString());

        // Boolean.parseBoolean handles a null/absent capability safely (returns false)
        // and avoids the deprecated Boolean(String) boxing constructor.
        if (Boolean.parseBoolean(value)) {
            return DataStoreRole.Primary;
        }
    }

    return DataStoreRole.Image;
}
@Override
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {

View File

@ -38,7 +38,7 @@ import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import org.apache.cloudstack.api.command.user.template.GetUploadParamsForTemplateCmd;
import org.apache.cloudstack.api.response.GetUploadParamsResponse;
import org.apache.cloudstack.framework.async.AsyncCallFuture;
import org.apache.cloudstack.storage.command.TemplateOrVolumePostUploadCommand;
import org.apache.cloudstack.utils.imagestore.ImageStoreUtil;
import org.apache.commons.collections.CollectionUtils;
@ -62,12 +62,14 @@ import org.apache.cloudstack.api.command.user.template.ListTemplatePermissionsCm
import org.apache.cloudstack.api.command.user.template.RegisterTemplateCmd;
import org.apache.cloudstack.api.command.user.template.UpdateTemplateCmd;
import org.apache.cloudstack.api.command.user.template.UpdateTemplatePermissionsCmd;
import org.apache.cloudstack.api.response.GetUploadParamsResponse;
import org.apache.cloudstack.context.CallContext;
import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint;
import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.Scope;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
@ -79,7 +81,6 @@ import org.apache.cloudstack.engine.subsystem.api.storage.TemplateService.Templa
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
import org.apache.cloudstack.framework.async.AsyncCallFuture;
import org.apache.cloudstack.framework.config.ConfigKey;
import org.apache.cloudstack.framework.config.Configurable;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
@ -689,7 +690,9 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager,
}
try {
templateStoragePoolRef.setTemplateSize(0);
templateStoragePoolRef.setDownloadState(VMTemplateStorageResourceAssoc.Status.NOT_DOWNLOADED);
_tmpltPoolDao.update(templateStoragePoolRefId, templateStoragePoolRef);
} finally {
_tmpltPoolDao.releaseFromLockTable(templateStoragePoolRefId);
@ -873,41 +876,55 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager,
@Override
@DB
public void evictTemplateFromStoragePool(VMTemplateStoragePoolVO templatePoolVO) {
//Need to hold the lock, otherwise, another thread may create a volume from the template at the same time.
//Assumption here is that, we will hold the same lock during create volume from template
// Need to hold the lock; otherwise, another thread may create a volume from the template at the same time.
// Assumption here is that we will hold the same lock during create volume from template.
VMTemplateStoragePoolVO templatePoolRef = _tmpltPoolDao.acquireInLockTable(templatePoolVO.getId());
if (templatePoolRef == null) {
s_logger.debug("can't aquire the lock for template pool ref:" + templatePoolVO.getId());
s_logger.debug("Can't aquire the lock for template pool ref: " + templatePoolVO.getId());
return;
}
try {
StoragePool pool = (StoragePool)_dataStoreMgr.getPrimaryDataStore(templatePoolVO.getPoolId());
VMTemplateVO template = _tmpltDao.findByIdIncludingRemoved(templatePoolVO.getTemplateId());
PrimaryDataStore pool = (PrimaryDataStore)_dataStoreMgr.getPrimaryDataStore(templatePoolVO.getPoolId());
TemplateInfo template = _tmplFactory.getTemplate(templatePoolRef.getTemplateId(), pool);
try {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Evicting " + templatePoolVO);
}
DestroyCommand cmd = new DestroyCommand(pool, templatePoolVO);
try {
if (pool.isManaged()) {
// For managed store, just delete the template volume.
AsyncCallFuture<TemplateApiResult> future = _tmpltSvr.deleteTemplateOnPrimary(template, pool);
TemplateApiResult result = future.get();
if (result.isFailed()) {
s_logger.debug("Failed to delete template " + template.getId() + " from storage pool " + pool.getId());
} else {
// Remove the templatePoolVO.
if (_tmpltPoolDao.remove(templatePoolVO.getId())) {
s_logger.debug("Successfully evicted template " + template.getName() + " from storage pool " + pool.getName());
}
}
} else {
DestroyCommand cmd = new DestroyCommand(pool, templatePoolVO);
Answer answer = _storageMgr.sendToPool(pool, cmd);
if (answer != null && answer.getResult()) {
// Remove the templatePoolVO
// Remove the templatePoolVO.
if (_tmpltPoolDao.remove(templatePoolVO.getId())) {
s_logger.debug("Successfully evicted template: " + template.getName() + " from storage pool: " + pool.getName());
s_logger.debug("Successfully evicted template " + template.getName() + " from storage pool " + pool.getName());
}
} else {
s_logger.info("Will retry evicte template: " + template.getName() + " from storage pool: " + pool.getName());
s_logger.info("Will retry evict template " + template.getName() + " from storage pool " + pool.getName());
}
} catch (StorageUnavailableException e) {
s_logger.info("Storage is unavailable currently. Will retry evicte template: " + template.getName() + " from storage pool: " + pool.getName());
}
} catch (StorageUnavailableException | InterruptedException | ExecutionException e) {
s_logger.info("Storage is unavailable currently. Will retry evicte template " + template.getName() + " from storage pool " + pool.getName());
} finally {
_tmpltPoolDao.releaseFromLockTable(templatePoolRef.getId());
}
}
@Override
@ -1482,14 +1499,17 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager,
future = _tmpltSvr.createTemplateFromSnapshotAsync(snapInfo, tmplInfo, store);
} else if (volumeId != null) {
VolumeInfo volInfo = _volFactory.getVolume(volumeId);
future = _tmpltSvr.createTemplateFromVolumeAsync(volInfo, tmplInfo, store);
} else {
throw new CloudRuntimeException("Creating private Template need to specify snapshotId or volumeId");
}
CommandResult result = null;
try {
result = future.get();
if (result.isFailed()) {
privateTemplate = null;
s_logger.debug("Failed to create template" + result.getResult());

View File

@ -1550,6 +1550,8 @@
preFilter: function(args) {
if (args.context.volumes != null && args.context.volumes[0].type == 'ROOT') {
args.$form.find('.form-item[rel=newdiskoffering]').hide();
selectedDiskOfferingObj = null;
} else {
args.$form.find('.form-item[rel=newsize]').hide();
}

View File

@ -0,0 +1,38 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package com.cloud.utils;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.Map;

import com.google.common.base.Predicate;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
public class Utils {

    // Utility class; not meant to be instantiated.
    private Utils() {
    }

    /**
     * Returns an unmodifiable copy of the given map with all null-valued entries removed.
     *
     * Iteration order of the source map is preserved. The returned map rejects all
     * mutation attempts with UnsupportedOperationException.
     *
     * @param map source map; must not be null (entries with null values are dropped)
     * @return an unmodifiable map containing only the non-null-valued entries of {@code map}
     */
    public static <K, V> Map<K, V> getImmutableMap(Map<K, V> map) {
        // Implemented with the standard library instead of Guava's
        // Maps.filterValues + ImmutableMap.builder; same observable contract
        // (nulls filtered, result unmodifiable, source order preserved).
        Map<K, V> filteredMap = new LinkedHashMap<>();

        for (Map.Entry<K, V> entry : map.entrySet()) {
            if (entry.getValue() != null) {
                filteredMap.put(entry.getKey(), entry.getValue());
            }
        }

        return Collections.unmodifiableMap(filteredMap);
    }
}