Fix primary storage count when deleting volumes (#2629)

* Primary Storage count for an account does not decrease when a Data Disk is deleted

When a data disk is created but never attached to a running VM, the "deleteVolume" API does not decrement the account's used primary storage count. The value that fails to be decremented is "primarystoragetotal", which can be retrieved via the "listAccounts" API method.
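
For reference, a quick way to watch that value from outside the management server; this is only a sketch, assuming the management server's unauthenticated integration API port is enabled on 8096, and "mgmt-server" and "test-account" are placeholders for your environment:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class PrimaryStorageCheck {
    public static void main(String[] args) throws Exception {
        // Placeholder host and account name; a production call would be signed with apiKey/secretKey
        // instead of going through the integration API port.
        String url = "http://mgmt-server:8096/client/api?command=listAccounts&name=test-account&response=json";

        HttpResponse<String> resp = HttpClient.newHttpClient()
                .send(HttpRequest.newBuilder(URI.create(url)).GET().build(),
                      HttpResponse.BodyHandlers.ofString());

        // Crude extraction of primarystoragetotal from the JSON body; use a real JSON parser for anything serious.
        Matcher m = Pattern.compile("\"primarystoragetotal\"\\s*:\\s*\"?(\\d+)").matcher(resp.body());
        System.out.println(m.find() ? "primarystoragetotal=" + m.group(1) : "primarystoragetotal not found");
    }
}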

Steps to reproduce this issue:
1 - Create an account, deploy a VM in it
2 - Check the primary storage count for the account with listAccounts API
3 - Create a data disk
4 - Check the primary storage count for the account with listAccounts API
5 - Delete the Data disk
6 - Check the primary storage count for the account with listAccounts API - it is still the same as before deleting the data disk, when it should have dropped back to the value observed in step 2 (see the sketch of the missing accounting step below)
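
The patch handles this on the volume expunge path: the orchestrator publishes a VOLUME.DELETE usage event and gives the capacity back to the account. Below is a condensed sketch of that accounting step; the wrapper class and the injected ResourceLimitService field type are assumptions for illustration, while the usage-event and decrement calls mirror the VolumeOrchestrator hunk further down:

import javax.inject.Inject;

import com.cloud.configuration.Resource.ResourceType;
import com.cloud.event.EventTypes;
import com.cloud.event.UsageEventUtils;
import com.cloud.storage.Volume;
import com.cloud.storage.VolumeVO;
import com.cloud.user.ResourceLimitService;

public class VolumeAccountingSketch {

    @Inject
    private ResourceLimitService _resourceLimitMgr; // field type assumed; the diff only shows the field name

    // Release the accounting for a deleted data disk so listAccounts reflects the change.
    void releaseVolumeAccounting(VolumeVO volume) {
        // Usage event for the deleted volume.
        UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_DELETE, volume.getAccountId(), volume.getDataCenterId(),
                volume.getId(), volume.getName(), Volume.class.getName(), volume.getUuid(), volume.isDisplayVolume());

        // Decrement the per-account volume count and the primary storage (bytes) count.
        _resourceLimitMgr.decrementResourceCount(volume.getAccountId(), ResourceType.volume, volume.isDisplay());
        _resourceLimitMgr.decrementResourceCount(volume.getAccountId(), ResourceType.primary_storage, volume.isDisplay(), new Long(volume.getSize()));
    }
}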

* formatting and cleanups

* fix imports that were incorrectly changed during rebase
Rafael Weingärtner 2018-05-16 15:28:28 -03:00 committed by GitHub
parent d893fb5b00
commit b9ed42bd29
8 changed files with 1127 additions and 864 deletions

VolumeApiService.java

@ -18,6 +18,8 @@
*/
package com.cloud.storage;
import java.net.MalformedURLException;
import org.apache.cloudstack.api.command.user.volume.AttachVolumeCmd;
import org.apache.cloudstack.api.command.user.volume.CreateVolumeCmd;
import org.apache.cloudstack.api.command.user.volume.DetachVolumeCmd;
@ -26,13 +28,10 @@ import org.apache.cloudstack.api.command.user.volume.GetUploadParamsForVolumeCmd
import org.apache.cloudstack.api.command.user.volume.MigrateVolumeCmd;
import org.apache.cloudstack.api.command.user.volume.ResizeVolumeCmd;
import org.apache.cloudstack.api.command.user.volume.UploadVolumeCmd;
import com.cloud.exception.ConcurrentOperationException;
import com.cloud.exception.ResourceAllocationException;
import com.cloud.user.Account;
import org.apache.cloudstack.api.response.GetUploadParamsResponse;
import java.net.MalformedURLException;
import com.cloud.exception.ResourceAllocationException;
import com.cloud.user.Account;
public interface VolumeApiService {
/**
@ -76,13 +75,14 @@ public interface VolumeApiService {
GetUploadParamsResponse uploadVolume(GetUploadParamsForVolumeCmd cmd) throws ResourceAllocationException, MalformedURLException;
boolean deleteVolume(long volumeId, Account caller) throws ConcurrentOperationException;
boolean deleteVolume(long volumeId, Account caller);
Volume attachVolumeToVM(AttachVolumeCmd command);
Volume detachVolumeFromVM(DetachVolumeCmd cmd);
Snapshot takeSnapshot(Long volumeId, Long policyId, Long snapshotId, Account account, boolean quiescevm, Snapshot.LocationType locationType, boolean asyncBackup) throws ResourceAllocationException;
Snapshot takeSnapshot(Long volumeId, Long policyId, Long snapshotId, Account account, boolean quiescevm, Snapshot.LocationType locationType, boolean asyncBackup)
throws ResourceAllocationException;
Snapshot allocSnapshot(Long volumeId, Long policyId, String snapshotName, Snapshot.LocationType locationType) throws ResourceAllocationException;
@ -92,10 +92,8 @@ public interface VolumeApiService {
* Extracts the volume to a particular location.
*
* @param cmd
* the command specifying url (where the volume needs to be extracted to), zoneId (zone where the volume
* exists),
* the command specifying url (where the volume needs to be extracted to), zoneId (zone where the volume exists),
* id (the id of the volume)
*
*/
String extractVolume(ExtractVolumeCmd cmd);

VolumeInfo.java

@ -25,6 +25,7 @@ import com.cloud.storage.Volume;
import com.cloud.vm.VirtualMachine;
public interface VolumeInfo extends DataObject, Volume {
boolean isAttachedVM();
void addPayload(Object data);
@ -36,6 +37,7 @@ public interface VolumeInfo extends DataObject, Volume {
Long getLastPoolId();
String getAttachedVmName();
VirtualMachine getAttachedVM();
void processEventOnly(ObjectInDataStoreStateMachine.Event event);

VolumeService.java

@ -20,16 +20,15 @@ package org.apache.cloudstack.engine.subsystem.api.storage;
import java.util.Map;
import com.cloud.utils.Pair;
import org.apache.cloudstack.engine.cloud.entity.api.VolumeEntity;
import org.apache.cloudstack.framework.async.AsyncCallFuture;
import org.apache.cloudstack.storage.command.CommandResult;
import com.cloud.agent.api.to.VirtualMachineTO;
import com.cloud.exception.ConcurrentOperationException;
import com.cloud.host.Host;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.offering.DiskOffering;
import com.cloud.utils.Pair;
public interface VolumeService {
class VolumeApiResult extends CommandResult {
@ -54,38 +53,24 @@ public interface VolumeService {
/**
* Creates the volume based on the given criteria
*
* @param cmd
*
* @return the volume object
*/
AsyncCallFuture<VolumeApiResult> createVolumeAsync(VolumeInfo volume, DataStore store);
/**
* Delete volume
*
* @param volumeId
* @return
* @throws ConcurrentOperationException
*/
AsyncCallFuture<VolumeApiResult> expungeVolumeAsync(VolumeInfo volume);
/**
*
*/
boolean cloneVolume(long volumeId, long baseVolId);
/**
*
*/
AsyncCallFuture<VolumeApiResult> createVolumeFromSnapshot(VolumeInfo volume, DataStore store, SnapshotInfo snapshot);
VolumeEntity getVolumeEntity(long volumeId);
AsyncCallFuture<VolumeApiResult> createManagedStorageVolumeFromTemplateAsync(VolumeInfo volumeInfo, long destDataStoreId,
TemplateInfo srcTemplateInfo, long destHostId);
AsyncCallFuture<VolumeApiResult> createManagedStorageVolumeFromTemplateAsync(VolumeInfo volumeInfo, long destDataStoreId, TemplateInfo srcTemplateInfo, long destHostId);
AsyncCallFuture<VolumeApiResult> createVolumeFromTemplateAsync(VolumeInfo volume, long dataStoreId,
TemplateInfo template);
AsyncCallFuture<VolumeApiResult> createVolumeFromTemplateAsync(VolumeInfo volume, long dataStoreId, TemplateInfo template);
AsyncCallFuture<VolumeApiResult> copyVolume(VolumeInfo srcVolume, DataStore destStore);
@ -93,11 +78,11 @@ public interface VolumeService {
AsyncCallFuture<CommandResult> migrateVolumes(Map<VolumeInfo, DataStore> volumeMap, VirtualMachineTO vmTo, Host srcHost, Host destHost);
boolean destroyVolume(long volumeId) throws ConcurrentOperationException;
void destroyVolume(long volumeId);
AsyncCallFuture<VolumeApiResult> registerVolume(VolumeInfo volume, DataStore store);
public Pair<EndPoint,DataObject> registerVolumeForPostUpload(VolumeInfo volume, DataStore store);
public Pair<EndPoint, DataObject> registerVolumeForPostUpload(VolumeInfo volume, DataStore store);
AsyncCallFuture<VolumeApiResult> resize(VolumeInfo volume);
@ -108,5 +93,4 @@ public interface VolumeService {
SnapshotInfo takeSnapshot(VolumeInfo volume);
VolumeInfo updateHypervisorSnapshotReserveForVolume(DiskOffering diskOffering, long volumeId, HypervisorType hyperType);
}

VolumeOrchestrator.java

@ -30,8 +30,6 @@ import java.util.concurrent.ExecutionException;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import org.apache.log4j.Logger;
import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService;
import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
@ -67,6 +65,7 @@ import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO;
import org.apache.log4j.Logger;
import com.cloud.agent.api.to.DataTO;
import com.cloud.agent.api.to.DiskTO;
@ -225,8 +224,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
// Find a destination storage pool with the specified criteria
DiskOffering diskOffering = _entityMgr.findById(DiskOffering.class, volume.getDiskOfferingId());
DiskProfile dskCh = new DiskProfile(volume.getId(), volume.getVolumeType(), volume.getName(), diskOffering.getId(), diskOffering.getDiskSize(),
diskOffering.getTagsArray(), diskOffering.getUseLocalStorage(), diskOffering.isRecreatable(), null);
DiskProfile dskCh = new DiskProfile(volume.getId(), volume.getVolumeType(), volume.getName(), diskOffering.getId(), diskOffering.getDiskSize(), diskOffering.getTagsArray(),
diskOffering.getUseLocalStorage(), diskOffering.isRecreatable(), null);
dskCh.setHyperType(dataDiskHyperType);
storageMgr.setDiskProfileThrottling(dskCh, null, diskOffering);
@ -249,17 +248,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
}
public VolumeVO allocateDuplicateVolumeVO(Volume oldVol, Long templateId) {
VolumeVO newVol = new VolumeVO(oldVol.getVolumeType(),
oldVol.getName(),
oldVol.getDataCenterId(),
oldVol.getDomainId(),
oldVol.getAccountId(),
oldVol.getDiskOfferingId(),
oldVol.getProvisioningType(),
oldVol.getSize(),
oldVol.getMinIops(),
oldVol.getMaxIops(),
oldVol.get_iScsiName());
VolumeVO newVol = new VolumeVO(oldVol.getVolumeType(), oldVol.getName(), oldVol.getDataCenterId(), oldVol.getDomainId(), oldVol.getAccountId(), oldVol.getDiskOfferingId(),
oldVol.getProvisioningType(), oldVol.getSize(), oldVol.getMinIops(), oldVol.getMaxIops(), oldVol.get_iScsiName());
if (templateId != null) {
newVol.setTemplateId(templateId);
} else {
@ -398,8 +388,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
DataStoreRole dataStoreRole = getDataStoreRole(snapshot);
SnapshotInfo snapInfo = snapshotFactory.getSnapshot(snapshot.getId(), dataStoreRole);
if(snapInfo == null && dataStoreRole == DataStoreRole.Image) {
if (snapInfo == null && dataStoreRole == DataStoreRole.Image) {
// snapshot is not backed up to secondary, let's do that now.
snapInfo = snapshotFactory.getSnapshot(snapshot.getId(), DataStoreRole.Primary);
@ -480,8 +469,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
throw new CloudRuntimeException("Template " + template.getName() + " has not been completely downloaded to zone " + dc.getId());
}
return new DiskProfile(volume.getId(), volume.getVolumeType(), volume.getName(), diskOffering.getId(), ss.getSize(), diskOffering.getTagsArray(),
diskOffering.getUseLocalStorage(), diskOffering.isRecreatable(), Storage.ImageFormat.ISO != template.getFormat() ? template.getId() : null);
return new DiskProfile(volume.getId(), volume.getVolumeType(), volume.getName(), diskOffering.getId(), ss.getSize(), diskOffering.getTagsArray(), diskOffering.getUseLocalStorage(),
diskOffering.isRecreatable(), Storage.ImageFormat.ISO != template.getFormat() ? template.getId() : null);
} else {
return new DiskProfile(volume.getId(), volume.getVolumeType(), volume.getName(), diskOffering.getId(), diskOffering.getDiskSize(), diskOffering.getTagsArray(),
diskOffering.getUseLocalStorage(), diskOffering.isRecreatable(), null);
@ -489,8 +478,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
}
@DB
public VolumeInfo copyVolumeFromSecToPrimary(VolumeInfo volume, VirtualMachine vm, VirtualMachineTemplate template, DataCenter dc, Pod pod, Long clusterId,
ServiceOffering offering, DiskOffering diskOffering, List<StoragePool> avoids, long size, HypervisorType hyperType) throws NoTransitionException {
public VolumeInfo copyVolumeFromSecToPrimary(VolumeInfo volume, VirtualMachine vm, VirtualMachineTemplate template, DataCenter dc, Pod pod, Long clusterId, ServiceOffering offering,
DiskOffering diskOffering, List<StoragePool> avoids, long size, HypervisorType hyperType) throws NoTransitionException {
final HashSet<StoragePool> avoidPools = new HashSet<StoragePool>(avoids);
DiskProfile dskCh = createDiskCharacteristics(volume, template, dc, diskOffering);
@ -519,8 +508,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
}
@DB
public VolumeInfo createVolume(VolumeInfo volume, VirtualMachine vm, VirtualMachineTemplate template, DataCenter dc, Pod pod, Long clusterId, ServiceOffering offering,
DiskOffering diskOffering, List<StoragePool> avoids, long size, HypervisorType hyperType) {
public VolumeInfo createVolume(VolumeInfo volume, VirtualMachine vm, VirtualMachineTemplate template, DataCenter dc, Pod pod, Long clusterId, ServiceOffering offering, DiskOffering diskOffering,
List<StoragePool> avoids, long size, HypervisorType hyperType) {
// update the volume's hv_ss_reserve (hypervisor snapshot reserve) from a disk offering (used for managed storage)
volume = volService.updateHypervisorSnapshotReserveForVolume(diskOffering, volume.getId(), hyperType);
@ -656,12 +645,13 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
}
protected DiskProfile toDiskProfile(Volume vol, DiskOffering offering) {
return new DiskProfile(vol.getId(), vol.getVolumeType(), vol.getName(), offering.getId(), vol.getSize(), offering.getTagsArray(), offering.getUseLocalStorage(),
offering.isRecreatable(), vol.getTemplateId());
return new DiskProfile(vol.getId(), vol.getVolumeType(), vol.getName(), offering.getId(), vol.getSize(), offering.getTagsArray(), offering.getUseLocalStorage(), offering.isRecreatable(),
vol.getTemplateId());
}
@Override
public DiskProfile allocateRawVolume(Type type, String name, DiskOffering offering, Long size, Long minIops, Long maxIops, VirtualMachine vm, VirtualMachineTemplate template, Account owner, Long deviceId) {
public DiskProfile allocateRawVolume(Type type, String name, DiskOffering offering, Long size, Long minIops, Long maxIops, VirtualMachine vm, VirtualMachineTemplate template, Account owner,
Long deviceId) {
if (size == null) {
size = offering.getDiskSize();
} else {
@ -671,17 +661,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
minIops = minIops != null ? minIops : offering.getMinIops();
maxIops = maxIops != null ? maxIops : offering.getMaxIops();
VolumeVO vol = new VolumeVO(type,
name,
vm.getDataCenterId(),
owner.getDomainId(),
owner.getId(),
offering.getId(),
offering.getProvisioningType(),
size,
minIops,
maxIops,
null);
VolumeVO vol = new VolumeVO(type, name, vm.getDataCenterId(), owner.getDomainId(), owner.getId(), offering.getId(), offering.getProvisioningType(), size, minIops, maxIops, null);
if (vm != null) {
vol.setInstanceId(vm.getId());
}
@ -719,11 +699,12 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
}
@Override
public DiskProfile allocateTemplatedVolume(Type type, String name, DiskOffering offering, Long rootDisksize, Long minIops, Long maxIops, VirtualMachineTemplate template, VirtualMachine vm, Account owner) {
public DiskProfile allocateTemplatedVolume(Type type, String name, DiskOffering offering, Long rootDisksize, Long minIops, Long maxIops, VirtualMachineTemplate template, VirtualMachine vm,
Account owner) {
assert (template.getFormat() != ImageFormat.ISO) : "ISO is not a template really....";
Long size = _tmpltMgr.getTemplateSize(template.getId(), vm.getDataCenterId());
if (rootDisksize != null ) {
if (rootDisksize != null) {
rootDisksize = rootDisksize * 1024 * 1024 * 1024;
if (rootDisksize > size) {
s_logger.debug("Using root disk size of " + rootDisksize + " Bytes for volume " + name);
@ -736,17 +717,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
minIops = minIops != null ? minIops : offering.getMinIops();
maxIops = maxIops != null ? maxIops : offering.getMaxIops();
VolumeVO vol = new VolumeVO(type,
name,
vm.getDataCenterId(),
owner.getDomainId(),
owner.getId(),
offering.getId(),
offering.getProvisioningType(),
size,
minIops,
maxIops,
null);
VolumeVO vol = new VolumeVO(type, name, vm.getDataCenterId(), owner.getDomainId(), owner.getId(), offering.getId(), offering.getProvisioningType(), size, minIops, maxIops, null);
vol.setFormat(getSupportedImageFormatForCluster(template.getHypervisorType()));
if (vm != null) {
vol.setInstanceId(vm.getId());
@ -767,7 +738,6 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
vol.setDisplayVolume(userVm.isDisplayVm());
}
vol = _volsDao.persist(vol);
// Create event and update resource count for volumes if vm is a user vm
@ -817,16 +787,15 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
}
}
private VolumeInfo copyVolume(StoragePool rootDiskPool, VolumeInfo volume, VirtualMachine vm, VirtualMachineTemplate rootDiskTmplt, DataCenter dcVO, Pod pod,
DiskOffering diskVO, ServiceOffering svo, HypervisorType rootDiskHyperType) throws NoTransitionException {
private VolumeInfo copyVolume(StoragePool rootDiskPool, VolumeInfo volume, VirtualMachine vm, VirtualMachineTemplate rootDiskTmplt, DataCenter dcVO, Pod pod, DiskOffering diskVO,
ServiceOffering svo, HypervisorType rootDiskHyperType) throws NoTransitionException {
if (!isSupportedImageFormatForCluster(volume, rootDiskHyperType)) {
throw new InvalidParameterValueException("Failed to attach volume to VM since volumes format " + volume.getFormat().getFileExtension()
+ " is not compatible with the vm hypervisor type");
throw new InvalidParameterValueException("Failed to attach volume to VM since volumes format " + volume.getFormat().getFileExtension() + " is not compatible with the vm hypervisor type");
}
VolumeInfo volumeOnPrimary = copyVolumeFromSecToPrimary(volume, vm, rootDiskTmplt, dcVO, pod, rootDiskPool.getClusterId(), svo, diskVO, new ArrayList<StoragePool>(),
volume.getSize(), rootDiskHyperType);
VolumeInfo volumeOnPrimary = copyVolumeFromSecToPrimary(volume, vm, rootDiskTmplt, dcVO, pod, rootDiskPool.getClusterId(), svo, diskVO, new ArrayList<StoragePool>(), volume.getSize(),
rootDiskHyperType);
return volumeOnPrimary;
}
@ -871,8 +840,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
long vmTemplateId = vm.getTemplateId();
if (volTemplateId != null && volTemplateId.longValue() != vmTemplateId) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("switchVolume: Old Volume's templateId: " + volTemplateId + " does not match the VM's templateId: " + vmTemplateId
+ ", updating templateId in the new Volume");
s_logger.debug("switchVolume: Old Volume's templateId: " + volTemplateId + " does not match the VM's templateId: " + vmTemplateId + ", updating templateId in the new Volume");
}
templateIdToUse = vmTemplateId;
}
@ -1108,8 +1076,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
for (VolumeVO vol : vols) {
VolumeInfo volumeInfo = volFactory.getVolume(vol.getId());
DataTO volTO = volumeInfo.getTO();
DiskTO disk = storageMgr.getDiskWithThrottling(volTO, vol.getVolumeType(), vol.getDeviceId(), vol.getPath(),
vm.getServiceOfferingId(), vol.getDiskOfferingId());
DiskTO disk = storageMgr.getDiskWithThrottling(volTO, vol.getVolumeType(), vol.getDeviceId(), vol.getPath(), vm.getServiceOfferingId(), vol.getDiskOfferingId());
DataStore dataStore = dataStoreMgr.getDataStore(vol.getPoolId(), DataStoreRole.Primary);
disk.setDetails(getDetails(volumeInfo, dataStore));
@ -1217,7 +1184,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
} else {
storageMigrationEnabled = StorageMigrationEnabled.value();
}
if(storageMigrationEnabled){
if (storageMigrationEnabled) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Shared volume " + vol + " will be migrated on storage pool " + assignedPool + " assigned by deploymentPlanner");
}
@ -1237,8 +1204,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
}
} else {
if (vol.getPoolId() == null) {
throw new StorageUnavailableException("Volume has no pool associate and also no storage pool assigned in DeployDestination, Unable to create " + vol,
Volume.class, vol.getId());
throw new StorageUnavailableException("Volume has no pool associate and also no storage pool assigned in DeployDestination, Unable to create " + vol, Volume.class, vol.getId());
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("No need to recreate the volume: " + vol + ", since it already has a pool assigned: " + vol.getPoolId() + ", adding disk to VM");
@ -1319,8 +1285,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
long hostId = vm.getVirtualMachine().getHostId();
future = volService.createManagedStorageVolumeFromTemplateAsync(volume, destPool.getId(), templ, hostId);
}
else {
} else {
future = volService.createVolumeFromTemplateAsync(volume, destPool.getId(), templ);
}
}
@ -1398,7 +1363,6 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
if (oldHostId != hostId) {
Host oldHost = _hostDao.findById(oldHostId);
Host host = _hostDao.findById(hostId);
DataStore storagePool = dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary);
storageMgr.removeStoragePoolFromCluster(oldHostId, vol.get_iScsiName(), pool);
@ -1417,8 +1381,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
VolumeInfo volumeInfo = volFactory.getVolume(vol.getId());
DataTO volTO = volumeInfo.getTO();
DiskTO disk = storageMgr.getDiskWithThrottling(volTO, vol.getVolumeType(), vol.getDeviceId(), vol.getPath(),
vm.getServiceOfferingId(), vol.getDiskOfferingId());
DiskTO disk = storageMgr.getDiskWithThrottling(volTO, vol.getVolumeType(), vol.getDeviceId(), vol.getPath(), vm.getServiceOfferingId(), vol.getDiskOfferingId());
DataStore dataStore = dataStoreMgr.getDataStore(vol.getPoolId(), DataStoreRole.Primary);
disk.setDetails(getDetails(volumeInfo, dataStore));
@ -1434,13 +1397,12 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
cloneType = UserVmCloneType.full;
}
UserVmCloneSettingVO cloneSettingVO = _vmCloneSettingDao.findByVmId(vm.getId());
if (cloneSettingVO != null){
if (! cloneSettingVO.getCloneType().equals(cloneType.toString())){
if (cloneSettingVO != null) {
if (!cloneSettingVO.getCloneType().equals(cloneType.toString())) {
cloneSettingVO.setCloneType(cloneType.toString());
_vmCloneSettingDao.update(cloneSettingVO.getVmId(), cloneSettingVO);
}
}
else {
} else {
UserVmCloneSettingVO vmCloneSettingVO = new UserVmCloneSettingVO(vm.getId(), cloneType.toString());
_vmCloneSettingDao.persist(vmCloneSettingVO);
}
@ -1465,8 +1427,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
return true;
}
public static final ConfigKey<Long> MaxVolumeSize = new ConfigKey<Long>(Long.class, "storage.max.volume.size", "Storage", "2000", "The maximum size for a volume (in GB).",
true);
public static final ConfigKey<Long> MaxVolumeSize = new ConfigKey<Long>(Long.class, "storage.max.volume.size", "Storage", "2000", "The maximum size for a volume (in GB).", true);
public static final ConfigKey<Boolean> RecreatableSystemVmEnabled = new ConfigKey<Boolean>(Boolean.class, "recreate.systemvm.enabled", "Advanced", "false",
"If true, will recreate system vm root disk whenever starting system vm", true);
@ -1478,8 +1439,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
"Enable/disable storage migration across primary storage", true);
static final ConfigKey<Boolean> VolumeUrlCheck = new ConfigKey<Boolean>("Advanced", Boolean.class, "volume.url.check", "true",
"Check the url for a volume before downloading it from the management server. Set to flase when you managment has no internet access.",
true);
"Check the url for a volume before downloading it from the management server. Set to flase when you managment has no internet access.", true);
@Override
public ConfigKey<?>[] getConfigKeys() {
@ -1507,9 +1467,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
_volsDao.remove(volume.getId());
}
if(volume.getState().equals(Volume.State.Attaching)) {
s_logger.warn("Vol: " + volume.getName() + " failed to attach to VM: " + _userVmDao.findById(vmId).getHostName() +
" on last mgt server stop, changing state back to Ready");
if (volume.getState().equals(Volume.State.Attaching)) {
s_logger.warn("Vol: " + volume.getName() + " failed to attach to VM: " + _userVmDao.findById(vmId).getHostName() + " on last mgt server stop, changing state back to Ready");
volume.setState(Volume.State.Ready);
_volsDao.update(volumeId, volume);
}
@ -1549,8 +1508,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
@Override
public void cleanupStorageJobs() {
//clean up failure jobs related to volume
List<AsyncJobVO> jobs = _jobMgr.findFailureAsyncJobs(VmWorkAttachVolume.class.getName(),
VmWorkMigrateVolume.class.getName(), VmWorkTakeVolumeSnapshot.class.getName());
List<AsyncJobVO> jobs = _jobMgr.findFailureAsyncJobs(VmWorkAttachVolume.class.getName(), VmWorkMigrateVolume.class.getName(), VmWorkTakeVolumeSnapshot.class.getName());
for (AsyncJobVO job : jobs) {
try {
@ -1592,8 +1550,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
}
// FIXME - All this is boiler plate code and should be done as part of state transition. This shouldn't be part of orchestrator.
// publish usage event for the volume
UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_DELETE, volume.getAccountId(), volume.getDataCenterId(), volume.getId(), volume.getName(),
Volume.class.getName(), volume.getUuid(), volume.isDisplayVolume());
UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_DELETE, volume.getAccountId(), volume.getDataCenterId(), volume.getId(), volume.getName(), Volume.class.getName(),
volume.getUuid(), volume.isDisplayVolume());
_resourceLimitMgr.decrementResourceCount(volume.getAccountId(), ResourceType.volume, volume.isDisplay());
_resourceLimitMgr.decrementResourceCount(volume.getAccountId(), ResourceType.primary_storage, volume.isDisplay(), new Long(volume.getSize()));
} catch (Exception e) {
@ -1619,13 +1577,16 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
VolumeVO vol = _volsDao.findById(volumeId);
boolean needUpdate = false;
// Volume path is not getting updated in the DB, need to find reason and fix the issue.
if (vol.getPath() == null)
if (vol.getPath() == null) {
return;
if (!vol.getPath().equalsIgnoreCase(path))
}
if (!vol.getPath().equalsIgnoreCase(path)) {
needUpdate = true;
}
if (chainInfo != null && (vol.getChainInfo() == null || !chainInfo.equalsIgnoreCase(vol.getChainInfo())))
if (chainInfo != null && (vol.getChainInfo() == null || !chainInfo.equalsIgnoreCase(vol.getChainInfo()))) {
needUpdate = true;
}
if (needUpdate) {
s_logger.info("Update volume disk chain info. vol: " + vol.getId() + ", " + vol.getPath() + " -> " + path + ", " + vol.getChainInfo() + " -> " + chainInfo);

VolumeServiceImpl.java

@ -85,7 +85,6 @@ import com.cloud.configuration.Resource.ResourceType;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.event.EventTypes;
import com.cloud.event.UsageEventUtils;
import com.cloud.exception.ConcurrentOperationException;
import com.cloud.exception.ResourceAllocationException;
import com.cloud.host.Host;
import com.cloud.host.HostVO;
@ -110,6 +109,7 @@ import com.cloud.storage.Volume.State;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.VMTemplatePoolDao;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.storage.dao.VolumeDetailsDao;
import com.cloud.storage.snapshot.SnapshotManager;
import com.cloud.storage.template.TemplateProp;
import com.cloud.user.AccountManager;
@ -119,8 +119,6 @@ import com.cloud.utils.Pair;
import com.cloud.utils.db.DB;
import com.cloud.utils.db.GlobalLock;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.storage.dao.VolumeDetailsDao;
@Component
public class VolumeServiceImpl implements VolumeService {
@ -420,8 +418,7 @@ public class VolumeServiceImpl implements VolumeService {
if (!supportsStorageSystemSnapshots) {
_snapshotStoreDao.remove(snapStoreVo.getId());
}
}
else {
} else {
_snapshotStoreDao.remove(snapStoreVo.getId());
}
}
@ -453,8 +450,8 @@ public class VolumeServiceImpl implements VolumeService {
private final TemplateInfo _templateInfo;
private final AsyncCallFuture<VolumeApiResult> _future;
public ManagedCreateBaseImageContext(AsyncCompletionCallback<T> callback, VolumeInfo volumeInfo,
PrimaryDataStore primaryDatastore, TemplateInfo templateInfo, AsyncCallFuture<VolumeApiResult> future) {
public ManagedCreateBaseImageContext(AsyncCompletionCallback<T> callback, VolumeInfo volumeInfo, PrimaryDataStore primaryDatastore, TemplateInfo templateInfo,
AsyncCallFuture<VolumeApiResult> future) {
super(callback);
_volumeInfo = volumeInfo;
@ -488,8 +485,8 @@ public class VolumeServiceImpl implements VolumeService {
final DataObject destObj;
long templatePoolId;
public CreateBaseImageContext(AsyncCompletionCallback<T> callback, VolumeInfo volume, PrimaryDataStore datastore, TemplateInfo srcTemplate,
AsyncCallFuture<VolumeApiResult> future, DataObject destObj, long templatePoolId) {
public CreateBaseImageContext(AsyncCompletionCallback<T> callback, VolumeInfo volume, PrimaryDataStore datastore, TemplateInfo srcTemplate, AsyncCallFuture<VolumeApiResult> future,
DataObject destObj, long templatePoolId) {
super(callback);
this.volume = volume;
this.dataStore = datastore;
@ -549,13 +546,11 @@ public class VolumeServiceImpl implements VolumeService {
throw new CloudRuntimeException("Failed to find template " + template.getUniqueName() + " in storage pool " + dataStore.getId());
} else {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Found template " + template.getUniqueName() + " in storage pool " + dataStore.getId() + " with VMTemplateStoragePool id: " +
templatePoolRef.getId());
s_logger.debug("Found template " + template.getUniqueName() + " in storage pool " + dataStore.getId() + " with VMTemplateStoragePool id: " + templatePoolRef.getId());
}
}
long templatePoolRefId = templatePoolRef.getId();
CreateBaseImageContext<CreateCmdResult> context =
new CreateBaseImageContext<CreateCmdResult>(null, volume, dataStore, template, future, templateOnPrimaryStoreObj, templatePoolRefId);
CreateBaseImageContext<CreateCmdResult> context = new CreateBaseImageContext<CreateCmdResult>(null, volume, dataStore, template, future, templateOnPrimaryStoreObj, templatePoolRefId);
AsyncCallbackDispatcher<VolumeServiceImpl, CopyCommandResult> caller = AsyncCallbackDispatcher.create(this);
caller.setCallback(caller.getTarget().copyBaseImageCallback(null, null)).setContext(context);
@ -571,8 +566,8 @@ public class VolumeServiceImpl implements VolumeService {
}
templatePoolRef = _tmpltPoolDao.findByPoolTemplate(dataStore.getId(), template.getId());
if (templatePoolRef != null && templatePoolRef.getState() == ObjectInDataStoreStateMachine.State.Ready) {
s_logger.info("Unable to acquire lock on VMTemplateStoragePool " + templatePoolRefId + ", But Template " + template.getUniqueName() +
" is already copied to primary storage, skip copying");
s_logger.info(
"Unable to acquire lock on VMTemplateStoragePool " + templatePoolRefId + ", But Template " + template.getUniqueName() + " is already copied to primary storage, skip copying");
createVolumeFromBaseImageAsync(volume, templateOnPrimaryStoreObj, dataStore, future);
return;
}
@ -606,8 +601,7 @@ public class VolumeServiceImpl implements VolumeService {
return;
}
protected Void managedCopyBaseImageCallback(AsyncCallbackDispatcher<VolumeServiceImpl, CopyCommandResult> callback,
ManagedCreateBaseImageContext<VolumeApiResult> context) {
protected Void managedCopyBaseImageCallback(AsyncCallbackDispatcher<VolumeServiceImpl, CopyCommandResult> callback, ManagedCreateBaseImageContext<VolumeApiResult> context) {
CopyCommandResult result = callback.getResult();
VolumeInfo volumeInfo = context.getVolumeInfo();
VolumeApiResult res = new VolumeApiResult(volumeInfo);
@ -626,8 +620,7 @@ public class VolumeServiceImpl implements VolumeService {
}
volDao.update(volume.getId(), volume);
}
else {
} else {
volumeInfo.processEvent(Event.DestroyRequested);
res.setResult(result.getResult());
@ -652,8 +645,7 @@ public class VolumeServiceImpl implements VolumeService {
if (result.isSuccess()) {
((TemplateObject)templateOnPrimaryStoreObj).setInstallPath(result.getPath());
templateOnPrimaryStoreObj.processEvent(Event.OperationSuccessed, result.getAnswer());
}
else {
} else {
templateOnPrimaryStoreObj.processEvent(Event.OperationFailed);
}
@ -673,8 +665,7 @@ public class VolumeServiceImpl implements VolumeService {
if (result.isSuccess()) {
templateOnPrimaryStoreObj.processEvent(Event.OperationSuccessed, result.getAnswer());
}
else {
} else {
templateOnPrimaryStoreObj.processEvent(Event.OperationFailed);
}
@ -708,8 +699,8 @@ public class VolumeServiceImpl implements VolumeService {
private final DataObject templateOnStore;
private final SnapshotInfo snapshot;
public CreateVolumeFromBaseImageContext(AsyncCompletionCallback<T> callback, DataObject vo, DataStore primaryStore, DataObject templateOnStore,
AsyncCallFuture<VolumeApiResult> future, SnapshotInfo snapshot) {
public CreateVolumeFromBaseImageContext(AsyncCompletionCallback<T> callback, DataObject vo, DataStore primaryStore, DataObject templateOnStore, AsyncCallFuture<VolumeApiResult> future,
SnapshotInfo snapshot) {
super(callback);
this.vo = vo;
this.future = future;
@ -727,8 +718,7 @@ public class VolumeServiceImpl implements VolumeService {
DataObject volumeOnPrimaryStorage = pd.create(volume);
volumeOnPrimaryStorage.processEvent(Event.CreateOnlyRequested);
CreateVolumeFromBaseImageContext<VolumeApiResult> context =
new CreateVolumeFromBaseImageContext<VolumeApiResult>(null, volumeOnPrimaryStorage, pd, templateOnPrimaryStore, future, null);
CreateVolumeFromBaseImageContext<VolumeApiResult> context = new CreateVolumeFromBaseImageContext<VolumeApiResult>(null, volumeOnPrimaryStorage, pd, templateOnPrimaryStore, future, null);
AsyncCallbackDispatcher<VolumeServiceImpl, CopyCommandResult> caller = AsyncCallbackDispatcher.create(this);
caller.setCallback(caller.getTarget().createVolumeFromBaseImageCallBack(null, null));
caller.setContext(context);
@ -738,8 +728,7 @@ public class VolumeServiceImpl implements VolumeService {
}
@DB
protected Void createVolumeFromBaseImageCallBack(AsyncCallbackDispatcher<VolumeServiceImpl, CopyCommandResult> callback,
CreateVolumeFromBaseImageContext<VolumeApiResult> context) {
protected Void createVolumeFromBaseImageCallBack(AsyncCallbackDispatcher<VolumeServiceImpl, CopyCommandResult> callback, CreateVolumeFromBaseImageContext<VolumeApiResult> context) {
DataObject vo = context.vo;
DataObject tmplOnPrimary = context.templateOnStore;
CopyCommandResult result = callback.getResult();
@ -771,7 +760,7 @@ public class VolumeServiceImpl implements VolumeService {
_tmpltPoolDao.update(templatePoolRefId, templatePoolRef);
}
}finally {
} finally {
_tmpltPoolDao.releaseFromLockTable(templatePoolRefId);
}
}
@ -838,8 +827,7 @@ public class VolumeServiceImpl implements VolumeService {
if (result.isFailed()) {
String errMesg = result.getResult();
throw new CloudRuntimeException("Unable to create template " + templateOnPrimary.getId() +
" on primary storage " + destPrimaryDataStore.getId() + ":" + errMesg);
throw new CloudRuntimeException("Unable to create template " + templateOnPrimary.getId() + " on primary storage " + destPrimaryDataStore.getId() + ":" + errMesg);
}
} catch (Throwable e) {
s_logger.debug("Failed to create template volume on storage", e);
@ -847,8 +835,7 @@ public class VolumeServiceImpl implements VolumeService {
templateOnPrimary.processEvent(Event.OperationFailed);
throw new CloudRuntimeException(e.getMessage());
}
finally {
} finally {
_tmpltPoolDao.releaseFromLockTable(templatePoolRefId);
}
@ -866,9 +853,8 @@ public class VolumeServiceImpl implements VolumeService {
* @param destPrimaryDataStore The managed primary storage
* @param destHost The host that we will use for the copy
*/
private void copyTemplateToManagedTemplateVolume(TemplateInfo srcTemplateInfo, TemplateInfo templateOnPrimary, VMTemplateStoragePoolVO templatePoolRef,
PrimaryDataStore destPrimaryDataStore, Host destHost)
{
private void copyTemplateToManagedTemplateVolume(TemplateInfo srcTemplateInfo, TemplateInfo templateOnPrimary, VMTemplateStoragePoolVO templatePoolRef, PrimaryDataStore destPrimaryDataStore,
Host destHost) {
AsyncCallFuture<VolumeApiResult> copyTemplateFuture = new AsyncCallFuture<>();
int storagePoolMaxWaitSeconds = NumbersUtil.parseInt(configDao.getValue(Config.StoragePoolMaxWaitSeconds.key()), 3600);
long templatePoolRefId = templatePoolRef.getId();
@ -890,10 +876,8 @@ public class VolumeServiceImpl implements VolumeService {
try {
// copy the template from sec storage to the created volume
CreateBaseImageContext<CreateCmdResult> copyContext = new CreateBaseImageContext<>(
null, null, destPrimaryDataStore, srcTemplateInfo,
copyTemplateFuture, templateOnPrimary, templatePoolRefId
);
CreateBaseImageContext<CreateCmdResult> copyContext = new CreateBaseImageContext<>(null, null, destPrimaryDataStore, srcTemplateInfo, copyTemplateFuture, templateOnPrimary,
templatePoolRefId);
AsyncCallbackDispatcher<VolumeServiceImpl, CopyCommandResult> copyCaller = AsyncCallbackDispatcher.create(this);
copyCaller.setCallback(copyCaller.getTarget().copyManagedTemplateCallback(null, null)).setContext(copyContext);
@ -930,8 +914,7 @@ public class VolumeServiceImpl implements VolumeService {
motionSrv.copyAsync(srcTemplateInfo, templateOnPrimary, destHost, copyCaller);
result = copyTemplateFuture.get();
}
finally {
} finally {
revokeAccess(templateOnPrimary, destHost, destPrimaryDataStore);
if (HypervisorType.VMware.equals(destHost.getHypervisorType())) {
@ -946,21 +929,18 @@ public class VolumeServiceImpl implements VolumeService {
}
if (result.isFailed()) {
throw new CloudRuntimeException("Failed to copy template " + templateOnPrimary.getId() +
" to primary storage " + destPrimaryDataStore.getId() + ": " + result.getResult());
throw new CloudRuntimeException("Failed to copy template " + templateOnPrimary.getId() + " to primary storage " + destPrimaryDataStore.getId() + ": " + result.getResult());
// XXX: I find it is useful to destroy the volume on primary storage instead of another thread trying the copy again because I've seen
// something weird happens to the volume (XenServer creates an SR, but the VDI copy can fail).
// For now, I just retry the copy.
}
}
catch (Throwable e) {
} catch (Throwable e) {
s_logger.debug("Failed to create a template on primary storage", e);
templateOnPrimary.processEvent(Event.OperationFailed);
throw new CloudRuntimeException(e.getMessage());
}
finally {
} finally {
_tmpltPoolDao.releaseFromLockTable(templatePoolRefId);
}
}
@ -983,8 +963,7 @@ public class VolumeServiceImpl implements VolumeService {
String msg = "Unable to get an answer to the modify targets command";
s_logger.warn(msg);
}
else if (!answer.getResult()) {
} else if (!answer.getResult()) {
String msg = "Unable to modify target on the following host: " + hostId;
s_logger.warn(msg);
@ -999,8 +978,7 @@ public class VolumeServiceImpl implements VolumeService {
* @param destPrimaryDataStore Primary storage of the volume
* @param future For async
*/
private void createManagedVolumeCloneTemplateAsync(VolumeInfo volumeInfo, TemplateInfo templateOnPrimary, PrimaryDataStore destPrimaryDataStore,
AsyncCallFuture<VolumeApiResult> future) {
private void createManagedVolumeCloneTemplateAsync(VolumeInfo volumeInfo, TemplateInfo templateOnPrimary, PrimaryDataStore destPrimaryDataStore, AsyncCallFuture<VolumeApiResult> future) {
VMTemplateStoragePoolVO templatePoolRef = _tmpltPoolDao.findByPoolTemplate(destPrimaryDataStore.getId(), templateOnPrimary.getId());
if (templatePoolRef == null) {
@ -1015,8 +993,7 @@ public class VolumeServiceImpl implements VolumeService {
try {
volumeInfo.processEvent(Event.CreateOnlyRequested);
CreateVolumeFromBaseImageContext<VolumeApiResult> context =
new CreateVolumeFromBaseImageContext<>(null, volumeInfo, destPrimaryDataStore, templateOnPrimary, future, null);
CreateVolumeFromBaseImageContext<VolumeApiResult> context = new CreateVolumeFromBaseImageContext<>(null, volumeInfo, destPrimaryDataStore, templateOnPrimary, future, null);
AsyncCallbackDispatcher<VolumeServiceImpl, CopyCommandResult> caller = AsyncCallbackDispatcher.create(this);
@ -1033,8 +1010,7 @@ public class VolumeServiceImpl implements VolumeService {
}
}
private void createManagedVolumeCopyTemplateAsync(VolumeInfo volumeInfo, PrimaryDataStore primaryDataStore, TemplateInfo srcTemplateInfo, Host destHost,
AsyncCallFuture<VolumeApiResult> future) {
private void createManagedVolumeCopyTemplateAsync(VolumeInfo volumeInfo, PrimaryDataStore primaryDataStore, TemplateInfo srcTemplateInfo, Host destHost, AsyncCallFuture<VolumeApiResult> future) {
try {
// Create a volume on managed storage.
@ -1050,8 +1026,7 @@ public class VolumeServiceImpl implements VolumeService {
// Refresh the volume info from the DB.
volumeInfo = volFactory.getVolume(volumeInfo.getId(), primaryDataStore);
ManagedCreateBaseImageContext<CreateCmdResult> context = new ManagedCreateBaseImageContext<CreateCmdResult>(null, volumeInfo,
primaryDataStore, srcTemplateInfo, future);
ManagedCreateBaseImageContext<CreateCmdResult> context = new ManagedCreateBaseImageContext<CreateCmdResult>(null, volumeInfo, primaryDataStore, srcTemplateInfo, future);
AsyncCallbackDispatcher<VolumeServiceImpl, CopyCommandResult> caller = AsyncCallbackDispatcher.create(this);
caller.setCallback(caller.getTarget().managedCopyBaseImageCallback(null, null)).setContext(context);
@ -1081,8 +1056,7 @@ public class VolumeServiceImpl implements VolumeService {
try {
motionSrv.copyAsync(srcTemplateInfo, destTemplateInfo, destHost, caller);
}
finally {
} finally {
revokeAccess(volumeInfo, destHost, primaryDataStore);
}
} catch (Throwable t) {
@ -1098,8 +1072,7 @@ public class VolumeServiceImpl implements VolumeService {
if (expungeVolumeResult.isFailed()) {
errMsg += " : Failed to expunge a volume that was created";
}
}
catch (Exception ex) {
} catch (Exception ex) {
errMsg += " : " + ex.getMessage();
}
@ -1112,8 +1085,7 @@ public class VolumeServiceImpl implements VolumeService {
}
@Override
public AsyncCallFuture<VolumeApiResult> createManagedStorageVolumeFromTemplateAsync(VolumeInfo volumeInfo, long destDataStoreId,
TemplateInfo srcTemplateInfo, long destHostId) {
public AsyncCallFuture<VolumeApiResult> createManagedStorageVolumeFromTemplateAsync(VolumeInfo volumeInfo, long destDataStoreId, TemplateInfo srcTemplateInfo, long destHostId) {
PrimaryDataStore destPrimaryDataStore = dataStoreMgr.getPrimaryDataStore(destDataStoreId);
Host destHost = _hostDao.findById(destHostId);
@ -1121,9 +1093,7 @@ public class VolumeServiceImpl implements VolumeService {
throw new CloudRuntimeException("Destination host should not be null.");
}
Boolean storageCanCloneVolume = new Boolean(
destPrimaryDataStore.getDriver().getCapabilities().get(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_VOLUME.toString())
);
Boolean storageCanCloneVolume = new Boolean(destPrimaryDataStore.getDriver().getCapabilities().get(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_VOLUME.toString()));
boolean computeSupportsVolumeClone = computeSupportsVolumeClone(destHost.getDataCenterId(), destHost.getHypervisorType());
@ -1146,10 +1116,7 @@ public class VolumeServiceImpl implements VolumeService {
VMTemplateStoragePoolVO templatePoolRef = _tmpltPoolDao.findByPoolTemplate(destPrimaryDataStore.getId(), templateOnPrimary.getId());
if (templatePoolRef == null) {
throw new CloudRuntimeException("Failed to find template " +
srcTemplateInfo.getUniqueName() + " in storage pool " +
destPrimaryDataStore.getId()
);
throw new CloudRuntimeException("Failed to find template " + srcTemplateInfo.getUniqueName() + " in storage pool " + destPrimaryDataStore.getId());
}
if (templatePoolRef.getDownloadState() == Status.NOT_DOWNLOADED) {
@ -1190,8 +1157,7 @@ public class VolumeServiceImpl implements VolumeService {
Collections.shuffle(clusters, new Random(System.nanoTime()));
clusters:
for (Cluster cluster : clusters) {
clusters: for (Cluster cluster : clusters) {
if (cluster.getAllocationState() == AllocationState.Enabled) {
List<HostVO> hosts = _hostDao.findByClusterId(cluster.getId());
@ -1203,13 +1169,11 @@ public class VolumeServiceImpl implements VolumeService {
if (computeClusterMustSupportResign) {
if (clusterDao.getSupportsResigning(cluster.getId())) {
return host;
}
else {
} else {
// no other host in the cluster in question should be able to satisfy our requirements here, so move on to the next cluster
continue clusters;
}
}
else {
} else {
return host;
}
}
@ -1237,17 +1201,15 @@ public class VolumeServiceImpl implements VolumeService {
return future;
}
@Override
@DB
public boolean destroyVolume(long volumeId) throws ConcurrentOperationException {
@Override
public void destroyVolume(long volumeId) {
// mark volume entry in volumes table as destroy state
VolumeInfo vol = volFactory.getVolume(volumeId);
vol.stateTransit(Volume.Event.DestroyRequested);
snapshotMgr.deletePoliciesForVolume(volumeId);
vol.stateTransit(Volume.Event.OperationSucceeded);
return true;
}
@Override
@ -1259,8 +1221,7 @@ public class VolumeServiceImpl implements VolumeService {
volumeOnStore.processEvent(Event.CreateOnlyRequested);
_volumeDetailsDao.addDetail(volume.getId(), SNAPSHOT_ID, Long.toString(snapshot.getId()), false);
CreateVolumeFromBaseImageContext<VolumeApiResult> context =
new CreateVolumeFromBaseImageContext<VolumeApiResult>(null, volume, store, volumeOnStore, future, snapshot);
CreateVolumeFromBaseImageContext<VolumeApiResult> context = new CreateVolumeFromBaseImageContext<VolumeApiResult>(null, volume, store, volumeOnStore, future, snapshot);
AsyncCallbackDispatcher<VolumeServiceImpl, CopyCommandResult> caller = AsyncCallbackDispatcher.create(this);
caller.setCallback(caller.getTarget().createVolumeFromSnapshotCallback(null, null)).setContext(context);
motionSrv.copyAsync(snapshot, volumeOnStore, caller);
@ -1274,8 +1235,7 @@ public class VolumeServiceImpl implements VolumeService {
return future;
}
protected Void createVolumeFromSnapshotCallback(AsyncCallbackDispatcher<VolumeServiceImpl, CopyCommandResult> callback,
CreateVolumeFromBaseImageContext<VolumeApiResult> context) {
protected Void createVolumeFromSnapshotCallback(AsyncCallbackDispatcher<VolumeServiceImpl, CopyCommandResult> callback, CreateVolumeFromBaseImageContext<VolumeApiResult> context) {
CopyCommandResult result = callback.getResult();
VolumeInfo volume = (VolumeInfo)context.templateOnStore;
SnapshotInfo snapshot = context.snapshot;
@ -1332,8 +1292,7 @@ public class VolumeServiceImpl implements VolumeService {
final VolumeInfo destVolume;
final AsyncCallFuture<VolumeApiResult> future;
public CopyVolumeContext(AsyncCompletionCallback<T> callback, AsyncCallFuture<VolumeApiResult> future, VolumeInfo srcVolume, VolumeInfo destVolume,
DataStore destStore) {
public CopyVolumeContext(AsyncCompletionCallback<T> callback, AsyncCallFuture<VolumeApiResult> future, VolumeInfo srcVolume, VolumeInfo destVolume, DataStore destStore) {
super(callback);
this.srcVolume = srcVolume;
this.destVolume = destVolume;
@ -1370,8 +1329,7 @@ public class VolumeServiceImpl implements VolumeService {
}
}
protected Void
copyVolumeFromImageToPrimaryCallback(AsyncCallbackDispatcher<VolumeServiceImpl, CopyCommandResult> callback, CopyVolumeContext<VolumeApiResult> context) {
protected Void copyVolumeFromImageToPrimaryCallback(AsyncCallbackDispatcher<VolumeServiceImpl, CopyCommandResult> callback, CopyVolumeContext<VolumeApiResult> context) {
VolumeInfo srcVolume = context.srcVolume;
VolumeInfo destVolume = context.destVolume;
CopyCommandResult result = callback.getResult();
@ -1424,8 +1382,7 @@ public class VolumeServiceImpl implements VolumeService {
}
}
protected Void
copyVolumeFromPrimaryToImageCallback(AsyncCallbackDispatcher<VolumeServiceImpl, CopyCommandResult> callback, CopyVolumeContext<VolumeApiResult> context) {
protected Void copyVolumeFromPrimaryToImageCallback(AsyncCallbackDispatcher<VolumeServiceImpl, CopyCommandResult> callback, CopyVolumeContext<VolumeApiResult> context) {
VolumeInfo srcVolume = context.srcVolume;
VolumeInfo destVolume = context.destVolume;
CopyCommandResult result = callback.getResult();
@ -1542,8 +1499,7 @@ public class VolumeServiceImpl implements VolumeService {
/**
* @param callback
*/
public MigrateVolumeContext(AsyncCompletionCallback<T> callback, AsyncCallFuture<VolumeApiResult> future, VolumeInfo srcVolume, VolumeInfo destVolume,
DataStore destStore) {
public MigrateVolumeContext(AsyncCompletionCallback<T> callback, AsyncCallFuture<VolumeApiResult> future, VolumeInfo srcVolume, VolumeInfo destVolume, DataStore destStore) {
super(callback);
this.srcVolume = srcVolume;
this.destVolume = destVolume;
@ -1654,8 +1610,7 @@ public class VolumeServiceImpl implements VolumeService {
return future;
}
protected Void
migrateVmWithVolumesCallBack(AsyncCallbackDispatcher<VolumeServiceImpl, CopyCommandResult> callback, MigrateVmWithVolumesContext<CommandResult> context) {
protected Void migrateVmWithVolumesCallBack(AsyncCallbackDispatcher<VolumeServiceImpl, CopyCommandResult> callback, MigrateVmWithVolumesContext<CommandResult> context) {
Map<VolumeInfo, DataStore> volumeToPool = context.volumeToPool;
CopyCommandResult result = callback.getResult();
AsyncCallFuture<CommandResult> future = context.future;
@ -1715,7 +1670,7 @@ public class VolumeServiceImpl implements VolumeService {
}
@Override
public Pair<EndPoint,DataObject> registerVolumeForPostUpload(VolumeInfo volume, DataStore store) {
public Pair<EndPoint, DataObject> registerVolumeForPostUpload(VolumeInfo volume, DataStore store) {
EndPoint ep = _epSelector.select(store);
if (ep == null) {
@ -1724,7 +1679,7 @@ public class VolumeServiceImpl implements VolumeService {
throw new CloudRuntimeException(errorMessage);
}
DataObject volumeOnStore = store.create(volume);
return new Pair<>(ep,volumeOnStore);
return new Pair<>(ep, volumeOnStore);
}
protected Void registerVolumeCallback(AsyncCallbackDispatcher<VolumeServiceImpl, CreateCmdResult> callback, CreateVolumeContext<VolumeApiResult> context) {
@ -1751,21 +1706,20 @@ public class VolumeServiceImpl implements VolumeService {
if (volStore != null) {
physicalSize = volStore.getPhysicalSize();
} else {
s_logger.warn("No entry found in volume_store_ref for volume id: " + vo.getId() + " and image store id: " + ds.getId() +
" at the end of uploading volume!");
s_logger.warn("No entry found in volume_store_ref for volume id: " + vo.getId() + " and image store id: " + ds.getId() + " at the end of uploading volume!");
}
Scope dsScope = ds.getScope();
if (dsScope.getScopeType() == ScopeType.ZONE) {
if (dsScope.getScopeId() != null) {
UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_UPLOAD, vo.getAccountId(), dsScope.getScopeId(), vo.getId(), vo.getName(), null,
null, physicalSize, vo.getSize(), Volume.class.getName(), vo.getUuid());
UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_UPLOAD, vo.getAccountId(), dsScope.getScopeId(), vo.getId(), vo.getName(), null, null, physicalSize, vo.getSize(),
Volume.class.getName(), vo.getUuid());
} else {
s_logger.warn("Zone scope image store " + ds.getId() + " has a null scope id");
}
} else if (dsScope.getScopeType() == ScopeType.REGION) {
// publish usage event for region-wide image store using a -1 zoneId for 4.2, need to revisit post-4.2
UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_UPLOAD, vo.getAccountId(), -1, vo.getId(), vo.getName(), null, null, physicalSize,
vo.getSize(), Volume.class.getName(), vo.getUuid());
UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_UPLOAD, vo.getAccountId(), -1, vo.getId(), vo.getName(), null, null, physicalSize, vo.getSize(),
Volume.class.getName(), vo.getUuid());
_resourceLimitMgr.incrementResourceCount(vo.getAccountId(), ResourceType.secondary_storage, vo.getSize());
}
@ -1829,8 +1783,8 @@ public class VolumeServiceImpl implements VolumeService {
if (ep != null) {
VolumeVO volume = volDao.findById(volumeId);
PrimaryDataStore primaryDataStore = this.dataStoreMgr.getPrimaryDataStore(volume.getPoolId());
ResizeVolumeCommand resizeCmd = new ResizeVolumeCommand(volume.getPath(), new StorageFilerTO(primaryDataStore),
volume.getSize(), newSize, true, instanceName, primaryDataStore.isManaged(), volume.get_iScsiName());
ResizeVolumeCommand resizeCmd = new ResizeVolumeCommand(volume.getPath(), new StorageFilerTO(primaryDataStore), volume.getSize(), newSize, true, instanceName,
primaryDataStore.isManaged(), volume.get_iScsiName());
answer = ep.sendMessage(resizeCmd);
} else {
@ -1903,8 +1857,8 @@ public class VolumeServiceImpl implements VolumeService {
for (VolumeDataStoreVO volumeStore : dbVolumes) {
VolumeVO volume = volDao.findById(volumeStore.getVolumeId());
if (volume == null) {
s_logger.warn("Volume_store_ref table shows that volume " + volumeStore.getVolumeId() + " is on image store " + storeId +
", but the volume is not found in volumes table, potentially some bugs in deleteVolume, so we just treat this volume to be deleted and mark it as destroyed");
s_logger.warn("Volume_store_ref table shows that volume " + volumeStore.getVolumeId() + " is on image store " + storeId
+ ", but the volume is not found in volumes table, potentially some bugs in deleteVolume, so we just treat this volume to be deleted and mark it as destroyed");
volumeStore.setDestroyed(true);
_volumeStoreDao.update(volumeStore.getId(), volumeStore);
continue;
@ -1929,7 +1883,8 @@ public class VolumeServiceImpl implements VolumeService {
VolumeObject volObj = (VolumeObject)volFactory.getVolume(volume.getId());
volObj.processEvent(Event.OperationFailed);
} else if (volumeStore.getDownloadUrl() == null) {
msg = "Volume (" + volume.getUuid() + ") with install path " + volInfo.getInstallPath() + " is corrupted, please check in image store: " + volumeStore.getDataStoreId();
msg = "Volume (" + volume.getUuid() + ") with install path " + volInfo.getInstallPath() + " is corrupted, please check in image store: "
+ volumeStore.getDataStoreId();
s_logger.warn(msg);
} else {
s_logger.info("Removing volume_store_ref entry for corrupted volume " + volume.getName());
@ -1959,8 +1914,8 @@ public class VolumeServiceImpl implements VolumeService {
if (volInfo.getSize() > 0) {
try {
_resourceLimitMgr.checkResourceLimit(_accountMgr.getAccount(volume.getAccountId()),
com.cloud.configuration.Resource.ResourceType.secondary_storage, volInfo.getSize() - volInfo.getPhysicalSize());
_resourceLimitMgr.checkResourceLimit(_accountMgr.getAccount(volume.getAccountId()), com.cloud.configuration.Resource.ResourceType.secondary_storage,
volInfo.getSize() - volInfo.getPhysicalSize());
} catch (ResourceAllocationException e) {
s_logger.warn(e.getMessage());
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_RESOURCE_LIMIT_EXCEEDED, volume.getDataCenterId(), volume.getPodId(), e.getMessage(), e.getMessage());
@ -2021,7 +1976,7 @@ public class VolumeServiceImpl implements VolumeService {
}
// Delete volumes which are not present on DB.
for (Map.Entry<Long,TemplateProp> entry : volumeInfos.entrySet()) {
for (Map.Entry<Long, TemplateProp> entry : volumeInfos.entrySet()) {
Long uniqueName = entry.getKey();
TemplateProp tInfo = entry.getValue();
@ -2091,7 +2046,7 @@ public class VolumeServiceImpl implements VolumeService {
s_logger.error("Take snapshot: " + volume.getId() + " failed", cre);
throw cre;
} catch (Exception e) {
if(s_logger.isDebugEnabled()) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("unknown exception while taking snapshot for volume " + volume.getId() + " was caught", e);
}
throw new CloudRuntimeException("Failed to take snapshot", e);

StorageManagerImpl.java

@ -38,12 +38,7 @@ import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotService;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import org.apache.cloudstack.api.command.admin.storage.CancelPrimaryStorageMaintenanceCmd;
import org.apache.cloudstack.api.command.admin.storage.CreateSecondaryStagingStoreCmd;
import org.apache.cloudstack.api.command.admin.storage.CreateStoragePoolCmd;
@ -64,11 +59,13 @@ import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector;
import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;
import org.apache.cloudstack.engine.subsystem.api.storage.ImageStoreProvider;
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotService;
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateDataFactory;
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateService;
@ -82,8 +79,8 @@ import org.apache.cloudstack.framework.async.AsyncCallFuture;
import org.apache.cloudstack.framework.config.ConfigKey;
import org.apache.cloudstack.framework.config.Configurable;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
import org.apache.cloudstack.storage.command.DettachCommand;
import org.apache.cloudstack.managed.context.ManagedContextRunnable;
import org.apache.cloudstack.storage.command.DettachCommand;
import org.apache.cloudstack.storage.datastore.db.ImageStoreDao;
import org.apache.cloudstack.storage.datastore.db.ImageStoreDetailsDao;
import org.apache.cloudstack.storage.datastore.db.ImageStoreVO;
@ -98,6 +95,9 @@ import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO;
import org.apache.cloudstack.storage.image.datastore.ImageStoreEntity;
import org.apache.cloudstack.storage.to.VolumeObjectTO;
import org.apache.commons.lang3.StringUtils;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.agent.AgentManager;
import com.cloud.agent.api.Answer;
@ -176,7 +176,6 @@ import com.cloud.user.dao.UserDao;
import com.cloud.utils.DateUtil;
import com.cloud.utils.NumbersUtil;
import com.cloud.utils.Pair;
import com.cloud.utils.StringUtils;
import com.cloud.utils.UriUtils;
import com.cloud.utils.component.ComponentContext;
import com.cloud.utils.component.ManagerBase;
@ -467,7 +466,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
}
@Override
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
public boolean configure(String name, Map<String, Object> params) {
Map<String, String> configs = _configDao.getConfiguration("management-server", params);
@ -476,8 +475,8 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
_agentMgr.registerForHostEvents(new StoragePoolMonitor(this, _storagePoolDao, _dataStoreProviderMgr), true, false, true);
s_logger.info("Storage cleanup enabled: " + StorageCleanupEnabled.value() + ", interval: " + StorageCleanupInterval.value() + ", delay: " + StorageCleanupDelay.value() +
", template cleanup enabled: " + TemplateCleanupEnabled.value());
s_logger.info("Storage cleanup enabled: " + StorageCleanupEnabled.value() + ", interval: " + StorageCleanupInterval.value() + ", delay: " + StorageCleanupDelay.value()
+ ", template cleanup enabled: " + TemplateCleanupEnabled.value());
String cleanupInterval = configs.get("extract.url.cleanup.interval");
_downloadUrlCleanupInterval = NumbersUtil.parseInt(cleanupInterval, 7200);
@ -525,7 +524,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
@Override
public String getStoragePoolTags(long poolId) {
return StringUtils.listToCsvTags(_storagePoolDao.searchForStoragePoolTags(poolId));
return com.cloud.utils.StringUtils.listToCsvTags(_storagePoolDao.searchForStoragePoolTags(poolId));
}
@Override
@ -586,8 +585,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
}
if (pool == null) {
//the path can be different, but if they have the same uuid, assume they are the same storage
pool = _storagePoolDao.findPoolByHostPath(host.getDataCenterId(), host.getPodId(), hostAddress, null,
pInfo.getUuid());
pool = _storagePoolDao.findPoolByHostPath(host.getDataCenterId(), host.getPodId(), hostAddress, null, pInfo.getUuid());
if (pool != null) {
s_logger.debug("Found a storage pool: " + pInfo.getUuid() + ", but with different hostpath " + pInfo.getHostPath() + ", still treat it as the same pool");
}
@ -628,8 +626,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
}
@Override
public PrimaryDataStoreInfo createPool(CreateStoragePoolCmd cmd) throws ResourceInUseException, IllegalArgumentException, UnknownHostException,
ResourceUnavailableException {
public PrimaryDataStoreInfo createPool(CreateStoragePoolCmd cmd) throws ResourceInUseException, IllegalArgumentException, UnknownHostException, ResourceUnavailableException {
String providerName = cmd.getStorageProviderName();
DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(providerName);
@ -675,7 +672,8 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
} else {
throw new InvalidParameterValueException("Missing parameter hypervisor. Hypervisor type is required to create zone wide primary storage.");
}
if (hypervisorType != HypervisorType.KVM && hypervisorType != HypervisorType.VMware && hypervisorType != HypervisorType.Hyperv && hypervisorType != HypervisorType.LXC && hypervisorType != HypervisorType.Any) {
if (hypervisorType != HypervisorType.KVM && hypervisorType != HypervisorType.VMware && hypervisorType != HypervisorType.Hyperv && hypervisorType != HypervisorType.LXC
&& hypervisorType != HypervisorType.Any) {
throw new InvalidParameterValueException("zone wide storage pool is not supported for hypervisor type " + hypervisor);
}
}
@ -716,7 +714,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
lifeCycle.attachZone(store, zoneScope, hypervisorType);
}
} catch (Exception e) {
s_logger.debug("Failed to add data store: "+e.getMessage(), e);
s_logger.debug("Failed to add data store: " + e.getMessage(), e);
try {
// clean up the db, just absorb the exception thrown in deletion with error logged, so that user can get error for adding data store
// not deleting data store.
@ -726,7 +724,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
} catch (Exception ex) {
s_logger.debug("Failed to clean up storage pool: " + ex.getMessage());
}
throw new CloudRuntimeException("Failed to add data store: "+e.getMessage(), e);
throw new CloudRuntimeException("Failed to add data store: " + e.getMessage(), e);
}
return (PrimaryDataStoreInfo)_dataStoreMgr.getDataStore(store.getId(), DataStoreRole.Primary);
@ -752,8 +750,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
@ActionEvent(eventType = EventTypes.EVENT_DISABLE_PRIMARY_STORAGE, eventDescription = "disable storage pool")
private void disablePrimaryStoragePool(StoragePoolVO primaryStorage) {
if (!primaryStorage.getStatus().equals(StoragePoolStatus.Up)) {
throw new InvalidParameterValueException("Primary storage with id " + primaryStorage.getId() + " cannot be disabled. Storage pool state : " +
primaryStorage.getStatus().toString());
throw new InvalidParameterValueException("Primary storage with id " + primaryStorage.getId() + " cannot be disabled. Storage pool state : " + primaryStorage.getStatus().toString());
}
DataStoreProvider provider = _dataStoreProviderMgr.getDataStoreProvider(primaryStorage.getStorageProviderName());
@ -765,8 +762,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
@ActionEvent(eventType = EventTypes.EVENT_ENABLE_PRIMARY_STORAGE, eventDescription = "enable storage pool")
private void enablePrimaryStoragePool(StoragePoolVO primaryStorage) {
if (!primaryStorage.getStatus().equals(StoragePoolStatus.Disabled)) {
throw new InvalidParameterValueException("Primary storage with id " + primaryStorage.getId() + " cannot be enabled. Storage pool state : " +
primaryStorage.getStatus().toString());
throw new InvalidParameterValueException("Primary storage with id " + primaryStorage.getId() + " cannot be enabled. Storage pool state : " + primaryStorage.getStatus().toString());
}
DataStoreProvider provider = _dataStoreProviderMgr.getDataStoreProvider(primaryStorage.getStorageProviderName());
@ -863,8 +859,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
final Answer answer = _agentMgr.easySend(hostId, cmd);
if (answer == null || !answer.getResult()) {
String errMsg = "Error interacting with host (related to DeleteStoragePoolCommand)" +
(StringUtils.isNotBlank(answer.getDetails()) ? ": " + answer.getDetails() : "");
String errMsg = "Error interacting with host (related to DeleteStoragePoolCommand)" + (StringUtils.isNotBlank(answer.getDetails()) ? ": " + answer.getDetails() : "");
s_logger.error(errMsg);
@ -965,8 +960,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
// All this is for the inaccuracy of floats for big number multiplication.
BigDecimal overProvFactor = getStorageOverProvisioningFactor(storagePool.getId());
totalOverProvCapacity = overProvFactor.multiply(new BigDecimal(storagePool.getCapacityBytes())).longValue();
s_logger.debug("Found storage pool " + storagePool.getName() + " of type " + storagePool.getPoolType().toString() + " with overprovisioning factor "
+ overProvFactor.toString());
s_logger.debug("Found storage pool " + storagePool.getName() + " of type " + storagePool.getPoolType().toString() + " with overprovisioning factor " + overProvFactor.toString());
s_logger.debug("Total over provisioned capacity calculated is " + overProvFactor + " * " + storagePool.getCapacityBytes());
} else {
s_logger.debug("Found storage pool " + storagePool.getName() + " of type " + storagePool.getPoolType().toString());
@ -992,18 +986,17 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
if (storagePool.getScope() == ScopeType.HOST) {
List<StoragePoolHostVO> stoargePoolHostVO = _storagePoolHostDao.listByPoolId(storagePool.getId());
if(stoargePoolHostVO != null && !stoargePoolHostVO.isEmpty()){
if (stoargePoolHostVO != null && !stoargePoolHostVO.isEmpty()) {
HostVO host = _hostDao.findById(stoargePoolHostVO.get(0).getHostId());
if(host != null){
if (host != null) {
capacityState = (host.getResourceState() == ResourceState.Disabled) ? CapacityState.Disabled : CapacityState.Enabled;
}
}
}
if (capacities.size() == 0) {
CapacityVO capacity =
new CapacityVO(storagePool.getId(), storagePool.getDataCenterId(), storagePool.getPodId(), storagePool.getClusterId(), allocated, totalOverProvCapacity,
CapacityVO capacity = new CapacityVO(storagePool.getId(), storagePool.getDataCenterId(), storagePool.getPodId(), storagePool.getClusterId(), allocated, totalOverProvCapacity,
capacityType);
capacity.setCapacityState(capacityState);
_capacityDao.persist(capacity);
@ -1016,8 +1009,8 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
_capacityDao.update(capacity.getId(), capacity);
}
}
s_logger.debug("Successfully set Capacity - " + totalOverProvCapacity + " for capacity type - " + capacityType + " , DataCenterId - " +
storagePool.getDataCenterId() + ", HostOrPoolId - " + storagePool.getId() + ", PodId " + storagePool.getPodId());
s_logger.debug("Successfully set Capacity - " + totalOverProvCapacity + " for capacity type - " + capacityType + " , DataCenterId - " + storagePool.getDataCenterId() + ", HostOrPoolId - "
+ storagePool.getId() + ", PodId " + storagePool.getPodId());
}
@Override
@ -1045,8 +1038,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
hostIds.removeAll(hostIdsToAvoid);
}
if (hostIds == null || hostIds.isEmpty()) {
throw new StorageUnavailableException("Unable to send command to the pool " + pool.getId() + " due to there is no enabled hosts up in this cluster",
pool.getId());
throw new StorageUnavailableException("Unable to send command to the pool " + pool.getId() + " due to there is no enabled hosts up in this cluster", pool.getId());
}
for (Long hostId : hostIds) {
try {
@ -1088,20 +1080,19 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
try {
List<VMTemplateStoragePoolVO> unusedTemplatesInPool = _tmpltMgr.getUnusedTemplatesInPool(pool);
s_logger.debug("Storage pool garbage collector found " + unusedTemplatesInPool.size() + " templates to clean up in storage pool: " +
pool.getName());
s_logger.debug("Storage pool garbage collector found " + unusedTemplatesInPool.size() + " templates to clean up in storage pool: " + pool.getName());
for (VMTemplateStoragePoolVO templatePoolVO : unusedTemplatesInPool) {
if (templatePoolVO.getDownloadState() != VMTemplateStorageResourceAssoc.Status.DOWNLOADED) {
s_logger.debug("Storage pool garbage collector is skipping template with ID: " + templatePoolVO.getTemplateId() +
" on pool " + templatePoolVO.getPoolId() + " because it is not completely downloaded.");
s_logger.debug("Storage pool garbage collector is skipping template with ID: " + templatePoolVO.getTemplateId() + " on pool " + templatePoolVO.getPoolId()
+ " because it is not completely downloaded.");
continue;
}
if (!templatePoolVO.getMarkedForGC()) {
templatePoolVO.setMarkedForGC(true);
_vmTemplatePoolDao.update(templatePoolVO.getId(), templatePoolVO);
s_logger.debug("Storage pool garbage collector has marked template with ID: " + templatePoolVO.getTemplateId() +
" on pool " + templatePoolVO.getPoolId() + " for garbage collection.");
s_logger.debug("Storage pool garbage collector has marked template with ID: " + templatePoolVO.getTemplateId() + " on pool " + templatePoolVO.getPoolId()
+ " for garbage collection.");
continue;
}
@ -1115,7 +1106,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
//destroy snapshots in destroying state in snapshot_store_ref
List<SnapshotDataStoreVO> ssSnapshots = _snapshotStoreDao.listByState(ObjectInDataStoreStateMachine.State.Destroying);
for(SnapshotDataStoreVO ssSnapshotVO : ssSnapshots){
for (SnapshotDataStoreVO ssSnapshotVO : ssSnapshots) {
try {
_snapshotService.deleteSnapshot(snapshotFactory.getSnapshot(ssSnapshotVO.getSnapshotId(), DataStoreRole.Image));
} catch (Exception e) {
@ -1125,7 +1116,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
cleanupSecondaryStorage(recurring);
List<VolumeVO> vols = _volsDao.listVolumesToBeDestroyed(new Date(System.currentTimeMillis() - ((long) StorageCleanupDelay.value() << 10)));
List<VolumeVO> vols = _volsDao.listVolumesToBeDestroyed(new Date(System.currentTimeMillis() - ((long)StorageCleanupDelay.value() << 10)));
for (VolumeVO vol : vols) {
try {
// If this fails, just log a warning. It's ideal if we clean up the host-side clustered file
@ -1179,10 +1170,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
Host host = _hostDao.findById(ep.getId());
if (host != null && host.getManagementServerId() != null) {
if (_serverId == host.getManagementServerId().longValue()) {
if (!volService.destroyVolume(volume.getId())) {
s_logger.warn("Unable to destroy uploaded volume " + volume.getUuid());
continue;
}
volService.destroyVolume(volume.getId());
// decrement volume resource count
_resourceLimitMgr.decrementResourceCount(volume.getAccountId(), ResourceType.volume, volume.isDisplayVolume());
// expunge volume from secondary if volume is on image store
@ -1286,8 +1274,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
if (cluster.getHypervisorType() == HypervisorType.KVM) {
volService.revokeAccess(volumeInfo, host, volumeInfo.getDataStore());
}
else {
} else {
DataTO volTO = volFactory.getVolume(volume.getId()).getTO();
DiskTO disk = new DiskTO(volTO, volume.getDeviceId(), volume.getPath(), volume.getVolumeType());
@ -1367,8 +1354,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
try {
long storeId = store.getId();
List<TemplateDataStoreVO> destroyedTemplateStoreVOs = _templateStoreDao.listDestroyed(storeId);
s_logger.debug("Secondary storage garbage collector found " + destroyedTemplateStoreVOs.size() +
" templates to cleanup on template_store_ref for store: " + store.getName());
s_logger.debug("Secondary storage garbage collector found " + destroyedTemplateStoreVOs.size() + " templates to cleanup on template_store_ref for store: " + store.getName());
for (TemplateDataStoreVO destroyedTemplateStoreVO : destroyedTemplateStoreVOs) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Deleting template store DB entry: " + destroyedTemplateStoreVO);
@ -1384,8 +1370,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
for (DataStore store : imageStores) {
try {
List<SnapshotDataStoreVO> destroyedSnapshotStoreVOs = _snapshotStoreDao.listDestroyed(store.getId());
s_logger.debug("Secondary storage garbage collector found " + destroyedSnapshotStoreVOs.size() +
" snapshots to cleanup on snapshot_store_ref for store: " + store.getName());
s_logger.debug("Secondary storage garbage collector found " + destroyedSnapshotStoreVOs.size() + " snapshots to cleanup on snapshot_store_ref for store: " + store.getName());
for (SnapshotDataStoreVO destroyedSnapshotStoreVO : destroyedSnapshotStoreVOs) {
// check if this snapshot has child
SnapshotInfo snap = snapshotFactory.getSnapshot(destroyedSnapshotStoreVO.getSnapshotId(), store);
@ -1416,8 +1401,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
for (DataStore store : imageStores) {
try {
List<VolumeDataStoreVO> destroyedStoreVOs = _volumeStoreDao.listDestroyed(store.getId());
s_logger.debug("Secondary storage garbage collector found " + destroyedStoreVOs.size() + " volumes to cleanup on volume_store_ref for store: " +
store.getName());
s_logger.debug("Secondary storage garbage collector found " + destroyedStoreVOs.size() + " volumes to cleanup on volume_store_ref for store: " + store.getName());
for (VolumeDataStoreVO destroyedStoreVO : destroyedStoreVOs) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Deleting volume store DB entry: " + destroyedStoreVO);
@ -1459,8 +1443,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
}
if (!primaryStorage.getStatus().equals(StoragePoolStatus.Up) && !primaryStorage.getStatus().equals(StoragePoolStatus.ErrorInMaintenance)) {
throw new InvalidParameterValueException("Primary storage with id " + primaryStorageId + " is not ready for migration, as the status is:" +
primaryStorage.getStatus().toString());
throw new InvalidParameterValueException("Primary storage with id " + primaryStorageId + " is not ready for migration, as the status is:" + primaryStorage.getStatus().toString());
}
DataStoreProvider provider = _dataStoreProviderMgr.getDataStoreProvider(primaryStorage.getStorageProviderName());
@ -1486,8 +1469,8 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
}
if (primaryStorage.getStatus().equals(StoragePoolStatus.Up) || primaryStorage.getStatus().equals(StoragePoolStatus.PrepareForMaintenance)) {
throw new StorageUnavailableException("Primary storage with id " + primaryStorageId + " is not ready to complete migration, as the status is:" +
primaryStorage.getStatus().toString(), primaryStorageId);
throw new StorageUnavailableException("Primary storage with id " + primaryStorageId + " is not ready to complete migration, as the status is:" + primaryStorage.getStatus().toString(),
primaryStorageId);
}
DataStoreProvider provider = _dataStoreProviderMgr.getDataStoreProvider(primaryStorage.getStorageProviderName());
@ -1498,7 +1481,6 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
return (PrimaryDataStoreInfo)_dataStoreMgr.getDataStore(primaryStorage.getId(), DataStoreRole.Primary);
}
protected class StorageGarbageCollector extends ManagedContextRunnable {
public StorageGarbageCollector() {
@ -1533,9 +1515,8 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
for (Long poolId : poolIds) {
StoragePoolVO pool = _storagePoolDao.findById(poolId);
// check if pool is in an inconsistent state
if (pool != null &&
(pool.getStatus().equals(StoragePoolStatus.ErrorInMaintenance) || pool.getStatus().equals(StoragePoolStatus.PrepareForMaintenance) || pool.getStatus()
.equals(StoragePoolStatus.CancelMaintenance))) {
if (pool != null && (pool.getStatus().equals(StoragePoolStatus.ErrorInMaintenance) || pool.getStatus().equals(StoragePoolStatus.PrepareForMaintenance)
|| pool.getStatus().equals(StoragePoolStatus.CancelMaintenance))) {
_storagePoolWorkDao.removePendingJobsOnMsRestart(vo.getMsid(), poolId);
pool.setStatus(StoragePoolStatus.ErrorInMaintenance);
_storagePoolDao.update(poolId, pool);
@ -1740,13 +1721,13 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
if (stats != null) {
double usedPercentage = ((double)stats.getByteUsed() / (double)totalSize);
if (s_logger.isDebugEnabled()) {
s_logger.debug("Checking pool " + pool.getId() + " for storage, totalSize: " + pool.getCapacityBytes() + ", usedBytes: " + stats.getByteUsed() +
", usedPct: " + usedPercentage + ", disable threshold: " + storageUsedThreshold);
s_logger.debug("Checking pool " + pool.getId() + " for storage, totalSize: " + pool.getCapacityBytes() + ", usedBytes: " + stats.getByteUsed() + ", usedPct: " + usedPercentage
+ ", disable threshold: " + storageUsedThreshold);
}
if (usedPercentage >= storageUsedThreshold) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Insufficient space on pool: " + pool.getId() + " since its usage percentage: " + usedPercentage +
" has crossed the pool.storage.capacity.disablethreshold: " + storageUsedThreshold);
s_logger.debug("Insufficient space on pool: " + pool.getId() + " since its usage percentage: " + usedPercentage + " has crossed the pool.storage.capacity.disablethreshold: "
+ storageUsedThreshold);
}
return false;
}
@ -1804,7 +1785,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
}
// allocated space includes templates
if(s_logger.isDebugEnabled()) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Destination pool id: " + pool.getId());
}
@ -1853,12 +1834,10 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
if (pool.getPoolType().supportsOverProvisioning()) {
BigDecimal overProvFactor = getStorageOverProvisioningFactor(pool.getId());
totalOverProvCapacity = overProvFactor.multiply(new BigDecimal(pool.getCapacityBytes())).longValue();
s_logger.debug("Found storage pool " + poolVO.getName() + " of type " + pool.getPoolType().toString() + " with over-provisioning factor " +
overProvFactor.toString());
s_logger.debug("Total over-provisioned capacity calculated is " + overProvFactor + " * " + pool.getCapacityBytes());
s_logger.debug("Found storage pool " + poolVO.getName() + " of type " + pool.getPoolType().toString() + " with overprovisioning factor " + overProvFactor.toString());
s_logger.debug("Total over provisioned capacity calculated is " + overProvFactor + " * " + pool.getCapacityBytes());
} else {
totalOverProvCapacity = pool.getCapacityBytes();
@ -1869,31 +1848,20 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
double storageAllocatedThreshold = CapacityManager.StorageAllocatedCapacityDisableThreshold.valueIn(pool.getDataCenterId());
if (s_logger.isDebugEnabled()) {
s_logger.debug("Checking pool with ID " + pool.getId() + " for volume allocation " + volumes.toString() + ", maxSize: " +
totalOverProvCapacity + ", totalAllocatedSize: " + allocatedSizeWithTemplate + ", askingSize: " + totalAskingSize +
", allocated disable threshold: " + storageAllocatedThreshold);
}
s_logger.debug("Checking pool: " + pool.getId() + " for volume allocation " + volumes.toString() + ", maxSize : " + totalOverProvCapacity + ", totalAllocatedSize : "
+ allocatedSizeWithTemplate + ", askingSize : " + totalAskingSize + ", allocated disable threshold: " + storageAllocatedThreshold);
double usedPercentage = (allocatedSizeWithTemplate + totalAskingSize) / (double)(totalOverProvCapacity);
if (usedPercentage > storageAllocatedThreshold) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Insufficient un-allocated capacity on the pool with ID " + pool.getId() + " for volume allocation: " + volumes.toString() +
" since its allocated percentage " + usedPercentage + " has crossed the allocated pool.storage.allocated.capacity.disablethreshold " +
storageAllocatedThreshold + ", skipping this pool");
}
s_logger.debug("Insufficient un-allocated capacity on: " + pool.getId() + " for volume allocation: " + volumes.toString() + " since its allocated percentage: " + usedPercentage
+ " has crossed the allocated pool.storage.allocated.capacity.disablethreshold: " + storageAllocatedThreshold + ", skipping this pool");
return false;
}
if (totalOverProvCapacity < (allocatedSizeWithTemplate + totalAskingSize)) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Insufficient un-allocated capacity on the pool with ID " + pool.getId() + " for volume allocation: " + volumes.toString() +
"; not enough storage, maxSize: " + totalOverProvCapacity + ", totalAllocatedSize: " + allocatedSizeWithTemplate + ", askingSize: " +
totalAskingSize);
}
s_logger.debug("Insufficient un-allocated capacity on: " + pool.getId() + " for volume allocation: " + volumes.toString() + ", not enough storage, maxSize : " + totalOverProvCapacity
+ ", totalAllocatedSize : " + allocatedSizeWithTemplate + ", askingSize : " + totalAskingSize);
return false;
}
@ -2007,8 +1975,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
}
@Override
public ImageStore discoverImageStore(String name, String url, String providerName, Long zoneId, Map details) throws IllegalArgumentException, DiscoveryException,
InvalidParameterValueException {
public ImageStore discoverImageStore(String name, String url, String providerName, Long zoneId, Map details) throws IllegalArgumentException, DiscoveryException, InvalidParameterValueException {
DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(providerName);
if (storeProvider == null) {
@ -2056,10 +2023,8 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
}
Account account = CallContext.current().getCallingAccount();
if (Grouping.AllocationState.Disabled == zone.getAllocationState()
&& !_accountMgr.isRootAdmin(account.getId())) {
PermissionDeniedException ex = new PermissionDeniedException(
"Cannot perform this operation, Zone with specified id is currently disabled");
if (Grouping.AllocationState.Disabled == zone.getAllocationState() && !_accountMgr.isRootAdmin(account.getId())) {
PermissionDeniedException ex = new PermissionDeniedException("Cannot perform this operation, Zone with specified id is currently disabled");
ex.addProxyObject(zone.getUuid(), "dcId");
throw ex;
}
@ -2080,7 +2045,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
try {
store = lifeCycle.initialize(params);
} catch (Exception e) {
if(s_logger.isDebugEnabled()) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Failed to add data store: " + e.getMessage(), e);
}
throw new CloudRuntimeException("Failed to add data store: " + e.getMessage(), e);
@ -2106,8 +2071,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
}
@Override
public ImageStore migrateToObjectStore(String name, String url, String providerName, Map details) throws IllegalArgumentException, DiscoveryException,
InvalidParameterValueException {
public ImageStore migrateToObjectStore(String name, String url, String providerName, Map details) throws IllegalArgumentException, DiscoveryException, InvalidParameterValueException {
// check if current cloud is ready to migrate, we only support cloud with only NFS secondary storages
List<ImageStoreVO> imgStores = _imageStoreDao.listImageStores();
List<ImageStoreVO> nfsStores = new ArrayList<ImageStoreVO>();
@ -2261,8 +2225,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
Account account = CallContext.current().getCallingAccount();
if (Grouping.AllocationState.Disabled == zone.getAllocationState() && !_accountMgr.isRootAdmin(account.getId())) {
PermissionDeniedException ex = new PermissionDeniedException(
"Cannot perform this operation, Zone with specified id is currently disabled");
PermissionDeniedException ex = new PermissionDeniedException("Cannot perform this operation, Zone with specified id is currently disabled");
ex.addProxyObject(zone.getUuid(), "dcId");
throw ex;
}
@ -2281,8 +2244,8 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
try {
store = lifeCycle.initialize(params);
} catch (Exception e) {
s_logger.debug("Failed to add data store: "+e.getMessage(), e);
throw new CloudRuntimeException("Failed to add data store: "+e.getMessage(), e);
s_logger.debug("Failed to add data store: " + e.getMessage(), e);
throw new CloudRuntimeException("Failed to add data store: " + e.getMessage(), e);
}
return (ImageStore)_dataStoreMgr.getDataStore(store.getId(), DataStoreRole.ImageCache);
@ -2351,18 +2314,18 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
}
@Override
public void cleanupDownloadUrls(){
public void cleanupDownloadUrls() {
// Cleanup expired volume URLs
List<VolumeDataStoreVO> volumesOnImageStoreList = _volumeStoreDao.listVolumeDownloadUrls();
HashSet<Long> expiredVolumeIds = new HashSet<Long>();
HashSet<Long> activeVolumeIds = new HashSet<Long>();
for(VolumeDataStoreVO volumeOnImageStore : volumesOnImageStoreList){
for (VolumeDataStoreVO volumeOnImageStore : volumesOnImageStoreList) {
long volumeId = volumeOnImageStore.getVolumeId();
try {
long downloadUrlCurrentAgeInSecs = DateUtil.getTimeDifference(DateUtil.now(), volumeOnImageStore.getExtractUrlCreated());
if(downloadUrlCurrentAgeInSecs < _downloadUrlExpirationInterval){ // URL hasnt expired yet
if (downloadUrlCurrentAgeInSecs < _downloadUrlExpirationInterval) { // URL hasnt expired yet
activeVolumeIds.add(volumeId);
continue;
}
@ -2370,19 +2333,17 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
s_logger.debug("Removing download url " + volumeOnImageStore.getExtractUrl() + " for volume id " + volumeId);
// Remove it from image store
ImageStoreEntity secStore = (ImageStoreEntity) _dataStoreMgr.getDataStore(volumeOnImageStore.getDataStoreId(), DataStoreRole.Image);
ImageStoreEntity secStore = (ImageStoreEntity)_dataStoreMgr.getDataStore(volumeOnImageStore.getDataStoreId(), DataStoreRole.Image);
secStore.deleteExtractUrl(volumeOnImageStore.getInstallPath(), volumeOnImageStore.getExtractUrl(), Upload.Type.VOLUME);
// Now expunge it from DB since this entry was created only for download purpose
_volumeStoreDao.expunge(volumeOnImageStore.getId());
}catch(Throwable th){
s_logger.warn("Caught exception while deleting download url " +volumeOnImageStore.getExtractUrl() +
" for volume id " + volumeOnImageStore.getVolumeId(), th);
} catch (Throwable th) {
s_logger.warn("Caught exception while deleting download url " + volumeOnImageStore.getExtractUrl() + " for volume id " + volumeOnImageStore.getVolumeId(), th);
}
}
for(Long volumeId : expiredVolumeIds)
{
if(activeVolumeIds.contains(volumeId)) {
for (Long volumeId : expiredVolumeIds) {
if (activeVolumeIds.contains(volumeId)) {
continue;
}
Volume volume = _volumeDao.findById(volumeId);
@ -2393,27 +2354,26 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
// Cleanup expired template URLs
List<TemplateDataStoreVO> templatesOnImageStoreList = _templateStoreDao.listTemplateDownloadUrls();
for(TemplateDataStoreVO templateOnImageStore : templatesOnImageStoreList){
for (TemplateDataStoreVO templateOnImageStore : templatesOnImageStoreList) {
try {
long downloadUrlCurrentAgeInSecs = DateUtil.getTimeDifference(DateUtil.now(), templateOnImageStore.getExtractUrlCreated());
if(downloadUrlCurrentAgeInSecs < _downloadUrlExpirationInterval){ // URL hasnt expired yet
if (downloadUrlCurrentAgeInSecs < _downloadUrlExpirationInterval) { // URL hasnt expired yet
continue;
}
s_logger.debug("Removing download url " + templateOnImageStore.getExtractUrl() + " for template id " + templateOnImageStore.getTemplateId());
// Remove it from image store
ImageStoreEntity secStore = (ImageStoreEntity) _dataStoreMgr.getDataStore(templateOnImageStore.getDataStoreId(), DataStoreRole.Image);
ImageStoreEntity secStore = (ImageStoreEntity)_dataStoreMgr.getDataStore(templateOnImageStore.getDataStoreId(), DataStoreRole.Image);
secStore.deleteExtractUrl(templateOnImageStore.getInstallPath(), templateOnImageStore.getExtractUrl(), Upload.Type.TEMPLATE);
// Now remove download details from DB.
templateOnImageStore.setExtractUrl(null);
templateOnImageStore.setExtractUrlCreated(null);
_templateStoreDao.update(templateOnImageStore.getId(), templateOnImageStore);
}catch(Throwable th){
s_logger.warn("caught exception while deleting download url " +templateOnImageStore.getExtractUrl() +
" for template id " +templateOnImageStore.getTemplateId(), th);
} catch (Throwable th) {
s_logger.warn("caught exception while deleting download url " + templateOnImageStore.getExtractUrl() + " for template id " + templateOnImageStore.getTemplateId(), th);
}
}
}
@ -2504,7 +2464,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
public DiskTO getDiskWithThrottling(final DataTO volTO, final Volume.Type volumeType, final long deviceId, final String path, final long offeringId, final long diskOfferingId) {
DiskTO disk = null;
if (volTO != null && volTO instanceof VolumeObjectTO) {
VolumeObjectTO volumeTO = (VolumeObjectTO) volTO;
VolumeObjectTO volumeTO = (VolumeObjectTO)volTO;
ServiceOffering offering = _entityMgr.findById(ServiceOffering.class, offeringId);
DiskOffering diskOffering = _entityMgr.findById(DiskOffering.class, diskOfferingId);
if (volumeType == Volume.Type.ROOT) {

File diff suppressed because it is too large

File diff suppressed because it is too large