Mirror of https://github.com/apache/cloudstack.git
Merge branch '4.19'
Commit 19f79b1d94
@@ -426,3 +426,7 @@ iscsi.session.cleanup.enabled=false

# Instance Conversion from VMware to KVM through virt-v2v. Enable verbose mode
# virtv2v.verbose.enabled=false

# If set to "true", the agent will register for libvirt domain events, allowing for immediate updates on crashed or
# unexpectedly stopped VMs. Experimental, requires agent restart.
# libvirt.events.enabled=false
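To opt in to the experimental event handling described above, the flag can be uncommented and enabled in agent.properties; a minimal sketch (as noted above, the agent must be restarted for it to take effect):

    # enable experimental libvirt domain event handling
    libvirt.events.enabled=true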
@@ -695,6 +695,13 @@ public class AgentProperties{
     */
    public static final Property<Boolean> DEVELOPER = new Property<>("developer", false);

    /**
     * If set to "true", the agent will register for libvirt domain events, allowing for immediate updates on crashed or unexpectedly
     * stopped VMs. Experimental, requires agent restart.
     * Default value: <code>false</code>
     */
    public static final Property<Boolean> LIBVIRT_EVENTS_ENABLED = new Property<>("libvirt.events.enabled", false);

    /**
     * Can only be used if developer = true. This property is used to define the local bridge name and private network name.<br>
     * Data type: String.<br>
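For context, a minimal sketch of how the agent consults this flag, using the AgentPropertiesFileHandler call that appears in the LibvirtComputingResource changes below:

    if (AgentPropertiesFileHandler.getPropertyValue(AgentProperties.LIBVIRT_EVENTS_ENABLED)) {
        // register the libvirt domain lifecycle listener (see registerStatusUpdater below)
    }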
@@ -130,6 +130,8 @@ public interface VolumeOrchestrationService {

    boolean canVmRestartOnAnotherServer(long vmId);

    void saveVolumeDetails(Long diskOfferingId, Long volumeId);

    /**
     * Allocate a volume or multiple volumes in case the template is registered with the 'deploy-as-is' option, allowing multiple disks
     */
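The expectation, based on the callers added later in this diff, is that saveVolumeDetails is invoked whenever a volume is moved to a different disk offering, so the bandwidth and IOPS limit details stay in sync with the offering. A minimal sketch of that usage, mirroring the VolumeApiServiceImpl changes further down (variable names are illustrative):

    // after switching the volume to another disk offering
    volume.setDiskOfferingId(newDiskOfferingId);
    // copy or clear the BANDWIDTH_LIMIT_IN_MBPS / IOPS_LIMIT details from the new offering
    _volumeMgr.saveVolumeDetails(newDiskOfferingId, volume.getId());
    _volsDao.update(volume.getId(), volume);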
@@ -864,18 +864,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
        vol.setFormat(getSupportedImageFormatForCluster(vm.getHypervisorType()));
        vol = _volsDao.persist(vol);

        List<VolumeDetailVO> volumeDetailsVO = new ArrayList<VolumeDetailVO>();
        DiskOfferingDetailVO bandwidthLimitDetail = _diskOfferingDetailDao.findDetail(offering.getId(), Volume.BANDWIDTH_LIMIT_IN_MBPS);
        if (bandwidthLimitDetail != null) {
            volumeDetailsVO.add(new VolumeDetailVO(vol.getId(), Volume.BANDWIDTH_LIMIT_IN_MBPS, bandwidthLimitDetail.getValue(), false));
        }
        DiskOfferingDetailVO iopsLimitDetail = _diskOfferingDetailDao.findDetail(offering.getId(), Volume.IOPS_LIMIT);
        if (iopsLimitDetail != null) {
            volumeDetailsVO.add(new VolumeDetailVO(vol.getId(), Volume.IOPS_LIMIT, iopsLimitDetail.getValue(), false));
        }
        if (!volumeDetailsVO.isEmpty()) {
            _volDetailDao.saveDetails(volumeDetailsVO);
        }
        saveVolumeDetails(offering.getId(), vol.getId());

        // Save usage event and update resource count for user vm volumes
        if (vm.getType() == VirtualMachine.Type.User) {
@@ -890,6 +879,32 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
        return diskProfile;
    }

    @Override
    public void saveVolumeDetails(Long diskOfferingId, Long volumeId) {
        List<VolumeDetailVO> volumeDetailsVO = new ArrayList<>();
        DiskOfferingDetailVO bandwidthLimitDetail = _diskOfferingDetailDao.findDetail(diskOfferingId, Volume.BANDWIDTH_LIMIT_IN_MBPS);
        if (bandwidthLimitDetail != null) {
            volumeDetailsVO.add(new VolumeDetailVO(volumeId, Volume.BANDWIDTH_LIMIT_IN_MBPS, bandwidthLimitDetail.getValue(), false));
        } else {
            VolumeDetailVO bandwidthLimit = _volDetailDao.findDetail(volumeId, Volume.BANDWIDTH_LIMIT_IN_MBPS);
            if (bandwidthLimit != null) {
                _volDetailDao.remove(bandwidthLimit.getId());
            }
        }
        DiskOfferingDetailVO iopsLimitDetail = _diskOfferingDetailDao.findDetail(diskOfferingId, Volume.IOPS_LIMIT);
        if (iopsLimitDetail != null) {
            volumeDetailsVO.add(new VolumeDetailVO(volumeId, Volume.IOPS_LIMIT, iopsLimitDetail.getValue(), false));
        } else {
            VolumeDetailVO iopsLimit = _volDetailDao.findDetail(volumeId, Volume.IOPS_LIMIT);
            if (iopsLimit != null) {
                _volDetailDao.remove(iopsLimit.getId());
            }
        }
        if (!volumeDetailsVO.isEmpty()) {
            _volDetailDao.saveDetails(volumeDetailsVO);
        }
    }

    private DiskProfile allocateTemplatedVolume(Type type, String name, DiskOffering offering, Long rootDisksize, Long minIops, Long maxIops, VirtualMachineTemplate template, VirtualMachine vm,
                                                Account owner, long deviceId, String configurationId) {
        assert (template.getFormat() != ImageFormat.ISO) : "ISO is not a template.";
@@ -80,4 +80,7 @@ public class VolumeDetailVO implements ResourceDetail {
        return display;
    }

    public void setValue(String value) {
        this.value = value;
    }
}
@@ -93,9 +93,6 @@ import org.libvirt.SchedParameter;
import org.libvirt.SchedUlongParameter;
import org.libvirt.Secret;
import org.libvirt.VcpuInfo;
import org.libvirt.event.DomainEvent;
import org.libvirt.event.DomainEventDetail;
import org.libvirt.event.StoppedDetail;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.Node;

@@ -469,7 +466,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
    protected CPUStat cpuStat = new CPUStat();
    protected MemStat memStat = new MemStat(dom0MinMem, dom0OvercommitMem);
    private final LibvirtUtilitiesHelper libvirtUtilitiesHelper = new LibvirtUtilitiesHelper();
    private AgentStatusUpdater _agentStatusUpdater;
    private LibvirtDomainListener libvirtDomainListener;

    protected Boolean enableManuallySettingCpuTopologyOnKvmVm = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.ENABLE_MANUALLY_SETTING_CPU_TOPOLOGY_ON_KVM_VM);
@@ -503,8 +500,23 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
    }

    @Override
    public void registerStatusUpdater(AgentStatusUpdater updater) {
        _agentStatusUpdater = updater;
    public synchronized void registerStatusUpdater(AgentStatusUpdater updater) {
        if (AgentPropertiesFileHandler.getPropertyValue(AgentProperties.LIBVIRT_EVENTS_ENABLED)) {
            try {
                Connect conn = LibvirtConnection.getConnection();
                if (libvirtDomainListener != null) {
                    LOGGER.debug("Clearing old domain listener");
                    conn.removeLifecycleListener(libvirtDomainListener);
                }
                libvirtDomainListener = new LibvirtDomainListener(updater);
                conn.addLifecycleListener(libvirtDomainListener);
                LOGGER.debug("Set up the libvirt domain event lifecycle listener");
            } catch (LibvirtException e) {
                LOGGER.error("Failed to get libvirt connection for domain event lifecycle", e);
            }
        } else {
            LOGGER.debug("Libvirt event listening is disabled, not registering status updater");
        }
    }

    @Override
@@ -1879,6 +1891,10 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
    public boolean stop() {
        try {
            final Connect conn = LibvirtConnection.getConnection();
            if (AgentPropertiesFileHandler.getPropertyValue(AgentProperties.LIBVIRT_EVENTS_ENABLED) && libvirtDomainListener != null) {
                LOGGER.debug("Clearing old domain listener");
                conn.removeLifecycleListener(libvirtDomainListener);
            }
            conn.close();
        } catch (final LibvirtException e) {
            LOGGER.trace("Ignoring libvirt error.", e);
@@ -3699,50 +3715,9 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
        } catch (final CloudRuntimeException e) {
            LOGGER.debug("Unable to initialize local storage pool: " + e);
        }
        setupLibvirtEventListener();
        return sscmd;
    }

    private void setupLibvirtEventListener() {
        try {
            Connect conn = LibvirtConnection.getConnection();
            conn.addLifecycleListener(this::onDomainLifecycleChange);

            logger.debug("Set up the libvirt domain event lifecycle listener");
        } catch (LibvirtException e) {
            logger.error("Failed to get libvirt connection for domain event lifecycle", e);
        }
    }

    private int onDomainLifecycleChange(Domain domain, DomainEvent domainEvent) {
        try {
            logger.debug(String.format("Got event lifecycle change on Domain %s, event %s", domain.getName(), domainEvent));
            if (domainEvent != null) {
                switch (domainEvent.getType()) {
                    case STOPPED:
                        /* libvirt-destroyed VMs have detail StoppedDetail.DESTROYED, self shutdown guests are StoppedDetail.SHUTDOWN
                         * Checking for this helps us differentiate between events where cloudstack or admin stopped the VM vs guest
                         * initiated, and avoid pushing extra updates for actions we are initiating without a need for extra tracking */
                        DomainEventDetail detail = domainEvent.getDetail();
                        if (StoppedDetail.SHUTDOWN.equals(detail) || StoppedDetail.CRASHED.equals(detail) || StoppedDetail.FAILED.equals(detail)) {
                            logger.info("Triggering out of band status update due to completed self-shutdown or crash of VM");
                            _agentStatusUpdater.triggerUpdate();
                        } else {
                            logger.debug("Event detail: " + detail);
                        }
                        break;
                    default:
                        logger.debug(String.format("No handling for event %s", domainEvent));
                }
            }
        } catch (LibvirtException e) {
            logger.error("Libvirt exception while processing lifecycle event", e);
        } catch (Throwable e) {
            logger.error("Error during lifecycle", e);
        }
        return 0;
    }

    public String diskUuidToSerial(String uuid) {
        String uuidWithoutHyphen = uuid.replace("-","");
        return uuidWithoutHyphen.substring(0, Math.min(uuidWithoutHyphen.length(), 20));
@@ -42,7 +42,7 @@ public class LibvirtConnection {
        return getConnection(s_hypervisorURI);
    }

    static public Connect getConnection(String hypervisorURI) throws LibvirtException {
    static synchronized public Connect getConnection(String hypervisorURI) throws LibvirtException {
        LOGGER.debug("Looking for libvirtd connection at: " + hypervisorURI);
        Connect conn = s_connections.get(hypervisorURI);
@@ -122,6 +122,11 @@ public class LibvirtConnection {
     * @throws LibvirtException
     */
    private static synchronized void setupEventListener() throws LibvirtException {
        if (!AgentPropertiesFileHandler.getPropertyValue(AgentProperties.LIBVIRT_EVENTS_ENABLED)) {
            LOGGER.debug("Libvirt event listening is disabled, not setting up event loop");
            return;
        }

        if (libvirtEventThread == null || !libvirtEventThread.isAlive()) {
            // Registers a default event loop, must be called before connecting to hypervisor
            Library.initEventLoop();
@@ -0,0 +1,65 @@
/*
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.cloud.hypervisor.kvm.resource;

import com.cloud.resource.AgentStatusUpdater;
import org.apache.log4j.Logger;
import org.libvirt.Domain;
import org.libvirt.LibvirtException;
import org.libvirt.event.DomainEvent;
import org.libvirt.event.DomainEventDetail;
import org.libvirt.event.LifecycleListener;
import org.libvirt.event.StoppedDetail;

public class LibvirtDomainListener implements LifecycleListener {
    private static final Logger LOGGER = Logger.getLogger(LibvirtDomainListener.class);

    private final AgentStatusUpdater agentStatusUpdater;

    public LibvirtDomainListener(AgentStatusUpdater updater) {
        agentStatusUpdater = updater;
    }

    public int onLifecycleChange(Domain domain, DomainEvent domainEvent) {
        try {
            LOGGER.debug(String.format("Got event lifecycle change on Domain %s, event %s", domain.getName(), domainEvent));
            if (domainEvent != null) {
                switch (domainEvent.getType()) {
                    case STOPPED:
                        /* libvirt-destroyed VMs have detail StoppedDetail.DESTROYED, self shutdown guests are StoppedDetail.SHUTDOWN
                         * Checking for this helps us differentiate between events where cloudstack or admin stopped the VM vs guest
                         * initiated, and avoid pushing extra updates for actions we are initiating without a need for extra tracking */
                        DomainEventDetail detail = domainEvent.getDetail();
                        if (StoppedDetail.SHUTDOWN.equals(detail) || StoppedDetail.CRASHED.equals(detail) || StoppedDetail.FAILED.equals(detail)) {
                            if (agentStatusUpdater != null) {
                                LOGGER.info("Triggering out of band status update due to completed self-shutdown or crash of VM");
                                agentStatusUpdater.triggerUpdate();
                            }
                        } else {
                            LOGGER.debug("Event detail: " + detail);
                        }
                        break;
                    default:
                        LOGGER.debug(String.format("No handling for event %s", domainEvent));
                }
            }
        } catch (LibvirtException e) {
            LOGGER.error("Libvirt exception while processing lifecycle event", e);
        } catch (Throwable e) {
            LOGGER.error("Error during lifecycle", e);
        }
        return 0;
    }
}
@@ -59,14 +59,6 @@ public class LibvirtScaleVmCommandWrapper extends CommandWrapper<ScaleVmCommand,
            String message = String.format("Unable to scale %s due to [%s].", scalingDetails, e.getMessage());
            logger.error(message, e);
            return new ScaleVmAnswer(command, false, message);
        } finally {
            if (conn != null) {
                try {
                    conn.close();
                } catch (LibvirtException ex) {
                    logger.warn(String.format("Error trying to close libvirt connection [%s]", ex.getMessage()), ex);
                }
            }
        }
    }
@@ -223,7 +223,6 @@ public class KVMHostInfo {
            We used to check if this was supported, but that is no longer required
            */
            this.capabilities.add("snapshot");
            conn.close();
        } catch (final LibvirtException e) {
            LOGGER.error("Caught libvirt exception while fetching host information", e);
        }
@@ -70,7 +70,6 @@ public class KVMHostInfoTest {
        Mockito.when(LibvirtConnection.getConnection()).thenReturn(conn);
        Mockito.when(conn.nodeInfo()).thenReturn(nodeInfo);
        Mockito.when(conn.getCapabilities()).thenReturn(capabilitiesXml);
        Mockito.when(conn.close()).thenReturn(0);
        int manualSpeed = 500;

        KVMHostInfo kvmHostInfo = new KVMHostInfo(10, 10, manualSpeed, 0);

@@ -92,8 +91,6 @@ public class KVMHostInfoTest {
        Mockito.when(LibvirtConnection.getConnection()).thenReturn(conn);
        Mockito.when(conn.nodeInfo()).thenReturn(nodeInfo);
        Mockito.when(conn.getCapabilities()).thenReturn(capabilitiesXml);
        Mockito.when(conn.close()).thenReturn(0);
        int manualSpeed = 500;

        KVMHostInfo kvmHostInfo = new KVMHostInfo(10, 10, 100, 2);
        Assert.assertEquals("reserve two CPU cores", 8, kvmHostInfo.getAllocatableCpus());
@@ -22,6 +22,7 @@ import java.util.Map;

import javax.inject.Inject;

import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService;
import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult;

@@ -38,6 +39,8 @@ import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService;
import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
import org.apache.cloudstack.resourcedetail.DiskOfferingDetailVO;
import org.apache.cloudstack.resourcedetail.dao.DiskOfferingDetailsDao;
import org.apache.cloudstack.storage.RemoteHostEndPoint;
import org.apache.cloudstack.storage.command.CommandResult;
import org.apache.cloudstack.storage.command.CopyCommand;

@@ -128,11 +131,15 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
    @Inject
    private ConfigurationDao configDao;
    @Inject
    private DiskOfferingDetailsDao diskOfferingDetailsDao;
    @Inject
    private HostDao hostDao;
    @Inject
    private VMInstanceDao vmInstanceDao;
    @Inject
    private VolumeService volumeService;
    @Inject
    private VolumeOrchestrationService volumeMgr;

    public ScaleIOPrimaryDataStoreDriver() {
@@ -142,40 +149,47 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
        return ScaleIOGatewayClientConnectionPool.getInstance().getClient(storagePoolId, storagePoolDetailsDao);
    }

    private boolean setVolumeLimitsOnSDC(VolumeVO volume, Host host, DataStore dataStore, Long iopsLimit, Long bandwidthLimitInKbps) throws Exception {
        final String sdcId = getConnectedSdc(dataStore.getId(), host.getId());
        if (StringUtils.isBlank(sdcId)) {
            alertHostSdcDisconnection(host);
            throw new CloudRuntimeException("Unable to grant access to volume: " + volume.getId() + ", no Sdc connected with host ip: " + host.getPrivateIpAddress());
        }

        final ScaleIOGatewayClient client = getScaleIOClient(dataStore.getId());
        return client.mapVolumeToSdcWithLimits(ScaleIOUtil.getVolumePath(volume.getPath()), sdcId, iopsLimit, bandwidthLimitInKbps);
    }

    private boolean setVolumeLimitsFromDetails(VolumeVO volume, Host host, DataStore dataStore) throws Exception {
        Long bandwidthLimitInKbps = 0L; // Unlimited
        // Check Bandwidth Limit parameter in volume details
        final VolumeDetailVO bandwidthVolumeDetail = volumeDetailsDao.findDetail(volume.getId(), Volume.BANDWIDTH_LIMIT_IN_MBPS);
        if (bandwidthVolumeDetail != null && bandwidthVolumeDetail.getValue() != null) {
            bandwidthLimitInKbps = Long.parseLong(bandwidthVolumeDetail.getValue()) * 1024;
        }

        Long iopsLimit = 0L; // Unlimited
        // Check IOPS Limit parameter in volume details, else try MaxIOPS
        final VolumeDetailVO iopsVolumeDetail = volumeDetailsDao.findDetail(volume.getId(), Volume.IOPS_LIMIT);
        if (iopsVolumeDetail != null && iopsVolumeDetail.getValue() != null) {
            iopsLimit = Long.parseLong(iopsVolumeDetail.getValue());
        } else if (volume.getMaxIops() != null) {
            iopsLimit = volume.getMaxIops();
        }
        if (iopsLimit > 0 && iopsLimit < ScaleIOUtil.MINIMUM_ALLOWED_IOPS_LIMIT) {
            iopsLimit = ScaleIOUtil.MINIMUM_ALLOWED_IOPS_LIMIT;
        }

        return setVolumeLimitsOnSDC(volume, host, dataStore, iopsLimit, bandwidthLimitInKbps);
    }

    @Override
    public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore) {
        try {
            if (DataObjectType.VOLUME.equals(dataObject.getType())) {
                final VolumeVO volume = volumeDao.findById(dataObject.getId());
                logger.debug("Granting access for PowerFlex volume: " + volume.getPath());

                Long bandwidthLimitInKbps = Long.valueOf(0); // Unlimited
                // Check Bandwidht Limit parameter in volume details
                final VolumeDetailVO bandwidthVolumeDetail = volumeDetailsDao.findDetail(volume.getId(), Volume.BANDWIDTH_LIMIT_IN_MBPS);
                if (bandwidthVolumeDetail != null && bandwidthVolumeDetail.getValue() != null) {
                    bandwidthLimitInKbps = Long.parseLong(bandwidthVolumeDetail.getValue()) * 1024;
                }

                Long iopsLimit = Long.valueOf(0); // Unlimited
                // Check IOPS Limit parameter in volume details, else try MaxIOPS
                final VolumeDetailVO iopsVolumeDetail = volumeDetailsDao.findDetail(volume.getId(), Volume.IOPS_LIMIT);
                if (iopsVolumeDetail != null && iopsVolumeDetail.getValue() != null) {
                    iopsLimit = Long.parseLong(iopsVolumeDetail.getValue());
                } else if (volume.getMaxIops() != null) {
                    iopsLimit = volume.getMaxIops();
                }
                if (iopsLimit > 0 && iopsLimit < ScaleIOUtil.MINIMUM_ALLOWED_IOPS_LIMIT) {
                    iopsLimit = ScaleIOUtil.MINIMUM_ALLOWED_IOPS_LIMIT;
                }

                final String sdcId = getConnectedSdc(dataStore.getId(), host.getId());
                if (StringUtils.isBlank(sdcId)) {
                    alertHostSdcDisconnection(host);
                    throw new CloudRuntimeException("Unable to grant access to volume: " + dataObject.getId() + ", no Sdc connected with host ip: " + host.getPrivateIpAddress());
                }

                final ScaleIOGatewayClient client = getScaleIOClient(dataStore.getId());
                return client.mapVolumeToSdcWithLimits(ScaleIOUtil.getVolumePath(volume.getPath()), sdcId, iopsLimit, bandwidthLimitInKbps);
                return setVolumeLimitsFromDetails(volume, host, dataStore);
            } else if (DataObjectType.TEMPLATE.equals(dataObject.getType())) {
                final VMTemplateStoragePoolVO templatePoolRef = vmTemplatePoolDao.findByPoolTemplate(dataStore.getId(), dataObject.getId(), null);
                logger.debug("Granting access for PowerFlex template volume: " + templatePoolRef.getInstallPath());
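As a worked example of the limit handling above: a BANDWIDTH_LIMIT_IN_MBPS volume detail of 10 is passed to mapVolumeToSdcWithLimits as 10 * 1024 = 10240 Kbps, a value of 0 means unlimited, and any non-zero IOPS limit smaller than ScaleIOUtil.MINIMUM_ALLOWED_IOPS_LIMIT is raised to that minimum before the SDC mapping is applied.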
@@ -792,7 +806,15 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
                logger.error(errorMsg);
                answer = new Answer(cmd, false, errorMsg);
            } else {
                answer = ep.sendMessage(cmd);
                VolumeVO volume = volumeDao.findById(destData.getId());
                Host host = destHost != null ? destHost : hostDao.findById(ep.getId());
                try {
                    setVolumeLimitsOnSDC(volume, host, destData.getDataStore(), 0L, 0L);
                    answer = ep.sendMessage(cmd);
                } catch (Exception e) {
                    logger.error("Failed to copy template to volume due to: " + e.getMessage(), e);
                    answer = new Answer(cmd, false, e.getMessage());
                }
            }

            return answer;
@@ -1182,7 +1204,7 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
        ResizeVolumePayload payload = (ResizeVolumePayload)volumeInfo.getpayload();
        long newSizeInBytes = payload.newSize != null ? payload.newSize : volumeInfo.getSize();
        // Only increase size is allowed and size should be specified in granularity of 8 GB
        if (newSizeInBytes <= volumeInfo.getSize()) {
        if (newSizeInBytes < volumeInfo.getSize()) {
            throw new CloudRuntimeException("Only increase size is allowed for volume: " + volumeInfo.getName());
        }
@@ -1211,6 +1233,20 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
            }
        }

        Long newMaxIops = payload.newMaxIops != null ? payload.newMaxIops : volumeInfo.getMaxIops();
        long newBandwidthLimit = 0L;
        Long newDiskOfferingId = payload.newDiskOfferingId != null ? payload.newDiskOfferingId : volumeInfo.getDiskOfferingId();
        if (newDiskOfferingId != null) {
            DiskOfferingDetailVO bandwidthLimitDetail = diskOfferingDetailsDao.findDetail(newDiskOfferingId, Volume.BANDWIDTH_LIMIT_IN_MBPS);
            if (bandwidthLimitDetail != null) {
                newBandwidthLimit = Long.parseLong(bandwidthLimitDetail.getValue()) * 1024;
            }
            DiskOfferingDetailVO iopsLimitDetail = diskOfferingDetailsDao.findDetail(newDiskOfferingId, Volume.IOPS_LIMIT);
            if (iopsLimitDetail != null) {
                newMaxIops = Long.parseLong(iopsLimitDetail.getValue());
            }
        }

        if (volumeInfo.getFormat().equals(Storage.ImageFormat.QCOW2) || attachedRunning) {
            logger.debug("Volume needs to be resized at the hypervisor host");
@@ -1230,9 +1266,8 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
                    volumeInfo.getPassphrase(), volumeInfo.getEncryptFormat());

            try {
                if (!attachedRunning) {
                    grantAccess(volumeInfo, ep, volumeInfo.getDataStore());
                }
                VolumeVO volume = volumeDao.findById(volumeInfo.getId());
                setVolumeLimitsOnSDC(volume, host, volumeInfo.getDataStore(), newMaxIops != null ? newMaxIops : 0L, newBandwidthLimit);
                Answer answer = ep.sendMessage(resizeVolumeCommand);

                if (!answer.getResult() && volumeInfo.getFormat().equals(Storage.ImageFormat.QCOW2)) {
@@ -1254,14 +1289,23 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
            VolumeVO volume = volumeDao.findById(volumeInfo.getId());
            long oldVolumeSize = volume.getSize();
            volume.setSize(scaleIOVolume.getSizeInKb() * 1024);
            if (payload.newMinIops != null) {
                volume.setMinIops(payload.newMinIops);
            }
            if (payload.newMaxIops != null) {
                volume.setMaxIops(payload.newMaxIops);
            }
            volumeDao.update(volume.getId(), volume);
            if (payload.newDiskOfferingId != null) {
                volumeMgr.saveVolumeDetails(payload.newDiskOfferingId, volume.getId());
            }

            long capacityBytes = storagePool.getCapacityBytes();
            long usedBytes = storagePool.getUsedBytes();

            long newVolumeSize = volume.getSize();
            usedBytes += newVolumeSize - oldVolumeSize;
            storagePool.setUsedBytes(usedBytes > capacityBytes ? capacityBytes : usedBytes);
            storagePool.setUsedBytes(Math.min(usedBytes, capacityBytes));
            storagePoolDao.update(storagePoolId, storagePool);
        } catch (Exception e) {
            String errMsg = "Unable to resize PowerFlex volume: " + volumeInfo.getId() + " due to " + e.getMessage();
@@ -21,6 +21,7 @@ public class ResizeVolumePayload {
    public final Long newSize;
    public final Long newMinIops;
    public final Long newMaxIops;
    public Long newDiskOfferingId;
    public final Integer newHypervisorSnapshotReserve;
    public final boolean shrinkOk;
    public final String instanceName;

@@ -37,5 +38,12 @@ public class ResizeVolumePayload {
        this.instanceName = instanceName;
        this.hosts = hosts;
        this.isManaged = isManaged;
        this.newDiskOfferingId = null;
    }

    public ResizeVolumePayload(Long newSize, Long newMinIops, Long newMaxIops, Long newDiskOfferingId, Integer newHypervisorSnapshotReserve, boolean shrinkOk,
                               String instanceName, long[] hosts, boolean isManaged) {
        this(newSize, newMinIops, newMaxIops, newHypervisorSnapshotReserve, shrinkOk, instanceName, hosts, isManaged);
        this.newDiskOfferingId = newDiskOfferingId;
    }
}
@@ -1473,7 +1473,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
            }
        }

        ResizeVolumePayload payload = new ResizeVolumePayload(newSize, newMinIops, newMaxIops, newHypervisorSnapshotReserve, shrinkOk, instanceName, hosts, isManaged);
        ResizeVolumePayload payload = new ResizeVolumePayload(newSize, newMinIops, newMaxIops, newDiskOfferingId, newHypervisorSnapshotReserve, shrinkOk, instanceName, hosts, isManaged);

        try {
            VolumeInfo vol = volFactory.getVolume(volume.getId());

@@ -1512,6 +1512,15 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic

        if (newDiskOfferingId != null) {
            volume.setDiskOfferingId(newDiskOfferingId);
            _volumeMgr.saveVolumeDetails(newDiskOfferingId, volume.getId());
        }

        if (newMinIops != null) {
            volume.setMinIops(newMinIops);
        }

        if (newMaxIops != null) {
            volume.setMaxIops(newMaxIops);
        }

        // Update size if volume has same size as before, else it is already updated

@@ -2032,6 +2041,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic

        if (newDiskOffering != null) {
            volume.setDiskOfferingId(newDiskOfferingId);
            _volumeMgr.saveVolumeDetails(newDiskOfferingId, volume.getId());
        }

        _volsDao.update(volume.getId(), volume);