CLOUDSTACK-711: CPU and RAM overcommit ratios.

Bharat Kumar 2013-02-22 17:31:06 +05:30 committed by Abhinandan Prateek
parent ea3db2f073
commit 23e54bb0f4
33 changed files with 534 additions and 301 deletions
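
The thread running through all 33 files: effective host capacity is now scaled by per-cluster ratios stored in cluster_details ("cpuOvercommitRatio", "memoryOvercommitRatio") rather than by the global cpu.overprovisioning.factor configuration. A minimal standalone sketch of that arithmetic, mirroring the CapacityManagerImpl changes below (the host numbers are made up):

public class OvercommitMath {
    public static void main(String[] args) {
        long actualTotalCpu = 16000;                    // host CPU in MHz
        long actualTotalMem = 32L * 1024 * 1024 * 1024; // host RAM in bytes
        float cpuOvercommitRatio = 2.0f;                // cluster_details: cpuOvercommitRatio
        float memoryOvercommitRatio = 1.5f;             // cluster_details: memoryOvercommitRatio
        long totalCpu = (long) (actualTotalCpu * cpuOvercommitRatio);    // 32000 MHz
        long totalMem = (long) (actualTotalMem * memoryOvercommitRatio); // 48 GiB
        System.out.println("CPU after applying overprovisioning: " + totalCpu + " MHz");
        System.out.println("RAM after applying overprovisioning: " + totalMem + " bytes");
    }
}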

View File

@ -28,7 +28,8 @@ public class VirtualMachineTO {
private BootloaderType bootloader;
Type type;
int cpus;
Integer speed;
Integer minSpeed;
Integer maxSpeed;
long minRam;
long maxRam;
String hostName;
@ -47,12 +48,13 @@ public class VirtualMachineTO {
VolumeTO[] disks;
NicTO[] nics;
public VirtualMachineTO(long id, String instanceName, VirtualMachine.Type type, int cpus, Integer speed, long minRam, long maxRam, BootloaderType bootloader, String os, boolean enableHA, boolean limitCpuUse, String vncPassword) {
public VirtualMachineTO(long id, String instanceName, VirtualMachine.Type type, int cpus, Integer minSpeed, Integer maxSpeed, long minRam, long maxRam, BootloaderType bootloader, String os, boolean enableHA, boolean limitCpuUse, String vncPassword) {
this.id = id;
this.name = instanceName;
this.type = type;
this.cpus = cpus;
this.speed = speed;
this.minSpeed = minSpeed;
this.maxSpeed = maxSpeed;
this.minRam = minRam;
this.maxRam = maxRam;
this.bootloader = bootloader;
@ -101,10 +103,13 @@ public class VirtualMachineTO {
this.cpus = cpus;
}
public Integer getSpeed() {
return speed;
public Integer getMinSpeed() {
return minSpeed;
}
public Integer getMaxSpeed() {
return maxSpeed;
}
public boolean getLimitCpuUse() {
return limitCpuUse;
}
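
Illustrative construction with the widened signature (a sketch only; the literal values, and the idea of deriving minSpeed from the offering speed and the CPU overcommit ratio, are assumptions, not taken from this hunk):

VirtualMachineTO vmTO = new VirtualMachineTO(1L, "i-2-3-VM", VirtualMachine.Type.User,
        2,                   // cpus
        500,                 // minSpeed (hypothetical, e.g. offering speed / cpu overcommit ratio)
        1000,                // maxSpeed (hypothetical nominal offering speed)
        512L * 1024 * 1024,  // minRam in bytes
        1024L * 1024 * 1024, // maxRam in bytes
        BootloaderType.HVM, "CentOS 5.3 (64-bit)", true, false, "vncPassword");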

View File

@ -71,7 +71,7 @@ public interface ResourceService {
boolean deleteCluster(DeleteClusterCmd cmd);
Cluster updateCluster(Cluster cluster, String clusterType, String hypervisor, String allocationState, String managedstate);
Cluster updateCluster(Cluster cluster, String clusterType, String hypervisor, String allocationState, String managedstate, Float memoryOvercommitRatio, Float cpuOvercommitRatio);
List<? extends Host> discoverHosts(AddHostCmd cmd) throws IllegalArgumentException, DiscoveryException, InvalidParameterValueException;

View File

@ -136,4 +136,10 @@ public interface VirtualMachineProfile<T extends VirtualMachine> {
BootloaderType getBootLoaderType();
Map<Param, Object> getParameters();
Float getCpuOvercommitRatio();
Float getMemoryOvercommitRatio();
}

View File

@ -46,6 +46,7 @@ public class ApiConstants {
public static final String COMPONENT = "component";
public static final String CPU_NUMBER = "cpunumber";
public static final String CPU_SPEED = "cpuspeed";
public static final String CPU_OVERCOMMIT_RATIO = "cpuovercommitratio";
public static final String CREATED = "created";
public static final String CUSTOMIZED = "customized";
public static final String DESCRIPTION = "description";
@ -119,6 +120,7 @@ public class ApiConstants {
public static final String MAX = "max";
public static final String MAX_SNAPS = "maxsnaps";
public static final String MEMORY = "memory";
public static final String MEMORY_OVERCOMMIT_RATIO = "memoryovercommitratio";
public static final String MODE = "mode";
public static final String NAME = "name";
public static final String METHOD_NAME = "methodname";

View File

@ -20,6 +20,10 @@ package org.apache.cloudstack.api.command.admin.cluster;
import java.util.ArrayList;
import java.util.List;
import com.cloud.exception.InvalidParameterValueException;
import org.apache.cloudstack.api.*;
import org.apache.log4j.Logger;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.ApiErrorCode;
@ -81,6 +85,12 @@ public class AddClusterCmd extends BaseCmd {
@Parameter(name = ApiConstants.VSM_IPADDRESS, type = CommandType.STRING, required = false, description = "the ipaddress of the VSM associated with this cluster")
private String vsmipaddress;
@Parameter(name = ApiConstants.CPU_OVERCOMMIT_RATIO, type = CommandType.STRING, required = false, description = "value of the cpu overcommit ratio, defaults to 1")
private String cpuovercommitratio;
@Parameter(name = ApiConstants.MEMORY_OVERCOMMIT_RATIO, type = CommandType.STRING, required = false, description = "value of the default ram overcommit ratio, defaults to 1")
private String memoryovercommitratio;
public String getVSMIpaddress() {
return vsmipaddress;
}
@ -147,9 +157,26 @@ public class AddClusterCmd extends BaseCmd {
this.allocationState = allocationState;
}
public Float getCpuOvercommitRatio() {
if (cpuovercommitratio != null) {
return Float.parseFloat(cpuovercommitratio);
}
return 1.0f;
}
public Float getMemoryOvercommitRatio() {
if (memoryovercommitratio != null) {
return Float.parseFloat(memoryovercommitratio);
}
return 1.0f;
}
@Override
public void execute(){
try {
if ((getMemoryOvercommitRatio().compareTo(1f) < 0) || (getCpuOvercommitRatio().compareTo(1f) < 0)) {
throw new InvalidParameterValueException("Cpu and ram overcommit ratios should not be less than 1");
}
List<? extends Cluster> result = _resourceService.discoverCluster(this);
ListResponse<ClusterResponse> response = new ListResponse<ClusterResponse>();
List<ClusterResponse> clusterResponses = new ArrayList<ClusterResponse>();
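
A standalone sketch of the parameter handling above (mirrors the getters and the execute() check; the inputs are hypothetical):

public class OvercommitParamSketch {
    // A missing API parameter falls back to 1.0, as in the getters above.
    static Float parseRatio(String raw) {
        return (raw != null) ? Float.parseFloat(raw) : 1.0f;
    }
    public static void main(String[] args) {
        Float cpu = parseRatio("2.0"); // cpuovercommitratio=2.0
        Float mem = parseRatio(null);  // memoryovercommitratio omitted
        if (cpu.compareTo(1f) < 0 || mem.compareTo(1f) < 0) {
            throw new IllegalArgumentException("Cpu and ram overcommit ratios should not be less than 1");
        }
        System.out.println("cpu=" + cpu + ", memory=" + mem); // cpu=2.0, memory=1.0
    }
}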

View File

@ -54,6 +54,13 @@ public class UpdateClusterCmd extends BaseCmd {
@Parameter(name=ApiConstants.MANAGED_STATE, type=CommandType.STRING, description="whether this cluster is managed by cloudstack")
private String managedState;
@Parameter(name = ApiConstants.CPU_OVERCOMMIT_RATIO, type = CommandType.STRING, description = "Value of cpu overcommit ratio")
private String cpuovercommitratio;
@Parameter(name = ApiConstants.MEMORY_OVERCOMMIT_RATIO, type = CommandType.STRING, description = "Value of ram overcommit ratio")
private String memoryovercommitratio;
public String getClusterName() {
return clusterName;
}
@ -100,6 +107,20 @@ public class UpdateClusterCmd extends BaseCmd {
this.managedState = managedstate;
}
public Float getCpuOvercommitRatio() {
if (cpuovercommitratio != null) {
return Float.parseFloat(cpuovercommitratio);
}
return 1.0f;
}
public Float getMemoryOvercommitRatio() {
if (memoryovercommitratio != null) {
return Float.parseFloat(memoryovercommitratio);
}
return 1.0f;
}
@Override
public void execute(){
Cluster cluster = _resourceService.getCluster(getId());
@ -107,7 +128,11 @@ public class UpdateClusterCmd extends BaseCmd {
throw new InvalidParameterValueException("Unable to find the cluster by id=" + getId());
}
Cluster result = _resourceService.updateCluster(cluster, getClusterType(), getHypervisor(), getAllocationState(), getManagedstate());
if ((getMemoryOvercommitRatio().compareTo(1f) < 0) || (getCpuOvercommitRatio().compareTo(1f) < 0)) {
throw new InvalidParameterValueException("Cpu and ram overcommit ratios should not be less than 1");
}
Cluster result = _resourceService.updateCluster(cluster, getClusterType(), getHypervisor(), getAllocationState(), getManagedstate(), getMemoryOvercommitRatio(), getCpuOvercommitRatio());
if (result != null) {
ClusterResponse clusterResponse = _responseGenerator.createClusterResponse(cluster, false);
clusterResponse.setResponseName(getCommandName());

View File

@ -62,6 +62,12 @@ public class ClusterResponse extends BaseResponse {
@SerializedName("capacity") @Param(description="the capacity of the Cluster", responseObject = CapacityResponse.class)
private List<CapacityResponse> capacitites;
@SerializedName("cpuovercommitratio") @Param(description = "The cpu overcommit ratio of the cluster")
private String cpuovercommitratio;
@SerializedName("memoryovercommitratio") @Param (description = "The ram overcommit ratio of the cluster")
private String memoryovercommitratio;
public String getId() {
return id;
}
@ -149,4 +155,18 @@ public class ClusterResponse extends BaseResponse {
public void setCapacitites(ArrayList<CapacityResponse> arrayList) {
this.capacitites = arrayList;
}
public void setCpuovercommitratio(String cpuovercommitratio) {
this.cpuovercommitratio = cpuovercommitratio;
}
public void setRamovercommitratio(String memoryOvercommitRatio) {
this.memoryovercommitratio = memoryOvercommitRatio;
}
public String getCpuovercommitratio() {
return cpuovercommitratio;
}
public String getRamovercommitratio() {
return memoryovercommitratio;
}
}

View File

@ -23,14 +23,13 @@ import javax.ejb.Local;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import com.cloud.dc.*;
import com.cloud.dc.ClusterDetailsDao;
import org.apache.log4j.Logger;
import com.cloud.capacity.CapacityManager;
import com.cloud.configuration.Config;
import com.cloud.configuration.dao.ConfigurationDao;
import com.cloud.dc.ClusterVO;
import com.cloud.dc.DataCenter;
import com.cloud.dc.Pod;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.dc.dao.DataCenterDao;
import com.cloud.dc.dao.HostPodDao;
@ -61,16 +60,14 @@ public class BareMetalPlanner extends AdapterBase implements DeploymentPlanner {
@Inject protected ConfigurationDao _configDao;
@Inject protected CapacityManager _capacityMgr;
@Inject protected ResourceManager _resourceMgr;
@Inject protected ClusterDetailsDao _clusterDetailsDao;
@Override
public DeployDestination plan(VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid) throws InsufficientServerCapacityException {
VirtualMachine vm = vmProfile.getVirtualMachine();
ServiceOffering offering = vmProfile.getServiceOffering();
ServiceOffering offering = vmProfile.getServiceOffering();
String hostTag = null;
String opFactor = _configDao.getValue(Config.CPUOverprovisioningFactor.key());
float cpuOverprovisioningFactor = NumbersUtil.parseFloat(opFactor, 1);
String haVmTag = (String)vmProfile.getParameter(VirtualMachineProfile.Param.HaTag);
if (vm.getLastHostId() != null && haVmTag == null) {
@ -126,7 +123,13 @@ public class BareMetalPlanner extends AdapterBase implements DeploymentPlanner {
return null;
}
for (HostVO h : hosts) {
if (_capacityMgr.checkIfHostHasCapacity(h.getId(), cpu_requested, ram_requested, false, cpuOverprovisioningFactor, true)) {
long cluster_id = h.getClusterId();
ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id, "cpuOvercommitRatio");
ClusterDetailsVO cluster_detail_ram = _clusterDetailsDao.findDetail(cluster_id, "memoryOvercommitRatio");
Float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue());
Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue());
if (_capacityMgr.checkIfHostHasCapacity(h.getId(), cpu_requested, ram_requested, false, cpuOvercommitRatio, memoryOvercommitRatio, true)) {
s_logger.debug("Find host " + h.getId() + " has enough capacity");
DataCenter dc = _dcDao.findById(h.getDataCenterId());
Pod pod = _podDao.findById(h.getPodId());
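
One caveat with this lookup pattern (repeated in the allocators and the capacity manager below): if findDetail returns null for a cluster that has no overcommit detail rows, say one created before this change, the call to getValue() would throw a NullPointerException. A defensive variant, assuming a 1.0 default is acceptable:

ClusterDetailsVO detail = _clusterDetailsDao.findDetail(cluster_id, "cpuOvercommitRatio");
float cpuOvercommitRatio = (detail != null) ? Float.parseFloat(detail.getValue()) : 1.0f;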

View File

@ -2921,12 +2921,24 @@ ServerResource {
vm.addComp(guest);
GuestResourceDef grd = new GuestResourceDef();
grd.setMemorySize(vmTO.getMinRam() / 1024);
// Check whether memory overcommit is in play; if so, enable ballooning.
if (vmTO.getMinRam() != vmTO.getMaxRam()) {
grd.setMemBallooning(true);
grd.setCurrentMem((int) (vmTO.getMinRam() / 1024));
grd.setMemorySize(vmTO.getMaxRam() / 1024);
} else {
grd.setMemorySize(vmTO.getMaxRam() / 1024);
}
grd.setVcpuNum(vmTO.getCpus());
vm.addComp(grd);
CpuTuneDef ctd = new CpuTuneDef();
ctd.setShares(vmTO.getCpus() * vmTO.getSpeed());
ctd.setShares(vmTO.getCpus() * vmTO.getMinSpeed());
vm.addComp(ctd);
FeaturesDef features = new FeaturesDef();

View File

@ -116,6 +116,7 @@ public class LibvirtVMDef {
private int _currentMem = -1;
private String _memBacking;
private int _vcpu = -1;
private boolean _memBallooning = false;
public void setMemorySize(long mem) {
_mem = mem;
@ -133,6 +134,10 @@ public class LibvirtVMDef {
_vcpu = vcpu;
}
public void setMemBallooning(boolean turnOn) {
_memBallooning = turnOn;
}
@Override
public String toString() {
StringBuilder resBuilder = new StringBuilder();
@ -145,6 +150,9 @@ public class LibvirtVMDef {
resBuidler.append("<memoryBacking>" + "<" + _memBacking + "/>"
+ "</memoryBacking>\n");
}
if (_memBalloning){
resBuidler.append("<devices>\n" + "<memballoon model='virtio'/>\n" + "</devices>\n");
}
if (_vcpu != -1) {
resBuidler.append("<vcpu>" + _vcpu + "</vcpu>\n");
}
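
Putting the two hunks together, a sketch of how the KVM resource now drives this class (method names from this diff; the sizes are hypothetical):

GuestResourceDef grd = new GuestResourceDef();
long minRam = 512L * 1024 * 1024;  // bytes
long maxRam = 1024L * 1024 * 1024; // bytes
if (minRam != maxRam) {            // memory overcommit in play
    grd.setMemBallooning(true);    // toString() now emits <memballoon model='virtio'/>
    grd.setCurrentMem((int) (minRam / 1024)); // KiB
    grd.setMemorySize(maxRam / 1024);         // KiB
} else {
    grd.setMemorySize(maxRam / 1024);
}
grd.setVcpuNum(2);
String domainXml = grd.toString();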

View File

@ -348,7 +348,7 @@ public class MockVmManagerImpl extends ManagerBase implements MockVmManager {
@Override
public Answer startVM(StartCommand cmd, SimulatorInfo info) {
VirtualMachineTO vm = cmd.getVirtualMachine();
String result = startVM(vm.getName(), vm.getNics(), vm.getCpus()* vm.getSpeed(), vm.getMaxRam(), vm.getBootArgs(), info.getHostUuid());
String result = startVM(vm.getName(), vm.getNics(), vm.getCpus()* vm.getMaxSpeed(), vm.getMaxRam(), vm.getBootArgs(), info.getHostUuid());
if (result != null) {
return new StartAnswer(cmd, result);
} else {

View File

@ -184,7 +184,7 @@ public class AgentRoutingResource extends AgentStorageResource {
throws IllegalArgumentException {
VirtualMachineTO vmSpec = cmd.getVirtualMachine();
String vmName = vmSpec.getName();
if (this.totalCpu < (vmSpec.getCpus() * vmSpec.getSpeed() + this.usedCpu) ||
if (this.totalCpu < (vmSpec.getCpus() * vmSpec.getMaxSpeed() + this.usedCpu) ||
this.totalMem < (vmSpec.getMaxRam() + this.usedMem)) {
return new StartAnswer(cmd, "Not enough resource to start the vm");
}
@ -199,9 +199,9 @@ public class AgentRoutingResource extends AgentStorageResource {
return new StartAnswer(cmd, result.getDetails());
}
this.usedCpu += vmSpec.getCpus() * vmSpec.getSpeed();
this.usedCpu += vmSpec.getCpus() * vmSpec.getMaxSpeed();
this.usedMem += vmSpec.getMaxRam();
_runningVms.put(vmName, new Pair<Long, Long>(Long.valueOf(vmSpec.getCpus() * vmSpec.getSpeed()), vmSpec.getMaxRam()));
_runningVms.put(vmName, new Pair<Long, Long>(Long.valueOf(vmSpec.getCpus() * vmSpec.getMaxSpeed()), vmSpec.getMaxRam()));
state = State.Running;
} finally {

View File

@ -138,10 +138,8 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw
int _additionalPortRangeSize;
int _routerExtraPublicNics = 2;
String _cpuOverprovisioningFactor = "1";
String _reserveCpu = "false";
String _memOverprovisioningFactor = "1";
String _reserveMem = "false";
String _rootDiskController = DiskControllerType.ide.toString();
@ -262,14 +260,6 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw
_routerExtraPublicNics = NumbersUtil.parseInt(_configDao.getValue(Config.RouterExtraPublicNics.key()), 2);
_cpuOverprovisioningFactor = _configDao.getValue(Config.CPUOverprovisioningFactor.key());
if(_cpuOverprovisioningFactor == null || _cpuOverprovisioningFactor.isEmpty())
_cpuOverprovisioningFactor = "1";
_memOverprovisioningFactor = _configDao.getValue(Config.MemOverprovisioningFactor.key());
if(_memOverprovisioningFactor == null || _memOverprovisioningFactor.isEmpty())
_memOverprovisioningFactor = "1";
_reserveCpu = _configDao.getValue(Config.VmwareReserveCpu.key());
if(_reserveCpu == null || _reserveCpu.isEmpty())
_reserveCpu = "false";
@ -507,9 +497,7 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw
params.put("vmware.use.nexus.vswitch", _nexusVSwitchActive);
params.put("service.console.name", _serviceConsoleName);
params.put("management.portgroup.name", _managemetPortGroupName);
params.put("cpu.overprovisioning.factor", _cpuOverprovisioningFactor);
params.put("vmware.reserve.cpu", _reserveCpu);
params.put("mem.overprovisioning.factor", _memOverprovisioningFactor);
params.put("vmware.reserve.mem", _reserveMem);
params.put("vmware.root.disk.controller", _rootDiskController);
params.put("vmware.recycle.hung.wokervm", _recycleHungWorker);

View File

@ -289,10 +289,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
protected VirtualSwitchType _vSwitchType = VirtualSwitchType.StandardVirtualSwitch;
protected boolean _nexusVSwitch = false;
protected float _cpuOverprovisioningFactor = 1;
protected boolean _reserveCpu = false;
protected float _memOverprovisioningFactor = 1;
protected boolean _reserveMem = false;
protected boolean _recycleHungWorker = false;
protected DiskControllerType _rootDiskController = DiskControllerType.ide;
@ -2089,10 +2087,10 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
}
}
assert (vmSpec.getSpeed() != null) && (rootDiskDataStoreDetails != null);
if (!hyperHost.createBlankVm(vmName, vmSpec.getCpus(), vmSpec.getSpeed().intValue(),
getReserveCpuMHz(vmSpec.getSpeed().intValue()), vmSpec.getLimitCpuUse(), ramMb, getReserveMemMB(ramMb),
translateGuestOsIdentifier(vmSpec.getArch(), vmSpec.getOs()).toString(), rootDiskDataStoreDetails.first(), false)) {
assert (vmSpec.getMinSpeed() != null) && (rootDiskDataStoreDetails != null);
if (!hyperHost.createBlankVm(vmName, vmSpec.getCpus(), vmSpec.getMaxSpeed().intValue(),
vmSpec.getMinSpeed(), vmSpec.getLimitCpuUse(), (int) (vmSpec.getMaxRam() / (1024 * 1024)), ramMb,
translateGuestOsIdentifier(vmSpec.getArch(), vmSpec.getOs()).toString(), rootDiskDataStoreDetails.first(), false)) {
throw new Exception("Failed to create VM. vmName: " + vmName);
}
}
@ -2122,10 +2120,10 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
VirtualMachineConfigSpec vmConfigSpec = new VirtualMachineConfigSpec();
int ramMb = (int) (vmSpec.getMinRam() / (1024 * 1024));
VmwareHelper.setBasicVmConfig(vmConfigSpec, vmSpec.getCpus(), vmSpec.getSpeed().intValue(),
getReserveCpuMHz(vmSpec.getSpeed().intValue()), ramMb, getReserveMemMB(ramMb),
translateGuestOsIdentifier(vmSpec.getArch(), vmSpec.getOs()).toString(), vmSpec.getLimitCpuUse());
VmwareHelper.setBasicVmConfig(vmConfigSpec, vmSpec.getCpus(), vmSpec.getMaxSpeed(),
vmSpec.getMinSpeed(), (int) (vmSpec.getMaxRam() / (1024 * 1024)), ramMb,
translateGuestOsIdentifier(vmSpec.getArch(), vmSpec.getOs()).toString(), vmSpec.getLimitCpuUse());
VirtualDeviceConfigSpec[] deviceConfigSpecArray = new VirtualDeviceConfigSpec[totalChangeDevices];
int i = 0;
int ideControllerKey = vmMo.getIDEDeviceControllerKey();
@ -2375,22 +2373,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
return validatedDetails;
}
private int getReserveCpuMHz(int cpuMHz) {
if(this._reserveCpu) {
return (int)(cpuMHz / this._cpuOverprovisioningFactor);
}
return 0;
}
private int getReserveMemMB(int memMB) {
if(this._reserveMem) {
return (int)(memMB / this._memOverprovisioningFactor);
}
return 0;
}
private NicTO[] sortNicsByDeviceId(NicTO[] nics) {
List<NicTO> listForSort = new ArrayList<NicTO>();
@ -4882,11 +4866,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
_guestNetworkVSwitchName = (String) params.get("guest.network.vswitch.name");
}
String value = (String) params.get("cpu.overprovisioning.factor");
if(value != null)
_cpuOverprovisioningFactor = Float.parseFloat(value);
value = (String) params.get("vmware.reserve.cpu");
String value = (String) params.get("vmware.reserve.cpu");
if(value != null && value.equalsIgnoreCase("true"))
_reserveCpu = true;
@ -4894,10 +4874,6 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
if(value != null && value.equalsIgnoreCase("true"))
_recycleHungWorker = true;
value = (String) params.get("mem.overprovisioning.factor");
if(value != null)
_memOverprovisioningFactor = Float.parseFloat(value);
value = (String) params.get("vmware.reserve.mem");
if(value != null && value.equalsIgnoreCase("true"))
_reserveMem = true;

View File

@ -1114,13 +1114,13 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
vm.setAffinity(conn, host);
vm.removeFromOtherConfig(conn, "disks");
vm.setNameLabel(conn, vmSpec.getName());
setMemory(conn, vm, vmSpec.getMinRam());
setMemory(conn, vm, vmSpec.getMinRam(), vmSpec.getMaxRam());
vm.setVCPUsMax(conn, (long)vmSpec.getCpus());
vm.setVCPUsAtStartup(conn, (long)vmSpec.getCpus());
Map<String, String> vcpuParams = new HashMap<String, String>();
Integer speed = vmSpec.getSpeed();
Integer speed = vmSpec.getMinSpeed();
if (speed != null) {
int cpuWeight = _maxWeight; //cpu_weight
@ -3253,8 +3253,8 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
}
}
protected void setMemory(Connection conn, VM vm, long memsize) throws XmlRpcException, XenAPIException {
vm.setMemoryLimits(conn, memsize, memsize, memsize, memsize);
protected void setMemory(Connection conn, VM vm, long minMemsize, long maxMemsize) throws XmlRpcException, XenAPIException {
vm.setMemoryLimits(conn, maxMemsize, maxMemsize, minMemsize, maxMemsize);
}
private void waitForTask(Connection c, Task task, long pollInterval, long timeout) throws XenAPIException, XmlRpcException {

View File

@ -72,17 +72,17 @@ public class XcpServerResource extends CitrixResourceBase {
}
@Override
protected void setMemory(Connection conn, VM vm, long memsize) throws XmlRpcException, XenAPIException {
protected void setMemory(Connection conn, VM vm, long minMemsize, long maxMemsize) throws XmlRpcException, XenAPIException {
vm.setMemoryStaticMin(conn, 33554432L);
vm.setMemoryDynamicMin(conn, 33554432L);
vm.setMemoryDynamicMax(conn, 33554432L);
//vm.setMemoryDynamicMin(conn, 33554432L);
//vm.setMemoryDynamicMax(conn, 33554432L);
vm.setMemoryStaticMax(conn, 33554432L);
vm.setMemoryStaticMax(conn, memsize);
vm.setMemoryDynamicMax(conn, memsize);
vm.setMemoryDynamicMin(conn, memsize);
vm.setMemoryStaticMin(conn, memsize);
//vm.setMemoryStaticMax(conn, maxMemsize);
vm.setMemoryDynamicMax(conn, maxMemsize);
vm.setMemoryDynamicMin(conn, minMemsize);
//vm.setMemoryStaticMin(conn, maxMemsize);
}
@ -99,7 +99,7 @@ public class XcpServerResource extends CitrixResourceBase {
return answer;
} catch (Exception ex) {
s_logger.warn("Failed to get network usage stats due to ", ex);
return new NetworkUsageAnswer(cmd, ex);
return new NetworkUsageAnswer(cmd, ex);
}
}

View File

@ -136,9 +136,9 @@ public class XenServer56FP1Resource extends XenServer56Resource {
record.nameLabel = vmSpec.getName();
record.actionsAfterCrash = Types.OnCrashBehaviour.DESTROY;
record.actionsAfterShutdown = Types.OnNormalExit.DESTROY;
record.memoryDynamicMax = vmSpec.getMinRam();
record.memoryDynamicMax = vmSpec.getMaxRam();
record.memoryDynamicMin = vmSpec.getMinRam();
record.memoryStaticMax = vmSpec.getMinRam();
record.memoryStaticMax = vmSpec.getMaxRam();
record.memoryStaticMin = vmSpec.getMinRam();
record.VCPUsMax = (long) vmSpec.getCpus();
record.VCPUsAtStartup = (long) vmSpec.getCpus();
@ -152,7 +152,7 @@ public class XenServer56FP1Resource extends XenServer56Resource {
Map<String, String> vcpuParams = new HashMap<String, String>();
Integer speed = vmSpec.getSpeed();
Integer speed = vmSpec.getMinSpeed();
if (speed != null) {
int cpuWeight = _maxWeight; // cpu_weight

View File

@ -26,6 +26,10 @@ import javax.ejb.Local;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import com.cloud.dc.ClusterDetailsDao;
import com.cloud.dc.ClusterDetailsVO;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.org.Cluster;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
@ -78,6 +82,8 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator {
@Inject GuestOSCategoryDao _guestOSCategoryDao = null;
@Inject VMInstanceDao _vmInstanceDao = null;
@Inject ResourceManager _resourceMgr;
@Inject ClusterDao _clusterDao;
@Inject ClusterDetailsDao _clusterDetailsDao;
float _factor = 1;
boolean _checkHvm = true;
protected String _allocationAlgorithm = "random";
@ -214,8 +220,14 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator {
boolean numCpusGood = host.getCpus().intValue() >= offering.getCpu();
boolean cpuFreqGood = host.getSpeed().intValue() >= offering.getSpeed();
int cpu_requested = offering.getCpu() * offering.getSpeed();
long ram_requested = offering.getRamSize() * 1024L * 1024L;
boolean hostHasCapacity = _capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, false, _factor, considerReservedCapacity);
long ram_requested = offering.getRamSize() * 1024L * 1024L;
Cluster cluster = _clusterDao.findById(host.getClusterId());
ClusterDetailsVO clusterDetailsCpuOvercommit = _clusterDetailsDao.findDetail(cluster.getId(), "cpuOvercommitRatio");
ClusterDetailsVO clusterDetailsRamOvercommit = _clusterDetailsDao.findDetail(cluster.getId(), "memoryOvercommitRatio");
Float cpuOvercommitRatio = Float.parseFloat(clusterDetailsCpuOvercommit.getValue());
Float memoryOvercommitRatio = Float.parseFloat(clusterDetailsRamOvercommit.getValue());
boolean hostHasCapacity = _capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, false, cpuOvercommitRatio, memoryOvercommitRatio, considerReservedCapacity);
if (numCpusGood && cpuFreqGood && hostHasCapacity) {
if (s_logger.isDebugEnabled()) {

View File

@ -318,6 +318,7 @@ public class ApiDBUtils {
static AsyncJobDao _asyncJobDao;
static HostDetailsDao _hostDetailsDao;
static VMSnapshotDao _vmSnapshotDao;
static ClusterDetailsDao _clusterDetailsDao;
@Inject private ManagementServer ms;
@Inject public AsyncJobManager asyncMgr;
@ -418,6 +419,7 @@ public class ApiDBUtils {
@Inject private SnapshotPolicyDao snapshotPolicyDao;
@Inject private AsyncJobDao asyncJobDao;
@Inject private HostDetailsDao hostDetailsDao;
@Inject private ClusterDetailsDao clusterDetailsDao;
@Inject private VMSnapshotDao vmSnapshotDao;
@PostConstruct
void init() {
@ -517,6 +519,7 @@ public class ApiDBUtils {
_snapshotPolicyDao = snapshotPolicyDao;
_asyncJobDao = asyncJobDao;
_hostDetailsDao = hostDetailsDao;
_clusterDetailsDao = clusterDetailsDao;
_vmSnapshotDao = vmSnapshotDao;
// Note: stats collector should already have been initialized by this time, otherwise a null instance is returned
_statsCollector = StatsCollector.getInstance();
@ -681,6 +684,10 @@ public class ApiDBUtils {
return _clusterDao.findById(clusterId);
}
public static ClusterDetailsVO findClusterDetails(long clusterId, String name) {
return _clusterDetailsDao.findDetail(clusterId, name);
}
public static DiskOfferingVO findDiskOfferingById(Long diskOfferingId) {
return _diskOfferingDao.findByIdIncludingRemoved(diskOfferingId);
}

View File

@ -690,14 +690,12 @@ public class ApiResponseHelper implements ResponseGenerator {
if (showCapacities != null && showCapacities) {
List<SummedCapacity> capacities = ApiDBUtils.getCapacityByClusterPodZone(null, pod.getId(), null);
Set<CapacityResponse> capacityResponses = new HashSet<CapacityResponse>();
float cpuOverprovisioningFactor = ApiDBUtils.getCpuOverprovisioningFactor();
for (SummedCapacity capacity : capacities) {
CapacityResponse capacityResponse = new CapacityResponse();
capacityResponse.setCapacityType(capacity.getCapacityType());
capacityResponse.setCapacityUsed(capacity.getUsedCapacity());
if (capacity.getCapacityType() == Capacity.CAPACITY_TYPE_CPU) {
capacityResponse.setCapacityTotal(new Long((long) (capacity.getTotalCapacity() * cpuOverprovisioningFactor)));
capacityResponse.setCapacityTotal(new Long((long) (capacity.getTotalCapacity())));
} else if (capacity.getCapacityType() == Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED) {
List<SummedCapacity> c = ApiDBUtils.findNonSharedStorageForClusterPodZone(null, pod.getId(), null);
capacityResponse.setCapacityTotal(capacity.getTotalCapacity() - c.get(0).getTotalCapacity());
@ -827,12 +825,15 @@ public class ApiResponseHelper implements ResponseGenerator {
clusterResponse.setClusterType(cluster.getClusterType().toString());
clusterResponse.setAllocationState(cluster.getAllocationState().toString());
clusterResponse.setManagedState(cluster.getManagedState().toString());
String cpuOvercommitRatio = ApiDBUtils.findClusterDetails(cluster.getId(), "cpuOvercommitRatio").getValue();
String memoryOvercommitRatio = ApiDBUtils.findClusterDetails(cluster.getId(), "memoryOvercommitRatio").getValue();
clusterResponse.setCpuovercommitratio(cpuOvercommitRatio);
clusterResponse.setRamovercommitratio(memoryOvercommitRatio);
if (showCapacities != null && showCapacities) {
List<SummedCapacity> capacities = ApiDBUtils.getCapacityByClusterPodZone(null, null, cluster.getId());
Set<CapacityResponse> capacityResponses = new HashSet<CapacityResponse>();
float cpuOverprovisioningFactor = ApiDBUtils.getCpuOverprovisioningFactor();
for (SummedCapacity capacity : capacities) {
CapacityResponse capacityResponse = new CapacityResponse();
@ -840,8 +841,11 @@ public class ApiResponseHelper implements ResponseGenerator {
capacityResponse.setCapacityUsed(capacity.getUsedCapacity());
if (capacity.getCapacityType() == Capacity.CAPACITY_TYPE_CPU) {
capacityResponse.setCapacityTotal(new Long((long) (capacity.getTotalCapacity() * cpuOverprovisioningFactor)));
} else if (capacity.getCapacityType() == Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED) {
capacityResponse.setCapacityTotal(new Long((long) (capacity.getTotalCapacity() * Float.parseFloat(cpuOvercommitRatio))));
} else if (capacity.getCapacityType() == Capacity.CAPACITY_TYPE_MEMORY) {
capacityResponse.setCapacityTotal(new Long((long) (capacity.getTotalCapacity() * Float.parseFloat(memoryOvercommitRatio))));
} else if (capacity.getCapacityType() == Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED) {
List<SummedCapacity> c = ApiDBUtils.findNonSharedStorageForClusterPodZone(null, null, cluster.getId());
capacityResponse.setCapacityTotal(capacity.getTotalCapacity() - c.get(0).getTotalCapacity());
capacityResponse.setCapacityUsed(capacity.getUsedCapacity() - c.get(0).getUsedCapacity());
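
Worked example with illustrative numbers: a cluster whose hosts total 10,000 MHz of raw CPU and 16 GiB of raw RAM, with cpuOvercommitRatio "2" and memoryOvercommitRatio "1.5", is now reported with 20,000 MHz total CPU capacity and 24 GiB total memory capacity.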

View File

@ -39,8 +39,8 @@ public interface CapacityManager extends Manager {
* @param ram required RAM
* @param cpuOverprovisioningFactor factor to apply to the actual host cpu
*/
boolean checkIfHostHasCapacity(long hostId, Integer cpu, long ram, boolean checkFromReservedCapacity, float cpuOverprovisioningFactor, boolean considerReservedCapacity);
boolean checkIfHostHasCapacity(long hostId, Integer cpu, long ram, boolean checkFromReservedCapacity, float cpuOverprovisioningFactor, float memoryOvercommitRatio, boolean considerReservedCapacity);
void updateCapacityForHost(HostVO host);
/**

View File

@ -28,6 +28,12 @@ import javax.inject.Inject;
import javax.naming.ConfigurationException;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import com.cloud.dc.ClusterDetailsDao;
import com.cloud.dc.DataCenter;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.exception.InsufficientCapacityException;
import com.cloud.exception.InsufficientServerCapacityException;
import com.cloud.resource.ResourceState;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
@ -119,26 +125,25 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager,
@Inject
protected UserVmDao _userVMDao;
@Inject
ClusterDetailsDao _clusterDetailsDao;
@Inject
ClusterDao _clusterDao;
private int _vmCapacityReleaseInterval;
private ScheduledExecutorService _executor;
private boolean _stopped;
long _extraBytesPerVolume = 0;
private float _storageOverProvisioningFactor = 1.0f;
private float _cpuOverProvisioningFactor = 1.0f;
@Override
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
_vmCapacityReleaseInterval = NumbersUtil.parseInt(_configDao.getValue(Config.CapacitySkipcountingHours.key()), 3600);
_storageOverProvisioningFactor = NumbersUtil.parseFloat(_configDao.getValue(Config.StorageOverprovisioningFactor.key()), 1.0f);
_cpuOverProvisioningFactor = NumbersUtil.parseFloat(_configDao.getValue(Config.CPUOverprovisioningFactor.key()), 1.0f);
if (_cpuOverProvisioningFactor < 1.0f) {
_cpuOverProvisioningFactor = 1.0f;
}
_executor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("HostCapacity-Checker"));
VirtualMachine.State.getStateMachine().registerListener(this);
_agentManager.registerForHostEvents(new StorageCapacityListener(_capacityDao, _storageOverProvisioningFactor), true, false, false);
_agentManager.registerForHostEvents(new ComputeCapacityListener(_capacityDao, this, _cpuOverProvisioningFactor), true, false, false);
_agentManager.registerForHostEvents(new ComputeCapacityListener(_capacityDao, this), true, false, false);
return true;
}
@ -163,7 +168,11 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager,
ServiceOfferingVO svo = _offeringsDao.findById(vm.getServiceOfferingId());
CapacityVO capacityCpu = _capacityDao.findByHostIdType(hostId, CapacityVO.CAPACITY_TYPE_CPU);
CapacityVO capacityMemory = _capacityDao.findByHostIdType(hostId, CapacityVO.CAPACITY_TYPE_MEMORY);
Long clusterId = null;
if (hostId != null) {
HostVO host = _hostDao.findById(hostId);
clusterId = host.getClusterId();
}
if (capacityCpu == null || capacityMemory == null || svo == null) {
return false;
}
@ -172,9 +181,6 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager,
try {
txn.start();
int vmCPU = svo.getCpu() * svo.getSpeed();
long vmMem = svo.getRamSize() * 1024L * 1024L;
capacityCpu = _capacityDao.lockRow(capacityCpu.getId(), true);
capacityMemory = _capacityDao.lockRow(capacityMemory.getId(), true);
@ -183,13 +189,18 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager,
long reservedCpu = capacityCpu.getReservedCapacity();
long reservedMem = capacityMemory.getReservedCapacity();
long actualTotalCpu = capacityCpu.getTotalCapacity();
String opFactor = _configDao.getValue(Config.CPUOverprovisioningFactor.key());
float cpuOverprovisioningFactor = NumbersUtil.parseFloat(opFactor, 1);
long totalCpu = (long) (actualTotalCpu * cpuOverprovisioningFactor);
float cpuOvercommitRatio = Float.parseFloat(_clusterDetailsDao.findDetail(clusterId, "cpuOvercommitRatio").getValue());
float memoryOvercommitRatio = Float.parseFloat(_clusterDetailsDao.findDetail(clusterId, "memoryOvercommitRatio").getValue());
int vmCPU = (int) (svo.getCpu() * svo.getSpeed());
long vmMem = (long) (svo.getRamSize() * 1024L * 1024L);
long actualTotalMem = capacityMemory.getTotalCapacity();
long totalMem = (long) (actualTotalMem * memoryOvercommitRatio);
long totalCpu = (long) (actualTotalCpu * cpuOvercommitRatio);
if (s_logger.isDebugEnabled()) {
s_logger.debug("Hosts's actual total CPU: " + actualTotalCpu + " and CPU after applying overprovisioning: " + totalCpu);
s_logger.debug("Hosts's actual total RAM: " + actualTotalMem + " and RAM after applying overprovisioning: " + totalMem);
}
long totalMem = capacityMemory.getTotalCapacity();
if (!moveFromReserved) {
/* move resource from used */
@ -241,6 +252,10 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager,
public void allocateVmCapacity(VirtualMachine vm, boolean fromLastHost) {
long hostId = vm.getHostId();
HostVO host = _hostDao.findById(hostId);
long clusterId = host.getClusterId();
float cpuOvercommitRatio = Float.parseFloat(_clusterDetailsDao.findDetail(clusterId, "cpuOvercommitRatio").getValue());
float memoryOvercommitRatio = Float.parseFloat(_clusterDetailsDao.findDetail(clusterId, "memoryOvercommitRatio").getValue());
ServiceOfferingVO svo = _offeringsDao.findById(vm.getServiceOfferingId());
@ -251,11 +266,9 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager,
return;
}
int cpu = svo.getCpu() * svo.getSpeed();
long ram = svo.getRamSize() * 1024L * 1024L;
int cpu = (int) (svo.getCpu() * svo.getSpeed());
long ram = (long) (svo.getRamSize() * 1024L * 1024L);
String opFactor = _configDao.getValue(Config.CPUOverprovisioningFactor.key());
float cpuOverprovisioningFactor = NumbersUtil.parseFloat(opFactor, 1);
Transaction txn = Transaction.currentTxn();
@ -269,11 +282,12 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager,
long reservedCpu = capacityCpu.getReservedCapacity();
long reservedMem = capacityMem.getReservedCapacity();
long actualTotalCpu = capacityCpu.getTotalCapacity();
long totalCpu = (long) (actualTotalCpu * cpuOverprovisioningFactor);
long actualTotalMem = capacityMem.getTotalCapacity();
long totalCpu = (long) (actualTotalCpu * cpuOvercommitRatio);
long totalMem = (long) (actualTotalMem * memoryOvercommitRatio);
if (s_logger.isDebugEnabled()) {
s_logger.debug("Hosts's actual total CPU: " + actualTotalCpu + " and CPU after applying overprovisioning: " + totalCpu);
}
long totalMem = capacityMem.getTotalCapacity();
long freeCpu = totalCpu - (reservedCpu + usedCpu);
long freeMem = totalMem - (reservedMem + usedMem);
@ -325,12 +339,12 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager,
}
@Override
public boolean checkIfHostHasCapacity(long hostId, Integer cpu, long ram, boolean checkFromReservedCapacity, float cpuOverprovisioningFactor, boolean considerReservedCapacity) {
public boolean checkIfHostHasCapacity(long hostId, Integer cpu, long ram, boolean checkFromReservedCapacity, float cpuOvercommitRatio, float memoryOvercommitRatio, boolean considerReservedCapacity) {
boolean hasCapacity = false;
if (s_logger.isDebugEnabled()) {
s_logger.debug("Checking if host: " + hostId + " has enough capacity for requested CPU: " + cpu + " and requested RAM: " + ram
+ " , cpuOverprovisioningFactor: " + cpuOverprovisioningFactor);
+ " , cpuOverprovisioningFactor: " + cpuOvercommitRatio);
}
CapacityVO capacityCpu = _capacityDao.findByHostIdType(hostId, CapacityVO.CAPACITY_TYPE_CPU);
@ -356,13 +370,13 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager,
long reservedCpu = capacityCpu.getReservedCapacity();
long reservedMem = capacityMem.getReservedCapacity();
long actualTotalCpu = capacityCpu.getTotalCapacity();
long totalCpu = (long) (actualTotalCpu * cpuOverprovisioningFactor);
long actualTotalMem = capacityMem.getTotalCapacity();
long totalCpu = (long) (actualTotalCpu * cpuOvercommitRatio);
long totalMem = (long) (actualTotalMem * memoryOvercommitRatio);
if (s_logger.isDebugEnabled()) {
s_logger.debug("Hosts's actual total CPU: " + actualTotalCpu + " and CPU after applying overprovisioning: " + totalCpu);
}
long totalMem = capacityMem.getTotalCapacity();
String failureReason = "";
if (checkFromReservedCapacity) {
long freeCpu = reservedCpu;
@ -695,10 +709,12 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager,
capacityCPU.addAnd("podId", SearchCriteria.Op.EQ, server.getPodId());
capacityCPU.addAnd("capacityType", SearchCriteria.Op.EQ, CapacityVO.CAPACITY_TYPE_CPU);
List<CapacityVO> capacityVOCpus = _capacityDao.search(capacitySC, null);
Float cpuOvercommitRatio = Float.parseFloat(_clusterDetailsDao.findDetail(server.getClusterId(), "cpuOvercommitRatio").getValue());
Float memoryOvercommitRatio = Float.parseFloat(_clusterDetailsDao.findDetail(server.getClusterId(), "memoryOvercommitRatio").getValue());
if (capacityVOCpus != null && !capacityVOCpus.isEmpty()) {
CapacityVO CapacityVOCpu = capacityVOCpus.get(0);
long newTotalCpu = (long) (server.getCpus().longValue() * server.getSpeed().longValue() * _cpuOverProvisioningFactor);
long newTotalCpu = (long) (server.getCpus().longValue() * server.getSpeed().longValue() * cpuOvercommitRatio);
if ((CapacityVOCpu.getTotalCapacity() <= newTotalCpu)
|| ((CapacityVOCpu.getUsedCapacity() + CapacityVOCpu.getReservedCapacity()) <= newTotalCpu)) {
CapacityVOCpu.setTotalCapacity(newTotalCpu);
@ -713,7 +729,7 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager,
_capacityDao.update(CapacityVOCpu.getId(), CapacityVOCpu);
} else {
CapacityVO capacity = new CapacityVO(server.getId(), server.getDataCenterId(), server.getPodId(), server.getClusterId(), 0L,
(long) (server.getCpus().longValue() * server.getSpeed().longValue() * _cpuOverProvisioningFactor),
(long) (server.getCpus().longValue() * server.getSpeed().longValue()),
CapacityVO.CAPACITY_TYPE_CPU);
_capacityDao.persist(capacity);
}
@ -727,7 +743,7 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager,
if (capacityVOMems != null && !capacityVOMems.isEmpty()) {
CapacityVO CapacityVOMem = capacityVOMems.get(0);
long newTotalMem = server.getTotalMemory();
long newTotalMem = (long) (server.getTotalMemory() * memoryOvercommitRatio);
if (CapacityVOMem.getTotalCapacity() <= newTotalMem
|| (CapacityVOMem.getUsedCapacity() + CapacityVOMem.getReservedCapacity() <= newTotalMem)) {
CapacityVOMem.setTotalCapacity(newTotalMem);
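
Worked example with illustrative numbers: a host with 8 cores at 2,000 MHz in a cluster with cpuOvercommitRatio 2 registers newTotalCpu = 8 × 2,000 × 2 = 32,000 MHz, and 16 GiB of physical memory with memoryOvercommitRatio 1.5 registers newTotalMem = 24 GiB.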

View File

@ -42,12 +42,11 @@ public class ComputeCapacityListener implements Listener {
public ComputeCapacityListener(CapacityDao _capacityDao,
CapacityManager _capacityMgr,
float _overProvisioningFactor) {
CapacityManager _capacityMgr
) {
super();
this._capacityDao = _capacityDao;
this._capacityMgr = _capacityMgr;
this._cpuOverProvisioningFactor = _overProvisioningFactor;
}

View File

@ -26,20 +26,20 @@ import com.cloud.utils.db.GenericDao;
public interface CapacityDao extends GenericDao<CapacityVO, Long> {
CapacityVO findByHostIdType(Long hostId, short capacityType);
List<Long> listClustersInZoneOrPodByHostCapacities(long id, int requiredCpu, long requiredRam, short capacityTypeForOrdering, boolean isZone, float cpuOverprovisioningFactor);
List<Long> listHostsWithEnoughCapacity(int requiredCpu, long requiredRam, Long clusterId, String hostType, float cpuOverprovisioningFactor);
List<Long> listClustersInZoneOrPodByHostCapacities(long id, int requiredCpu, long requiredRam, short capacityTypeForOrdering, boolean isZone);
List<Long> listHostsWithEnoughCapacity(int requiredCpu, long requiredRam, Long clusterId, String hostType);
boolean removeBy(Short capacityType, Long zoneId, Long podId, Long clusterId, Long hostId);
List<SummedCapacity> findByClusterPodZone(Long zoneId, Long podId, Long clusterId);
List<SummedCapacity> findNonSharedStorageForClusterPodZone(Long zoneId,Long podId, Long clusterId);
Pair<List<Long>, Map<Long, Double>> orderClustersByAggregateCapacity(long id, short capacityType, boolean isZone, float cpuOverprovisioningFactor);
Pair<List<Long>, Map<Long, Double>> orderClustersByAggregateCapacity(long id, short capacityType, boolean isZone);
List<SummedCapacity> findCapacityBy(Integer capacityType, Long zoneId, Long podId, Long clusterId);
List<Long> listPodsByHostCapacities(long zoneId, int requiredCpu, long requiredRam, short capacityType, float cpuOverprovisioningFactor);
Pair<List<Long>, Map<Long, Double>> orderPodsByAggregateCapacity(long zoneId, short capacityType, float cpuOverprovisioningFactor);
List<Long> listPodsByHostCapacities(long zoneId, int requiredCpu, long requiredRam, short capacityType);
Pair<List<Long>, Map<Long, Double>> orderPodsByAggregateCapacity(long zoneId, short capacityType);
List<SummedCapacity> findCapacityBy(Integer capacityType, Long zoneId,
Long podId, Long clusterId, String resourceState);
List<SummedCapacity> listCapacitiesGroupedByLevelAndType(Integer capacityType, Long zoneId, Long podId, Long clusterId, int level, Long limit);
void updateCapacityState(Long dcId, Long podId, Long clusterId,
Long hostId, String capacityState);
List<Long> listClustersCrossingThreshold(short capacityType, Long zoneId, Float disableThreshold, long computeRequested, Float overProvFactor);
}
List<Long> listClustersCrossingThreshold(short capacityType, Long zoneId, Float disableThreshold, long computeRequested);
}

View File

@ -55,10 +55,9 @@ public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements
private static final String ADD_ALLOCATED_SQL = "UPDATE `cloud`.`op_host_capacity` SET used_capacity = used_capacity + ? WHERE host_id = ? AND capacity_type = ?";
private static final String SUBTRACT_ALLOCATED_SQL = "UPDATE `cloud`.`op_host_capacity` SET used_capacity = used_capacity - ? WHERE host_id = ? AND capacity_type = ?";
private static final String LIST_CLUSTERSINZONE_BY_HOST_CAPACITIES_PART1 = "SELECT DISTINCT capacity.cluster_id FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`cluster` cluster on (cluster.id = capacity.cluster_id AND cluster.removed is NULL) WHERE ";
private static final String LIST_CLUSTERSINZONE_BY_HOST_CAPACITIES_PART2 = " AND capacity_type = ? AND ((total_capacity * ?) - used_capacity + reserved_capacity) >= ? " +
"AND cluster_id IN (SELECT distinct cluster_id FROM `cloud`.`op_host_capacity` WHERE ";
private static final String LIST_CLUSTERSINZONE_BY_HOST_CAPACITIES_PART3 = " AND capacity_type = ? AND ((total_capacity * ?) - used_capacity + reserved_capacity) >= ?) ";
private static final String LIST_CLUSTERSINZONE_BY_HOST_CAPACITIES_PART1 = "SELECT DISTINCT capacity.cluster_id FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`cluster` cluster on (cluster.id = capacity.cluster_id AND cluster.removed is NULL) INNER JOIN `cloud`.`cluster_details` cluster_details ON (cluster.id = cluster_details.cluster_id ) WHERE ";
private static final String LIST_CLUSTERSINZONE_BY_HOST_CAPACITIES_PART2 = " AND capacity_type = ? AND cluster_details.name= ? AND ((total_capacity * cluster_details.value ) - used_capacity + reserved_capacity) >= ? AND capacity.cluster_id IN (SELECT distinct capacity.cluster_id FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`cluster_details` cluster_details ON (capacity.cluster_id = cluster_details.cluster_id ) WHERE ";
private static final String LIST_CLUSTERSINZONE_BY_HOST_CAPACITIES_PART3 = " AND capacity_type = ? AND cluster_details.name= ? AND ((total_capacity * cluster_details.value) - used_capacity + reserved_capacity) >= ?) ";
private final SearchBuilder<CapacityVO> _hostIdTypeSearch;
private final SearchBuilder<CapacityVO> _hostOrPoolIdSearch;
@ -66,47 +65,54 @@ public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements
@Inject protected StoragePoolDao _storagePoolDao;
private static final String LIST_HOSTS_IN_CLUSTER_WITH_ENOUGH_CAPACITY = "SELECT a.host_id FROM (host JOIN op_host_capacity a ON host.id = a.host_id AND host.cluster_id = ? AND host.type = ? " +
"AND (a.total_capacity * ? - a.used_capacity) >= ? and a.capacity_type = 1) " +
"JOIN op_host_capacity b ON a.host_id = b.host_id AND b.total_capacity - b.used_capacity >= ? AND b.capacity_type = 0";
private static final String LIST_HOSTS_IN_CLUSTER_WITH_ENOUGH_CAPACITY = " SELECT host_capacity.host_id FROM (`cloud`.`host` JOIN `cloud`.`op_host_capacity` host_capacity ON (host.id = host_capacity.host_id AND host.cluster_id = ?) JOIN `cloud`.`cluster_details` cluster_details ON (host_capacity.cluster_id = cluster_details.cluster_id) AND host.type = ? AND cluster_details.name='cpuOvercommitRatio' AND ((host_capacity.total_capacity *cluster_details.value ) - host_capacity.used_capacity) >= ? and host_capacity.capacity_type = '1' " +
" AND host_capacity.host_id IN (SELECT capacity.host_id FROM `cloud`.`op_host_capacity` capacity JOIN `cloud`.`cluster_details` cluster_details ON (capacity.cluster_id= cluster_details.cluster_id) where capacity_type='0' AND cluster_details.name='memoryOvercommitRatio' AND ((total_capacity* cluster_details.value) - used_capacity ) >= ?)) ";
private static final String ORDER_CLUSTERS_BY_AGGREGATE_CAPACITY_PART1 = "SELECT cluster_id, SUM(used_capacity+reserved_capacity)/SUM(total_capacity * ?) FROM `cloud`.`op_host_capacity` WHERE " ;
private static final String ORDER_CLUSTERS_BY_AGGREGATE_CAPACITY_PART2 = " AND capacity_type = ? GROUP BY cluster_id ORDER BY SUM(used_capacity+reserved_capacity)/SUM(total_capacity * ?) ASC";
private static final String ORDER_CLUSTERS_BY_AGGREGATE_CAPACITY_PART1= "SELECT capacity.cluster_id, SUM(used_capacity+reserved_capacity)/SUM(total_capacity ) FROM `cloud`.`op_host_capacity` capacity WHERE ";
private static final String LIST_PODSINZONE_BY_HOST_CAPACITIES = "SELECT DISTINCT capacity.pod_id FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`host_pod_ref` pod " +
" ON (pod.id = capacity.pod_id AND pod.removed is NULL) WHERE " +
" capacity.data_center_id = ? AND capacity_type = ? AND ((total_capacity * ?) - used_capacity + reserved_capacity) >= ? " +
" AND pod_id IN (SELECT distinct pod_id FROM `cloud`.`op_host_capacity` WHERE " +
" capacity.data_center_id = ? AND capacity_type = ? AND ((total_capacity * ?) - used_capacity + reserved_capacity) >= ?) ";
private static final String ORDER_CLUSTERS_BY_AGGREGATE_CAPACITY_PART2 = " AND capacity_type = ? GROUP BY capacity.cluster_id ORDER BY SUM(used_capacity+reserved_capacity)/SUM(total_capacity) ASC";
private static final String ORDER_PODS_BY_AGGREGATE_CAPACITY = "SELECT pod_id, SUM(used_capacity+reserved_capacity)/SUM(total_capacity * ?) FROM `cloud`.`op_host_capacity` WHERE data_center_id = ? " +
" AND capacity_type = ? GROUP BY pod_id ORDER BY SUM(used_capacity+reserved_capacity)/SUM(total_capacity * ?) ASC";
private static final String ORDER_CLUSTERS_BY_AGGREGATE_OVERCOMMIT_CAPACITY_PART1= "SELECT capacity.cluster_id, SUM(used_capacity+reserved_capacity)/SUM(total_capacity * cluster_details.value) FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`cluster_details` cluster_details ON (capacity.cluster_id = cluster_details.cluster_id) WHERE ";
private static final String ORDER_CLUSTERS_BY_AGGREGATE_OVERCOMMIT_CAPACITY_PART2= " AND capacity_type = ? AND cluster_details.name =? GROUP BY capacity.cluster_id ORDER BY SUM(used_capacity+reserved_capacity)/SUM(total_capacity * cluster_details.value) ASC";
private static final String LIST_PODSINZONE_BY_HOST_CAPACITY_TYPE = "SELECT DISTINCT capacity.pod_id FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`host_pod_ref` pod " +
" ON (pod.id = capacity.pod_id AND pod.removed is NULL) INNER JOIN `cloud`.`cluster_details` cluster_details ON (capacity.cluster_id = cluster_details.cluster_id) WHERE capacity.data_center_id = ? AND capacity_type = ? AND cluster_details.name = ? AND ((total_capacity * cluster_details.value) - used_capacity + reserved_capacity) >= ? ";
private static final String ORDER_PODS_BY_AGGREGATE_CAPACITY = " SELECT capacity.pod_id, SUM(used_capacity+reserved_capacity)/SUM(total_capacity) FROM `cloud`.`op_host_capacity` capacity WHERE data_center_id= ? AND capacity_type = ? GROUP BY capacity.pod_id ORDER BY SUM(used_capacity+reserved_capacity)/SUM(total_capacity) ASC ";
private static final String ORDER_PODS_BY_AGGREGATE_OVERCOMMIT_CAPACITY ="SELECT capacity.pod_id, SUM(used_capacity+reserved_capacity)/SUM(total_capacity * cluster_details.value) FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`cluster_details` cluster_details ON (capacity.cluster_id = cluster_details.cluster_id) WHERE data_center_id=? AND capacity_type = ? AND cluster_details.name = ? GROUP BY capacity.pod_id ORDER BY SUM(used_capacity+reserved_capacity)/SUM(total_capacity * cluster_details.value) ASC";
private static final String LIST_CAPACITY_BY_RESOURCE_STATE = "SELECT capacity.data_center_id, sum(capacity.used_capacity), sum(capacity.reserved_quantity), sum(capacity.total_capacity), capacity_capacity_type "+
"FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`data_center` dc ON (dc.id = capacity.data_center_id AND dc.removed is NULL)"+
"FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`host_pod_ref` pod ON (pod.id = capacity.pod_id AND pod.removed is NULL)"+
"FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`cluster` cluster ON (cluster.id = capacity.cluster_id AND cluster.removed is NULL)"+
"FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`host` host ON (host.id = capacity.host_id AND host.removed is NULL)"+
"WHERE dc.allocation_state = ? AND pod.allocation_state = ? AND cluster.allocation_state = ? AND host.resource_state = ? AND capacity_type not in (3,4) ";
"FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`data_center` dc ON (dc.id = capacity.data_center_id AND dc.removed is NULL)"+
"FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`host_pod_ref` pod ON (pod.id = capacity.pod_id AND pod.removed is NULL)"+
"FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`cluster` cluster ON (cluster.id = capacity.cluster_id AND cluster.removed is NULL)"+
"FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`host` host ON (host.id = capacity.host_id AND host.removed is NULL)"+
"WHERE dc.allocation_state = ? AND pod.allocation_state = ? AND cluster.allocation_state = ? AND host.resource_state = ? AND capacity_type not in (3,4) ";
private static final String LIST_CAPACITY_GROUP_BY_ZONE_TYPE_PART1 = "SELECT (sum(capacity.used_capacity) + sum(capacity.reserved_capacity)), (case capacity_type when 1 then (sum(total_capacity) * (select value from `cloud`.`configuration` where name like 'cpu.overprovisioning.factor')) else sum(total_capacity) end), " +
"((sum(capacity.used_capacity) + sum(capacity.reserved_capacity)) / (case capacity_type when 1 then (sum(total_capacity) * (select value from `cloud`.`configuration` where name like 'cpu.overprovisioning.factor')) else sum(total_capacity) end)) percent,"+
" capacity.capacity_type, capacity.data_center_id "+
"FROM `cloud`.`op_host_capacity` capacity "+
"WHERE total_capacity > 0 AND data_center_id is not null AND capacity_state='Enabled'";
"((sum(capacity.used_capacity) + sum(capacity.reserved_capacity)) / (case capacity_type when 1 then (sum(total_capacity) * (select value from `cloud`.`configuration` where name like 'cpu.overprovisioning.factor')) else sum(total_capacity) end)) percent,"+
" capacity.capacity_type, capacity.data_center_id "+
"FROM `cloud`.`op_host_capacity` capacity "+
"WHERE total_capacity > 0 AND data_center_id is not null AND capacity_state='Enabled'";
private static final String LIST_CAPACITY_GROUP_BY_ZONE_TYPE_PART2 = " GROUP BY data_center_id, capacity_type order by percent desc limit ";
private static final String LIST_CAPACITY_GROUP_BY_POD_TYPE_PART1 = "SELECT (sum(capacity.used_capacity) + sum(capacity.reserved_capacity)), (case capacity_type when 1 then (sum(total_capacity) * (select value from `cloud`.`configuration` where name like 'cpu.overprovisioning.factor')) else sum(total_capacity) end), " +
"((sum(capacity.used_capacity) + sum(capacity.reserved_capacity)) / (case capacity_type when 1 then (sum(total_capacity) * (select value from `cloud`.`configuration` where name like 'cpu.overprovisioning.factor')) else sum(total_capacity) end)) percent,"+
" capacity.capacity_type, capacity.data_center_id, pod_id "+
"FROM `cloud`.`op_host_capacity` capacity "+
"WHERE total_capacity > 0 AND pod_id is not null AND capacity_state='Enabled'";
private static final String LIST_CAPACITY_GROUP_BY_POD_TYPE_PART1 = "SELECT (sum(capacity.used_capacity) + sum(capacity.reserved_capacity))," +
" (case capacity_type when 1 then (sum(total_capacity) * (select value from `cloud`.`cluster_details` where cluster_details.name= 'cpuOvercommitRatio' AND cluster_details.cluster_id=capacity.cluster_id)) " +
"when '0' then (sum(total_capacity) * (select value from `cloud`.`cluster_details` where cluster_details.name= 'memoryOvercommitRatio' AND cluster_details.cluster_id=capacity.cluster_id))else sum(total_capacity) end)," +
"((sum(capacity.used_capacity) + sum(capacity.reserved_capacity)) / ( case capacity_type when 1 then (sum(total_capacity) * (select value from `cloud`.`cluster_details` where cluster_details.name= 'cpuOvercommitRatio' AND cluster_details.cluster_id=capacity.cluster_id)) " +
"when '0' then (sum(total_capacity) * (select value from `cloud`.`cluster_details` where cluster_details.name= 'memoryOvercommitRatio' AND cluster_details.cluster_id=capacity.cluster_id))else sum(total_capacity) end)) percent," +
"capacity.capacity_type, capacity.data_center_id, pod_id FROM `cloud`.`op_host_capacity` capacity WHERE total_capacity > 0 AND data_center_id is not null AND capacity_state='Enabled' ";
private static final String LIST_CAPACITY_GROUP_BY_POD_TYPE_PART2 = " GROUP BY pod_id, capacity_type order by percent desc limit ";
private static final String LIST_CAPACITY_GROUP_BY_CLUSTER_TYPE_PART1 = "SELECT (sum(capacity.used_capacity) + sum(capacity.reserved_capacity)), (case capacity_type when 1 then (sum(total_capacity) * (select value from `cloud`.`configuration` where name like 'cpu.overprovisioning.factor')) else sum(total_capacity) end), " +
"((sum(capacity.used_capacity) + sum(capacity.reserved_capacity)) / (case capacity_type when 1 then (sum(total_capacity) * (select value from `cloud`.`configuration` where name like 'cpu.overprovisioning.factor')) else sum(total_capacity) end)) percent,"+
"capacity.capacity_type, capacity.data_center_id, pod_id, cluster_id "+
"FROM `cloud`.`op_host_capacity` capacity "+
"WHERE total_capacity > 0 AND cluster_id is not null AND capacity_state='Enabled'";
private static final String LIST_CAPACITY_GROUP_BY_CLUSTER_TYPE_PART1 = "SELECT (sum(capacity.used_capacity) + sum(capacity.reserved_capacity))," +
" (case capacity_type when 1 then (sum(total_capacity) * (select value from `cloud`.`cluster_details` where cluster_details.name= 'cpuOvercommitRatio' AND cluster_details.cluster_id=capacity.cluster_id)) " +
"when '0' then (sum(total_capacity) * (select value from `cloud`.`cluster_details` where cluster_details.name= 'memoryOvercommitRatio' AND cluster_details.cluster_id=capacity.cluster_id))else sum(total_capacity) end)," +
"((sum(capacity.used_capacity) + sum(capacity.reserved_capacity)) / ( case capacity_type when 1 then (sum(total_capacity) * (select value from `cloud`.`cluster_details` where cluster_details.name= 'cpuOvercommitRatio' AND cluster_details.cluster_id=capacity.cluster_id)) " +
"when '0' then (sum(total_capacity) * (select value from `cloud`.`cluster_details` where cluster_details.name= 'memoryOvercommitRatio' AND cluster_details.cluster_id=capacity.cluster_id))else sum(total_capacity) end)) percent," +
"capacity.capacity_type, capacity.data_center_id, pod_id, cluster_id FROM `cloud`.`op_host_capacity` capacity WHERE total_capacity > 0 AND data_center_id is not null AND capacity_state='Enabled' ";
private static final String LIST_CAPACITY_GROUP_BY_CLUSTER_TYPE_PART2 = " GROUP BY cluster_id, capacity_type order by percent desc limit ";
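// Illustrative helper (not part of this commit): the CASE expressions above key on
// the numeric capacity_type codes; assuming 1 is CPU and 0 is memory, as the
// CapacityVO constants used elsewhere in this class suggest, the matching
// cluster_details key can be derived like this:
private static String overcommitDetailName(short capacityType) {
    if (capacityType == CapacityVO.CAPACITY_TYPE_CPU) {
        return "cpuOvercommitRatio";
    } else if (capacityType == CapacityVO.CAPACITY_TYPE_MEMORY) {
        return "memoryOvercommitRatio";
    }
    return null; // other capacity types are not scaled by an overcommit ratio
}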
private static final String UPDATE_CAPACITY_STATE = "UPDATE `cloud`.`op_host_capacity` SET capacity_state = ? WHERE ";
private static final String LIST_CLUSTERS_CROSSING_THRESHOLD = "SELECT cluster_id " +
@ -138,34 +144,34 @@ public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements
_allFieldsSearch.done();
}
@Override
public List<Long> listClustersCrossingThreshold(short capacityType, Long zoneId, Float disableThreshold, long compute_requested, Float overProvFactor){
public List<Long> listClustersCrossingThreshold(short capacityType, Long zoneId, Float disableThreshold, long compute_requested){
Transaction txn = Transaction.currentTxn();
PreparedStatement pstmt = null;
List<Long> result = new ArrayList<Long>();
StringBuilder sql = new StringBuilder(LIST_CLUSTERS_CROSSING_THRESHOLD);
try {
pstmt = txn.prepareAutoCloseStatement(sql.toString());
pstmt.setLong(1,compute_requested);
pstmt.setShort(2,capacityType);
pstmt.setFloat(3,disableThreshold);
pstmt.setLong(4,zoneId);
try {
pstmt = txn.prepareAutoCloseStatement(sql.toString());
pstmt.setLong(1, compute_requested);
pstmt.setLong(2, zoneId);
pstmt.setShort(3, capacityType);
pstmt.setFloat(4, disableThreshold*overProvFactor);
ResultSet rs = pstmt.executeQuery();
while (rs.next()) {
result.add(rs.getLong(1));
}
return result;
} catch (SQLException e) {
throw new CloudRuntimeException("DB Exception on: " + sql, e);
} catch (Throwable e) {
throw new CloudRuntimeException("Caught: " + sql, e);
}
}
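// Hypothetical caller sketch (not from the commit): with the overcommit factor now
// folded into the SQL via cluster_details, callers pass only the raw disable
// threshold. The names plan, capacityThresholdMap and cpu_requested mirror
// FirstFitPlanner further below.
List<Long> crossing = _capacityDao.listClustersCrossingThreshold(
        Capacity.CAPACITY_TYPE_CPU,                            // capacity type to check
        plan.getDataCenterId(),                                // zone to scan
        capacityThresholdMap.get(Capacity.CAPACITY_TYPE_CPU),  // disable threshold (fraction)
        cpu_requested);                                        // requested compute in MHz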
/*public static String preparePlaceHolders(int length) {
StringBuilder builder = new StringBuilder();
@ -242,6 +248,8 @@ public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements
PreparedStatement pstmt = null;
List<SummedCapacity> result = new ArrayList<SummedCapacity>();
List<Long> resourceIdList = new ArrayList<Long>();
switch(level){
case 1: // List all the capacities grouped by zone, capacity Type
finalQuery.append(LIST_CAPACITY_GROUP_BY_ZONE_TYPE_PART1);
@ -257,17 +265,21 @@ public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements
}
if (zoneId != null){
finalQuery.append(" AND data_center_id="+zoneId);
finalQuery.append(" AND data_center_id = ?" );
resourceIdList.add(zoneId);
}
if (podId != null){
finalQuery.append(" AND pod_id="+podId);
finalQuery.append(" AND pod_id = ?" );
resourceIdList.add(podId);
}
if (clusterId != null){
finalQuery.append(" AND cluster_id="+clusterId);
finalQuery.append(" AND cluster_id = ?" );
resourceIdList.add(clusterId );
}
if (capacityType != null){
finalQuery.append(" AND capacity_type="+capacityType);
finalQuery.append(" AND capacity_type = ?");
resourceIdList.add(capacityType.longValue());
}
switch(level){
case 1: // List all the capacities grouped by zone, capacity Type
@ -283,12 +295,16 @@ public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements
break;
}
finalQuery.append(limit.toString());
finalQuery.append("?");
resourceIdList.add((long) limit);
try {
pstmt = txn.prepareAutoCloseStatement(finalQuery.toString());
for (int i = 0; i < resourceIdList.size(); i++){
pstmt.setLong(1+i, resourceIdList.get(i));
}
ResultSet rs = pstmt.executeQuery();
while (rs.next()) {
SummedCapacity summedCapacity = new SummedCapacity( rs.getLong(1), rs.getLong(2), rs.getFloat(3),
(short)rs.getLong(4), rs.getLong(5),
level != 1 ? rs.getLong(6): null,
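// Minimal sketch (not from the commit) of the placeholder pattern adopted above:
// each optional filter appends an " ... = ?" fragment and queues its value in
// resourceIdList in the same order, so binding is a simple 1-based loop instead
// of string-concatenated SQL. Method name and filters are illustrative only.
private List<Long> listZoneIdsSketch(Long zoneId, Long clusterId) throws SQLException {
    Transaction txn = Transaction.currentTxn();
    StringBuilder finalQuery = new StringBuilder("SELECT data_center_id FROM `cloud`.`op_host_capacity` WHERE total_capacity > 0");
    List<Long> resourceIdList = new ArrayList<Long>();
    if (zoneId != null) {
        finalQuery.append(" AND data_center_id = ?");
        resourceIdList.add(zoneId);
    }
    if (clusterId != null) {
        finalQuery.append(" AND cluster_id = ?");
        resourceIdList.add(clusterId);
    }
    PreparedStatement pstmt = txn.prepareAutoCloseStatement(finalQuery.toString());
    for (int i = 0; i < resourceIdList.size(); i++) {
        pstmt.setLong(1 + i, resourceIdList.get(i)); // JDBC parameters are 1-based
    }
    List<Long> ids = new ArrayList<Long>();
    ResultSet rs = pstmt.executeQuery();
    while (rs.next()) {
        ids.add(rs.getLong(1));
    }
    return ids;
}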
@ -390,8 +406,8 @@ public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements
}
@Override
public List<Long> listClustersInZoneOrPodByHostCapacities(long id, int requiredCpu, long requiredRam, short capacityTypeForOrdering, boolean isZone, float cpuOverprovisioningFactor){
public List<Long> listClustersInZoneOrPodByHostCapacities(long id, int requiredCpu, long requiredRam, short capacityTypeForOrdering, boolean isZone){
Transaction txn = Transaction.currentTxn();
PreparedStatement pstmt = null;
List<Long> result = new ArrayList<Long>();
@ -414,11 +430,11 @@ public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements
pstmt = txn.prepareAutoCloseStatement(sql.toString());
pstmt.setLong(1, id);
pstmt.setShort(2, CapacityVO.CAPACITY_TYPE_CPU);
pstmt.setFloat(3, cpuOverprovisioningFactor);
pstmt.setString(3,"cpuOvercommitRatio");
pstmt.setLong(4, requiredCpu);
pstmt.setLong(5, id);
pstmt.setShort(6, CapacityVO.CAPACITY_TYPE_MEMORY);
pstmt.setFloat(7, 1);
pstmt.setString(7,"memoryOvercommitRatio");
pstmt.setLong(8, requiredRam);
ResultSet rs = pstmt.executeQuery();
@ -435,8 +451,8 @@ public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements
@Override
public List<Long> listHostsWithEnoughCapacity(int requiredCpu, long requiredRam, Long clusterId, String hostType, float cpuOverprovisioningFactor){
public List<Long> listHostsWithEnoughCapacity(int requiredCpu, long requiredRam, Long clusterId, String hostType){
Transaction txn = Transaction.currentTxn();
PreparedStatement pstmt = null;
List<Long> result = new ArrayList<Long>();
@ -445,9 +461,8 @@ public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements
pstmt = txn.prepareAutoCloseStatement(sql.toString());
pstmt.setLong(1, clusterId);
pstmt.setString(2, hostType);
pstmt.setFloat(3, cpuOverprovisioningFactor);
pstmt.setLong(4, requiredCpu);
pstmt.setLong(5, requiredRam);
pstmt.setLong(3, requiredCpu);
pstmt.setLong(4, requiredRam);
ResultSet rs = pstmt.executeQuery();
while (rs.next()) {
@ -629,31 +644,44 @@ public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements
}
@Override
public Pair<List<Long>, Map<Long, Double>> orderClustersByAggregateCapacity(long id, short capacityTypeForOrdering, boolean isZone, float cpuOverprovisioningFactor){
public Pair<List<Long>, Map<Long, Double>> orderClustersByAggregateCapacity(long id, short capacityTypeForOrdering, boolean isZone){
Transaction txn = Transaction.currentTxn();
PreparedStatement pstmt = null;
List<Long> result = new ArrayList<Long>();
Map<Long, Double> clusterCapacityMap = new HashMap<Long, Double>();
StringBuilder sql = new StringBuilder(ORDER_CLUSTERS_BY_AGGREGATE_CAPACITY_PART1);
StringBuilder sql = new StringBuilder();
if (capacityTypeForOrdering != Capacity.CAPACITY_TYPE_CPU && capacityTypeForOrdering != Capacity.CAPACITY_TYPE_MEMORY) {
sql.append(ORDER_CLUSTERS_BY_AGGREGATE_CAPACITY_PART1);
}
else {
sql.append(ORDER_CLUSTERS_BY_AGGREGATE_OVERCOMMIT_CAPACITY_PART1);
}
if(isZone){
sql.append(" data_center_id = ?");
}else{
sql.append(" pod_id = ?");
}
sql.append(ORDER_CLUSTERS_BY_AGGREGATE_CAPACITY_PART2);
if (capacityTypeForOrdering != Capacity.CAPACITY_TYPE_CPU && capacityTypeForOrdering != Capacity.CAPACITY_TYPE_MEMORY){
sql.append(ORDER_CLUSTERS_BY_AGGREGATE_CAPACITY_PART2);
}
else {
sql.append(ORDER_CLUSTERS_BY_AGGREGATE_OVERCOMMIT_CAPACITY_PART2);
}
try {
pstmt = txn.prepareAutoCloseStatement(sql.toString());
if(capacityTypeForOrdering == CapacityVO.CAPACITY_TYPE_CPU){
pstmt.setFloat(1, cpuOverprovisioningFactor);
pstmt.setFloat(4, cpuOverprovisioningFactor);
}else{
pstmt.setFloat(1, 1);
pstmt.setFloat(4, 1);
}
pstmt.setLong(2, id);
pstmt.setShort(3, capacityTypeForOrdering);
pstmt.setLong(1, id);
pstmt.setShort(2, capacityTypeForOrdering);
if (capacityTypeForOrdering == Capacity.CAPACITY_TYPE_CPU){
pstmt.setString(3, "cpuOvercommitRatio");
}
else if (capacityTypeForOrdering == Capacity.CAPACITY_TYPE_MEMORY){
pstmt.setString(3, "memoryOvercommitRatio");
}
ResultSet rs = pstmt.executeQuery();
while (rs.next()) {
Long clusterId = rs.getLong(1);
@ -669,22 +697,25 @@ public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements
}
@Override
public List<Long> listPodsByHostCapacities(long zoneId, int requiredCpu, long requiredRam, short capacityType, float cpuOverprovisioningFactor) {
public List<Long> listPodsByHostCapacities(long zoneId, int requiredCpu, long requiredRam, short capacityType) {
Transaction txn = Transaction.currentTxn();
PreparedStatement pstmt = null;
List<Long> result = new ArrayList<Long>();
StringBuilder sql = new StringBuilder(LIST_PODSINZONE_BY_HOST_CAPACITIES);
StringBuilder sql = new StringBuilder(LIST_PODSINZONE_BY_HOST_CAPACITY_TYPE);
sql.append("AND capacity.pod_id IN (");
sql.append(LIST_PODSINZONE_BY_HOST_CAPACITY_TYPE);
sql.append(")");
try {
pstmt = txn.prepareAutoCloseStatement(sql.toString());
pstmt.setLong(1, zoneId);
pstmt.setShort(2, CapacityVO.CAPACITY_TYPE_CPU);
pstmt.setFloat(3, cpuOverprovisioningFactor);
pstmt.setString(3, "cpuOvercommitRatio");
pstmt.setLong(4, requiredCpu);
pstmt.setLong(5, zoneId);
pstmt.setShort(6, CapacityVO.CAPACITY_TYPE_MEMORY);
pstmt.setFloat(7, 1);
pstmt.setString(7,"memoryOvercommitRatio" );
pstmt.setLong(8, requiredRam);
ResultSet rs = pstmt.executeQuery();
@ -700,26 +731,22 @@ public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements
}
@Override
public Pair<List<Long>, Map<Long, Double>> orderPodsByAggregateCapacity(long zoneId, short capacityTypeForOrdering, float cpuOverprovisioningFactor) {
public Pair<List<Long>, Map<Long, Double>> orderPodsByAggregateCapacity(long zoneId, short capacityTypeForOrdering) {
Transaction txn = Transaction.currentTxn();
PreparedStatement pstmt = null;
List<Long> result = new ArrayList<Long>();
Map<Long, Double> podCapacityMap = new HashMap<Long, Double>();
StringBuilder sql = new StringBuilder(ORDER_PODS_BY_AGGREGATE_CAPACITY);
try {
pstmt = txn.prepareAutoCloseStatement(sql.toString());
pstmt.setLong(2, zoneId);
pstmt.setShort(3, capacityTypeForOrdering);
if(capacityTypeForOrdering == CapacityVO.CAPACITY_TYPE_CPU){
pstmt.setFloat(1, cpuOverprovisioningFactor);
pstmt.setFloat(4, cpuOverprovisioningFactor);
}else{
pstmt.setFloat(1, 1);
pstmt.setFloat(4, 1);
}
if(capacityTypeForOrdering == CapacityVO.CAPACITY_TYPE_CPU){
pstmt.setString(3, "cpuOvercommitRatio");
}
ResultSet rs = pstmt.executeQuery();
while (rs.next()) {
Long podId = rs.getLong(1);
@ -737,7 +764,7 @@ public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements
@Override
public void updateCapacityState(Long dcId, Long podId, Long clusterId, Long hostId, String capacityState) {
Transaction txn = Transaction.currentTxn();
StringBuilder sql = new StringBuilder(UPDATE_CAPACITY_STATE);
List<Long> resourceIdList = new ArrayList<Long>();
if (dcId != null){

View File

@ -28,6 +28,7 @@ import javax.inject.Inject;
import javax.naming.ConfigurationException;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import com.cloud.dc.*;
import org.apache.log4j.Logger;
import com.cloud.agent.manager.allocator.HostAllocator;
@ -38,11 +39,6 @@ import com.cloud.capacity.CapacityVO;
import com.cloud.capacity.dao.CapacityDao;
import com.cloud.configuration.Config;
import com.cloud.configuration.dao.ConfigurationDao;
import com.cloud.dc.ClusterVO;
import com.cloud.dc.DataCenter;
import com.cloud.dc.DataCenterVO;
import com.cloud.dc.HostPodVO;
import com.cloud.dc.Pod;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.dc.dao.DataCenterDao;
import com.cloud.dc.dao.HostPodDao;
@ -100,6 +96,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
@Inject protected AccountManager _accountMgr;
@Inject protected StorageManager _storageMgr;
@Inject DataStoreManager dataStoreMgr;
@Inject protected ClusterDetailsDao _clusterDetailsDao;
//@com.cloud.utils.component.Inject(adapter=StoragePoolAllocator.class)
@Inject protected List<StoragePoolAllocator> _storagePoolAllocators;
@ -128,9 +125,6 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
int cpu_requested = offering.getCpu() * offering.getSpeed();
long ram_requested = offering.getRamSize() * 1024L * 1024L;
String opFactor = _configDao.getValue(Config.CPUOverprovisioningFactor.key());
float cpuOverprovisioningFactor = NumbersUtil.parseFloat(opFactor, 1);
if (s_logger.isDebugEnabled()) {
s_logger.debug("DeploymentPlanner allocation algorithm: "+_allocationAlgorithm);
@ -199,7 +193,12 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
s_logger.debug("The last Host, hostId: "+ host.getId() +" already has max Running VMs(count includes system VMs), skipping this and trying other available hosts");
}else{
if (host.getStatus() == Status.Up && host.getResourceState() == ResourceState.Enabled) {
if(_capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, true, cpuOverprovisioningFactor, true)){
long cluster_id = host.getClusterId();
ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id,"cpuOvercommitRatio");
ClusterDetailsVO cluster_detail_ram = _clusterDetailsDao.findDetail(cluster_id,"memoryOvercommitRatio");
Float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue());
Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue());
if(_capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, true, cpuOvercommitRatio, memoryOvercommitRatio, true)){
s_logger.debug("The last host of this VM is UP and has enough capacity");
s_logger.debug("Now checking for suitable pools under zone: "+host.getDataCenterId() +", pod: "+ host.getPodId()+", cluster: "+ host.getClusterId());
//search for storage under the zone, pod, cluster of the last host.
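// Hedged helper sketch (not in the commit): the inline lookups above assume
// findDetail always returns a row. Clusters added before this change may lack
// the two detail rows, so a defaulting wrapper like this could be safer;
// the method name is hypothetical.
private float clusterRatioOrDefault(long clusterId, String ratioName) {
    ClusterDetailsVO detail = _clusterDetailsDao.findDetail(clusterId, ratioName);
    return detail == null ? 1.0f : Float.parseFloat(detail.getValue());
}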
@ -287,12 +286,9 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
ServiceOffering offering = vmProfile.getServiceOffering();
int requiredCpu = offering.getCpu() * offering.getSpeed();
long requiredRam = offering.getRamSize() * 1024L * 1024L;
String opFactor = _configDao.getValue(Config.CPUOverprovisioningFactor.key());
float cpuOverprovisioningFactor = NumbersUtil.parseFloat(opFactor, 1);
//list pods under this zone by cpu and ram capacity
List<Long> prioritizedPodIds = new ArrayList<Long>();
Pair<List<Long>, Map<Long, Double>> podCapacityInfo = listPodsByCapacity(plan.getDataCenterId(), requiredCpu, requiredRam, cpuOverprovisioningFactor);
Pair<List<Long>, Map<Long, Double>> podCapacityInfo = listPodsByCapacity(plan.getDataCenterId(), requiredCpu, requiredRam);
List<Long> podsWithCapacity = podCapacityInfo.first();
if(!podsWithCapacity.isEmpty()){
@ -350,11 +346,9 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
DataCenter dc = _dcDao.findById(vm.getDataCenterId());
int requiredCpu = offering.getCpu() * offering.getSpeed();
long requiredRam = offering.getRamSize() * 1024L * 1024L;
String opFactor = _configDao.getValue(Config.CPUOverprovisioningFactor.key());
float cpuOverprovisioningFactor = NumbersUtil.parseFloat(opFactor, 1);
//list clusters under this zone by cpu and ram capacity
Pair<List<Long>, Map<Long, Double>> clusterCapacityInfo = listClustersByCapacity(id, requiredCpu, requiredRam, avoid, isZone, cpuOverprovisioningFactor);
Pair<List<Long>, Map<Long, Double>> clusterCapacityInfo = listClustersByCapacity(id, requiredCpu, requiredRam, avoid, isZone);
List<Long> prioritizedClusterIds = clusterCapacityInfo.first();
if(!prioritizedClusterIds.isEmpty()){
if(avoid.getClustersToAvoid() != null){
@ -468,30 +462,30 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
// For each capacity get the cluster list crossing the threshold and remove it from the clusterList that will be used for vm allocation.
for(short capacity : capacityList){
if (clusterListForVmAllocation == null || clusterListForVmAllocation.size() == 0){
return;
}
if (capacity == Capacity.CAPACITY_TYPE_CPU){
clustersCrossingThreshold = _capacityDao.listClustersCrossingThreshold(Capacity.CAPACITY_TYPE_CPU, plan.getDataCenterId(),
capacityThresholdMap.get(capacity), cpu_requested, ApiDBUtils.getCpuOverprovisioningFactor());
}else{
clustersCrossingThreshold = _capacityDao.listClustersCrossingThreshold(capacity, plan.getDataCenterId(),
capacityThresholdMap.get(capacity), ram_requested, 1.0f);//Mem overprov not supported yet
}
if (capacity == Capacity.CAPACITY_TYPE_CPU) {
clustersCrossingThreshold = _capacityDao.listClustersCrossingThreshold(capacity, plan.getDataCenterId(),
capacityThresholdMap.get(capacity), cpu_requested);
}
else if (capacity == Capacity.CAPACITY_TYPE_MEMORY) {
clustersCrossingThreshold = _capacityDao.listClustersCrossingThreshold(capacity, plan.getDataCenterId(),
capacityThresholdMap.get(capacity), ram_requested);
}
if (clustersCrossingThreshold != null && clustersCrossingThreshold.size() != 0){
// addToAvoid Set
avoid.addClusterList(clustersCrossingThreshold);
// Remove clusters crossing disabled threshold
clusterListForVmAllocation.removeAll(clustersCrossingThreshold);
s_logger.debug("Cannot allocate cluster list " + clustersCrossingThreshold.toString() + " for vm creation since their allocated percentage" +
" crosses the disable capacity threshold: " + capacityThresholdMap.get(capacity) + " for capacity Type : " + capacity + ", skipping these clusters");
}
}
}
@ -560,7 +554,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
}
protected Pair<List<Long>, Map<Long, Double>> listClustersByCapacity(long id, int requiredCpu, long requiredRam, ExcludeList avoid, boolean isZone, float cpuOverprovisioningFactor){
protected Pair<List<Long>, Map<Long, Double>> listClustersByCapacity(long id, int requiredCpu, long requiredRam, ExcludeList avoid, boolean isZone){
//look at the aggregate available cpu and ram per cluster
//although an aggregate value may be false indicator that a cluster can host a vm, it will at the least eliminate those clusters which definitely cannot
@ -574,14 +568,11 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
capacityType = CapacityVO.CAPACITY_TYPE_MEMORY;
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("CPUOverprovisioningFactor considered: " + cpuOverprovisioningFactor);
}
List<Long> clusterIdswithEnoughCapacity = _capacityDao.listClustersInZoneOrPodByHostCapacities(id, requiredCpu, requiredRam, capacityType, isZone, cpuOverprovisioningFactor);
List<Long> clusterIdswithEnoughCapacity = _capacityDao.listClustersInZoneOrPodByHostCapacities(id, requiredCpu, requiredRam, capacityType, isZone);
if (s_logger.isTraceEnabled()) {
s_logger.trace("ClusterId List having enough CPU and RAM capacity: " + clusterIdswithEnoughCapacity);
}
Pair<List<Long>, Map<Long, Double>> result = _capacityDao.orderClustersByAggregateCapacity(id, capacityType, isZone, cpuOverprovisioningFactor);
Pair<List<Long>, Map<Long, Double>> result = _capacityDao.orderClustersByAggregateCapacity(id, capacityType, isZone);
List<Long> clusterIdsOrderedByAggregateCapacity = result.first();
//only keep the clusters that have enough capacity to host this VM
if (s_logger.isTraceEnabled()) {
@ -597,7 +588,8 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
}
protected Pair<List<Long>, Map<Long, Double>> listPodsByCapacity(long zoneId, int requiredCpu, long requiredRam, float cpuOverprovisioningFactor){
protected Pair<List<Long>, Map<Long, Double>> listPodsByCapacity(long zoneId, int requiredCpu, long requiredRam){
//look at the aggregate available cpu and ram per pod
//although an aggregate value may be false indicator that a pod can host a vm, it will at the least eliminate those pods which definitely cannot
@ -611,14 +603,11 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
capacityType = CapacityVO.CAPACITY_TYPE_MEMORY;
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("CPUOverprovisioningFactor considered: " + cpuOverprovisioningFactor);
}
List<Long> podIdswithEnoughCapacity = _capacityDao.listPodsByHostCapacities(zoneId, requiredCpu, requiredRam, capacityType, cpuOverprovisioningFactor);
List<Long> podIdswithEnoughCapacity = _capacityDao.listPodsByHostCapacities(zoneId, requiredCpu, requiredRam, capacityType);
if (s_logger.isTraceEnabled()) {
s_logger.trace("PodId List having enough CPU and RAM capacity: " + podIdswithEnoughCapacity);
}
Pair<List<Long>, Map<Long, Double>> result = _capacityDao.orderPodsByAggregateCapacity(zoneId, capacityType, cpuOverprovisioningFactor);
Pair<List<Long>, Map<Long, Double>> result = _capacityDao.orderPodsByAggregateCapacity(zoneId, capacityType);
List<Long> podIdsOrderedByAggregateCapacity = result.first();
//only keep the clusters that have enough capacity to host this VM
if (s_logger.isTraceEnabled()) {

View File

@ -76,9 +76,11 @@ public abstract class HypervisorGuruBase extends AdapterBase implements Hypervis
ServiceOffering offering = vmProfile.getServiceOffering();
VirtualMachine vm = vmProfile.getVirtualMachine();
VirtualMachineTO to = new VirtualMachineTO(vm.getId(), vm.getInstanceName(), vm.getType(), offering.getCpu(), offering.getSpeed(),
offering.getRamSize() * 1024l * 1024l, offering.getRamSize() * 1024l * 1024l, null, null, vm.isHaEnabled(), vm.limitCpuUse(), vm.getVncPassword());
Long minMemory = (long) (offering.getRamSize() / vmProfile.getMemoryOvercommitRatio());
int minspeed = (int) (offering.getSpeed() / vmProfile.getCpuOvercommitRatio());
int maxspeed = offering.getSpeed();
VirtualMachineTO to = new VirtualMachineTO(vm.getId(), vm.getInstanceName(), vm.getType(), offering.getCpu(), minspeed, maxspeed,
minMemory * 1024l * 1024l, offering.getRamSize() * 1024l * 1024l, null, null, vm.isHaEnabled(), vm.limitCpuUse(), vm.getVncPassword());
to.setBootArgs(vmProfile.getBootArgs());
List<NicProfile> nicProfiles = vmProfile.getNics();
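// Worked example (illustrative values, not from the commit): a 2000 MHz /
// 2048 MB offering on a cluster with cpuOvercommitRatio = 2 and
// memoryOvercommitRatio = 2 reserves half the offering and allows bursting to
// the full size.
float cpuOvercommitRatio = 2.0f;
float memoryOvercommitRatio = 2.0f;
int offeringSpeedMhz = 2000;
int offeringRamMb = 2048;
int minSpeed = (int) (offeringSpeedMhz / cpuOvercommitRatio);                       // 1000 MHz reserved
long minRamBytes = (long) (offeringRamMb / memoryOvercommitRatio) * 1024L * 1024L;  // 1 GB reserved
long maxRamBytes = offeringRamMb * 1024L * 1024L;                                   // 2 GB limit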

View File

@ -30,6 +30,7 @@ import javax.ejb.Local;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import com.cloud.dc.*;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.command.admin.cluster.AddClusterCmd;
import org.apache.cloudstack.api.command.admin.cluster.DeleteClusterCmd;
@ -72,12 +73,6 @@ import com.cloud.cluster.ManagementServerNode;
import com.cloud.configuration.Config;
import com.cloud.configuration.ConfigurationManager;
import com.cloud.configuration.dao.ConfigurationDao;
import com.cloud.dc.ClusterDetailsDao;
import com.cloud.dc.ClusterVO;
import com.cloud.dc.DataCenterIpAddressVO;
import com.cloud.dc.DataCenterVO;
import com.cloud.dc.HostPodVO;
import com.cloud.dc.PodCluster;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.dc.dao.ClusterVSMMapDao;
import com.cloud.dc.dao.DataCenterDao;
@ -483,6 +478,11 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager,
clusterId = cluster.getId();
result.add(cluster);
ClusterDetailsVO cluster_detail_cpu = new ClusterDetailsVO(clusterId, "cpuOvercommitRatio", Float.toString(cmd.getCpuOvercommitRatio()));
ClusterDetailsVO cluster_detail_ram = new ClusterDetailsVO(clusterId, "memoryOvercommitRatio", Float.toString(cmd.getMemoryOvercommitRaito()));
_clusterDetailsDao.persist(cluster_detail_cpu);
_clusterDetailsDao.persist(cluster_detail_ram);
if (clusterType == Cluster.ClusterType.CloudManaged) {
return result;
}
@ -494,6 +494,10 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager,
details.put("password", password);
_clusterDetailsDao.persist(cluster.getId(), details);
_clusterDetailsDao.persist(cluster_detail_cpu);
_clusterDetailsDao.persist(cluster_detail_ram);
boolean success = false;
try {
try {
@ -1061,7 +1065,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager,
@Override
@DB
public Cluster updateCluster(Cluster clusterToUpdate, String clusterType,
String hypervisor, String allocationState, String managedstate) {
String hypervisor, String allocationState, String managedstate, Float memoryOvercommitRaito, Float cpuOvercommitRatio) {
ClusterVO cluster = (ClusterVO) clusterToUpdate;
// Verify cluster information and update the cluster if needed
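// Sketch (not from the commit) of the read path for the two detail rows
// persisted in addCluster above; FirstFitPlanner and VirtualMachineManagerImpl
// both read them back with the same keys.
ClusterDetailsVO cpuDetail = _clusterDetailsDao.findDetail(clusterId, "cpuOvercommitRatio");
ClusterDetailsVO ramDetail = _clusterDetailsDao.findDetail(clusterId, "memoryOvercommitRatio");
float cpuRatio = Float.parseFloat(cpuDetail.getValue());  // e.g. "2" -> 2.0f
float ramRatio = Float.parseFloat(ramDetail.getValue());  // e.g. "1.5" -> 1.5f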

View File

@ -37,6 +37,7 @@ import javax.inject.Inject;
import javax.naming.ConfigurationException;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import com.cloud.dc.*;
import org.apache.log4j.Logger;
import com.cloud.agent.AgentManager;
@ -76,6 +77,7 @@ import com.cloud.configuration.dao.ConfigurationDao;
import com.cloud.dc.DataCenter;
import com.cloud.dc.DataCenterVO;
import com.cloud.dc.HostPodVO;
import com.cloud.consoleproxy.ConsoleProxyManager;
import com.cloud.dc.dao.DataCenterDao;
import com.cloud.dc.dao.HostPodDao;
import com.cloud.deploy.DataCenterDeployment;
@ -157,6 +159,7 @@ import com.cloud.vm.snapshot.VMSnapshot;
import com.cloud.vm.snapshot.VMSnapshotManager;
import com.cloud.vm.snapshot.VMSnapshotVO;
import com.cloud.vm.snapshot.dao.VMSnapshotDao;
import com.cloud.vm.dao.UserVmDetailsDao;
@Local(value = VirtualMachineManager.class)
public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMachineManager, Listener {
@ -232,6 +235,10 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
@Inject
protected VMSnapshotManager _vmSnapshotMgr = null;
@Inject
protected ClusterDetailsDao _clusterDetailsDao;
@Inject
protected UserVmDetailsDao _uservmDetailsDao;
@Inject
protected ConfigurationDao _configDao;
@ -398,6 +405,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
VirtualMachineGuru<T> guru = getVmGuru(vm);
guru.finalizeExpunge(vm);
//remove the overcommit details from the user vm details
_uservmDetailsDao.deleteDetails(vm.getId());
if (s_logger.isDebugEnabled()) {
s_logger.debug("Expunged " + vm);
@ -718,6 +727,11 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
long destHostId = dest.getHost().getId();
vm.setPodId(dest.getPod().getId());
Long cluster_id = dest.getCluster().getId();
ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id,"cpuOvercommitRatio");
ClusterDetailsVO cluster_detail_ram = _clusterDetailsDao.findDetail(cluster_id,"memoryOvercommitRatio");
vmProfile.setCpuOvercommitRatio(Float.parseFloat(cluster_detail_cpu.getValue()));
vmProfile.setMemoryOvercommitRatio(Float.parseFloat(cluster_detail_ram.getValue()));
try {
if (!changeState(vm, Event.OperationRetry, destHostId, work, Step.Prepare)) {

View File

@ -43,13 +43,16 @@ public class VirtualMachineProfileImpl<T extends VMInstanceVO> implements Virtua
T _vm;
ServiceOfferingVO _offering;
VMTemplateVO _template;
UserVmDetailVO _userVmDetails;
Map<Param, Object> _params;
List<NicProfile> _nics = new ArrayList<NicProfile>();
List<VolumeTO> _disks = new ArrayList<VolumeTO>();
StringBuilder _bootArgs = new StringBuilder();
Account _owner;
BootloaderType _bootloader;
Float cpuOvercommitRatio = 1.0f;
Float memoryOvercommitRatio = 1.0f;
VirtualMachine.Type _type;
public VirtualMachineProfileImpl(T vm, VMTemplateVO template, ServiceOfferingVO offering, Account owner, Map<Param, Object> params) {
@ -239,6 +242,25 @@ public class VirtualMachineProfileImpl<T extends VMInstanceVO> implements Virtua
public void setServiceOffering(ServiceOfferingVO offering) {
_offering = offering;
}
public void setCpuOvercommitRatio(Float cpuOvercommitRatio){
this.cpuOvercommitRatio = cpuOvercommitRatio;
}
public void setMemoryOvercommitRatio(Float memoryOvercommitRatio){
this.memoryOvercommitRatio = memoryOvercommitRatio;
}
@Override
public Float getCpuOvercommitRatio(){
return this.cpuOvercommitRatio;
}
@Override
public Float getMemoryOvercommitRatio(){
return this.memoryOvercommitRatio;
}
}
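// Minimal sketch, assuming vm, template, offering and owner are in scope (they
// are not defined here): with no cluster detail applied, the 1.0f defaults
// hold, so HypervisorGuruBase computes minSpeed == maxSpeed and
// minRam == maxRam, i.e. no overcommit.
VirtualMachineProfileImpl<VMInstanceVO> profile =
        new VirtualMachineProfileImpl<VMInstanceVO>(vm, template, offering, owner, null);
assert profile.getCpuOvercommitRatio() == 1.0f;
assert profile.getMemoryOvercommitRatio() == 1.0f;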

View File

@ -0,0 +1,61 @@
package com.cloud.capacity;
import com.cloud.capacity.dao.CapacityDao;
import com.cloud.dc.ClusterDetailsDao;
import com.cloud.dc.ClusterDetailsVO;
import com.cloud.service.ServiceOfferingVO;
import com.cloud.service.dao.ServiceOfferingDao;
import com.cloud.vm.UserVmDetailVO;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.dao.UserVmDetailsDao;
import org.apache.log4j.Logger;
import org.junit.*;
import org.junit.Test;
import org.mockito.Mockito;
import static org.mockito.Mockito.*;
public class CapacityManagerTest {
CapacityDao CDao = mock(CapacityDao.class);
ServiceOfferingDao SOfferingDao = mock(ServiceOfferingDao.class);
ClusterDetailsDao clusterDetailsDao = mock(ClusterDetailsDao.class);
CapacityManagerImpl capMgr;
private ServiceOfferingVO svo = mock(ServiceOfferingVO.class);
private CapacityVO cvo_cpu = mock(CapacityVO.class);
private CapacityVO cvo_ram = mock(CapacityVO.class);
private VirtualMachine vm = mock(VirtualMachine.class);
private ClusterDetailsVO cluster_detail_cpu = mock(ClusterDetailsVO.class);
private ClusterDetailsVO cluster_detail_ram = mock(ClusterDetailsVO.class);
public CapacityManagerImpl setUp() {
CapacityManagerImpl capMgr = new CapacityManagerImpl();
capMgr._clusterDetailsDao = clusterDetailsDao;
capMgr._capacityDao = CDao;
capMgr._offeringsDao = SOfferingDao;
return capMgr;
}
@Test
public void allocateCapacityTest(){
capMgr=setUp();
when(vm.getHostId()).thenReturn(1l);
when(vm.getServiceOfferingId()).thenReturn(2l);
when(SOfferingDao.findById(anyLong())).thenReturn(svo);
when(CDao.findByHostIdType(anyLong(), eq(Capacity.CAPACITY_TYPE_CPU))).thenReturn(cvo_cpu);
when(CDao.findByHostIdType(anyLong(), eq(Capacity.CAPACITY_TYPE_MEMORY))).thenReturn(cvo_ram);
when(cvo_cpu.getUsedCapacity()).thenReturn(500l);
when(cvo_cpu.getTotalCapacity()).thenReturn(2000l);
when(cvo_ram.getUsedCapacity()).thenReturn(3000l);
when(cvo_ram.getTotalCapacity()).thenReturn((long) 1024*1024*1024);
when(svo.getCpu()).thenReturn(500);
when(svo.getRamSize()).thenReturn(512);
when(cvo_cpu.getReservedCapacity()).thenReturn(0l);
when(cvo_ram.getReservedCapacity()).thenReturn(0l);
when(cluster_detail_ram.getValue()).thenReturn("1.5");
when(cluster_detail_cpu.getValue()).thenReturn("2");
when(CDao.update(anyLong(), isA(CapacityVO.class))).thenReturn(true) ;
boolean hasCapacity = capMgr.checkIfHostHasCapacity(1l, 500, 1024*1024*1024, false, 2, 2, false);
Assert.assertTrue(hasCapacity);
}
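// A hedged companion sketch (not in the commit), reusing the numbers above:
// with both ratios at 1 the 1 GB RAM request no longer fits the host's 1 GB
// total (3000 bytes are already used), so the check should fail.
@Test
public void checkIfHostHasCapacityWithoutOvercommitTest() {
    capMgr = setUp();
    when(CDao.findByHostIdType(anyLong(), eq(Capacity.CAPACITY_TYPE_CPU))).thenReturn(cvo_cpu);
    when(CDao.findByHostIdType(anyLong(), eq(Capacity.CAPACITY_TYPE_MEMORY))).thenReturn(cvo_ram);
    when(cvo_cpu.getUsedCapacity()).thenReturn(500l);
    when(cvo_cpu.getTotalCapacity()).thenReturn(2000l);
    when(cvo_cpu.getReservedCapacity()).thenReturn(0l);
    when(cvo_ram.getUsedCapacity()).thenReturn(3000l);
    when(cvo_ram.getTotalCapacity()).thenReturn((long) 1024*1024*1024);
    when(cvo_ram.getReservedCapacity()).thenReturn(0l);
    boolean hasCapacity = capMgr.checkIfHostHasCapacity(1l, 500, 1024*1024*1024, false, 1, 1, false);
    Assert.assertFalse(hasCapacity);
}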
}

View File

@ -53,7 +53,6 @@ import com.cloud.storage.S3;
import com.cloud.storage.Swift;
import com.cloud.template.VirtualMachineTemplate;
import com.cloud.utils.Pair;
import com.cloud.utils.component.Manager;
import com.cloud.utils.component.ManagerBase;
import com.cloud.utils.fsm.NoTransitionException;
@ -111,7 +110,7 @@ public class MockResourceManagerImpl extends ManagerBase implements ResourceMana
*/
@Override
public Cluster updateCluster(Cluster cluster, String clusterType, String hypervisor, String allocationState,
String managedstate) {
String managedstate, Float memoryOvercommitRaito, Float cpuOvercommitRatio) {
// TODO Auto-generated method stub
return null;
}

View File

@ -539,7 +539,12 @@ public class VmwareHelper {
cpuInfo.setReservation((long)cpuReservedMhz);
vmConfig.setCpuAllocation(cpuInfo);
if (cpuSpeedMHz != cpuReservedMhz){
vmConfig.setCpuHotAddEnabled(true);
}
if (memoryMB != memoryReserveMB){
vmConfig.setMemoryHotAddEnabled(true);
}
ResourceAllocationInfo memInfo = new ResourceAllocationInfo();
memInfo.setLimit((long)memoryMB);
memInfo.setReservation((long)memoryReserveMB);
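// Illustrative reading of the rule above (values are examples, not from the
// commit): hot-add is enabled exactly when the limit differs from the
// reservation, i.e. when overcommit leaves headroom for the VM to grow at
// runtime.
int cpuSpeedMHz = 2000, cpuReservedMhz = 1000;  // maxSpeed vs minSpeed from the TO
int memoryMB = 2048, memoryReserveMB = 1024;    // maxRam vs minRam from the TO
boolean cpuHotAddEnabled = (cpuSpeedMHz != cpuReservedMhz);   // true here
boolean memoryHotAddEnabled = (memoryMB != memoryReserveMB);  // true here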