From 23e54bb0f4c64735bb34413ea26ea1804c846ff5 Mon Sep 17 00:00:00 2001
From: Bharat Kumar
Date: Fri, 22 Feb 2013 17:31:06 +0530
Subject: [PATCH] Cloudstack-711: Cpu and Ram Overcommit Ratio.

---
 .../cloud/agent/api/to/VirtualMachineTO.java    |  15 +-
 .../com/cloud/resource/ResourceService.java     |   2 +-
 .../com/cloud/vm/VirtualMachineProfile.java     |   6 +
 .../apache/cloudstack/api/ApiConstants.java     |   2 +
 .../command/admin/cluster/AddClusterCmd.java    |  27 ++
 .../admin/cluster/UpdateClusterCmd.java         |  27 +-
 .../api/response/ClusterResponse.java           |  20 ++
 .../baremetal/manager/BareMetalPlanner.java     |  21 +-
 .../resource/LibvirtComputingResource.java      |  16 +-
 .../hypervisor/kvm/resource/LibvirtVMDef.java   |   8 +
 .../agent/manager/MockVmManagerImpl.java        |   2 +-
 .../cloud/resource/AgentRoutingResource.java    |   6 +-
 .../vmware/manager/VmwareManagerImpl.java       |  12 -
 .../vmware/resource/VmwareResource.java         |  44 +---
 .../xen/resource/CitrixResourceBase.java        |   8 +-
 .../xen/resource/XcpServerResource.java         |  16 +-
 .../xen/resource/XenServer56FP1Resource.java    |   6 +-
 .../allocator/impl/FirstFitAllocator.java       |  16 +-
 server/src/com/cloud/api/ApiDBUtils.java        |   7 +
 .../src/com/cloud/api/ApiResponseHelper.java    |  16 +-
 .../com/cloud/capacity/CapacityManager.java     |   4 +-
 .../cloud/capacity/CapacityManagerImpl.java     |  72 ++++--
 .../capacity/ComputeCapacityListener.java       |   5 +-
 .../com/cloud/capacity/dao/CapacityDao.java     |  14 +-
 .../cloud/capacity/dao/CapacityDaoImpl.java     | 239 ++++++++++--------
 .../src/com/cloud/deploy/FirstFitPlanner.java   |  85 +++----
 .../cloud/hypervisor/HypervisorGuruBase.java    |   8 +-
 .../cloud/resource/ResourceManagerImpl.java     |  18 +-
 .../cloud/vm/VirtualMachineManagerImpl.java     |  14 +
 .../cloud/vm/VirtualMachineProfileImpl.java     |  28 +-
 .../cloud/capacity/CapacityManagerTest.java     |  61 +++++
 .../resource/MockResourceManagerImpl.java       |   3 +-
 .../hypervisor/vmware/util/VmwareHelper.java    |   7 +-
 33 files changed, 534 insertions(+), 301 deletions(-)
 create mode 100644 server/test/com/cloud/capacity/CapacityManagerTest.java

diff --git a/api/src/com/cloud/agent/api/to/VirtualMachineTO.java b/api/src/com/cloud/agent/api/to/VirtualMachineTO.java
index 8f3f0eb39d6..bdd636e727b 100644
--- a/api/src/com/cloud/agent/api/to/VirtualMachineTO.java
+++ b/api/src/com/cloud/agent/api/to/VirtualMachineTO.java
@@ -28,7 +28,8 @@ public class VirtualMachineTO {
     private BootloaderType bootloader;
     Type type;
     int cpus;
-    Integer speed;
+    Integer minSpeed;
+    Integer maxSpeed;
     long minRam;
     long maxRam;
     String hostName;
@@ -47,12 +48,13 @@ public class VirtualMachineTO {
     VolumeTO[] disks;
     NicTO[] nics;
 
-    public VirtualMachineTO(long id, String instanceName, VirtualMachine.Type type, int cpus, Integer speed, long minRam, long maxRam, BootloaderType bootloader, String os, boolean enableHA, boolean limitCpuUse, String vncPassword) {
+    public VirtualMachineTO(long id, String instanceName, VirtualMachine.Type type, int cpus, Integer minSpeed, Integer maxSpeed, long minRam, long maxRam, BootloaderType bootloader, String os, boolean enableHA, boolean limitCpuUse, String vncPassword) {
         this.id = id;
         this.name = instanceName;
         this.type = type;
         this.cpus = cpus;
-        this.speed = speed;
+        this.minSpeed = minSpeed;
+        this.maxSpeed = maxSpeed;
         this.minRam = minRam;
         this.maxRam = maxRam;
         this.bootloader = bootloader;
@@ -101,10 +103,13 @@ public class VirtualMachineTO {
         this.cpus = cpus;
     }
 
-    public Integer getSpeed() {
-        return speed;
+    public Integer getMinSpeed() {
+        return minSpeed;
     }
 
+    public Integer getMaxSpeed() {
+        return maxSpeed;
+    }
     public boolean
getLimitCpuUse() { return limitCpuUse; } diff --git a/api/src/com/cloud/resource/ResourceService.java b/api/src/com/cloud/resource/ResourceService.java index 1e77cc8a4e2..83718d485ca 100755 --- a/api/src/com/cloud/resource/ResourceService.java +++ b/api/src/com/cloud/resource/ResourceService.java @@ -71,7 +71,7 @@ public interface ResourceService { boolean deleteCluster(DeleteClusterCmd cmd); - Cluster updateCluster(Cluster cluster, String clusterType, String hypervisor, String allocationState, String managedstate); + Cluster updateCluster(Cluster cluster, String clusterType, String hypervisor, String allocationState, String managedstate, Float memoryOvercommitRaito, Float cpuOvercommitRatio); List discoverHosts(AddHostCmd cmd) throws IllegalArgumentException, DiscoveryException, InvalidParameterValueException; diff --git a/api/src/com/cloud/vm/VirtualMachineProfile.java b/api/src/com/cloud/vm/VirtualMachineProfile.java index 0fab4436807..33a9171e732 100644 --- a/api/src/com/cloud/vm/VirtualMachineProfile.java +++ b/api/src/com/cloud/vm/VirtualMachineProfile.java @@ -136,4 +136,10 @@ public interface VirtualMachineProfile { BootloaderType getBootLoaderType(); Map getParameters(); + + Float getCpuOvercommitRatio(); + + Float getMemoryOvercommitRatio(); + + } diff --git a/api/src/org/apache/cloudstack/api/ApiConstants.java b/api/src/org/apache/cloudstack/api/ApiConstants.java index b12e4cd8b55..8b4bb98e06c 100755 --- a/api/src/org/apache/cloudstack/api/ApiConstants.java +++ b/api/src/org/apache/cloudstack/api/ApiConstants.java @@ -46,6 +46,7 @@ public class ApiConstants { public static final String COMPONENT = "component"; public static final String CPU_NUMBER = "cpunumber"; public static final String CPU_SPEED = "cpuspeed"; + public static final String CPU_OVERCOMMIT_RATIO="cpuovercommitratio"; public static final String CREATED = "created"; public static final String CUSTOMIZED = "customized"; public static final String DESCRIPTION = "description"; @@ -119,6 +120,7 @@ public class ApiConstants { public static final String MAX = "max"; public static final String MAX_SNAPS = "maxsnaps"; public static final String MEMORY = "memory"; + public static final String MEMORY_OVERCOMMIT_RATIO="memoryovercommitratio"; public static final String MODE = "mode"; public static final String NAME = "name"; public static final String METHOD_NAME = "methodname"; diff --git a/api/src/org/apache/cloudstack/api/command/admin/cluster/AddClusterCmd.java b/api/src/org/apache/cloudstack/api/command/admin/cluster/AddClusterCmd.java index 912c396bf4f..7b1cd067eb1 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/cluster/AddClusterCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/cluster/AddClusterCmd.java @@ -20,6 +20,10 @@ package org.apache.cloudstack.api.command.admin.cluster; import java.util.ArrayList; import java.util.List; +import com.cloud.exception.InvalidParameterValueException; +import org.apache.cloudstack.api.*; +import org.apache.log4j.Logger; + import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.ApiErrorCode; @@ -81,6 +85,12 @@ public class AddClusterCmd extends BaseCmd { @Parameter(name = ApiConstants.VSM_IPADDRESS, type = CommandType.STRING, required = false, description = "the ipaddress of the VSM associated with this cluster") private String vsmipaddress; + @Parameter (name=ApiConstants.CPU_OVERCOMMIT_RATIO, type = CommandType.STRING, required = false , description = "value of the cpu overcommit ratio, 
defaults to 1")
+    private String cpuovercommitRatio;
+
+    @Parameter(name = ApiConstants.MEMORY_OVERCOMMIT_RATIO, type = CommandType.STRING, required = false ,description = "value of the default ram overcommit ratio, defaults to 1")
+    private String memoryovercommitratio;
+
     public String getVSMIpaddress() {
         return vsmipaddress;
     }
@@ -147,9 +157,26 @@ public class AddClusterCmd extends BaseCmd {
         this.allocationState = allocationState;
     }
 
+    public Float getCpuOvercommitRatio (){
+        if(cpuovercommitRatio != null){
+            return Float.parseFloat(cpuovercommitRatio);
+        }
+        return 1.0f;
+    }
+
+    public Float getMemoryOvercommitRaito (){
+        if (memoryovercommitratio != null){
+            return Float.parseFloat(memoryovercommitratio);
+        }
+        return 1.0f;
+    }
+
     @Override
     public void execute(){
         try {
+            if ((getMemoryOvercommitRaito().compareTo(1f) < 0) || (getCpuOvercommitRatio().compareTo(1f) < 0)) {
+                throw new InvalidParameterValueException("Cpu and ram overcommit ratios should not be less than 1");
+            }
             List result = _resourceService.discoverCluster(this);
             ListResponse response = new ListResponse();
             List clusterResponses = new ArrayList();
diff --git a/api/src/org/apache/cloudstack/api/command/admin/cluster/UpdateClusterCmd.java b/api/src/org/apache/cloudstack/api/command/admin/cluster/UpdateClusterCmd.java
index 058c7ebc952..95728dd184d 100644
--- a/api/src/org/apache/cloudstack/api/command/admin/cluster/UpdateClusterCmd.java
+++ b/api/src/org/apache/cloudstack/api/command/admin/cluster/UpdateClusterCmd.java
@@ -54,6 +54,13 @@ public class UpdateClusterCmd extends BaseCmd {
     @Parameter(name=ApiConstants.MANAGED_STATE, type=CommandType.STRING, description="whether this cluster is managed by cloudstack")
     private String managedState;
 
+    @Parameter(name=ApiConstants.CPU_OVERCOMMIT_RATIO, type = CommandType.STRING, description = "Value of cpu overcommit ratio")
+    private String cpuovercommitratio;
+
+    @Parameter(name=ApiConstants.MEMORY_OVERCOMMIT_RATIO, type = CommandType.STRING, description = "Value of ram overcommit ratio")
+    private String memoryovercommitratio;
+
+
     public String getClusterName() {
         return clusterName;
     }
@@ -100,6 +107,20 @@
         this.managedState = managedstate;
     }
 
+    public Float getCpuOvercommitRatio (){
+        if(cpuovercommitratio != null){
+            return Float.parseFloat(cpuovercommitratio);
+        }
+        return 1.0f;
+    }
+
+    public Float getMemoryOvercommitRaito (){
+        if (memoryovercommitratio != null){
+            return Float.parseFloat(memoryovercommitratio);
+        }
+        return 1.0f;
+    }
+
     @Override
     public void execute(){
         Cluster cluster = _resourceService.getCluster(getId());
@@ -107,7 +128,11 @@
             throw new InvalidParameterValueException("Unable to find the cluster by id=" + getId());
         }
 
-        Cluster result = _resourceService.updateCluster(cluster, getClusterType(), getHypervisor(), getAllocationState(), getManagedstate());
+        if ((getMemoryOvercommitRaito().compareTo(1f) < 0) || (getCpuOvercommitRatio().compareTo(1f) < 0)) {
+            throw new InvalidParameterValueException("Cpu and ram overcommit ratios should not be less than 1");
+        }
+
+        Cluster result = _resourceService.updateCluster(cluster, getClusterType(), getHypervisor(), getAllocationState(), getManagedstate(), getMemoryOvercommitRaito(), getCpuOvercommitRatio());
         if (result != null) {
             ClusterResponse clusterResponse = _responseGenerator.createClusterResponse(cluster, false);
             clusterResponse.setResponseName(getCommandName());
diff --git
a/api/src/org/apache/cloudstack/api/response/ClusterResponse.java b/api/src/org/apache/cloudstack/api/response/ClusterResponse.java index 551e530cc38..a90acde6145 100644 --- a/api/src/org/apache/cloudstack/api/response/ClusterResponse.java +++ b/api/src/org/apache/cloudstack/api/response/ClusterResponse.java @@ -62,6 +62,12 @@ public class ClusterResponse extends BaseResponse { @SerializedName("capacity") @Param(description="the capacity of the Cluster", responseObject = CapacityResponse.class) private List capacitites; + @SerializedName("cpuovercommitratio") @Param(description = "The cpu overcommit ratio of the cluster") + private String cpuovercommitratio; + + @SerializedName("memoryovercommitratio") @Param (description = "The ram overcommit ratio of the cluster") + private String memoryovercommitratio; + public String getId() { return id; } @@ -149,4 +155,18 @@ public class ClusterResponse extends BaseResponse { public void setCapacitites(ArrayList arrayList) { this.capacitites = arrayList; } + public void setCpuovercommitratio(String cpuovercommitratio){ + this.cpuovercommitratio= cpuovercommitratio; + } + public void setRamovercommitratio (String memoryOvercommitRatio){ + this.memoryovercommitratio= memoryOvercommitRatio; + } + + public String getCpuovercommitratio (){ + return cpuovercommitratio; + } + + public String getRamovercommitratio (){ + return memoryovercommitratio; + } } diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/manager/BareMetalPlanner.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/manager/BareMetalPlanner.java index fe51eb0918e..97b2840f419 100755 --- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/manager/BareMetalPlanner.java +++ b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/manager/BareMetalPlanner.java @@ -23,14 +23,13 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; +import com.cloud.dc.*; +import com.cloud.dc.ClusterDetailsDao; import org.apache.log4j.Logger; import com.cloud.capacity.CapacityManager; import com.cloud.configuration.Config; import com.cloud.configuration.dao.ConfigurationDao; -import com.cloud.dc.ClusterVO; -import com.cloud.dc.DataCenter; -import com.cloud.dc.Pod; import com.cloud.dc.dao.ClusterDao; import com.cloud.dc.dao.DataCenterDao; import com.cloud.dc.dao.HostPodDao; @@ -61,16 +60,14 @@ public class BareMetalPlanner extends AdapterBase implements DeploymentPlanner { @Inject protected ConfigurationDao _configDao; @Inject protected CapacityManager _capacityMgr; @Inject protected ResourceManager _resourceMgr; + @Inject protected ClusterDetailsDao _clusterDetailsDao; @Override public DeployDestination plan(VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid) throws InsufficientServerCapacityException { VirtualMachine vm = vmProfile.getVirtualMachine(); - ServiceOffering offering = vmProfile.getServiceOffering(); + ServiceOffering offering = vmProfile.getServiceOffering(); String hostTag = null; - - String opFactor = _configDao.getValue(Config.CPUOverprovisioningFactor.key()); - float cpuOverprovisioningFactor = NumbersUtil.parseFloat(opFactor, 1); - + String haVmTag = (String)vmProfile.getParameter(VirtualMachineProfile.Param.HaTag); if (vm.getLastHostId() != null && haVmTag == null) { @@ -126,7 +123,13 @@ public class BareMetalPlanner extends AdapterBase implements DeploymentPlanner { return null; } for (HostVO h : hosts) { - if (_capacityMgr.checkIfHostHasCapacity(h.getId(), cpu_requested, ram_requested, false, 
cpuOverprovisioningFactor, true)) {
+                long cluster_id = h.getClusterId();
+                ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id,"cpuOvercommitRatio");
+                ClusterDetailsVO cluster_detail_ram = _clusterDetailsDao.findDetail(cluster_id,"memoryOvercommitRatio");
+                Float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue());
+                Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue());
+
+                if (_capacityMgr.checkIfHostHasCapacity(h.getId(), cpu_requested, ram_requested, false, cpuOvercommitRatio, memoryOvercommitRatio, true)) {
                     s_logger.debug("Find host " + h.getId() + " has enough capacity");
                     DataCenter dc = _dcDao.findById(h.getDataCenterId());
                     Pod pod = _podDao.findById(h.getPodId());
diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java
index 9472ea2f313..f7d1b4ad940 100755
--- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java
+++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java
@@ -2921,12 +2921,20 @@ ServerResource {
         vm.addComp(guest);
 
         GuestResourceDef grd = new GuestResourceDef();
-        grd.setMemorySize(vmTO.getMinRam() / 1024);
+        // when RAM overcommit is in effect (minRam != maxRam), enable ballooning
+        if (vmTO.getMinRam() != vmTO.getMaxRam()){
+            grd.setMemBalloning(true);
+            grd.setCurrentMem((int)(vmTO.getMinRam()/1024));
+            grd.setMemorySize(vmTO.getMaxRam()/1024);
+        }
+        else{
+            grd.setMemorySize(vmTO.getMaxRam() / 1024);
+        }
         grd.setVcpuNum(vmTO.getCpus());
         vm.addComp(grd);
 
         CpuTuneDef ctd = new CpuTuneDef();
-        ctd.setShares(vmTO.getCpus() * vmTO.getSpeed());
+        ctd.setShares(vmTO.getCpus() * vmTO.getMinSpeed());
         vm.addComp(ctd);
 
         FeaturesDef features = new FeaturesDef();
diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java
index acfd9cf1fe8..5ab37702ed6 100644
--- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java
+++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java
@@ -116,6 +116,7 @@ public class LibvirtVMDef {
         private int _currentMem = -1;
         private String _memBacking;
         private int _vcpu = -1;
+        private boolean _memBalloning= false;
 
         public void setMemorySize(long mem) {
             _mem = mem;
@@ -133,6 +134,10 @@ public class LibvirtVMDef {
             _vcpu = vcpu;
         }
 
+        public void setMemBalloning(boolean turnon){
+            _memBalloning = turnon;
+        }
+
         @Override
         public String toString() {
             StringBuilder resBuidler = new StringBuilder();
@@ -145,6 +150,9 @@ public class LibvirtVMDef {
                 resBuidler.append("" + "<" + _memBacking + "/>" + "\n");
             }
+            if (_memBalloning){
+                resBuidler.append("\n" + "\n" + "\n");
+            }
             if (_vcpu != -1) {
                 resBuidler.append("" + _vcpu + "\n");
             }
diff --git a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockVmManagerImpl.java b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockVmManagerImpl.java
index 40a5b9846db..c0ccbe43978 100644
--- a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockVmManagerImpl.java
+++ b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockVmManagerImpl.java
@@ -348,7 +348,7 @@ public class MockVmManagerImpl extends ManagerBase implements MockVmManager {
     @Override
     public Answer startVM(StartCommand cmd, SimulatorInfo info) {
VirtualMachineTO vm = cmd.getVirtualMachine(); - String result = startVM(vm.getName(), vm.getNics(), vm.getCpus()* vm.getSpeed(), vm.getMaxRam(), vm.getBootArgs(), info.getHostUuid()); + String result = startVM(vm.getName(), vm.getNics(), vm.getCpus()* vm.getMaxSpeed(), vm.getMaxRam(), vm.getBootArgs(), info.getHostUuid()); if (result != null) { return new StartAnswer(cmd, result); } else { diff --git a/plugins/hypervisors/simulator/src/com/cloud/resource/AgentRoutingResource.java b/plugins/hypervisors/simulator/src/com/cloud/resource/AgentRoutingResource.java index 721e5f70222..46df50c2133 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/resource/AgentRoutingResource.java +++ b/plugins/hypervisors/simulator/src/com/cloud/resource/AgentRoutingResource.java @@ -184,7 +184,7 @@ public class AgentRoutingResource extends AgentStorageResource { throws IllegalArgumentException { VirtualMachineTO vmSpec = cmd.getVirtualMachine(); String vmName = vmSpec.getName(); - if (this.totalCpu < (vmSpec.getCpus() * vmSpec.getSpeed() + this.usedCpu) || + if (this.totalCpu < (vmSpec.getCpus() * vmSpec.getMaxSpeed() + this.usedCpu) || this.totalMem < (vmSpec.getMaxRam() + this.usedMem)) { return new StartAnswer(cmd, "Not enough resource to start the vm"); } @@ -199,9 +199,9 @@ public class AgentRoutingResource extends AgentStorageResource { return new StartAnswer(cmd, result.getDetails()); } - this.usedCpu += vmSpec.getCpus() * vmSpec.getSpeed(); + this.usedCpu += vmSpec.getCpus() * vmSpec.getMaxSpeed(); this.usedMem += vmSpec.getMaxRam(); - _runningVms.put(vmName, new Pair(Long.valueOf(vmSpec.getCpus() * vmSpec.getSpeed()), vmSpec.getMaxRam())); + _runningVms.put(vmName, new Pair(Long.valueOf(vmSpec.getCpus() * vmSpec.getMaxSpeed()), vmSpec.getMaxRam())); state = State.Running; } finally { diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java index 70f98cc64b4..6b6bf19af79 100755 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java @@ -138,10 +138,8 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw int _additionalPortRangeSize; int _routerExtraPublicNics = 2; - String _cpuOverprovisioningFactor = "1"; String _reserveCpu = "false"; - String _memOverprovisioningFactor = "1"; String _reserveMem = "false"; String _rootDiskController = DiskControllerType.ide.toString(); @@ -262,14 +260,6 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw _routerExtraPublicNics = NumbersUtil.parseInt(_configDao.getValue(Config.RouterExtraPublicNics.key()), 2); - _cpuOverprovisioningFactor = _configDao.getValue(Config.CPUOverprovisioningFactor.key()); - if(_cpuOverprovisioningFactor == null || _cpuOverprovisioningFactor.isEmpty()) - _cpuOverprovisioningFactor = "1"; - - _memOverprovisioningFactor = _configDao.getValue(Config.MemOverprovisioningFactor.key()); - if(_memOverprovisioningFactor == null || _memOverprovisioningFactor.isEmpty()) - _memOverprovisioningFactor = "1"; - _reserveCpu = _configDao.getValue(Config.VmwareReserveCpu.key()); if(_reserveCpu == null || _reserveCpu.isEmpty()) _reserveCpu = "false"; @@ -507,9 +497,7 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw params.put("vmware.use.nexus.vswitch", _nexusVSwitchActive); 
params.put("service.console.name", _serviceConsoleName); params.put("management.portgroup.name", _managemetPortGroupName); - params.put("cpu.overprovisioning.factor", _cpuOverprovisioningFactor); params.put("vmware.reserve.cpu", _reserveCpu); - params.put("mem.overprovisioning.factor", _memOverprovisioningFactor); params.put("vmware.reserve.mem", _reserveMem); params.put("vmware.root.disk.controller", _rootDiskController); params.put("vmware.recycle.hung.wokervm", _recycleHungWorker); diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java index 5cac253fc0c..f754c58fecf 100755 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java @@ -289,10 +289,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa protected VirtualSwitchType _vSwitchType = VirtualSwitchType.StandardVirtualSwitch; protected boolean _nexusVSwitch = false; - protected float _cpuOverprovisioningFactor = 1; protected boolean _reserveCpu = false; - protected float _memOverprovisioningFactor = 1; protected boolean _reserveMem = false; protected boolean _recycleHungWorker = false; protected DiskControllerType _rootDiskController = DiskControllerType.ide; @@ -2089,10 +2087,10 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } } - assert (vmSpec.getSpeed() != null) && (rootDiskDataStoreDetails != null); - if (!hyperHost.createBlankVm(vmName, vmSpec.getCpus(), vmSpec.getSpeed().intValue(), - getReserveCpuMHz(vmSpec.getSpeed().intValue()), vmSpec.getLimitCpuUse(), ramMb, getReserveMemMB(ramMb), - translateGuestOsIdentifier(vmSpec.getArch(), vmSpec.getOs()).toString(), rootDiskDataStoreDetails.first(), false)) { + assert (vmSpec.getMinSpeed() != null) && (rootDiskDataStoreDetails != null); + if (!hyperHost.createBlankVm(vmName, vmSpec.getCpus(), vmSpec.getMaxSpeed().intValue(), + vmSpec.getMinSpeed(), vmSpec.getLimitCpuUse(),(int)(vmSpec.getMaxRam()/(1024*1024)), ramMb, + translateGuestOsIdentifier(vmSpec.getArch(), vmSpec.getOs()).toString(), rootDiskDataStoreDetails.first(), false)) { throw new Exception("Failed to create VM. 
vmName: " + vmName); } } @@ -2122,10 +2120,10 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa VirtualMachineConfigSpec vmConfigSpec = new VirtualMachineConfigSpec(); int ramMb = (int) (vmSpec.getMinRam() / (1024 * 1024)); - VmwareHelper.setBasicVmConfig(vmConfigSpec, vmSpec.getCpus(), vmSpec.getSpeed().intValue(), - getReserveCpuMHz(vmSpec.getSpeed().intValue()), ramMb, getReserveMemMB(ramMb), - translateGuestOsIdentifier(vmSpec.getArch(), vmSpec.getOs()).toString(), vmSpec.getLimitCpuUse()); - + VmwareHelper.setBasicVmConfig(vmConfigSpec, vmSpec.getCpus(), vmSpec.getMaxSpeed(), + vmSpec.getMinSpeed(),(int) (vmSpec.getMaxRam()/(1024*1024)), ramMb, + translateGuestOsIdentifier(vmSpec.getArch(), vmSpec.getOs()).toString(), vmSpec.getLimitCpuUse()); + VirtualDeviceConfigSpec[] deviceConfigSpecArray = new VirtualDeviceConfigSpec[totalChangeDevices]; int i = 0; int ideControllerKey = vmMo.getIDEDeviceControllerKey(); @@ -2375,22 +2373,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa return validatedDetails; } - private int getReserveCpuMHz(int cpuMHz) { - if(this._reserveCpu) { - return (int)(cpuMHz / this._cpuOverprovisioningFactor); - } - - return 0; - } - - private int getReserveMemMB(int memMB) { - if(this._reserveMem) { - return (int)(memMB / this._memOverprovisioningFactor); - } - - return 0; - } + private NicTO[] sortNicsByDeviceId(NicTO[] nics) { List listForSort = new ArrayList(); @@ -4882,11 +4866,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa _guestNetworkVSwitchName = (String) params.get("guest.network.vswitch.name"); } - String value = (String) params.get("cpu.overprovisioning.factor"); - if(value != null) - _cpuOverprovisioningFactor = Float.parseFloat(value); - - value = (String) params.get("vmware.reserve.cpu"); + String value = (String) params.get("vmware.reserve.cpu"); if(value != null && value.equalsIgnoreCase("true")) _reserveCpu = true; @@ -4894,10 +4874,6 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa if(value != null && value.equalsIgnoreCase("true")) _recycleHungWorker = true; - value = (String) params.get("mem.overprovisioning.factor"); - if(value != null) - _memOverprovisioningFactor = Float.parseFloat(value); - value = (String) params.get("vmware.reserve.mem"); if(value != null && value.equalsIgnoreCase("true")) _reserveMem = true; diff --git a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java index 33ad18d0331..4a89806a49f 100644 --- a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java +++ b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java @@ -1114,13 +1114,13 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe vm.setAffinity(conn, host); vm.removeFromOtherConfig(conn, "disks"); vm.setNameLabel(conn, vmSpec.getName()); - setMemory(conn, vm, vmSpec.getMinRam()); + setMemory(conn, vm, vmSpec.getMinRam(),vmSpec.getMaxRam()); vm.setVCPUsMax(conn, (long)vmSpec.getCpus()); vm.setVCPUsAtStartup(conn, (long)vmSpec.getCpus()); Map vcpuParams = new HashMap(); - Integer speed = vmSpec.getSpeed(); + Integer speed = vmSpec.getMinSpeed(); if (speed != null) { int cpuWeight = _maxWeight; //cpu_weight @@ -3253,8 +3253,8 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe } } - 
protected void setMemory(Connection conn, VM vm, long memsize) throws XmlRpcException, XenAPIException { - vm.setMemoryLimits(conn, memsize, memsize, memsize, memsize); + protected void setMemory(Connection conn, VM vm, long minMemsize, long maxMemsize) throws XmlRpcException, XenAPIException { + vm.setMemoryLimits(conn, maxMemsize, maxMemsize, minMemsize, maxMemsize); } private void waitForTask(Connection c, Task task, long pollInterval, long timeout) throws XenAPIException, XmlRpcException { diff --git a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XcpServerResource.java b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XcpServerResource.java index 0ce91bc58e2..7a958708e76 100644 --- a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XcpServerResource.java +++ b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XcpServerResource.java @@ -72,17 +72,17 @@ public class XcpServerResource extends CitrixResourceBase { } @Override - protected void setMemory(Connection conn, VM vm, long memsize) throws XmlRpcException, XenAPIException { + protected void setMemory(Connection conn, VM vm, long minMemsize, long maxMemsize) throws XmlRpcException, XenAPIException { vm.setMemoryStaticMin(conn, 33554432L); - vm.setMemoryDynamicMin(conn, 33554432L); - vm.setMemoryDynamicMax(conn, 33554432L); + //vm.setMemoryDynamicMin(conn, 33554432L); + //vm.setMemoryDynamicMax(conn, 33554432L); vm.setMemoryStaticMax(conn, 33554432L); - vm.setMemoryStaticMax(conn, memsize); - vm.setMemoryDynamicMax(conn, memsize); - vm.setMemoryDynamicMin(conn, memsize); - vm.setMemoryStaticMin(conn, memsize); + //vm.setMemoryStaticMax(conn, maxMemsize ); + vm.setMemoryDynamicMax(conn, maxMemsize ); + vm.setMemoryDynamicMin(conn, minMemsize ); + //vm.setMemoryStaticMin(conn, maxMemsize ); } @@ -99,7 +99,7 @@ public class XcpServerResource extends CitrixResourceBase { return answer; } catch (Exception ex) { s_logger.warn("Failed to get network usage stats due to ", ex); - return new NetworkUsageAnswer(cmd, ex); + return new NetworkUsageAnswer(cmd, ex); } } diff --git a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServer56FP1Resource.java b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServer56FP1Resource.java index 58b8a035171..7040311d04e 100644 --- a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServer56FP1Resource.java +++ b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServer56FP1Resource.java @@ -136,9 +136,9 @@ public class XenServer56FP1Resource extends XenServer56Resource { record.nameLabel = vmSpec.getName(); record.actionsAfterCrash = Types.OnCrashBehaviour.DESTROY; record.actionsAfterShutdown = Types.OnNormalExit.DESTROY; - record.memoryDynamicMax = vmSpec.getMinRam(); + record.memoryDynamicMax = vmSpec.getMaxRam(); record.memoryDynamicMin = vmSpec.getMinRam(); - record.memoryStaticMax = vmSpec.getMinRam(); + record.memoryStaticMax = vmSpec.getMaxRam(); record.memoryStaticMin = vmSpec.getMinRam(); record.VCPUsMax = (long) vmSpec.getCpus(); record.VCPUsAtStartup = (long) vmSpec.getCpus(); @@ -152,7 +152,7 @@ public class XenServer56FP1Resource extends XenServer56Resource { Map vcpuParams = new HashMap(); - Integer speed = vmSpec.getSpeed(); + Integer speed = vmSpec.getMinSpeed(); if (speed != null) { int cpuWeight = _maxWeight; // cpu_weight diff --git a/server/src/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java 
b/server/src/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java index a25e4014dea..0091e43cab8 100755 --- a/server/src/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java +++ b/server/src/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java @@ -26,6 +26,10 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; +import com.cloud.dc.ClusterDetailsDao; +import com.cloud.dc.ClusterDetailsVO; +import com.cloud.dc.dao.ClusterDao; +import com.cloud.org.Cluster; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -78,6 +82,8 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { @Inject GuestOSCategoryDao _guestOSCategoryDao = null; @Inject VMInstanceDao _vmInstanceDao = null; @Inject ResourceManager _resourceMgr; + @Inject ClusterDao _clusterDao; + @Inject ClusterDetailsDao _clusterDetailsDao; float _factor = 1; boolean _checkHvm = true; protected String _allocationAlgorithm = "random"; @@ -214,8 +220,14 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { boolean numCpusGood = host.getCpus().intValue() >= offering.getCpu(); boolean cpuFreqGood = host.getSpeed().intValue() >= offering.getSpeed(); int cpu_requested = offering.getCpu() * offering.getSpeed(); - long ram_requested = offering.getRamSize() * 1024L * 1024L; - boolean hostHasCapacity = _capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, false, _factor, considerReservedCapacity); + long ram_requested = offering.getRamSize() * 1024L * 1024L; + Cluster cluster = _clusterDao.findById(host.getClusterId()); + ClusterDetailsVO clusterDetailsCpuOvercommit = _clusterDetailsDao.findDetail(cluster.getId(),"cpuOvercommitRatio"); + ClusterDetailsVO clusterDetailsRamOvercommmt = _clusterDetailsDao.findDetail(cluster.getId(),"memoryOvercommitRatio"); + Float cpuOvercommitRatio = Float.parseFloat(clusterDetailsCpuOvercommit.getValue()); + Float memoryOvercommitRatio = Float.parseFloat(clusterDetailsRamOvercommmt.getValue()); + + boolean hostHasCapacity = _capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, false,cpuOvercommitRatio,memoryOvercommitRatio, considerReservedCapacity); if (numCpusGood && cpuFreqGood && hostHasCapacity) { if (s_logger.isDebugEnabled()) { diff --git a/server/src/com/cloud/api/ApiDBUtils.java b/server/src/com/cloud/api/ApiDBUtils.java index c28daefa69a..ffee22fa1ae 100755 --- a/server/src/com/cloud/api/ApiDBUtils.java +++ b/server/src/com/cloud/api/ApiDBUtils.java @@ -318,6 +318,7 @@ public class ApiDBUtils { static AsyncJobDao _asyncJobDao; static HostDetailsDao _hostDetailsDao; static VMSnapshotDao _vmSnapshotDao; + static ClusterDetailsDao _clusterDetailsDao; @Inject private ManagementServer ms; @Inject public AsyncJobManager asyncMgr; @@ -418,6 +419,7 @@ public class ApiDBUtils { @Inject private SnapshotPolicyDao snapshotPolicyDao; @Inject private AsyncJobDao asyncJobDao; @Inject private HostDetailsDao hostDetailsDao; + @Inject private ClusterDetailsDao clusterDetailsDao; @Inject private VMSnapshotDao vmSnapshotDao; @PostConstruct void init() { @@ -517,6 +519,7 @@ public class ApiDBUtils { _snapshotPolicyDao = snapshotPolicyDao; _asyncJobDao = asyncJobDao; _hostDetailsDao = hostDetailsDao; + _clusterDetailsDao = clusterDetailsDao; _vmSnapshotDao = vmSnapshotDao; // Note: stats collector should already have been initialized by this time, otherwise a null instance is returned _statsCollector = 
StatsCollector.getInstance(); @@ -681,6 +684,10 @@ public class ApiDBUtils { return _clusterDao.findById(clusterId); } + public static ClusterDetailsVO findClusterDetails(long clusterId, String name){ + return _clusterDetailsDao.findDetail(clusterId,name); + } + public static DiskOfferingVO findDiskOfferingById(Long diskOfferingId) { return _diskOfferingDao.findByIdIncludingRemoved(diskOfferingId); } diff --git a/server/src/com/cloud/api/ApiResponseHelper.java b/server/src/com/cloud/api/ApiResponseHelper.java index 41cf96e44d1..eafee8a2a4a 100755 --- a/server/src/com/cloud/api/ApiResponseHelper.java +++ b/server/src/com/cloud/api/ApiResponseHelper.java @@ -690,14 +690,12 @@ public class ApiResponseHelper implements ResponseGenerator { if (showCapacities != null && showCapacities) { List capacities = ApiDBUtils.getCapacityByClusterPodZone(null, pod.getId(), null); Set capacityResponses = new HashSet(); - float cpuOverprovisioningFactor = ApiDBUtils.getCpuOverprovisioningFactor(); - for (SummedCapacity capacity : capacities) { CapacityResponse capacityResponse = new CapacityResponse(); capacityResponse.setCapacityType(capacity.getCapacityType()); capacityResponse.setCapacityUsed(capacity.getUsedCapacity()); if (capacity.getCapacityType() == Capacity.CAPACITY_TYPE_CPU) { - capacityResponse.setCapacityTotal(new Long((long) (capacity.getTotalCapacity() * cpuOverprovisioningFactor))); + capacityResponse.setCapacityTotal(new Long((long) (capacity.getTotalCapacity()))); } else if (capacity.getCapacityType() == Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED) { List c = ApiDBUtils.findNonSharedStorageForClusterPodZone(null, pod.getId(), null); capacityResponse.setCapacityTotal(capacity.getTotalCapacity() - c.get(0).getTotalCapacity()); @@ -827,12 +825,15 @@ public class ApiResponseHelper implements ResponseGenerator { clusterResponse.setClusterType(cluster.getClusterType().toString()); clusterResponse.setAllocationState(cluster.getAllocationState().toString()); clusterResponse.setManagedState(cluster.getManagedState().toString()); + String cpuOvercommitRatio=ApiDBUtils.findClusterDetails(cluster.getId(),"cpuOvercommitRatio").getValue(); + String memoryOvercommitRatio=ApiDBUtils.findClusterDetails(cluster.getId(),"memoryOvercommitRatio").getValue(); + clusterResponse.setCpuovercommitratio(cpuOvercommitRatio); + clusterResponse.setRamovercommitratio(memoryOvercommitRatio); if (showCapacities != null && showCapacities) { List capacities = ApiDBUtils.getCapacityByClusterPodZone(null, null, cluster.getId()); Set capacityResponses = new HashSet(); - float cpuOverprovisioningFactor = ApiDBUtils.getCpuOverprovisioningFactor(); for (SummedCapacity capacity : capacities) { CapacityResponse capacityResponse = new CapacityResponse(); @@ -840,8 +841,11 @@ public class ApiResponseHelper implements ResponseGenerator { capacityResponse.setCapacityUsed(capacity.getUsedCapacity()); if (capacity.getCapacityType() == Capacity.CAPACITY_TYPE_CPU) { - capacityResponse.setCapacityTotal(new Long((long) (capacity.getTotalCapacity() * cpuOverprovisioningFactor))); - } else if (capacity.getCapacityType() == Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED) { + capacityResponse.setCapacityTotal(new Long((long) (capacity.getTotalCapacity() * Float.parseFloat(cpuOvercommitRatio)))); + }else if (capacity.getCapacityType() == Capacity.CAPACITY_TYPE_MEMORY){ + capacityResponse.setCapacityTotal(new Long((long) (capacity.getTotalCapacity() * Float.parseFloat(memoryOvercommitRatio)))); + } + else if (capacity.getCapacityType() == 
Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED) { List c = ApiDBUtils.findNonSharedStorageForClusterPodZone(null, null, cluster.getId()); capacityResponse.setCapacityTotal(capacity.getTotalCapacity() - c.get(0).getTotalCapacity()); capacityResponse.setCapacityUsed(capacity.getUsedCapacity() - c.get(0).getUsedCapacity()); diff --git a/server/src/com/cloud/capacity/CapacityManager.java b/server/src/com/cloud/capacity/CapacityManager.java index 656e744ecf7..bdd9ccd155b 100755 --- a/server/src/com/cloud/capacity/CapacityManager.java +++ b/server/src/com/cloud/capacity/CapacityManager.java @@ -39,8 +39,8 @@ public interface CapacityManager extends Manager { * @param ram required RAM * @param cpuOverprovisioningFactor factor to apply to the actual host cpu */ - boolean checkIfHostHasCapacity(long hostId, Integer cpu, long ram, boolean checkFromReservedCapacity, float cpuOverprovisioningFactor, boolean considerReservedCapacity); - + boolean checkIfHostHasCapacity(long hostId, Integer cpu, long ram, boolean checkFromReservedCapacity, float cpuOverprovisioningFactor, float memoryOvercommitRatio, boolean considerReservedCapacity); + void updateCapacityForHost(HostVO host); /** diff --git a/server/src/com/cloud/capacity/CapacityManagerImpl.java b/server/src/com/cloud/capacity/CapacityManagerImpl.java index 74152ff9c39..292ef0abd5c 100755 --- a/server/src/com/cloud/capacity/CapacityManagerImpl.java +++ b/server/src/com/cloud/capacity/CapacityManagerImpl.java @@ -28,6 +28,12 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import com.cloud.dc.ClusterDetailsDao; +import com.cloud.dc.DataCenter; +import com.cloud.dc.dao.ClusterDao; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.InsufficientServerCapacityException; +import com.cloud.resource.ResourceState; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -119,26 +125,25 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, @Inject protected UserVmDao _userVMDao; + @Inject + ClusterDetailsDao _clusterDetailsDao; + @Inject + ClusterDao _clusterDao; private int _vmCapacityReleaseInterval; private ScheduledExecutorService _executor; private boolean _stopped; long _extraBytesPerVolume = 0; private float _storageOverProvisioningFactor = 1.0f; - private float _cpuOverProvisioningFactor = 1.0f; @Override public boolean configure(String name, Map params) throws ConfigurationException { _vmCapacityReleaseInterval = NumbersUtil.parseInt(_configDao.getValue(Config.CapacitySkipcountingHours.key()), 3600); _storageOverProvisioningFactor = NumbersUtil.parseFloat(_configDao.getValue(Config.StorageOverprovisioningFactor.key()), 1.0f); - _cpuOverProvisioningFactor = NumbersUtil.parseFloat(_configDao.getValue(Config.CPUOverprovisioningFactor.key()), 1.0f); - if (_cpuOverProvisioningFactor < 1.0f) { - _cpuOverProvisioningFactor = 1.0f; - } _executor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("HostCapacity-Checker")); VirtualMachine.State.getStateMachine().registerListener(this); _agentManager.registerForHostEvents(new StorageCapacityListener(_capacityDao, _storageOverProvisioningFactor), true, false, false); - _agentManager.registerForHostEvents(new ComputeCapacityListener(_capacityDao, this, _cpuOverProvisioningFactor), true, false, false); + _agentManager.registerForHostEvents(new ComputeCapacityListener(_capacityDao, this), true, false, false); return true; } @@ 
-163,7 +168,11 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, ServiceOfferingVO svo = _offeringsDao.findById(vm.getServiceOfferingId()); CapacityVO capacityCpu = _capacityDao.findByHostIdType(hostId, CapacityVO.CAPACITY_TYPE_CPU); CapacityVO capacityMemory = _capacityDao.findByHostIdType(hostId, CapacityVO.CAPACITY_TYPE_MEMORY); - + Long clusterId=null; + if (hostId != null) { + HostVO host = _hostDao.findById(hostId); + clusterId= host.getClusterId(); + } if (capacityCpu == null || capacityMemory == null || svo == null) { return false; } @@ -172,9 +181,6 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, try { txn.start(); - int vmCPU = svo.getCpu() * svo.getSpeed(); - long vmMem = svo.getRamSize() * 1024L * 1024L; - capacityCpu = _capacityDao.lockRow(capacityCpu.getId(), true); capacityMemory = _capacityDao.lockRow(capacityMemory.getId(), true); @@ -183,13 +189,18 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, long reservedCpu = capacityCpu.getReservedCapacity(); long reservedMem = capacityMemory.getReservedCapacity(); long actualTotalCpu = capacityCpu.getTotalCapacity(); - String opFactor = _configDao.getValue(Config.CPUOverprovisioningFactor.key()); - float cpuOverprovisioningFactor = NumbersUtil.parseFloat(opFactor, 1); - long totalCpu = (long) (actualTotalCpu * cpuOverprovisioningFactor); + float cpuOvercommitRatio =Float.parseFloat(_clusterDetailsDao.findDetail(clusterId,"cpuOvercommitRatio").getValue()); + float memoryOvercommitRatio = Float.parseFloat(_clusterDetailsDao.findDetail(clusterId,"memoryOvercommitRatio").getValue()); + int vmCPU = (int) (svo.getCpu() * svo.getSpeed()); + long vmMem = (long) (svo.getRamSize() * 1024L * 1024L); + long actualTotalMem = capacityMemory.getTotalCapacity(); + long totalMem = (long) (actualTotalMem * memoryOvercommitRatio); + long totalCpu = (long) (actualTotalCpu * cpuOvercommitRatio); if (s_logger.isDebugEnabled()) { s_logger.debug("Hosts's actual total CPU: " + actualTotalCpu + " and CPU after applying overprovisioning: " + totalCpu); + s_logger.debug("Hosts's actual total RAM: " + actualTotalMem + " and RAM after applying overprovisioning: " + totalMem); } - long totalMem = capacityMemory.getTotalCapacity(); + if (!moveFromReserved) { /* move resource from used */ @@ -241,6 +252,10 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, public void allocateVmCapacity(VirtualMachine vm, boolean fromLastHost) { long hostId = vm.getHostId(); + HostVO host = _hostDao.findById(hostId); + long clusterId = host.getClusterId(); + float cpuOvercommitRatio =Float.parseFloat(_clusterDetailsDao.findDetail(clusterId,"cpuOvercommitRatio").getValue()); + float memoryOvercommitRatio = Float.parseFloat(_clusterDetailsDao.findDetail(clusterId,"memoryOvercommitRatio").getValue()); ServiceOfferingVO svo = _offeringsDao.findById(vm.getServiceOfferingId()); @@ -251,11 +266,9 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, return; } - int cpu = svo.getCpu() * svo.getSpeed(); - long ram = svo.getRamSize() * 1024L * 1024L; + int cpu = (int) (svo.getCpu() * svo.getSpeed()); + long ram = (long) (svo.getRamSize() * 1024L * 1024L); - String opFactor = _configDao.getValue(Config.CPUOverprovisioningFactor.key()); - float cpuOverprovisioningFactor = NumbersUtil.parseFloat(opFactor, 1); Transaction txn = Transaction.currentTxn(); @@ -269,11 +282,12 @@ public class CapacityManagerImpl extends ManagerBase 
implements CapacityManager, long reservedCpu = capacityCpu.getReservedCapacity(); long reservedMem = capacityMem.getReservedCapacity(); long actualTotalCpu = capacityCpu.getTotalCapacity(); - long totalCpu = (long) (actualTotalCpu * cpuOverprovisioningFactor); + long actualTotalMem = capacityMem.getTotalCapacity(); + long totalCpu = (long) (actualTotalCpu * cpuOvercommitRatio); + long totalMem = (long) (actualTotalMem * memoryOvercommitRatio); if (s_logger.isDebugEnabled()) { s_logger.debug("Hosts's actual total CPU: " + actualTotalCpu + " and CPU after applying overprovisioning: " + totalCpu); } - long totalMem = capacityMem.getTotalCapacity(); long freeCpu = totalCpu - (reservedCpu + usedCpu); long freeMem = totalMem - (reservedMem + usedMem); @@ -325,12 +339,12 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, } @Override - public boolean checkIfHostHasCapacity(long hostId, Integer cpu, long ram, boolean checkFromReservedCapacity, float cpuOverprovisioningFactor, boolean considerReservedCapacity) { + public boolean checkIfHostHasCapacity(long hostId, Integer cpu, long ram, boolean checkFromReservedCapacity, float cpuOvercommitRatio,float memoryOvercommitRatio, boolean considerReservedCapacity) { boolean hasCapacity = false; if (s_logger.isDebugEnabled()) { s_logger.debug("Checking if host: " + hostId + " has enough capacity for requested CPU: " + cpu + " and requested RAM: " + ram - + " , cpuOverprovisioningFactor: " + cpuOverprovisioningFactor); + + " , cpuOverprovisioningFactor: " + cpuOvercommitRatio); } CapacityVO capacityCpu = _capacityDao.findByHostIdType(hostId, CapacityVO.CAPACITY_TYPE_CPU); @@ -356,13 +370,13 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, long reservedCpu = capacityCpu.getReservedCapacity(); long reservedMem = capacityMem.getReservedCapacity(); long actualTotalCpu = capacityCpu.getTotalCapacity(); - long totalCpu = (long) (actualTotalCpu * cpuOverprovisioningFactor); + long actualTotalMem = capacityMem.getTotalCapacity(); + long totalCpu = (long) (actualTotalCpu * cpuOvercommitRatio ); + long totalMem = (long) (actualTotalMem *memoryOvercommitRatio ); if (s_logger.isDebugEnabled()) { s_logger.debug("Hosts's actual total CPU: " + actualTotalCpu + " and CPU after applying overprovisioning: " + totalCpu); } - long totalMem = capacityMem.getTotalCapacity(); - String failureReason = ""; if (checkFromReservedCapacity) { long freeCpu = reservedCpu; @@ -695,10 +709,12 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, capacityCPU.addAnd("podId", SearchCriteria.Op.EQ, server.getPodId()); capacityCPU.addAnd("capacityType", SearchCriteria.Op.EQ, CapacityVO.CAPACITY_TYPE_CPU); List capacityVOCpus = _capacityDao.search(capacitySC, null); + Float cpuovercommitratio = Float.parseFloat(_clusterDetailsDao.findDetail(server.getClusterId(),"cpuOvercommitRatio").getValue()); + Float memoryOvercommitRatio = Float.parseFloat(_clusterDetailsDao.findDetail(server.getClusterId(),"memoryOvercommitRatio").getValue()); if (capacityVOCpus != null && !capacityVOCpus.isEmpty()) { CapacityVO CapacityVOCpu = capacityVOCpus.get(0); - long newTotalCpu = (long) (server.getCpus().longValue() * server.getSpeed().longValue() * _cpuOverProvisioningFactor); + long newTotalCpu = (long) (server.getCpus().longValue() * server.getSpeed().longValue() * cpuovercommitratio); if ((CapacityVOCpu.getTotalCapacity() <= newTotalCpu) || ((CapacityVOCpu.getUsedCapacity() + CapacityVOCpu.getReservedCapacity()) <= 
newTotalCpu)) { CapacityVOCpu.setTotalCapacity(newTotalCpu); @@ -713,7 +729,7 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, _capacityDao.update(CapacityVOCpu.getId(), CapacityVOCpu); } else { CapacityVO capacity = new CapacityVO(server.getId(), server.getDataCenterId(), server.getPodId(), server.getClusterId(), 0L, - (long) (server.getCpus().longValue() * server.getSpeed().longValue() * _cpuOverProvisioningFactor), + (long) (server.getCpus().longValue() * server.getSpeed().longValue()), CapacityVO.CAPACITY_TYPE_CPU); _capacityDao.persist(capacity); } @@ -727,7 +743,7 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, if (capacityVOMems != null && !capacityVOMems.isEmpty()) { CapacityVO CapacityVOMem = capacityVOMems.get(0); - long newTotalMem = server.getTotalMemory(); + long newTotalMem = (long)((server.getTotalMemory())* memoryOvercommitRatio); if (CapacityVOMem.getTotalCapacity() <= newTotalMem || (CapacityVOMem.getUsedCapacity() + CapacityVOMem.getReservedCapacity() <= newTotalMem)) { CapacityVOMem.setTotalCapacity(newTotalMem); diff --git a/server/src/com/cloud/capacity/ComputeCapacityListener.java b/server/src/com/cloud/capacity/ComputeCapacityListener.java index 8ea695ad072..16e154a80a6 100755 --- a/server/src/com/cloud/capacity/ComputeCapacityListener.java +++ b/server/src/com/cloud/capacity/ComputeCapacityListener.java @@ -42,12 +42,11 @@ public class ComputeCapacityListener implements Listener { public ComputeCapacityListener(CapacityDao _capacityDao, - CapacityManager _capacityMgr, - float _overProvisioningFactor) { + CapacityManager _capacityMgr + ) { super(); this._capacityDao = _capacityDao; this._capacityMgr = _capacityMgr; - this._cpuOverProvisioningFactor = _overProvisioningFactor; } diff --git a/server/src/com/cloud/capacity/dao/CapacityDao.java b/server/src/com/cloud/capacity/dao/CapacityDao.java index 0c0723b837e..0132f69cd50 100755 --- a/server/src/com/cloud/capacity/dao/CapacityDao.java +++ b/server/src/com/cloud/capacity/dao/CapacityDao.java @@ -26,20 +26,20 @@ import com.cloud.utils.db.GenericDao; public interface CapacityDao extends GenericDao { CapacityVO findByHostIdType(Long hostId, short capacityType); - List listClustersInZoneOrPodByHostCapacities(long id, int requiredCpu, long requiredRam, short capacityTypeForOrdering, boolean isZone, float cpuOverprovisioningFactor); - List listHostsWithEnoughCapacity(int requiredCpu, long requiredRam, Long clusterId, String hostType, float cpuOverprovisioningFactor); + List listClustersInZoneOrPodByHostCapacities(long id, int requiredCpu, long requiredRam, short capacityTypeForOrdering, boolean isZone); + List listHostsWithEnoughCapacity(int requiredCpu, long requiredRam, Long clusterId, String hostType); boolean removeBy(Short capacityType, Long zoneId, Long podId, Long clusterId, Long hostId); List findByClusterPodZone(Long zoneId, Long podId, Long clusterId); List findNonSharedStorageForClusterPodZone(Long zoneId,Long podId, Long clusterId); - Pair, Map> orderClustersByAggregateCapacity(long id, short capacityType, boolean isZone, float cpuOverprovisioningFactor); + Pair, Map> orderClustersByAggregateCapacity(long id, short capacityType, boolean isZone); List findCapacityBy(Integer capacityType, Long zoneId, Long podId, Long clusterId); - List listPodsByHostCapacities(long zoneId, int requiredCpu, long requiredRam, short capacityType, float cpuOverprovisioningFactor); - Pair, Map> orderPodsByAggregateCapacity(long zoneId, short capacityType, float 
cpuOverprovisioningFactor); + List listPodsByHostCapacities(long zoneId, int requiredCpu, long requiredRam, short capacityType); + Pair, Map> orderPodsByAggregateCapacity(long zoneId, short capacityType); List findCapacityBy(Integer capacityType, Long zoneId, Long podId, Long clusterId, String resourceState); List listCapacitiesGroupedByLevelAndType(Integer capacityType, Long zoneId, Long podId, Long clusterId, int level, Long limit); void updateCapacityState(Long dcId, Long podId, Long clusterId, Long hostId, String capacityState); - List listClustersCrossingThreshold(short capacityType, Long zoneId, Float disableThreshold, long computeRequested, Float overProvFactor); -} + List listClustersCrossingThreshold(short capacityType, Long zoneId, Float disableThreshold, long computeRequested); +} diff --git a/server/src/com/cloud/capacity/dao/CapacityDaoImpl.java b/server/src/com/cloud/capacity/dao/CapacityDaoImpl.java index 358261470e3..3aef29b954a 100755 --- a/server/src/com/cloud/capacity/dao/CapacityDaoImpl.java +++ b/server/src/com/cloud/capacity/dao/CapacityDaoImpl.java @@ -55,10 +55,9 @@ public class CapacityDaoImpl extends GenericDaoBase implements private static final String ADD_ALLOCATED_SQL = "UPDATE `cloud`.`op_host_capacity` SET used_capacity = used_capacity + ? WHERE host_id = ? AND capacity_type = ?"; private static final String SUBTRACT_ALLOCATED_SQL = "UPDATE `cloud`.`op_host_capacity` SET used_capacity = used_capacity - ? WHERE host_id = ? AND capacity_type = ?"; - private static final String LIST_CLUSTERSINZONE_BY_HOST_CAPACITIES_PART1 = "SELECT DISTINCT capacity.cluster_id FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`cluster` cluster on (cluster.id = capacity.cluster_id AND cluster.removed is NULL) WHERE "; - private static final String LIST_CLUSTERSINZONE_BY_HOST_CAPACITIES_PART2 = " AND capacity_type = ? AND ((total_capacity * ?) - used_capacity + reserved_capacity) >= ? " + - "AND cluster_id IN (SELECT distinct cluster_id FROM `cloud`.`op_host_capacity` WHERE "; - private static final String LIST_CLUSTERSINZONE_BY_HOST_CAPACITIES_PART3 = " AND capacity_type = ? AND ((total_capacity * ?) - used_capacity + reserved_capacity) >= ?) "; + private static final String LIST_CLUSTERSINZONE_BY_HOST_CAPACITIES_PART1 = "SELECT DISTINCT capacity.cluster_id FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`cluster` cluster on (cluster.id = capacity.cluster_id AND cluster.removed is NULL) INNER JOIN `cloud`.`cluster_details` cluster_details ON (cluster.id = cluster_details.cluster_id ) WHERE "; + private static final String LIST_CLUSTERSINZONE_BY_HOST_CAPACITIES_PART2 = " AND capacity_type = ? AND cluster_details.name= ? AND ((total_capacity * cluster_details.value ) - used_capacity + reserved_capacity) >= ? AND capacity.cluster_id IN (SELECT distinct capacity.cluster_id FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`cluster_details` cluster_details ON (capacity.cluster_id = cluster_details.cluster_id ) WHERE "; + private static final String LIST_CLUSTERSINZONE_BY_HOST_CAPACITIES_PART3 = " AND capacity_type = ? AND cluster_details.name= ? AND ((total_capacity * cluster_details.value) - used_capacity + reserved_capacity) >= ?) 
"; private final SearchBuilder _hostIdTypeSearch; private final SearchBuilder _hostOrPoolIdSearch; @@ -66,47 +65,54 @@ public class CapacityDaoImpl extends GenericDaoBase implements @Inject protected StoragePoolDao _storagePoolDao; - private static final String LIST_HOSTS_IN_CLUSTER_WITH_ENOUGH_CAPACITY = "SELECT a.host_id FROM (host JOIN op_host_capacity a ON host.id = a.host_id AND host.cluster_id = ? AND host.type = ? " + - "AND (a.total_capacity * ? - a.used_capacity) >= ? and a.capacity_type = 1) " + - "JOIN op_host_capacity b ON a.host_id = b.host_id AND b.total_capacity - b.used_capacity >= ? AND b.capacity_type = 0"; + private static final String LIST_HOSTS_IN_CLUSTER_WITH_ENOUGH_CAPACITY = " SELECT host_capacity.host_id FROM (`cloud`.`host` JOIN `cloud`.`op_host_capacity` host_capacity ON (host.id = host_capacity.host_id AND host.cluster_id = ?) JOIN `cloud`.`cluster_details` cluster_details ON (host_capacity.cluster_id = cluster_details.cluster_id) AND host.type = ? AND cluster_details.name='cpuOvercommitRatio' AND ((host_capacity.total_capacity *cluster_details.value ) - host_capacity.used_capacity) >= ? and host_capacity.capacity_type = '1' " + + " AND host_capacity.host_id IN (SELECT capacity.host_id FROM `cloud`.`op_host_capacity` capacity JOIN `cloud`.`cluster_details` cluster_details ON (capacity.cluster_id= cluster_details.cluster_id) where capacity_type='0' AND cluster_details.name='memoryOvercommitRatio' AND ((total_capacity* cluster_details.value) - used_capacity ) >= ?)) "; - private static final String ORDER_CLUSTERS_BY_AGGREGATE_CAPACITY_PART1 = "SELECT cluster_id, SUM(used_capacity+reserved_capacity)/SUM(total_capacity * ?) FROM `cloud`.`op_host_capacity` WHERE " ; - private static final String ORDER_CLUSTERS_BY_AGGREGATE_CAPACITY_PART2 = " AND capacity_type = ? GROUP BY cluster_id ORDER BY SUM(used_capacity+reserved_capacity)/SUM(total_capacity * ?) ASC"; + private static final String ORDER_CLUSTERS_BY_AGGREGATE_CAPACITY_PART1= "SELECT capacity.cluster_id, SUM(used_capacity+reserved_capacity)/SUM(total_capacity ) FROM `cloud`.`op_host_capacity` capacity WHERE "; - private static final String LIST_PODSINZONE_BY_HOST_CAPACITIES = "SELECT DISTINCT capacity.pod_id FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`host_pod_ref` pod " + - " ON (pod.id = capacity.pod_id AND pod.removed is NULL) WHERE " + - " capacity.data_center_id = ? AND capacity_type = ? AND ((total_capacity * ?) - used_capacity + reserved_capacity) >= ? " + - " AND pod_id IN (SELECT distinct pod_id FROM `cloud`.`op_host_capacity` WHERE " + - " capacity.data_center_id = ? AND capacity_type = ? AND ((total_capacity * ?) - used_capacity + reserved_capacity) >= ?) "; + private static final String ORDER_CLUSTERS_BY_AGGREGATE_CAPACITY_PART2= " AND capacity_type = ? AND cluster_details.name =? GROUP BY capacity.cluster_id ORDER BY SUM(used_capacity+reserved_capacity)/SUM(total_capacity * cluster_details.value) ASC"; - private static final String ORDER_PODS_BY_AGGREGATE_CAPACITY = "SELECT pod_id, SUM(used_capacity+reserved_capacity)/SUM(total_capacity * ?) FROM `cloud`.`op_host_capacity` WHERE data_center_id = ? " + - " AND capacity_type = ? GROUP BY pod_id ORDER BY SUM(used_capacity+reserved_capacity)/SUM(total_capacity * ?) 
ASC"; + private static final String ORDER_CLUSTERS_BY_AGGREGATE_OVERCOMMIT_CAPACITY_PART1= "SELECT capacity.cluster_id, SUM(used_capacity+reserved_capacity)/SUM(total_capacity * cluster_details.value) FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`cluster_details` cluster_details ON (capacity.cluster_id = cluster_details.cluster_id) WHERE "; + + private static final String ORDER_CLUSTERS_BY_AGGREGATE_OVERCOMMIT_CAPACITY_PART2= " AND capacity_type = ? AND cluster_details.name =? GROUP BY capacity.cluster_id ORDER BY SUM(used_capacity+reserved_capacity)/SUM(total_capacity * cluster_details.value) ASC"; + + private static final String LIST_PODSINZONE_BY_HOST_CAPACITY_TYPE = "SELECT DISTINCT capacity.pod_id FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`host_pod_ref` pod " + + " ON (pod.id = capacity.pod_id AND pod.removed is NULL) INNER JOIN `cloud`.`cluster_details` cluster ON (capacity.cluster_id = cluster.cluster_id ) WHERE capacity.data_center_id = ? AND capacity_type = ? AND cluster_details.name= ? ((total_capacity * cluster.value ) - used_capacity + reserved_capacity) >= ? "; + + private static final String ORDER_PODS_BY_AGGREGATE_CAPACITY = " SELECT capacity.pod_id, SUM(used_capacity+reserved_capacity)/SUM(total_capacity) FROM `cloud`.`op_host_capacity` capacity WHERE data_center_id= ? AND capacity_type = ? GROUP BY capacity.pod_id ORDER BY SUM(used_capacity+reserved_capacity)/SUM(total_capacity) ASC "; + + private static final String ORDER_PODS_BY_AGGREGATE_OVERCOMMIT_CAPACITY ="SELECT capacity.pod_id, SUM(used_capacity+reserved_capacity)/SUM(total_capacity * cluster_details.value) FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`cluster_details` cluster_details ON (capacity.cluster_id = cluster_details.cluster_id) WHERE data_center_id=? AND capacity_type = ? AND cluster_details.name = ? GROUP BY capacity.pod_id ORDER BY SUM(used_capacity+reserved_capacity)/SUM(total_capacity * cluster_details.value) ASC"; private static final String LIST_CAPACITY_BY_RESOURCE_STATE = "SELECT capacity.data_center_id, sum(capacity.used_capacity), sum(capacity.reserved_quantity), sum(capacity.total_capacity), capacity_capacity_type "+ - "FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`data_center` dc ON (dc.id = capacity.data_center_id AND dc.removed is NULL)"+ - "FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`host_pod_ref` pod ON (pod.id = capacity.pod_id AND pod.removed is NULL)"+ - "FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`cluster` cluster ON (cluster.id = capacity.cluster_id AND cluster.removed is NULL)"+ - "FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`host` host ON (host.id = capacity.host_id AND host.removed is NULL)"+ - "WHERE dc.allocation_state = ? AND pod.allocation_state = ? AND cluster.allocation_state = ? AND host.resource_state = ? AND capacity_type not in (3,4) "; - + "FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`data_center` dc ON (dc.id = capacity.data_center_id AND dc.removed is NULL)"+ + "FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`host_pod_ref` pod ON (pod.id = capacity.pod_id AND pod.removed is NULL)"+ + "FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`cluster` cluster ON (cluster.id = capacity.cluster_id AND cluster.removed is NULL)"+ + "FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`host` host ON (host.id = capacity.host_id AND host.removed is NULL)"+ + "WHERE dc.allocation_state = ? AND pod.allocation_state = ? 
AND cluster.allocation_state = ? AND host.resource_state = ? AND capacity_type not in (3,4) "; + private static final String LIST_CAPACITY_GROUP_BY_ZONE_TYPE_PART1 = "SELECT (sum(capacity.used_capacity) + sum(capacity.reserved_capacity)), (case capacity_type when 1 then (sum(total_capacity) * (select value from `cloud`.`configuration` where name like 'cpu.overprovisioning.factor')) else sum(total_capacity) end), " + - "((sum(capacity.used_capacity) + sum(capacity.reserved_capacity)) / (case capacity_type when 1 then (sum(total_capacity) * (select value from `cloud`.`configuration` where name like 'cpu.overprovisioning.factor')) else sum(total_capacity) end)) percent,"+ - " capacity.capacity_type, capacity.data_center_id "+ - "FROM `cloud`.`op_host_capacity` capacity "+ - "WHERE total_capacity > 0 AND data_center_id is not null AND capacity_state='Enabled'"; + "((sum(capacity.used_capacity) + sum(capacity.reserved_capacity)) / (case capacity_type when 1 then (sum(total_capacity) * (select value from `cloud`.`configuration` where name like 'cpu.overprovisioning.factor')) else sum(total_capacity) end)) percent,"+ + " capacity.capacity_type, capacity.data_center_id "+ + "FROM `cloud`.`op_host_capacity` capacity "+ + "WHERE total_capacity > 0 AND data_center_id is not null AND capacity_state='Enabled'"; private static final String LIST_CAPACITY_GROUP_BY_ZONE_TYPE_PART2 = " GROUP BY data_center_id, capacity_type order by percent desc limit "; - private static final String LIST_CAPACITY_GROUP_BY_POD_TYPE_PART1 = "SELECT (sum(capacity.used_capacity) + sum(capacity.reserved_capacity)), (case capacity_type when 1 then (sum(total_capacity) * (select value from `cloud`.`configuration` where name like 'cpu.overprovisioning.factor')) else sum(total_capacity) end), " + - "((sum(capacity.used_capacity) + sum(capacity.reserved_capacity)) / (case capacity_type when 1 then (sum(total_capacity) * (select value from `cloud`.`configuration` where name like 'cpu.overprovisioning.factor')) else sum(total_capacity) end)) percent,"+ - " capacity.capacity_type, capacity.data_center_id, pod_id "+ - "FROM `cloud`.`op_host_capacity` capacity "+ - "WHERE total_capacity > 0 AND pod_id is not null AND capacity_state='Enabled'"; + private static final String LIST_CAPACITY_GROUP_BY_POD_TYPE_PART1 = "SELECT (sum(capacity.used_capacity) + sum(capacity.reserved_capacity))," + + " (case capacity_type when 1 then (sum(total_capacity) * (select value from `cloud`.`cluster_details` where cluster_details.name= 'cpuOvercommitRatio' AND cluster_details.cluster_id=capacity.cluster_id)) " + + "when '0' then (sum(total_capacity) * (select value from `cloud`.`cluster_details` where cluster_details.name= 'memoryOvercommitRatio' AND cluster_details.cluster_id=capacity.cluster_id))else sum(total_capacity) end)," + + "((sum(capacity.used_capacity) + sum(capacity.reserved_capacity)) / ( case capacity_type when 1 then (sum(total_capacity) * (select value from `cloud`.`cluster_details` where cluster_details.name= 'cpuOvercommitRatio' AND cluster_details.cluster_id=capacity.cluster_id)) " + + "when '0' then (sum(total_capacity) * (select value from `cloud`.`cluster_details` where cluster_details.name= 'memoryOvercommitRatio' AND cluster_details.cluster_id=capacity.cluster_id))else sum(total_capacity) end)) percent," + + "capacity.capacity_type, capacity.data_center_id, pod_id FROM `cloud`.`op_host_capacity` capacity WHERE total_capacity > 0 AND data_center_id is not null AND capacity_state='Enabled' "; + private static final String 
LIST_CAPACITY_GROUP_BY_POD_TYPE_PART2 = " GROUP BY pod_id, capacity_type order by percent desc limit "; - private static final String LIST_CAPACITY_GROUP_BY_CLUSTER_TYPE_PART1 = "SELECT (sum(capacity.used_capacity) + sum(capacity.reserved_capacity)), (case capacity_type when 1 then (sum(total_capacity) * (select value from `cloud`.`configuration` where name like 'cpu.overprovisioning.factor')) else sum(total_capacity) end), " + - "((sum(capacity.used_capacity) + sum(capacity.reserved_capacity)) / (case capacity_type when 1 then (sum(total_capacity) * (select value from `cloud`.`configuration` where name like 'cpu.overprovisioning.factor')) else sum(total_capacity) end)) percent,"+ - "capacity.capacity_type, capacity.data_center_id, pod_id, cluster_id "+ - "FROM `cloud`.`op_host_capacity` capacity "+ - "WHERE total_capacity > 0 AND cluster_id is not null AND capacity_state='Enabled'"; + private static final String LIST_CAPACITY_GROUP_BY_CLUSTER_TYPE_PART1 = "SELECT (sum(capacity.used_capacity) + sum(capacity.reserved_capacity))," + + " (case capacity_type when 1 then (sum(total_capacity) * (select value from `cloud`.`cluster_details` where cluster_details.name= 'cpuOvercommitRatio' AND cluster_details.cluster_id=capacity.cluster_id)) " + + "when '0' then (sum(total_capacity) * (select value from `cloud`.`cluster_details` where cluster_details.name= 'memoryOvercommitRatio' AND cluster_details.cluster_id=capacity.cluster_id))else sum(total_capacity) end)," + + "((sum(capacity.used_capacity) + sum(capacity.reserved_capacity)) / ( case capacity_type when 1 then (sum(total_capacity) * (select value from `cloud`.`cluster_details` where cluster_details.name= 'cpuOvercommitRatio' AND cluster_details.cluster_id=capacity.cluster_id)) " + + "when '0' then (sum(total_capacity) * (select value from `cloud`.`cluster_details` where cluster_details.name= 'memoryOvercommitRatio' AND cluster_details.cluster_id=capacity.cluster_id))else sum(total_capacity) end)) percent," + + "capacity.capacity_type, capacity.data_center_id, pod_id, cluster_id FROM `cloud`.`op_host_capacity` capacity WHERE total_capacity > 0 AND data_center_id is not null AND capacity_state='Enabled' "; + + private static final String LIST_CAPACITY_GROUP_BY_CLUSTER_TYPE_PART2 = " GROUP BY cluster_id, capacity_type order by percent desc limit "; private static final String UPDATE_CAPACITY_STATE = "UPDATE `cloud`.`op_host_capacity` SET capacity_state = ? 
WHERE "; private static final String LIST_CLUSTERS_CROSSING_THRESHOLD = "SELECT cluster_id " + @@ -138,34 +144,34 @@ public class CapacityDaoImpl extends GenericDaoBase implements _allFieldsSearch.done(); } - + @Override - public List listClustersCrossingThreshold(short capacityType, Long zoneId, Float disableThreshold, long compute_requested, Float overProvFactor){ + public List listClustersCrossingThreshold(short capacityType, Long zoneId, Float disableThreshold, long compute_requested){ - Transaction txn = Transaction.currentTxn(); - PreparedStatement pstmt = null; - List result = new ArrayList(); - StringBuilder sql = new StringBuilder(LIST_CLUSTERS_CROSSING_THRESHOLD); + Transaction txn = Transaction.currentTxn(); + PreparedStatement pstmt = null; + List result = new ArrayList(); + StringBuilder sql = new StringBuilder(LIST_CLUSTERS_CROSSING_THRESHOLD); + + + try { + pstmt = txn.prepareAutoCloseStatement(sql.toString()); + pstmt.setLong(1,compute_requested); + pstmt.setShort(2,capacityType); + pstmt.setFloat(3,disableThreshold); + pstmt.setLong(4,zoneId); - - try { - pstmt = txn.prepareAutoCloseStatement(sql.toString()); - pstmt.setLong(1, compute_requested); - pstmt.setLong(2, zoneId); - pstmt.setShort(3, capacityType); - pstmt.setFloat(4, disableThreshold*overProvFactor); - - ResultSet rs = pstmt.executeQuery(); - while (rs.next()) { - result.add(rs.getLong(1)); - } - return result; - } catch (SQLException e) { - throw new CloudRuntimeException("DB Exception on: " + sql, e); - } catch (Throwable e) { - throw new CloudRuntimeException("Caught: " + sql, e); - } - } + ResultSet rs = pstmt.executeQuery(); + while (rs.next()) { + result.add(rs.getLong(1)); + } + return result; + } catch (SQLException e) { + throw new CloudRuntimeException("DB Exception on: " + sql, e); + } catch (Throwable e) { + throw new CloudRuntimeException("Caught: " + sql, e); + } + } /*public static String preparePlaceHolders(int length) { StringBuilder builder = new StringBuilder(); @@ -242,6 +248,8 @@ public class CapacityDaoImpl extends GenericDaoBase implements PreparedStatement pstmt = null; List result = new ArrayList(); + List resourceIdList = new ArrayList(); + switch(level){ case 1: // List all the capacities grouped by zone, capacity Type finalQuery.append(LIST_CAPACITY_GROUP_BY_ZONE_TYPE_PART1); @@ -257,17 +265,21 @@ public class CapacityDaoImpl extends GenericDaoBase implements } if (zoneId != null){ - finalQuery.append(" AND data_center_id="+zoneId); + finalQuery.append(" AND data_center_id = ?" ); + resourceIdList.add(zoneId); } if (podId != null){ - finalQuery.append(" AND pod_id="+podId); + finalQuery.append(" AND pod_id = ?" ); + resourceIdList.add(podId); } if (clusterId != null){ - finalQuery.append(" AND cluster_id="+clusterId); + finalQuery.append(" AND cluster_id = ?" 
); + resourceIdList.add(clusterId ); } if (capacityType != null){ - finalQuery.append(" AND capacity_type="+capacityType); - } + finalQuery.append(" AND capacity_type = ?"); + resourceIdList.add(capacityType.longValue() ); + } switch(level){ case 1: // List all the capacities grouped by zone, capacity Type @@ -283,12 +295,16 @@ public class CapacityDaoImpl extends GenericDaoBase implements break; } - finalQuery.append(limit.toString()); - + finalQuery.append("?"); + resourceIdList.add((long) limit); + try { - pstmt = txn.prepareAutoCloseStatement(finalQuery.toString()); + pstmt = txn.prepareAutoCloseStatement(finalQuery.toString()); + for (int i = 0; i < resourceIdList.size(); i++){ + pstmt.setLong(1+i, resourceIdList.get(i)); + } ResultSet rs = pstmt.executeQuery(); - while (rs.next()) { + while (rs.next()) { SummedCapacity summedCapacity = new SummedCapacity( rs.getLong(1), rs.getLong(2), rs.getFloat(3), (short)rs.getLong(4), rs.getLong(5), level != 1 ? rs.getLong(6): null, @@ -390,8 +406,8 @@ public class CapacityDaoImpl extends GenericDaoBase implements } @Override - public List listClustersInZoneOrPodByHostCapacities(long id, int requiredCpu, long requiredRam, short capacityTypeForOrdering, boolean isZone, float cpuOverprovisioningFactor){ - Transaction txn = Transaction.currentTxn(); + public List listClustersInZoneOrPodByHostCapacities(long id, int requiredCpu, long requiredRam, short capacityTypeForOrdering, boolean isZone){ + Transaction txn = Transaction.currentTxn(); PreparedStatement pstmt = null; List result = new ArrayList(); @@ -414,11 +430,11 @@ public class CapacityDaoImpl extends GenericDaoBase implements pstmt = txn.prepareAutoCloseStatement(sql.toString()); pstmt.setLong(1, id); pstmt.setShort(2, CapacityVO.CAPACITY_TYPE_CPU); - pstmt.setFloat(3, cpuOverprovisioningFactor); + pstmt.setString(3,"cpuOvercommitRatio"); pstmt.setLong(4, requiredCpu); pstmt.setLong(5, id); pstmt.setShort(6, CapacityVO.CAPACITY_TYPE_MEMORY); - pstmt.setFloat(7, 1); + pstmt.setString(7,"memoryOvercommitRatio"); pstmt.setLong(8, requiredRam); ResultSet rs = pstmt.executeQuery(); @@ -435,8 +451,8 @@ public class CapacityDaoImpl extends GenericDaoBase implements @Override - public List listHostsWithEnoughCapacity(int requiredCpu, long requiredRam, Long clusterId, String hostType, float cpuOverprovisioningFactor){ - Transaction txn = Transaction.currentTxn(); + public List listHostsWithEnoughCapacity(int requiredCpu, long requiredRam, Long clusterId, String hostType){ + Transaction txn = Transaction.currentTxn(); PreparedStatement pstmt = null; List result = new ArrayList(); @@ -445,9 +461,8 @@ public class CapacityDaoImpl extends GenericDaoBase implements pstmt = txn.prepareAutoCloseStatement(sql.toString()); pstmt.setLong(1, clusterId); pstmt.setString(2, hostType); - pstmt.setFloat(3, cpuOverprovisioningFactor); - pstmt.setLong(4, requiredCpu); - pstmt.setLong(5, requiredRam); + pstmt.setLong(3, requiredCpu); + pstmt.setLong(4, requiredRam); ResultSet rs = pstmt.executeQuery(); while (rs.next()) { @@ -629,31 +644,44 @@ public class CapacityDaoImpl extends GenericDaoBase implements } @Override - public Pair, Map> orderClustersByAggregateCapacity(long id, short capacityTypeForOrdering, boolean isZone, float cpuOverprovisioningFactor){ + public Pair, Map> orderClustersByAggregateCapacity(long id, short capacityTypeForOrdering, boolean isZone){ Transaction txn = Transaction.currentTxn(); PreparedStatement pstmt = null; List result = new ArrayList(); Map clusterCapacityMap = new HashMap(); + 
StringBuilder sql = new StringBuilder(); + if (capacityTypeForOrdering != Capacity.CAPACITY_TYPE_CPU && capacityTypeForOrdering != Capacity.CAPACITY_TYPE_MEMORY) { + sql.append(ORDER_CLUSTERS_BY_AGGREGATE_CAPACITY_PART1); + } + else { + sql.append(ORDER_CLUSTERS_BY_AGGREGATE_OVERCOMMIT_CAPACITY_PART1); + } - StringBuilder sql = new StringBuilder(ORDER_CLUSTERS_BY_AGGREGATE_CAPACITY_PART1); if(isZone){ - sql.append("data_center_id = ?"); + sql.append(" data_center_id = ?"); }else{ - sql.append("pod_id = ?"); + sql.append(" pod_id = ?"); } - sql.append(ORDER_CLUSTERS_BY_AGGREGATE_CAPACITY_PART2); + if (capacityTypeForOrdering != Capacity.CAPACITY_TYPE_CPU && capacityTypeForOrdering != Capacity.CAPACITY_TYPE_MEMORY){ + sql.append(ORDER_CLUSTERS_BY_AGGREGATE_CAPACITY_PART2); + } + else { + sql.append(ORDER_CLUSTERS_BY_AGGREGATE_OVERCOMMIT_CAPACITY_PART2); + } + try { pstmt = txn.prepareAutoCloseStatement(sql.toString()); - if(capacityTypeForOrdering == CapacityVO.CAPACITY_TYPE_CPU){ - pstmt.setFloat(1, cpuOverprovisioningFactor); - pstmt.setFloat(4, cpuOverprovisioningFactor); - }else{ - pstmt.setFloat(1, 1); - pstmt.setFloat(4, 1); + pstmt.setLong(1, id); + pstmt.setShort(2,capacityTypeForOrdering); + + if (capacityTypeForOrdering == Capacity.CAPACITY_TYPE_CPU){ + pstmt.setString(3,"cpuOvercommitRatio"); } - pstmt.setLong(2, id); - pstmt.setShort(3, capacityTypeForOrdering); + else if (capacityTypeForOrdering == Capacity.CAPACITY_TYPE_MEMORY){ + pstmt.setString(3,"memoryOvercommitRatio"); + } + ResultSet rs = pstmt.executeQuery(); while (rs.next()) { Long clusterId = rs.getLong(1); @@ -669,22 +697,25 @@ public class CapacityDaoImpl extends GenericDaoBase implements } @Override - public List listPodsByHostCapacities(long zoneId, int requiredCpu, long requiredRam, short capacityType, float cpuOverprovisioningFactor) { + public List listPodsByHostCapacities(long zoneId, int requiredCpu, long requiredRam, short capacityType) { Transaction txn = Transaction.currentTxn(); PreparedStatement pstmt = null; List result = new ArrayList(); - StringBuilder sql = new StringBuilder(LIST_PODSINZONE_BY_HOST_CAPACITIES); + StringBuilder sql = new StringBuilder(LIST_PODSINZONE_BY_HOST_CAPACITY_TYPE); + sql.append("AND capacity.pod_id IN ("); + sql.append(LIST_PODSINZONE_BY_HOST_CAPACITY_TYPE); + sql.append(")"); try { pstmt = txn.prepareAutoCloseStatement(sql.toString()); pstmt.setLong(1, zoneId); pstmt.setShort(2, CapacityVO.CAPACITY_TYPE_CPU); - pstmt.setFloat(3, cpuOverprovisioningFactor); + pstmt.setString(3, "cpuOvercommitRatio"); pstmt.setLong(4, requiredCpu); pstmt.setLong(5, zoneId); pstmt.setShort(6, CapacityVO.CAPACITY_TYPE_MEMORY); - pstmt.setFloat(7, 1); + pstmt.setString(7,"memoryOvercommitRatio" ); pstmt.setLong(8, requiredRam); ResultSet rs = pstmt.executeQuery(); @@ -700,26 +731,22 @@ public class CapacityDaoImpl extends GenericDaoBase implements } @Override - public Pair, Map> orderPodsByAggregateCapacity(long zoneId, short capacityTypeForOrdering, float cpuOverprovisioningFactor) { + public Pair, Map> orderPodsByAggregateCapacity(long zoneId, short capacityTypeForOrdering) { Transaction txn = Transaction.currentTxn(); PreparedStatement pstmt = null; List result = new ArrayList(); Map podCapacityMap = new HashMap(); - + StringBuilder sql = new StringBuilder(ORDER_PODS_BY_AGGREGATE_CAPACITY); try { pstmt = txn.prepareAutoCloseStatement(sql.toString()); pstmt.setLong(2, zoneId); pstmt.setShort(3, capacityTypeForOrdering); - + if(capacityTypeForOrdering == CapacityVO.CAPACITY_TYPE_CPU){ - 
pstmt.setFloat(1, cpuOverprovisioningFactor); - pstmt.setFloat(4, cpuOverprovisioningFactor); - }else{ - pstmt.setFloat(1, 1); - pstmt.setFloat(4, 1); + pstmt.setString(3, "cpuOvercommitRatio"); } - + ResultSet rs = pstmt.executeQuery(); while (rs.next()) { Long podId = rs.getLong(1); @@ -737,7 +764,7 @@ public class CapacityDaoImpl extends GenericDaoBase implements @Override public void updateCapacityState(Long dcId, Long podId, Long clusterId, Long hostId, String capacityState) { Transaction txn = Transaction.currentTxn(); - StringBuilder sql = new StringBuilder(UPDATE_CAPACITY_STATE); + StringBuilder sql = new StringBuilder(UPDATE_CAPACITY_STATE); List resourceIdList = new ArrayList(); if (dcId != null){ diff --git a/server/src/com/cloud/deploy/FirstFitPlanner.java b/server/src/com/cloud/deploy/FirstFitPlanner.java index b452da06807..4933467bd8f 100755 --- a/server/src/com/cloud/deploy/FirstFitPlanner.java +++ b/server/src/com/cloud/deploy/FirstFitPlanner.java @@ -28,6 +28,7 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import com.cloud.dc.*; import org.apache.log4j.Logger; import com.cloud.agent.manager.allocator.HostAllocator; @@ -38,11 +39,6 @@ import com.cloud.capacity.CapacityVO; import com.cloud.capacity.dao.CapacityDao; import com.cloud.configuration.Config; import com.cloud.configuration.dao.ConfigurationDao; -import com.cloud.dc.ClusterVO; -import com.cloud.dc.DataCenter; -import com.cloud.dc.DataCenterVO; -import com.cloud.dc.HostPodVO; -import com.cloud.dc.Pod; import com.cloud.dc.dao.ClusterDao; import com.cloud.dc.dao.DataCenterDao; import com.cloud.dc.dao.HostPodDao; @@ -100,6 +96,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { @Inject protected AccountManager _accountMgr; @Inject protected StorageManager _storageMgr; @Inject DataStoreManager dataStoreMgr; + @Inject protected ClusterDetailsDao _clusterDetailsDao; //@com.cloud.utils.component.Inject(adapter=StoragePoolAllocator.class) @Inject protected List _storagePoolAllocators; @@ -128,9 +125,6 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { int cpu_requested = offering.getCpu() * offering.getSpeed(); long ram_requested = offering.getRamSize() * 1024L * 1024L; - String opFactor = _configDao.getValue(Config.CPUOverprovisioningFactor.key()); - float cpuOverprovisioningFactor = NumbersUtil.parseFloat(opFactor, 1); - if (s_logger.isDebugEnabled()) { s_logger.debug("DeploymentPlanner allocation algorithm: "+_allocationAlgorithm); @@ -199,7 +193,12 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { s_logger.debug("The last Host, hostId: "+ host.getId() +" already has max Running VMs(count includes system VMs), skipping this and trying other available hosts"); }else{ if (host.getStatus() == Status.Up && host.getResourceState() == ResourceState.Enabled) { - if(_capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, true, cpuOverprovisioningFactor, true)){ + long cluster_id = host.getClusterId(); + ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id,"cpuOvercommitRatio"); + ClusterDetailsVO cluster_detail_ram = _clusterDetailsDao.findDetail(cluster_id,"memoryOvercommitRatio"); + Float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue()); + Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue()); + 
if(_capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, true, cpuOvercommitRatio, memoryOvercommitRatio, true)){ s_logger.debug("The last host of this VM is UP and has enough capacity"); s_logger.debug("Now checking for suitable pools under zone: "+host.getDataCenterId() +", pod: "+ host.getPodId()+", cluster: "+ host.getClusterId()); //search for storage under the zone, pod, cluster of the last host. @@ -287,12 +286,9 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { ServiceOffering offering = vmProfile.getServiceOffering(); int requiredCpu = offering.getCpu() * offering.getSpeed(); long requiredRam = offering.getRamSize() * 1024L * 1024L; - String opFactor = _configDao.getValue(Config.CPUOverprovisioningFactor.key()); - float cpuOverprovisioningFactor = NumbersUtil.parseFloat(opFactor, 1); - //list pods under this zone by cpu and ram capacity List prioritizedPodIds = new ArrayList(); - Pair, Map> podCapacityInfo = listPodsByCapacity(plan.getDataCenterId(), requiredCpu, requiredRam, cpuOverprovisioningFactor); + Pair, Map> podCapacityInfo = listPodsByCapacity(plan.getDataCenterId(), requiredCpu, requiredRam); List podsWithCapacity = podCapacityInfo.first(); if(!podsWithCapacity.isEmpty()){ @@ -350,11 +346,9 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { DataCenter dc = _dcDao.findById(vm.getDataCenterId()); int requiredCpu = offering.getCpu() * offering.getSpeed(); long requiredRam = offering.getRamSize() * 1024L * 1024L; - String opFactor = _configDao.getValue(Config.CPUOverprovisioningFactor.key()); - float cpuOverprovisioningFactor = NumbersUtil.parseFloat(opFactor, 1); //list clusters under this zone by cpu and ram capacity - Pair, Map> clusterCapacityInfo = listClustersByCapacity(id, requiredCpu, requiredRam, avoid, isZone, cpuOverprovisioningFactor); + Pair, Map> clusterCapacityInfo = listClustersByCapacity(id, requiredCpu, requiredRam, avoid, isZone); List prioritizedClusterIds = clusterCapacityInfo.first(); if(!prioritizedClusterIds.isEmpty()){ if(avoid.getClustersToAvoid() != null){ @@ -468,30 +462,30 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { // For each capacity get the cluster list crossing the threshold and remove it from the clusterList that will be used for vm allocation. 
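        // Note: the planner no longer passes an over-provisioning factor here; the per-cluster cpuOvercommitRatio/memoryOvercommitRatio rows in cluster_details are expected to be applied inside the CapacityDao query.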
for(short capacity : capacityList){ - - if (clusterListForVmAllocation == null || clusterListForVmAllocation.size() == 0){ - return; + + if (clusterListForVmAllocation == null || clusterListForVmAllocation.size() == 0){ + return; + } + if (capacity == Capacity.CAPACITY_TYPE_CPU) { + clustersCrossingThreshold = _capacityDao.listClustersCrossingThreshold(capacity, plan.getDataCenterId(), + capacityThresholdMap.get(capacity), cpu_requested); } - - if (capacity == Capacity.CAPACITY_TYPE_CPU){ - clustersCrossingThreshold = _capacityDao.listClustersCrossingThreshold(Capacity.CAPACITY_TYPE_CPU, plan.getDataCenterId(), - capacityThresholdMap.get(capacity), cpu_requested, ApiDBUtils.getCpuOverprovisioningFactor()); - }else{ + else if (capacity == Capacity.CAPACITY_TYPE_MEMORY ) { clustersCrossingThreshold = _capacityDao.listClustersCrossingThreshold(capacity, plan.getDataCenterId(), - capacityThresholdMap.get(capacity), ram_requested, 1.0f);//Mem overprov not supported yet - } - - - if (clustersCrossingThreshold != null && clustersCrossingThreshold.size() != 0){ - // addToAvoid Set - avoid.addClusterList(clustersCrossingThreshold); - // Remove clusters crossing disabled threshold - clusterListForVmAllocation.removeAll(clustersCrossingThreshold); - - s_logger.debug("Cannot allocate cluster list " + clustersCrossingThreshold.toString() + " for vm creation since their allocated percentage" + - " crosses the disable capacity threshold: " + capacityThresholdMap.get(capacity) + " for capacity Type : " + capacity + ", skipping these clusters"); + capacityThresholdMap.get(capacity), ram_requested ); } + + if (clustersCrossingThreshold != null && clustersCrossingThreshold.size() != 0){ + // addToAvoid Set + avoid.addClusterList(clustersCrossingThreshold); + // Remove clusters crossing disabled threshold + clusterListForVmAllocation.removeAll(clustersCrossingThreshold); + + s_logger.debug("Cannot allocate cluster list " + clustersCrossingThreshold.toString() + " for vm creation since their allocated percentage" + + " crosses the disable capacity threshold: " + capacityThresholdMap.get(capacity) + " for capacity Type : " + capacity + ", skipping these clusters"); + } + } } @@ -560,7 +554,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { } - protected Pair, Map> listClustersByCapacity(long id, int requiredCpu, long requiredRam, ExcludeList avoid, boolean isZone, float cpuOverprovisioningFactor){ + protected Pair, Map> listClustersByCapacity(long id, int requiredCpu, long requiredRam, ExcludeList avoid, boolean isZone){ //look at the aggregate available cpu and ram per cluster //although an aggregate value may be false indicator that a cluster can host a vm, it will at the least eliminate those clusters which definitely cannot @@ -574,14 +568,11 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { capacityType = CapacityVO.CAPACITY_TYPE_MEMORY; } - if (s_logger.isDebugEnabled()) { - s_logger.debug("CPUOverprovisioningFactor considered: " + cpuOverprovisioningFactor); - } - List clusterIdswithEnoughCapacity = _capacityDao.listClustersInZoneOrPodByHostCapacities(id, requiredCpu, requiredRam, capacityType, isZone, cpuOverprovisioningFactor); + List clusterIdswithEnoughCapacity = _capacityDao.listClustersInZoneOrPodByHostCapacities(id, requiredCpu, requiredRam, capacityType, isZone); if (s_logger.isTraceEnabled()) { s_logger.trace("ClusterId List having enough CPU and RAM capacity: " + clusterIdswithEnoughCapacity); } - Pair, Map> result = 
_capacityDao.orderClustersByAggregateCapacity(id, capacityType, isZone, cpuOverprovisioningFactor);
+        Pair, Map> result = _capacityDao.orderClustersByAggregateCapacity(id, capacityType, isZone);
         List clusterIdsOrderedByAggregateCapacity = result.first();
         //only keep the clusters that have enough capacity to host this VM
         if (s_logger.isTraceEnabled()) {
@@ -597,7 +588,8 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
     }
-    protected Pair, Map> listPodsByCapacity(long zoneId, int requiredCpu, long requiredRam, float cpuOverprovisioningFactor){
+
+    protected Pair, Map> listPodsByCapacity(long zoneId, int requiredCpu, long requiredRam){
         //look at the aggregate available cpu and ram per pod
         //although an aggregate value may be false indicator that a pod can host a vm, it will at the least eliminate those pods which definitely cannot
@@ -611,14 +603,11 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
             capacityType = CapacityVO.CAPACITY_TYPE_MEMORY;
         }
-        if (s_logger.isDebugEnabled()) {
-            s_logger.debug("CPUOverprovisioningFactor considered: " + cpuOverprovisioningFactor);
-        }
-        List podIdswithEnoughCapacity = _capacityDao.listPodsByHostCapacities(zoneId, requiredCpu, requiredRam, capacityType, cpuOverprovisioningFactor);
+        List podIdswithEnoughCapacity = _capacityDao.listPodsByHostCapacities(zoneId, requiredCpu, requiredRam, capacityType);
         if (s_logger.isTraceEnabled()) {
             s_logger.trace("PodId List having enough CPU and RAM capacity: " + podIdswithEnoughCapacity);
         }
-        Pair, Map> result = _capacityDao.orderPodsByAggregateCapacity(zoneId, capacityType, cpuOverprovisioningFactor);
+        Pair, Map> result = _capacityDao.orderPodsByAggregateCapacity(zoneId, capacityType);
         List podIdsOrderedByAggregateCapacity = result.first();
         //only keep the clusters that have enough capacity to host this VM
         if (s_logger.isTraceEnabled()) {
diff --git a/server/src/com/cloud/hypervisor/HypervisorGuruBase.java b/server/src/com/cloud/hypervisor/HypervisorGuruBase.java
index e158962aa11..efe93966fe9 100644
--- a/server/src/com/cloud/hypervisor/HypervisorGuruBase.java
+++ b/server/src/com/cloud/hypervisor/HypervisorGuruBase.java
@@ -76,9 +76,11 @@ public abstract class HypervisorGuruBase extends AdapterBase implements Hypervis
         ServiceOffering offering = vmProfile.getServiceOffering();
         VirtualMachine vm = vmProfile.getVirtualMachine();
-
-        VirtualMachineTO to = new VirtualMachineTO(vm.getId(), vm.getInstanceName(), vm.getType(), offering.getCpu(), offering.getSpeed(),
-                offering.getRamSize() * 1024l * 1024l, offering.getRamSize() * 1024l * 1024l, null, null, vm.isHaEnabled(), vm.limitCpuUse(), vm.getVncPassword());
+        Long minMemory = (long) (offering.getRamSize() / vmProfile.getMemoryOvercommitRatio());
+        int minspeed = (int) (offering.getSpeed() / vmProfile.getCpuOvercommitRatio());
+        int maxspeed = (offering.getSpeed());
+        VirtualMachineTO to = new VirtualMachineTO(vm.getId(), vm.getInstanceName(), vm.getType(), offering.getCpu(), minspeed, maxspeed,
+                minMemory * 1024l * 1024l, offering.getRamSize() * 1024l * 1024l, null, null, vm.isHaEnabled(), vm.limitCpuUse(), vm.getVncPassword());
         to.setBootArgs(vmProfile.getBootArgs());
         List nicProfiles = vmProfile.getNics();
diff --git a/server/src/com/cloud/resource/ResourceManagerImpl.java b/server/src/com/cloud/resource/ResourceManagerImpl.java
index 98044fb5968..0df1563f9c7 100755
--- a/server/src/com/cloud/resource/ResourceManagerImpl.java
+++ b/server/src/com/cloud/resource/ResourceManagerImpl.java
@@ -30,6 +30,7 @@ 
import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; +import com.cloud.dc.*; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.command.admin.cluster.AddClusterCmd; import org.apache.cloudstack.api.command.admin.cluster.DeleteClusterCmd; @@ -72,12 +73,6 @@ import com.cloud.cluster.ManagementServerNode; import com.cloud.configuration.Config; import com.cloud.configuration.ConfigurationManager; import com.cloud.configuration.dao.ConfigurationDao; -import com.cloud.dc.ClusterDetailsDao; -import com.cloud.dc.ClusterVO; -import com.cloud.dc.DataCenterIpAddressVO; -import com.cloud.dc.DataCenterVO; -import com.cloud.dc.HostPodVO; -import com.cloud.dc.PodCluster; import com.cloud.dc.dao.ClusterDao; import com.cloud.dc.dao.ClusterVSMMapDao; import com.cloud.dc.dao.DataCenterDao; @@ -483,6 +478,11 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, clusterId = cluster.getId(); result.add(cluster); + ClusterDetailsVO cluster_detail_cpu = new ClusterDetailsVO(clusterId, "cpuOvercommitRatio", Float.toString(cmd.getCpuOvercommitRatio())); + ClusterDetailsVO cluster_detail_ram = new ClusterDetailsVO(clusterId, "memoryOvercommitRatio", Float.toString(cmd.getMemoryOvercommitRaito())); + _clusterDetailsDao.persist(cluster_detail_cpu); + _clusterDetailsDao.persist(cluster_detail_ram); + if (clusterType == Cluster.ClusterType.CloudManaged) { return result; } @@ -494,6 +494,10 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, details.put("password", password); _clusterDetailsDao.persist(cluster.getId(), details); + _clusterDetailsDao.persist(cluster_detail_cpu); + _clusterDetailsDao.persist(cluster_detail_ram); + + boolean success = false; try { try { @@ -1061,7 +1065,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, @Override @DB public Cluster updateCluster(Cluster clusterToUpdate, String clusterType, - String hypervisor, String allocationState, String managedstate) { + String hypervisor, String allocationState, String managedstate, Float memoryOvercommitRaito, Float cpuOvercommitRatio) { ClusterVO cluster = (ClusterVO) clusterToUpdate; // Verify cluster information and update the cluster if needed diff --git a/server/src/com/cloud/vm/VirtualMachineManagerImpl.java b/server/src/com/cloud/vm/VirtualMachineManagerImpl.java index 2b70ff68914..f8448c0ac8f 100755 --- a/server/src/com/cloud/vm/VirtualMachineManagerImpl.java +++ b/server/src/com/cloud/vm/VirtualMachineManagerImpl.java @@ -37,6 +37,7 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import com.cloud.dc.*; import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; @@ -76,6 +77,7 @@ import com.cloud.configuration.dao.ConfigurationDao; import com.cloud.dc.DataCenter; import com.cloud.dc.DataCenterVO; import com.cloud.dc.HostPodVO; +import com.cloud.consoleproxy.ConsoleProxyManager; import com.cloud.dc.dao.DataCenterDao; import com.cloud.dc.dao.HostPodDao; import com.cloud.deploy.DataCenterDeployment; @@ -157,6 +159,7 @@ import com.cloud.vm.snapshot.VMSnapshot; import com.cloud.vm.snapshot.VMSnapshotManager; import com.cloud.vm.snapshot.VMSnapshotVO; import com.cloud.vm.snapshot.dao.VMSnapshotDao; +import com.cloud.vm.dao.UserVmDetailsDao; @Local(value = VirtualMachineManager.class) public class VirtualMachineManagerImpl extends ManagerBase implements 
VirtualMachineManager, Listener { @@ -232,6 +235,10 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac @Inject protected VMSnapshotManager _vmSnapshotMgr = null; + @Inject + protected ClusterDetailsDao _clusterDetailsDao; + @Inject + protected UserVmDetailsDao _uservmDetailsDao; @Inject protected ConfigurationDao _configDao; @@ -398,6 +405,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac VirtualMachineGuru guru = getVmGuru(vm); guru.finalizeExpunge(vm); + //remove the overcommit detials from the uservm details + _uservmDetailsDao.deleteDetails(vm.getId()); if (s_logger.isDebugEnabled()) { s_logger.debug("Expunged " + vm); @@ -718,6 +727,11 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac long destHostId = dest.getHost().getId(); vm.setPodId(dest.getPod().getId()); + Long cluster_id = dest.getCluster().getId(); + ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id,"cpuOvercommitRatio"); + ClusterDetailsVO cluster_detail_ram = _clusterDetailsDao.findDetail(cluster_id,"memoryOvercommitRatio"); + vmProfile.setcpuOvercommitRatio(Float.parseFloat(cluster_detail_cpu.getValue())); + vmProfile.setramOvercommitRatio(Float.parseFloat(cluster_detail_ram.getValue())); try { if (!changeState(vm, Event.OperationRetry, destHostId, work, Step.Prepare)) { diff --git a/server/src/com/cloud/vm/VirtualMachineProfileImpl.java b/server/src/com/cloud/vm/VirtualMachineProfileImpl.java index eb9e5ad29c2..24f44cb07ac 100644 --- a/server/src/com/cloud/vm/VirtualMachineProfileImpl.java +++ b/server/src/com/cloud/vm/VirtualMachineProfileImpl.java @@ -43,13 +43,16 @@ public class VirtualMachineProfileImpl implements Virtua T _vm; ServiceOfferingVO _offering; VMTemplateVO _template; + UserVmDetailVO _userVmDetails; Map _params; List _nics = new ArrayList(); List _disks = new ArrayList(); StringBuilder _bootArgs = new StringBuilder(); Account _owner; BootloaderType _bootloader; - + Float cpuOvercommitRatio = 1.0f; + Float memoryOvercommitRatio = 1.0f; + VirtualMachine.Type _type; public VirtualMachineProfileImpl(T vm, VMTemplateVO template, ServiceOfferingVO offering, Account owner, Map params) { @@ -239,6 +242,25 @@ public class VirtualMachineProfileImpl implements Virtua public void setServiceOffering(ServiceOfferingVO offering) { _offering = offering; } - - + + public void setcpuOvercommitRatio(Float cpuOvercommitRatio){ + this.cpuOvercommitRatio= cpuOvercommitRatio; + + } + + public void setramOvercommitRatio(Float memoryOvercommitRatio){ + this.memoryOvercommitRatio= memoryOvercommitRatio; + + } + @Override + public Float getCpuOvercommitRatio(){ + return this.cpuOvercommitRatio; + } + + @Override + public Float getMemoryOvercommitRatio(){ + return this.memoryOvercommitRatio; + } + + } diff --git a/server/test/com/cloud/capacity/CapacityManagerTest.java b/server/test/com/cloud/capacity/CapacityManagerTest.java new file mode 100644 index 00000000000..381d401e188 --- /dev/null +++ b/server/test/com/cloud/capacity/CapacityManagerTest.java @@ -0,0 +1,61 @@ +package com.cloud.capacity; +import com.cloud.capacity.dao.CapacityDao; +import com.cloud.dc.ClusterDetailsDao; +import com.cloud.dc.ClusterDetailsVO; +import com.cloud.service.ServiceOfferingVO; +import com.cloud.service.dao.ServiceOfferingDao; +import com.cloud.vm.UserVmDetailVO; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.dao.UserVmDetailsDao; +import org.apache.log4j.Logger; +import org.junit.*; +import org.junit.Test; 
+import org.mockito.Mockito; + +import static org.mockito.Mockito.*; + + +public class CapacityManagerTest { + CapacityDao CDao = mock(CapacityDao.class); + ServiceOfferingDao SOfferingDao = mock(ServiceOfferingDao.class); + ClusterDetailsDao ClusterDetailsDao= mock(com.cloud.dc.ClusterDetailsDao.class); + CapacityManagerImpl capMgr; + private ServiceOfferingVO svo = mock(ServiceOfferingVO.class); + private CapacityVO cvo_cpu = mock(CapacityVO.class); + private CapacityVO cvo_ram = mock(CapacityVO.class); + private VirtualMachine vm = mock(VirtualMachine.class); + private ClusterDetailsVO cluster_detail_cpu = mock(ClusterDetailsVO.class); + private ClusterDetailsVO cluster_detail_ram = mock(ClusterDetailsVO.class); + + public CapacityManagerImpl setUp() { + CapacityManagerImpl capMgr = new CapacityManagerImpl(); + ((CapacityManagerImpl)capMgr)._clusterDetailsDao= ClusterDetailsDao; + capMgr._capacityDao = CDao; + capMgr._offeringsDao = SOfferingDao; + return capMgr; + } + + @Test + public void allocateCapacityTest(){ + capMgr=setUp(); + when(vm.getHostId()).thenReturn(1l); + when(vm.getServiceOfferingId()).thenReturn(2l); + when(SOfferingDao.findById(anyLong())).thenReturn(svo); + when(CDao.findByHostIdType(anyLong(), eq(Capacity.CAPACITY_TYPE_CPU))).thenReturn(cvo_cpu); + when(CDao.findByHostIdType(anyLong(), eq(Capacity.CAPACITY_TYPE_MEMORY))).thenReturn(cvo_ram); + when(cvo_cpu.getUsedCapacity()).thenReturn(500l); + when(cvo_cpu.getTotalCapacity()).thenReturn(2000l); + when(cvo_ram.getUsedCapacity()).thenReturn(3000l); + when(cvo_ram.getTotalCapacity()).thenReturn((long) 1024*1024*1024); + when(svo.getCpu()).thenReturn(500); + when(svo.getRamSize()).thenReturn(512); + when(cvo_cpu.getReservedCapacity()).thenReturn(0l); + when(cvo_ram.getReservedCapacity()).thenReturn(0l); + when(cluster_detail_ram.getValue()).thenReturn("1.5"); + when(cluster_detail_cpu.getValue()).thenReturn("2"); + when(CDao.update(anyLong(), isA(CapacityVO.class))).thenReturn(true) ; + boolean hasCapacity=capMgr.checkIfHostHasCapacity(1l,500,1024*1024*1024,false,2,2,false); + Assert.assertTrue(hasCapacity); + + } +} \ No newline at end of file diff --git a/server/test/com/cloud/resource/MockResourceManagerImpl.java b/server/test/com/cloud/resource/MockResourceManagerImpl.java index 889318bcd46..e7ab8581581 100644 --- a/server/test/com/cloud/resource/MockResourceManagerImpl.java +++ b/server/test/com/cloud/resource/MockResourceManagerImpl.java @@ -53,7 +53,6 @@ import com.cloud.storage.S3; import com.cloud.storage.Swift; import com.cloud.template.VirtualMachineTemplate; import com.cloud.utils.Pair; -import com.cloud.utils.component.Manager; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.fsm.NoTransitionException; @@ -111,7 +110,7 @@ public class MockResourceManagerImpl extends ManagerBase implements ResourceMana */ @Override public Cluster updateCluster(Cluster cluster, String clusterType, String hypervisor, String allocationState, - String managedstate) { + String managedstate, Float memoryOvercommitRaito, Float cpuOvercommitRatio) { // TODO Auto-generated method stub return null; } diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareHelper.java b/vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareHelper.java index 47ff8e20004..352c5847336 100644 --- a/vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareHelper.java +++ b/vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareHelper.java @@ -539,7 +539,12 @@ public class VmwareHelper { 
cpuInfo.setReservation((long)cpuReservedMhz); vmConfig.setCpuAllocation(cpuInfo); - + if (cpuSpeedMHz != cpuReservedMhz){ + vmConfig.setCpuHotAddEnabled(true); + } + if (memoryMB != memoryReserveMB){ + vmConfig.setMemoryHotAddEnabled(true); + } ResourceAllocationInfo memInfo = new ResourceAllocationInfo(); memInfo.setLimit((long)memoryMB); memInfo.setReservation((long)memoryReserveMB);
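A minimal sketch of the intended arithmetic, for illustration only (the variable names and sample values below are assumptions, not part of the patch; in the code above the inputs come from the service offering and from the cpuOvercommitRatio/memoryOvercommitRatio rows persisted in cluster_details):

    // Illustrative sketch: "min" values are the reserved share after overcommit,
    // "max" values remain the advertised offering size.
    float cpuOvercommitRatio = 2.0f;      // assumed cluster_details value
    float memoryOvercommitRatio = 1.5f;   // assumed cluster_details value
    int offeringSpeedMHz = 1000;          // assumed offering CPU speed
    int offeringRamMB = 1024;             // assumed offering RAM size

    int minSpeedMHz = (int) (offeringSpeedMHz / cpuOvercommitRatio);                   // 500 MHz reserved
    long minRamBytes = (long) (offeringRamMB / memoryOvercommitRatio) * 1024L * 1024L; // ~682 MB reserved
    long maxRamBytes = offeringRamMB * 1024L * 1024L;                                  // 1024 MB limit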