Bug 9539 - cpu.overprovisioning.factor does not work

Changes:
- Changed the host allocators/planners to use cpu.overprovisioning.factor.
- Removed the following behavior: when adding a new host, total_cpu in op_host_capacity was set to actual_cpu * cpu.overprovisioning.factor. It is now set to actual_cpu.
- The listCapacities response now calculates total CPU as actual * cpu.overprovisioning.factor. (This does not change the reported value - listCapacities previously read total CPU from the op_host_capacity table, which already had the factor applied; the factor now has to be applied on top of the DB entry. See the sketch after this list.)
- HostResponse has a new field, 'cpuWithOverprovisioning', that returns the host's CPU after applying cpu.overprovisioning.factor.

- The DB upgrade from 222 to 224 now updates total_cpu in op_host_capacity to the actual_cpu for each Routing host.
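
The net effect, as a minimal sketch (illustrative class, method, and variable names only, not the actual CloudStack classes): op_host_capacity keeps the host's actual CPU, and the allocators, planners, and the listCapacities/HostResponse code apply cpu.overprovisioning.factor when they read it.

// A minimal sketch of the new capacity semantics; names and numbers are illustrative.
public class CpuOverprovisioningExample {

    // op_host_capacity now stores the host's actual CPU, e.g. 8 cores * 2000 MHz = 16000.
    static long actualTotalCpu(int cpus, long speedMhz) {
        return cpus * speedMhz;
    }

    // Allocators/planners now apply cpu.overprovisioning.factor at check time,
    // instead of relying on a pre-multiplied value in the DB.
    static boolean hasCpuCapacity(long actualTotalCpu, long usedCpu, long reservedCpu,
                                  int requestedCpu, float cpuOverprovisioningFactor) {
        long totalWithOverprovisioning = (long) (actualTotalCpu * cpuOverprovisioningFactor);
        long freeCpu = totalWithOverprovisioning - (usedCpu + reservedCpu);
        return freeCpu >= requestedCpu;
    }

    public static void main(String[] args) {
        long actual = actualTotalCpu(8, 2000);   // 16000 MHz stored in op_host_capacity
        float factor = 2.0f;                     // cpu.overprovisioning.factor
        // listCapacities and HostResponse.cpuWithOverprovisioning report actual * factor = 32000
        System.out.println("cpuWithOverprovisioning: " + (long) (actual * factor));
        // 32000 - (4000 + 0) = 28000 >= 20000, so the request fits
        System.out.println("fits: " + hasCpuCapacity(actual, 4000, 0, 20000, factor));
    }
}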
prachi 2011-04-22 17:54:36 -07:00
parent e6194b6e09
commit b84a7477f0
13 changed files with 4933 additions and 4803 deletions


@ -80,6 +80,9 @@ public class HostResponse extends BaseResponse {
@SerializedName("cpuused") @Param(description="the amount of the host's CPU currently used")
private String cpuUsed;
@SerializedName("cpuwithoverprovisioning") @Param(description="the amount of the host's CPU after applying the cpu.overprovisioning.factor ")
private String cpuWithOverprovisioning;
@SerializedName("averageload") @Param(description="the cpu average load on the host")
private Long averageLoad;
@ -486,5 +489,13 @@ public class HostResponse extends BaseResponse {
public void setAllocationState(String allocationState) {
this.allocationState = allocationState;
}
}
public String getCpuWithOverprovisioning() {
return cpuWithOverprovisioning;
}
public void setCpuWithOverprovisioning(String cpuWithOverprovisioning) {
this.cpuWithOverprovisioning = cpuWithOverprovisioning;
}
}


@ -2723,7 +2723,7 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, ResourceS
if (capacityVOCpus != null && !capacityVOCpus.isEmpty()) {
CapacityVO CapacityVOCpu = capacityVOCpus.get(0);
long newTotalCpu = (long) (server.getCpus().longValue() * server.getSpeed().longValue() * _cpuOverProvisioningFactor);
long newTotalCpu = (long) (server.getCpus().longValue() * server.getSpeed().longValue());
if ((CapacityVOCpu.getTotalCapacity() <= newTotalCpu) || ((CapacityVOCpu.getUsedCapacity() + CapacityVOCpu.getReservedCapacity()) <= newTotalCpu)) {
CapacityVOCpu.setTotalCapacity(newTotalCpu);
} else if ((CapacityVOCpu.getUsedCapacity() + CapacityVOCpu.getReservedCapacity() > newTotalCpu) && (CapacityVOCpu.getUsedCapacity() < newTotalCpu)) {
@ -2736,7 +2736,7 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, ResourceS
_capacityDao.update(CapacityVOCpu.getId(), CapacityVOCpu);
} else {
CapacityVO capacity = new CapacityVO(server.getId(), server.getDataCenterId(), server.getPodId(), server.getClusterId(), 0L, (long) (server.getCpus().longValue()
* server.getSpeed().longValue() * _cpuOverProvisioningFactor), CapacityVO.CAPACITY_TYPE_CPU);
* server.getSpeed().longValue()), CapacityVO.CAPACITY_TYPE_CPU);
_capacityDao.persist(capacity);
}


@ -162,7 +162,7 @@ public class FirstFitAllocator implements HostAllocator {
boolean numCpusGood = host.getCpus().intValue() >= offering.getCpu();
int cpu_requested = offering.getCpu() * offering.getSpeed();
long ram_requested = offering.getRamSize() * 1024L * 1024L;
boolean hostHasCapacity = _capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, false);
boolean hostHasCapacity = _capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, false, _factor);
if (numCpusGood && hostHasCapacity) {
if (s_logger.isDebugEnabled()) {
@ -304,10 +304,6 @@ public class FirstFitAllocator implements HostAllocator {
Map<String, String> configs = _configDao.getConfiguration(params);
String opFactor = configs.get("cpu.overprovisioning.factor");
_factor = NumbersUtil.parseFloat(opFactor, 1);
//Over provisioning factor cannot be < 1. Reset to 1 in such cases
if (_factor < 1){
_factor = 1;
}
String allocationAlgorithm = configs.get("vm.allocation.algorithm");
if (allocationAlgorithm != null && (allocationAlgorithm.equals("random") || allocationAlgorithm.equals("firstfit"))) {


@ -25,8 +25,10 @@ import java.util.Map;
import com.cloud.agent.AgentManager;
import com.cloud.async.AsyncJobManager;
import com.cloud.async.AsyncJobVO;
import com.cloud.configuration.Config;
import com.cloud.configuration.ConfigurationService;
import com.cloud.configuration.ResourceCount.ResourceType;
import com.cloud.configuration.dao.ConfigurationDao;
import com.cloud.dc.AccountVlanMapVO;
import com.cloud.dc.ClusterVO;
import com.cloud.dc.DataCenterVO;
@ -103,6 +105,7 @@ import com.cloud.user.dao.AccountDao;
import com.cloud.user.dao.UserDao;
import com.cloud.user.dao.UserStatisticsDao;
import com.cloud.uservm.UserVm;
import com.cloud.utils.NumbersUtil;
import com.cloud.utils.component.ComponentLocator;
import com.cloud.vm.DomainRouterVO;
import com.cloud.vm.InstanceGroupVO;
@ -156,6 +159,7 @@ public class ApiDBUtils {
private static NetworkOfferingDao _networkOfferingDao;
private static NetworkDao _networkDao;
private static ConfigurationService _configMgr;
private static ConfigurationDao _configDao;
static {
_ms = (ManagementServer) ComponentLocator.getComponent(ManagementServer.Name);
@ -199,6 +203,7 @@ public class ApiDBUtils {
_securityGroupDao = locator.getDao(SecurityGroupDao.class);
_networkOfferingDao = locator.getDao(NetworkOfferingDao.class);
_networkDao = locator.getDao(NetworkDao.class);
_configDao = locator.getDao(ConfigurationDao.class);
// Note: stats collector should already have been initialized by this time, otherwise a null instance is returned
_statsCollector = StatsCollector.getInstance();
@ -569,5 +574,11 @@ public class ApiDBUtils {
public static Long getDedicatedNetworkDomain(long networkId) {
return _networkMgr.getDedicatedNetworkDomain(networkId);
}
public static float getCpuOverprovisioningFactor(){
String opFactor = _configDao.getValue(Config.CPUOverprovisioningFactor.key());
float cpuOverprovisioningFactor = NumbersUtil.parseFloat(opFactor, 1);
return cpuOverprovisioningFactor;
}
}


@ -522,6 +522,9 @@ public class ApiResponseHelper implements ResponseGenerator {
}
cpuAlloc = decimalFormat.format(((float) cpu / (float) (host.getCpus() * host.getSpeed())) * 100f) + "%";
hostResponse.setCpuAllocated(cpuAlloc);
String cpuWithOverprovisioning = new Float(host.getCpus() * host.getSpeed() * ApiDBUtils.getCpuOverprovisioningFactor()).toString();
hostResponse.setCpuWithOverprovisioning(cpuWithOverprovisioning);
}
// calculate cpu utilized
@ -2093,6 +2096,8 @@ public class ApiResponseHelper implements ResponseGenerator {
}
}
float cpuOverprovisioningFactor = ApiDBUtils.getCpuOverprovisioningFactor();
// collect all the capacity types, sum allocated/used and sum total...get one capacity number for each
for (Capacity capacity : hostCapacities) {
if (poolIdsToIgnore.contains(capacity.getHostOrPoolId())) {
@ -2110,11 +2115,17 @@ public class ApiResponseHelper implements ResponseGenerator {
Long totalCapacity = totalCapacityMap.get(key);
Long usedCapacity = usedCapacityMap.get(key);
//reset overprovisioning factor to 1
float overprovisioningFactor = 1;
if (capacityType == Capacity.CAPACITY_TYPE_CPU){
overprovisioningFactor = cpuOverprovisioningFactor;
}
if (totalCapacity == null) {
totalCapacity = new Long(capacity.getTotalCapacity());
totalCapacity = new Long((long)(capacity.getTotalCapacity() * overprovisioningFactor));
} else {
totalCapacity = new Long(capacity.getTotalCapacity() + totalCapacity);
totalCapacity = new Long((long)(capacity.getTotalCapacity() * overprovisioningFactor)) + totalCapacity;
}
if (usedCapacity == null) {
@ -2140,10 +2151,15 @@ public class ApiResponseHelper implements ResponseGenerator {
totalCapacity = totalCapacityMap.get(keyForPodTotal);
usedCapacity = usedCapacityMap.get(keyForPodTotal);
overprovisioningFactor = 1;
if (capacityType == Capacity.CAPACITY_TYPE_CPU){
overprovisioningFactor = cpuOverprovisioningFactor;
}
if (totalCapacity == null) {
totalCapacity = new Long(capacity.getTotalCapacity());
totalCapacity = new Long((long)(capacity.getTotalCapacity() * overprovisioningFactor));
} else {
totalCapacity = new Long(capacity.getTotalCapacity() + totalCapacity);
totalCapacity = new Long((long)(capacity.getTotalCapacity() * overprovisioningFactor)) + totalCapacity;
}
if (usedCapacity == null) {


@ -31,5 +31,5 @@ public interface CapacityManager extends Manager {
void allocateVmCapacity(VirtualMachine vm, boolean fromLastHost);
boolean checkIfHostHasCapacity(long hostId, Integer cpu, long ram, boolean checkFromReservedCapacity);
boolean checkIfHostHasCapacity(long hostId, Integer cpu, long ram, boolean checkFromReservedCapacity, float cpuOverprovisioningFactor);
}


@ -124,7 +124,13 @@ public class CapacityManagerImpl implements CapacityManager , StateListener<Stat
long usedMem = capacityMemory.getUsedCapacity();
long reservedCpu = capacityCpu.getReservedCapacity();
long reservedMem = capacityMemory.getReservedCapacity();
long totalCpu = capacityCpu.getTotalCapacity();
long actualTotalCpu = capacityCpu.getTotalCapacity();
String opFactor = _configDao.getValue(Config.CPUOverprovisioningFactor.key());
float cpuOverprovisioningFactor = NumbersUtil.parseFloat(opFactor, 1);
long totalCpu = (long)(actualTotalCpu * cpuOverprovisioningFactor);
if (s_logger.isDebugEnabled()) {
s_logger.debug("Hosts's actual total CPU: " + actualTotalCpu + " and CPU after applying overprovisioning: " + totalCpu);
}
long totalMem = capacityMemory.getTotalCapacity();
if (!moveFromReserved) {
@ -153,12 +159,12 @@ public class CapacityManagerImpl implements CapacityManager , StateListener<Stat
}
}
s_logger.debug("release cpu from host: " + hostId + ", old used: " + usedCpu + ",reserved: " + reservedCpu + ", total: " + totalCpu +
"; new used: " + capacityCpu.getUsedCapacity() + ",reserved:" + capacityCpu.getReservedCapacity() + ",total: " + capacityCpu.getTotalCapacity() +
s_logger.debug("release cpu from host: " + hostId + ", old used: " + usedCpu + ",reserved: " + reservedCpu + ", actual total: " + actualTotalCpu + ", total with overprovisioning: " + totalCpu +
"; new used: " + capacityCpu.getUsedCapacity() + ",reserved:" + capacityCpu.getReservedCapacity() +
"; movedfromreserved: " + moveFromReserved + ",moveToReservered" + moveToReservered);
s_logger.debug("release mem from host: " + hostId + ", old used: " + usedMem + ",reserved: " + reservedMem + ", total: " + totalMem +
"; new used: " + capacityMemory.getUsedCapacity() + ",reserved:" + capacityMemory.getReservedCapacity() + ",total: " + capacityMemory.getTotalCapacity() +
"; new used: " + capacityMemory.getUsedCapacity() + ",reserved:" + capacityMemory.getReservedCapacity() +
"; movedfromreserved: " + moveFromReserved + ",moveToReservered" + moveToReservered);
_capacityDao.update(capacityCpu.getId(), capacityCpu);
@ -188,7 +194,10 @@ public class CapacityManagerImpl implements CapacityManager , StateListener<Stat
}
int cpu = svo.getCpu() * svo.getSpeed();
long ram = svo.getRamSize() * 1024L * 1024L;
long ram = svo.getRamSize() * 1024L * 1024L;
String opFactor = _configDao.getValue(Config.CPUOverprovisioningFactor.key());
float cpuOverprovisioningFactor = NumbersUtil.parseFloat(opFactor, 1);
Transaction txn = Transaction.currentTxn();
@ -201,7 +210,11 @@ public class CapacityManagerImpl implements CapacityManager , StateListener<Stat
long usedMem = capacityMem.getUsedCapacity();
long reservedCpu = capacityCpu.getReservedCapacity();
long reservedMem = capacityMem.getReservedCapacity();
long totalCpu = capacityCpu.getTotalCapacity();
long actualTotalCpu = capacityCpu.getTotalCapacity();
long totalCpu = (long)(actualTotalCpu * cpuOverprovisioningFactor);
if (s_logger.isDebugEnabled()) {
s_logger.debug("Hosts's actual total CPU: " + actualTotalCpu + " and CPU after applying overprovisioning: " + totalCpu);
}
long totalMem = capacityMem.getTotalCapacity();
long freeCpu = totalCpu - (reservedCpu + usedCpu);
@ -236,13 +249,13 @@ public class CapacityManagerImpl implements CapacityManager , StateListener<Stat
}
s_logger.debug("CPU STATS after allocation: for host: " + hostId + ", old used: " + usedCpu + ", old reserved: " +
reservedCpu + ", old total: " + totalCpu +
"; new used:" + capacityCpu.getUsedCapacity() + ", reserved:" + capacityCpu.getReservedCapacity() + ", total: " + capacityCpu.getTotalCapacity() +
reservedCpu + ", actual total: " + actualTotalCpu + ", total with overprovisioning: " + totalCpu +
"; new used:" + capacityCpu.getUsedCapacity() + ", reserved:" + capacityCpu.getReservedCapacity() +
"; requested cpu:" + cpu + ",alloc_from_last:" + fromLastHost);
s_logger.debug("RAM STATS after allocation: for host: " + hostId + ", old used: " + usedMem + ", old reserved: " +
reservedMem + ", old total: " + totalMem + "; new used: " + capacityMem.getUsedCapacity() + ", reserved: " +
capacityMem.getReservedCapacity() + ", total: " + capacityMem.getTotalCapacity() + "; requested mem: " + ram + ",alloc_from_last:" + fromLastHost);
reservedMem + ", total: " + totalMem + "; new used: " + capacityMem.getUsedCapacity() + ", reserved: " +
capacityMem.getReservedCapacity() + "; requested mem: " + ram + ",alloc_from_last:" + fromLastHost);
_capacityDao.update(capacityCpu.getId(), capacityCpu);
_capacityDao.update(capacityMem.getId(), capacityMem);
@ -254,11 +267,11 @@ public class CapacityManagerImpl implements CapacityManager , StateListener<Stat
}
@Override
public boolean checkIfHostHasCapacity(long hostId, Integer cpu, long ram, boolean checkFromReservedCapacity){
public boolean checkIfHostHasCapacity(long hostId, Integer cpu, long ram, boolean checkFromReservedCapacity, float cpuOverprovisioningFactor){
boolean hasCapacity = false;
if (s_logger.isDebugEnabled()) {
s_logger.debug("Checking if host: " + hostId + " has enough capacity for requested CPU: "+ cpu + " and requested RAM: "+ ram);
s_logger.debug("Checking if host: " + hostId + " has enough capacity for requested CPU: "+ cpu + " and requested RAM: "+ ram + " , cpuOverprovisioningFactor: "+cpuOverprovisioningFactor);
}
CapacityVO capacityCpu = _capacityDao.findByHostIdType(hostId, CapacityVO.CAPACITY_TYPE_CPU);
@ -268,7 +281,12 @@ public class CapacityManagerImpl implements CapacityManager , StateListener<Stat
long usedMem = capacityMem.getUsedCapacity();
long reservedCpu = capacityCpu.getReservedCapacity();
long reservedMem = capacityMem.getReservedCapacity();
long totalCpu = capacityCpu.getTotalCapacity();
long actualTotalCpu = capacityCpu.getTotalCapacity();
long totalCpu = (long)(actualTotalCpu * cpuOverprovisioningFactor);
if (s_logger.isDebugEnabled()) {
s_logger.debug("Hosts's actual total CPU: " + actualTotalCpu + " and CPU after applying overprovisioning: " + totalCpu);
}
long totalMem = capacityMem.getTotalCapacity();
@ -318,7 +336,7 @@ public class CapacityManagerImpl implements CapacityManager , StateListener<Stat
}
s_logger.debug("STATS: Can alloc CPU from host: " + hostId + ", used: " + usedCpu + ", reserved: " +
reservedCpu + ", total: " + totalCpu +
reservedCpu + ", actual total: "+actualTotalCpu + ", total with overprovisioning: " + totalCpu +
"; requested cpu:" + cpu + ",alloc_from_last_host?:" + checkFromReservedCapacity);
s_logger.debug("STATS: Can alloc MEM from host: " + hostId + ", used: " + usedMem + ", reserved: " +
@ -330,7 +348,7 @@ public class CapacityManagerImpl implements CapacityManager , StateListener<Stat
", reservedMem: " + reservedMem + ", requested mem: " + ram);
} else {
s_logger.debug("STATS: Failed to alloc resource from host: " + hostId + " reservedCpu: " + reservedCpu + ", used cpu: " + usedCpu + ", requested cpu: " + cpu +
", total cpu: " + totalCpu +
", actual total cpu: "+actualTotalCpu + ", total cpu with overprovisioning: " + totalCpu +
", reservedMem: " + reservedMem + ", used Mem: " + usedMem + ", requested mem: " + ram + ", total Mem:" + totalMem);
}


@ -28,6 +28,6 @@ public interface CapacityDao extends GenericDao<CapacityVO, Long> {
void clearStorageCapacities();
CapacityVO findByHostIdType(Long hostId, short capacityType);
void clearNonStorageCapacities2();
List<Long> orderClustersInZoneOrPodByHostCapacities(long id, int requiredCpu, long requiredRam, short capacityTypeForOrdering, boolean isZone);
List<Long> listHostsWithEnoughCapacity(int requiredCpu, long requiredRam, Long clusterId, String hostType);
List<Long> orderClustersInZoneOrPodByHostCapacities(long id, int requiredCpu, long requiredRam, short capacityTypeForOrdering, boolean isZone, float cpuOverprovisioningFactor);
List<Long> listHostsWithEnoughCapacity(int requiredCpu, long requiredRam, Long clusterId, String hostType, float cpuOverprovisioningFactor);
}


@ -46,16 +46,16 @@ public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements
private static final String CLEAR_NON_STORAGE_CAPACITIES2 = "DELETE FROM `cloud`.`op_host_capacity` WHERE capacity_type<>2 AND capacity_type<>3 AND capacity_type<>6 AND capacity_type<>0 AND capacity_type<>1"; //clear non-storage and non-secondary_storage capacities
private static final String LIST_CLUSTERSINZONE_BY_HOST_CAPACITIES_PART1 = "SELECT DISTINCT cluster_id FROM `cloud`.`op_host_capacity` WHERE ";
private static final String LIST_CLUSTERSINZONE_BY_HOST_CAPACITIES_PART2 = " AND capacity_type = ? AND (total_capacity - used_capacity + reserved_capacity) >= ? " +
private static final String LIST_CLUSTERSINZONE_BY_HOST_CAPACITIES_PART2 = " AND capacity_type = ? AND ((total_capacity * ?) - used_capacity + reserved_capacity) >= ? " +
"AND cluster_id IN (SELECT distinct cluster_id FROM `cloud`.`op_host_capacity` WHERE ";
private static final String LIST_CLUSTERSINZONE_BY_HOST_CAPACITIES_PART3 = " AND capacity_type = ? AND (total_capacity - used_capacity + reserved_capacity) >= ?) " +
"ORDER BY (total_capacity - used_capacity + reserved_capacity) DESC";
private static final String LIST_CLUSTERSINZONE_BY_HOST_CAPACITIES_PART3 = " AND capacity_type = ? AND ((total_capacity * ?) - used_capacity + reserved_capacity) >= ?) " +
"ORDER BY ((total_capacity * ?) - used_capacity + reserved_capacity) DESC";
private SearchBuilder<CapacityVO> _hostIdTypeSearch;
private SearchBuilder<CapacityVO> _hostOrPoolIdSearch;
private static final String LIST_HOSTS_IN_CLUSTER_WITH_ENOUGH_CAPACITY = "SELECT a.host_id FROM (host JOIN op_host_capacity a ON host.id = a.host_id AND host.cluster_id = ? AND host.type = ? " +
"AND a.total_capacity - a.used_capacity >= ? and a.capacity_type = 1) " +
"AND (a.total_capacity * ? - a.used_capacity) >= ? and a.capacity_type = 1) " +
"JOIN op_host_capacity b ON a.host_id = b.host_id AND b.total_capacity - b.used_capacity >= ? AND b.capacity_type = 0";
public CapacityDaoImpl() {
@ -150,7 +150,7 @@ public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements
}
@Override
public List<Long> orderClustersInZoneOrPodByHostCapacities(long id, int requiredCpu, long requiredRam, short capacityTypeForOrdering, boolean isZone){
public List<Long> orderClustersInZoneOrPodByHostCapacities(long id, int requiredCpu, long requiredRam, short capacityTypeForOrdering, boolean isZone, float cpuOverprovisioningFactor){
Transaction txn = Transaction.currentTxn();
PreparedStatement pstmt = null;
List<Long> result = new ArrayList<Long>();
@ -174,16 +174,22 @@ public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements
pstmt.setLong(1, id);
if(capacityTypeForOrdering == CapacityVO.CAPACITY_TYPE_CPU){
pstmt.setShort(2, CapacityVO.CAPACITY_TYPE_CPU);
pstmt.setLong(3, requiredCpu);
pstmt.setLong(4, id);
pstmt.setShort(5, CapacityVO.CAPACITY_TYPE_MEMORY);
pstmt.setLong(6, requiredRam);
pstmt.setFloat(3, cpuOverprovisioningFactor);
pstmt.setLong(4, requiredCpu);
pstmt.setLong(5, id);
pstmt.setShort(6, CapacityVO.CAPACITY_TYPE_MEMORY);
pstmt.setFloat(7, 1);
pstmt.setLong(8, requiredRam);
pstmt.setFloat(9, cpuOverprovisioningFactor);
}else{
pstmt.setShort(2, CapacityVO.CAPACITY_TYPE_MEMORY);
pstmt.setLong(3, requiredRam);
pstmt.setLong(4, id);
pstmt.setShort(5, CapacityVO.CAPACITY_TYPE_CPU);
pstmt.setLong(6, requiredCpu);
pstmt.setFloat(3, 1);
pstmt.setLong(4, requiredRam);
pstmt.setLong(5, id);
pstmt.setShort(6, CapacityVO.CAPACITY_TYPE_CPU);
pstmt.setFloat(7, cpuOverprovisioningFactor);
pstmt.setLong(8, requiredCpu);
pstmt.setFloat(9, 1);
}
ResultSet rs = pstmt.executeQuery();
@ -200,7 +206,7 @@ public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements
@Override
public List<Long> listHostsWithEnoughCapacity(int requiredCpu, long requiredRam, Long clusterId, String hostType){
public List<Long> listHostsWithEnoughCapacity(int requiredCpu, long requiredRam, Long clusterId, String hostType, float cpuOverprovisioningFactor){
Transaction txn = Transaction.currentTxn();
PreparedStatement pstmt = null;
List<Long> result = new ArrayList<Long>();
@ -210,8 +216,9 @@ public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements
pstmt = txn.prepareAutoCloseStatement(sql.toString());
pstmt.setLong(1, clusterId);
pstmt.setString(2, hostType);
pstmt.setLong(3, requiredCpu);
pstmt.setLong(4, requiredRam);
pstmt.setFloat(3, cpuOverprovisioningFactor);
pstmt.setLong(4, requiredCpu);
pstmt.setLong(5, requiredRam);
ResultSet rs = pstmt.executeQuery();
while (rs.next()) {


@ -27,6 +27,8 @@ import javax.naming.ConfigurationException;
import org.apache.log4j.Logger;
import com.cloud.capacity.CapacityManager;
import com.cloud.configuration.Config;
import com.cloud.configuration.dao.ConfigurationDao;
import com.cloud.dc.ClusterVO;
import com.cloud.dc.DataCenter;
import com.cloud.dc.Pod;
@ -41,6 +43,7 @@ import com.cloud.host.dao.HostDao;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.offering.ServiceOffering;
import com.cloud.org.Cluster;
import com.cloud.utils.NumbersUtil;
import com.cloud.utils.component.Inject;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.vm.VirtualMachine;
@ -54,6 +57,7 @@ public class BareMetalPlanner implements DeploymentPlanner {
@Inject protected HostPodDao _podDao;
@Inject protected ClusterDao _clusterDao;
@Inject protected HostDao _hostDao;
@Inject protected ConfigurationDao _configDao;
@Inject protected CapacityManager _capacityMgr;
String _name;
@ -63,6 +67,10 @@ public class BareMetalPlanner implements DeploymentPlanner {
ServiceOffering offering = vmProfile.getServiceOffering();
String hostTag = null;
String opFactor = _configDao.getValue(Config.CPUOverprovisioningFactor.key());
float cpuOverprovisioningFactor = NumbersUtil.parseFloat(opFactor, 1);
if (vm.getLastHostId() != null) {
HostVO h = _hostDao.findById(vm.getLastHostId());
DataCenter dc = _dcDao.findById(h.getDataCenterId());
@ -110,7 +118,7 @@ public class BareMetalPlanner implements DeploymentPlanner {
for (HostVO h : hosts) {
if (h.getStatus() == Status.Up) {
if(_capacityMgr.checkIfHostHasCapacity(h.getId(), cpu_requested, ram_requested, false)){
if(_capacityMgr.checkIfHostHasCapacity(h.getId(), cpu_requested, ram_requested, false, cpuOverprovisioningFactor)){
s_logger.debug("Find host " + h.getId() + " has enough capacity");
DataCenter dc = _dcDao.findById(h.getDataCenterId());
Pod pod = _podDao.findById(h.getPodId());


@ -66,6 +66,7 @@ import com.cloud.storage.dao.GuestOSDao;
import com.cloud.storage.dao.StoragePoolDao;
import com.cloud.storage.dao.StoragePoolHostDao;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.utils.NumbersUtil;
import com.cloud.utils.Pair;
import com.cloud.utils.component.Adapters;
import com.cloud.utils.component.Inject;
@ -113,6 +114,10 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
int cpu_requested = offering.getCpu() * offering.getSpeed();
long ram_requested = offering.getRamSize() * 1024L * 1024L;
String opFactor = _configDao.getValue(Config.CPUOverprovisioningFactor.key());
float cpuOverprovisioningFactor = NumbersUtil.parseFloat(opFactor, 1);
s_logger.debug("In FirstFitPlanner:: plan");
s_logger.debug("Trying to allocate a host and storage pools from dc:" + plan.getDataCenterId() + ", pod:" + plan.getPodId() + ",cluster:" + plan.getClusterId() +
@ -163,7 +168,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
if (host.getStatus() == Status.Up && host.getHostAllocationState() == Host.HostAllocationState.Enabled) {
//check zone/pod/cluster are enabled
if(isEnabledForAllocation(vm.getDataCenterId(), vm.getPodId(), host.getClusterId())){
if(_capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, true)){
if(_capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, true, cpuOverprovisioningFactor)){
s_logger.debug("The last host of this VM is UP and has enough capacity");
s_logger.debug("Now checking for suitable pools under zone: "+vm.getDataCenterId() +", pod: "+ vm.getPodId()+", cluster: "+ host.getClusterId());
//search for storage under the zone, pod, cluster of the last host.
@ -221,7 +226,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
HostPodVO pod = _podDao.findById(podIdSpecified);
if (pod != null) {
//list clusters under this pod by cpu and ram capacity
clusterList = listClustersByCapacity(podIdSpecified, cpu_requested, ram_requested, avoid, false);
clusterList = listClustersByCapacity(podIdSpecified, cpu_requested, ram_requested, avoid, false, cpuOverprovisioningFactor);
if(!clusterList.isEmpty()){
if(avoid.getClustersToAvoid() != null){
if (s_logger.isDebugEnabled()) {
@ -259,7 +264,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
//consider all clusters under this zone.
s_logger.debug("Searching all possible resources under this Zone: "+ plan.getDataCenterId());
//list clusters under this zone by cpu and ram capacity
List<Long> prioritizedClusterIds = listClustersByCapacity(plan.getDataCenterId(), cpu_requested, ram_requested, avoid, true);
List<Long> prioritizedClusterIds = listClustersByCapacity(plan.getDataCenterId(), cpu_requested, ram_requested, avoid, true, cpuOverprovisioningFactor);
if(!prioritizedClusterIds.isEmpty()){
if(avoid.getClustersToAvoid() != null){
if (s_logger.isDebugEnabled()) {
@ -438,7 +443,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
return prioritizedPods;
}
protected List<Long> listClustersByCapacity(long id, int requiredCpu, long requiredRam, ExcludeList avoid, boolean isZone){
protected List<Long> listClustersByCapacity(long id, int requiredCpu, long requiredRam, ExcludeList avoid, boolean isZone, float cpuOverprovisioningFactor){
//look at the aggregate available cpu and ram per cluster
//although an aggregate value may be false indicator that a cluster can host a vm, it will at the least eliminate those clusters which definitely cannot
@ -451,8 +456,11 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
if("RAM".equalsIgnoreCase(capacityTypeToOrder)){
capacityType = CapacityVO.CAPACITY_TYPE_MEMORY;
}
List<Long> clusterIdswithEnoughCapacity = _capacityDao.orderClustersInZoneOrPodByHostCapacities(id, requiredCpu, requiredRam, capacityType, isZone);
if (s_logger.isDebugEnabled()) {
s_logger.debug("CPUOverprovisioningFactor considered: " + cpuOverprovisioningFactor);
}
List<Long> clusterIdswithEnoughCapacity = _capacityDao.orderClustersInZoneOrPodByHostCapacities(id, requiredCpu, requiredRam, capacityType, isZone, cpuOverprovisioningFactor);
if (s_logger.isDebugEnabled()) {
s_logger.debug("ClusterId List having enough aggregate capacity: "+clusterIdswithEnoughCapacity );
}

File diff suppressed because it is too large.


@ -66,6 +66,7 @@ public class Upgrade222to224 implements DbUpgrade {
updateUserStatsWithNetwork(conn);
dropIndexIfExists(conn);
fixBasicZoneNicCount(conn);
updateTotalCPUInOpHostCapacity(conn);
}
@Override
@ -290,4 +291,52 @@ public class Upgrade222to224 implements DbUpgrade {
throw new CloudRuntimeException("Unable to drop 'path' index for 'domain' table due to:", e);
}
}
private void updateTotalCPUInOpHostCapacity(Connection conn) {
PreparedStatement pstmt = null;
ResultSet rs = null;
PreparedStatement pstmtUpdate = null;
try {
// Load all Routing hosts
s_logger.debug("Updating total CPU capacity entries in op_host_capacity");
pstmt = conn.prepareStatement("SELECT id, cpus, speed FROM host WHERE type = 'Routing'");
rs = pstmt.executeQuery();
while (rs.next()) {
long hostId = rs.getLong(1);
int cpus = rs.getInt(2);
long speed = rs.getLong(3);
long totalCapacity = cpus * speed;
String updateSQL = "UPDATE op_host_capacity SET total_capacity = ? WHERE host_id = ? AND capacity_type = 1";
pstmtUpdate = conn.prepareStatement(updateSQL);
pstmtUpdate.setLong(1, totalCapacity);
pstmtUpdate.setLong(2, hostId);
pstmtUpdate.executeUpdate();
pstmtUpdate.close();
}
} catch (SQLException e) {
throw new CloudRuntimeException("Unable to update the total host CPU capacity in Op_Host_capacity table", e);
} finally {
if (pstmtUpdate != null) {
try {
pstmtUpdate.close();
} catch (SQLException e) {
}
}
if (rs != null) {
try {
rs.close();
} catch (SQLException e) {
}
}
if (pstmt != null) {
try {
pstmt.close();
} catch (SQLException e) {
}
}
}
}
}