CPU to Memory weight based algorithm to order cluster (#10997)

* CPU-to-memory weight-based algorithm to order clusters
The host.capacityType.to.order.clusters configuration now supports a new value, COMBINED.
When selected, it works together with host.capacityType.to.order.clusters.cputomemoryweight:
capacity is computed from both CPU and memory, blended using the configured weight factor.

* minor changes

* add unit tests

* update desc and add validation

* handle copilot review comments

* add log indicating chosen capacityType for ordering

---------

Co-authored-by: Rohit Yadav <rohit.yadav@shapeblue.com>
This commit is contained in:
Manoj Kumar 2025-07-15 16:40:53 +05:30 committed by GitHub
parent fb6adacc51
commit e8ab0ae70a
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
13 changed files with 661 additions and 38 deletions

View File

@ -90,9 +90,11 @@ public class ApiConstants {
public static final String CONVERT_INSTANCE_HOST_ID = "convertinstancehostid";
public static final String CONVERT_INSTANCE_STORAGE_POOL_ID = "convertinstancepoolid";
public static final String ENABLED_REVOCATION_CHECK = "enabledrevocationcheck";
public static final String COMBINED_CAPACITY_ORDERING = "COMBINED";
public static final String CONTROLLER = "controller";
public static final String CONTROLLER_UNIT = "controllerunit";
public static final String COPY_IMAGE_TAGS = "copyimagetags";
public static final String CPU_OVERCOMMIT_RATIO = "cpuOvercommitRatio";
public static final String CSR = "csr";
public static final String PRIVATE_KEY = "privatekey";
public static final String DATASTORE_HOST = "datastorehost";
@ -124,6 +126,7 @@ public class ApiConstants {
public static final String CNI_CONFIG_DETAILS = "cniconfigdetails";
public static final String CNI_CONFIG_NAME = "cniconfigname";
public static final String COMPONENT = "component";
public static final String CPU = "CPU";
public static final String CPU_CORE_PER_SOCKET = "cpucorepersocket";
public static final String CPU_NUMBER = "cpunumber";
public static final String CPU_SPEED = "cpuspeed";
@ -344,6 +347,7 @@ public class ApiConstants {
public static final String MAX_BACKUPS = "maxbackups";
public static final String MAX_CPU_NUMBER = "maxcpunumber";
public static final String MAX_MEMORY = "maxmemory";
public static final String MEMORY_OVERCOMMIT_RATIO = "memoryOvercommitRatio";
public static final String MIN_CPU_NUMBER = "mincpunumber";
public static final String MIN_MEMORY = "minmemory";
public static final String MIGRATION_TYPE = "migrationtype";
@ -441,6 +445,7 @@ public class ApiConstants {
public static final String PUBLIC_END_PORT = "publicendport";
public static final String PUBLIC_ZONE = "publiczone";
public static final String PURGE_RESOURCES = "purgeresources";
public static final String RAM = "RAM";
public static final String REBALANCE = "rebalance";
public static final String RECEIVED_BYTES = "receivedbytes";
public static final String RECONNECT = "reconnect";

View File

@ -65,6 +65,12 @@ public interface ConfigurationManager {
"allow.non.rfc1918.compliant.ips", "Advanced", "false",
"Allows non-compliant RFC 1918 IPs for Shared, Isolated networks and VPCs", true, null);
ConfigKey<Float> HostCapacityTypeCpuMemoryWeight = new ConfigKey<>(ConfigKey.CATEGORY_ADVANCED, Float.class,
"host.capacityType.to.order.clusters.cputomemoryweight",
"0.5",
"Weight for CPU (as a value between 0 and 1) applied to compute capacity for Pods, Clusters and Hosts for COMBINED capacityType for ordering. Weight for RAM will be (1 - weight of CPU)",
true, ConfigKey.Scope.Global);
/**
* @param offering
* @return

View File

@ -30,7 +30,7 @@ public interface CapacityDao extends GenericDao<CapacityVO, Long> {
List<CapacityVO> listByHostIdTypes(Long hostId, List<Short> capacityTypes);
List<Long> listClustersInZoneOrPodByHostCapacities(long id, long vmId, int requiredCpu, long requiredRam, short capacityTypeForOrdering, boolean isZone);
List<Long> listClustersInZoneOrPodByHostCapacities(long id, long vmId, int requiredCpu, long requiredRam, boolean isZone);
List<Long> listHostsWithEnoughCapacity(int requiredCpu, long requiredRam, Long clusterId, String hostType);
@ -48,7 +48,7 @@ public interface CapacityDao extends GenericDao<CapacityVO, Long> {
List<SummedCapacity> findFilteredCapacityBy(Integer capacityType, Long zoneId, Long podId, Long clusterId, List<Long> hostIds, List<Long> poolIds);
List<Long> listPodsByHostCapacities(long zoneId, int requiredCpu, long requiredRam, short capacityType);
List<Long> listPodsByHostCapacities(long zoneId, int requiredCpu, long requiredRam);
Pair<List<Long>, Map<Long, Double>> orderPodsByAggregateCapacity(long zoneId, short capacityType);
@ -65,4 +65,10 @@ public interface CapacityDao extends GenericDao<CapacityVO, Long> {
float findClusterConsumption(Long clusterId, short capacityType, long computeRequested);
Pair<List<Long>, Map<Long, Double>> orderHostsByFreeCapacity(Long zoneId, Long clusterId, short capacityType);
List<CapacityVO> listHostCapacityByCapacityTypes(Long zoneId, Long clusterId, List<Short> capacityTypes);
List<CapacityVO> listPodCapacityByCapacityTypes(Long zoneId, List<Short> capacityTypes);
List<CapacityVO> listClusterCapacityByCapacityTypes(Long zoneId, Long podId, List<Short> capacityTypes);
}

View File

@ -684,7 +684,7 @@ public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements
}
@Override
public List<Long> listClustersInZoneOrPodByHostCapacities(long id, long vmId, int requiredCpu, long requiredRam, short capacityTypeForOrdering, boolean isZone) {
public List<Long> listClustersInZoneOrPodByHostCapacities(long id, long vmId, int requiredCpu, long requiredRam, boolean isZone) {
TransactionLegacy txn = TransactionLegacy.currentTxn();
PreparedStatement pstmt = null;
List<Long> result = new ArrayList<Long>();
@ -1068,7 +1068,65 @@ public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements
}
@Override
public List<Long> listPodsByHostCapacities(long zoneId, int requiredCpu, long requiredRam, short capacityType) {
public List<CapacityVO> listHostCapacityByCapacityTypes(Long zoneId, Long clusterId, List<Short> capacityTypes) {
    // Search enabled host capacity rows of the given types, optionally scoped to a zone and/or cluster.
    final SearchBuilder<CapacityVO> builder = createSearchBuilder();
    builder.and("zoneId", builder.entity().getDataCenterId(), SearchCriteria.Op.EQ);
    builder.and("clusterId", builder.entity().getClusterId(), SearchCriteria.Op.EQ);
    builder.and("capacityTypes", builder.entity().getCapacityType(), SearchCriteria.Op.IN);
    builder.and("capacityState", builder.entity().getCapacityState(), SearchCriteria.Op.EQ);
    builder.done();

    final SearchCriteria<CapacityVO> criteria = builder.create();
    criteria.setParameters("capacityState", "Enabled");
    criteria.setParameters("capacityTypes", capacityTypes.toArray());
    // Null scope ids mean "do not filter on this column".
    if (zoneId != null) {
        criteria.setParameters("zoneId", zoneId);
    }
    if (clusterId != null) {
        criteria.setParameters("clusterId", clusterId);
    }
    return listBy(criteria);
}
@Override
public List<CapacityVO> listPodCapacityByCapacityTypes(Long zoneId, List<Short> capacityTypes) {
    // Search enabled pod capacity rows of the given types, optionally restricted to one zone.
    final SearchBuilder<CapacityVO> builder = createSearchBuilder();
    builder.and("zoneId", builder.entity().getDataCenterId(), SearchCriteria.Op.EQ);
    builder.and("capacityTypes", builder.entity().getCapacityType(), SearchCriteria.Op.IN);
    builder.and("capacityState", builder.entity().getCapacityState(), SearchCriteria.Op.EQ);
    builder.done();

    final SearchCriteria<CapacityVO> criteria = builder.create();
    criteria.setParameters("capacityState", "Enabled");
    criteria.setParameters("capacityTypes", capacityTypes.toArray());
    // A null zoneId means "all zones".
    if (zoneId != null) {
        criteria.setParameters("zoneId", zoneId);
    }
    return listBy(criteria);
}
@Override
public List<CapacityVO> listClusterCapacityByCapacityTypes(Long zoneId, Long podId, List<Short> capacityTypes) {
    // Search enabled cluster capacity rows of the given types, optionally scoped to a zone and/or pod.
    final SearchBuilder<CapacityVO> builder = createSearchBuilder();
    builder.and("zoneId", builder.entity().getDataCenterId(), SearchCriteria.Op.EQ);
    builder.and("podId", builder.entity().getPodId(), SearchCriteria.Op.EQ);
    builder.and("capacityTypes", builder.entity().getCapacityType(), SearchCriteria.Op.IN);
    builder.and("capacityState", builder.entity().getCapacityState(), SearchCriteria.Op.EQ);
    builder.done();

    final SearchCriteria<CapacityVO> criteria = builder.create();
    criteria.setParameters("capacityState", "Enabled");
    criteria.setParameters("capacityTypes", capacityTypes.toArray());
    // Null scope ids mean "do not filter on this column".
    if (zoneId != null) {
        criteria.setParameters("zoneId", zoneId);
    }
    if (podId != null) {
        criteria.setParameters("podId", podId);
    }
    return listBy(criteria);
}
@Override
public List<Long> listPodsByHostCapacities(long zoneId, int requiredCpu, long requiredRam) {
TransactionLegacy txn = TransactionLegacy.currentTxn();
PreparedStatement pstmt = null;
List<Long> result = new ArrayList<Long>();

View File

@ -204,6 +204,12 @@ SET `sort_key` = CASE
END;
-- End: Changes for Guest OS category cleanup
-- Update description for configuration: host.capacityType.to.order.clusters
UPDATE `cloud`.`configuration` SET
`description` = 'The host capacity type (CPU, RAM or COMBINED) is used by deployment planner to order clusters during VM resource allocation'
WHERE `name` = 'host.capacityType.to.order.clusters'
AND `description` = 'The host capacity type (CPU or RAM) is used by deployment planner to order clusters during VM resource allocation';
-- Whitelabel GUI
CREATE TABLE IF NOT EXISTS `cloud`.`gui_themes` (
`id` bigint(20) unsigned NOT NULL auto_increment,

View File

@ -51,6 +51,7 @@ import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
@ -61,6 +62,9 @@ public class CapacityDaoImplTest {
@InjectMocks
CapacityDaoImpl capacityDao = new CapacityDaoImpl();
@Mock
private CapacityVO mockEntity;
@Mock
private TransactionLegacy txn;
@Mock
@ -71,6 +75,8 @@ public class CapacityDaoImplTest {
private SearchBuilder<CapacityVO> searchBuilder;
private SearchCriteria<CapacityVO> searchCriteria;
private List<Short> capacityTypes;
private List<CapacityVO> expectedCapacities;
@Before
public void setUp() {
@ -83,6 +89,17 @@ public class CapacityDaoImplTest {
mockedTransactionLegacy = Mockito.mockStatic(TransactionLegacy.class);
mockedTransactionLegacy.when(TransactionLegacy::currentTxn).thenReturn(txn);
// Setup common test data
capacityTypes = Arrays.asList((short) 1, (short) 2, (short) 3);
expectedCapacities = Arrays.asList(mock(CapacityVO.class), mock(CapacityVO.class));
doReturn(expectedCapacities).when(capacityDao).listBy(searchCriteria);
}
// Helper: builds a CapacityVO mock whose getId() returns the given id.
// NOTE(review): no caller is visible in this chunk — confirm it is used elsewhere or remove.
private CapacityVO createMockCapacityVO(Long id) {
CapacityVO capacity = mock(CapacityVO.class);
when(capacity.getId()).thenReturn(id);
return capacity;
}
@After
@ -205,11 +222,11 @@ public class CapacityDaoImplTest {
when(pstmt.executeQuery()).thenReturn(resultSet);
when(resultSet.next()).thenReturn(false);
List<Long> resultZone = capacityDao.listClustersInZoneOrPodByHostCapacities(1L, 123L, 2, 2048L, (short)0, true);
List<Long> resultZone = capacityDao.listClustersInZoneOrPodByHostCapacities(1L, 123L, 2, 2048L, true);
assertNotNull(resultZone);
assertTrue(resultZone.isEmpty());
List<Long> resultPod = capacityDao.listClustersInZoneOrPodByHostCapacities(1L, 123L, 2, 2048L, (short)0, false);
List<Long> resultPod = capacityDao.listClustersInZoneOrPodByHostCapacities(1L, 123L, 2, 2048L, false);
assertNotNull(resultPod);
assertTrue(resultPod.isEmpty());
}
@ -281,7 +298,7 @@ public class CapacityDaoImplTest {
when(pstmt.executeQuery()).thenReturn(resultSet);
when(resultSet.next()).thenReturn(false);
List<Long> result = capacityDao.listPodsByHostCapacities(1L, 2, 1024L, (short)0);
List<Long> result = capacityDao.listPodsByHostCapacities(1L, 2, 1024L);
assertNotNull(result);
assertTrue(result.isEmpty());
}
@ -330,4 +347,207 @@ public class CapacityDaoImplTest {
assertNotNull(result);
assertTrue(result.isEmpty());
}
// Verifies the full happy path: all search-builder conditions are registered and
// every parameter (state, zone, cluster, types) is bound before listBy is invoked.
@Test
public void testListHostCapacityByCapacityTypes_WithAllParameters() {
// Given
Long zoneId = 100L;
Long clusterId = 200L;
// When
List<CapacityVO> result = capacityDao.listHostCapacityByCapacityTypes(zoneId, clusterId, capacityTypes);
// Then
verify(searchBuilder).and("zoneId", mockEntity.getDataCenterId(), SearchCriteria.Op.EQ);
verify(searchBuilder).and("clusterId", mockEntity.getClusterId(), SearchCriteria.Op.EQ);
verify(searchBuilder).and("capacityTypes", mockEntity.getCapacityType(), SearchCriteria.Op.IN);
verify(searchBuilder).and("capacityState", mockEntity.getCapacityState(), SearchCriteria.Op.EQ);
verify(searchCriteria).setParameters("capacityState", "Enabled");
verify(searchCriteria).setParameters("zoneId", zoneId);
verify(searchCriteria).setParameters("clusterId", clusterId);
verify(searchCriteria).setParameters("capacityTypes", capacityTypes.toArray());
verify(capacityDao).listBy(searchCriteria);
assertEquals("Should return expected capacities", expectedCapacities, result);
}
// Verifies that a null zoneId is never bound as a search parameter while the
// remaining parameters are still set.
@Test
public void testListHostCapacityByCapacityTypes_WithNullZoneId() {
    // Given
    Long clusterId = 200L;
    // When
    List<CapacityVO> result = capacityDao.listHostCapacityByCapacityTypes(null, clusterId, capacityTypes);
    // Then
    verify(searchCriteria).setParameters("capacityState", "Enabled");
    // Use never() for consistency with the sibling tests (was Mockito.times(0)).
    verify(searchCriteria, never()).setParameters(eq("zoneId"), any());
    verify(searchCriteria).setParameters("clusterId", clusterId);
    verify(searchCriteria).setParameters("capacityTypes", capacityTypes.toArray());
    assertEquals("Should return expected capacities", expectedCapacities, result);
}
// Verifies that a null clusterId is never bound as a search parameter.
@Test
public void testListHostCapacityByCapacityTypes_WithNullClusterId() {
// Given
Long zoneId = 100L;
// When
List<CapacityVO> result = capacityDao.listHostCapacityByCapacityTypes(zoneId, null, capacityTypes);
// Then
verify(searchCriteria).setParameters("capacityState", "Enabled");
verify(searchCriteria).setParameters("zoneId", zoneId);
verify(searchCriteria, never()).setParameters(eq("clusterId"), any());
verify(searchCriteria).setParameters("capacityTypes", capacityTypes.toArray());
assertEquals("Should return expected capacities", expectedCapacities, result);
}
// Verifies an empty capacity-type list is still passed through to the criteria
// (the DAO does not special-case it; the resulting IN clause is empty).
@Test
public void testListHostCapacityByCapacityTypes_WithEmptyCapacityTypes() {
// Given
Long zoneId = 100L;
Long clusterId = 200L;
List<Short> emptyCapacityTypes = Collections.emptyList();
// When
List<CapacityVO> result = capacityDao.listHostCapacityByCapacityTypes(zoneId, clusterId, emptyCapacityTypes);
// Then
verify(searchCriteria).setParameters("capacityTypes", emptyCapacityTypes.toArray());
assertEquals("Should return expected capacities", expectedCapacities, result);
}
// Happy path for the pod-level query: builder conditions and all parameters are set.
@Test
public void testListPodCapacityByCapacityTypes_WithAllParameters() {
// Given
Long zoneId = 100L;
// When
List<CapacityVO> result = capacityDao.listPodCapacityByCapacityTypes(zoneId, capacityTypes);
// Then
verify(searchBuilder).and("zoneId", mockEntity.getDataCenterId(), SearchCriteria.Op.EQ);
verify(searchBuilder).and("capacityTypes", mockEntity.getCapacityType(), SearchCriteria.Op.IN);
verify(searchBuilder).and("capacityState", mockEntity.getCapacityState(), SearchCriteria.Op.EQ);
verify(searchCriteria).setParameters("capacityState", "Enabled");
verify(searchCriteria).setParameters("zoneId", zoneId);
verify(searchCriteria).setParameters("capacityTypes", capacityTypes.toArray());
assertEquals("Should return expected capacities", expectedCapacities, result);
}
// Verifies the pod-level query skips the zoneId parameter when it is null.
@Test
public void testListPodCapacityByCapacityTypes_WithNullZoneId() {
// When
List<CapacityVO> result = capacityDao.listPodCapacityByCapacityTypes(null, capacityTypes);
// Then
verify(searchCriteria).setParameters("capacityState", "Enabled");
verify(searchCriteria, never()).setParameters(eq("zoneId"), any());
verify(searchCriteria).setParameters("capacityTypes", capacityTypes.toArray());
assertEquals("Should return expected capacities", expectedCapacities, result);
}
// Happy path for the cluster-level query: builder conditions and all parameters are set.
@Test
public void testListClusterCapacityByCapacityTypes_WithAllParameters() {
// Given
Long zoneId = 100L;
Long podId = 300L;
// When
List<CapacityVO> result = capacityDao.listClusterCapacityByCapacityTypes(zoneId, podId, capacityTypes);
// Then
verify(searchBuilder).and("zoneId", mockEntity.getDataCenterId(), SearchCriteria.Op.EQ);
verify(searchBuilder).and("podId", mockEntity.getPodId(), SearchCriteria.Op.EQ);
verify(searchBuilder).and("capacityTypes", mockEntity.getCapacityType(), SearchCriteria.Op.IN);
verify(searchBuilder).and("capacityState", mockEntity.getCapacityState(), SearchCriteria.Op.EQ);
verify(searchCriteria).setParameters("capacityState", "Enabled");
verify(searchCriteria).setParameters("zoneId", zoneId);
verify(searchCriteria).setParameters("podId", podId);
verify(searchCriteria).setParameters("capacityTypes", capacityTypes.toArray());
assertEquals("Should return expected capacities", expectedCapacities, result);
}
// Verifies the cluster-level query skips the zoneId parameter when it is null.
@Test
public void testListClusterCapacityByCapacityTypes_WithNullZoneId() {
// Given
Long podId = 300L;
// When
List<CapacityVO> result = capacityDao.listClusterCapacityByCapacityTypes(null, podId, capacityTypes);
// Then
verify(searchCriteria).setParameters("capacityState", "Enabled");
verify(searchCriteria, never()).setParameters(eq("zoneId"), any());
verify(searchCriteria).setParameters("podId", podId);
verify(searchCriteria).setParameters("capacityTypes", capacityTypes.toArray());
assertEquals("Should return expected capacities", expectedCapacities, result);
}
// Verifies the cluster-level query skips the podId parameter when it is null.
@Test
public void testListClusterCapacityByCapacityTypes_WithNullPodId() {
// Given
Long zoneId = 100L;
// When
List<CapacityVO> result = capacityDao.listClusterCapacityByCapacityTypes(zoneId, null, capacityTypes);
// Then
verify(searchCriteria).setParameters("capacityState", "Enabled");
verify(searchCriteria).setParameters("zoneId", zoneId);
verify(searchCriteria, never()).setParameters(eq("podId"), any());
verify(searchCriteria).setParameters("capacityTypes", capacityTypes.toArray());
assertEquals("Should return expected capacities", expectedCapacities, result);
}
// Verifies that when both scope ids are null, neither is bound — the query spans everything.
@Test
public void testListClusterCapacityByCapacityTypes_WithBothIdsNull() {
// When
List<CapacityVO> result = capacityDao.listClusterCapacityByCapacityTypes(null, null, capacityTypes);
// Then
verify(searchCriteria).setParameters("capacityState", "Enabled");
verify(searchCriteria, never()).setParameters(eq("zoneId"), any());
verify(searchCriteria, never()).setParameters(eq("podId"), any());
verify(searchCriteria).setParameters("capacityTypes", capacityTypes.toArray());
assertEquals("Should return expected capacities", expectedCapacities, result);
}
// Cross-cutting sanity check: each of the three list* methods builds exactly one
// SearchBuilder, finalizes it with done(), and issues exactly one listBy call.
@Test
public void testAllMethods_VerifySearchBuilderSetup() {
// Test that all methods properly set up the search builder
Long zoneId = 100L;
Long clusterId = 200L;
Long podId = 300L;
// Test host capacity method
capacityDao.listHostCapacityByCapacityTypes(zoneId, clusterId, capacityTypes);
// Test pod capacity method
capacityDao.listPodCapacityByCapacityTypes(zoneId, capacityTypes);
// Test cluster capacity method
capacityDao.listClusterCapacityByCapacityTypes(zoneId, podId, capacityTypes);
// Verify createSearchBuilder was called 3 times
verify(capacityDao, times(3)).createSearchBuilder();
// Verify done() was called 3 times
verify(searchBuilder, times(3)).done();
// Verify listBy was called 3 times
verify(capacityDao, times(3)).listBy(searchCriteria);
}
}

View File

@ -359,7 +359,7 @@ public class ImplicitPlannerTest {
clustersWithEnoughCapacity.add(3L);
when(
capacityDao.listClustersInZoneOrPodByHostCapacities(dataCenterId, 12L, noOfCpusInOffering * cpuSpeedInOffering, ramInOffering * 1024L * 1024L,
Capacity.CAPACITY_TYPE_CPU, true)).thenReturn(clustersWithEnoughCapacity);
true)).thenReturn(clustersWithEnoughCapacity);
Map<Long, Double> clusterCapacityMap = new HashMap<Long, Double>();
clusterCapacityMap.put(1L, 2048D);

View File

@ -30,15 +30,18 @@ import javax.inject.Inject;
import javax.naming.ConfigurationException;
import com.cloud.agent.manager.allocator.HostAllocator;
import com.cloud.capacity.Capacity;
import com.cloud.capacity.CapacityManager;
import com.cloud.capacity.CapacityVO;
import com.cloud.capacity.dao.CapacityDao;
import com.cloud.configuration.Config;
import com.cloud.configuration.ConfigurationManager;
import com.cloud.dc.ClusterDetailsDao;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.deploy.DeploymentPlan;
import com.cloud.deploy.DeploymentClusterPlanner;
import com.cloud.deploy.DeploymentPlanner.ExcludeList;
import com.cloud.deploy.FirstFitPlanner;
import com.cloud.gpu.GPU;
import com.cloud.host.DetailVO;
import com.cloud.host.Host;
@ -67,6 +70,7 @@ import com.cloud.vm.dao.VMInstanceDao;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
import org.jetbrains.annotations.NotNull;
import org.springframework.stereotype.Component;
/**
@ -295,7 +299,7 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator {
Collections.shuffle(hosts);
} else if (vmAllocationAlgorithm.equals("userdispersing")) {
hosts = reorderHostsByNumberOfVms(plan, hosts, account);
}else if(vmAllocationAlgorithm.equals("firstfitleastconsumed")){
} else if(vmAllocationAlgorithm.equals("firstfitleastconsumed")){
hosts = reorderHostsByCapacity(plan, hosts);
}
@ -372,13 +376,7 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator {
private List<? extends Host> reorderHostsByCapacity(DeploymentPlan plan, List<? extends Host> hosts) {
Long zoneId = plan.getDataCenterId();
Long clusterId = plan.getClusterId();
//Get capacity by which we should reorder
String capacityTypeToOrder = _configDao.getValue(Config.HostCapacityTypeToOrderClusters.key());
short capacityType = CapacityVO.CAPACITY_TYPE_CPU;
if("RAM".equalsIgnoreCase(capacityTypeToOrder)){
capacityType = CapacityVO.CAPACITY_TYPE_MEMORY;
}
Pair<List<Long>, Map<Long, Double>> result = _capacityDao.orderHostsByFreeCapacity(zoneId, clusterId, capacityType);
Pair<List<Long>, Map<Long, Double>> result = getOrderedHostsByCapacity(zoneId, clusterId);
List<Long> hostIdsByFreeCapacity = result.first();
Map<Long, String> sortedHostByCapacity = result.second().entrySet()
.stream()
@ -407,6 +405,37 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator {
return reorderedHosts;
}
/**
 * Orders hosts in the given zone/cluster by free capacity, honoring the configured
 * ordering type. CPU/RAM delegate to the free-capacity query; COMBINED blends CPU
 * and memory capacity using the configured CPU-to-memory weight.
 */
private Pair<List<Long>, Map<Long, Double>> getOrderedHostsByCapacity(Long zoneId, Long clusterId) {
    final double weight = ConfigurationManager.HostCapacityTypeCpuMemoryWeight.value();
    final String configured = _configDao.getValue(Config.HostCapacityTypeToOrderClusters.key());
    final short capacityType = FirstFitPlanner.getHostCapacityTypeToOrderCluster(configured, weight);
    logger.debug("CapacityType: {} is used for Host ordering", FirstFitPlanner.getCapacityTypeName(capacityType));
    // Non-negative values are real capacity types (CPU or RAM); delegate directly.
    if (capacityType >= 0) {
        return _capacityDao.orderHostsByFreeCapacity(zoneId, clusterId, capacityType);
    }
    // COMBINED: fetch both CPU and memory rows and merge them into one weighted score per host.
    final List<CapacityVO> rows = _capacityDao.listHostCapacityByCapacityTypes(zoneId, clusterId,
            List.of(Capacity.CAPACITY_TYPE_CPU, Capacity.CAPACITY_TYPE_MEMORY));
    final Map<Long, Double> ordered = getHostByCombinedCapacities(rows, weight);
    return new Pair<>(new ArrayList<>(ordered.keySet()), ordered);
}
/**
 * Computes a single weighted free-capacity score per host from its CPU and memory
 * capacity rows and returns the hosts ordered by that score, highest (most free) first.
 *
 * Score contribution per row = weight * free / total, where weight is
 * cpuToMemoryWeight for CPU rows and (1 - cpuToMemoryWeight) for memory rows.
 *
 * @param capacities        CPU and memory CapacityVO rows for the candidate hosts
 * @param cpuToMemoryWeight CPU weight in [0, 1]; memory weight is its complement
 * @return insertion-ordered map of hostId -> combined score, descending by score
 */
@NotNull
public static Map<Long, Double> getHostByCombinedCapacities(List<CapacityVO> capacities, double cpuToMemoryWeight) {
    Map<Long, Double> hostByComputedCapacity = new HashMap<>();
    for (CapacityVO capacityVO : capacities) {
        long totalCapacity = capacityVO.getTotalCapacity();
        if (totalCapacity <= 0) {
            // Guard: a zero total would make the division below yield NaN/Infinity,
            // which would silently corrupt the ordering of every host in the map.
            continue;
        }
        long hostId = capacityVO.getHostOrPoolId();
        double applicableWeight = capacityVO.getCapacityType() == Capacity.CAPACITY_TYPE_CPU ? cpuToMemoryWeight : 1 - cpuToMemoryWeight;
        double freeCapacity = totalCapacity - (capacityVO.getUsedCapacity() + capacityVO.getReservedCapacity());
        double capacityMetric = applicableWeight * freeCapacity / totalCapacity;
        // A host contributes one CPU row and one memory row; sum them into one score.
        hostByComputedCapacity.merge(hostId, capacityMetric, Double::sum);
    }
    // Sort descending by score, preserving that order in a LinkedHashMap.
    return hostByComputedCapacity.entrySet()
            .stream()
            .sorted(Map.Entry.comparingByValue(Comparator.reverseOrder()))
            .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue, (e1, e2) -> e1, LinkedHashMap::new));
}
private List<? extends Host> reorderHostsByNumberOfVms(DeploymentPlan plan, List<? extends Host> hosts, Account account) {
if (account == null) {
return hosts;

View File

@ -897,8 +897,9 @@ public enum Config {
String.class,
"host.capacityType.to.order.clusters",
"CPU",
"The host capacity type (CPU or RAM) is used by deployment planner to order clusters during VM resource allocation",
"CPU,RAM"),
"The host capacity type (CPU, RAM, COMBINED) is used by deployment planner to order clusters during VM resource allocation",
"CPU,RAM,COMBINED"),
ApplyAllocationAlgorithmToPods(
"Advanced",
ManagementServer.class,

View File

@ -601,6 +601,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
weightBasedParametersForValidation.add(CapacityManager.SecondaryStorageCapacityThreshold.key());
weightBasedParametersForValidation.add(ClusterDrsService.ClusterDrsImbalanceThreshold.key());
weightBasedParametersForValidation.add(ClusterDrsService.ClusterDrsImbalanceSkipThreshold.key());
weightBasedParametersForValidation.add(ConfigurationManager.HostCapacityTypeCpuMemoryWeight.key());
}
protected void overProvisioningFactorsForValidation() {
@ -8274,7 +8275,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
BYTES_MAX_READ_LENGTH, BYTES_MAX_WRITE_LENGTH, ADD_HOST_ON_SERVICE_RESTART_KVM, SET_HOST_DOWN_TO_MAINTENANCE,
VM_SERVICE_OFFERING_MAX_CPU_CORES, VM_SERVICE_OFFERING_MAX_RAM_SIZE, MIGRATE_VM_ACROSS_CLUSTERS,
ENABLE_ACCOUNT_SETTINGS_FOR_DOMAIN, ENABLE_DOMAIN_SETTINGS_FOR_CHILD_DOMAIN,
ALLOW_DOMAIN_ADMINS_TO_CREATE_TAGGED_OFFERINGS, DELETE_QUERY_BATCH_SIZE, AllowNonRFC1918CompliantIPs
ALLOW_DOMAIN_ADMINS_TO_CREATE_TAGGED_OFFERINGS, DELETE_QUERY_BATCH_SIZE, AllowNonRFC1918CompliantIPs, HostCapacityTypeCpuMemoryWeight
};
}

View File

@ -20,14 +20,19 @@ import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import com.cloud.capacity.CapacityVO;
import com.cloud.configuration.ConfigurationManager;
import com.cloud.dc.ClusterDetailsVO;
import com.cloud.utils.exception.CloudRuntimeException;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.framework.config.ConfigKey;
import org.apache.cloudstack.framework.config.Configurable;
@ -457,17 +462,14 @@ public class FirstFitPlanner extends AdapterBase implements DeploymentClusterPla
logger.debug("Listing clusters in order of aggregate capacity, that have (at least one host with) enough CPU and RAM capacity under this " +
(isZone ? "Zone: " : "Pod: ") + id);
}
String capacityTypeToOrder = configDao.getValue(Config.HostCapacityTypeToOrderClusters.key());
short capacityType = Capacity.CAPACITY_TYPE_CPU;
if ("RAM".equalsIgnoreCase(capacityTypeToOrder)) {
capacityType = Capacity.CAPACITY_TYPE_MEMORY;
}
List<Long> clusterIdswithEnoughCapacity = capacityDao.listClustersInZoneOrPodByHostCapacities(id, vmId, requiredCpu, requiredRam, capacityType, isZone);
List<Long> clusterIdswithEnoughCapacity = capacityDao.listClustersInZoneOrPodByHostCapacities(id, vmId, requiredCpu, requiredRam, isZone);
if (logger.isTraceEnabled()) {
logger.trace("ClusterId List having enough CPU and RAM capacity: " + clusterIdswithEnoughCapacity);
}
Pair<List<Long>, Map<Long, Double>> result = capacityDao.orderClustersByAggregateCapacity(id, vmId, capacityType, isZone);
Pair<List<Long>, Map<Long, Double>> result = getOrderedClustersByCapacity(id, vmId, isZone);
List<Long> clusterIdsOrderedByAggregateCapacity = result.first();
//only keep the clusters that have enough capacity to host this VM
if (logger.isTraceEnabled()) {
@ -491,17 +493,12 @@ public class FirstFitPlanner extends AdapterBase implements DeploymentClusterPla
if (logger.isDebugEnabled()) {
logger.debug("Listing pods in order of aggregate capacity, that have (at least one host with) enough CPU and RAM capacity under this Zone: " + zoneId);
}
String capacityTypeToOrder = configDao.getValue(Config.HostCapacityTypeToOrderClusters.key());
short capacityType = Capacity.CAPACITY_TYPE_CPU;
if ("RAM".equalsIgnoreCase(capacityTypeToOrder)) {
capacityType = Capacity.CAPACITY_TYPE_MEMORY;
}
List<Long> podIdswithEnoughCapacity = capacityDao.listPodsByHostCapacities(zoneId, requiredCpu, requiredRam, capacityType);
List<Long> podIdswithEnoughCapacity = capacityDao.listPodsByHostCapacities(zoneId, requiredCpu, requiredRam);
if (logger.isTraceEnabled()) {
logger.trace("PodId List having enough CPU and RAM capacity: " + podIdswithEnoughCapacity);
}
Pair<List<Long>, Map<Long, Double>> result = capacityDao.orderPodsByAggregateCapacity(zoneId, capacityType);
Pair<List<Long>, Map<Long, Double>> result = getOrderedPodsByCapacity(zoneId);
List<Long> podIdsOrderedByAggregateCapacity = result.first();
//only keep the clusters that have enough capacity to host this VM
if (logger.isTraceEnabled()) {
@ -517,6 +514,104 @@ public class FirstFitPlanner extends AdapterBase implements DeploymentClusterPla
}
/**
 * Orders pods in the zone by aggregate capacity, honoring the configured ordering
 * type. CPU/RAM delegate to the aggregate-capacity query; COMBINED blends CPU and
 * memory usage using the configured CPU-to-memory weight.
 */
private Pair<List<Long>, Map<Long, Double>> getOrderedPodsByCapacity(long zoneId) {
    final double weight = ConfigurationManager.HostCapacityTypeCpuMemoryWeight.value();
    final String configured = configDao.getValue(Config.HostCapacityTypeToOrderClusters.key());
    final short capacityType = getHostCapacityTypeToOrderCluster(configured, weight);
    logger.debug("CapacityType: {} is used for Pod ordering", getCapacityTypeName(capacityType));
    // Non-negative values are real capacity types (CPU or RAM); delegate directly.
    if (capacityType >= 0) {
        return capacityDao.orderPodsByAggregateCapacity(zoneId, capacityType);
    }
    // COMBINED: fetch both CPU and memory rows and merge them into one weighted score per pod.
    final List<CapacityVO> rows = capacityDao.listPodCapacityByCapacityTypes(zoneId,
            List.of(Capacity.CAPACITY_TYPE_CPU, Capacity.CAPACITY_TYPE_MEMORY));
    final Map<Long, Double> ordered = getPodByCombinedCapacities(rows, weight);
    return new Pair<>(new ArrayList<>(ordered.keySet()), ordered);
}
// Orders pods by combined (weighted) CPU + memory usage, least-used first.
// Per row: weight * (used + reserved) / (total * overcommit-ratio), where weight is
// cpuToMemoryWeight for CPU rows and (1 - cpuToMemoryWeight) for memory rows; a pod's
// CPU and memory contributions are summed into one score.
// NOTE(review): findDetail(...) is dereferenced without a null check — assumes every
// cluster has cpu/memory overcommit details; confirm, else this throws NPE.
// NOTE(review): a zero totalCapacity (or overcommit ratio) would make the division
// yield NaN/Infinity and corrupt the ordering — confirm rows can never have total == 0.
public Map<Long, Double> getPodByCombinedCapacities(List<CapacityVO> capacities, double cpuToMemoryWeight) {
Map<Long, Double> podByCombinedCapacity = new HashMap<>();
for (CapacityVO capacityVO : capacities) {
boolean isCPUCapacity = capacityVO.getCapacityType() == Capacity.CAPACITY_TYPE_CPU;
long podId = capacityVO.getPodId();
// Memory weight is the complement of the CPU weight.
double applicableWeight = isCPUCapacity ? cpuToMemoryWeight : 1 - cpuToMemoryWeight;
String overCommitRatioParam = isCPUCapacity ? ApiConstants.CPU_OVERCOMMIT_RATIO : ApiConstants.MEMORY_OVERCOMMIT_RATIO;
ClusterDetailsVO overCommitRatioVO = clusterDetailsDao.findDetail(capacityVO.getClusterId(), overCommitRatioParam);
float overCommitRatio = Float.parseFloat(overCommitRatioVO.getValue());
double capacityMetric = applicableWeight *
(capacityVO.getUsedCapacity() + capacityVO.getReservedCapacity())/(capacityVO.getTotalCapacity() * overCommitRatio);
podByCombinedCapacity.merge(podId, capacityMetric, Double::sum);
}
// Ascending sort: lower usage score = more free capacity = ordered first.
return podByCombinedCapacity.entrySet()
.stream()
.sorted(Map.Entry.comparingByValue())
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue, (e1, e2) -> e1, LinkedHashMap::new));
}
/**
 * Orders clusters under the given zone or pod by aggregate capacity, honoring the
 * configured ordering type. CPU/RAM delegate to the aggregate-capacity query;
 * COMBINED blends CPU and memory usage using the configured CPU-to-memory weight.
 */
private Pair<List<Long>, Map<Long, Double>> getOrderedClustersByCapacity(long id, long vmId, boolean isZone) {
    final double weight = ConfigurationManager.HostCapacityTypeCpuMemoryWeight.value();
    final String configured = configDao.getValue(Config.HostCapacityTypeToOrderClusters.key());
    final short capacityType = getHostCapacityTypeToOrderCluster(configured, weight);
    logger.debug("CapacityType: {} is used for Cluster ordering", getCapacityTypeName(capacityType));
    // Non-negative values are real capacity types (CPU or RAM); delegate directly.
    if (capacityType >= 0) {
        return capacityDao.orderClustersByAggregateCapacity(id, vmId, capacityType, isZone);
    }
    // COMBINED: interpret id as a zone or pod scope, then blend CPU and memory rows.
    final Long zoneId;
    final Long podId;
    if (isZone) {
        zoneId = id;
        podId = null;
    } else {
        zoneId = null;
        podId = id;
    }
    final List<CapacityVO> rows = capacityDao.listClusterCapacityByCapacityTypes(zoneId, podId,
            List.of(Capacity.CAPACITY_TYPE_CPU, Capacity.CAPACITY_TYPE_MEMORY));
    final Map<Long, Double> ordered = getClusterByCombinedCapacities(rows, weight);
    return new Pair<>(new ArrayList<>(ordered.keySet()), ordered);
}
/**
 * Returns a human-readable name for the capacity type chosen for cluster ordering.
 *
 * Uses the named capacity-type constants instead of magic numbers, consistent
 * with the rest of this class (e.g. the DAO calls that pass
 * {@code Capacity.CAPACITY_TYPE_CPU}).
 *
 * @param capacityType {@link Capacity#CAPACITY_TYPE_MEMORY},
 *        {@link Capacity#CAPACITY_TYPE_CPU}, or {@code -1} — the COMBINED
 *        sentinel returned by {@link #getHostCapacityTypeToOrderCluster}
 * @return "RAM", "CPU", "COMBINED", or "UNKNOWN" for any other value
 */
public static String getCapacityTypeName(short capacityType) {
    switch (capacityType) {
        case Capacity.CAPACITY_TYPE_MEMORY: return ApiConstants.RAM;
        case Capacity.CAPACITY_TYPE_CPU: return ApiConstants.CPU;
        case -1: return ApiConstants.COMBINED_CAPACITY_ORDERING; // COMBINED has no Capacity constant
        default: return "UNKNOWN";
    }
}
/**
 * Computes a weighted CPU+memory usage metric per cluster and returns the
 * clusters ordered from least used to most used.
 *
 * For each capacity row, the fraction (used + reserved) / (total * overcommit)
 * is scaled by the applicable weight — {@code cpuToMemoryWeight} for CPU rows,
 * {@code 1 - cpuToMemoryWeight} for memory rows — and summed per cluster.
 *
 * @param cpuToMemoryWeight weight of CPU relative to memory, in [0, 1]
 * @return insertion-ordered map of cluster id -> combined metric, ascending by metric
 */
public Map<Long, Double> getClusterByCombinedCapacities(List<CapacityVO> capacities, double cpuToMemoryWeight) {
    Map<Long, Double> combinedUsageByCluster = new HashMap<>();
    for (CapacityVO capacity : capacities) {
        final boolean cpuRow = capacity.getCapacityType() == Capacity.CAPACITY_TYPE_CPU;
        final long clusterId = capacity.getClusterId();
        final double weight = cpuRow ? cpuToMemoryWeight : 1 - cpuToMemoryWeight;
        // The overcommit ratio is a per-cluster detail keyed by capacity type.
        final String overCommitKey = cpuRow ? ApiConstants.CPU_OVERCOMMIT_RATIO : ApiConstants.MEMORY_OVERCOMMIT_RATIO;
        ClusterDetailsVO overCommitDetail = clusterDetailsDao.findDetail(clusterId, overCommitKey);
        float overCommitRatio = Float.parseFloat(overCommitDetail.getValue());
        double consumed = capacity.getUsedCapacity() + capacity.getReservedCapacity();
        double metric = weight * consumed / (capacity.getTotalCapacity() * overCommitRatio);
        combinedUsageByCluster.merge(clusterId, metric, Double::sum);
    }
    // Preserve ascending-metric iteration order in the returned map.
    Map<Long, Double> ordered = new LinkedHashMap<>();
    combinedUsageByCluster.entrySet()
            .stream()
            .sorted(Map.Entry.comparingByValue())
            .forEachOrdered(entry -> ordered.put(entry.getKey(), entry.getValue()));
    return ordered;
}
/**
 * Resolves the configured cluster-ordering setting to a capacity type.
 *
 * "RAM" maps to memory, "COMBINED" maps to the {@code -1} sentinel unless the
 * CPU-to-memory weight degenerates to a single dimension (1.0 -> CPU,
 * 0.0 -> memory), and any other value — including "CPU" and unrecognized
 * input — defaults to CPU.
 *
 * @param capacityTypeToOrder configured value ("CPU", "RAM", or "COMBINED"); case-insensitive
 * @param cpuToMemoryWeight   CPU weight in [0, 1] used only for "COMBINED"
 * @return {@link CapacityVO#CAPACITY_TYPE_CPU}, {@link CapacityVO#CAPACITY_TYPE_MEMORY},
 *         or {@code -1} to signal COMBINED weighted ordering
 */
public static short getHostCapacityTypeToOrderCluster(String capacityTypeToOrder, double cpuToMemoryWeight) {
    if (ApiConstants.RAM.equalsIgnoreCase(capacityTypeToOrder)) {
        return CapacityVO.CAPACITY_TYPE_MEMORY;
    }
    if (!ApiConstants.COMBINED_CAPACITY_ORDERING.equalsIgnoreCase(capacityTypeToOrder)) {
        // Default: "CPU" and any unrecognized value order by CPU capacity.
        return CapacityVO.CAPACITY_TYPE_CPU;
    }
    // COMBINED with an extreme weight collapses to a single capacity type.
    if (cpuToMemoryWeight == 1.0) {
        return CapacityVO.CAPACITY_TYPE_CPU;
    }
    if (cpuToMemoryWeight == 0.0) {
        return CapacityVO.CAPACITY_TYPE_MEMORY;
    }
    return -1; // sentinel: use the COMBINED weighted ordering
}
private void removeClustersWithoutMatchingTag(List<Long> clusterListForVmAllocation, String hostTagOnOffering) {
List<Long> matchingClusters = hostDao.listClustersByHostTag(hostTagOnOffering);

View File

@ -19,6 +19,7 @@
package com.cloud.agent.manager.allocator.impl;
import com.cloud.capacity.CapacityManager;
import com.cloud.capacity.CapacityVO;
import com.cloud.deploy.DeploymentPlan;
import com.cloud.deploy.DeploymentPlanner;
import com.cloud.host.Host;
@ -28,12 +29,14 @@ import com.cloud.service.dao.ServiceOfferingDetailsDao;
import com.cloud.user.Account;
import com.cloud.utils.Pair;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
@ -47,6 +50,7 @@ import static org.mockito.Mockito.when;
public class FirstFitAllocatorTest {
private static final double TOLERANCE = 0.0001;
private FirstFitAllocator allocator;
private CapacityManager capacityMgr;
private ServiceOfferingDetailsDao offeringDetailsDao;
@ -156,4 +160,62 @@ public class FirstFitAllocatorTest {
assertTrue(result.isEmpty());
}
@Test
public void testHostByCombinedCapacityOrder() {
    List<CapacityVO> capacities = getHostCapacities();

    // Scenario 1: equal CPU/memory weighting (0.5) — host 1 should order first.
    Map<Long, Double> ordered = FirstFitAllocator.getHostByCombinedCapacities(capacities, 0.5);
    Assert.assertEquals("Host with ID 1 should be first in ordering",
            Long.valueOf(1L), ordered.keySet().iterator().next());
    Assert.assertEquals("Host 1 combined capacity should match expected value",
            0.9609375, ordered.get(1L), TOLERANCE);
    Assert.assertEquals("Host 2 combined capacity should match expected value",
            0.9296875, ordered.get(2L), TOLERANCE);

    // Scenario 2: raise host 1's CPU usage and weight CPU higher (0.7) —
    // host 2 should now order first.
    when(capacities.get(0).getUsedCapacity()).thenReturn(1500L);
    ordered = FirstFitAllocator.getHostByCombinedCapacities(capacities, 0.7);
    Assert.assertEquals("Host with ID 2 should be first after capacity change",
            Long.valueOf(2L), ordered.keySet().iterator().next());
    Assert.assertEquals("Host 2 combined capacity should match expected value after change",
            0.9515625, ordered.get(2L), TOLERANCE);
    Assert.assertEquals("Host 1 combined capacity should match expected value after change",
            0.9484375, ordered.get(1L), TOLERANCE);
}
/**
 * Builds mocked CPU and memory capacity rows for two hosts (IDs 1 and 2).
 * Both hosts report 500 of 32000 CPU used; host 1 uses 512 MiB and host 2
 * uses 1 GiB of an 8 GiB memory total. Reserved capacity is zero everywhere.
 */
List<CapacityVO> getHostCapacities() {
    CapacityVO cpuCapacity1 = mock(CapacityVO.class);
    when(cpuCapacity1.getHostOrPoolId()).thenReturn(1L);
    when(cpuCapacity1.getTotalCapacity()).thenReturn(32000L);
    when(cpuCapacity1.getReservedCapacity()).thenReturn(0L);
    when(cpuCapacity1.getUsedCapacity()).thenReturn(500L);
    when(cpuCapacity1.getCapacityType()).thenReturn(CapacityVO.CAPACITY_TYPE_CPU);
    CapacityVO cpuCapacity2 = mock(CapacityVO.class);
    when(cpuCapacity2.getHostOrPoolId()).thenReturn(2L);
    when(cpuCapacity2.getTotalCapacity()).thenReturn(32000L);
    when(cpuCapacity2.getReservedCapacity()).thenReturn(0L);
    when(cpuCapacity2.getUsedCapacity()).thenReturn(500L);
    when(cpuCapacity2.getCapacityType()).thenReturn(CapacityVO.CAPACITY_TYPE_CPU);
    CapacityVO memCapacity1 = mock(CapacityVO.class);
    when(memCapacity1.getHostOrPoolId()).thenReturn(1L);
    when(memCapacity1.getTotalCapacity()).thenReturn(8589934592L);
    when(memCapacity1.getReservedCapacity()).thenReturn(0L);
    when(memCapacity1.getUsedCapacity()).thenReturn(536870912L);
    when(memCapacity1.getCapacityType()).thenReturn(CapacityVO.CAPACITY_TYPE_MEMORY);
    CapacityVO memCapacity2 = mock(CapacityVO.class);
    when(memCapacity2.getHostOrPoolId()).thenReturn(2L);
    when(memCapacity2.getTotalCapacity()).thenReturn(8589934592L);
    when(memCapacity2.getReservedCapacity()).thenReturn(0L);
    when(memCapacity2.getUsedCapacity()).thenReturn(1073741824L);
    // Fixed copy-paste bug: this previously re-stubbed memCapacity1, leaving
    // memCapacity2's capacity type at Mockito's default (0), which only
    // coincidentally equals CAPACITY_TYPE_MEMORY.
    when(memCapacity2.getCapacityType()).thenReturn(CapacityVO.CAPACITY_TYPE_MEMORY);
    return Arrays.asList(cpuCapacity1, memCapacity1, cpuCapacity2, memCapacity2);
}
}

View File

@ -17,6 +17,7 @@
package com.cloud.vm;
import static org.junit.Assert.assertTrue;
import static org.mockito.ArgumentMatchers.anyLong;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@ -30,6 +31,8 @@ import java.util.Map;
import javax.inject.Inject;
import com.cloud.capacity.CapacityVO;
import com.cloud.dc.ClusterDetailsVO;
import org.apache.cloudstack.context.CallContext;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.framework.config.ConfigDepot;
@ -42,10 +45,10 @@ import org.apache.cloudstack.framework.config.impl.ConfigDepotImpl;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.test.utils.SpringUtils;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.ArgumentMatchers;
import org.mockito.Mockito;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.ComponentScan;
@ -138,7 +141,9 @@ public class FirstFitPlannerTest {
ScopedConfigStorage scopedStorage;
@Inject
HostDao hostDao;
@Inject
private ClusterDetailsDao clusterDetailsDao;
private static final double TOLERANCE = 0.0001;
private static long domainId = 1L;
long dataCenterId = 1L;
long accountId = 1L;
@ -241,6 +246,69 @@ public class FirstFitPlannerTest {
assertTrue("Reordered cluster list does not have clusters exceeding threshold", (clusterList.containsAll(clustersCrossingThreshold)));
}
@Test
public void testGetClusterOrderCapacityType() {
    // Explicit "CPU"/"RAM" settings map directly to their capacity types.
    Assert.assertEquals(1, FirstFitPlanner.getHostCapacityTypeToOrderCluster("CPU", 0.5));
    Assert.assertEquals(0, FirstFitPlanner.getHostCapacityTypeToOrderCluster("RAM", 0.5));
    // "COMBINED" collapses to CPU at weight 1, to RAM at weight 0,
    // and yields the -1 sentinel for anything in between.
    String combinedOrder = "COMBINED";
    Assert.assertEquals(1, FirstFitPlanner.getHostCapacityTypeToOrderCluster(combinedOrder, 1));
    Assert.assertEquals(0, FirstFitPlanner.getHostCapacityTypeToOrderCluster(combinedOrder, 0));
    Assert.assertEquals(-1, FirstFitPlanner.getHostCapacityTypeToOrderCluster(combinedOrder, 0.5));
}
@Test
public void testGetPodByCombinedCapacities() {
    List<CapacityVO> capacities = getPodCapacities();
    // All clusters report a 1.0 overcommit ratio.
    ClusterDetailsVO overCommitDetail = mock(ClusterDetailsVO.class);
    when(overCommitDetail.getValue()).thenReturn("1.0");
    when(clusterDetailsDao.findDetail(anyLong(), anyString())).thenReturn(overCommitDetail);

    // Scenario 1: equal CPU/memory weighting (0.5) — pod 1 should order first.
    Map<Long, Double> ordered = planner.getPodByCombinedCapacities(capacities, 0.5);
    Assert.assertEquals("Pod with ID 1 should be first in ordering",
            Long.valueOf(1L), ordered.keySet().iterator().next());
    Assert.assertEquals("Pod 1 combined capacity should match expected value",
            0.0390625, ordered.get(1L), TOLERANCE);
    Assert.assertEquals("Pod 2 combined capacity should match expected value",
            0.0703125, ordered.get(2L), TOLERANCE);

    // Scenario 2: raise pod 1's CPU usage and weight CPU higher (0.7) —
    // pod 2 should now order first.
    when(capacities.get(0).getUsedCapacity()).thenReturn(1500L);
    ordered = planner.getPodByCombinedCapacities(capacities, 0.7);
    Assert.assertEquals("Pod with ID 2 should be first in ordering",
            Long.valueOf(2L), ordered.keySet().iterator().next());
    Assert.assertEquals("Pod 2 combined capacity should match expected value",
            0.04843750, ordered.get(2L), TOLERANCE);
    Assert.assertEquals("Pod 1 combined capacity should match expected value",
            0.05156250, ordered.get(1L), TOLERANCE);
}
@Test
public void testGetClusterByCombinedCapacities() {
    List<CapacityVO> capacities = getClusterCapacities();
    // All clusters report a 1.0 overcommit ratio.
    ClusterDetailsVO overCommitDetail = mock(ClusterDetailsVO.class);
    when(overCommitDetail.getValue()).thenReturn("1.0");
    when(clusterDetailsDao.findDetail(anyLong(), anyString())).thenReturn(overCommitDetail);

    // Scenario 1: equal CPU/memory weighting (0.5) — cluster 1 should order first.
    Map<Long, Double> ordered = planner.getClusterByCombinedCapacities(capacities, 0.5);
    Assert.assertEquals("Cluster with ID 1 should be first in ordering",
            Long.valueOf(1L), ordered.keySet().iterator().next());
    Assert.assertEquals("Cluster 1 combined capacity should match expected value",
            0.046875, ordered.get(1L), TOLERANCE);
    Assert.assertEquals("Cluster 2 combined capacity should match expected value",
            0.07421875, ordered.get(2L), TOLERANCE);

    // Scenario 2: raise cluster 1's CPU usage and weight CPU higher (0.7) —
    // cluster 2 should now order first.
    when(capacities.get(0).getUsedCapacity()).thenReturn(2000L);
    ordered = planner.getClusterByCombinedCapacities(capacities, 0.7);
    Assert.assertEquals("Cluster with ID 2 should be first in ordering",
            Long.valueOf(2L), ordered.keySet().iterator().next());
    Assert.assertEquals("Cluster 2 combined capacity should match expected value",
            0.05390625, ordered.get(2L), TOLERANCE);
    Assert.assertEquals("Cluster 1 combined capacity should match expected value",
            0.0625, ordered.get(1L), TOLERANCE);
}
private List<Long> initializeForClusterThresholdDisabled() {
when(configDepot.getConfigStringValue(DeploymentClusterPlanner.ClusterThresholdEnabled.key(),
ConfigKey.Scope.Global, null)).thenReturn(Boolean.FALSE.toString());
@ -293,7 +361,7 @@ public class FirstFitPlannerTest {
when(
capacityDao.listClustersInZoneOrPodByHostCapacities(dataCenterId, 12L, noOfCpusInOffering * cpuSpeedInOffering, ramInOffering * 1024L * 1024L,
Capacity.CAPACITY_TYPE_CPU, true)).thenReturn(clustersWithEnoughCapacity);
true)).thenReturn(clustersWithEnoughCapacity);
Map<Long, Double> clusterCapacityMap = new HashMap<Long, Double>();
clusterCapacityMap.put(1L, 2048D);
@ -327,7 +395,7 @@ public class FirstFitPlannerTest {
hostList6.add(new Long(15));
String[] implicitHostTags = {"GPU"};
int ramInBytes = ramInOffering * 1024 * 1024;
when(serviceOfferingDetailsDao.findDetail(ArgumentMatchers.anyLong(), anyString())).thenReturn(null);
when(serviceOfferingDetailsDao.findDetail(anyLong(), anyString())).thenReturn(null);
when(hostGpuGroupsDao.listHostIds()).thenReturn(hostList0);
when(capacityDao.listHostsWithEnoughCapacity(noOfCpusInOffering * cpuSpeedInOffering, ramInBytes, new Long(1), Host.Type.Routing.toString())).thenReturn(hostList1);
when(capacityDao.listHostsWithEnoughCapacity(noOfCpusInOffering * cpuSpeedInOffering, ramInBytes, new Long(2), Host.Type.Routing.toString())).thenReturn(hostList2);
@ -505,4 +573,70 @@ public class FirstFitPlannerTest {
}
}
}
/**
 * Builds mocked CPU and memory capacity rows for two clusters (IDs 1 and 2):
 * cluster 1 uses 1000 CPU and 512 MiB memory, cluster 2 uses 750 CPU and
 * 1 GiB memory. Reserved capacity is zero everywhere.
 */
List<CapacityVO> getClusterCapacities() {
    CapacityVO cpuCapacity1 = mockClusterCapacityVO(1L, CapacityVO.CAPACITY_TYPE_CPU, 32000L, 1000L);
    CapacityVO cpuCapacity2 = mockClusterCapacityVO(2L, CapacityVO.CAPACITY_TYPE_CPU, 32000L, 750L);
    CapacityVO memCapacity1 = mockClusterCapacityVO(1L, CapacityVO.CAPACITY_TYPE_MEMORY, 8589934592L, 536870912L);
    CapacityVO memCapacity2 = mockClusterCapacityVO(2L, CapacityVO.CAPACITY_TYPE_MEMORY, 8589934592L, 1073741824L);
    return Arrays.asList(cpuCapacity1, memCapacity1, cpuCapacity2, memCapacity2);
}

/** Creates a mocked cluster CapacityVO with zero reserved capacity. */
private CapacityVO mockClusterCapacityVO(long clusterId, short capacityType, long total, long used) {
    CapacityVO capacity = mock(CapacityVO.class);
    when(capacity.getClusterId()).thenReturn(clusterId);
    when(capacity.getTotalCapacity()).thenReturn(total);
    when(capacity.getReservedCapacity()).thenReturn(0L);
    when(capacity.getUsedCapacity()).thenReturn(used);
    when(capacity.getCapacityType()).thenReturn(capacityType);
    return capacity;
}
/**
 * Builds mocked CPU and memory capacity rows for two pods (IDs 1 and 2), all
 * attached to cluster 1: both pods use 500 of 32000 CPU; pod 1 uses 512 MiB
 * and pod 2 uses 1 GiB of an 8 GiB memory total. Reserved capacity is zero.
 */
List<CapacityVO> getPodCapacities() {
    CapacityVO cpuCapacity1 = mockPodCapacityVO(1L, CapacityVO.CAPACITY_TYPE_CPU, 32000L, 500L);
    CapacityVO cpuCapacity2 = mockPodCapacityVO(2L, CapacityVO.CAPACITY_TYPE_CPU, 32000L, 500L);
    CapacityVO memCapacity1 = mockPodCapacityVO(1L, CapacityVO.CAPACITY_TYPE_MEMORY, 8589934592L, 536870912L);
    CapacityVO memCapacity2 = mockPodCapacityVO(2L, CapacityVO.CAPACITY_TYPE_MEMORY, 8589934592L, 1073741824L);
    return Arrays.asList(cpuCapacity1, memCapacity1, cpuCapacity2, memCapacity2);
}

/** Creates a mocked pod CapacityVO (cluster id fixed to 1) with zero reserved capacity. */
private CapacityVO mockPodCapacityVO(long podId, short capacityType, long total, long used) {
    CapacityVO capacity = mock(CapacityVO.class);
    when(capacity.getPodId()).thenReturn(podId);
    when(capacity.getClusterId()).thenReturn(1L);
    when(capacity.getTotalCapacity()).thenReturn(total);
    when(capacity.getReservedCapacity()).thenReturn(0L);
    when(capacity.getUsedCapacity()).thenReturn(used);
    when(capacity.getCapacityType()).thenReturn(capacityType);
    return capacity;
}
}