CLOUDSTACK-5995: change service offering is not honouring host tags

- Check host tag when the lastHostId is set.
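For reference, the core of the fix is a guard that only reuses the VM's last host when that host's tags satisfy the service offering's host tag. Below is a minimal, self-contained sketch of that check. The Host and ServiceOffering stand-ins and the hostTagsMatch helper are simplified illustrations, not the actual planner classes; in CloudStack the tags are loaded onto the host via _hostDao.loadHostTags(host) and read from offering.getHostTag().

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

// Sketch of the host-tag guard added by this commit, using hypothetical stand-in types.
public class HostTagCheckSketch {

    // Stand-in for a host record whose tags have already been loaded.
    static class Host {
        private final List<String> hostTags;
        Host(List<String> hostTags) { this.hostTags = hostTags; }
        List<String> getHostTags() { return hostTags; }
    }

    // Stand-in for a service offering that may require a host tag.
    static class ServiceOffering {
        private final String hostTag;
        ServiceOffering(String hostTag) { this.hostTag = hostTag; }
        String getHostTag() { return hostTag; }
    }

    // Mirrors the patch's logic: an offering without a tag matches any host;
    // an offering with a tag only matches hosts whose tag list contains it.
    static boolean hostTagsMatch(ServiceOffering offering, Host host) {
        if (offering.getHostTag() == null) {
            return true;
        }
        return host.getHostTags() != null && host.getHostTags().contains(offering.getHostTag());
    }

    public static void main(String[] args) {
        Host lastHost = new Host(Arrays.asList("ssd", "gpu"));
        System.out.println(hostTagsMatch(new ServiceOffering(null), lastHost));   // true: no tag required
        System.out.println(hostTagsMatch(new ServiceOffering("ssd"), lastHost));  // true: tag present
        System.out.println(hostTagsMatch(new ServiceOffering("nvme"), lastHost)); // false: skip last host
        System.out.println(hostTagsMatch(new ServiceOffering("ssd"),
                new Host(Collections.<String>emptyList())));                      // false: host has no tags
    }
}

Run as a plain main, the sketch prints true/true/false/false, which matches the behaviour the patch introduces: a tagged offering now skips a last host that lacks the tag instead of being planned onto it.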
Prachi Damle 2014-01-30 11:24:46 -08:00
parent 158280181d
commit 7d0472bdaa


@@ -362,49 +362,68 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
                         " already has max Running VMs(count includes system VMs), skipping this and trying other available hosts");
             } else {
                 if (host.getStatus() == Status.Up && host.getResourceState() == ResourceState.Enabled) {
+                    boolean hostTagsMatch = true;
+                    if (offering.getHostTag() != null) {
+                        _hostDao.loadHostTags(host);
+                        if (!(host.getHostTags() != null && host.getHostTags().contains(offering.getHostTag()))) {
+                            hostTagsMatch = false;
+                        }
+                    }
+                    if (hostTagsMatch) {
                         long cluster_id = host.getClusterId();
                         ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id, "cpuOvercommitRatio");
                         ClusterDetailsVO cluster_detail_ram = _clusterDetailsDao.findDetail(cluster_id, "memoryOvercommitRatio");
                         Float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue());
                         Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue());
                         if (_capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, true, cpuOvercommitRatio, memoryOvercommitRatio, true)
                                 && _capacityMgr.checkIfHostHasCpuCapability(host.getId(), offering.getCpu(), offering.getSpeed())) {
                             s_logger.debug("The last host of this VM is UP and has enough capacity");
                             s_logger.debug("Now checking for suitable pools under zone: " + host.getDataCenterId() + ", pod: " + host.getPodId() + ", cluster: " + host.getClusterId());
                             // search for storage under the zone, pod, cluster of the last host.
                             DataCenterDeployment lastPlan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), host.getClusterId(), host.getId(), plan.getPoolId(), null);
                             Pair<Map<Volume, List<StoragePool>>, List<Volume>> result = findSuitablePoolsForVolumes(vmProfile, lastPlan, avoids, HostAllocator.RETURN_UPTO_ALL);
                             Map<Volume, List<StoragePool>> suitableVolumeStoragePools = result.first();
                             List<Volume> readyAndReusedVolumes = result.second();
                             // choose the potential pool for this VM for this host
                             if (!suitableVolumeStoragePools.isEmpty()) {
                                 List<Host> suitableHosts = new ArrayList<Host>();
                                 suitableHosts.add(host);
                                 Pair<Host, Map<Volume, StoragePool>> potentialResources = findPotentialDeploymentResources(suitableHosts, suitableVolumeStoragePools, avoids,
                                         getPlannerUsage(planner, vmProfile, plan, avoids), readyAndReusedVolumes);
                                 if (potentialResources != null) {
                                     Pod pod = _podDao.findById(host.getPodId());
                                     Cluster cluster = _clusterDao.findById(host.getClusterId());
                                     Map<Volume, StoragePool> storageVolMap = potentialResources.second();
                                     // remove the reused vol<->pool from destination, since we don't have to prepare this volume.
                                     for (Volume vol : readyAndReusedVolumes) {
                                         storageVolMap.remove(vol);
                                     }
                                     DeployDestination dest = new DeployDestination(dc, pod, cluster, host, storageVolMap);
                                     s_logger.debug("Returning Deployment Destination: " + dest);
                                     return dest;
                                 }
                             }
                         } else {
                             s_logger.debug("The last host of this VM does not have enough capacity");
                         }
+                    } else {
+                        s_logger.debug("Service Offering host tag does not match the last host of this VM");
+                    }
                 } else {
                     s_logger.debug("The last host of this VM is not UP or is not enabled, host status is: " + host.getStatus().name() + ", host resource state is: " +