Fix CID 1114609 Log the correct number

Hugo Trippaers 2014-07-24 12:14:06 +02:00
parent ec43bfce90
commit 1440a1c6a0
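
The Coverity defect (CID 1114609) is a debug message in the host-reservation release check that reported the size of the wrong collection: the guard tests vmsStoppingMigratingByHostId, but the message printed vms.size() and omitted the Starting state the query already covers. A minimal, self-contained sketch of the defect class and the corrected pattern follows; the class and variable names and the use of System.out in place of log4j are illustrative assumptions, not the actual CloudStack code.

import java.util.Arrays;
import java.util.List;

// Hypothetical sketch: the guard tests one collection, but the buggy log line
// reported the size of another. The fix logs the list that triggered the branch.
public class WrongCountLogged {
    public static void main(String[] args) {
        long hostId = 42L;
        List<String> runningVms = Arrays.asList("vm-1", "vm-2", "vm-3");   // checked earlier in the method
        List<String> transitioningVms = Arrays.asList("vm-4");             // stopping/migrating/starting

        if (transitioningVms.size() > 0) {
            // The buggy form logged runningVms.size() (3) here, which is unrelated to
            // why this branch was taken; the corrected form reports the collection
            // that actually blocks releasing the reservation.
            System.out.println("Cannot release reservation, found " + transitioningVms.size()
                + " VMs stopping/migrating/starting on host " + hostId);
        }
    }
}

Run as-is, the corrected message reports 1 VM, matching the list whose non-emptiness caused the early return.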


@@ -31,6 +31,8 @@ import javax.ejb.Local;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
+import org.apache.log4j.Logger;
import org.apache.cloudstack.affinity.AffinityGroupProcessor;
import org.apache.cloudstack.affinity.AffinityGroupService;
import org.apache.cloudstack.affinity.AffinityGroupVMMapVO;
@@ -49,7 +51,6 @@ import org.apache.cloudstack.managed.context.ManagedContextTimerTask;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.utils.identity.ManagementServerNode;
-import org.apache.log4j.Logger;
import com.cloud.agent.AgentManager;
import com.cloud.agent.Listener;
@@ -130,7 +131,7 @@ import com.cloud.vm.dao.VMInstanceDao;
@Local(value = {DeploymentPlanningManager.class})
public class DeploymentPlanningManagerImpl extends ManagerBase implements DeploymentPlanningManager, Manager, Listener,
        StateListener<State, VirtualMachine.Event, VirtualMachine> {
    private static final Logger s_logger = Logger.getLogger(DeploymentPlanningManagerImpl.class);
    @Inject
@@ -276,14 +277,14 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
        ServiceOffering offering = vmProfile.getServiceOffering();
        if(planner == null){
            String plannerName = offering.getDeploymentPlanner();
            if (plannerName == null) {
                if (vm.getHypervisorType() == HypervisorType.BareMetal) {
                    plannerName = "BareMetalPlanner";
                } else {
                    plannerName = _configDao.getValue(Config.VmDeploymentPlanner.key());
                }
            }
            planner = getDeploymentPlannerByName(plannerName);
        }
@@ -294,7 +295,7 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
            s_logger.debug("DeploymentPlanner allocation algorithm: " + planner);
            s_logger.debug("Trying to allocate a host and storage pools from dc:" + plan.getDataCenterId() + ", pod:" + plan.getPodId() + ",cluster:" +
                plan.getClusterId() + ", requested cpu: " + cpu_requested + ", requested ram: " + ram_requested);
            s_logger.debug("Is ROOT volume READY (pool already allocated)?: " + (plan.getPoolId() != null ? "Yes" : "No"));
        }
@@ -314,13 +315,13 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
            } else {
                if (s_logger.isDebugEnabled()) {
                    s_logger.debug("Looking for suitable pools for this host under zone: " + host.getDataCenterId() + ", pod: " + host.getPodId() + ", cluster: " +
                        host.getClusterId());
                }
                // search for storage under the zone, pod, cluster of the host.
                DataCenterDeployment lastPlan =
                    new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), host.getClusterId(), hostIdSpecified, plan.getPoolId(), null,
                        plan.getReservationContext());
                Pair<Map<Volume, List<StoragePool>>, List<Volume>> result = findSuitablePoolsForVolumes(vmProfile, lastPlan, avoids, HostAllocator.RETURN_UPTO_ALL);
                Map<Volume, List<StoragePool>> suitableVolumeStoragePools = result.first();
@@ -331,8 +332,8 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
                    List<Host> suitableHosts = new ArrayList<Host>();
                    suitableHosts.add(host);
                    Pair<Host, Map<Volume, StoragePool>> potentialResources = findPotentialDeploymentResources(
                        suitableHosts, suitableVolumeStoragePools, avoids,
                        getPlannerUsage(planner, vmProfile, plan, avoids), readyAndReusedVolumes);
                    if (potentialResources != null) {
                        Pod pod = _podDao.findById(host.getPodId());
                        Cluster cluster = _clusterDao.findById(host.getClusterId());
@@ -363,7 +364,7 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
                s_logger.debug("The last host of this VM is in avoid set");
            } else if (_capacityMgr.checkIfHostReachMaxGuestLimit(host)) {
                s_logger.debug("The last Host, hostId: " + host.getId() +
                    " already has max Running VMs(count includes system VMs), skipping this and trying other available hosts");
            } else if ((offeringDetails = _serviceOfferingDetailsDao.findDetail(offering.getId(), GPU.Keys.vgpuType.toString())) != null) {
                ServiceOfferingDetailsVO groupName = _serviceOfferingDetailsDao.findDetail(offering.getId(), GPU.Keys.pciDevice.toString());
                if(!_resourceMgr.isGPUDeviceAvailable(host.getId(), groupName.getValue(), offeringDetails.getValue())){
@@ -379,64 +380,64 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
                        }
                    }
                    if (hostTagsMatch) {
                        long cluster_id = host.getClusterId();
                        ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id,
                            "cpuOvercommitRatio");
                        ClusterDetailsVO cluster_detail_ram = _clusterDetailsDao.findDetail(cluster_id,
                            "memoryOvercommitRatio");
                        Float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue());
                        Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue());
                        if (_capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, true,
                                cpuOvercommitRatio, memoryOvercommitRatio, true)
                                && _capacityMgr.checkIfHostHasCpuCapability(host.getId(), offering.getCpu(),
                                    offering.getSpeed())) {
                            s_logger.debug("The last host of this VM is UP and has enough capacity");
                            s_logger.debug("Now checking for suitable pools under zone: " + host.getDataCenterId()
                                + ", pod: " + host.getPodId() + ", cluster: " + host.getClusterId());
                            // search for storage under the zone, pod, cluster
                            // of
                            // the last host.
                            DataCenterDeployment lastPlan = new DataCenterDeployment(host.getDataCenterId(),
                                host.getPodId(), host.getClusterId(), host.getId(), plan.getPoolId(), null);
                            Pair<Map<Volume, List<StoragePool>>, List<Volume>> result = findSuitablePoolsForVolumes(
                                vmProfile, lastPlan, avoids, HostAllocator.RETURN_UPTO_ALL);
                            Map<Volume, List<StoragePool>> suitableVolumeStoragePools = result.first();
                            List<Volume> readyAndReusedVolumes = result.second();
                            // choose the potential pool for this VM for this
                            // host
                            if (!suitableVolumeStoragePools.isEmpty()) {
                                List<Host> suitableHosts = new ArrayList<Host>();
                                suitableHosts.add(host);
                                Pair<Host, Map<Volume, StoragePool>> potentialResources = findPotentialDeploymentResources(
                                    suitableHosts, suitableVolumeStoragePools, avoids,
                                    getPlannerUsage(planner, vmProfile, plan, avoids), readyAndReusedVolumes);
                                if (potentialResources != null) {
                                    Pod pod = _podDao.findById(host.getPodId());
                                    Cluster cluster = _clusterDao.findById(host.getClusterId());
                                    Map<Volume, StoragePool> storageVolMap = potentialResources.second();
                                    // remove the reused vol<->pool from
                                    // destination, since we don't have to
                                    // prepare
                                    // this volume.
                                    for (Volume vol : readyAndReusedVolumes) {
                                        storageVolMap.remove(vol);
                                    }
                                    DeployDestination dest = new DeployDestination(dc, pod, cluster, host,
                                        storageVolMap);
                                    s_logger.debug("Returning Deployment Destination: " + dest);
                                    return dest;
                                }
                            }
                        } else {
                            s_logger.debug("The last host of this VM does not have enough capacity");
                        }
                    } else {
                        s_logger.debug("Service Offering host tag does not match the last host of this VM");
                    }
                } else {
                    s_logger.debug("The last host of this VM is not UP or is not enabled, host status is: " + host.getStatus().name() + ", host resource state is: " +
                        host.getResourceState());
                }
            }
            s_logger.debug("Cannot choose the last host to deploy this VM ");
@@ -450,21 +451,21 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
        if (planner instanceof DeploymentClusterPlanner) {
            ExcludeList plannerAvoidInput =
                new ExcludeList(avoids.getDataCentersToAvoid(), avoids.getPodsToAvoid(), avoids.getClustersToAvoid(), avoids.getHostsToAvoid(),
                    avoids.getPoolsToAvoid());
            clusterList = ((DeploymentClusterPlanner)planner).orderClusters(vmProfile, plan, avoids);
            if (clusterList != null && !clusterList.isEmpty()) {
                // planner refactoring. call allocators to list hosts
                ExcludeList plannerAvoidOutput =
                    new ExcludeList(avoids.getDataCentersToAvoid(), avoids.getPodsToAvoid(), avoids.getClustersToAvoid(), avoids.getHostsToAvoid(),
                        avoids.getPoolsToAvoid());
                resetAvoidSet(plannerAvoidOutput, plannerAvoidInput);
                dest =
                    checkClustersforDestination(clusterList, vmProfile, plan, avoids, dc, getPlannerUsage(planner, vmProfile, plan, avoids), plannerAvoidOutput);
                if (dest != null) {
                    return dest;
                }
@@ -591,7 +592,7 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
    }
    private PlannerResourceUsage getPlannerUsage(DeploymentPlanner planner, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoids)
        throws InsufficientServerCapacityException {
        if (planner != null && planner instanceof DeploymentClusterPlanner) {
            return ((DeploymentClusterPlanner)planner).getResourceUsage(vmProfile, plan, avoids);
        } else {
@@ -619,7 +620,7 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
                    return true;
                } else {
                    s_logger.debug("Cannot use this host for usage: " + resourceUsageRequired + ", since this host has been reserved for planner usage : " +
                        hostResourceType);
                    return false;
                }
            } else {
@@ -629,27 +630,27 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
                return Transaction.execute(new TransactionCallback<Boolean>() {
                    @Override
                    public Boolean doInTransaction(TransactionStatus status) {
                        final PlannerHostReservationVO lockedEntry = _plannerHostReserveDao.lockRow(id, true);
                        if (lockedEntry == null) {
                            s_logger.error("Unable to lock the host entry for reservation, host: " + hostId);
                            return false;
                        }
                        // check before updating
                        if (lockedEntry.getResourceUsage() == null) {
                            lockedEntry.setResourceUsage(resourceUsageRequired);
                            _plannerHostReserveDao.persist(lockedEntry);
                            return true;
                        } else {
                            // someone updated it earlier. check if we can still use it
                            if (lockedEntry.getResourceUsage() == resourceUsageRequired) {
                                return true;
                            } else {
                                s_logger.debug("Cannot use this host for usage: " + resourceUsageRequired + ", since this host has been reserved for planner usage : " +
                                    hostResourceTypeFinal);
                                return false;
                            }
                        }
                    }
                });
            }
@@ -695,7 +696,7 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
        List<VMInstanceVO> vmsStoppingMigratingByHostId = _vmInstanceDao.findByHostInStates(hostId, State.Stopping, State.Migrating, State.Starting);
        if (vmsStoppingMigratingByHostId.size() > 0) {
            if (s_logger.isDebugEnabled()) {
-                s_logger.debug("Cannot release reservation, Found " + vms.size() + " VMs stopping/migrating on host " + hostId);
+                s_logger.debug("Cannot release reservation, Found " + vmsStoppingMigratingByHostId.size() + " VMs stopping/migrating/starting on host " + hostId);
            }
            return false;
        }
@@ -721,20 +722,20 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
        return Transaction.execute(new TransactionCallback<Boolean>() {
            @Override
            public Boolean doInTransaction(TransactionStatus status) {
                final PlannerHostReservationVO lockedEntry = _plannerHostReserveDao.lockRow(id, true);
                if (lockedEntry == null) {
                    s_logger.error("Unable to lock the host entry for reservation, host: " + hostId);
                    return false;
                }
                // check before updating
                if (lockedEntry.getResourceUsage() != null) {
                    lockedEntry.setResourceUsage(null);
                    _plannerHostReserveDao.persist(lockedEntry);
                    return true;
                }
                return false;
            }
        });
    }
@@ -833,7 +834,7 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
            public void onPublishMessage(String senderAddress, String subject, Object obj) {
                VMInstanceVO vm = ((VMInstanceVO)obj);
                s_logger.debug("MessageBus message: host reserved capacity released for VM: " + vm.getLastHostId() +
                    ", checking if host reservation can be released for host:" + vm.getLastHostId());
                Long hostId = vm.getLastHostId();
                checkHostReservationRelease(hostId);
            }
@@ -890,7 +891,7 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
    // /refactoring planner methods
    private DeployDestination checkClustersforDestination(List<Long> clusterList, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, DataCenter dc,
        DeploymentPlanner.PlannerResourceUsage resourceUsageRequired, ExcludeList plannerAvoidOutput) {
        if (s_logger.isTraceEnabled()) {
            s_logger.trace("ClusterId List to consider: " + clusterList);
@@ -909,7 +910,7 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
            // search for resources(hosts and storage) under this zone, pod,
            // cluster.
            DataCenterDeployment potentialPlan =
                new DataCenterDeployment(plan.getDataCenterId(), clusterVO.getPodId(), clusterVO.getId(), null, plan.getPoolId(), null, plan.getReservationContext());
            // find suitable hosts under this cluster, need as many hosts as we
            // get.
@@ -924,15 +925,15 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
            }
            Pair<Map<Volume, List<StoragePool>>, List<Volume>> result =
                findSuitablePoolsForVolumes(vmProfile, potentialPlan, avoid, StoragePoolAllocator.RETURN_UPTO_ALL);
            Map<Volume, List<StoragePool>> suitableVolumeStoragePools = result.first();
            List<Volume> readyAndReusedVolumes = result.second();
            // choose the potential host and pool for the VM
            if (!suitableVolumeStoragePools.isEmpty()) {
                Pair<Host, Map<Volume, StoragePool>> potentialResources = findPotentialDeploymentResources(
                    suitableHosts, suitableVolumeStoragePools, avoid, resourceUsageRequired,
                    readyAndReusedVolumes);
                if (potentialResources != null) {
                    Pod pod = _podDao.findById(clusterVO.getPodId());
@@ -965,7 +966,7 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
    private boolean canAvoidCluster(Cluster clusterVO, ExcludeList avoids, ExcludeList plannerAvoidOutput, VirtualMachineProfile vmProfile) {
        ExcludeList allocatorAvoidOutput =
            new ExcludeList(avoids.getDataCentersToAvoid(), avoids.getPodsToAvoid(), avoids.getClustersToAvoid(), avoids.getHostsToAvoid(), avoids.getPoolsToAvoid());
        // remove any hosts/pools that the planners might have added
        // to get the list of hosts/pools that Allocators flagged as 'avoid'
@@ -977,7 +978,7 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
        boolean avoidAllHosts = true, avoidAllPools = true;
        List<HostVO> allhostsInCluster =
            _hostDao.listAllUpAndEnabledNonHAHosts(Host.Type.Routing, clusterVO.getId(), clusterVO.getPodId(), clusterVO.getDataCenterId(), null);
        for (HostVO host : allhostsInCluster) {
            if (!allocatorAvoidOutput.shouldAvoid(host)) {
                // there's some host in the cluster that is not yet in avoid set
@@ -1003,21 +1004,21 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
            boolean vmRequiresLocalStorege = storageRequirements.second();
            if (vmRequiresSharedStorage) {
                // check shared pools
                List<StoragePoolVO> allPoolsInCluster = _storagePoolDao.findPoolsByTags(clusterVO.getDataCenterId(), clusterVO.getPodId(), clusterVO.getId(), null);
                for (StoragePoolVO pool : allPoolsInCluster) {
                    if (!allocatorAvoidOutput.shouldAvoid(pool)) {
                        // there's some pool in the cluster that is not yet in avoid set
                        avoidAllPools = false;
                        break;
                    }
                }
            }
            if (vmRequiresLocalStorege) {
                // check local pools
                List<StoragePoolVO> allLocalPoolsInCluster =
                    _storagePoolDao.findLocalStoragePoolsByTags(clusterVO.getDataCenterId(), clusterVO.getPodId(), clusterVO.getId(), null);
                for (StoragePoolVO pool : allLocalPoolsInCluster) {
                    if (!allocatorAvoidOutput.shouldAvoid(pool)) {
                        // there's some pool in the cluster that is not yet
@@ -1058,7 +1059,7 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
    }
    protected Pair<Host, Map<Volume, StoragePool>> findPotentialDeploymentResources(List<Host> suitableHosts, Map<Volume, List<StoragePool>> suitableVolumeStoragePools,
        ExcludeList avoid, DeploymentPlanner.PlannerResourceUsage resourceUsageRequired, List<Volume> readyAndReusedVolumes) {
        s_logger.debug("Trying to find a potenial host and associated storage pools from the suitable host/pool lists for this VM");
        boolean hostCanAccessPool = false;
@@ -1116,7 +1117,7 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
                }
                if (hostCanAccessPool && haveEnoughSpace && checkIfHostFitsPlannerUsage(potentialHost.getId(), resourceUsageRequired)) {
                    s_logger.debug("Found a potential host " + "id: " + potentialHost.getId() + " name: " + potentialHost.getName() +
                        " and associated storage pools for this VM");
                    return new Pair<Host, Map<Volume, StoragePool>>(potentialHost, storage);
                } else {
                    avoid.addHost(potentialHost.getId());
@@ -1382,43 +1383,43 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
        return Transaction.execute(new TransactionCallback<String>() {
            @Override
            public String doInTransaction(TransactionStatus status) {
                boolean saveReservation = true;
                if (vmGroupCount > 0) {
                    List<Long> groupIds = _affinityGroupVMMapDao.listAffinityGroupIdsByVmId(vm.getId());
                    SearchCriteria<AffinityGroupVO> criteria = _affinityGroupDao.createSearchCriteria();
                    criteria.addAnd("id", SearchCriteria.Op.IN, groupIds.toArray(new Object[groupIds.size()]));
-                    List<AffinityGroupVO> groups = _affinityGroupDao.lockRows(criteria, null, true);
+                    _affinityGroupDao.lockRows(criteria, null, true);
                    for (AffinityGroupProcessor processor : _affinityProcessors) {
                        if (!processor.check(vmProfile, plannedDestination)) {
                            saveReservation = false;
                            break;
                        }
                    }
                }
                if (saveReservation) {
                    VMReservationVO vmReservation =
                        new VMReservationVO(vm.getId(), plannedDestination.getDataCenter().getId(), plannedDestination.getPod().getId(), plannedDestination.getCluster()
                            .getId(), plannedDestination.getHost().getId());
                    if (planner != null) {
                        vmReservation.setDeploymentPlanner(planner.getName());
                    }
                    Map<Long, Long> volumeReservationMap = new HashMap<Long, Long>();
                    if (vm.getHypervisorType() != HypervisorType.BareMetal) {
                        for (Volume vo : plannedDestination.getStorageForDisks().keySet()) {
                            volumeReservationMap.put(vo.getId(), plannedDestination.getStorageForDisks().get(vo).getId());
                        }
                        vmReservation.setVolumeReservation(volumeReservationMap);
                    }
                    _reservationDao.persist(vmReservation);
                    return vmReservation.getUuid();
                }
                return null;
            }
        });
    }