fixed merge conflict

Alex Huang 2010-08-23 15:10:08 -07:00
commit bb06bc3d57
8 changed files with 251 additions and 183 deletions

View File

@@ -28,5 +28,4 @@ public class Hypervisor {
VirtualBox,
Parralels;
}
}

View File

@@ -4582,6 +4582,7 @@ public abstract class CitrixResourceBase implements StoragePoolResource, ServerR
break;
}
}
// assume the memory virtualization overhead is 1/64
ram = (ram - dom0Ram) * 63/64;
cmd.setMemory(ram);
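As a rough worked example of the 1/64 virtualization-overhead assumption above (the figures are illustrative MiB values, not taken from the commit):

    // illustrative values only, not from the commit
    long ram = 16384;                  // memory reported by the host, in MiB
    long dom0Ram = 512;                // memory reserved for dom0, in MiB
    ram = (ram - dom0Ram) * 63 / 64;   // (16384 - 512) * 63/64 = 15624 MiB left for guests
    cmd.setMemory(ram);                // as in the hunk above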

View File

@@ -19,6 +19,7 @@ import com.vmware.vim25.VirtualMachineConfigSpec;
import com.vmware.vim25.VirtualDeviceConfigSpecOperation;
import com.vmware.vim25.VirtualEthernetCard;
import com.vmware.vim25.VirtualEthernetCardNetworkBackingInfo;
import com.vmware.vim25.VirtualNicManagerNetConfig;
import com.vmware.vim25.VirtualPCNet32;
import com.vmware.vim25.VirtualDeviceConfigSpec;
import com.vmware.vim25.VirtualMachineCloneSpec;
@@ -784,6 +785,43 @@ public class TestVMWare {
"cloud.dc.test");
}
private void getPropertyWithPath() throws Exception {
ManagedObjectReference morHost = new ManagedObjectReference();
morHost.setType("HostSystem");
morHost.set_value("host-161");
VirtualNicManagerNetConfig[] netConfigs = (VirtualNicManagerNetConfig[])cb.getServiceUtil3().getDynamicProperty(morHost, "config.virtualNicManagerInfo.netConfig");
}
private void getHostVMs() throws Exception {
ManagedObjectReference morHost = new ManagedObjectReference();
morHost.setType("HostSystem");
morHost.set_value("host-48");
PropertySpec pSpec = new PropertySpec();
pSpec.setType("VirtualMachine");
pSpec.setPathSet(new String[] { "name", "runtime.powerState", "config.template" });
TraversalSpec host2VmTraversal = new TraversalSpec();
host2VmTraversal.setType("HostSystem");
host2VmTraversal.setPath("vm");
host2VmTraversal.setName("host2VmTraversal");
ObjectSpec oSpec = new ObjectSpec();
oSpec.setObj(morHost);
oSpec.setSkip(Boolean.TRUE);
oSpec.setSelectSet(new SelectionSpec[] { host2VmTraversal });
PropertyFilterSpec pfSpec = new PropertyFilterSpec();
pfSpec.setPropSet(new PropertySpec[] { pSpec });
pfSpec.setObjectSet(new ObjectSpec[] { oSpec });
ObjectContent[] ocs = cb.getServiceConnection3().getService().retrieveProperties(
cb.getServiceConnection3().getServiceContent().getPropertyCollector(),
new PropertyFilterSpec[] { pfSpec });
this.printContent(ocs);
}
public static void main(String[] args) throws Exception {
setupLog4j();
TestVMWare client = new TestVMWare();
@@ -809,7 +847,10 @@ public class TestVMWare {
// client.addNic();
// client.addNicToNetwork();
client.createDatacenter();
// client.createDatacenter();
// client.getPropertyWithPath();
client.getHostVMs();
cb.disConnect();
} catch (Exception e) {
e.printStackTrace();

View File

@@ -478,7 +478,13 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory {
Enumeration<Discoverer> en = _discoverers.enumeration();
while (en.hasMoreElements()) {
Discoverer discoverer = en.nextElement();
Map<? extends ServerResource, Map<String, String>> resources = discoverer.find(dcId, podId, clusterId, url, username, password);
Map<? extends ServerResource, Map<String, String>> resources = null;
try {
resources = discoverer.find(dcId, podId, clusterId, url, username, password);
} catch(Exception e) {
s_logger.info("Exception in host discovery process with discoverer: " + discoverer.getName() + ", skip to another discoverer if there is any");
}
if (resources != null) {
for (Map.Entry<? extends ServerResource, Map<String, String>> entry : resources.entrySet()) {
ServerResource resource = entry.getKey();
@@ -1677,7 +1683,7 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory {
// If this command is from a KVM agent, or from an agent that has a
// null hypervisor type, don't do the CIDR check
if (hypervisorType == null || hypervisorType == Hypervisor.Type.KVM)
if (hypervisorType == null || hypervisorType == Hypervisor.Type.KVM || hypervisorType == Hypervisor.Type.VMware)
doCidrCheck = false;
if (doCidrCheck)

View File

@@ -155,38 +155,14 @@ public class UserConcentratedAllocator implements PodAllocator {
}
private boolean dataCenterAndPodHasEnoughCapacity(long dataCenterId, long podId, long capacityNeeded, short capacityType, long[] hostCandidate) {
List<CapacityVO> capacities = null;
if (m_capacityCheckLock.lock(120)) { // 2 minutes
try {
SearchCriteria<CapacityVO> sc = _capacityDao.createSearchCriteria();
sc.addAnd("capacityType", SearchCriteria.Op.EQ, capacityType);
sc.addAnd("dataCenterId", SearchCriteria.Op.EQ, dataCenterId);
sc.addAnd("podId", SearchCriteria.Op.EQ, podId);
List<CapacityVO> capacities = _capacityDao.search(sc, null);
boolean enoughCapacity = false;
if (capacities != null) {
for (CapacityVO capacity : capacities) {
if(capacityType == CapacityVO.CAPACITY_TYPE_CPU || capacityType == CapacityVO.CAPACITY_TYPE_MEMORY) {
//
// for CPU/Memory, we now switch to static allocation
//
if ((capacity.getTotalCapacity() -
calcHostAllocatedCpuMemoryCapacity(capacity.getHostOrPoolId(), capacityType)) >= capacityNeeded) {
hostCandidate[0] = capacity.getHostOrPoolId();
enoughCapacity = true;
break;
}
} else {
if ((capacity.getTotalCapacity() - capacity.getUsedCapacity()) >= capacityNeeded) {
hostCandidate[0] = capacity.getHostOrPoolId();
enoughCapacity = true;
break;
}
}
}
}
return enoughCapacity;
capacities = _capacityDao.search(sc, null);
} finally {
m_capacityCheckLock.unlock();
}
@@ -204,6 +180,31 @@ public class UserConcentratedAllocator implements PodAllocator {
return true;
*/
}
boolean enoughCapacity = false;
if (capacities != null) {
for (CapacityVO capacity : capacities) {
if(capacityType == CapacityVO.CAPACITY_TYPE_CPU || capacityType == CapacityVO.CAPACITY_TYPE_MEMORY) {
//
// for CPU/Memory, we now switch to static allocation
//
if ((capacity.getTotalCapacity() -
calcHostAllocatedCpuMemoryCapacity(capacity.getHostOrPoolId(), capacityType)) >= capacityNeeded) {
hostCandidate[0] = capacity.getHostOrPoolId();
enoughCapacity = true;
break;
}
} else {
if ((capacity.getTotalCapacity() - capacity.getUsedCapacity()) >= capacityNeeded) {
hostCandidate[0] = capacity.getHostOrPoolId();
enoughCapacity = true;
break;
}
}
}
}
return enoughCapacity;
}
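Taken together, the two hunks above narrow the critical section in dataCenterAndPodHasEnoughCapacity: only the capacity query now runs under m_capacityCheckLock, and the per-candidate evaluation happens after the lock is released. A simplified sketch of the resulting shape (condensed from the diff, not a verbatim copy):

    List<CapacityVO> capacities = null;
    if (m_capacityCheckLock.lock(120)) { // 2 minutes; hold the lock only for the DB search
        try {
            SearchCriteria<CapacityVO> sc = _capacityDao.createSearchCriteria();
            sc.addAnd("capacityType", SearchCriteria.Op.EQ, capacityType);
            sc.addAnd("dataCenterId", SearchCriteria.Op.EQ, dataCenterId);
            sc.addAnd("podId", SearchCriteria.Op.EQ, podId);
            capacities = _capacityDao.search(sc, null);
        } finally {
            m_capacityCheckLock.unlock();
        }
    }
    // the CPU/memory vs. other-capacity comparison loop now runs here, outside the lock,
    // setting hostCandidate[0] and enoughCapacity exactly as before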
private boolean skipCalculation(VMInstanceVO vm) {

View File

@@ -327,16 +327,13 @@ public class AlertManagerImpl implements AlertManager {
// the amount allocated. Hopefully it's limited to 3 entry points and will keep the amount allocated
// per host accurate.
if (m_capacityCheckLock.lock(5)) { // 5 second timeout
if (s_logger.isTraceEnabled()) {
s_logger.trace("recalculating system capacity");
}
try {
// delete the old records
_capacityDao.clearNonStorageCapacities();
List<CapacityVO> newCapacities = new ArrayList<CapacityVO>();
// get all hosts..
SearchCriteria<HostVO> sc = _hostDao.createSearchCriteria();
SearchCriteria sc = _hostDao.createSearchCriteria();
sc.addAnd("status", SearchCriteria.Op.EQ, Status.Up.toString());
List<HostVO> hosts = _hostDao.search(sc, null);
@@ -350,6 +347,7 @@ public class AlertManagerImpl implements AlertManager {
if (host.getType() != Host.Type.Routing) {
continue;
}
long cpu = 0;
long usedMemory = 0;
List<DomainRouterVO> domainRouters = _routerDao.listUpByHostId(host.getId());
@@ -391,8 +389,8 @@ public class AlertManagerImpl implements AlertManager {
CapacityVO newMemoryCapacity = new CapacityVO(host.getId(), host.getDataCenterId(), host.getPodId(), usedMemory, totalMemory, CapacityVO.CAPACITY_TYPE_MEMORY);
CapacityVO newCPUCapacity = new CapacityVO(host.getId(), host.getDataCenterId(), host.getPodId(), cpu, (long)(host.getCpus()*host.getSpeed()* _cpuOverProvisioningFactor), CapacityVO.CAPACITY_TYPE_CPU);
_capacityDao.persist(newMemoryCapacity);
_capacityDao.persist(newCPUCapacity);
newCapacities.add(newMemoryCapacity);
newCapacities.add(newCPUCapacity);
}
// Calculate storage pool capacity
@@ -406,7 +404,7 @@ public class AlertManagerImpl implements AlertManager {
provFactor = _overProvisioningFactor;
}
CapacityVO newStorageCapacity = new CapacityVO(pool.getId(), pool.getDataCenterId(), pool.getPodId(), disk, pool.getCapacityBytes() * provFactor, CapacityVO.CAPACITY_TYPE_STORAGE_ALLOCATED);
_capacityDao.persist(newStorageCapacity);
newCapacities.add(newStorageCapacity);
continue;
}
@@ -420,7 +418,7 @@ public class AlertManagerImpl implements AlertManager {
int allocatedPublicIPs = _publicIPAddressDao.countIPs(dcId, -1, true);
CapacityVO newPublicIPCapacity = new CapacityVO(null, dcId, null, allocatedPublicIPs, totalPublicIPs, CapacityVO.CAPACITY_TYPE_PUBLIC_IP);
_capacityDao.persist(newPublicIPCapacity);
newCapacities.add(newPublicIPCapacity);
}
// Calculate new Private IP capacity
@@ -433,7 +431,16 @@ public class AlertManagerImpl implements AlertManager {
int allocatedPrivateIPs = _privateIPAddressDao.countIPs(podId, dcId, true);
CapacityVO newPrivateIPCapacity = new CapacityVO(null, dcId, podId, allocatedPrivateIPs, totalPrivateIPs, CapacityVO.CAPACITY_TYPE_PRIVATE_IP);
_capacityDao.persist(newPrivateIPCapacity);
newCapacities.add(newPrivateIPCapacity);
}
if (m_capacityCheckLock.lock(5)) { // 5 second timeout
try {
// delete the old records
_capacityDao.clearNonStorageCapacities();
for (CapacityVO newCapacity : newCapacities) {
_capacityDao.persist(newCapacity);
}
} finally {
m_capacityCheckLock.unlock();
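The AlertManagerImpl change follows the same pattern: all of the new CapacityVO rows (host memory/CPU, allocated storage, public and private IPs) are first collected into newCapacities, and m_capacityCheckLock is then held only long enough to clear the old rows and persist the new list. In outline (simplified, surrounding code omitted):

    List<CapacityVO> newCapacities = new ArrayList<CapacityVO>();
    // ... compute the memory/CPU, storage, public-IP and private-IP CapacityVO entries
    // and add each one to newCapacities instead of persisting it immediately ...
    if (m_capacityCheckLock.lock(5)) { // 5 second timeout
        try {
            _capacityDao.clearNonStorageCapacities(); // delete the old records
            for (CapacityVO newCapacity : newCapacities) {
                _capacityDao.persist(newCapacity);
            }
        } finally {
            m_capacityCheckLock.unlock();
        }
    }

The StatsCollector hunks below apply the same treatment to the storage capacities, clearing them with clearStorageCapacities() inside the shortened lock.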

View File

@@ -293,18 +293,8 @@ public class StatsCollector {
}
_storagePoolStats = storagePoolStats;
if (m_capacityCheckLock.lock(5)) { // 5 second timeout
if (s_logger.isTraceEnabled()) {
s_logger.trace("recalculating system storage capacity");
}
try {
// now update the capacity table with the new stats
// FIXME: the right way to do this is to register a listener (see RouterStatsListener)
// for the host stats, send the Watch<something>Command at a regular interval
// to collect the stats from an agent and update the database as needed. The
// listener model has connects/disconnects to keep things in sync much better
// than this model right now
_capacityDao.clearStorageCapacities();
// a list to store the new capacity entries that will be committed once everything is calculated
List<CapacityVO> newCapacities = new ArrayList<CapacityVO>();
// create new entries
for (Long hostId : storageStats.keySet()) {
@@ -315,10 +305,12 @@ public class StatsCollector {
if (Host.Type.SecondaryStorage.equals(host.getType())) {
CapacityVO capacity = new CapacityVO(host.getId(), host.getDataCenterId(), host.getPodId(), stats.getByteUsed(), stats.getCapacityBytes(), CapacityVO.CAPACITY_TYPE_SECONDARY_STORAGE);
_capacityDao.persist(capacity);
newCapacities.add(capacity);
// _capacityDao.persist(capacity);
} else if (Host.Type.Storage.equals(host.getType())) {
CapacityVO capacity = new CapacityVO(host.getId(), host.getDataCenterId(), host.getPodId(), stats.getByteUsed(), stats.getCapacityBytes(), CapacityVO.CAPACITY_TYPE_STORAGE);
_capacityDao.persist(capacity);
newCapacities.add(capacity);
// _capacityDao.persist(capacity);
}
}
@@ -339,7 +331,25 @@ public class StatsCollector {
_storagePoolDao.update(pool.getId(), pool);
CapacityVO capacity = new CapacityVO(poolId, pool.getDataCenterId(), pool.getPodId(), stats.getByteUsed(), stats.getCapacityBytes(), CapacityVO.CAPACITY_TYPE_STORAGE);
_capacityDao.persist(capacity);
newCapacities.add(capacity);
// _capacityDao.persist(capacity);
}
if (m_capacityCheckLock.lock(5)) { // 5 second timeout
if (s_logger.isTraceEnabled()) {
s_logger.trace("recalculating system storage capacity");
}
try {
// now update the capacity table with the new stats
// FIXME: the right way to do this is to register a listener (see RouterStatsListener)
// for the host stats, send the Watch<something>Command at a regular interval
// to collect the stats from an agent and update the database as needed. The
// listener model has connects/disconnects to keep things in sync much better
// than this model right now
_capacityDao.clearStorageCapacities();
for (CapacityVO newCapacity : newCapacities) {
_capacityDao.persist(newCapacity);
}
} finally {
m_capacityCheckLock.unlock();

View File

@@ -72,3 +72,6 @@ INSERT INTO `cloud`.`guest_os` (id, category_id, name, display_name) VALUES (58,
INSERT INTO `cloud`.`guest_os` (id, category_id, name, display_name) VALUES (59, 7, 'Other install media', 'Ubuntu');
INSERT INTO `cloud`.`guest_os` (id, category_id, name, display_name) VALUES (60, 7, 'Other install media', 'Other');
-- temporarily added for vmware, will be moved when vmware support is fully in-place
INSERT INTO `cloud`.`host_master`(`type`, `service_address`, `admin`, `password`) VALUES('VSphere', 'vsphere-1.lab.vmops.com', 'Administrator', 'Suite219');