fixed merge conflict

This commit is contained in:
Alex Huang 2010-08-23 15:10:08 -07:00
commit bb06bc3d57
8 changed files with 251 additions and 183 deletions

View File

@ -28,5 +28,4 @@ public class Hypervisor {
VirtualBox,
Parralels;
}
}

View File

@ -4582,6 +4582,7 @@ public abstract class CitrixResourceBase implements StoragePoolResource, ServerR
break;
}
}
// assume the memory virtualization overhead is 1/64 of the available RAM
ram = (ram - dom0Ram) * 63/64;
cmd.setMemory(ram);

View File

@ -19,6 +19,7 @@ import com.vmware.vim25.VirtualMachineConfigSpec;
import com.vmware.vim25.VirtualDeviceConfigSpecOperation;
import com.vmware.vim25.VirtualEthernetCard;
import com.vmware.vim25.VirtualEthernetCardNetworkBackingInfo;
import com.vmware.vim25.VirtualNicManagerNetConfig;
import com.vmware.vim25.VirtualPCNet32;
import com.vmware.vim25.VirtualDeviceConfigSpec;
import com.vmware.vim25.VirtualMachineCloneSpec;
@ -784,6 +785,43 @@ public class TestVMWare {
"cloud.dc.test");
}
/**
 * Exercises retrieval of a dynamic property addressed by a dotted path:
 * fetches the virtual-NIC manager net config of a hard-coded host.
 * The result is only cast (to confirm the expected array type); it is not
 * otherwise used — this is a manual SDK smoke test.
 */
private void getPropertyWithPath() throws Exception {
    ManagedObjectReference hostRef = new ManagedObjectReference();
    hostRef.setType("HostSystem");
    hostRef.set_value("host-161");
    // Cast documents the type we expect back from the property collector.
    VirtualNicManagerNetConfig[] fetchedConfigs =
            (VirtualNicManagerNetConfig[]) cb.getServiceUtil3().getDynamicProperty(hostRef, "config.virtualNicManagerInfo.netConfig");
}
/**
 * Lists the VMs on a hard-coded host by building a PropertyFilterSpec that
 * traverses HostSystem.vm and collects each VM's name, power state, and
 * template flag, then prints the results. Manual SDK smoke test.
 */
private void getHostVMs() throws Exception {
    ManagedObjectReference hostRef = new ManagedObjectReference();
    hostRef.setType("HostSystem");
    hostRef.set_value("host-48");
    // Properties to read from every VirtualMachine reached by the traversal.
    PropertySpec vmProps = new PropertySpec();
    vmProps.setType("VirtualMachine");
    vmProps.setPathSet(new String[] { "name", "runtime.powerState", "config.template" });
    // Hop from the HostSystem to its "vm" collection.
    TraversalSpec hostToVm = new TraversalSpec();
    hostToVm.setType("HostSystem");
    hostToVm.setPath("vm");
    hostToVm.setName("host2VmTraversal");
    // Anchor the traversal at the host but skip the host itself
    // (we only want the traversed VirtualMachine objects).
    ObjectSpec objSpec = new ObjectSpec();
    objSpec.setObj(hostRef);
    objSpec.setSkip(Boolean.TRUE);
    objSpec.setSelectSet(new SelectionSpec[] { hostToVm });
    PropertyFilterSpec filterSpec = new PropertyFilterSpec();
    filterSpec.setPropSet(new PropertySpec[] { vmProps });
    filterSpec.setObjectSet(new ObjectSpec[] { objSpec });
    ObjectContent[] contents = cb.getServiceConnection3().getService().retrieveProperties(
            cb.getServiceConnection3().getServiceContent().getPropertyCollector(),
            new PropertyFilterSpec[] { filterSpec });
    this.printContent(contents);
}
public static void main(String[] args) throws Exception {
setupLog4j();
TestVMWare client = new TestVMWare();
@ -809,7 +847,10 @@ public class TestVMWare {
// client.addNic();
// client.addNicToNetwork();
client.createDatacenter();
// client.createDatacenter();
// client.getPropertyWithPath();
client.getHostVMs();
cb.disConnect();
} catch (Exception e) {
e.printStackTrace();

View File

@ -478,7 +478,13 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory {
Enumeration<Discoverer> en = _discoverers.enumeration();
while (en.hasMoreElements()) {
Discoverer discoverer = en.nextElement();
Map<? extends ServerResource, Map<String, String>> resources = discoverer.find(dcId, podId, clusterId, url, username, password);
Map<? extends ServerResource, Map<String, String>> resources = null;
try {
resources = discoverer.find(dcId, podId, clusterId, url, username, password);
} catch(Exception e) {
s_logger.info("Exception in host discovery process with discoverer: " + discoverer.getName() + ", skip to another discoverer if there is any");
}
if (resources != null) {
for (Map.Entry<? extends ServerResource, Map<String, String>> entry : resources.entrySet()) {
ServerResource resource = entry.getKey();
@ -1677,7 +1683,7 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory {
// If this command is from a KVM agent, or from an agent that has a
// null hypervisor type, don't do the CIDR check
if (hypervisorType == null || hypervisorType == Hypervisor.Type.KVM)
if (hypervisorType == null || hypervisorType == Hypervisor.Type.KVM || hypervisorType == Hypervisor.Type.VMware)
doCidrCheck = false;
if (doCidrCheck)

View File

@ -155,47 +155,23 @@ public class UserConcentratedAllocator implements PodAllocator {
}
private boolean dataCenterAndPodHasEnoughCapacity(long dataCenterId, long podId, long capacityNeeded, short capacityType, long[] hostCandidate) {
List<CapacityVO> capacities = null;
if (m_capacityCheckLock.lock(120)) { // 2 minutes
try {
SearchCriteria<CapacityVO> sc = _capacityDao.createSearchCriteria();
sc.addAnd("capacityType", SearchCriteria.Op.EQ, capacityType);
sc.addAnd("dataCenterId", SearchCriteria.Op.EQ, dataCenterId);
sc.addAnd("podId", SearchCriteria.Op.EQ, podId);
List<CapacityVO> capacities = _capacityDao.search(sc, null);
boolean enoughCapacity = false;
if (capacities != null) {
for (CapacityVO capacity : capacities) {
if(capacityType == CapacityVO.CAPACITY_TYPE_CPU || capacityType == CapacityVO.CAPACITY_TYPE_MEMORY) {
//
// for CPU/Memory, we now switch to static allocation
//
if ((capacity.getTotalCapacity() -
calcHostAllocatedCpuMemoryCapacity(capacity.getHostOrPoolId(), capacityType)) >= capacityNeeded) {
hostCandidate[0] = capacity.getHostOrPoolId();
enoughCapacity = true;
break;
}
} else {
if ((capacity.getTotalCapacity() - capacity.getUsedCapacity()) >= capacityNeeded) {
hostCandidate[0] = capacity.getHostOrPoolId();
enoughCapacity = true;
break;
}
}
}
}
return enoughCapacity;
capacities = _capacityDao.search(sc, null);
} finally {
m_capacityCheckLock.unlock();
}
} else {
s_logger.error("Unable to acquire synchronization lock for pod allocation");
// we now try to enforce reservation-style allocation, waiting time has been adjusted
// to 2 minutes
return false;
s_logger.error("Unable to acquire synchronization lock for pod allocation");
// we now try to enforce reservation-style allocation, waiting time has been adjusted
// to 2 minutes
return false;
/*
// If we can't lock the table, just return that there is enough capacity and allow instance creation to fail on the agent
@ -204,6 +180,31 @@ public class UserConcentratedAllocator implements PodAllocator {
return true;
*/
}
boolean enoughCapacity = false;
if (capacities != null) {
for (CapacityVO capacity : capacities) {
if(capacityType == CapacityVO.CAPACITY_TYPE_CPU || capacityType == CapacityVO.CAPACITY_TYPE_MEMORY) {
//
// for CPU/Memory, we now switch to static allocation
//
if ((capacity.getTotalCapacity() -
calcHostAllocatedCpuMemoryCapacity(capacity.getHostOrPoolId(), capacityType)) >= capacityNeeded) {
hostCandidate[0] = capacity.getHostOrPoolId();
enoughCapacity = true;
break;
}
} else {
if ((capacity.getTotalCapacity() - capacity.getUsedCapacity()) >= capacityNeeded) {
hostCandidate[0] = capacity.getHostOrPoolId();
enoughCapacity = true;
break;
}
}
}
}
return enoughCapacity;
}
private boolean skipCalculation(VMInstanceVO vm) {

View File

@ -326,115 +326,122 @@ public class AlertManagerImpl implements AlertManager {
// is stopped we updated the amount allocated, and when VM sync reports a changed state, we update
// the amount allocated. Hopefully it's limited to 3 entry points and will keep the amount allocated
// per host accurate.
if (m_capacityCheckLock.lock(5)) { // 5 second timeout
if (s_logger.isTraceEnabled()) {
s_logger.trace("recalculating system capacity");
}
try {
// delete the old records
_capacityDao.clearNonStorageCapacities();
// get all hosts..
SearchCriteria<HostVO> sc = _hostDao.createSearchCriteria();
sc.addAnd("status", SearchCriteria.Op.EQ, Status.Up.toString());
List<HostVO> hosts = _hostDao.search(sc, null);
// prep the service offerings
List<ServiceOfferingVO> offerings = _offeringsDao.listAll();
Map<Long, ServiceOfferingVO> offeringsMap = new HashMap<Long, ServiceOfferingVO>();
for (ServiceOfferingVO offering : offerings) {
offeringsMap.put(offering.getId(), offering);
}
for (HostVO host : hosts) {
if (host.getType() != Host.Type.Routing) {
continue;
}
long cpu = 0;
long usedMemory = 0;
List<DomainRouterVO> domainRouters = _routerDao.listUpByHostId(host.getId());
if (s_logger.isDebugEnabled()) {
s_logger.debug("Found " + domainRouters.size() + " router domains on host " + host.getId());
}
for (DomainRouterVO router : domainRouters) {
usedMemory += router.getRamSize() * 1024L * 1024L;
}
List<ConsoleProxyVO> proxys = _consoleProxyDao.listUpByHostId(host.getId());
if (s_logger.isDebugEnabled()) {
s_logger.debug("Found " + proxys.size() + " console proxy on host " + host.getId());
}
for(ConsoleProxyVO proxy : proxys) {
usedMemory += proxy.getRamSize() * 1024L * 1024L;
}
List<SecondaryStorageVmVO> secStorageVms = _secStorgaeVmDao.listUpByHostId(host.getId());
if (s_logger.isDebugEnabled()) {
s_logger.debug("Found " + secStorageVms.size() + " secondary storage VM on host " + host.getId());
}
for(SecondaryStorageVmVO secStorageVm : secStorageVms) {
usedMemory += secStorageVm.getRamSize() * 1024L * 1024L;
}
List<UserVmVO> vms = _userVmDao.listUpByHostId(host.getId());
if (s_logger.isDebugEnabled()) {
s_logger.debug("Found " + vms.size() + " user VM on host " + host.getId());
}
for (UserVmVO vm : vms) {
ServiceOffering so = offeringsMap.get(vm.getServiceOfferingId());
usedMemory += so.getRamSize() * 1024L * 1024L;
cpu += so.getCpu() * (so.getSpeed() * 0.99);
}
if (s_logger.isTraceEnabled()) {
s_logger.trace("recalculating system capacity");
}
List<CapacityVO> newCapacities = new ArrayList<CapacityVO>();
long totalMemory = host.getTotalMemory();
// get all hosts..
SearchCriteria sc = _hostDao.createSearchCriteria();
sc.addAnd("status", SearchCriteria.Op.EQ, Status.Up.toString());
List<HostVO> hosts = _hostDao.search(sc, null);
CapacityVO newMemoryCapacity = new CapacityVO(host.getId(), host.getDataCenterId(), host.getPodId(), usedMemory, totalMemory, CapacityVO.CAPACITY_TYPE_MEMORY);
CapacityVO newCPUCapacity = new CapacityVO(host.getId(), host.getDataCenterId(), host.getPodId(), cpu, (long)(host.getCpus()*host.getSpeed()* _cpuOverProvisioningFactor), CapacityVO.CAPACITY_TYPE_CPU);
_capacityDao.persist(newMemoryCapacity);
_capacityDao.persist(newCPUCapacity);
}
// prep the service offerings
List<ServiceOfferingVO> offerings = _offeringsDao.listAll();
Map<Long, ServiceOfferingVO> offeringsMap = new HashMap<Long, ServiceOfferingVO>();
for (ServiceOfferingVO offering : offerings) {
offeringsMap.put(offering.getId(), offering);
}
for (HostVO host : hosts) {
if (host.getType() != Host.Type.Routing) {
continue;
}
long cpu = 0;
long usedMemory = 0;
List<DomainRouterVO> domainRouters = _routerDao.listUpByHostId(host.getId());
if (s_logger.isDebugEnabled()) {
s_logger.debug("Found " + domainRouters.size() + " router domains on host " + host.getId());
}
for (DomainRouterVO router : domainRouters) {
usedMemory += router.getRamSize() * 1024L * 1024L;
}
List<ConsoleProxyVO> proxys = _consoleProxyDao.listUpByHostId(host.getId());
if (s_logger.isDebugEnabled()) {
s_logger.debug("Found " + proxys.size() + " console proxy on host " + host.getId());
}
for(ConsoleProxyVO proxy : proxys) {
usedMemory += proxy.getRamSize() * 1024L * 1024L;
}
List<SecondaryStorageVmVO> secStorageVms = _secStorgaeVmDao.listUpByHostId(host.getId());
if (s_logger.isDebugEnabled()) {
s_logger.debug("Found " + secStorageVms.size() + " secondary storage VM on host " + host.getId());
}
for(SecondaryStorageVmVO secStorageVm : secStorageVms) {
usedMemory += secStorageVm.getRamSize() * 1024L * 1024L;
}
List<UserVmVO> vms = _userVmDao.listUpByHostId(host.getId());
if (s_logger.isDebugEnabled()) {
s_logger.debug("Found " + vms.size() + " user VM on host " + host.getId());
}
for (UserVmVO vm : vms) {
ServiceOffering so = offeringsMap.get(vm.getServiceOfferingId());
usedMemory += so.getRamSize() * 1024L * 1024L;
cpu += so.getCpu() * (so.getSpeed() * 0.99);
}
long totalMemory = host.getTotalMemory();
// Calculate storage pool capacity
List<StoragePoolVO> storagePools = _storagePoolDao.listAllActive();
for (StoragePoolVO pool : storagePools) {
long disk = 0l;
Pair<Long, Long> sizes = _volumeDao.getCountAndTotalByPool(pool.getId());
disk = sizes.second();
int provFactor = 1;
if( pool.getPoolType() == StoragePoolType.NetworkFilesystem ) {
provFactor = _overProvisioningFactor;
}
CapacityVO newStorageCapacity = new CapacityVO(pool.getId(), pool.getDataCenterId(), pool.getPodId(), disk, pool.getCapacityBytes() * provFactor, CapacityVO.CAPACITY_TYPE_STORAGE_ALLOCATED);
_capacityDao.persist(newStorageCapacity);
CapacityVO newMemoryCapacity = new CapacityVO(host.getId(), host.getDataCenterId(), host.getPodId(), usedMemory, totalMemory, CapacityVO.CAPACITY_TYPE_MEMORY);
CapacityVO newCPUCapacity = new CapacityVO(host.getId(), host.getDataCenterId(), host.getPodId(), cpu, (long)(host.getCpus()*host.getSpeed()* _cpuOverProvisioningFactor), CapacityVO.CAPACITY_TYPE_CPU);
newCapacities.add(newMemoryCapacity);
newCapacities.add(newCPUCapacity);
}
continue;
}
// Calculate storage pool capacity
List<StoragePoolVO> storagePools = _storagePoolDao.listAllActive();
for (StoragePoolVO pool : storagePools) {
long disk = 0l;
Pair<Long, Long> sizes = _volumeDao.getCountAndTotalByPool(pool.getId());
disk = sizes.second();
int provFactor = 1;
if( pool.getPoolType() == StoragePoolType.NetworkFilesystem ) {
provFactor = _overProvisioningFactor;
}
CapacityVO newStorageCapacity = new CapacityVO(pool.getId(), pool.getDataCenterId(), pool.getPodId(), disk, pool.getCapacityBytes() * provFactor, CapacityVO.CAPACITY_TYPE_STORAGE_ALLOCATED);
newCapacities.add(newStorageCapacity);
// Calculate new Public IP capacity
List<DataCenterVO> datacenters = _dcDao.listAll();
for (DataCenterVO datacenter : datacenters) {
long dcId = datacenter.getId();
continue;
}
int totalPublicIPs = _publicIPAddressDao.countIPs(dcId, -1, false);
int allocatedPublicIPs = _publicIPAddressDao.countIPs(dcId, -1, true);
// Calculate new Public IP capacity
List<DataCenterVO> datacenters = _dcDao.listAll();
for (DataCenterVO datacenter : datacenters) {
long dcId = datacenter.getId();
CapacityVO newPublicIPCapacity = new CapacityVO(null, dcId, null, allocatedPublicIPs, totalPublicIPs, CapacityVO.CAPACITY_TYPE_PUBLIC_IP);
_capacityDao.persist(newPublicIPCapacity);
}
int totalPublicIPs = _publicIPAddressDao.countIPs(dcId, -1, false);
int allocatedPublicIPs = _publicIPAddressDao.countIPs(dcId, -1, true);
CapacityVO newPublicIPCapacity = new CapacityVO(null, dcId, null, allocatedPublicIPs, totalPublicIPs, CapacityVO.CAPACITY_TYPE_PUBLIC_IP);
newCapacities.add(newPublicIPCapacity);
}
// Calculate new Private IP capacity
List<HostPodVO> pods = _podDao.listAll();
for (HostPodVO pod : pods) {
long podId = pod.getId();
long dcId = pod.getDataCenterId();
int totalPrivateIPs = _privateIPAddressDao.countIPs(podId, dcId, false);
int allocatedPrivateIPs = _privateIPAddressDao.countIPs(podId, dcId, true);
// Calculate new Private IP capacity
List<HostPodVO> pods = _podDao.listAll();
for (HostPodVO pod : pods) {
long podId = pod.getId();
long dcId = pod.getDataCenterId();
int totalPrivateIPs = _privateIPAddressDao.countIPs(podId, dcId, false);
int allocatedPrivateIPs = _privateIPAddressDao.countIPs(podId, dcId, true);
CapacityVO newPrivateIPCapacity = new CapacityVO(null, dcId, podId, allocatedPrivateIPs, totalPrivateIPs, CapacityVO.CAPACITY_TYPE_PRIVATE_IP);
_capacityDao.persist(newPrivateIPCapacity);
}
CapacityVO newPrivateIPCapacity = new CapacityVO(null, dcId, podId, allocatedPrivateIPs, totalPrivateIPs, CapacityVO.CAPACITY_TYPE_PRIVATE_IP);
newCapacities.add(newPrivateIPCapacity);
}
if (m_capacityCheckLock.lock(5)) { // 5 second timeout
try {
// delete the old records
_capacityDao.clearNonStorageCapacities();
for (CapacityVO newCapacity : newCapacities) {
_capacityDao.persist(newCapacity);
}
} finally {
m_capacityCheckLock.unlock();
}

View File

@ -292,8 +292,50 @@ public class StatsCollector {
}
}
_storagePoolStats = storagePoolStats;
// a list to store the new capacity entries that will be committed once everything is calculated
List<CapacityVO> newCapacities = new ArrayList<CapacityVO>();
if (m_capacityCheckLock.lock(5)) { // 5 second timeout
// create new entries
for (Long hostId : storageStats.keySet()) {
StorageStats stats = storageStats.get(hostId);
HostVO host = _hostDao.findById(hostId);
host.setTotalSize(stats.getCapacityBytes());
_hostDao.update(host.getId(), host);
if (Host.Type.SecondaryStorage.equals(host.getType())) {
CapacityVO capacity = new CapacityVO(host.getId(), host.getDataCenterId(), host.getPodId(), stats.getByteUsed(), stats.getCapacityBytes(), CapacityVO.CAPACITY_TYPE_SECONDARY_STORAGE);
newCapacities.add(capacity);
// _capacityDao.persist(capacity);
} else if (Host.Type.Storage.equals(host.getType())) {
CapacityVO capacity = new CapacityVO(host.getId(), host.getDataCenterId(), host.getPodId(), stats.getByteUsed(), stats.getCapacityBytes(), CapacityVO.CAPACITY_TYPE_STORAGE);
newCapacities.add(capacity);
// _capacityDao.persist(capacity);
}
}
for (Long poolId : storagePoolStats.keySet()) {
StorageStats stats = storagePoolStats.get(poolId);
StoragePoolVO pool = _storagePoolDao.findById(poolId);
if (pool == null) {
continue;
}
pool.setCapacityBytes(stats.getCapacityBytes());
long available = stats.getCapacityBytes() - stats.getByteUsed();
if( available < 0 ) {
available = 0;
}
pool.setAvailableBytes(available);
_storagePoolDao.update(pool.getId(), pool);
CapacityVO capacity = new CapacityVO(poolId, pool.getDataCenterId(), pool.getPodId(), stats.getByteUsed(), stats.getCapacityBytes(), CapacityVO.CAPACITY_TYPE_STORAGE);
newCapacities.add(capacity);
// _capacityDao.persist(capacity);
}
if (m_capacityCheckLock.lock(5)) { // 5 second timeout
if (s_logger.isTraceEnabled()) {
s_logger.trace("recalculating system storage capacity");
}
@ -304,54 +346,22 @@ public class StatsCollector {
// to collect the stats from an agent and update the database as needed. The
// listener model has connects/disconnects to keep things in sync much better
// than this model right now
_capacityDao.clearStorageCapacities();
// create new entries
for (Long hostId : storageStats.keySet()) {
StorageStats stats = storageStats.get(hostId);
HostVO host = _hostDao.findById(hostId);
host.setTotalSize(stats.getCapacityBytes());
_hostDao.update(host.getId(), host);
if (Host.Type.SecondaryStorage.equals(host.getType())) {
CapacityVO capacity = new CapacityVO(host.getId(), host.getDataCenterId(), host.getPodId(), stats.getByteUsed(), stats.getCapacityBytes(), CapacityVO.CAPACITY_TYPE_SECONDARY_STORAGE);
_capacityDao.persist(capacity);
} else if (Host.Type.Storage.equals(host.getType())) {
CapacityVO capacity = new CapacityVO(host.getId(), host.getDataCenterId(), host.getPodId(), stats.getByteUsed(), stats.getCapacityBytes(), CapacityVO.CAPACITY_TYPE_STORAGE);
_capacityDao.persist(capacity);
}
}
for (Long poolId : storagePoolStats.keySet()) {
StorageStats stats = storagePoolStats.get(poolId);
StoragePoolVO pool = _storagePoolDao.findById(poolId);
if (pool == null) {
continue;
}
pool.setCapacityBytes(stats.getCapacityBytes());
long available = stats.getCapacityBytes() - stats.getByteUsed();
if( available < 0 ) {
available = 0;
}
pool.setAvailableBytes(available);
_storagePoolDao.update(pool.getId(), pool);
CapacityVO capacity = new CapacityVO(poolId, pool.getDataCenterId(), pool.getPodId(), stats.getByteUsed(), stats.getCapacityBytes(), CapacityVO.CAPACITY_TYPE_STORAGE);
_capacityDao.persist(capacity);
}
} finally {
m_capacityCheckLock.unlock();
_capacityDao.clearStorageCapacities();
for (CapacityVO newCapacity : newCapacities) {
_capacityDao.persist(newCapacity);
}
} finally {
m_capacityCheckLock.unlock();
}
if (s_logger.isTraceEnabled()) {
s_logger.trace("done recalculating system storage capacity");
}
} else {
if (s_logger.isTraceEnabled()) {
s_logger.trace("not recalculating system storage capacity, unable to lock capacity table");
}
}
if (s_logger.isTraceEnabled()) {
s_logger.trace("done recalculating system storage capacity");
}
} else {
if (s_logger.isTraceEnabled()) {
s_logger.trace("not recalculating system storage capacity, unable to lock capacity table");
}
}
} catch (Throwable t) {
s_logger.error("Error trying to retrieve storage stats", t);
}

View File

@ -72,3 +72,6 @@ INSERT INTO `cloud`.`guest_os` (id, category_id, name, display_name) VALUES (58,
INSERT INTO `cloud`.`guest_os` (id, category_id, name, display_name) VALUES (59, 7, 'Other install media', 'Ubuntu');
INSERT INTO `cloud`.`guest_os` (id, category_id, name, display_name) VALUES (60, 7, 'Other install media', 'Other');
-- temporarily added for vmware, will be moved when vmware support is fully in-place
INSERT INTO `cloud`.`host_master`(`type`, `service_address`, `admin`, `password`) VALUES('VSphere', 'vsphere-1.lab.vmops.com', 'Administrator', 'Suite219');