Merge remote-tracking branch 'apache/4.15'

This commit is contained in:
Abhishek Kumar 2021-04-12 11:43:57 +05:30
commit cce736709e
19 changed files with 333 additions and 192 deletions

View File

@ -59,6 +59,7 @@ public interface VMSnapshot extends ControlledEntity, Identity, InternalIdentity
s_fsm.addTransition(Error, Event.ExpungeRequested, Expunging);
s_fsm.addTransition(Expunging, Event.ExpungeRequested, Expunging);
s_fsm.addTransition(Expunging, Event.OperationSucceeded, Removed);
s_fsm.addTransition(Expunging, Event.OperationFailed, Error);
}
}

View File

@ -85,6 +85,11 @@ public class ListUsageRecordsCmd extends BaseListCmd {
@Parameter(name = ApiConstants.OLD_FORMAT, type = CommandType.BOOLEAN, description = "Flag to enable description rendered in old format which uses internal database IDs instead of UUIDs. False by default.")
private Boolean oldFormat;
@Parameter(name = ApiConstants.IS_RECURSIVE, type = CommandType.BOOLEAN,
description = "Specify if usage records should be fetched recursively per domain. If an account id is passed, records will be limited to that account.",
since = "4.15")
private Boolean recursive = false;
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
@ -153,6 +158,10 @@ public class ListUsageRecordsCmd extends BaseListCmd {
return oldFormat != null && oldFormat;
}
// Whether usage records should be fetched recursively per domain; field defaults to false.
public Boolean isRecursive() {
return recursive;
}
/////////////////////////////////////////////////////
/////////////// API Implementation///////////////////
/////////////////////////////////////////////////////

View File

@ -55,9 +55,10 @@ public class MigrateVolumeCommand extends Command {
this.setWait(timeout);
}
public MigrateVolumeCommand(long volumeId, String volumePath, StoragePool sourcePool, StoragePool targetPool) {
public MigrateVolumeCommand(long volumeId, String volumePath, StoragePool sourcePool, StoragePool targetPool, String hostGuidInTargetCluster) {
this(volumeId,volumePath,targetPool, null, Volume.Type.UNKNOWN, -1);
this.sourcePool = new StorageFilerTO(sourcePool);
this.hostGuidInTargetCluster = hostGuidInTargetCluster;
}
public MigrateVolumeCommand(DataTO srcData, DataTO destData, Map<String, String> srcDetails, Map<String, String> destDetails, int timeout) {
@ -69,11 +70,6 @@ public class MigrateVolumeCommand extends Command {
setWait(timeout);
}
public MigrateVolumeCommand(long volumeId, String volumePath, StoragePool sourcePool, StoragePool targetPool, String targetClusterHost) {
this(volumeId, volumePath, sourcePool, targetPool);
this.hostGuidInTargetCluster = targetClusterHost;
}
@Override
public boolean executeInSequence() {
return true;
@ -107,6 +103,10 @@ public class MigrateVolumeCommand extends Command {
return volumeType;
}
// GUID of a host in the target cluster, as supplied when the command was built; may be null
// (presumably when the migration stays within one cluster — confirm against callers).
public String getHostGuidInTargetCluster() {
return hostGuidInTargetCluster;
}
public DataTO getSrcData() {
return srcData;
}
@ -131,10 +131,6 @@ public class MigrateVolumeCommand extends Command {
return destDetails;
}
public String getHostGuidInTargetCluster() {
return hostGuidInTargetCluster;
}
public int getWaitInMillSeconds() {
return getWait() * 1000;
}

View File

@ -43,11 +43,6 @@ import java.util.concurrent.TimeUnit;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import com.cloud.api.query.dao.DomainRouterJoinDao;
import com.cloud.api.query.dao.UserVmJoinDao;
import com.cloud.api.query.vo.DomainRouterJoinVO;
import com.cloud.api.query.vo.UserVmJoinVO;
import com.cloud.deployasis.dao.UserVmDeployAsIsDetailsDao;
import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.command.admin.vm.MigrateVMCmd;
@ -131,6 +126,10 @@ import com.cloud.agent.api.to.VirtualMachineTO;
import com.cloud.agent.manager.Commands;
import com.cloud.agent.manager.allocator.HostAllocator;
import com.cloud.alert.AlertManager;
import com.cloud.api.query.dao.DomainRouterJoinDao;
import com.cloud.api.query.dao.UserVmJoinDao;
import com.cloud.api.query.vo.DomainRouterJoinVO;
import com.cloud.api.query.vo.UserVmJoinVO;
import com.cloud.capacity.CapacityManager;
import com.cloud.configuration.Resource.ResourceType;
import com.cloud.dc.ClusterDetailsDao;
@ -148,6 +147,7 @@ import com.cloud.deploy.DeploymentPlan;
import com.cloud.deploy.DeploymentPlanner;
import com.cloud.deploy.DeploymentPlanner.ExcludeList;
import com.cloud.deploy.DeploymentPlanningManager;
import com.cloud.deployasis.dao.UserVmDeployAsIsDetailsDao;
import com.cloud.event.EventTypes;
import com.cloud.event.UsageEventUtils;
import com.cloud.event.UsageEventVO;
@ -2346,7 +2346,6 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
String msg = String.format("Resetting lastHost for VM %s(%s)", vm.getInstanceName(), vm.getUuid());
s_logger.debug(msg);
}
vm.setLastHostId(null);
vm.setPodIdToDeployIn(rootVolumePool.getPodId());
// OfflineVmwareMigration: a consecutive migration will fail probably (no host not pod)
@ -2409,7 +2408,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
if (hostId == null) {
List<VolumeVO> volumes = _volsDao.findByInstanceAndType(vm.getId(), Type.ROOT);
if (CollectionUtils.isNotEmpty(volumes)) {
VolumeVO rootVolume = volumes.get(0);
for (VolumeVO rootVolume : volumes) {
if (rootVolume.getPoolId() != null) {
StoragePoolVO pool = _storagePoolDao.findById(rootVolume.getPoolId());
if (pool != null && pool.getClusterId() != null) {
@ -2417,6 +2416,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
List<HostVO> hosts = _hostDao.findHypervisorHostInCluster(pool.getClusterId());
if (CollectionUtils.isNotEmpty(hosts)) {
hostId = hosts.get(0).getId();
break;
}
}
}
}
@ -2527,7 +2528,6 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
return result;
}
private void postStorageMigrationCleanup(VMInstanceVO vm, Map<Volume, StoragePool> volumeToPool, HostVO srcHost, Long srcClusterId) throws InsufficientCapacityException {
StoragePool rootVolumePool = null;
if (MapUtils.isNotEmpty(volumeToPool)) {
@ -2549,7 +2549,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
// If VM was cold migrated between clusters belonging to two different VMware DCs,
// unregister the VM from the source host and cleanup the associated VM files.
if (vm.getHypervisorType().equals(HypervisorType.VMware)) {
afterStorageMigrationVmwareVMcleanup(rootVolumePool, vm, srcHost, srcClusterId);
afterStorageMigrationVmwareVMCleanup(rootVolumePool, vm, srcHost, srcClusterId);
}
}
@ -2567,7 +2567,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
}
}
private void afterStorageMigrationVmwareVMcleanup(StoragePool destPool, VMInstanceVO vm, HostVO srcHost, Long srcClusterId) {
private void afterStorageMigrationVmwareVMCleanup(StoragePool destPool, VMInstanceVO vm, HostVO srcHost, Long srcClusterId) {
// OfflineVmwareMigration: this should only happen on storage migration, else the guru would already have issued the command
final Long destClusterId = destPool.getClusterId();
if (srcClusterId != null && destClusterId != null && ! srcClusterId.equals(destClusterId) && srcHost != null) {

View File

@ -230,11 +230,10 @@ public class DefaultVMSnapshotStrategy extends ManagerBase implements VMSnapshot
} else {
String errMsg = (answer == null) ? null : answer.getDetails();
s_logger.error("Delete vm snapshot " + vmSnapshot.getName() + " of vm " + userVm.getInstanceName() + " failed due to " + errMsg);
processAnswer(vmSnapshotVO, userVm, answer, hostId);
throw new CloudRuntimeException("Delete vm snapshot " + vmSnapshot.getName() + " of vm " + userVm.getInstanceName() + " failed due to " + errMsg);
}
} catch (OperationTimedoutException e) {
throw new CloudRuntimeException("Delete vm snapshot " + vmSnapshot.getName() + " of vm " + userVm.getInstanceName() + " failed due to " + e.getMessage());
} catch (AgentUnavailableException e) {
} catch (OperationTimedoutException | AgentUnavailableException e) {
throw new CloudRuntimeException("Delete vm snapshot " + vmSnapshot.getName() + " of vm " + userVm.getInstanceName() + " failed due to " + e.getMessage());
}
}
@ -254,9 +253,13 @@ public class DefaultVMSnapshotStrategy extends ManagerBase implements VMSnapshot
finalizeRevert(vmSnapshot, answer.getVolumeTOs());
vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.OperationSucceeded);
} else if (as instanceof DeleteVMSnapshotAnswer) {
if (as.getResult()) {
DeleteVMSnapshotAnswer answer = (DeleteVMSnapshotAnswer) as;
finalizeDelete(vmSnapshot, answer.getVolumeTOs());
vmSnapshotDao.remove(vmSnapshot.getId());
} else {
vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.OperationFailed);
}
}
}
});

View File

@ -210,7 +210,24 @@ public class VMwareGuru extends HypervisorGuruBase implements HypervisorGuru, Co
return vmwareVmImplementer.implement(vm, toVirtualMachineTO(vm), getClusterId(vm.getId()));
}
Long getClusterId(long vmId) {
/**
 * Derives a cluster id for a VM from the storage pool backing one of its ROOT volumes.
 *
 * @param vmId id of the VM whose ROOT volumes are inspected
 * @return the cluster id of the first ROOT volume whose pool has a cluster, or null when
 *         no ROOT volume yields one
 */
private Long getClusterIdFromVmVolume(long vmId) {
    List<VolumeVO> rootVolumes = _volumeDao.findByInstanceAndType(vmId, Volume.Type.ROOT);
    if (CollectionUtils.isEmpty(rootVolumes)) {
        return null;
    }
    for (VolumeVO rootVolume : rootVolumes) {
        Long poolId = rootVolume.getPoolId();
        if (poolId == null) {
            continue;
        }
        StoragePoolVO pool = _storagePoolDao.findById(poolId);
        if (pool != null && pool.getClusterId() != null) {
            // First pool with a cluster wins, mirroring the original break-on-first-match.
            return pool.getClusterId();
        }
    }
    return null;
}
private Long getClusterId(long vmId) {
Long clusterId = null;
Long hostId = null;
VMInstanceVO vm = _vmDao.findById(vmId);
@ -228,16 +245,7 @@ public class VMwareGuru extends HypervisorGuruBase implements HypervisorGuru, Co
if (host != null) {
clusterId = host.getClusterId();
} else {
List<VolumeVO> volumes = _volumeDao.findByInstanceAndType(vmId, Volume.Type.ROOT);
if (CollectionUtils.isNotEmpty(volumes)) {
VolumeVO rootVolume = volumes.get(0);
if (rootVolume.getPoolId() != null) {
StoragePoolVO pool = _storagePoolDao.findById(rootVolume.getPoolId());
if (pool != null && pool.getClusterId() != null) {
clusterId = pool.getClusterId();
}
}
}
clusterId = getClusterIdFromVmVolume(vmId);
}
return clusterId;
@ -1078,14 +1086,37 @@ public class VMwareGuru extends HypervisorGuruBase implements HypervisorGuru, Co
return null;
}
@Override public List<Command> finalizeMigrate(VirtualMachine vm, Map<Volume, StoragePool> volumeToPool) {
/**
 * A migration is inter-cluster only when both cluster ids are known and differ.
 *
 * @param srcClusterId  cluster id of the source, may be null
 * @param destClusterId cluster id of the destination, may be null
 * @return true when both ids are non-null and not equal
 */
private boolean isInterClusterMigration(Long srcClusterId, Long destClusterId) {
    if (srcClusterId == null || destClusterId == null) {
        return false;
    }
    return !srcClusterId.equals(destClusterId);
}
/**
 * Picks a host in the destination cluster whose GUID anchors the vMotion.
 *
 * @param isInterClusterMigration whether the migration crosses cluster boundaries
 * @param destClusterId           cluster id to pick a hypervisor host from
 * @return the chosen host's GUID, or null for intra-cluster migrations
 * @throws CloudRuntimeException when no hypervisor host exists in the destination cluster
 */
private String getHostGuidInTargetCluster(boolean isInterClusterMigration, Long destClusterId) {
    if (!isInterClusterMigration) {
        return null;
    }
    // Without host vMotion might fail between non-shared storages with error similar to,
    // https://kb.vmware.com/s/article/1003795
    // As this is offline migration VM won't be started on this host
    Host chosenHost = null;
    List<HostVO> candidateHosts = _hostDao.findHypervisorHostInCluster(destClusterId);
    if (CollectionUtils.isNotEmpty(candidateHosts)) {
        chosenHost = candidateHosts.get(0);
    }
    if (chosenHost == null) {
        throw new CloudRuntimeException("Migration failed, unable to find suitable target host for VM placement while migrating between storage pools of different clusters without shared storages");
    }
    return chosenHost.getGuid();
}
@Override
public List<Command> finalizeMigrate(VirtualMachine vm, Map<Volume, StoragePool> volumeToPool) {
List<Command> commands = new ArrayList<Command>();
// OfflineVmwareMigration: specialised migration command
List<VolumeTO> vols = new ArrayList<>();
List<Pair<VolumeTO, StorageFilerTO>> volumeToFilerTo = new ArrayList<Pair<VolumeTO, StorageFilerTO>>();
Long poolClusterId = null;
Host hostInTargetCluster = null;
for (Map.Entry<Volume, StoragePool> entry : volumeToPool.entrySet()) {
Volume volume = entry.getKey();
StoragePool pool = entry.getValue();
@ -1099,21 +1130,9 @@ public class VMwareGuru extends HypervisorGuruBase implements HypervisorGuru, Co
}
final Long destClusterId = poolClusterId;
final Long srcClusterId = getClusterId(vm.getId());
final boolean isInterClusterMigration = srcClusterId != null && destClusterId != null && ! srcClusterId.equals(destClusterId);
if (isInterClusterMigration) {
// Without host vMotion might fail between non-shared storages with error similar to,
// https://kb.vmware.com/s/article/1003795
// As this is offline migration VM won't be started on this host
List<HostVO> hosts = _hostDao.findHypervisorHostInCluster(destClusterId);
if (CollectionUtils.isNotEmpty(hosts)) {
hostInTargetCluster = hosts.get(0);
}
if (hostInTargetCluster == null) {
throw new CloudRuntimeException("Migration failed, unable to find suitable target host for VM placement while migrating between storage pools of different clusters without shared storages");
}
}
final boolean isInterClusterMigration = isInterClusterMigration(destClusterId, srcClusterId);
MigrateVmToPoolCommand migrateVmToPoolCommand = new MigrateVmToPoolCommand(vm.getInstanceName(),
volumeToFilerTo, hostInTargetCluster == null ? null : hostInTargetCluster.getGuid(), true);
volumeToFilerTo, getHostGuidInTargetCluster(isInterClusterMigration, destClusterId), true);
commands.add(migrateVmToPoolCommand);
// OfflineVmwareMigration: cleanup if needed

View File

@ -48,8 +48,6 @@ import java.util.stream.Collectors;
import javax.naming.ConfigurationException;
import javax.xml.datatype.XMLGregorianCalendar;
import com.cloud.agent.api.SetupPersistentNetworkAnswer;
import com.cloud.agent.api.SetupPersistentNetworkCommand;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.storage.command.CopyCommand;
import org.apache.cloudstack.storage.command.StorageSubSystemCommand;
@ -154,6 +152,8 @@ import com.cloud.agent.api.ScaleVmCommand;
import com.cloud.agent.api.SetupAnswer;
import com.cloud.agent.api.SetupCommand;
import com.cloud.agent.api.SetupGuestNetworkCommand;
import com.cloud.agent.api.SetupPersistentNetworkAnswer;
import com.cloud.agent.api.SetupPersistentNetworkCommand;
import com.cloud.agent.api.StartAnswer;
import com.cloud.agent.api.StartCommand;
import com.cloud.agent.api.StartupCommand;
@ -4511,7 +4511,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
volumeDeviceKey.put(diskId, volumeId);
}
private ManagedObjectReference getTargetDatastoreMOReference(String destinationPool, VmwareHypervisorHost hyperHost) {
private ManagedObjectReference getTargetDatastoreMOReference(String destinationPool,
VmwareHypervisorHost hyperHost) {
ManagedObjectReference morDs;
try {
if (s_logger.isDebugEnabled()) {
@ -4632,9 +4633,9 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
// we need to spawn a worker VM to attach the volume to and move it
morSourceDS = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, cmd.getSourcePool().getUuid());
sourceDsMo = new DatastoreMO(hyperHost.getContext(), morSourceDS);
VmwareHypervisorHost hostInTargetCluster = VmwareHelper.getHostMOFromHostName(getServiceContext(),
VmwareHypervisorHost hyperHostInTargetCluster = VmwareHelper.getHostMOFromHostName(getServiceContext(),
cmd.getHostGuidInTargetCluster());
VmwareHypervisorHost dsHost = hostInTargetCluster == null ? hyperHost : hostInTargetCluster;
VmwareHypervisorHost dsHost = hyperHostInTargetCluster == null ? hyperHost : hyperHostInTargetCluster;
String targetDsName = cmd.getTargetPool().getUuid();
morDestinationDS = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(dsHost, targetDsName);
if(morDestinationDS == null) {
@ -4649,14 +4650,6 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
isvVolsInvolved = true;
vmName = getWorkerName(getServiceContext(), cmd, 0, destinationDsMo);
}
String hardwareVersion = null;
if (hostInTargetCluster != null) {
Integer sourceHardwareVersion = HypervisorHostHelper.getHostHardwareVersion(hyperHost);
Integer destinationHardwareVersion = HypervisorHostHelper.getHostHardwareVersion(dsHost);
if (sourceHardwareVersion != null && destinationHardwareVersion != null && !sourceHardwareVersion.equals(destinationHardwareVersion)) {
hardwareVersion = String.valueOf(Math.min(sourceHardwareVersion, destinationHardwareVersion));
}
}
// OfflineVmwareMigration: refactor for re-use
// OfflineVmwareMigration: 1. find data(store)
@ -4665,7 +4658,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
s_logger.info("Create worker VM " + vmName);
// OfflineVmwareMigration: 2. create the worker with access to the data(store)
vmMo = HypervisorHostHelper.createWorkerVM(hyperHost, sourceDsMo, vmName, hardwareVersion);
vmMo = HypervisorHostHelper.createWorkerVM(hyperHost, sourceDsMo, vmName,
HypervisorHostHelper.getMinimumHostHardwareVersion(hyperHost, hyperHostInTargetCluster));
if (vmMo == null) {
// OfflineVmwareMigration: don't throw a general Exception but think of a specific one
throw new CloudRuntimeException("Unable to create a worker VM for volume operation");

View File

@ -35,6 +35,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.to.VolumeObjectTO;
import org.apache.commons.collections.CollectionUtils;
import org.apache.log4j.Logger;
@ -86,20 +87,19 @@ public class VmwareStorageMotionStrategy implements DataMotionStrategy {
@Override
public StrategyPriority canHandle(DataObject srcData, DataObject destData) {
// OfflineVmwareMigration: return StrategyPriority.HYPERVISOR when destData is in a storage pool in the same vmware-cluster and both are volumes
// OfflineVmwareMigration: return StrategyPriority.HYPERVISOR when destData is in a storage pool in the same pod or one of srcData & destData is in a zone-wide pool and both are volumes
if (isOnVmware(srcData, destData)
&& isOnPrimary(srcData, destData)
&& isVolumesOnly(srcData, destData)
&& isIntraPodOrZoneWideStoreInvolved(srcData, destData)
&& isDettached(srcData)) {
if (s_logger.isDebugEnabled()) {
String msg = String.format("%s can handle the request because %d(%s) and %d(%s) share the VMware cluster %s (== %s)"
String msg = String.format("%s can handle the request because %d(%s) and %d(%s) share the pod"
, this.getClass()
, srcData.getId()
, srcData.getUuid()
, destData.getId()
, destData.getUuid()
, storagePoolDao.findById(srcData.getDataStore().getId()).getClusterId()
, storagePoolDao.findById(destData.getDataStore().getId()).getClusterId());
, destData.getUuid());
s_logger.debug(msg);
}
return StrategyPriority.HYPERVISOR;
@ -107,6 +107,17 @@ public class VmwareStorageMotionStrategy implements DataMotionStrategy {
return StrategyPriority.CANT_HANDLE;
}
/**
 * Decides whether the two data objects live in the same pod, or whether a zone-wide
 * pool is involved on either side.
 *
 * @param srcData  source data object
 * @param destData destination data object
 * @return true when both pools have pod ids and they match, or (when either pod id is
 *         null) when either pool is zone-scoped
 */
private boolean isIntraPodOrZoneWideStoreInvolved(DataObject srcData, DataObject destData) {
    StoragePoolVO sourcePool = storagePoolDao.findById(srcData.getDataStore().getId());
    StoragePoolVO destinationPool = storagePoolDao.findById(destData.getDataStore().getId());
    Long sourcePodId = sourcePool.getPodId();
    Long destinationPodId = destinationPool.getPodId();
    if (sourcePodId != null && destinationPodId != null) {
        return sourcePodId.equals(destinationPodId);
    }
    boolean zoneWideInvolved = ScopeType.ZONE.equals(sourcePool.getScope())
            || ScopeType.ZONE.equals(destinationPool.getScope());
    return zoneWideInvolved;
}
private boolean isDettached(DataObject srcData) {
VolumeVO volume = volDao.findById(srcData.getId());
return volume.getInstanceId() == null;
@ -127,30 +138,37 @@ public class VmwareStorageMotionStrategy implements DataMotionStrategy {
&& HypervisorType.VMware.equals(destData.getTO().getHypervisorType());
}
private boolean isIntraCluster(DataObject srcData, DataObject destData) {
DataStore srcStore = srcData.getDataStore();
StoragePool srcPool = storagePoolDao.findById(srcStore.getId());
DataStore destStore = destData.getDataStore();
StoragePool destPool = storagePoolDao.findById(destStore.getId());
if (srcPool.getClusterId() != null && destPool.getClusterId() != null) {
return srcPool.getClusterId().equals(destPool.getClusterId());
private Pair<Long, String> getHostIdForVmAndHostGuidInTargetCluster(DataObject srcData, DataObject destData) {
StoragePool sourcePool = (StoragePool) srcData.getDataStore();
ScopeType sourceScopeType = srcData.getDataStore().getScope().getScopeType();
StoragePool targetPool = (StoragePool) destData.getDataStore();
ScopeType targetScopeType = destData.getDataStore().getScope().getScopeType();
Long hostId = null;
String hostGuidInTargetCluster = null;
if (ScopeType.CLUSTER.equals(sourceScopeType)) {
// Find Volume source cluster and select any Vmware hypervisor host to attach worker VM
hostId = findSuitableHostIdForWorkerVmPlacement(sourcePool.getClusterId());
if (hostId == null) {
throw new CloudRuntimeException("Offline Migration failed, unable to find suitable host for worker VM placement in the cluster of storage pool: " + sourcePool.getName());
}
return false;
if (ScopeType.CLUSTER.equals(targetScopeType) && !sourcePool.getClusterId().equals(targetPool.getClusterId())) {
// Without host vMotion might fail between non-shared storages with error similar to,
// https://kb.vmware.com/s/article/1003795
List<HostVO> hosts = hostDao.findHypervisorHostInCluster(targetPool.getClusterId());
if (CollectionUtils.isNotEmpty(hosts)) {
hostGuidInTargetCluster = hosts.get(0).getGuid();
}
/**
* Ensure that the scope of source and destination storage pools match
*
* @param srcData
* @param destData
* @return
*/
private boolean isStoreScopeEqual(DataObject srcData, DataObject destData) {
DataStore srcStore = srcData.getDataStore();
DataStore destStore = destData.getDataStore();
String msg = String.format("Storage scope of source pool is %s and of destination pool is %s", srcStore.getScope().toString(), destStore.getScope().toString());
s_logger.debug(msg);
return srcStore.getScope().getScopeType() == (destStore.getScope().getScopeType());
if (hostGuidInTargetCluster == null) {
throw new CloudRuntimeException("Offline Migration failed, unable to find suitable target host for worker VM placement while migrating between storage pools of different cluster without shared storages");
}
}
} else if (ScopeType.CLUSTER.equals(targetScopeType)) {
hostId = findSuitableHostIdForWorkerVmPlacement(targetPool.getClusterId());
if (hostId == null) {
throw new CloudRuntimeException("Offline Migration failed, unable to find suitable host for worker VM placement in the cluster of storage pool: " + targetPool.getName());
}
}
return new Pair<>(hostId, hostGuidInTargetCluster);
}
@Override
@ -187,41 +205,15 @@ public class VmwareStorageMotionStrategy implements DataMotionStrategy {
// OfflineVmwareMigration: we shouldn't be here as we would have refused in the canHandle call
throw new UnsupportedOperationException();
}
Pair<Long, String> hostIdForVmAndHostGuidInTargetCluster = getHostIdForVmAndHostGuidInTargetCluster(srcData, destData);
Long hostId = hostIdForVmAndHostGuidInTargetCluster.first();
StoragePool sourcePool = (StoragePool) srcData.getDataStore();
ScopeType sourceScopeType = srcData.getDataStore().getScope().getScopeType();
StoragePool targetPool = (StoragePool) destData.getDataStore();
ScopeType targetScopeType = destData.getDataStore().getScope().getScopeType();
Long hostId = null;
String hostGuidInTargetCluster = null;
if (ScopeType.CLUSTER.equals(sourceScopeType)) {
// Find Volume source cluster and select any Vmware hypervisor host to attach worker VM
hostId = findSuitableHostIdForWorkerVmPlacement(sourcePool.getClusterId());
if (hostId == null) {
throw new CloudRuntimeException("Offline Migration failed, unable to find suitable host for worker VM placement in the cluster of storage pool: " + sourcePool.getName());
}
if (ScopeType.CLUSTER.equals(targetScopeType) && !sourcePool.getClusterId().equals(targetPool.getClusterId())) {
// Without host vMotion might fail between non-shared storages with error similar to,
// https://kb.vmware.com/s/article/1003795
List<HostVO> hosts = hostDao.findHypervisorHostInCluster(targetPool.getClusterId());
if (CollectionUtils.isNotEmpty(hosts)) {
hostGuidInTargetCluster = hosts.get(0).getGuid();
}
if (hostGuidInTargetCluster == null) {
throw new CloudRuntimeException("Offline Migration failed, unable to find suitable target host for worker VM placement while migrating between storage pools of different cluster without shared storages");
}
}
} else if (ScopeType.CLUSTER.equals(targetScopeType)) {
hostId = findSuitableHostIdForWorkerVmPlacement(targetPool.getClusterId());
if (hostId == null) {
throw new CloudRuntimeException("Offline Migration failed, unable to find suitable host for worker VM placement in the cluster of storage pool: " + targetPool.getName());
}
}
MigrateVolumeCommand cmd = new MigrateVolumeCommand(srcData.getId()
, srcData.getTO().getPath()
, sourcePool
, targetPool
, hostGuidInTargetCluster);
// OfflineVmwareMigration: should be ((StoragePool)srcData.getDataStore()).getHypervisor() but that is NULL, so hardcoding
, hostIdForVmAndHostGuidInTargetCluster.second());
Answer answer;
if (hostId != null) {
answer = agentMgr.easySend(hostId, cmd);

View File

@ -21,10 +21,14 @@ import java.util.ArrayList;
import java.util.List;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import javax.naming.NamingException;
import javax.naming.ldap.LdapContext;
import java.util.Map;
import java.util.UUID;
import com.cloud.user.AccountManager;
import com.cloud.utils.component.ComponentLifecycleBase;
import com.cloud.utils.exception.CloudRuntimeException;
import org.apache.cloudstack.api.LdapValidator;
import org.apache.cloudstack.api.command.LDAPConfigCmd;
@ -42,6 +46,8 @@ import org.apache.cloudstack.api.response.LdapConfigurationResponse;
import org.apache.cloudstack.api.response.LdapUserResponse;
import org.apache.cloudstack.api.response.LinkAccountToLdapResponse;
import org.apache.cloudstack.api.response.LinkDomainToLdapResponse;
import org.apache.cloudstack.framework.messagebus.MessageBus;
import org.apache.cloudstack.framework.messagebus.MessageSubscriber;
import org.apache.cloudstack.ldap.dao.LdapConfigurationDao;
import org.apache.cloudstack.ldap.dao.LdapTrustMapDao;
import org.apache.commons.lang.Validate;
@ -57,7 +63,7 @@ import com.cloud.user.dao.AccountDao;
import com.cloud.utils.Pair;
@Component
public class LdapManagerImpl implements LdapManager, LdapValidator {
public class LdapManagerImpl extends ComponentLifecycleBase implements LdapManager, LdapValidator {
private static final Logger LOGGER = Logger.getLogger(LdapManagerImpl.class.getName());
@Inject
@ -80,6 +86,9 @@ public class LdapManagerImpl implements LdapManager, LdapValidator {
@Inject
LdapTrustMapDao _ldapTrustMapDao;
@Inject
private MessageBus messageBus;
// No-arg constructor; delegates to the superclass (ComponentLifecycleBase) constructor.
public LdapManagerImpl() {
super();
}
@ -93,6 +102,33 @@ public class LdapManagerImpl implements LdapManager, LdapValidator {
_ldapConfiguration = ldapConfiguration;
}
@Override
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
super.configure(name, params);
LOGGER.debug("Configuring LDAP Manager");
// On account removal, drop any LDAP trust-map link for that account so stale
// account-to-LDAP mappings do not linger after the account is gone.
messageBus.subscribe(AccountManager.MESSAGE_REMOVE_ACCOUNT_EVENT, new MessageSubscriber() {
@Override
public void onPublishMessage(String senderAddress, String subject, Object args) {
try {
// args carries the removed account's id; look it up even if already flagged removed.
final Account account = accountDao.findByIdIncludingRemoved((Long) args);
long domainId = account.getDomainId();
LdapTrustMapVO ldapTrustMapVO = _ldapTrustMapDao.findByAccount(domainId, account.getAccountId());
if (ldapTrustMapVO != null) {
String msg = String.format("Removing link between LDAP: %s - type: %s and account: %s on domain: %s",
ldapTrustMapVO.getName(), ldapTrustMapVO.getType().name(), account.getAccountId(), domainId);
LOGGER.debug(msg);
_ldapTrustMapDao.remove(ldapTrustMapVO.getId());
}
} catch (final Exception e) {
// Best-effort cleanup: log and continue rather than failing message dispatch.
LOGGER.error("Caught exception while removing account linked to LDAP", e);
}
}
});
return true;
}
@Override
public LdapConfigurationResponse addConfiguration(final LdapAddConfigurationCmd cmd) throws InvalidParameterValueException {
return addConfigurationInternal(cmd.getHostname(),cmd.getPort(),cmd.getDomainId());

View File

@ -185,6 +185,7 @@ import com.cloud.vm.VmWorkResizeVolume;
import com.cloud.vm.VmWorkSerializer;
import com.cloud.vm.VmWorkTakeVolumeSnapshot;
import com.cloud.vm.dao.UserVmDao;
import com.cloud.vm.dao.UserVmDetailsDao;
import com.cloud.vm.dao.VMInstanceDao;
import com.cloud.vm.snapshot.VMSnapshotVO;
import com.cloud.vm.snapshot.dao.VMSnapshotDao;
@ -228,6 +229,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
@Inject
private UserVmDao _userVmDao;
@Inject
private UserVmDetailsDao userVmDetailsDao;
@Inject
private UserVmService _userVmService;
@Inject
private VolumeDataStoreDao _volumeStoreDao;
@ -952,9 +955,10 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
HypervisorType hypervisorType = _volsDao.getHypervisorType(volume.getId());
if (hypervisorType != HypervisorType.KVM && hypervisorType != HypervisorType.XenServer && hypervisorType != HypervisorType.VMware && hypervisorType != HypervisorType.Any
if (hypervisorType != HypervisorType.KVM && hypervisorType != HypervisorType.XenServer
&& hypervisorType != HypervisorType.VMware && hypervisorType != HypervisorType.Any
&& hypervisorType != HypervisorType.None) {
throw new InvalidParameterValueException("Hypervisor " + hypervisorType + " does not support rootdisksize override");
throw new InvalidParameterValueException("Hypervisor " + hypervisorType + " does not support volume resize");
}
if (volume.getState() != Volume.State.Ready && volume.getState() != Volume.State.Allocated) {

View File

@ -26,6 +26,7 @@ import java.util.TimeZone;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import com.cloud.domain.Domain;
import org.apache.cloudstack.api.command.admin.usage.GenerateUsageRecordsCmd;
import org.apache.cloudstack.api.command.admin.usage.ListUsageRecordsCmd;
import org.apache.cloudstack.api.command.admin.usage.RemoveRawUsageRecordsCmd;
@ -200,22 +201,41 @@ public class UsageServiceImpl extends ManagerBase implements UsageService, Manag
}
}
boolean isAdmin = false;
boolean isDomainAdmin = false;
boolean ignoreAccountId = false;
boolean isDomainAdmin = _accountService.isDomainAdmin(caller.getId());
boolean isNormalUser = _accountService.isNormalUser(caller.getId());
//If accountId couldn't be found using accountName and domainId, get it from userContext
if (accountId == null) {
accountId = caller.getId();
//List records for all the accounts if the caller account is of type admin.
//If account_id or account_name is explicitly mentioned, list records for the specified account only even if the caller is of type admin
if (_accountService.isRootAdmin(caller.getId())) {
isAdmin = true;
} else if (_accountService.isDomainAdmin(caller.getId())) {
isDomainAdmin = true;
}
ignoreAccountId = _accountService.isRootAdmin(caller.getId());
s_logger.debug("Account details not available. Using userContext accountId: " + accountId);
}
// Check if a domain admin is allowed to access the requested domain id
if (isDomainAdmin) {
if (domainId != null) {
Account callerAccount = _accountService.getAccount(caller.getId());
Domain domain = _domainDao.findById(domainId);
_accountService.checkAccess(callerAccount, domain);
} else {
// Domain admins can only access their own domain's usage records.
// Set the domain if not specified.
domainId = caller.getDomainId();
}
if (cmd.getAccountId() != null) {
// Check if a domain admin is allowed to access the requested account info.
checkDomainAdminAccountAccess(accountId, domainId);
}
}
// By default users do not have access to this API.
// Adding checks here in case someone changes the default access.
checkUserAccess(cmd, accountId, caller, isNormalUser);
Date startDate = cmd.getStartDate();
Date endDate = cmd.getEndDate();
if (startDate.after(endDate)) {
@ -234,23 +254,28 @@ public class UsageServiceImpl extends ManagerBase implements UsageService, Manag
SearchCriteria<UsageVO> sc = _usageDao.createSearchCriteria();
if (accountId != -1 && accountId != Account.ACCOUNT_ID_SYSTEM && !isAdmin && !isDomainAdmin) {
if (accountId != -1 && accountId != Account.ACCOUNT_ID_SYSTEM && !ignoreAccountId) {
// account exists and either domain on user role
// If not recursive and the account belongs to the user/domain admin, or the account was passed in, filter
if ((accountId == caller.getId() && !cmd.isRecursive()) || cmd.getAccountId() != null){
sc.addAnd("accountId", SearchCriteria.Op.EQ, accountId);
}
if (isDomainAdmin) {
SearchCriteria<DomainVO> sdc = _domainDao.createSearchCriteria();
sdc.addOr("path", SearchCriteria.Op.LIKE, _domainDao.findById(caller.getDomainId()).getPath() + "%");
List<DomainVO> domains = _domainDao.search(sdc, null);
List<Long> domainIds = new ArrayList<Long>();
for (DomainVO domain : domains)
domainIds.add(domain.getId());
sc.addAnd("domainId", SearchCriteria.Op.IN, domainIds.toArray());
}
if (domainId != null) {
if (cmd.isRecursive()) {
SearchCriteria<DomainVO> sdc = _domainDao.createSearchCriteria();
sdc.addOr("path", SearchCriteria.Op.LIKE, _domainDao.findById(domainId).getPath() + "%");
List<DomainVO> domains = _domainDao.search(sdc, null);
List<Long> domainIds = new ArrayList<Long>();
for (DomainVO domain : domains) {
domainIds.add(domain.getId());
}
sc.addAnd("domainId", SearchCriteria.Op.IN, domainIds.toArray());
} else {
sc.addAnd("domainId", SearchCriteria.Op.EQ, domainId);
}
}
if (usageType != null) {
sc.addAnd("usageType", SearchCriteria.Op.EQ, usageType);
@ -372,6 +397,46 @@ public class UsageServiceImpl extends ManagerBase implements UsageService, Manag
return new Pair<List<? extends Usage>, Integer>(usageRecords.first(), usageRecords.second());
}
/**
 * Enforces the usage-record visibility rules for normal (non-admin) callers.
 * Admin and domain-admin callers pass through untouched; a normal user may
 * only list records for their own account, may not list recursively, and may
 * not list by domain.
 *
 * @param cmd          the incoming ListUsageRecords API command
 * @param accountId    the account whose records are being requested
 * @param caller       the calling account
 * @param isNormalUser true when the caller has a plain user role
 * @throws PermissionDeniedException when a normal user exceeds their scope
 */
private void checkUserAccess(ListUsageRecordsCmd cmd, Long accountId, Account caller, boolean isNormalUser) {
    // Only plain users are restricted here; admins are validated elsewhere.
    if (!isNormalUser) {
        return;
    }
    // A user may only see their own account's records.
    if (caller.getId() != accountId) {
        throw new PermissionDeniedException("Users are only allowed to list usage records for their own account.");
    }
    // Recursive listing would expose records of other accounts.
    if (cmd.isRecursive()) {
        throw new PermissionDeniedException("Users are not allowed to list usage records recursively.");
    }
    // Domain-wide listing is an admin capability.
    if (cmd.getDomainId() != null) {
        throw new PermissionDeniedException("Users are not allowed to list usage records for a domain");
    }
}
/**
 * Verifies that the requested account belongs to the given domain or to one of
 * its child domains, i.e. that a domain admin scoped to {@code domainId} is
 * entitled to read that account's usage records.
 *
 * @param accountId id of the account whose records are requested
 * @param domainId  id of the domain the caller administers
 * @throws PermissionDeniedException when the account lies outside the domain tree
 */
private void checkDomainAdminAccountAccess(Long accountId, Long domainId) {
    Account account = _accountService.getAccount(accountId);
    // Direct membership in the admin's domain is sufficient.
    boolean allowed = account.getDomainId() == domainId;
    if (!allowed) {
        // Otherwise, search the admin domain's subtree for the account's domain.
        List<DomainVO> childDomains = _domainDao.findAllChildren(_domainDao.findById(domainId).getPath(), domainId);
        for (DomainVO child : childDomains) {
            if (account.getDomainId() == child.getId()) {
                allowed = true;
                break;
            }
        }
    }
    if (!allowed) {
        throw new PermissionDeniedException("Domain admins may only retrieve usage records for accounts in their own domain and child domains.");
    }
}
@Override
public TimeZone getUsageTimezone() {
return _usageTimezone;

View File

@ -7084,6 +7084,12 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
}
newVols.add(newVol);
if (userVmDetailsDao.findDetail(vm.getId(), VmDetailConstants.ROOT_DISK_SIZE) == null && !newVol.getSize().equals(template.getSize())) {
VolumeVO resizedVolume = (VolumeVO) newVol;
resizedVolume.setSize(template.getSize());
_volsDao.update(resizedVolume.getId(), resizedVolume);
}
// 1. Save usage event and update resource count for user vm volumes
_resourceLimitMgr.incrementResourceCount(newVol.getAccountId(), ResourceType.volume, newVol.isDisplay());
_resourceLimitMgr.incrementResourceCount(newVol.getAccountId(), ResourceType.primary_storage, newVol.isDisplay(), new Long(newVol.getSize()));

View File

@ -37,7 +37,6 @@ COMMIT
-A INPUT -i eth0 -p udp -m udp --dport 53 -j ACCEPT
-A INPUT -i eth0 -p tcp -m tcp --dport 53 -j ACCEPT
-A INPUT -i eth1 -p tcp -m tcp -m state --state NEW,ESTABLISHED --dport 3922 -j ACCEPT
-A INPUT -i eth0 -p tcp -m tcp -m state --state NEW --dport 80 -j ACCEPT
-A FORWARD -i eth0 -o eth1 -m state --state RELATED,ESTABLISHED -j ACCEPT
-A FORWARD -i eth2 -o eth0 -m state --state RELATED,ESTABLISHED -j ACCEPT
-A FORWARD -i eth0 -o eth0 -m state --state NEW -j ACCEPT

View File

@ -418,6 +418,8 @@ class CsIP:
["filter", "", "-A INPUT -i %s -p tcp -m tcp --dport 53 -s %s -j ACCEPT" % (self.dev, guestNetworkCidr)])
self.fw.append(
["filter", "", "-A INPUT -i %s -p tcp -m tcp --dport 80 -s %s -m state --state NEW -j ACCEPT" % (self.dev, guestNetworkCidr)])
self.fw.append(
["filter", "", "-A INPUT -i %s -p tcp -m tcp --dport 443 -s %s -m state --state NEW -j ACCEPT" % (self.dev, guestNetworkCidr)])
self.fw.append(
["filter", "", "-A INPUT -i %s -p tcp -m tcp --dport 8080 -s %s -m state --state NEW -j ACCEPT" % (self.dev, guestNetworkCidr)])
self.fw.append(
@ -467,9 +469,10 @@ class CsIP:
["filter", "", "-A INPUT -i %s -p udp -m udp --dport 53 -s %s -j ACCEPT" % (self.dev, guestNetworkCidr)])
self.fw.append(
["filter", "", "-A INPUT -i %s -p tcp -m tcp --dport 53 -s %s -j ACCEPT" % (self.dev, guestNetworkCidr)])
self.fw.append(
["filter", "", "-A INPUT -i %s -p tcp -m tcp --dport 80 -s %s -m state --state NEW -j ACCEPT" % (self.dev, guestNetworkCidr)])
self.fw.append(
["filter", "", "-A INPUT -i %s -p tcp -m tcp --dport 443 -s %s -m state --state NEW -j ACCEPT" % (self.dev, guestNetworkCidr)])
self.fw.append(
["filter", "", "-A INPUT -i %s -p tcp -m tcp --dport 8080 -s %s -m state --state NEW -j ACCEPT" % (self.dev, guestNetworkCidr)])
self.fw.append(["mangle", "",

View File

@ -59,16 +59,6 @@ class CsApache(CsApp):
file.commit()
CsHelper.execute2("systemctl restart apache2", False)
self.fw.append([
"", "front",
"-A INPUT -i %s -d %s/32 -p tcp -m tcp -m state --state NEW --dport 80 -j ACCEPT" % (self.dev, self.ip)
])
self.fw.append([
"", "front",
"-A INPUT -i %s -d %s/32 -p tcp -m tcp -m state --state NEW --dport 443 -j ACCEPT" % (self.dev, self.ip)
])
class CsPasswdSvc():
"""

View File

@ -1443,6 +1443,11 @@ class TestKVMLiveMigration(cloudstackTestCase):
if len(self.hosts) < 2:
self.skipTest("Requires at least two hosts for performing migration related tests")
for host in self.hosts:
if host.details['Host.OS'] in ['CentOS']:
self.skipTest("live migration is not stabily supported on CentOS")
def tearDown(self):
try:
cleanup_resources(self.apiclient, self.cleanup)

View File

@ -2267,4 +2267,21 @@ public class HypervisorHostHelper {
version = apiVersionHardwareVersionMap.get(hostApiVersion);
return version;
}
/**
 * Returns the minimum hardware version, rendered as a String, of two hosts when
 * both hosts are non-null, both report a hardware version, and those versions
 * differ. Returns {@code null} in every other case (either host null, either
 * version unknown, or versions equal).
 *
 * @param host1 first host; may be null
 * @param host2 second host; may be null
 * @return the smaller of the two hardware versions as a String, or null
 */
public static String getMinimumHostHardwareVersion(VmwareHypervisorHost host1, VmwareHypervisorHost host2) {
    String hardwareVersion = null;
    // Fixed: the original used the non-short-circuit bitwise '&' for the null
    // checks; '&&' is the correct logical operator here.
    if (host1 != null && host2 != null) {
        Integer host1Version = getHostHardwareVersion(host1);
        Integer host2Version = getHostHardwareVersion(host2);
        if (host1Version != null && host2Version != null && !host1Version.equals(host2Version)) {
            hardwareVersion = String.valueOf(Math.min(host1Version, host2Version));
        }
    }
    return hardwareVersion;
}
}

View File

@ -16,6 +16,8 @@
// under the License.
package com.cloud.hypervisor.vmware.mo;
import static com.cloud.utils.NumbersUtil.toHumanReadableSize;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.ByteArrayInputStream;
@ -39,6 +41,14 @@ import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.lang.StringUtils;
import org.apache.log4j.Logger;
import com.cloud.hypervisor.vmware.mo.SnapshotDescriptor.SnapshotInfo;
import com.cloud.hypervisor.vmware.util.VmwareContext;
import com.cloud.hypervisor.vmware.util.VmwareHelper;
import com.cloud.utils.ActionDelegate;
import com.cloud.utils.Pair;
import com.cloud.utils.Ternary;
import com.cloud.utils.concurrency.NamedThreadFactory;
import com.cloud.utils.script.Script;
import com.google.gson.Gson;
import com.vmware.vim25.ArrayOfManagedObjectReference;
import com.vmware.vim25.ChoiceOption;
@ -92,6 +102,7 @@ import com.vmware.vim25.VirtualMachineConfigInfo;
import com.vmware.vim25.VirtualMachineConfigOption;
import com.vmware.vim25.VirtualMachineConfigSpec;
import com.vmware.vim25.VirtualMachineConfigSummary;
import com.vmware.vim25.VirtualMachineDefinedProfileSpec;
import com.vmware.vim25.VirtualMachineFileInfo;
import com.vmware.vim25.VirtualMachineFileLayoutEx;
import com.vmware.vim25.VirtualMachineMessage;
@ -106,18 +117,6 @@ import com.vmware.vim25.VirtualMachineSnapshotInfo;
import com.vmware.vim25.VirtualMachineSnapshotTree;
import com.vmware.vim25.VirtualSCSIController;
import com.vmware.vim25.VirtualSCSISharing;
import com.vmware.vim25.VirtualMachineDefinedProfileSpec;
import com.cloud.hypervisor.vmware.mo.SnapshotDescriptor.SnapshotInfo;
import com.cloud.hypervisor.vmware.util.VmwareContext;
import com.cloud.hypervisor.vmware.util.VmwareHelper;
import com.cloud.utils.ActionDelegate;
import com.cloud.utils.Pair;
import com.cloud.utils.Ternary;
import com.cloud.utils.concurrency.NamedThreadFactory;
import com.cloud.utils.script.Script;
import static com.cloud.utils.NumbersUtil.toHumanReadableSize;
public class VirtualMachineMO extends BaseMO {
private static final Logger s_logger = Logger.getLogger(VirtualMachineMO.class);
@ -460,9 +459,13 @@ public class VirtualMachineMO extends BaseMO {
return false;
}
public boolean changeDatastore(ManagedObjectReference morDataStore) throws Exception {
public boolean changeDatastore(ManagedObjectReference morDataStore, VmwareHypervisorHost targetHost) throws Exception {
VirtualMachineRelocateSpec relocateSpec = new VirtualMachineRelocateSpec();
relocateSpec.setDatastore(morDataStore);
if (targetHost != null) {
relocateSpec.setHost(targetHost.getMor());
relocateSpec.setPool(targetHost.getHyperHostOwnerResourcePool());
}
ManagedObjectReference morTask = _context.getService().relocateVMTask(_mor, relocateSpec, null);

View File

@ -40,6 +40,17 @@ import javax.xml.datatype.XMLGregorianCalendar;
import org.apache.commons.lang.StringUtils;
import org.apache.log4j.Logger;
import com.cloud.hypervisor.vmware.mo.CustomFieldConstants;
import com.cloud.hypervisor.vmware.mo.DatastoreMO;
import com.cloud.hypervisor.vmware.mo.DiskControllerType;
import com.cloud.hypervisor.vmware.mo.HostMO;
import com.cloud.hypervisor.vmware.mo.LicenseAssignmentManagerMO;
import com.cloud.hypervisor.vmware.mo.VirtualEthernetCardType;
import com.cloud.hypervisor.vmware.mo.VirtualMachineMO;
import com.cloud.hypervisor.vmware.mo.VmwareHypervisorHost;
import com.cloud.utils.Pair;
import com.cloud.utils.Ternary;
import com.cloud.utils.exception.ExceptionUtil;
import com.vmware.vim25.DistributedVirtualSwitchPortConnection;
import com.vmware.vim25.DynamicProperty;
import com.vmware.vim25.GuestOsDescriptor;
@ -56,7 +67,6 @@ import com.vmware.vim25.VirtualCdromRemotePassthroughBackingInfo;
import com.vmware.vim25.VirtualDevice;
import com.vmware.vim25.VirtualDeviceBackingInfo;
import com.vmware.vim25.VirtualDeviceConnectInfo;
import com.vmware.vim25.VirtualUSBController;
import com.vmware.vim25.VirtualDisk;
import com.vmware.vim25.VirtualDiskFlatVer1BackingInfo;
import com.vmware.vim25.VirtualDiskFlatVer2BackingInfo;
@ -72,21 +82,10 @@ import com.vmware.vim25.VirtualEthernetCardOpaqueNetworkBackingInfo;
import com.vmware.vim25.VirtualMachineConfigSpec;
import com.vmware.vim25.VirtualMachineSnapshotTree;
import com.vmware.vim25.VirtualPCNet32;
import com.vmware.vim25.VirtualUSBController;
import com.vmware.vim25.VirtualVmxnet2;
import com.vmware.vim25.VirtualVmxnet3;
import com.cloud.hypervisor.vmware.mo.DiskControllerType;
import com.cloud.hypervisor.vmware.mo.DatastoreMO;
import com.cloud.hypervisor.vmware.mo.HostMO;
import com.cloud.hypervisor.vmware.mo.CustomFieldConstants;
import com.cloud.hypervisor.vmware.mo.LicenseAssignmentManagerMO;
import com.cloud.hypervisor.vmware.mo.VirtualEthernetCardType;
import com.cloud.hypervisor.vmware.mo.VirtualMachineMO;
import com.cloud.hypervisor.vmware.mo.VmwareHypervisorHost;
import com.cloud.utils.Pair;
import com.cloud.utils.Ternary;
import com.cloud.utils.exception.ExceptionUtil;
public class VmwareHelper {
@SuppressWarnings("unused")
private static final Logger s_logger = Logger.getLogger(VmwareHelper.class);