vmware: fix inter-cluster stopped vm and volume migration (#4895)

Fixes #4838.

For inter-cluster migration without shared storage, VMware needs a host to be specified. The fix is to specify an appropriate host in the target cluster during a stopped-VM migration, and to find the target datastore using that host in the target cluster.

Signed-off-by: Abhishek Kumar <abhishek.mrt22@gmail.com>
commit fdefee75ff (parent 99a9063cf4)
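The core of the change is visible in VirtualMachineMO.changeDatastore() further down: when a stopped VM is relocated to a datastore in another cluster without shared storage, the vSphere RelocateVM_Task needs an explicit target host and resource pool in addition to the datastore. A minimal vim25 sketch of that call pattern, assuming the managed object references (vmMor, morDatastore, morTargetHost, morTargetPool) have already been resolved; the names here are illustrative, not CloudStack API:

    import com.vmware.vim25.ManagedObjectReference;
    import com.vmware.vim25.VimPortType;
    import com.vmware.vim25.VirtualMachineRelocateSpec;

    public class OfflineRelocateSketch {
        // Relocate a stopped VM's storage; host and pool are only required for the
        // inter-cluster, non-shared-storage case this commit addresses.
        static ManagedObjectReference relocate(VimPortType vimPort, ManagedObjectReference vmMor,
                ManagedObjectReference morDatastore, ManagedObjectReference morTargetHost,
                ManagedObjectReference morTargetPool) throws Exception {
            VirtualMachineRelocateSpec spec = new VirtualMachineRelocateSpec();
            spec.setDatastore(morDatastore);
            if (morTargetHost != null) {   // target cluster differs from the source cluster
                spec.setHost(morTargetHost);
                spec.setPool(morTargetPool);
            }
            // Returns a Task MOR; the caller polls it for completion, as VirtualMachineMO does.
            return vimPort.relocateVMTask(vmMor, spec, null);
        }
    }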
MigrateVmToPoolCommand.java

@@ -18,10 +18,10 @@
//
package com.cloud.agent.api;

import com.cloud.agent.api.to.VolumeTO;

import java.util.Collection;

import com.cloud.agent.api.to.VolumeTO;

/**
 * used to tell the agent to migrate a vm to a different primary storage pool.
 * It is for now only implemented on Vmware and is supposed to work irrespective of whether the VM is started or not.
@@ -32,6 +32,7 @@ public class MigrateVmToPoolCommand extends Command {
    private String vmName;
    private String destinationPool;
    private boolean executeInSequence = false;
    private String hostGuidInTargetCluster;

    protected MigrateVmToPoolCommand() {
    }
@@ -41,15 +42,22 @@ public class MigrateVmToPoolCommand extends Command {
     * @param vmName the name of the VM to migrate
     * @param volumes used to supply feedback on vmware generated names
     * @param destinationPool the primary storage pool to migrate the VM to
     * @param hostGuidInTargetCluster GUID of host in target cluster when migrating across clusters
     * @param executeInSequence
     */
    public MigrateVmToPoolCommand(String vmName, Collection<VolumeTO> volumes, String destinationPool, boolean executeInSequence) {
    public MigrateVmToPoolCommand(String vmName, Collection<VolumeTO> volumes, String destinationPool,
                                  String hostGuidInTargetCluster, boolean executeInSequence) {
        this.vmName = vmName;
        this.volumes = volumes;
        this.destinationPool = destinationPool;
        this.hostGuidInTargetCluster = hostGuidInTargetCluster;
        this.executeInSequence = executeInSequence;
    }

    public MigrateVmToPoolCommand(String vmName, Collection<VolumeTO> volumes, String destinationPool, boolean executeInSequence) {
        this(vmName, volumes, destinationPool, null, executeInSequence);
    }

    public Collection<VolumeTO> getVolumes() {
        return volumes;
    }
@@ -62,6 +70,10 @@ public class MigrateVmToPoolCommand extends Command {
        return vmName;
    }

    public String getHostGuidInTargetCluster() {
        return hostGuidInTargetCluster;
    }

    @Override
    public boolean executeInSequence() {
        return executeInSequence;
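The extra constructor is additive: the old four-argument form keeps compiling and simply delegates with a null host GUID. A hypothetical caller, with placeholder values, might look like this (the method and its arguments are illustrative, not part of the patch):

    import java.util.Collection;
    import java.util.Collections;

    import com.cloud.agent.api.MigrateVmToPoolCommand;
    import com.cloud.agent.api.to.VolumeTO;

    public class MigrateVmToPoolCommandUsage {
        // hostGuidInTargetCluster may be null for an intra-cluster migration.
        static MigrateVmToPoolCommand build(String vmName, String destPoolUuid, String hostGuidInTargetCluster) {
            Collection<VolumeTO> vols = Collections.emptyList(); // carries back VMware-generated volume names
            if (hostGuidInTargetCluster != null) {
                // inter-cluster, non-shared-storage case: pin a host in the target cluster
                return new MigrateVmToPoolCommand(vmName, vols, destPoolUuid, hostGuidInTargetCluster, true);
            }
            // same-cluster case: the original signature still works and passes a null GUID through
            return new MigrateVmToPoolCommand(vmName, vols, destPoolUuid, true);
        }
    }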
MigrateVolumeCommand.java

@@ -34,6 +34,7 @@ public class MigrateVolumeCommand extends Command {
    StorageFilerTO sourcePool;
    String attachedVmName;
    Volume.Type volumeType;
    private String hostGuidInTargetCluster;

    private DataTO srcData;
    private DataTO destData;
@@ -54,9 +55,10 @@ public class MigrateVolumeCommand extends Command {
        this.setWait(timeout);
    }

    public MigrateVolumeCommand(long volumeId, String volumePath, StoragePool sourcePool, StoragePool targetPool) {
    public MigrateVolumeCommand(long volumeId, String volumePath, StoragePool sourcePool, StoragePool targetPool, String hostGuidInTargetCluster) {
        this(volumeId,volumePath,targetPool, null, Volume.Type.UNKNOWN, -1);
        this.sourcePool = new StorageFilerTO(sourcePool);
        this.hostGuidInTargetCluster = hostGuidInTargetCluster;
    }

    public MigrateVolumeCommand(DataTO srcData, DataTO destData, Map<String, String> srcDetails, Map<String, String> destDetails, int timeout) {
@@ -101,6 +103,10 @@ public class MigrateVolumeCommand extends Command {
        return volumeType;
    }

    public String getHostGuidInTargetCluster() {
        return hostGuidInTargetCluster;
    }

    public DataTO getSrcData() {
        return srcData;
    }
VirtualMachineManagerImpl.java

@@ -40,7 +40,6 @@ import java.util.concurrent.TimeUnit;
import javax.inject.Inject;
import javax.naming.ConfigurationException;

import com.cloud.deployasis.dao.UserVmDeployAsIsDetailsDao;
import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.command.admin.vm.MigrateVMCmd;
@@ -142,6 +141,7 @@ import com.cloud.deploy.DeploymentPlan;
import com.cloud.deploy.DeploymentPlanner;
import com.cloud.deploy.DeploymentPlanner.ExcludeList;
import com.cloud.deploy.DeploymentPlanningManager;
import com.cloud.deployasis.dao.UserVmDeployAsIsDetailsDao;
import com.cloud.event.EventTypes;
import com.cloud.event.UsageEventUtils;
import com.cloud.event.UsageEventVO;
@@ -2210,7 +2210,10 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
        }
    }

    private Answer[] attemptHypervisorMigration(StoragePool destPool, VMInstanceVO vm) {
    private Answer[] attemptHypervisorMigration(StoragePool destPool, VMInstanceVO vm, Long hostId) {
        if (hostId == null) {
            return null;
        }
        final HypervisorGuru hvGuru = _hvGuruMgr.getGuru(vm.getHypervisorType());
        // OfflineVmwareMigration: in case of vmware call vcenter to do it for us.
        // OfflineVmwareMigration: should we check the proximity of source and destination
@@ -2218,15 +2221,6 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
        // OfflineVmwareMigration: we are checking on success to optionally delete an old vm if we are not
        List<Command> commandsToSend = hvGuru.finalizeMigrate(vm, destPool);

        Long hostId = vm.getHostId();
        // OfflineVmwareMigration: probably this is null when vm is stopped
        if(hostId == null) {
            hostId = vm.getLastHostId();
            if (s_logger.isDebugEnabled()) {
                s_logger.debug(String.format("host id is null, using last host id %d", hostId) );
            }
        }

        if(CollectionUtils.isNotEmpty(commandsToSend)) {
            Commands commandsContainer = new Commands(Command.OnError.Stop);
            commandsContainer.addCommands(commandsToSend);
@@ -2241,7 +2235,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
        return null;
    }

    private void afterHypervisorMigrationCleanup(StoragePool destPool, VMInstanceVO vm, HostVO srcHost, Long srcClusterId, Answer[] hypervisorMigrationResults) throws InsufficientCapacityException {
    private void afterHypervisorMigrationCleanup(StoragePool destPool, VMInstanceVO vm, Long srcClusterId, Answer[] hypervisorMigrationResults) throws InsufficientCapacityException {
        boolean isDebugEnabled = s_logger.isDebugEnabled();
        if(isDebugEnabled) {
            String msg = String.format("cleaning up after hypervisor pool migration volumes for VM %s(%s) to pool %s(%s)", vm.getInstanceName(), vm.getUuid(), destPool.getName(), destPool.getUuid());
@@ -2250,18 +2244,23 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
        setDestinationPoolAndReallocateNetwork(destPool, vm);
        // OfflineVmwareMigration: don't set this to null or have another way to address the command; twice migrating will lead to an NPE
        Long destPodId = destPool.getPodId();
        Long vmPodId = vm.getPodIdToDeployIn();
        if (destPodId == null || ! destPodId.equals(vmPodId)) {

        if (destPodId == null || !destPodId.equals(vm.getPodIdToDeployIn())) {
            if(isDebugEnabled) {
                String msg = String.format("resetting lasHost for VM %s(%s) as pod (%s) is no good.", vm.getInstanceName(), vm.getUuid(), destPodId);
                s_logger.debug(msg);
            }

            vm.setLastHostId(null);
            vm.setPodIdToDeployIn(destPodId);
            // OfflineVmwareMigration: a consecutive migration will fail probably (no host not pod)
        }// else keep last host set for this vm
        markVolumesInPool(vm,destPool, hypervisorMigrationResults);
        } else if (srcClusterId != null && destPool.getClusterId() != null && !srcClusterId.equals(destPool.getClusterId())) {
            if(isDebugEnabled) {
                String msg = String.format("resetting lasHost for VM %s(%s) as cluster changed", vm.getInstanceName(), vm.getUuid());
                s_logger.debug(msg);
            }
            vm.setLastHostId(null);
        } // else keep last host set for this vm
        markVolumesInPool(vm, destPool, hypervisorMigrationResults);
        // OfflineVmwareMigration: deal with answers, if (hypervisorMigrationResults.length > 0)
        // OfflineVmwareMigration: iterate over the volumes for data updates
    }
@@ -2295,23 +2294,60 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
        }
    }

    private Pair<Long, Long> findClusterAndHostIdForVm(VMInstanceVO vm) {
        Long hostId = vm.getHostId();
        Long clusterId = null;
        // OfflineVmwareMigration: probably this is null when vm is stopped
        if(hostId == null) {
            hostId = vm.getLastHostId();
            if (s_logger.isDebugEnabled()) {
                s_logger.debug(String.format("host id is null, using last host id %d", hostId) );
            }
        }
        if (hostId == null) {
            List<VolumeVO> volumes = _volsDao.findByInstanceAndType(vm.getId(), Type.ROOT);
            if (CollectionUtils.isNotEmpty(volumes)) {
                for (VolumeVO rootVolume : volumes) {
                    if (rootVolume.getPoolId() != null) {
                        StoragePoolVO pool = _storagePoolDao.findById(rootVolume.getPoolId());
                        if (pool != null && pool.getClusterId() != null) {
                            clusterId = pool.getClusterId();
                            List<HostVO> hosts = _hostDao.findHypervisorHostInCluster(pool.getClusterId());
                            if (CollectionUtils.isNotEmpty(hosts)) {
                                hostId = hosts.get(0).getId();
                                break;
                            }
                        }
                    }
                }
            }
        }
        if (clusterId == null && hostId != null) {
            HostVO host = _hostDao.findById(hostId);
            if (host != null) {
                clusterId = host.getClusterId();
            }
        }
        return new Pair<>(clusterId, hostId);
    }

    private void migrateThroughHypervisorOrStorage(StoragePool destPool, VMInstanceVO vm) throws StorageUnavailableException, InsufficientCapacityException {
        final VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm);
        final Long srchostId = vm.getHostId() != null ? vm.getHostId() : vm.getLastHostId();
        final HostVO srcHost = _hostDao.findById(srchostId);
        final Long srcClusterId = srcHost.getClusterId();
        Answer[] hypervisorMigrationResults = attemptHypervisorMigration(destPool, vm);
        Pair<Long, Long> vmClusterAndHost = findClusterAndHostIdForVm(vm);
        final Long sourceClusterId = vmClusterAndHost.first();
        final Long sourceHostId = vmClusterAndHost.second();
        Answer[] hypervisorMigrationResults = attemptHypervisorMigration(destPool, vm, sourceHostId);
        boolean migrationResult = false;
        if (hypervisorMigrationResults == null) {
            // OfflineVmwareMigration: if the HypervisorGuru can't do it, let the volume manager take care of it.
            migrationResult = volumeMgr.storageMigration(profile, destPool);
            if (migrationResult) {
                afterStorageMigrationCleanup(destPool, vm, srcHost, srcClusterId);
                afterStorageMigrationCleanup(destPool, vm, sourceHostId, sourceClusterId);
            } else {
                s_logger.debug("Storage migration failed");
            }
        } else {
            afterHypervisorMigrationCleanup(destPool, vm, srcHost, srcClusterId, hypervisorMigrationResults);
            afterHypervisorMigrationCleanup(destPool, vm, sourceClusterId, hypervisorMigrationResults);
        }
    }

@@ -2366,7 +2402,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
    }


    private void afterStorageMigrationCleanup(StoragePool destPool, VMInstanceVO vm, HostVO srcHost, Long srcClusterId) throws InsufficientCapacityException {
    private void afterStorageMigrationCleanup(StoragePool destPool, VMInstanceVO vm, Long srcHostId, Long srcClusterId) throws InsufficientCapacityException {
        setDestinationPoolAndReallocateNetwork(destPool, vm);

        //when start the vm next time, don;'t look at last_host_id, only choose the host based on volume/storage pool
@@ -2376,7 +2412,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
        // If VM was cold migrated between clusters belonging to two different VMware DCs,
        // unregister the VM from the source host and cleanup the associated VM files.
        if (vm.getHypervisorType().equals(HypervisorType.VMware)) {
            afterStorageMigrationVmwareVMcleanup(destPool, vm, srcHost, srcClusterId);
            afterStorageMigrationVmwareVMCleanup(destPool, vm, srcHostId, srcClusterId);
        }
    }

@@ -2394,14 +2430,14 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
        }
    }

    private void afterStorageMigrationVmwareVMcleanup(StoragePool destPool, VMInstanceVO vm, HostVO srcHost, Long srcClusterId) {
    private void afterStorageMigrationVmwareVMCleanup(StoragePool destPool, VMInstanceVO vm, Long srcHostId, Long srcClusterId) {
        // OfflineVmwareMigration: this should only happen on storage migration, else the guru would already have issued the command
        final Long destClusterId = destPool.getClusterId();
        if (srcClusterId != null && destClusterId != null && ! srcClusterId.equals(destClusterId)) {
        if (srcHostId != null && srcClusterId != null && destClusterId != null && ! srcClusterId.equals(destClusterId)) {
            final String srcDcName = _clusterDetailsDao.getVmwareDcName(srcClusterId);
            final String destDcName = _clusterDetailsDao.getVmwareDcName(destClusterId);
            if (srcDcName != null && destDcName != null && !srcDcName.equals(destDcName)) {
                removeStaleVmFromSource(vm, srcHost);
                removeStaleVmFromSource(vm, _hostDao.findById(srcHostId));
            }
        }
    }
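In effect, findClusterAndHostIdForVm() above gives migrateThroughHypervisorOrStorage() a three-step fallback for a stopped VM: the current host if the VM is still placed, else the last host it ran on, else any hypervisor host in the cluster of the ROOT volume's primary storage; attemptHypervisorMigration() now simply returns null when no host could be resolved, so the flow degrades to plain storage migration. A condensed sketch of that ordering, with the DAO lookups replaced by plain parameters (names are illustrative):

    import com.cloud.utils.Pair;

    public class ClusterAndHostFallbackSketch {
        // Stand-in for findClusterAndHostIdForVm(): the inputs model what the
        // _vmDao/_volsDao/_hostDao lookups would return for a stopped VM.
        static Pair<Long, Long> resolve(Long runningHostId, Long lastHostId,
                Long rootVolumeClusterId, Long anyHostIdInThatCluster) {
            Long hostId = runningHostId != null ? runningHostId : lastHostId;
            Long clusterId = null;
            if (hostId == null) {                    // VM never ran, or its hosts are gone
                clusterId = rootVolumeClusterId;     // cluster of the ROOT volume's storage pool
                hostId = anyHostIdInThatCluster;     // any hypervisor host there will do
            }
            // when hostId came from the first two steps, the caller derives clusterId from the host row
            return new Pair<>(clusterId, hostId);
        }
    }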
VMwareGuru.java

@@ -16,6 +16,8 @@
// under the License.
package com.cloud.hypervisor.guru;

import static com.cloud.utils.NumbersUtil.toHumanReadableSize;

import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
@@ -149,8 +151,6 @@ import com.vmware.vim25.VirtualEthernetCardNetworkBackingInfo;
import com.vmware.vim25.VirtualMachineConfigSummary;
import com.vmware.vim25.VirtualMachineRuntimeInfo;

import static com.cloud.utils.NumbersUtil.toHumanReadableSize;

public class VMwareGuru extends HypervisorGuruBase implements HypervisorGuru, Configurable {
    private static final Logger s_logger = Logger.getLogger(VMwareGuru.class);

@@ -209,16 +209,43 @@ public class VMwareGuru extends HypervisorGuruBase implements HypervisorGuru, Co
        return vmwareVmImplementer.implement(vm, toVirtualMachineTO(vm), getClusterId(vm.getId()));
    }

    long getClusterId(long vmId) {
        long clusterId;
        Long hostId;
    private Long getClusterIdFromVmVolume(long vmId) {
        Long clusterId = null;
        List<VolumeVO> volumes = _volumeDao.findByInstanceAndType(vmId, Volume.Type.ROOT);
        if (CollectionUtils.isNotEmpty(volumes)) {
            for (VolumeVO rootVolume : volumes) {
                if (rootVolume.getPoolId() != null) {
                    StoragePoolVO pool = _storagePoolDao.findById(rootVolume.getPoolId());
                    if (pool != null && pool.getClusterId() != null) {
                        clusterId = pool.getClusterId();
                        break;
                    }
                }
            }
        }
        return clusterId;
    }

        hostId = _vmDao.findById(vmId).getHostId();
        if (hostId == null) {
    private Long getClusterId(long vmId) {
        Long clusterId = null;
        Long hostId = null;
        VMInstanceVO vm = _vmDao.findById(vmId);
        if (vm != null) {
            hostId = _vmDao.findById(vmId).getHostId();
        }
        if (vm != null && hostId == null) {
            // If VM is in stopped state then hostId would be undefined. Hence read last host's Id instead.
            hostId = _vmDao.findById(vmId).getLastHostId();
        }
        clusterId = _hostDao.findById(hostId).getClusterId();
        HostVO host = null;
        if (hostId != null) {
            host = _hostDao.findById(hostId);
        }
        if (host != null) {
            clusterId = host.getClusterId();
        } else {
            clusterId = getClusterIdFromVmVolume(vmId);
        }

        return clusterId;
    }
@@ -418,9 +445,11 @@ public class VMwareGuru extends HypervisorGuruBase implements HypervisorGuru, Co

    @Override public Map<String, String> getClusterSettings(long vmId) {
        Map<String, String> details = new HashMap<String, String>();
        long clusterId = getClusterId(vmId);
        details.put(VmwareReserveCpu.key(), VmwareReserveCpu.valueIn(clusterId).toString());
        details.put(VmwareReserveMemory.key(), VmwareReserveMemory.valueIn(clusterId).toString());
        Long clusterId = getClusterId(vmId);
        if (clusterId != null) {
            details.put(VmwareReserveCpu.key(), VmwareReserveCpu.valueIn(clusterId).toString());
            details.put(VmwareReserveMemory.key(), VmwareReserveMemory.valueIn(clusterId).toString());
        }
        return details;
    }

@@ -1056,6 +1085,29 @@ public class VMwareGuru extends HypervisorGuruBase implements HypervisorGuru, Co
        return null;
    }

    private boolean isInterClusterMigration(Long srcClusterId, Long destClusterId) {
        return srcClusterId != null && destClusterId != null && ! srcClusterId.equals(destClusterId);
    }

    private String getHostGuidInTargetCluster(boolean isInterClusterMigration, Long destClusterId) {
        String hostGuidInTargetCluster = null;
        if (isInterClusterMigration) {
            Host hostInTargetCluster = null;
            // Without host vMotion might fail between non-shared storages with error similar to,
            // https://kb.vmware.com/s/article/1003795
            // As this is offline migration VM won't be started on this host
            List<HostVO> hosts = _hostDao.findHypervisorHostInCluster(destClusterId);
            if (CollectionUtils.isNotEmpty(hosts)) {
                hostInTargetCluster = hosts.get(0);
            }
            if (hostInTargetCluster == null) {
                throw new CloudRuntimeException("Migration failed, unable to find suitable target host for VM placement while migrating between storage pools of different clusters without shared storages");
            }
            hostGuidInTargetCluster = hostInTargetCluster.getGuid();
        }
        return hostGuidInTargetCluster;
    }

    @Override public List<Command> finalizeMigrate(VirtualMachine vm, StoragePool destination) {
        List<Command> commands = new ArrayList<Command>();

@@ -1066,14 +1118,16 @@ public class VMwareGuru extends HypervisorGuruBase implements HypervisorGuru, Co
            VolumeTO vol = new VolumeTO(volume, destination);
            vols.add(vol);
        }
        MigrateVmToPoolCommand migrateVmToPoolCommand = new MigrateVmToPoolCommand(vm.getInstanceName(), vols, destination.getUuid(), true);

        final Long destClusterId = destination.getClusterId();
        final Long srcClusterId = getClusterId(vm.getId());
        final boolean isInterClusterMigration = isInterClusterMigration(destClusterId, srcClusterId);
        MigrateVmToPoolCommand migrateVmToPoolCommand = new MigrateVmToPoolCommand(vm.getInstanceName(),
                vols, destination.getUuid(), getHostGuidInTargetCluster(isInterClusterMigration, destClusterId), true);
        commands.add(migrateVmToPoolCommand);

        // OfflineVmwareMigration: cleanup if needed
        final Long destClusterId = destination.getClusterId();
        final Long srcClusterId = getClusterId(vm.getId());

        if (srcClusterId != null && destClusterId != null && !srcClusterId.equals(destClusterId)) {
        if (isInterClusterMigration) {
            final String srcDcName = _clusterDetailsDao.getVmwareDcName(srcClusterId);
            final String destDcName = _clusterDetailsDao.getVmwareDcName(destClusterId);
            if (srcDcName != null && destDcName != null && !srcDcName.equals(destDcName)) {
VmwareResource.java

@@ -16,6 +16,9 @@
// under the License.
package com.cloud.hypervisor.vmware.resource;

import static com.cloud.utils.HumanReadableJson.getHumanReadableBytesJson;
import static com.cloud.utils.NumbersUtil.toHumanReadableSize;

import java.io.File;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
@@ -45,11 +48,12 @@ import java.util.stream.Collectors;
import javax.naming.ConfigurationException;
import javax.xml.datatype.XMLGregorianCalendar;

import com.cloud.agent.api.to.DataTO;
import com.cloud.agent.api.to.DeployAsIsInfoTO;
import com.cloud.agent.api.ValidateVcenterDetailsCommand;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.storage.command.CopyCommand;
import org.apache.cloudstack.storage.command.StorageSubSystemCommand;
import org.apache.cloudstack.storage.configdrive.ConfigDrive;
import org.apache.cloudstack.storage.resource.NfsSecondaryStorageResource;
import org.apache.cloudstack.storage.to.PrimaryDataStoreTO;
import org.apache.cloudstack.storage.to.TemplateObjectTO;
import org.apache.cloudstack.storage.to.VolumeObjectTO;
import org.apache.cloudstack.utils.volume.VirtualMachineDiskInfo;
@@ -162,6 +166,7 @@ import com.cloud.agent.api.UnregisterVMCommand;
import com.cloud.agent.api.UpgradeSnapshotCommand;
import com.cloud.agent.api.ValidateSnapshotAnswer;
import com.cloud.agent.api.ValidateSnapshotCommand;
import com.cloud.agent.api.ValidateVcenterDetailsCommand;
import com.cloud.agent.api.VmDiskStatsEntry;
import com.cloud.agent.api.VmStatsEntry;
import com.cloud.agent.api.VolumeStatsEntry;
@@ -178,12 +183,13 @@ import com.cloud.agent.api.storage.CreatePrivateTemplateAnswer;
import com.cloud.agent.api.storage.DestroyCommand;
import com.cloud.agent.api.storage.MigrateVolumeAnswer;
import com.cloud.agent.api.storage.MigrateVolumeCommand;
import com.cloud.agent.api.to.deployasis.OVFPropertyTO;
import com.cloud.agent.api.storage.PrimaryStorageDownloadAnswer;
import com.cloud.agent.api.storage.PrimaryStorageDownloadCommand;
import com.cloud.agent.api.storage.ResizeVolumeAnswer;
import com.cloud.agent.api.storage.ResizeVolumeCommand;
import com.cloud.agent.api.to.DataStoreTO;
import com.cloud.agent.api.to.DataTO;
import com.cloud.agent.api.to.DeployAsIsInfoTO;
import com.cloud.agent.api.to.DiskTO;
import com.cloud.agent.api.to.IpAddressTO;
import com.cloud.agent.api.to.NfsTO;
@@ -191,6 +197,7 @@ import com.cloud.agent.api.to.NicTO;
import com.cloud.agent.api.to.StorageFilerTO;
import com.cloud.agent.api.to.VirtualMachineTO;
import com.cloud.agent.api.to.VolumeTO;
import com.cloud.agent.api.to.deployasis.OVFPropertyTO;
import com.cloud.agent.resource.virtualnetwork.VRScripts;
import com.cloud.agent.resource.virtualnetwork.VirtualRouterDeployer;
import com.cloud.agent.resource.virtualnetwork.VirtualRoutingResource;
@@ -219,8 +226,8 @@ import com.cloud.hypervisor.vmware.mo.HostStorageSystemMO;
import com.cloud.hypervisor.vmware.mo.HypervisorHostHelper;
import com.cloud.hypervisor.vmware.mo.NetworkDetails;
import com.cloud.hypervisor.vmware.mo.PbmProfileManagerMO;
import com.cloud.hypervisor.vmware.mo.TaskMO;
import com.cloud.hypervisor.vmware.mo.StoragepodMO;
import com.cloud.hypervisor.vmware.mo.TaskMO;
import com.cloud.hypervisor.vmware.mo.VirtualEthernetCardType;
import com.cloud.hypervisor.vmware.mo.VirtualMachineDiskInfoBuilder;
import com.cloud.hypervisor.vmware.mo.VirtualMachineMO;
@@ -289,7 +296,6 @@ import com.vmware.vim25.HostInternetScsiHba;
import com.vmware.vim25.HostPortGroupSpec;
import com.vmware.vim25.ManagedObjectReference;
import com.vmware.vim25.NasDatastoreInfo;
import com.vmware.vim25.VirtualMachineDefinedProfileSpec;
import com.vmware.vim25.ObjectContent;
import com.vmware.vim25.OptionValue;
import com.vmware.vim25.PerfCounterInfo;
@@ -324,6 +330,7 @@ import com.vmware.vim25.VirtualEthernetCardOpaqueNetworkBackingInfo;
import com.vmware.vim25.VirtualIDEController;
import com.vmware.vim25.VirtualMachineBootOptions;
import com.vmware.vim25.VirtualMachineConfigSpec;
import com.vmware.vim25.VirtualMachineDefinedProfileSpec;
import com.vmware.vim25.VirtualMachineFileInfo;
import com.vmware.vim25.VirtualMachineFileLayoutEx;
import com.vmware.vim25.VirtualMachineFileLayoutExFileInfo;
@@ -343,13 +350,6 @@ import com.vmware.vim25.VmConfigInfo;
import com.vmware.vim25.VmConfigSpec;
import com.vmware.vim25.VmwareDistributedVirtualSwitchPvlanSpec;
import com.vmware.vim25.VmwareDistributedVirtualSwitchVlanIdSpec;
import org.apache.cloudstack.storage.command.CopyCommand;
import org.apache.cloudstack.storage.command.StorageSubSystemCommand;
import org.apache.cloudstack.storage.resource.NfsSecondaryStorageResource;
import org.apache.cloudstack.storage.to.PrimaryDataStoreTO;

import static com.cloud.utils.HumanReadableJson.getHumanReadableBytesJson;
import static com.cloud.utils.NumbersUtil.toHumanReadableSize;

public class VmwareResource implements StoragePoolResource, ServerResource, VmwareHostService, VirtualRouterDeployer {
    private static final Logger s_logger = Logger.getLogger(VmwareResource.class);
@@ -4391,6 +4391,11 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
        final String vmName = cmd.getVmName();

        VmwareHypervisorHost hyperHost = getHyperHost(getServiceContext());
        VmwareHypervisorHost hyperHostInTargetCluster = null;
        if (cmd.getHostGuidInTargetCluster() != null) {
            hyperHostInTargetCluster = VmwareHelper.getHostMOFromHostName(getServiceContext(),
                    cmd.getHostGuidInTargetCluster());
        }
        try {
            VirtualMachineMO vmMo = getVirtualMachineMO(vmName, hyperHost);
            if (vmMo == null) {
@@ -4400,7 +4405,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
            }

            String poolUuid = cmd.getDestinationPool();
            return migrateAndAnswer(vmMo, poolUuid, hyperHost, cmd);
            return migrateAndAnswer(vmMo, poolUuid, hyperHost, hyperHostInTargetCluster, cmd);
        } catch (Throwable e) { // hopefully only CloudRuntimeException :/
            if (e instanceof Exception) {
                return new Answer(cmd, (Exception) e);
@@ -4412,10 +4417,10 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
            return new Answer(cmd, false, "unknown problem: " + e.getLocalizedMessage());
        }
    }

    private Answer migrateAndAnswer(VirtualMachineMO vmMo, String poolUuid, VmwareHypervisorHost hyperHost, Command cmd) throws Exception {
        ManagedObjectReference morDs = getTargetDatastoreMOReference(poolUuid, hyperHost);

    private Answer migrateAndAnswer(VirtualMachineMO vmMo, String poolUuid,
            VmwareHypervisorHost sourceHyperHost, VmwareHypervisorHost targetHyperHost,
            Command cmd) throws Exception {
        ManagedObjectReference morDs = getTargetDatastoreMOReference(poolUuid, sourceHyperHost, targetHyperHost);
        try {
            // OfflineVmwareMigration: getVolumesFromCommand(cmd);
            Map<Integer, Long> volumeDeviceKey = getVolumesFromCommand(vmMo, cmd);
@@ -4424,7 +4429,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
                    s_logger.trace(String.format("disk to migrate has disk id %d and volumeId %d", diskId, volumeDeviceKey.get(diskId)));
                }
            }
            if (vmMo.changeDatastore(morDs)) {
            if (vmMo.changeDatastore(morDs, targetHyperHost)) {
                // OfflineVmwareMigration: create target specification to include in answer
                // Consolidate VM disks after successful VM migration
                // In case of a linked clone VM, if VM's disks are not consolidated, further VM operations such as volume snapshot, VM snapshot etc. will result in DB inconsistencies.
@@ -4500,18 +4505,28 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
        volumeDeviceKey.put(diskId, volumeId);
    }

    private ManagedObjectReference getTargetDatastoreMOReference(String destinationPool, VmwareHypervisorHost hyperHost) {
    private ManagedObjectReference getTargetDatastoreMOReference(String destinationPool,
            VmwareHypervisorHost hyperHost,
            VmwareHypervisorHost targetHyperHost) {
        ManagedObjectReference morDs;
        try {
            if (s_logger.isDebugEnabled()) {
                s_logger.debug(String.format("finding datastore %s", destinationPool));
            }
            morDs = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, destinationPool);
            if (morDs == null && targetHyperHost != null) {
                morDs = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(targetHyperHost, destinationPool);
            }
        } catch (Exception e) {
            String msg = "exception while finding data store " + destinationPool;
            s_logger.error(msg);
            throw new CloudRuntimeException(msg + ": " + e.getLocalizedMessage());
        }
        if (morDs == null) {
            String msg = String.format("Failed to find datastore for pool UUID: %s", destinationPool);
            s_logger.error(msg);
            throw new CloudRuntimeException(msg);
        }
        return morDs;
    }

@@ -4627,7 +4642,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
        morDc = srcHyperHost.getHyperHostDatacenter();
        morDcOfTargetHost = tgtHyperHost.getHyperHostDatacenter();
        if (!morDc.getValue().equalsIgnoreCase(morDcOfTargetHost.getValue())) {
            String msg = "Source host & target host are in different datacentesr";
            String msg = "Source host & target host are in different datacenter";
            throw new CloudRuntimeException(msg);
        }
        VmwareManager mgr = tgtHyperHost.getContext().getStockObject(VmwareManager.CONTEXT_STOCK_NAME);
@@ -4839,6 +4854,11 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
        String path = cmd.getVolumePath();

        VmwareHypervisorHost hyperHost = getHyperHost(getServiceContext());
        VmwareHypervisorHost hyperHostInTargetCluster = null;
        if (cmd.getHostGuidInTargetCluster() != null) {
            hyperHostInTargetCluster = VmwareHelper.getHostMOFromHostName(getServiceContext(), cmd.getHostGuidInTargetCluster());
        }
        VmwareHypervisorHost targetDSHost = hyperHostInTargetCluster != null ? hyperHostInTargetCluster : hyperHost;
        VirtualMachineMO vmMo = null;
        DatastoreMO dsMo = null;
        DatastoreMO destinationDsMo = null;
@@ -4854,8 +4874,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
            // we need to spawn a worker VM to attach the volume to and move it
            morSourceDS = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, cmd.getSourcePool().getUuid());
            dsMo = new DatastoreMO(hyperHost.getContext(), morSourceDS);
            morDestintionDS = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, cmd.getTargetPool().getUuid());
            destinationDsMo = new DatastoreMO(hyperHost.getContext(), morDestintionDS);
            morDestintionDS = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(targetDSHost, cmd.getTargetPool().getUuid());
            destinationDsMo = new DatastoreMO(targetDSHost.getContext(), morDestintionDS);

            vmName = getWorkerName(getServiceContext(), cmd, 0, dsMo);
            if (destinationDsMo.getDatastoreType().equalsIgnoreCase("VVOL")) {
@@ -4870,7 +4890,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa

            s_logger.info("Create worker VM " + vmName);
            // OfflineVmwareMigration: 2. create the worker with access to the data(store)
            vmMo = HypervisorHostHelper.createWorkerVM(hyperHost, dsMo, vmName, null);
            vmMo = HypervisorHostHelper.createWorkerVM(hyperHost, dsMo, vmName,
                    HypervisorHostHelper.getMinimumHostHardwareVersion(hyperHost, hyperHostInTargetCluster));
            if (vmMo == null) {
                // OfflineVmwareMigration: don't throw a general Exception but think of a specific one
                throw new CloudRuntimeException("Unable to create a worker VM for volume operation");
@@ -4934,7 +4955,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
            }

            // OfflineVmwareMigration: this may have to be disected and executed in separate steps
            answer = migrateAndAnswer(vmMo, cmd.getTargetPool().getUuid(), hyperHost, cmd);
            answer = migrateAndAnswer(vmMo, cmd.getTargetPool().getUuid(), hyperHost, hyperHostInTargetCluster, cmd);
        } catch (Exception e) {
            String msg = String.format("Migration of volume '%s' failed due to %s", cmd.getVolumePath(), e.getLocalizedMessage());
            s_logger.error(msg, e);
@@ -4943,9 +4964,9 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
            try {
                // OfflineVmwareMigration: worker *may* have been renamed
                vmName = vmMo.getVmName();
                morSourceDS = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, cmd.getTargetPool().getUuid());
                dsMo = new DatastoreMO(hyperHost.getContext(), morSourceDS);
                s_logger.info("Dettaching disks before destroying worker VM '" + vmName + "' after volume migration");
                morSourceDS = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(targetDSHost, cmd.getTargetPool().getUuid());
                dsMo = new DatastoreMO(targetDSHost.getContext(), morSourceDS);
                s_logger.info("Detaching disks before destroying worker VM '" + vmName + "' after volume migration");
                VirtualDisk[] disks = vmMo.getAllDiskDevice();
                String format = "disk %d was migrated to %s";
                for (VirtualDisk disk : disks) {
VmwareStorageMotionStrategy.java

@@ -26,6 +26,21 @@ import java.util.Map;

import javax.inject.Inject;

import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
import org.apache.cloudstack.engine.subsystem.api.storage.DataMotionStrategy;
import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.to.VolumeObjectTO;
import org.apache.commons.collections.CollectionUtils;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;

import com.cloud.agent.AgentManager;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.MigrateWithStorageAnswer;
@@ -53,18 +68,6 @@ import com.cloud.utils.Pair;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.vm.VMInstanceVO;
import com.cloud.vm.dao.VMInstanceDao;
import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
import org.apache.cloudstack.engine.subsystem.api.storage.DataMotionStrategy;
import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.to.VolumeObjectTO;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;

@Component
public class VmwareStorageMotionStrategy implements DataMotionStrategy {
@@ -84,22 +87,19 @@ public class VmwareStorageMotionStrategy implements DataMotionStrategy {

    @Override
    public StrategyPriority canHandle(DataObject srcData, DataObject destData) {
        // OfflineVmwareMigration: return StrategyPriority.HYPERVISOR when destData is in a storage pool in the same vmware-cluster and both are volumes
        // OfflineVmwareMigration: return StrategyPriority.HYPERVISOR when destData is in a storage pool in the same pod or one of srcData & destData is in a zone-wide pool and both are volumes
        if (isOnVmware(srcData, destData)
                && isOnPrimary(srcData, destData)
                && isVolumesOnly(srcData, destData)
                && isDettached(srcData)
                && isIntraCluster(srcData, destData)
                && isStoreScopeEqual(srcData, destData)) {
                && isIntraPodOrZoneWideStoreInvolved(srcData, destData)
                && isDettached(srcData)) {
            if (s_logger.isDebugEnabled()) {
                String msg = String.format("%s can handle the request because %d(%s) and %d(%s) share the VMware cluster %s (== %s)"
                String msg = String.format("%s can handle the request because %d(%s) and %d(%s) share the pod"
                        , this.getClass()
                        , srcData.getId()
                        , srcData.getUuid()
                        , destData.getId()
                        , destData.getUuid()
                        , storagePoolDao.findById(srcData.getDataStore().getId()).getClusterId()
                        , storagePoolDao.findById(destData.getDataStore().getId()).getClusterId());
                        , destData.getUuid());
                s_logger.debug(msg);
            }
            return StrategyPriority.HYPERVISOR;
@@ -107,6 +107,17 @@ public class VmwareStorageMotionStrategy implements DataMotionStrategy {
        return StrategyPriority.CANT_HANDLE;
    }

    private boolean isIntraPodOrZoneWideStoreInvolved(DataObject srcData, DataObject destData) {
        DataStore srcStore = srcData.getDataStore();
        StoragePoolVO srcPool = storagePoolDao.findById(srcStore.getId());
        DataStore destStore = destData.getDataStore();
        StoragePoolVO destPool = storagePoolDao.findById(destStore.getId());
        if (srcPool.getPodId() != null && destPool.getPodId() != null) {
            return srcPool.getPodId().equals(destPool.getPodId());
        }
        return (ScopeType.ZONE.equals(srcPool.getScope()) || ScopeType.ZONE.equals(destPool.getScope()));
    }

    private boolean isDettached(DataObject srcData) {
        VolumeVO volume = volDao.findById(srcData.getId());
        return volume.getInstanceId() == null;
@@ -127,30 +138,37 @@ public class VmwareStorageMotionStrategy implements DataMotionStrategy {
                && HypervisorType.VMware.equals(destData.getTO().getHypervisorType());
    }

    private boolean isIntraCluster(DataObject srcData, DataObject destData) {
        DataStore srcStore = srcData.getDataStore();
        StoragePool srcPool = storagePoolDao.findById(srcStore.getId());
        DataStore destStore = destData.getDataStore();
        StoragePool destPool = storagePoolDao.findById(destStore.getId());
        if (srcPool.getClusterId() != null && destPool.getClusterId() != null) {
            return srcPool.getClusterId().equals(destPool.getClusterId());
    private Pair<Long, String> getHostIdForVmAndHostGuidInTargetCluster(DataObject srcData, DataObject destData) {
        StoragePool sourcePool = (StoragePool) srcData.getDataStore();
        ScopeType sourceScopeType = srcData.getDataStore().getScope().getScopeType();
        StoragePool targetPool = (StoragePool) destData.getDataStore();
        ScopeType targetScopeType = destData.getDataStore().getScope().getScopeType();
        Long hostId = null;
        String hostGuidInTargetCluster = null;
        if (ScopeType.CLUSTER.equals(sourceScopeType)) {
            // Find Volume source cluster and select any Vmware hypervisor host to attach worker VM
            hostId = findSuitableHostIdForWorkerVmPlacement(sourcePool.getClusterId());
            if (hostId == null) {
                throw new CloudRuntimeException("Offline Migration failed, unable to find suitable host for worker VM placement in the cluster of storage pool: " + sourcePool.getName());
            }
            if (ScopeType.CLUSTER.equals(targetScopeType) && !sourcePool.getClusterId().equals(targetPool.getClusterId())) {
                // Without host vMotion might fail between non-shared storages with error similar to,
                // https://kb.vmware.com/s/article/1003795
                List<HostVO> hosts = hostDao.findHypervisorHostInCluster(targetPool.getClusterId());
                if (CollectionUtils.isNotEmpty(hosts)) {
                    hostGuidInTargetCluster = hosts.get(0).getGuid();
                }
                if (hostGuidInTargetCluster == null) {
                    throw new CloudRuntimeException("Offline Migration failed, unable to find suitable target host for worker VM placement while migrating between storage pools of different cluster without shared storages");
                }
            }
        } else if (ScopeType.CLUSTER.equals(targetScopeType)) {
            hostId = findSuitableHostIdForWorkerVmPlacement(targetPool.getClusterId());
            if (hostId == null) {
                throw new CloudRuntimeException("Offline Migration failed, unable to find suitable host for worker VM placement in the cluster of storage pool: " + targetPool.getName());
            }
        }
        return false;
    }

    /**
     * Ensure that the scope of source and destination storage pools match
     *
     * @param srcData
     * @param destData
     * @return
     */
    private boolean isStoreScopeEqual(DataObject srcData, DataObject destData) {
        DataStore srcStore = srcData.getDataStore();
        DataStore destStore = destData.getDataStore();
        String msg = String.format("Storage scope of source pool is %s and of destination pool is %s", srcStore.getScope().toString(), destStore.getScope().toString());
        s_logger.debug(msg);
        return srcStore.getScope().getScopeType() == (destStore.getScope().getScopeType());
        return new Pair<>(hostId, hostGuidInTargetCluster);
    }

    @Override
@@ -187,21 +205,19 @@ public class VmwareStorageMotionStrategy implements DataMotionStrategy {
            // OfflineVmwareMigration: we shouldn't be here as we would have refused in the canHandle call
            throw new UnsupportedOperationException();
        }
        Pair<Long, String> hostIdForVmAndHostGuidInTargetCluster = getHostIdForVmAndHostGuidInTargetCluster(srcData, destData);
        Long hostId = hostIdForVmAndHostGuidInTargetCluster.first();
        String hostGuidInTargetCluster = hostIdForVmAndHostGuidInTargetCluster.second();
        StoragePool sourcePool = (StoragePool) srcData.getDataStore();
        StoragePool targetPool = (StoragePool) destData.getDataStore();
        MigrateVolumeCommand cmd = new MigrateVolumeCommand(srcData.getId()
                , srcData.getTO().getPath()
                , sourcePool
                , targetPool);
                , targetPool
                , hostGuidInTargetCluster);
        // OfflineVmwareMigration: should be ((StoragePool)srcData.getDataStore()).getHypervisor() but that is NULL, so hardcoding
        Answer answer;
        ScopeType scopeType = srcData.getDataStore().getScope().getScopeType();
        if (ScopeType.CLUSTER == scopeType) {
            // Find Volume source cluster and select any Vmware hypervisor host to attach worker VM
            Long hostId = findSuitableHostIdForWorkerVmPlacement(sourcePool.getClusterId());
            if (hostId == null) {
                throw new CloudRuntimeException("Offline Migration failed, unable to find suitable host for worker VM placement in cluster: " + sourcePool.getName());
            }
        if (hostId != null) {
            answer = agentMgr.easySend(hostId, cmd);
        } else {
            answer = agentMgr.sendTo(sourcePool.getDataCenterId(), HypervisorType.VMware, cmd);
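The canHandle() change above widens this strategy from strictly intra-cluster pool pairs to any pair of pools in the same pod, or pairs where at least one pool is zone-wide. A small sketch of that predicate in isolation, assuming the pod IDs and scopes have already been read from the pool rows (the enclosing class and enum are illustrative stand-ins, not CloudStack types):

    public class IntraPodOrZoneWideSketch {
        enum Scope { HOST, CLUSTER, ZONE }   // simplified stand-in for CloudStack's pool scope

        // Mirrors isIntraPodOrZoneWideStoreInvolved(): handle the request when both pools sit in
        // the same pod, or when either side is a zone-wide pool (whose pod id is null).
        static boolean canHandleScope(Long srcPodId, Scope srcScope, Long destPodId, Scope destScope) {
            if (srcPodId != null && destPodId != null) {
                return srcPodId.equals(destPodId);
            }
            return srcScope == Scope.ZONE || destScope == Scope.ZONE;
        }
    }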
HypervisorHostHelper.java

@@ -18,6 +18,7 @@ package com.cloud.hypervisor.vmware.mo;

import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.io.StringWriter;
import java.net.URI;
@@ -28,6 +29,7 @@ import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;

import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
@@ -37,17 +39,6 @@ import javax.xml.transform.TransformerFactory;
import javax.xml.transform.dom.DOMSource;
import javax.xml.transform.stream.StreamResult;

import com.vmware.vim25.ConcurrentAccessFaultMsg;
import com.vmware.vim25.DuplicateNameFaultMsg;
import com.vmware.vim25.FileFaultFaultMsg;
import com.vmware.vim25.InsufficientResourcesFaultFaultMsg;
import com.vmware.vim25.InvalidDatastoreFaultMsg;
import com.vmware.vim25.InvalidNameFaultMsg;
import com.vmware.vim25.InvalidStateFaultMsg;
import com.vmware.vim25.OutOfBoundsFaultMsg;
import com.vmware.vim25.RuntimeFaultFaultMsg;
import com.vmware.vim25.TaskInProgressFaultMsg;
import com.vmware.vim25.VmConfigFaultFaultMsg;
import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService;
import org.apache.commons.collections.MapUtils;
import org.apache.commons.lang.StringUtils;
@@ -80,19 +71,20 @@ import com.cloud.utils.db.GlobalLock;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.net.NetUtils;
import com.cloud.utils.nicira.nvp.plugin.NiciraNvpApiVersion;
import com.vmware.vim25.OvfCreateDescriptorParams;
import com.vmware.vim25.OvfCreateDescriptorResult;
import com.vmware.vim25.AlreadyExistsFaultMsg;
import com.vmware.vim25.BoolPolicy;
import com.vmware.vim25.CustomFieldStringValue;
import com.vmware.vim25.ClusterConfigInfoEx;
import com.vmware.vim25.DatacenterConfigInfo;
import com.vmware.vim25.ConcurrentAccessFaultMsg;
import com.vmware.vim25.CustomFieldStringValue;
import com.vmware.vim25.DVPortSetting;
import com.vmware.vim25.DVPortgroupConfigInfo;
import com.vmware.vim25.DVPortgroupConfigSpec;
import com.vmware.vim25.DVSSecurityPolicy;
import com.vmware.vim25.DVSTrafficShapingPolicy;
import com.vmware.vim25.DatacenterConfigInfo;
import com.vmware.vim25.DuplicateNameFaultMsg;
import com.vmware.vim25.DynamicProperty;
import com.vmware.vim25.FileFaultFaultMsg;
import com.vmware.vim25.HostNetworkSecurityPolicy;
import com.vmware.vim25.HostNetworkTrafficShapingPolicy;
import com.vmware.vim25.HostPortGroup;
@@ -101,6 +93,10 @@ import com.vmware.vim25.HostVirtualSwitch;
import com.vmware.vim25.HttpNfcLeaseDeviceUrl;
import com.vmware.vim25.HttpNfcLeaseInfo;
import com.vmware.vim25.HttpNfcLeaseState;
import com.vmware.vim25.InsufficientResourcesFaultFaultMsg;
import com.vmware.vim25.InvalidDatastoreFaultMsg;
import com.vmware.vim25.InvalidNameFaultMsg;
import com.vmware.vim25.InvalidStateFaultMsg;
import com.vmware.vim25.LocalizedMethodFault;
import com.vmware.vim25.LongPolicy;
import com.vmware.vim25.ManagedObjectReference;
@@ -108,11 +104,16 @@ import com.vmware.vim25.MethodFault;
import com.vmware.vim25.NumericRange;
import com.vmware.vim25.ObjectContent;
import com.vmware.vim25.OptionValue;
import com.vmware.vim25.OutOfBoundsFaultMsg;
import com.vmware.vim25.OvfCreateDescriptorParams;
import com.vmware.vim25.OvfCreateDescriptorResult;
import com.vmware.vim25.OvfCreateImportSpecParams;
import com.vmware.vim25.OvfCreateImportSpecResult;
import com.vmware.vim25.OvfFileItem;
import com.vmware.vim25.OvfFile;
import com.vmware.vim25.OvfFileItem;
import com.vmware.vim25.ParaVirtualSCSIController;
import com.vmware.vim25.RuntimeFaultFaultMsg;
import com.vmware.vim25.TaskInProgressFaultMsg;
import com.vmware.vim25.VMwareDVSConfigSpec;
import com.vmware.vim25.VMwareDVSPortSetting;
import com.vmware.vim25.VMwareDVSPortgroupPolicy;
@@ -121,25 +122,24 @@ import com.vmware.vim25.VMwareDVSPvlanMapEntry;
import com.vmware.vim25.VirtualBusLogicController;
import com.vmware.vim25.VirtualController;
import com.vmware.vim25.VirtualDevice;
import com.vmware.vim25.VirtualDisk;
import com.vmware.vim25.VirtualDeviceConfigSpec;
import com.vmware.vim25.VirtualDeviceConfigSpecOperation;
import com.vmware.vim25.VirtualDisk;
import com.vmware.vim25.VirtualIDEController;
import com.vmware.vim25.VirtualLsiLogicController;
import com.vmware.vim25.VirtualLsiLogicSASController;
import com.vmware.vim25.VirtualMachineConfigSpec;
import com.vmware.vim25.VirtualMachineFileInfo;
import com.vmware.vim25.VirtualMachineGuestOsIdentifier;
import com.vmware.vim25.VirtualMachineImportSpec;
import com.vmware.vim25.VirtualMachineVideoCard;
import com.vmware.vim25.VirtualSCSIController;
import com.vmware.vim25.VirtualSCSISharing;
import com.vmware.vim25.VirtualMachineImportSpec;
import com.vmware.vim25.VmConfigFaultFaultMsg;
import com.vmware.vim25.VmwareDistributedVirtualSwitchPvlanSpec;
import com.vmware.vim25.VmwareDistributedVirtualSwitchTrunkVlanSpec;
import com.vmware.vim25.VmwareDistributedVirtualSwitchVlanIdSpec;
import com.vmware.vim25.VmwareDistributedVirtualSwitchVlanSpec;
import java.io.FileWriter;
import java.util.UUID;

public class HypervisorHostHelper {
    private static final Logger s_logger = Logger.getLogger(HypervisorHostHelper.class);
@@ -153,6 +153,48 @@ public class HypervisorHostHelper {
    public static final String VSPHERE_DATASTORE_BASE_FOLDER = "fcd";
    public static final String VSPHERE_DATASTORE_HIDDEN_FOLDER = ".hidden";

    protected final static Map<String, Integer> apiVersionHardwareVersionMap;

    static {
        apiVersionHardwareVersionMap = new HashMap<String, Integer>();
        apiVersionHardwareVersionMap.put("3.5", 4);
        apiVersionHardwareVersionMap.put("3.6", 4);
        apiVersionHardwareVersionMap.put("3.7", 4);
        apiVersionHardwareVersionMap.put("3.8", 4);
        apiVersionHardwareVersionMap.put("3.9", 4);
        apiVersionHardwareVersionMap.put("4.0", 7);
        apiVersionHardwareVersionMap.put("4.1", 7);
        apiVersionHardwareVersionMap.put("4.2", 7);
        apiVersionHardwareVersionMap.put("4.3", 7);
        apiVersionHardwareVersionMap.put("4.4", 7);
        apiVersionHardwareVersionMap.put("4.5", 7);
        apiVersionHardwareVersionMap.put("4.6", 7);
        apiVersionHardwareVersionMap.put("4.7", 7);
        apiVersionHardwareVersionMap.put("4.8", 7);
        apiVersionHardwareVersionMap.put("4.9", 7);
        apiVersionHardwareVersionMap.put("5.0", 8);
        apiVersionHardwareVersionMap.put("5.1", 9);
        apiVersionHardwareVersionMap.put("5.2", 9);
        apiVersionHardwareVersionMap.put("5.3", 9);
        apiVersionHardwareVersionMap.put("5.4", 9);
        apiVersionHardwareVersionMap.put("5.5", 10);
        apiVersionHardwareVersionMap.put("5.6", 10);
        apiVersionHardwareVersionMap.put("5.7", 10);
        apiVersionHardwareVersionMap.put("5.8", 10);
        apiVersionHardwareVersionMap.put("5.9", 10);
        apiVersionHardwareVersionMap.put("6.0", 11);
        apiVersionHardwareVersionMap.put("6.1", 11);
        apiVersionHardwareVersionMap.put("6.2", 11);
        apiVersionHardwareVersionMap.put("6.3", 11);
        apiVersionHardwareVersionMap.put("6.4", 11);
        apiVersionHardwareVersionMap.put("6.5", 13);
        apiVersionHardwareVersionMap.put("6.6", 13);
        apiVersionHardwareVersionMap.put("6.7", 14);
        apiVersionHardwareVersionMap.put("6.8", 14);
        apiVersionHardwareVersionMap.put("6.9", 14);
        apiVersionHardwareVersionMap.put("7.0", 17);
    }

    public static VirtualMachineMO findVmFromObjectContent(VmwareContext context, ObjectContent[] ocs, String name, String instanceNameCustomField) {

        if (ocs != null && ocs.length > 0) {
@@ -2211,4 +2253,36 @@ public class HypervisorHostHelper {
            dsMo.makeDirectory(hiddenFolderPath, hyperHost.getHyperHostDatacenter());
        }
    }

    public static Integer getHostHardwareVersion(VmwareHypervisorHost host) {
        Integer version = null;
        HostMO hostMo = new HostMO(host.getContext(), host.getMor());
        String hostApiVersion = "";
        try {
            hostApiVersion = hostMo.getHostAboutInfo().getApiVersion();
        } catch (Exception ignored) {
        }
        if (hostApiVersion == null) {
            hostApiVersion = "";
        }
        version = apiVersionHardwareVersionMap.get(hostApiVersion);
        return version;
    }

    /*
    Finds minimum host hardware version as String, of two hosts when both of them are not null
    and hardware version of both hosts is different.
    Return null otherwise
    */
    public static String getMinimumHostHardwareVersion(VmwareHypervisorHost host1, VmwareHypervisorHost host2) {
        String hardwareVersion = null;
        if (host1 != null & host2 != null) {
            Integer host1Version = getHostHardwareVersion(host1);
            Integer host2Version = getHostHardwareVersion(host2);
            if (host1Version != null && host2Version != null && !host1Version.equals(host2Version)) {
                hardwareVersion = String.valueOf(Math.min(host1Version, host2Version));
            }
        }
        return hardwareVersion;
    }
}
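The API-version-to-hardware-version table and getMinimumHostHardwareVersion() exist so the worker VM spawned for an offline volume migration uses a virtual hardware version both clusters can run; VmwareResource passes the result straight into createWorkerVM() in the hunk above. A small worked example of the mapping, with the values taken from the table (the surrounding class is illustrative):

    public class HardwareVersionLookupSketch {
        // An ESXi host reporting vSphere API version "6.7" supports virtual hardware version 14,
        // a "7.0" host supports 17, so a worker VM that must be usable from both sides is
        // created with hardware version "14".
        public static void main(String[] args) {
            java.util.Map<String, Integer> map = new java.util.HashMap<>();
            map.put("6.7", 14);
            map.put("7.0", 17);
            Integer sourceHostVersion = map.get("6.7");   // 14
            Integer targetHostVersion = map.get("7.0");   // 17
            String workerVersion = String.valueOf(Math.min(sourceHostVersion, targetHostVersion));
            System.out.println(workerVersion);            // prints 14
        }
    }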
VirtualMachineMO.java

@@ -16,6 +16,8 @@
// under the License.
package com.cloud.hypervisor.vmware.mo;

import static com.cloud.utils.NumbersUtil.toHumanReadableSize;

import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.ByteArrayInputStream;
@@ -39,6 +41,14 @@ import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.lang.StringUtils;
import org.apache.log4j.Logger;

import com.cloud.hypervisor.vmware.mo.SnapshotDescriptor.SnapshotInfo;
import com.cloud.hypervisor.vmware.util.VmwareContext;
import com.cloud.hypervisor.vmware.util.VmwareHelper;
import com.cloud.utils.ActionDelegate;
import com.cloud.utils.Pair;
import com.cloud.utils.Ternary;
import com.cloud.utils.concurrency.NamedThreadFactory;
import com.cloud.utils.script.Script;
import com.google.gson.Gson;
import com.vmware.vim25.ArrayOfManagedObjectReference;
import com.vmware.vim25.ChoiceOption;
@@ -92,6 +102,7 @@ import com.vmware.vim25.VirtualMachineConfigInfo;
import com.vmware.vim25.VirtualMachineConfigOption;
import com.vmware.vim25.VirtualMachineConfigSpec;
import com.vmware.vim25.VirtualMachineConfigSummary;
import com.vmware.vim25.VirtualMachineDefinedProfileSpec;
import com.vmware.vim25.VirtualMachineFileInfo;
import com.vmware.vim25.VirtualMachineFileLayoutEx;
import com.vmware.vim25.VirtualMachineMessage;
@@ -106,18 +117,6 @@ import com.vmware.vim25.VirtualMachineSnapshotInfo;
import com.vmware.vim25.VirtualMachineSnapshotTree;
import com.vmware.vim25.VirtualSCSIController;
import com.vmware.vim25.VirtualSCSISharing;
import com.vmware.vim25.VirtualMachineDefinedProfileSpec;

import com.cloud.hypervisor.vmware.mo.SnapshotDescriptor.SnapshotInfo;
import com.cloud.hypervisor.vmware.util.VmwareContext;
import com.cloud.hypervisor.vmware.util.VmwareHelper;
import com.cloud.utils.ActionDelegate;
import com.cloud.utils.Pair;
import com.cloud.utils.Ternary;
import com.cloud.utils.concurrency.NamedThreadFactory;
import com.cloud.utils.script.Script;

import static com.cloud.utils.NumbersUtil.toHumanReadableSize;

public class VirtualMachineMO extends BaseMO {
    private static final Logger s_logger = Logger.getLogger(VirtualMachineMO.class);
@@ -460,9 +459,13 @@ public class VirtualMachineMO extends BaseMO {
        return false;
    }

    public boolean changeDatastore(ManagedObjectReference morDataStore) throws Exception {
    public boolean changeDatastore(ManagedObjectReference morDataStore, VmwareHypervisorHost targetHost) throws Exception {
        VirtualMachineRelocateSpec relocateSpec = new VirtualMachineRelocateSpec();
        relocateSpec.setDatastore(morDataStore);
        if (targetHost != null) {
            relocateSpec.setHost(targetHost.getMor());
            relocateSpec.setPool(targetHost.getHyperHostOwnerResourcePool());
        }

        ManagedObjectReference morTask = _context.getService().relocateVMTask(_mor, relocateSpec, null);
VmwareHelper.java

@@ -40,6 +40,17 @@ import javax.xml.datatype.XMLGregorianCalendar;
import org.apache.commons.lang.StringUtils;
import org.apache.log4j.Logger;

import com.cloud.hypervisor.vmware.mo.CustomFieldConstants;
import com.cloud.hypervisor.vmware.mo.DatastoreMO;
import com.cloud.hypervisor.vmware.mo.DiskControllerType;
import com.cloud.hypervisor.vmware.mo.HostMO;
import com.cloud.hypervisor.vmware.mo.LicenseAssignmentManagerMO;
import com.cloud.hypervisor.vmware.mo.VirtualEthernetCardType;
import com.cloud.hypervisor.vmware.mo.VirtualMachineMO;
import com.cloud.hypervisor.vmware.mo.VmwareHypervisorHost;
import com.cloud.utils.Pair;
import com.cloud.utils.Ternary;
import com.cloud.utils.exception.ExceptionUtil;
import com.vmware.vim25.DistributedVirtualSwitchPortConnection;
import com.vmware.vim25.DynamicProperty;
import com.vmware.vim25.GuestOsDescriptor;
@@ -56,7 +67,6 @@ import com.vmware.vim25.VirtualCdromRemotePassthroughBackingInfo;
import com.vmware.vim25.VirtualDevice;
import com.vmware.vim25.VirtualDeviceBackingInfo;
import com.vmware.vim25.VirtualDeviceConnectInfo;
import com.vmware.vim25.VirtualUSBController;
import com.vmware.vim25.VirtualDisk;
import com.vmware.vim25.VirtualDiskFlatVer1BackingInfo;
import com.vmware.vim25.VirtualDiskFlatVer2BackingInfo;
@@ -72,21 +82,10 @@ import com.vmware.vim25.VirtualEthernetCardOpaqueNetworkBackingInfo;
import com.vmware.vim25.VirtualMachineConfigSpec;
import com.vmware.vim25.VirtualMachineSnapshotTree;
import com.vmware.vim25.VirtualPCNet32;
import com.vmware.vim25.VirtualUSBController;
import com.vmware.vim25.VirtualVmxnet2;
import com.vmware.vim25.VirtualVmxnet3;

import com.cloud.hypervisor.vmware.mo.DiskControllerType;
import com.cloud.hypervisor.vmware.mo.DatastoreMO;
import com.cloud.hypervisor.vmware.mo.HostMO;
import com.cloud.hypervisor.vmware.mo.CustomFieldConstants;
import com.cloud.hypervisor.vmware.mo.LicenseAssignmentManagerMO;
import com.cloud.hypervisor.vmware.mo.VirtualEthernetCardType;
import com.cloud.hypervisor.vmware.mo.VirtualMachineMO;
import com.cloud.hypervisor.vmware.mo.VmwareHypervisorHost;
import com.cloud.utils.Pair;
import com.cloud.utils.Ternary;
import com.cloud.utils.exception.ExceptionUtil;

public class VmwareHelper {
    @SuppressWarnings("unused")
    private static final Logger s_logger = Logger.getLogger(VmwareHelper.class);
@@ -744,4 +743,18 @@ public class VmwareHelper {
        return DatatypeFactory.newInstance().newXMLGregorianCalendar(gregorianCalendar);
    }

    public static HostMO getHostMOFromHostName(final VmwareContext context, final String hostName) {
        HostMO host = null;
        if (com.cloud.utils.StringUtils.isNotBlank(hostName) && hostName.contains("@")) {
            String hostMorInfo = hostName.split("@")[0];
            if (hostMorInfo.contains(":")) {
                ManagedObjectReference morHost = new ManagedObjectReference();
                morHost.setType(hostMorInfo.split(":")[0]);
                morHost.setValue(hostMorInfo.split(":")[1]);
                host = new HostMO(context, morHost);
            }
        }
        return host;
    }

}
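getHostMOFromHostName() above is what lets the agent turn the hostGuidInTargetCluster string carried by the commands into a host managed object without an extra management-server round trip: CloudStack's VMware host GUIDs embed the managed object reference before an '@' separator, in the shape of "HostSystem:host-123@10.1.1.1" (the concrete value here is illustrative), and the helper splits on '@' and ':' to rebuild the MOR. A minimal sketch of that parsing step:

    import com.vmware.vim25.ManagedObjectReference;

    public class HostGuidParseSketch {
        // Rebuild a host ManagedObjectReference from a CloudStack-style host GUID.
        // Example input (illustrative): "HostSystem:host-123@10.1.1.1"
        static ManagedObjectReference parse(String hostGuid) {
            String morInfo = hostGuid.split("@")[0];          // "HostSystem:host-123"
            ManagedObjectReference mor = new ManagedObjectReference();
            mor.setType(morInfo.split(":")[0]);               // "HostSystem"
            mor.setValue(morInfo.split(":")[1]);              // "host-123"
            return mor;                                       // wrapped in a HostMO with a VmwareContext by the helper
        }
    }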