VMware offline migration (#2848)

* - Offline VM and volume migration on VMware hypervisor hosts
- Also add a VM disk consolidation call on successful VM migrations

* Fix indentation of the Marvin test file and reformat against PEP 8

* * Fix a few comment typos
* Refactor debug messages to use String.format() when debug log level is enabled.

* Send the list of commands returned by the hypervisor guru instead of explicitly selecting the first one

* Fix an unhandled NPE during VM migration

* Revert to distinct event descriptions for VM-to-host and VM-to-storage-pool migration

* Reformat the test_primary_storage file against PEP 8 and remove unused imports

* Revert the deprecation messages in the custom StringUtils class to favour the use of the Apache StringUtils
dahn 2019-01-25 13:05:13 +01:00 committed by Gabriel Beims Bräscher
parent d68712eb7b
commit b363fd49f7
40 changed files with 2119 additions and 758 deletions

View File

@ -19,6 +19,7 @@ package com.cloud.hypervisor;
import java.util.List;
import java.util.Map;
import com.cloud.storage.StoragePool;
import org.apache.cloudstack.framework.config.ConfigKey;
import com.cloud.agent.api.Command;
@ -32,7 +33,7 @@ import com.cloud.vm.VirtualMachine;
import com.cloud.vm.VirtualMachineProfile;
public interface HypervisorGuru extends Adapter {
static final ConfigKey<Boolean> VmwareFullClone = new ConfigKey<Boolean>("Advanced", Boolean.class, "vmware.create.full.clone", "true",
ConfigKey<Boolean> VmwareFullClone = new ConfigKey<Boolean>("Advanced", Boolean.class, "vmware.create.full.clone", "true",
"If set to true, creates guest VMs as full clones on ESX", false);
HypervisorType getHypervisorType();
@ -84,4 +85,13 @@ public interface HypervisorGuru extends Adapter {
List<Command> finalizeExpungeVolumes(VirtualMachine vm);
Map<String, String> getClusterSettings(long vmId);
/**
* Will generate the commands to migrate a VM to a pool. For now this only works for stopped VMs on VMware.
*
* @param vm the stopped vm to migrate
* @param destination the primary storage pool to migrate to
* @return a list of commands to perform for a successful migration
*/
List<Command> finalizeMigrate(VirtualMachine vm, StoragePool destination);
}
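Judging from the orchestration changes further down, a guru that cannot handle the offline move should return nothing, letting the caller fall back to a plain storage migration. A minimal sketch of that default, assuming a non-VMware guru (hypothetical code, not part of this commit):

    // Hypothetical default for hypervisors without offline-migration support; an
    // empty list makes VirtualMachineManagerImpl.attemptHypervisorMigration()
    // return null, so the orchestrator falls back to volumeMgr.storageMigration().
    @Override
    public List<Command> finalizeMigrate(VirtualMachine vm, StoragePool destination) {
        return java.util.Collections.emptyList();
    }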

View File

@ -29,11 +29,21 @@ import org.apache.cloudstack.api.command.user.volume.MigrateVolumeCmd;
import org.apache.cloudstack.api.command.user.volume.ResizeVolumeCmd;
import org.apache.cloudstack.api.command.user.volume.UploadVolumeCmd;
import org.apache.cloudstack.api.response.GetUploadParamsResponse;
import org.apache.cloudstack.framework.config.ConfigKey;
import com.cloud.exception.ResourceAllocationException;
import com.cloud.user.Account;
public interface VolumeApiService {
ConfigKey<Long> ConcurrentMigrationsThresholdPerDatastore = new ConfigKey<Long>("Advanced"
, Long.class
, "concurrent.migrations.per.target.datastore"
, "0"
, "Limits number of migrations that can be handled per datastore concurrently; default is 0 - unlimited"
, true // not sure if this is to be dynamic
, ConfigKey.Scope.Global);
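A short sketch of how this key is meant to be read, mirroring the checkConcurrentJobsPerDatastoreThreshhold helpers added further down (variable names here are illustrative):

    // 0 (the default) means unlimited, so the guard only engages for positive values.
    Long threshold = VolumeApiService.ConcurrentMigrationsThresholdPerDatastore.value();
    if (threshold != null && threshold > 0) {
        // compare against the count of pending migration jobs for the target datastore
    }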
/**
* Creates the database object for a volume based on the given criteria
*

View File

@ -27,6 +27,7 @@ public abstract class BaseAsyncCmd extends BaseCmd {
public static final String ipAddressSyncObject = "ipaddress";
public static final String networkSyncObject = "network";
public static final String vpcSyncObject = "vpc";
public static final String migrationSyncObject = "migration";
public static final String snapshotHostSyncObject = "snapshothost";
public static final String gslbSyncObject = "globalserverloadbalancer";
private static final Logger s_logger = Logger.getLogger(BaseAsyncCmd.class.getName());

View File

@ -119,13 +119,15 @@ public class MigrateVMCmd extends BaseAsyncCmd {
@Override
public String getEventDescription() {
String eventDescription;
if (getHostId() != null) {
return "Attempting to migrate VM Id: " + this._uuidMgr.getUuid(VirtualMachine.class, getVirtualMachineId()) + " to host Id: " + this._uuidMgr.getUuid(Host.class, getHostId());
eventDescription = String.format("Attempting to migrate VM Id: %s to host Id: %s", getVirtualMachineId(), getHostId());
} else if (getStoragePoolId() != null) {
return "Attempting to migrate VM Id: " + this._uuidMgr.getUuid(VirtualMachine.class, getVirtualMachineId()) + " to storage pool Id: " + this._uuidMgr.getUuid(StoragePool.class, getStoragePoolId());
eventDescription = String.format("Attempting to migrate VM Id: %s to storage pool Id: %s", getVirtualMachineId(), getStoragePoolId());
} else {
return "Attempting to migrate VM Id: " + this._uuidMgr.getUuid(VirtualMachine.class, getVirtualMachineId());
eventDescription = String.format("Attempting to migrate VM Id: %s", getVirtualMachineId());
}
return eventDescription;
}
@Override
@ -152,16 +154,17 @@ public class MigrateVMCmd extends BaseAsyncCmd {
if (destinationHost.getType() != Host.Type.Routing) {
throw new InvalidParameterValueException("The specified host(" + destinationHost.getName() + ") is not suitable to migrate the VM, please specify another one");
}
CallContext.current().setEventDetails("VM Id: " + this._uuidMgr.getUuid(VirtualMachine.class, getVirtualMachineId()) + ((getHostId() != null) ? " to host Id: " + this._uuidMgr.getUuid(Host.class, getHostId()) : "" ));
CallContext.current().setEventDetails("VM Id: " + getVirtualMachineId() + " to host Id: " + getHostId());
}
// Offline migration is performed when this parameter is specified
StoragePool destStoragePool = null;
if (getStoragePoolId() != null) {
destStoragePool = _storageService.getStoragePool(getStoragePoolId());
if (destStoragePool == null) {
throw new InvalidParameterValueException("Unable to find the storage pool to migrate the VM");
}
CallContext.current().setEventDetails("VM Id: " + this._uuidMgr.getUuid(VirtualMachine.class, getVirtualMachineId()) + " to storage pool Id: " + this._uuidMgr.getUuid(StoragePool.class, getStoragePoolId()));
CallContext.current().setEventDetails("VM Id: " + getVirtualMachineId() + " to storage pool Id: " + getStoragePoolId());
}
try {
@ -172,7 +175,7 @@ public class MigrateVMCmd extends BaseAsyncCmd {
migratedVm = _userVmService.vmStorageMigration(getVirtualMachineId(), destStoragePool);
}
if (migratedVm != null) {
UserVmResponse response = _responseGenerator.createUserVmResponse(ResponseView.Full, "virtualmachine", (UserVm)migratedVm).get(0);
UserVmResponse response = _responseGenerator.createUserVmResponse(ResponseView.Full, "virtualmachine", (UserVm) migratedVm).get(0);
response.setResponseName(getCommandName());
setResponseObject(response);
} else {
@ -181,15 +184,27 @@ public class MigrateVMCmd extends BaseAsyncCmd {
} catch (ResourceUnavailableException ex) {
s_logger.warn("Exception: ", ex);
throw new ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, ex.getMessage());
} catch (ConcurrentOperationException e) {
s_logger.warn("Exception: ", e);
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage());
} catch (ManagementServerException e) {
s_logger.warn("Exception: ", e);
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage());
} catch (VirtualMachineMigrationException e) {
} catch (VirtualMachineMigrationException | ConcurrentOperationException | ManagementServerException e) {
s_logger.warn("Exception: ", e);
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage());
}
}
@Override
public String getSyncObjType() {
return (getSyncObjId() != null) ? BaseAsyncCmd.migrationSyncObject : null;
}
@Override
public Long getSyncObjId() {
if (getStoragePoolId() != null) {
return getStoragePoolId();
}
// OfflineVmwareMigrations: undocumented feature;
// OfflineVmwareMigrations: on implementing a maximum queue size for per-storage migrations, it seems counter-intuitive for the user to not enforce it for hosts as well.
if (getHostId() != null) {
return getHostId();
}
return null;
}
}
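Returning the new "migration" sync object ties jobs to the target pool (or host): the async job dispatcher queues jobs that share the same (syncObjType, syncObjId) pair, so concurrent migrations against one datastore or host are serialized. A hedged illustration of the keying, not code from this commit:

    // Hypothetical helper showing how the dispatcher keys its queue; cmd is any
    // BaseAsyncCmd, for instance the MigrateVMCmd above.
    static String queueKey(BaseAsyncCmd cmd) {
        return cmd.getSyncObjType() + ":" + cmd.getSyncObjId(); // e.g. "migration:42"
    }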

View File

@ -147,6 +147,7 @@ public class MigrateVirtualMachineWithVolumeCmd extends BaseAsyncCmd {
}
Host destinationHost = _resourceService.getHost(getHostId());
// OfflineVmwareMigration: the destination host should not be a required parameter for stopped VMs
if (destinationHost == null) {
throw new InvalidParameterValueException("Unable to find the host to migrate the VM, host id =" + getHostId());
}
@ -163,13 +164,7 @@ public class MigrateVirtualMachineWithVolumeCmd extends BaseAsyncCmd {
} catch (ResourceUnavailableException ex) {
s_logger.warn("Exception: ", ex);
throw new ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, ex.getMessage());
} catch (ConcurrentOperationException e) {
s_logger.warn("Exception: ", e);
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage());
} catch (ManagementServerException e) {
s_logger.warn("Exception: ", e);
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage());
} catch (VirtualMachineMigrationException e) {
} catch (ConcurrentOperationException | ManagementServerException | VirtualMachineMigrationException e) {
s_logger.warn("Exception: ", e);
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage());
}

View File

@ -120,4 +120,16 @@ public class MigrateVolumeCmd extends BaseAsyncCmd {
}
}
@Override
public String getSyncObjType() {
return (getSyncObjId() != null) ? BaseAsyncCmd.migrationSyncObject : null;
}
@Override
public Long getSyncObjId() {
if (getStoragePoolId() != null) {
return getStoragePoolId();
}
return null;
}
}

View File

@ -0,0 +1,43 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package com.cloud.agent.api;
import org.apache.cloudstack.storage.to.VolumeObjectTO;
import java.util.List;
public class MigrateVmToPoolAnswer extends Answer {
List<VolumeObjectTO> volumeTos;
public MigrateVmToPoolAnswer(MigrateVmToPoolCommand cmd, Exception ex) {
super(cmd, ex);
volumeTos = null;
}
public MigrateVmToPoolAnswer(MigrateVmToPoolCommand cmd, List<VolumeObjectTO> volumeTos) {
super(cmd, true, null);
this.volumeTos = volumeTos;
}
public List<VolumeObjectTO> getVolumeTos() {
return volumeTos;
}
}

View File

@ -0,0 +1,70 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package com.cloud.agent.api;
import com.cloud.agent.api.to.VolumeTO;
import java.util.Collection;
/**
* Used to tell the agent to migrate a VM to a different primary storage pool.
* It is for now only implemented on VMware and is supposed to work irrespective of whether the VM is started or not.
*
*/
public class MigrateVmToPoolCommand extends Command {
private Collection<VolumeTO> volumes;
private String vmName;
private String destinationPool;
private boolean executeInSequence = false;
protected MigrateVmToPoolCommand() {
}
/**
*
* @param vmName the name of the VM to migrate
* @param volumes used to supply feedback on VMware-generated names
* @param destinationPool the primary storage pool to migrate the VM to
* @param executeInSequence whether the command should be executed sequentially on the host
*/
public MigrateVmToPoolCommand(String vmName, Collection<VolumeTO> volumes, String destinationPool, boolean executeInSequence) {
this.vmName = vmName;
this.volumes = volumes;
this.destinationPool = destinationPool;
this.executeInSequence = executeInSequence;
}
public Collection<VolumeTO> getVolumes() {
return volumes;
}
public String getDestinationPool() {
return destinationPool;
}
public String getVmName() {
return vmName;
}
@Override
public boolean executeInSequence() {
return executeInSequence;
}
}
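A hedged construction sketch; the values are illustrative, and the real call site is VMwareGuru.finalizeMigrate further down, which passes the VM's instance name, the destination pool's UUID and executeInSequence = true:

    // Illustration only: vols would hold one VolumeTO per disk of the VM.
    List<VolumeTO> vols = new ArrayList<>();
    MigrateVmToPoolCommand cmd =
            new MigrateVmToPoolCommand("i-2-18-VM", vols, destPool.getUuid(), true);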

View File

@ -22,14 +22,19 @@ package com.cloud.agent.api;
public class UnregisterVMCommand extends Command {
String vmName;
boolean cleanupVmFiles = false;
boolean executeInSequence;
public UnregisterVMCommand(String vmName) {
this(vmName, false);
}
public UnregisterVMCommand(String vmName, boolean executeInSequence) {
this.vmName = vmName;
this.executeInSequence = executeInSequence;
}
@Override
public boolean executeInSequence() {
return false;
return executeInSequence;
}
public String getVmName() {

View File

@ -31,6 +31,7 @@ public class MigrateVolumeCommand extends Command {
long volumeId;
String volumePath;
StorageFilerTO pool;
StorageFilerTO sourcePool;
String attachedVmName;
Volume.Type volumeType;
@ -47,14 +48,17 @@ public class MigrateVolumeCommand extends Command {
}
public MigrateVolumeCommand(long volumeId, String volumePath, StoragePool pool, String attachedVmName, Volume.Type volumeType, int timeout) {
this.volumeId = volumeId;
this.volumePath = volumePath;
this.pool = new StorageFilerTO(pool);
this(volumeId,volumePath,pool,timeout);
this.attachedVmName = attachedVmName;
this.volumeType = volumeType;
this.setWait(timeout);
}
public MigrateVolumeCommand(long volumeId, String volumePath, StoragePool sourcePool, StoragePool targetPool) {
this(volumeId,volumePath,targetPool, null, Volume.Type.UNKNOWN, -1);
this.sourcePool = new StorageFilerTO(sourcePool);
}
public MigrateVolumeCommand(DataTO srcData, DataTO destData, Map<String, String> srcDetails, Map<String, String> destDetails, int timeout) {
this.srcData = srcData;
this.destData = destData;
@ -81,6 +85,14 @@ public class MigrateVolumeCommand extends Command {
return pool;
}
public StorageFilerTO getSourcePool() {
return sourcePool;
}
public StorageFilerTO getTargetPool() {
return pool;
}
public String getAttachedVmName() {
return attachedVmName;
}

View File

@ -25,11 +25,28 @@ import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
import com.cloud.agent.api.to.VirtualMachineTO;
import com.cloud.host.Host;
/**
* Interface to query how to move data around and to commission the move
*/
public interface DataMotionStrategy {
/**
* Reports whether this instance can do a move from source to destination
* @param srcData object to move
* @param destData location to move it to
* @return the expertise level with which this instance knows how to handle the move
*/
StrategyPriority canHandle(DataObject srcData, DataObject destData);
StrategyPriority canHandle(Map<VolumeInfo, DataStore> volumeMap, Host srcHost, Host destHost);
/**
* Copy the source volume to its destination (on a host if not null)
*
* @param srcData volume to move
* @param destData volume description as intended after the move
* @param destHost if not null destData should be reachable from here
* @param callback where to report completion or failure to
*/
void copyAsync(DataObject srcData, DataObject destData, Host destHost, AsyncCompletionCallback<CopyCommandResult> callback);
void copyAsync(Map<VolumeInfo, DataStore> volumeMap, VirtualMachineTO vmTo, Host srcHost, Host destHost, AsyncCompletionCallback<CopyCommandResult> callback);
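A minimal implementor sketch under the documented contract; the class is hypothetical and merely shows the shape, declining every move so the strategy factory would try the next candidate:

    // Hypothetical no-op strategy: CANT_HANDLE keeps it from ever being selected.
    public class DecliningDataMotionStrategy implements DataMotionStrategy {
        @Override
        public StrategyPriority canHandle(DataObject srcData, DataObject destData) {
            return StrategyPriority.CANT_HANDLE;
        }
        @Override
        public StrategyPriority canHandle(Map<VolumeInfo, DataStore> volumeMap, Host srcHost, Host destHost) {
            return StrategyPriority.CANT_HANDLE;
        }
        @Override
        public void copyAsync(DataObject srcData, DataObject destData, Host destHost,
                AsyncCompletionCallback<CopyCommandResult> callback) {
            callback.complete(new CopyCommandResult(null, null)); // report completion immediately
        }
        @Override
        public void copyAsync(Map<VolumeInfo, DataStore> volumeMap, VirtualMachineTO vmTo, Host srcHost,
                Host destHost, AsyncCompletionCallback<CopyCommandResult> callback) {
            callback.complete(new CopyCommandResult(null, null));
        }
    }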

View File

@ -106,7 +106,14 @@ public interface StorageManager extends StorageService {
* @param poolId
* @return comma separated list of tags
*/
public String getStoragePoolTags(long poolId);
String getStoragePoolTags(long poolId);
/**
* Returns the list of tags for the specified storage pool
* @param poolId
* @return a list of tags
*/
List<String> getStoragePoolTagList(long poolId);
Answer sendToPool(long poolId, Command cmd) throws StorageUnavailableException;

View File

@ -41,6 +41,9 @@ import javax.naming.ConfigurationException;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao;
import org.apache.cloudstack.api.command.admin.vm.MigrateVMCmd;
import org.apache.cloudstack.api.command.admin.volume.MigrateVolumeCmdByAdmin;
import org.apache.cloudstack.api.command.user.volume.MigrateVolumeCmd;
import org.apache.cloudstack.ca.CAManager;
import org.apache.cloudstack.context.CallContext;
import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService;
@ -86,6 +89,7 @@ import com.cloud.agent.api.ClusterVMMetaDataSyncAnswer;
import com.cloud.agent.api.ClusterVMMetaDataSyncCommand;
import com.cloud.agent.api.Command;
import com.cloud.agent.api.MigrateCommand;
import com.cloud.agent.api.MigrateVmToPoolAnswer;
import com.cloud.agent.api.ModifyTargetsCommand;
import com.cloud.agent.api.PingRoutingCommand;
import com.cloud.agent.api.PlugNicAnswer;
@ -138,10 +142,8 @@ import com.cloud.exception.AffinityConflictException;
import com.cloud.exception.AgentUnavailableException;
import com.cloud.exception.ConcurrentOperationException;
import com.cloud.exception.ConnectionException;
import com.cloud.exception.InsufficientAddressCapacityException;
import com.cloud.exception.InsufficientCapacityException;
import com.cloud.exception.InsufficientServerCapacityException;
import com.cloud.exception.InsufficientVirtualNetworkCapacityException;
import com.cloud.exception.InvalidParameterValueException;
import com.cloud.exception.OperationTimedoutException;
import com.cloud.exception.ResourceUnavailableException;
@ -171,10 +173,12 @@ import com.cloud.service.dao.ServiceOfferingDao;
import com.cloud.storage.DiskOfferingVO;
import com.cloud.storage.ScopeType;
import com.cloud.storage.Storage.ImageFormat;
import com.cloud.storage.StorageManager;
import com.cloud.storage.StoragePool;
import com.cloud.storage.VMTemplateVO;
import com.cloud.storage.Volume;
import com.cloud.storage.Volume.Type;
import com.cloud.storage.VolumeApiService;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.DiskOfferingDao;
import com.cloud.storage.dao.GuestOSCategoryDao;
@ -314,6 +318,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
private VmWorkJobDao _workJobDao;
@Inject
private AsyncJobManager _jobMgr;
@Inject
private StorageManager storageMgr;
VmWorkJobHandlerProxy _jobHandlerProxy = new VmWorkJobHandlerProxy(this);
@ -1820,14 +1826,6 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
protected boolean stateTransitTo(final VMInstanceVO vm, final VirtualMachine.Event e, final Long hostId, final String reservationId) throws NoTransitionException {
// if there are active vm snapshots task, state change is not allowed
// Disable this hack; VM snapshot tasks need to be managed by their own orchestration flow instead of
// being hacked here in the general VM manager
/*
if (_vmSnapshotMgr.hasActiveVMSnapshotTasks(vm.getId())) {
s_logger.error("State transit with event: " + e + " failed due to: " + vm.getInstanceName() + " has active VM snapshots tasks");
return false;
}
*/
vm.setReservationId(reservationId);
return _stateMachine.transitTo(vm, e, new Pair<Long, Long>(vm.getHostId(), hostId), _vmDao);
}
@ -1836,15 +1834,6 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
public boolean stateTransitTo(final VirtualMachine vm1, final VirtualMachine.Event e, final Long hostId) throws NoTransitionException {
final VMInstanceVO vm = (VMInstanceVO)vm1;
/*
* Remove the hacking logic here.
// if there are active vm snapshots task, state change is not allowed
if (_vmSnapshotMgr.hasActiveVMSnapshotTasks(vm.getId())) {
s_logger.error("State transit with event: " + e + " failed due to: " + vm.getInstanceName() + " has active VM snapshots tasks");
return false;
}
*/
final State oldState = vm.getState();
if (oldState == State.Starting) {
if (e == Event.OperationSucceeded) {
@ -1988,30 +1977,192 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
private void orchestrateStorageMigration(final String vmUuid, final StoragePool destPool) {
final VMInstanceVO vm = _vmDao.findByUuid(vmUuid);
preStorageMigrationStateCheck(destPool, vm);
try {
if(s_logger.isDebugEnabled()) {
s_logger.debug(String.format("Offline migration of %s vm %s with volumes",
vm.getHypervisorType().toString(),
vm.getInstanceName()));
}
migrateThroughHypervisorOrStorage(destPool, vm);
} catch (ConcurrentOperationException
| InsufficientCapacityException // possibly InsufficientVirtualNetworkCapacityException or InsufficientAddressCapacityException
| StorageUnavailableException e) {
String msg = String.format("Failed to migrate VM: %s", vmUuid);
s_logger.debug(msg);
throw new CloudRuntimeException(msg, e);
} finally {
try {
stateTransitTo(vm, Event.AgentReportStopped, null);
} catch (final NoTransitionException e) {
String message = String.format("Failed to change the state of VM: %s", vmUuid);
s_logger.debug(message);
throw new CloudRuntimeException(message, e);
}
}
}
private Answer[] attemptHypervisorMigration(StoragePool destPool, VMInstanceVO vm) {
final HypervisorGuru hvGuru = _hvGuruMgr.getGuru(vm.getHypervisorType());
// OfflineVmwareMigration: in case of VMware, call vCenter to do it for us.
// OfflineVmwareMigration: should we check the proximity of source and destination
// OfflineVmwareMigration: if we are in the same cluster/datacentre/pool or whatever?
// OfflineVmwareMigration: we are checking on success to optionally delete an old vm if we are not
List<Command> commandsToSend = hvGuru.finalizeMigrate(vm, destPool);
Long hostId = vm.getHostId();
// OfflineVmwareMigration: probably this is null when vm is stopped
if(hostId == null) {
hostId = vm.getLastHostId();
if (s_logger.isDebugEnabled()) {
s_logger.debug(String.format("host id is null, using last host id %d", hostId) );
}
}
if(CollectionUtils.isNotEmpty(commandsToSend)) {
Commands commandsContainer = new Commands(Command.OnError.Stop);
commandsContainer.addCommands(commandsToSend);
try {
// OfflineVmwareMigration: change to the call back variety?
// OfflineVmwareMigration: getting a Long seq to be filled with _agentMgr.send(hostId, commandsContainer, this)
return _agentMgr.send(hostId, commandsContainer);
} catch (AgentUnavailableException | OperationTimedoutException e) {
throw new CloudRuntimeException(String.format("Failed to migrate VM: %s", vm.getUuid()),e);
}
}
return null;
}
private void afterHypervisorMigrationCleanup(StoragePool destPool, VMInstanceVO vm, HostVO srcHost, Long srcClusterId, Answer[] hypervisorMigrationResults) throws InsufficientCapacityException {
boolean isDebugEnabled = s_logger.isDebugEnabled();
if(isDebugEnabled) {
String msg = String.format("cleaning up volumes after hypervisor pool migration of VM %s(%s) to pool %s(%s)", vm.getInstanceName(), vm.getUuid(), destPool.getName(), destPool.getUuid());
s_logger.debug(msg);
}
setDestinationPoolAndReallocateNetwork(destPool, vm);
// OfflineVmwareMigration: don't set this to null, or have another way to address the command; migrating twice will lead to an NPE
Long destPodId = destPool.getPodId();
Long vmPodId = vm.getPodIdToDeployIn();
if (destPodId == null || ! destPodId.equals(vmPodId)) {
if(isDebugEnabled) {
String msg = String.format("resetting lastHost for VM %s(%s) as pod (%s) is no good.", vm.getInstanceName(), vm.getUuid(), destPodId);
s_logger.debug(msg);
}
vm.setLastHostId(null);
vm.setPodIdToDeployIn(destPodId);
// OfflineVmwareMigration: a consecutive migration will probably fail (no host nor pod)
}// else keep last host set for this vm
markVolumesInPool(vm,destPool, hypervisorMigrationResults);
// OfflineVmwareMigration: deal with answers, if (hypervisorMigrationResults.length > 0)
// OfflineVmwareMigration: iterate over the volumes for data updates
}
private void markVolumesInPool(VMInstanceVO vm, StoragePool destPool, Answer[] hypervisorMigrationResults) {
MigrateVmToPoolAnswer relevantAnswer = null;
for (Answer answer : hypervisorMigrationResults) {
if (s_logger.isTraceEnabled()) {
s_logger.trace(String.format("received an %s: %s", answer.getClass().getSimpleName(), answer));
}
if (answer instanceof MigrateVmToPoolAnswer) {
relevantAnswer = (MigrateVmToPoolAnswer) answer;
}
}
if (relevantAnswer == null) {
throw new CloudRuntimeException("no relevant migration results found");
}
List<VolumeVO> volumes = _volsDao.findUsableVolumesForInstance(vm.getId());
if(s_logger.isDebugEnabled()) {
String msg = String.format("found %d volumes for VM %s(uuid:%s, id:%d)", volumes.size(), vm.getInstanceName(), vm.getUuid(), vm.getId());
s_logger.debug(msg);
}
for (VolumeObjectTO result : relevantAnswer.getVolumeTos() ) {
if(s_logger.isDebugEnabled()) {
s_logger.debug(String.format("updating volume (%d) with path '%s' on pool '%d'", result.getId(), result.getPath(), destPool.getId()));
}
VolumeVO volume = _volsDao.findById(result.getId());
volume.setPath(result.getPath());
volume.setPoolId(destPool.getId());
_volsDao.update(volume.getId(), volume);
}
}
private void migrateThroughHypervisorOrStorage(StoragePool destPool, VMInstanceVO vm) throws StorageUnavailableException, InsufficientCapacityException {
final VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm);
final Long srcHostId = vm.getHostId() != null ? vm.getHostId() : vm.getLastHostId();
final HostVO srcHost = _hostDao.findById(srcHostId);
final Long srcClusterId = srcHost.getClusterId();
Answer[] hypervisorMigrationResults = attemptHypervisorMigration(destPool, vm);
boolean migrationResult = false;
if (hypervisorMigrationResults == null) {
// OfflineVmwareMigration: if the HypervisorGuru can't do it, let the volume manager take care of it.
migrationResult = volumeMgr.storageMigration(profile, destPool);
if (migrationResult) {
afterStorageMigrationCleanup(destPool, vm, srcHost, srcClusterId);
} else {
s_logger.debug("Storage migration failed");
}
} else {
afterHypervisorMigrationCleanup(destPool, vm, srcHost, srcClusterId, hypervisorMigrationResults);
}
}
private void preStorageMigrationStateCheck(StoragePool destPool, VMInstanceVO vm) {
if (destPool == null) {
throw new CloudRuntimeException("Unable to migrate vm: missing destination storage pool");
}
checkDestinationForTags(destPool, vm);
try {
stateTransitTo(vm, VirtualMachine.Event.StorageMigrationRequested, null);
stateTransitTo(vm, Event.StorageMigrationRequested, null);
} catch (final NoTransitionException e) {
s_logger.debug("Unable to migrate vm: " + e.toString());
throw new CloudRuntimeException("Unable to migrate vm: " + e.toString());
String msg = String.format("Unable to migrate vm: %s", vm.getUuid());
s_logger.debug(msg);
throw new CloudRuntimeException(msg, e);
}
}
final VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm);
boolean migrationResult = false;
try {
migrationResult = volumeMgr.storageMigration(profile, destPool);
if (migrationResult) {
//if the vm is migrated to different pod in basic mode, need to reallocate ip
if (destPool.getPodId() != null && !destPool.getPodId().equals(vm.getPodIdToDeployIn())) {
final DataCenterDeployment plan = new DataCenterDeployment(vm.getDataCenterId(), destPool.getPodId(), null, null, null, null);
final VirtualMachineProfileImpl vmProfile = new VirtualMachineProfileImpl(vm, null, null, null, null);
_networkMgr.reallocate(vmProfile, plan);
private void checkDestinationForTags(StoragePool destPool, VMInstanceVO vm) {
List<VolumeVO> vols = _volsDao.findUsableVolumesForInstance(vm.getId());
// OfflineVmwareMigration: iterate over volumes
// OfflineVmwareMigration: get disk offering
List<String> storageTags = storageMgr.getStoragePoolTagList(destPool.getId());
for(Volume vol : vols) {
DiskOfferingVO diskOffering = _diskOfferingDao.findById(vol.getDiskOfferingId());
List<String> volumeTags = StringUtils.csvTagsToList(diskOffering.getTags());
if(! matches(volumeTags, storageTags)) {
String msg = String.format("destination pool '%s' with tags '%s', does not support the volume diskoffering for volume '%s' (tags: '%s') ",
destPool.getName(),
StringUtils.listToCsvTags(storageTags),
vol.getName(),
StringUtils.listToCsvTags(volumeTags)
);
throw new CloudRuntimeException(msg);
}
}
}
static boolean matches(List<String> volumeTags, List<String> storagePoolTags) {
// OfflineVmwareMigration: commons collections 4 allows for Collections.containsAll(volumeTags,storagePoolTags);
boolean result = true;
if (volumeTags != null) {
for (String tag : volumeTags) {
// there is a volume tags so
if (storagePoolTags == null || !storagePoolTags.contains(tag)) {
result = false;
break;
}
}
}
return result;
}
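The commented-out commons-collections idea can also be written with plain java.util while preserving the null and empty semantics that the matchesOfSorts unit test further down pins down; a sketch, not the committed code:

    // Equivalent formulation: an absent or empty requirement matches anything;
    // otherwise every volume tag must be present among the storage pool tags.
    static boolean matchesAlternative(List<String> volumeTags, List<String> storagePoolTags) {
        return volumeTags == null || volumeTags.isEmpty()
                || (storagePoolTags != null && storagePoolTags.containsAll(volumeTags));
    }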
private void afterStorageMigrationCleanup(StoragePool destPool, VMInstanceVO vm, HostVO srcHost, Long srcClusterId) throws InsufficientCapacityException {
setDestinationPoolAndReallocateNetwork(destPool, vm);
// when starting the vm next time, don't look at last_host_id, only choose the host based on volume/storage pool
vm.setLastHostId(null);
@ -2020,57 +2171,49 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
// If VM was cold migrated between clusters belonging to two different VMware DCs,
// unregister the VM from the source host and cleanup the associated VM files.
if (vm.getHypervisorType().equals(HypervisorType.VMware)) {
Long srcClusterId = null;
Long srcHostId = vm.getHostId() != null ? vm.getHostId() : vm.getLastHostId();
if (srcHostId != null) {
HostVO srcHost = _hostDao.findById(srcHostId);
srcClusterId = srcHost.getClusterId();
afterStorageMigrationVmwareVMcleanup(destPool, vm, srcHost, srcClusterId);
}
}
private void setDestinationPoolAndReallocateNetwork(StoragePool destPool, VMInstanceVO vm) throws InsufficientCapacityException {
//if the vm is migrated to different pod in basic mode, need to reallocate ip
if (destPool.getPodId() != null && !destPool.getPodId().equals(vm.getPodIdToDeployIn())) {
if (s_logger.isDebugEnabled()) {
String msg = String.format("as the pod for vm %s has changed we are reallocating its network", vm.getInstanceName());
s_logger.debug(msg);
}
final DataCenterDeployment plan = new DataCenterDeployment(vm.getDataCenterId(), destPool.getPodId(), null, null, null, null);
final VirtualMachineProfileImpl vmProfile = new VirtualMachineProfileImpl(vm, null, null, null, null);
_networkMgr.reallocate(vmProfile, plan);
}
}
private void afterStorageMigrationVmwareVMcleanup(StoragePool destPool, VMInstanceVO vm, HostVO srcHost, Long srcClusterId) {
// OfflineVmwareMigration: this should only happen on storage migration, else the guru would already have issued the command
final Long destClusterId = destPool.getClusterId();
if (srcClusterId != null && destClusterId != null && ! srcClusterId.equals(destClusterId)) {
final String srcDcName = _clusterDetailsDao.getVmwareDcName(srcClusterId);
final String destDcName = _clusterDetailsDao.getVmwareDcName(destClusterId);
if (srcDcName != null && destDcName != null && !srcDcName.equals(destDcName)) {
s_logger.debug("Since VM's storage was successfully migrated across VMware Datacenters, unregistering VM: " + vm.getInstanceName() +
" from source host: " + srcHostId);
final UnregisterVMCommand uvc = new UnregisterVMCommand(vm.getInstanceName());
uvc.setCleanupVmFiles(true);
try {
_agentMgr.send(srcHostId, uvc);
} catch (final AgentUnavailableException | OperationTimedoutException e) {
throw new CloudRuntimeException("Failed to unregister VM: " + vm.getInstanceName() + " from source host: " + srcHostId +
" after successfully migrating VM's storage across VMware Datacenters");
}
removeStaleVmFromSource(vm, srcHost);
}
}
}
} else {
s_logger.debug("Storage migration failed");
}
} catch (final ConcurrentOperationException e) {
s_logger.debug("Failed to migration: " + e.toString());
throw new CloudRuntimeException("Failed to migration: " + e.toString());
} catch (final InsufficientVirtualNetworkCapacityException e) {
s_logger.debug("Failed to migration: " + e.toString());
throw new CloudRuntimeException("Failed to migration: " + e.toString());
} catch (final InsufficientAddressCapacityException e) {
s_logger.debug("Failed to migration: " + e.toString());
throw new CloudRuntimeException("Failed to migration: " + e.toString());
} catch (final InsufficientCapacityException e) {
s_logger.debug("Failed to migration: " + e.toString());
throw new CloudRuntimeException("Failed to migration: " + e.toString());
} catch (final StorageUnavailableException e) {
s_logger.debug("Failed to migration: " + e.toString());
throw new CloudRuntimeException("Failed to migration: " + e.toString());
} finally {
// OfflineVmwareMigration: on port forward, refactor this to be done in two steps:
// OfflineVmwareMigration: command creation in the guru.migrate method
// OfflineVmwareMigration: sending it up in attemptHypervisorMigration with execute-in-sequence (responsibility of the guru)
private void removeStaleVmFromSource(VMInstanceVO vm, HostVO srcHost) {
s_logger.debug("Since VM's storage was successfully migrated across VMware Datacenters, unregistering VM: " + vm.getInstanceName() +
" from source host: " + srcHost.getId());
final UnregisterVMCommand uvc = new UnregisterVMCommand(vm.getInstanceName());
uvc.setCleanupVmFiles(true);
try {
stateTransitTo(vm, VirtualMachine.Event.AgentReportStopped, null);
} catch (final NoTransitionException e) {
s_logger.debug("Failed to change vm state: " + e.toString());
throw new CloudRuntimeException("Failed to change vm state: " + e.toString());
}
_agentMgr.send(srcHost.getId(), uvc);
} catch (final Exception e) {
throw new CloudRuntimeException("Failed to unregister VM: " + vm.getInstanceName() + " from source host: " + srcHost.getId() +
" after successfully migrating VM's storage across VMware Datacenters");
}
}
@ -4577,6 +4720,13 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
final User user = context.getCallingUser();
final Account account = context.getCallingAccount();
Map<Volume, StoragePool> volumeStorageMap = dest.getStorageForDisks();
if (volumeStorageMap != null) {
for (Volume vol : volumeStorageMap.keySet()) {
checkConcurrentJobsPerDatastoreThreshhold(volumeStorageMap.get(vol));
}
}
final VMInstanceVO vm = _vmDao.findByUuid(vmUuid);
final List<VmWorkJobVO> pendingWorkJobs = _workJobDao.listPendingWorkJobs(
@ -4738,6 +4888,16 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
return new VmJobVirtualMachineOutcome(workJob, vm.getId());
}
private void checkConcurrentJobsPerDatastoreThreshhold(final StoragePool destPool) {
final Long threshold = VolumeApiService.ConcurrentMigrationsThresholdPerDatastore.value();
if (threshold != null && threshold > 0) {
long count = _jobMgr.countPendingJobs("\"storageid\":\"" + destPool.getUuid() + "\"", MigrateVMCmd.class.getName(), MigrateVolumeCmd.class.getName(), MigrateVolumeCmdByAdmin.class.getName());
if (count > threshold) {
throw new CloudRuntimeException("Number of concurrent migration jobs per datastore exceeded the threshold: " + threshold.toString() + ". Please try again after some time.");
}
}
}
public Outcome<VirtualMachine> migrateVmStorageThroughJobQueue(
final String vmUuid, final StoragePool destPool) {

View File

@ -30,6 +30,10 @@ import java.util.concurrent.ExecutionException;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import com.cloud.storage.VolumeApiService;
import org.apache.cloudstack.api.command.admin.vm.MigrateVMCmd;
import org.apache.cloudstack.api.command.admin.volume.MigrateVolumeCmdByAdmin;
import org.apache.cloudstack.api.command.user.volume.MigrateVolumeCmd;
import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService;
import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
@ -953,10 +957,29 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
}
}
private void checkConcurrentJobsPerDatastoreThreshhold(final StoragePool destPool) {
final Long threshold = VolumeApiService.ConcurrentMigrationsThresholdPerDatastore.value();
if (threshold != null && threshold > 0) {
long count = _jobMgr.countPendingJobs("\"storageid\":\"" + destPool.getUuid() + "\"", MigrateVMCmd.class.getName(), MigrateVolumeCmd.class.getName(), MigrateVolumeCmdByAdmin.class.getName());
if (count > threshold) {
throw new CloudRuntimeException("Number of concurrent migration jobs per datastore exceeded the threshold: " + threshold.toString() + ". Please try again after some time.");
}
}
}
@Override
@DB
public Volume migrateVolume(Volume volume, StoragePool destPool) throws StorageUnavailableException {
VolumeInfo vol = volFactory.getVolume(volume.getId());
if (vol == null){
throw new CloudRuntimeException("Migrate volume failed because the volume object of volume " + volume.getName() + " is null");
}
if (destPool == null) {
throw new CloudRuntimeException("Migrate volume failed because destination storage pool is not available!!");
}
checkConcurrentJobsPerDatastoreThreshhold(destPool);
DataStore dataStoreTarget = dataStoreMgr.getDataStore(destPool.getId(), DataStoreRole.Primary);
AsyncCallFuture<VolumeApiResult> future = volService.copyVolume(vol, dataStoreTarget);
@ -1062,6 +1085,10 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
return true;
}
// OfflineVmwareMigration: in case we can (VMware?), don't iterate over volumes but tell the hypervisor to do the thing
if (s_logger.isDebugEnabled()) {
s_logger.debug("Offline vm migration was not done up the stack in VirtualMachineManager so trying here.");
}
for (Volume vol : volumesNeedToMigrate) {
Volume result = migrateVolume(vol, destPool);
if (result == null) {

View File

@ -17,6 +17,7 @@
package com.cloud.vm;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyLong;
@ -25,6 +26,7 @@ import static org.mockito.Mockito.times;
import static org.mockito.Mockito.when;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@ -178,7 +180,7 @@ public class VirtualMachineManagerImplTest {
boolean actual = virtualMachineManagerImpl.sendStop(guru, profile, false, false);
Assert.assertFalse(actual);
assertFalse(actual);
}
@Test
@ -192,7 +194,7 @@ public class VirtualMachineManagerImplTest {
boolean actual = virtualMachineManagerImpl.sendStop(guru, profile, false, false);
Assert.assertFalse(actual);
assertFalse(actual);
}
@Test
@ -242,7 +244,7 @@ public class VirtualMachineManagerImplTest {
boolean returnedValue = virtualMachineManagerImpl.isStorageCrossClusterMigration(hostMock, storagePoolVoMock);
Assert.assertFalse(returnedValue);
assertFalse(returnedValue);
}
@Test
@ -253,7 +255,7 @@ public class VirtualMachineManagerImplTest {
boolean returnedValue = virtualMachineManagerImpl.isStorageCrossClusterMigration(hostMock, storagePoolVoMock);
Assert.assertFalse(returnedValue);
assertFalse(returnedValue);
}
@Test
@ -317,7 +319,7 @@ public class VirtualMachineManagerImplTest {
Map<Volume, StoragePool> volumeToPoolObjectMap = virtualMachineManagerImpl.buildMapUsingUserInformation(virtualMachineProfileMock, hostMock, userDefinedVolumeToStoragePoolMap);
Assert.assertFalse(volumeToPoolObjectMap.isEmpty());
assertFalse(volumeToPoolObjectMap.isEmpty());
Assert.assertEquals(storagePoolVoMock, volumeToPoolObjectMap.get(volumeVoMock));
Mockito.verify(userDefinedVolumeToStoragePoolMap, times(1)).keySet();
@ -501,7 +503,7 @@ public class VirtualMachineManagerImplTest {
HashMap<Volume, StoragePool> volumeToPoolObjectMap = new HashMap<>();
virtualMachineManagerImpl.createVolumeToStoragePoolMappingIfPossible(virtualMachineProfileMock, hostMock, volumeToPoolObjectMap, volumeVoMock, storagePoolVoMock);
Assert.assertFalse(volumeToPoolObjectMap.isEmpty());
assertFalse(volumeToPoolObjectMap.isEmpty());
Assert.assertEquals(storagePoolMockOther, volumeToPoolObjectMap.get(volumeVoMock));
}
@ -558,7 +560,7 @@ public class VirtualMachineManagerImplTest {
virtualMachineManagerImpl.createStoragePoolMappingsForVolumes(virtualMachineProfileMock, hostMock, volumeToPoolObjectMap, allVolumes);
Assert.assertFalse(volumeToPoolObjectMap.isEmpty());
assertFalse(volumeToPoolObjectMap.isEmpty());
Assert.assertEquals(storagePoolVoMock, volumeToPoolObjectMap.get(volumeVoMock));
Mockito.verify(virtualMachineManagerImpl).executeManagedStorageChecksWhenTargetStoragePoolNotProvided(hostMock, storagePoolVoMock, volumeVoMock);
@ -587,4 +589,38 @@ public class VirtualMachineManagerImplTest {
inOrder.verify(virtualMachineManagerImpl).findVolumesThatWereNotMappedByTheUser(virtualMachineProfileMock, volumeToPoolObjectMap);
inOrder.verify(virtualMachineManagerImpl).createStoragePoolMappingsForVolumes(virtualMachineProfileMock, hostMock, volumeToPoolObjectMap, volumesNotMapped);
}
@Test
public void matchesOfSorts() {
List<String> nothing = null;
List<String> empty = new ArrayList<>();
List<String> tag = Arrays.asList("bla");
List<String> tags = Arrays.asList("bla", "blob");
List<String> others = Arrays.asList("bla", "blieb");
List<String> three = Arrays.asList("bla", "blob", "blieb");
// single match
assertTrue(VirtualMachineManagerImpl.matches(tag,tags));
assertTrue(VirtualMachineManagerImpl.matches(tag,others));
// no requirements
assertTrue(VirtualMachineManagerImpl.matches(nothing,tags));
assertTrue(VirtualMachineManagerImpl.matches(empty,tag));
// mis(sing)match
assertFalse(VirtualMachineManagerImpl.matches(tags,tag));
assertFalse(VirtualMachineManagerImpl.matches(tag,nothing));
assertFalse(VirtualMachineManagerImpl.matches(tag,empty));
// disjunct sets
assertFalse(VirtualMachineManagerImpl.matches(tags,others));
assertFalse(VirtualMachineManagerImpl.matches(others,tags));
// everything matches the larger set
assertTrue(VirtualMachineManagerImpl.matches(nothing,three));
assertTrue(VirtualMachineManagerImpl.matches(empty,three));
assertTrue(VirtualMachineManagerImpl.matches(tag,three));
assertTrue(VirtualMachineManagerImpl.matches(tags,three));
assertTrue(VirtualMachineManagerImpl.matches(others,three));
}
}

View File

@ -18,12 +18,17 @@
*/
package org.apache.cloudstack.storage.motion;
import java.util.Date;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import javax.inject.Inject;
import com.cloud.storage.Volume;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.VolumeDao;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
@ -40,10 +45,15 @@ import com.cloud.host.Host;
import com.cloud.utils.StringUtils;
import com.cloud.utils.exception.CloudRuntimeException;
@Component
public class DataMotionServiceImpl implements DataMotionService {
private static final Logger LOGGER = Logger.getLogger(DataMotionServiceImpl.class);
@Inject
StorageStrategyFactory storageStrategyFactory;
@Inject
VolumeDao volDao;
@Override
public void copyAsync(DataObject srcData, DataObject destData, Host destHost, AsyncCompletionCallback<CopyCommandResult> callback) {
@ -61,6 +71,9 @@ public class DataMotionServiceImpl implements DataMotionService {
DataMotionStrategy strategy = storageStrategyFactory.getDataMotionStrategy(srcData, destData);
if (strategy == null) {
// OfflineVmware volume migration
// Cleanup volumes from target and reset the state of volume at source
cleanUpVolumesForFailedMigrations(srcData, destData);
throw new CloudRuntimeException("Can't find strategy to move data. " + "Source: " + srcData.getType().name() + " '" + srcData.getUuid() + ", Destination: " +
destData.getType().name() + " '" + destData.getUuid() + "'");
}
@ -68,6 +81,23 @@ public class DataMotionServiceImpl implements DataMotionService {
strategy.copyAsync(srcData, destData, destHost, callback);
}
/**
* Offline VMware volume migration:
* clean up volumes after failed migrations and reset the state of the source volume
*
* @param srcData
* @param destData
*/
private void cleanUpVolumesForFailedMigrations(DataObject srcData, DataObject destData) {
VolumeVO destinationVO = volDao.findById(destData.getId());
VolumeVO sourceVO = volDao.findById(srcData.getId());
sourceVO.setState(Volume.State.Ready);
volDao.update(sourceVO.getId(), sourceVO);
destinationVO.setState(Volume.State.Expunged);
destinationVO.setRemoved(new Date());
volDao.update(destinationVO.getId(), destinationVO);
}
@Override
public void copyAsync(DataObject srcData, DataObject destData, AsyncCompletionCallback<CopyCommandResult> callback) {
copyAsync(srcData, destData, null, callback);

View File

@ -1408,6 +1408,19 @@ public class VolumeServiceImpl implements VolumeService {
@Override
public AsyncCallFuture<VolumeApiResult> copyVolume(VolumeInfo srcVolume, DataStore destStore) {
if (s_logger.isDebugEnabled()) {
DataStore srcStore = srcVolume.getDataStore();
String srcRole = (srcStore != null && srcStore.getRole() != null ? srcVolume.getDataStore().getRole().toString() : "<unknown role>");
String msg = String.format("copying %s(id=%d, role=%s) to %s (id=%d, role=%s)"
, srcVolume.getName()
, srcVolume.getId()
, srcRole
, destStore.getName()
, destStore.getId()
, destStore.getRole());
s_logger.debug(msg);
}
if (srcVolume.getState() == Volume.State.Uploaded) {
return copyVolumeFromImageToPrimary(srcVolume, destStore);
@ -1417,6 +1430,8 @@ public class VolumeServiceImpl implements VolumeService {
return copyVolumeFromPrimaryToImage(srcVolume, destStore);
}
// OfflineVmwareMigration: aren't we missing secondary to secondary in this logic?
AsyncCallFuture<VolumeApiResult> future = new AsyncCallFuture<VolumeApiResult>();
VolumeApiResult res = new VolumeApiResult(srcVolume);
try {
@ -1438,7 +1453,10 @@ public class VolumeServiceImpl implements VolumeService {
caller.setCallback(caller.getTarget().copyVolumeCallBack(null, null)).setContext(context);
motionSrv.copyAsync(srcVolume, destVolume, caller);
} catch (Exception e) {
s_logger.debug("Failed to copy volume" + e);
s_logger.error("Failed to copy volume: " + e);
if(s_logger.isDebugEnabled()) {
s_logger.debug("Failed to copy volume.", e);
}
res.setResult(e.toString());
future.complete(res);
}
@ -1461,12 +1479,10 @@ public class VolumeServiceImpl implements VolumeService {
AsyncCallFuture<VolumeApiResult> destroyFuture = expungeVolumeAsync(destVolume);
destroyFuture.get();
future.complete(res);
return null;
}
} else {
srcVolume.processEvent(Event.OperationSuccessed);
destVolume.processEvent(Event.MigrationCopySucceeded, result.getAnswer());
volDao.updateUuid(srcVolume.getId(), destVolume.getId());
_volumeStoreDao.updateVolumeId(srcVolume.getId(), destVolume.getId());
try {
destroyVolume(srcVolume.getId());
srcVolume = volFactory.getVolume(srcVolume.getId());
@ -1481,7 +1497,7 @@ public class VolumeServiceImpl implements VolumeService {
} catch (Exception e) {
s_logger.debug("failed to clean up volume on storage", e);
}
return null;
}
} catch (Exception e) {
s_logger.debug("Failed to process copy volume callback", e);
res.setResult(e.toString());

View File

@ -131,4 +131,6 @@ public interface AsyncJobManager extends Manager {
Object unmarshallResultObject(AsyncJob job);
List<AsyncJobVO> findFailureAsyncJobs(String... cmds);
long countPendingJobs(String havingInfo, String... cmds);
}

View File

@ -44,4 +44,6 @@ public interface AsyncJobDao extends GenericDao<AsyncJobVO, Long> {
List<AsyncJobVO> getResetJobs(long msid);
List<AsyncJobVO> getFailureJobsSinceLastMsStart(long msId, String... cmds);
long countPendingJobs(String havingInfo, String... cmds);
}

View File

@ -30,6 +30,7 @@ import org.apache.cloudstack.jobs.JobInfo;
import com.cloud.utils.db.DB;
import com.cloud.utils.db.Filter;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.GenericSearchBuilder;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.SearchCriteria.Op;
@ -46,6 +47,7 @@ public class AsyncJobDaoImpl extends GenericDaoBase<AsyncJobVO, Long> implements
private final SearchBuilder<AsyncJobVO> expiringUnfinishedAsyncJobSearch;
private final SearchBuilder<AsyncJobVO> expiringCompletedAsyncJobSearch;
private final SearchBuilder<AsyncJobVO> failureMsidAsyncJobSearch;
private final GenericSearchBuilder<AsyncJobVO, Long> asyncJobTypeSearch;
public AsyncJobDaoImpl() {
pendingAsyncJobSearch = createSearchBuilder();
@ -94,6 +96,13 @@ public class AsyncJobDaoImpl extends GenericDaoBase<AsyncJobVO, Long> implements
failureMsidAsyncJobSearch.and("job_cmd", failureMsidAsyncJobSearch.entity().getCmd(), Op.IN);
failureMsidAsyncJobSearch.done();
asyncJobTypeSearch = createSearchBuilder(Long.class);
asyncJobTypeSearch.select(null, SearchCriteria.Func.COUNT, asyncJobTypeSearch.entity().getId());
asyncJobTypeSearch.and("job_info", asyncJobTypeSearch.entity().getCmdInfo(),Op.LIKE);
asyncJobTypeSearch.and("job_cmd", asyncJobTypeSearch.entity().getCmd(), Op.IN);
asyncJobTypeSearch.and("status", asyncJobTypeSearch.entity().getStatus(), SearchCriteria.Op.EQ);
asyncJobTypeSearch.done();
}
@Override
@ -227,4 +236,14 @@ public class AsyncJobDaoImpl extends GenericDaoBase<AsyncJobVO, Long> implements
sc.setParameters("job_cmd", (Object[])cmds);
return listBy(sc);
}
@Override
public long countPendingJobs(String havingInfo, String... cmds) {
SearchCriteria<Long> sc = asyncJobTypeSearch.create();
sc.setParameters("status", JobInfo.Status.IN_PROGRESS);
sc.setParameters("job_cmd", (Object[])cmds);
sc.setParameters("job_info", "%" + havingInfo + "%");
List<Long> results = customSearch(sc, null);
return results.get(0);
}
}

View File

@ -1122,4 +1122,9 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager,
public List<AsyncJobVO> findFailureAsyncJobs(String... cmds) {
return _jobDao.getFailureJobsSinceLastMsStart(getMsid(), cmds);
}
@Override
public long countPendingJobs(String havingInfo, String... cmds) {
return _jobDao.countPendingJobs(havingInfo, cmds);
}
}

View File

@ -26,6 +26,11 @@ import java.util.UUID;
import javax.inject.Inject;
import com.cloud.agent.api.MigrateVmToPoolCommand;
import com.cloud.agent.api.UnregisterVMCommand;
import com.cloud.agent.api.to.VolumeTO;
import com.cloud.dc.ClusterDetailsDao;
import com.cloud.storage.StoragePool;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
@ -115,12 +120,14 @@ public class VMwareGuru extends HypervisorGuruBase implements HypervisorGuru, Co
@Inject
private GuestOSDao _guestOsDao;
@Inject
GuestOSHypervisorDao _guestOsHypervisorDao;
private GuestOSHypervisorDao _guestOsHypervisorDao;
@Inject
private HostDao _hostDao;
@Inject
private HostDetailsDao _hostDetailsDao;
@Inject
private ClusterDetailsDao _clusterDetailsDao;
@Inject
private CommandExecLogDao _cmdExecLogDao;
@Inject
private VmwareManager _vmwareMgr;
@ -640,4 +647,35 @@ public class VMwareGuru extends HypervisorGuruBase implements HypervisorGuru, Co
details.put(VmwareReserveMemory.key(), VmwareReserveMemory.valueIn(clusterId).toString());
return details;
}
@Override
public List<Command> finalizeMigrate(VirtualMachine vm, StoragePool destination) {
List<Command> commands = new ArrayList<Command>();
// OfflineVmwareMigration: specialised migration command
List<VolumeVO> volumes = _volumeDao.findByInstance(vm.getId());
List<VolumeTO> vols = new ArrayList<>();
for (Volume volume : volumes) {
VolumeTO vol = new VolumeTO(volume,destination);
vols.add(vol);
}
MigrateVmToPoolCommand migrateVmToPoolCommand = new MigrateVmToPoolCommand(vm.getInstanceName(), vols, destination.getUuid(), true);
commands.add(migrateVmToPoolCommand);
// OfflineVmwareMigration: cleanup if needed
final Long destClusterId = destination.getClusterId();
final Long srcClusterId = getClusterId(vm.getId());
if (srcClusterId != null && destClusterId != null && ! srcClusterId.equals(destClusterId)) {
final String srcDcName = _clusterDetailsDao.getVmwareDcName(srcClusterId);
final String destDcName = _clusterDetailsDao.getVmwareDcName(destClusterId);
if (srcDcName != null && destDcName != null && !srcDcName.equals(destDcName)) {
final UnregisterVMCommand unregisterVMCommand = new UnregisterVMCommand(vm.getInstanceName(), true);
unregisterVMCommand.setCleanupVmFiles(true);
commands.add(unregisterVMCommand);
}
}
return commands;
}
}

View File

@ -43,8 +43,8 @@ import java.util.UUID;
import javax.naming.ConfigurationException;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.math.NumberUtils;
import org.apache.commons.lang.StringUtils;
import org.apache.log4j.Logger;
import org.apache.log4j.NDC;
import org.joda.time.Duration;
@ -163,6 +163,8 @@ import com.cloud.agent.api.ManageSnapshotAnswer;
import com.cloud.agent.api.ManageSnapshotCommand;
import com.cloud.agent.api.MigrateAnswer;
import com.cloud.agent.api.MigrateCommand;
import com.cloud.agent.api.MigrateVmToPoolAnswer;
import com.cloud.agent.api.MigrateVmToPoolCommand;
import com.cloud.agent.api.MigrateWithStorageAnswer;
import com.cloud.agent.api.MigrateWithStorageCommand;
import com.cloud.agent.api.ModifySshKeysCommand;
@ -311,6 +313,7 @@ import com.cloud.vm.VmDetailConstants;
public class VmwareResource implements StoragePoolResource, ServerResource, VmwareHostService, VirtualRouterDeployer {
private static final Logger s_logger = Logger.getLogger(VmwareResource.class);
public static final String VMDK_EXTENSION = ".vmdk";
private static final Random RANDOM = new Random(System.nanoTime());
@ -442,6 +445,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
answer = execute((PrepareForMigrationCommand)cmd);
} else if (clz == MigrateCommand.class) {
answer = execute((MigrateCommand)cmd);
} else if (clz == MigrateVmToPoolCommand.class) {
answer = execute((MigrateVmToPoolCommand)cmd);
} else if (clz == MigrateWithStorageCommand.class) {
answer = execute((MigrateWithStorageCommand)cmd);
} else if (clz == MigrateVolumeCommand.class) {
@ -699,30 +704,38 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
}
if (vmName.equalsIgnoreCase("none")) {
// OfflineVmwareMigration: we need to refactor the worker vm creation out for use in migration methods as well as here
// OfflineVmwareMigration: this method is 100 lines and needs refactoring anyway
// we need to spawn a worker VM to attach the volume to and resize the volume.
useWorkerVm = true;
vmName = getWorkerName(getServiceContext(), cmd, 0);
String poolId = cmd.getPoolUuid();
// OfflineVmwareMigration: refactor for re-use
// OfflineVmwareMigration: 1. find data(store)
ManagedObjectReference morDS = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, poolId);
DatastoreMO dsMo = new DatastoreMO(hyperHost.getContext(), morDS);
s_logger.info("Create worker VM " + vmName);
// OfflineVmwareMigration: 2. create the worker with access to the data(store)
vmMo = HypervisorHostHelper.createWorkerVM(hyperHost, dsMo, vmName);
if (vmMo == null) {
// OfflineVmwareMigration: don't throw a general Exception but think of a specific one
throw new Exception("Unable to create a worker VM for volume resize");
}
synchronized (this) {
vmdkDataStorePath = VmwareStorageLayoutHelper.getLegacyDatastorePathFromVmdkFileName(dsMo, path + ".vmdk");
// OfflineVmwareMigration: 3. attach the disk to the worker
vmdkDataStorePath = VmwareStorageLayoutHelper.getLegacyDatastorePathFromVmdkFileName(dsMo, path + VMDK_EXTENSION);
vmMo.attachDisk(new String[] { vmdkDataStorePath }, morDS);
}
}
// OfflineVmwareMigration: 4. find the (worker-) VM
// find VM through datacenter (VM is not at the target host yet)
vmMo = hyperHost.findVmOnPeerHyperHost(vmName);
@ -734,6 +747,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
throw new Exception(msg);
}
// OfflineVmwareMigration: 5. ignore/replace the rest of the try-block; It is the functional bit
Pair<VirtualDisk, String> vdisk = vmMo.getDiskDevice(path);
if (vdisk == null) {
@ -813,6 +827,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
return new ResizeVolumeAnswer(cmd, false, error);
} finally {
// OfflineVmwareMigration: 6. check if a worker was used and destroy it if needed
try {
if (useWorkerVm) {
s_logger.info("Destroy worker VM after volume resize");
@ -2313,7 +2328,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
}
private void resizeRootDiskOnVMStart(VirtualMachineMO vmMo, DiskTO rootDiskTO, VmwareHypervisorHost hyperHost, VmwareContext context) throws Exception {
final Pair<VirtualDisk, String> vdisk = getVirtualDiskInfo(vmMo, appendFileType(rootDiskTO.getPath(), ".vmdk"));
final Pair<VirtualDisk, String> vdisk = getVirtualDiskInfo(vmMo, appendFileType(rootDiskTO.getPath(), VMDK_EXTENSION));
assert(vdisk != null);
Long reqSize = 0L;
@ -2536,7 +2551,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
vmdkPath = dsMo.getName();
}
datastoreDiskPath = dsMo.getDatastorePath(vmdkPath + ".vmdk");
datastoreDiskPath = dsMo.getDatastorePath(vmdkPath + VMDK_EXTENSION);
}
} else {
datastoreDiskPath = VmwareStorageLayoutHelper.syncVolumeToVmDefaultFolder(dcMo, vmMo.getName(), dsMo, volumeTO.getPath(), VmwareManager.s_vmwareSearchExcludeFolder.value());
@ -3061,7 +3076,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
* Ex. "[-iqn.2010-01.com.solidfire:4nhe.vol-1.27-0] i-2-18-VM/ROOT-18.vmdk" should return "i-2-18-VM/ROOT-18"
*/
public String getVmdkPath(String path) {
if (!com.cloud.utils.StringUtils.isNotBlank(path)) {
if (!StringUtils.isNotBlank(path)) {
return null;
}
@ -3075,7 +3090,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
path = path.substring(startIndex + search.length());
final String search2 = ".vmdk";
final String search2 = VMDK_EXTENSION;
int endIndex = path.indexOf(search2);
@ -3128,10 +3143,10 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
final String datastoreVolumePath;
if (vmdkPath != null) {
datastoreVolumePath = dsMo.getDatastorePath(vmdkPath + ".vmdk");
datastoreVolumePath = dsMo.getDatastorePath(vmdkPath + VMDK_EXTENSION);
}
else {
datastoreVolumePath = dsMo.getDatastorePath(dsMo.getName() + ".vmdk");
datastoreVolumePath = dsMo.getDatastorePath(dsMo.getName() + VMDK_EXTENSION);
}
volumeTO.setPath(datastoreVolumePath);
@ -3780,12 +3795,172 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
invalidateServiceContext();
}
String msg = "Unexcpeted exception " + VmwareHelper.getExceptionMessage(e);
String msg = "Unexpected exception " + VmwareHelper.getExceptionMessage(e);
s_logger.error(msg, e);
return new PrepareForMigrationAnswer(cmd, msg);
}
}
protected Answer execute(MigrateVmToPoolCommand cmd) {
if (s_logger.isInfoEnabled()) {
s_logger.info(String.format("excuting MigrateVmToPoolCommand %s -> %s", cmd.getVmName(), cmd.getDestinationPool()));
if (s_logger.isDebugEnabled()) {
s_logger.debug("MigrateVmToPoolCommand: " + _gson.toJson(cmd));
}
}
final String vmName = cmd.getVmName();
VmwareHypervisorHost hyperHost = getHyperHost(getServiceContext());
try {
VirtualMachineMO vmMo = getVirtualMachineMO(vmName, hyperHost);
if (vmMo == null) {
String msg = "VM " + vmName + " does not exist in VMware datacenter";
s_logger.error(msg);
throw new CloudRuntimeException(msg);
}
String poolUuid = cmd.getDestinationPool();
return migrateAndAnswer(vmMo, poolUuid, hyperHost, cmd);
} catch (Throwable e) { // hopefully only CloudRuntimeException :/
if (e instanceof Exception) {
return new Answer(cmd, (Exception) e);
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("problem" , e);
}
s_logger.error(e.getLocalizedMessage());
return new Answer(cmd, false, "unknown problem: " + e.getLocalizedMessage());
}
}
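// Illustration only, not part of this commit: a caller-side sketch of driving the new
// branch. The MigrateVmToPoolCommand constructor shape (vm name + destination pool uuid)
// is an assumption made for this sketch; the real constructor may differ.
static Answer migrateOfflineSketch(VmwareResource resource, String vmName, String poolUuid) {
    Command cmd = new MigrateVmToPoolCommand(vmName, poolUuid);
    // executeRequest(...) dispatches to the execute(MigrateVmToPoolCommand) branch added above
    return resource.executeRequest(cmd);
}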
private Answer migrateAndAnswer(VirtualMachineMO vmMo, String poolUuid, VmwareHypervisorHost hyperHost, Command cmd) throws Exception {
ManagedObjectReference morDs = getTargetDatastoreMOReference(poolUuid, hyperHost);
try {
// OfflineVmwareMigration: getVolumesFromCommand(cmd);
Map<Integer, Long> volumeDeviceKey = getVolumesFromCommand(vmMo, cmd);
if (s_logger.isTraceEnabled()) {
for (Integer diskId: volumeDeviceKey.keySet()) {
s_logger.trace(String.format("disk to migrate has disk id %d and volumeId %d", diskId, volumeDeviceKey.get(diskId)));
}
}
if (vmMo.changeDatastore(morDs)) {
// OfflineVmwareMigration: create target specification to include in answer
// Consolidate VM disks after successful VM migration
// In case of a linked clone VM, if VM's disks are not consolidated, further VM operations such as volume snapshot, VM snapshot etc. will result in DB inconsistencies.
if (!vmMo.consolidateVmDisks()) {
s_logger.warn("VM disk consolidation failed after storage migration. Yet proceeding with VM migration.");
} else {
s_logger.debug("Successfully consolidated disks of VM " + vmMo.getVmName() + ".");
}
return createAnswerForCmd(vmMo, poolUuid, cmd, volumeDeviceKey);
} else {
return new Answer(cmd, false, "failed to changes data store for VM" + vmMo.getVmName());
}
} catch (Exception e) {
String msg = "change data store for VM " + vmMo.getVmName() + " failed";
s_logger.error(msg + ": " + e.getLocalizedMessage());
throw new CloudRuntimeException(msg, e);
}
}
Answer createAnswerForCmd(VirtualMachineMO vmMo, String poolUuid, Command cmd, Map<Integer, Long> volumeDeviceKey) throws Exception {
List<VolumeObjectTO> volumeToList = new ArrayList<>();
VirtualMachineDiskInfoBuilder diskInfoBuilder = vmMo.getDiskInfoBuilder();
VirtualDisk[] disks = vmMo.getAllDiskDevice();
Answer answer;
if (s_logger.isTraceEnabled()) {
s_logger.trace(String.format("creating answer for %s", cmd.getClass().getSimpleName()));
}
if (cmd instanceof MigrateVolumeCommand) {
if (disks.length == 1) {
String volumePath = vmMo.getVmdkFileBaseName(disks[0]);
return new MigrateVolumeAnswer(cmd, true, null, volumePath);
}
throw new CloudRuntimeException("not expecting more then one disk after migrate volume command");
} else if (cmd instanceof MigrateVmToPoolCommand) {
for (VirtualDisk disk : disks) {
VolumeObjectTO newVol = new VolumeObjectTO();
String newPath = vmMo.getVmdkFileBaseName(disk);
VirtualMachineDiskInfo diskInfo = diskInfoBuilder.getDiskInfoByBackingFileBaseName(newPath, poolUuid);
newVol.setId(volumeDeviceKey.get(disk.getKey()));
newVol.setPath(newPath);
newVol.setChainInfo(_gson.toJson(diskInfo));
volumeToList.add(newVol);
}
return new MigrateVmToPoolAnswer((MigrateVmToPoolCommand)cmd, volumeToList);
}
return new Answer(cmd, false, null);
}
private Map<Integer, Long> getVolumesFromCommand(VirtualMachineMO vmMo, Command cmd) throws Exception {
Map<Integer, Long> volumeDeviceKey = new HashMap<Integer, Long>();
if (cmd instanceof MigrateVmToPoolCommand) {
MigrateVmToPoolCommand mcmd = (MigrateVmToPoolCommand)cmd;
for (VolumeTO volume : mcmd.getVolumes()) {
addVolumeDiskmapping(vmMo, volumeDeviceKey, volume.getPath(), volume.getId());
}
} else if (cmd instanceof MigrateVolumeCommand) {
MigrateVolumeCommand mcmd = (MigrateVolumeCommand)cmd;
addVolumeDiskmapping(vmMo, volumeDeviceKey, mcmd.getVolumePath(), mcmd.getVolumeId());
}
return volumeDeviceKey;
}
private void addVolumeDiskmapping(VirtualMachineMO vmMo, Map<Integer, Long> volumeDeviceKey, String volumePath, long volumeId) throws Exception {
if (s_logger.isDebugEnabled()) {
s_logger.debug(String.format("locating disk for volume (%d) using path %s", volumeId, volumePath));
}
Pair<VirtualDisk, String> diskInfo = getVirtualDiskInfo(vmMo, volumePath + VMDK_EXTENSION);
String vmdkAbsFile = getAbsoluteVmdkFile(diskInfo.first());
if (vmdkAbsFile != null && !vmdkAbsFile.isEmpty()) {
vmMo.updateAdapterTypeIfRequired(vmdkAbsFile);
}
int diskId = diskInfo.first().getKey();
volumeDeviceKey.put(diskId, volumeId);
}
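// Illustration only, not part of this commit: a self-contained demo of the
// deviceKey -> volumeId bookkeeping above. The map is filled per attached disk before
// vmMo.changeDatastore(...) and read back per VirtualDisk afterwards, relying on the
// device key staying stable across the relocation, to label each VolumeObjectTO.
class VolumeDeviceKeyDemo {
    public static void main(String[] args) {
        java.util.Map<Integer, Long> volumeDeviceKey = new java.util.HashMap<>();
        volumeDeviceKey.put(2000, 18L); // device key 2000 backs volume 18 (example values)
        volumeDeviceKey.put(2001, 19L); // device key 2001 backs volume 19
        int diskKeyAfterMove = 2001;
        System.out.println("volume id = " + volumeDeviceKey.get(diskKeyAfterMove)); // prints 19
    }
}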
private ManagedObjectReference getTargetDatastoreMOReference(String destinationPool, VmwareHypervisorHost hyperHost) {
ManagedObjectReference morDs;
try {
if (s_logger.isDebugEnabled()) {
s_logger.debug(String.format("finding datastore %s", destinationPool));
}
morDs = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, destinationPool);
} catch (Exception e) {
String msg = "exception while finding data store " + destinationPool;
s_logger.error(msg);
throw new CloudRuntimeException(msg + ": " + e.getLocalizedMessage());
}
return morDs;
}
private ManagedObjectReference getDataCenterMOReference(String vmName, VmwareHypervisorHost hyperHost) {
ManagedObjectReference morDc;
try {
morDc = hyperHost.getHyperHostDatacenter();
} catch (Exception e) {
String msg = "exception while finding VMware datacenter to search for VM " + vmName;
s_logger.error(msg);
throw new CloudRuntimeException(msg + ": " + e.getLocalizedMessage());
}
return morDc;
}
private VirtualMachineMO getVirtualMachineMO(String vmName, VmwareHypervisorHost hyperHost) {
VirtualMachineMO vmMo = null;
try {
// find VM through datacenter (VM is not at the target host yet)
vmMo = hyperHost.findVmOnPeerHyperHost(vmName);
} catch (Exception e) {
String msg = "exception while searching for VM " + vmName + " in VMware datacenter";
s_logger.error(msg);
throw new CloudRuntimeException(msg + ": " + e.getLocalizedMessage());
}
return vmMo;
}
protected Answer execute(MigrateCommand cmd) {
if (s_logger.isInfoEnabled()) {
s_logger.info("Executing resource MigrateCommand: " + _gson.toJson(cmd));
@ -3946,7 +4121,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
}
diskLocator = new VirtualMachineRelocateSpecDiskLocator();
diskLocator.setDatastore(morDsAtSource);
Pair<VirtualDisk, String> diskInfo = getVirtualDiskInfo(vmMo, appendFileType(volume.getPath(), ".vmdk"));
Pair<VirtualDisk, String> diskInfo = getVirtualDiskInfo(vmMo, appendFileType(volume.getPath(), VMDK_EXTENSION));
String vmdkAbsFile = getAbsoluteVmdkFile(diskInfo.first());
if (vmdkAbsFile != null && !vmdkAbsFile.isEmpty()) {
vmMo.updateAdapterTypeIfRequired(vmdkAbsFile);
@ -4074,6 +4249,141 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
}
}
private Answer migrateVolume(MigrateVolumeCommand cmd) {
Answer answer = null;
String path = cmd.getVolumePath();
VmwareHypervisorHost hyperHost = getHyperHost(getServiceContext());
VirtualMachineMO vmMo = null;
DatastoreMO dsMo = null;
ManagedObjectReference morSourceDS = null;
String vmdkDataStorePath = null;
String vmName = null;
try {
// OfflineVmwareMigration: we need to refactor the worker vm creation out for use in migration methods as well as here
// OfflineVmwareMigration: this method is 100 lines and needs refactoring anyway
// we need to spawn a worker VM to attach the volume to and move it
vmName = getWorkerName(getServiceContext(), cmd, 0);
// OfflineVmwareMigration: refactor for re-use
// OfflineVmwareMigration: 1. find data(store)
// OfflineVmwareMigration: more robust would be to find the store given the volume as it might have been moved out of band or due to error
// example: DatastoreMO existingVmDsMo = new DatastoreMO(dcMo.getContext(), dcMo.findDatastore(fileInDatastore.getDatastoreName()));
morSourceDS = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, cmd.getSourcePool().getUuid());
dsMo = new DatastoreMO(hyperHost.getContext(), morSourceDS);
s_logger.info("Create worker VM " + vmName);
// OfflineVmwareMigration: 2. create the worker with access to the data(store)
vmMo = HypervisorHostHelper.createWorkerVM(hyperHost, dsMo, vmName);
if (vmMo == null) {
// OfflineVmwareMigration: don't throw a general Exception but think of a specific one
throw new CloudRuntimeException("Unable to create a worker VM for volume operation");
}
synchronized (this) {
// OfflineVmwareMigration: 3. attach the disk to the worker
String vmdkFileName = path + VMDK_EXTENSION;
vmdkDataStorePath = VmwareStorageLayoutHelper.getLegacyDatastorePathFromVmdkFileName(dsMo, vmdkFileName);
if (!dsMo.fileExists(vmdkDataStorePath)) {
if (s_logger.isDebugEnabled()) {
s_logger.debug(String.format("path not found (%s), trying under '%s'", vmdkFileName, path));
}
vmdkDataStorePath = VmwareStorageLayoutHelper.getVmwareDatastorePathFromVmdkFileName(dsMo, path, vmdkFileName);
}
if (!dsMo.fileExists(vmdkDataStorePath)) {
if (s_logger.isDebugEnabled()) {
s_logger.debug(String.format("path not found (%s), trying under '%s'", vmdkFileName, vmName));
}
vmdkDataStorePath = VmwareStorageLayoutHelper.getVmwareDatastorePathFromVmdkFileName(dsMo, vmName, vmdkFileName);
}
if (s_logger.isDebugEnabled()) {
s_logger.debug(String.format("attaching %s to %s for migration", vmdkDataStorePath, vmMo.getVmName()));
}
vmMo.attachDisk(new String[] { vmdkDataStorePath }, morSourceDS);
}
// OfflineVmwareMigration: 4. find the (worker-) VM
// find VM through datacenter (VM is not at the target host yet)
vmMo = hyperHost.findVmOnPeerHyperHost(vmName);
if (vmMo == null) {
String msg = "VM " + vmName + " does not exist in VMware datacenter";
s_logger.error(msg);
throw new Exception(msg);
}
if (s_logger.isTraceEnabled()) {
VirtualDisk[] disks = vmMo.getAllDiskDevice();
String format = "disk %d is attached as %s";
for (VirtualDisk disk : disks) {
s_logger.trace(String.format(format,disk.getKey(),vmMo.getVmdkFileBaseName(disk)));
}
}
// OfflineVmwareMigration: 5. create a relocate spec and perform
Pair<VirtualDisk, String> vdisk = vmMo.getDiskDevice(path);
if (vdisk == null) {
if (s_logger.isTraceEnabled())
s_logger.trace("migrate volume done (failed)");
throw new CloudRuntimeException("No such disk device: " + path);
}
VirtualDisk disk = vdisk.first();
String vmdkAbsFile = getAbsoluteVmdkFile(disk);
if (vmdkAbsFile != null && !vmdkAbsFile.isEmpty()) {
vmMo.updateAdapterTypeIfRequired(vmdkAbsFile);
}
// OfflineVmwareMigration: this may have to be dissected and executed in separate steps
answer = migrateAndAnswer(vmMo, cmd.getTargetPool().getUuid(), hyperHost, cmd);
} catch (Exception e) {
String msg = String.format("Migration of volume '%s' failed due to %s", cmd.getVolumePath(), e.getLocalizedMessage());
s_logger.error(msg, e);
answer = new Answer(cmd, false, msg);
} finally {
try {
// OfflineVmwareMigration: worker *may* have been renamed
vmName = vmMo.getVmName();
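// OfflineVmwareMigration: note that morSourceDS and dsMo are re-pointed at the *target*
// pool from here on; after a successful relocation the disks live on the target datastore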
morSourceDS = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, cmd.getTargetPool().getUuid());
dsMo = new DatastoreMO(hyperHost.getContext(), morSourceDS);
s_logger.info("Dettaching disks before destroying worker VM '" + vmName + "' after volume migration");
VirtualDisk[] disks = vmMo.getAllDiskDevice();
String format = "disk %d was migrated to %s";
for (VirtualDisk disk : disks) {
if (s_logger.isTraceEnabled()) {
s_logger.trace(String.format(format, disk.getKey(), vmMo.getVmdkFileBaseName(disk)));
}
vmdkDataStorePath = VmwareStorageLayoutHelper.getLegacyDatastorePathFromVmdkFileName(dsMo, vmMo.getVmdkFileBaseName(disk) + VMDK_EXTENSION);
vmMo.detachDisk(vmdkDataStorePath, false);
}
s_logger.info("Destroy worker VM '" + vmName + "' after volume migration");
vmMo.destroy();
} catch (Throwable e) {
s_logger.info("Failed to destroy worker VM: " + vmName);
}
}
if (answer instanceof MigrateVolumeAnswer) {
String newPath = ((MigrateVolumeAnswer)answer).getVolumePath();
String vmdkFileName = newPath + VMDK_EXTENSION;
try {
VmwareStorageLayoutHelper.syncVolumeToRootFolder(dsMo.getOwnerDatacenter().first(), dsMo, newPath, vmName);
vmdkDataStorePath = VmwareStorageLayoutHelper.getLegacyDatastorePathFromVmdkFileName(dsMo, vmdkFileName);
if (!dsMo.fileExists(vmdkDataStorePath)) {
String msg = String.format("Migration of volume '%s' failed; file (%s) not found as path '%s'", cmd.getVolumePath(), vmdkFileName, vmdkDataStorePath);
s_logger.error(msg);
answer = new Answer(cmd, false, msg);
}
} catch (Exception e) {
String msg = String.format("Migration of volume '%s' failed due to %s", cmd.getVolumePath(), e.getLocalizedMessage());
s_logger.error(msg, e);
answer = new Answer(cmd, false, msg);
}
}
return answer;
}
// OfflineVmwareMigration: refactor to be able to handle a detached volume
private Answer execute(MigrateVolumeCommand cmd) {
String volumePath = cmd.getVolumePath();
StorageFilerTO poolTo = cmd.getPool();
@ -4087,6 +4397,10 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
VirtualMachineMO vmMo = null;
VmwareHypervisorHost srcHyperHost = null;
// OfflineVmwareMigration: what if host is null ???
if (org.apache.commons.lang.StringUtils.isBlank(cmd.getAttachedVmName())) {
return migrateVolume(cmd);
}
ManagedObjectReference morDs = null;
ManagedObjectReference morDc = null;
VirtualMachineRelocateSpec relocateSpec = new VirtualMachineRelocateSpec();
@ -4107,7 +4421,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
if (vmMo == null) {
String msg = "VM " + vmName + " does not exist in VMware datacenter " + morDc.getValue();
s_logger.error(msg);
throw new Exception(msg);
throw new CloudRuntimeException(msg);
}
vmName = vmMo.getName();
morDs = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(srcHyperHost, tgtDsName);
@ -4119,8 +4433,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
}
DatastoreMO targetDsMo = new DatastoreMO(srcHyperHost.getContext(), morDs);
String fullVolumePath = VmwareStorageLayoutHelper.getVmwareDatastorePathFromVmdkFileName(targetDsMo, vmName, volumePath + ".vmdk");
Pair<VirtualDisk, String> diskInfo = getVirtualDiskInfo(vmMo, appendFileType(volumePath, ".vmdk"));
String fullVolumePath = VmwareStorageLayoutHelper.getVmwareDatastorePathFromVmdkFileName(targetDsMo, vmName, volumePath + VMDK_EXTENSION);
Pair<VirtualDisk, String> diskInfo = getVirtualDiskInfo(vmMo, appendFileType(volumePath, VMDK_EXTENSION));
String vmdkAbsFile = getAbsoluteVmdkFile(diskInfo.first());
if (vmdkAbsFile != null && !vmdkAbsFile.isEmpty()) {
vmMo.updateAdapterTypeIfRequired(vmdkAbsFile);

View File

@ -20,11 +20,39 @@
package org.apache.cloudstack.storage.motion;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Map;
import javax.inject.Inject;
import com.cloud.agent.AgentManager;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.MigrateWithStorageAnswer;
import com.cloud.agent.api.MigrateWithStorageCommand;
import com.cloud.agent.api.storage.MigrateVolumeAnswer;
import com.cloud.agent.api.storage.MigrateVolumeCommand;
import com.cloud.agent.api.to.DataObjectType;
import com.cloud.agent.api.to.StorageFilerTO;
import com.cloud.agent.api.to.VirtualMachineTO;
import com.cloud.agent.api.to.VolumeTO;
import com.cloud.exception.AgentUnavailableException;
import com.cloud.exception.OperationTimedoutException;
import com.cloud.host.Host;
import com.cloud.host.HostVO;
import com.cloud.host.Status;
import com.cloud.host.dao.HostDao;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.storage.DataStoreRole;
import com.cloud.storage.ScopeType;
import com.cloud.storage.StoragePool;
import com.cloud.storage.Volume;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.utils.Pair;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.vm.VMInstanceVO;
import com.cloud.vm.dao.VMInstanceDao;
import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
import org.apache.cloudstack.engine.subsystem.api.storage.DataMotionStrategy;
import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
@ -38,25 +66,6 @@ import org.apache.cloudstack.storage.to.VolumeObjectTO;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.agent.AgentManager;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.MigrateWithStorageAnswer;
import com.cloud.agent.api.MigrateWithStorageCommand;
import com.cloud.agent.api.to.StorageFilerTO;
import com.cloud.agent.api.to.VirtualMachineTO;
import com.cloud.agent.api.to.VolumeTO;
import com.cloud.exception.AgentUnavailableException;
import com.cloud.exception.OperationTimedoutException;
import com.cloud.host.Host;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.storage.StoragePool;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.utils.Pair;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.vm.VMInstanceVO;
import com.cloud.vm.dao.VMInstanceDao;
@Component
public class VmwareStorageMotionStrategy implements DataMotionStrategy {
private static final Logger s_logger = Logger.getLogger(VmwareStorageMotionStrategy.class);
@ -70,12 +79,77 @@ public class VmwareStorageMotionStrategy implements DataMotionStrategy {
PrimaryDataStoreDao storagePoolDao;
@Inject
VMInstanceDao instanceDao;
@Inject
private HostDao hostDao;
@Override
public StrategyPriority canHandle(DataObject srcData, DataObject destData) {
// OfflineVmwareMigration: return StrategyPriority.HYPERVISOR when destData is in a storage pool in the same vmware-cluster and both are volumes
if (isOnVmware(srcData, destData)
&& isOnPrimary(srcData, destData)
&& isVolumesOnly(srcData, destData)
&& isDettached(srcData)
&& isIntraCluster(srcData, destData)
&& isStoreScopeEqual(srcData, destData)) {
if (s_logger.isDebugEnabled()) {
String msg = String.format("%s can handle the request because %d(%s) and %d(%s) share the VMware cluster %s (== %s)"
, this.getClass()
, srcData.getId()
, srcData.getUuid()
, destData.getId()
, destData.getUuid()
, storagePoolDao.findById(srcData.getDataStore().getId()).getClusterId()
, storagePoolDao.findById(destData.getDataStore().getId()).getClusterId());
s_logger.debug(msg);
}
return StrategyPriority.HYPERVISOR;
}
return StrategyPriority.CANT_HANDLE;
}
private boolean isDetached(DataObject srcData) {
VolumeVO volume = volDao.findById(srcData.getId());
return volume.getInstanceId() == null;
}
private boolean isVolumesOnly(DataObject srcData, DataObject destData) {
return DataObjectType.VOLUME.equals(srcData.getType())
&& DataObjectType.VOLUME.equals(destData.getType());
}
private boolean isOnPrimary(DataObject srcData, DataObject destData) {
return DataStoreRole.Primary.equals(srcData.getDataStore().getRole())
&& DataStoreRole.Primary.equals(destData.getDataStore().getRole());
}
private boolean isOnVmware(DataObject srcData, DataObject destData) {
return HypervisorType.VMware.equals(srcData.getTO().getHypervisorType())
&& HypervisorType.VMware.equals(destData.getTO().getHypervisorType());
}
private boolean isIntraCluster(DataObject srcData, DataObject destData) {
DataStore srcStore = srcData.getDataStore();
StoragePool srcPool = storagePoolDao.findById(srcStore.getId());
DataStore destStore = destData.getDataStore();
StoragePool destPool = storagePoolDao.findById(destStore.getId());
return srcPool.getClusterId() != null && srcPool.getClusterId().equals(destPool.getClusterId());
}
/**
* Ensure that the scope of source and destination storage pools match
*
* @param srcData the source volume
* @param destData the destination volume
* @return true when source and destination pools have the same scope type
*/
private boolean isStoreScopeEqual(DataObject srcData, DataObject destData) {
DataStore srcStore = srcData.getDataStore();
DataStore destStore = destData.getDataStore();
String msg = String.format("Storage scope of source pool is %s and of destination pool is %s", srcStore.getScope().toString(), destStore.getScope().toString());
s_logger.debug(msg);
return srcStore.getScope().getScopeType() == destStore.getScope().getScopeType();
}
@Override
public StrategyPriority canHandle(Map<VolumeInfo, DataStore> volumeMap, Host srcHost, Host destHost) {
if (srcHost.getHypervisorType() == HypervisorType.VMware && destHost.getHypervisorType() == HypervisorType.VMware) {
@ -85,10 +159,97 @@ public class VmwareStorageMotionStrategy implements DataMotionStrategy {
return StrategyPriority.CANT_HANDLE;
}
/**
* The VMware storage motion strategy allows copying to a destination pool but not to a destination host.
*
* @param srcData volume to move
* @param destData volume description as intended after the move
* @param destHost must be null; this strategy cannot migrate to a specific host
* @param callback where to report completion or failure to
*/
@Override
public void copyAsync(DataObject srcData, DataObject destData, Host destHost, AsyncCompletionCallback<CopyCommandResult> callback) {
if (destHost != null) {
String format = "%s cannot target a host in moving an object from {%s}\n to {%s}";
String msg = String.format(format
, this.getClass().getName()
, srcData.toString()
, destData.toString()
);
s_logger.error(msg);
throw new CloudRuntimeException(msg);
}
// OfflineVmwareMigration: extract the destination pool from destData and construct a migrateVolume command
if (!isOnPrimary(srcData, destData)) {
// OfflineVmwareMigration: we shouldn't be here as we would have refused in the canHandle call
throw new UnsupportedOperationException();
}
StoragePool sourcePool = (StoragePool) srcData.getDataStore();
StoragePool targetPool = (StoragePool) destData.getDataStore();
MigrateVolumeCommand cmd = new MigrateVolumeCommand(srcData.getId()
, srcData.getTO().getPath()
, sourcePool
, targetPool);
// OfflineVmwareMigration: should be ((StoragePool)srcData.getDataStore()).getHypervisor() but that is NULL, so hardcoding
Answer answer;
ScopeType scopeType = srcData.getDataStore().getScope().getScopeType();
if (ScopeType.CLUSTER == scopeType) {
// Find Volume source cluster and select any Vmware hypervisor host to attach worker VM
Long hostId = findSuitableHostIdForWorkerVmPlacement(sourcePool.getClusterId());
if (hostId == null) {
throw new CloudRuntimeException("Offline Migration failed, unable to find suitable host for worker VM placement in cluster: " + sourcePool.getName());
}
answer = agentMgr.easySend(hostId, cmd);
} else {
answer = agentMgr.sendTo(sourcePool.getDataCenterId(), HypervisorType.VMware, cmd);
}
updateVolumeAfterMigration(answer, srcData, destData);
CopyCommandResult result = new CopyCommandResult(null, answer);
callback.complete(result);
}
/**
* Selects a host from the cluster housing the source storage pool.
* The assumption is that primary storage is cluster-wide.
* <p>
* No exception is thrown here; the caller decides how to handle an empty result.
*
* @param clusterId the cluster to search for a running VMware host
* @return the ID of any Up VMware host in the cluster, or null if none is found
*/
private Long findSuitableHostIdForWorkerVmPlacement(Long clusterId) {
List<HostVO> hostLists = hostDao.findByClusterId(clusterId);
Long hostId = null;
for (HostVO hostVO : hostLists) {
if (hostVO.getHypervisorType().equals(HypervisorType.VMware) && hostVO.getStatus() == Status.Up) {
hostId = hostVO.getId();
break;
}
}
return hostId;
}
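// Illustration only, not part of this commit: the same selection expressed with streams;
// behaviour matches the loop above (first Up VMware host wins, null when the cluster has none).
private Long findSuitableHostIdForWorkerVmPlacementStreamed(Long clusterId) {
    return hostDao.findByClusterId(clusterId).stream()
            .filter(h -> HypervisorType.VMware.equals(h.getHypervisorType()))
            .filter(h -> h.getStatus() == Status.Up)
            .map(HostVO::getId)
            .findFirst()
            .orElse(null);
}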
private void updateVolumeAfterMigration(Answer answer, DataObject srcData, DataObject destData) {
VolumeVO destinationVO = volDao.findById(destData.getId());
if (!(answer instanceof MigrateVolumeAnswer)) {
// OfflineVmwareMigration: reset states and such
VolumeVO sourceVO = volDao.findById(srcData.getId());
sourceVO.setState(Volume.State.Ready);
volDao.update(sourceVO.getId(), sourceVO);
destinationVO.setState(Volume.State.Expunged);
destinationVO.setRemoved(new Date());
volDao.update(destinationVO.getId(), destinationVO);
throw new CloudRuntimeException("unexpected answer from hypervisor agent: " + answer.getDetails());
}
MigrateVolumeAnswer ans = (MigrateVolumeAnswer) answer;
if (s_logger.isDebugEnabled()) {
String format = "retrieved '%s' as new path for volume(%d)";
s_logger.debug(String.format(format, ans.getVolumePath(), destData.getId()));
}
// OfflineVmwareMigration: update the volume with new pool/volume path
destinationVO.setPath(ans.getVolumePath());
volDao.update(destinationVO.getId(), destinationVO);
}
@Override
public void copyAsync(Map<VolumeInfo, DataStore> volumeMap, VirtualMachineTO vmTo, Host srcHost, Host destHost, AsyncCompletionCallback<CopyCommandResult> callback) {
@ -124,7 +285,7 @@ public class VmwareStorageMotionStrategy implements DataMotionStrategy {
for (Map.Entry<VolumeInfo, DataStore> entry : volumeToPool.entrySet()) {
VolumeInfo volume = entry.getKey();
VolumeTO volumeTo = new VolumeTO(volume, storagePoolDao.findById(volume.getPoolId()));
StorageFilerTO filerTo = new StorageFilerTO((StoragePool)entry.getValue());
StorageFilerTO filerTo = new StorageFilerTO((StoragePool) entry.getValue());
volumeToFilerto.add(new Pair<VolumeTO, StorageFilerTO>(volumeTo, filerTo));
}
@ -133,7 +294,7 @@ public class VmwareStorageMotionStrategy implements DataMotionStrategy {
// Run validations against target!!
// 2. Complete the process. Update the volume details.
MigrateWithStorageCommand migrateWithStorageCmd = new MigrateWithStorageCommand(to, volumeToFilerto, destHost.getGuid());
MigrateWithStorageAnswer migrateWithStorageAnswer = (MigrateWithStorageAnswer)agentMgr.send(srcHost.getId(), migrateWithStorageCmd);
MigrateWithStorageAnswer migrateWithStorageAnswer = (MigrateWithStorageAnswer) agentMgr.send(srcHost.getId(), migrateWithStorageCmd);
if (migrateWithStorageAnswer == null) {
s_logger.error("Migration with storage of vm " + vm + " to host " + destHost + " failed.");
throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost);
@ -162,12 +323,12 @@ public class VmwareStorageMotionStrategy implements DataMotionStrategy {
for (Map.Entry<VolumeInfo, DataStore> entry : volumeToPool.entrySet()) {
VolumeInfo volume = entry.getKey();
VolumeTO volumeTo = new VolumeTO(volume, storagePoolDao.findById(volume.getPoolId()));
StorageFilerTO filerTo = new StorageFilerTO((StoragePool)entry.getValue());
StorageFilerTO filerTo = new StorageFilerTO((StoragePool) entry.getValue());
volumeToFilerto.add(new Pair<VolumeTO, StorageFilerTO>(volumeTo, filerTo));
}
MigrateWithStorageCommand command = new MigrateWithStorageCommand(to, volumeToFilerto, destHost.getGuid());
MigrateWithStorageAnswer answer = (MigrateWithStorageAnswer)agentMgr.send(srcHost.getId(), command);
MigrateWithStorageAnswer answer = (MigrateWithStorageAnswer) agentMgr.send(srcHost.getId(), command);
if (answer == null) {
s_logger.error("Migration with storage of vm " + vm + " failed.");
throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost);
@ -190,7 +351,7 @@ public class VmwareStorageMotionStrategy implements DataMotionStrategy {
for (Map.Entry<VolumeInfo, DataStore> entry : volumeToPool.entrySet()) {
boolean updated = false;
VolumeInfo volume = entry.getKey();
StoragePool pool = (StoragePool)entry.getValue();
StoragePool pool = (StoragePool) entry.getValue();
for (VolumeObjectTO volumeTo : volumeTos) {
if (volume.getId() == volumeTo.getId()) {
VolumeVO volumeVO = volDao.findById(volume.getId());

View File

@ -16,13 +16,6 @@
// under the License.
package org.apache.cloudstack.storage.motion;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.mockito.Matchers.anyLong;
import static org.mockito.Matchers.isA;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
@ -30,6 +23,29 @@ import java.util.Map;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import com.cloud.agent.AgentManager;
import com.cloud.agent.api.MigrateWithStorageAnswer;
import com.cloud.agent.api.MigrateWithStorageCommand;
import com.cloud.agent.api.to.VirtualMachineTO;
import com.cloud.host.Host;
import com.cloud.host.dao.HostDao;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.utils.component.ComponentContext;
import com.cloud.vm.VMInstanceVO;
import com.cloud.vm.dao.VMInstanceDao;
import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
import org.apache.cloudstack.framework.async.AsyncCallFuture;
import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher;
import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
import org.apache.cloudstack.framework.async.AsyncRpcContext;
import org.apache.cloudstack.storage.command.CommandResult;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.test.utils.SpringUtils;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
@ -47,29 +63,12 @@ import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
import org.springframework.test.context.support.AnnotationConfigContextLoader;
import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
import org.apache.cloudstack.framework.async.AsyncCallFuture;
import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher;
import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
import org.apache.cloudstack.framework.async.AsyncRpcContext;
import org.apache.cloudstack.storage.command.CommandResult;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.test.utils.SpringUtils;
import com.cloud.agent.AgentManager;
import com.cloud.agent.api.MigrateWithStorageAnswer;
import com.cloud.agent.api.MigrateWithStorageCommand;
import com.cloud.agent.api.to.VirtualMachineTO;
import com.cloud.host.Host;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.utils.component.ComponentContext;
import com.cloud.vm.VMInstanceVO;
import com.cloud.vm.dao.VMInstanceDao;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.mockito.Matchers.anyLong;
import static org.mockito.Matchers.isA;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@RunWith(SpringJUnit4ClassRunner.class)
@ContextConfiguration(loader = AnnotationConfigContextLoader.class)
@ -87,6 +86,8 @@ public class VmwareStorageMotionStrategyTest {
PrimaryDataStoreDao storagePoolDao;
@Inject
VMInstanceDao instanceDao;
@Inject
private HostDao hostDao;
CopyCommandResult result;
@ -262,6 +263,11 @@ public class VmwareStorageMotionStrategyTest {
return Mockito.mock(AgentManager.class);
}
@Bean
public HostDao hostDao() {
return Mockito.mock(HostDao.class);
}
public static class Library implements TypeFilter {
@Override
public boolean match(MetadataReader mdr, MetadataReaderFactory arg1) throws IOException {

View File

@ -49,6 +49,7 @@ public class ApiDispatcher {
private static final Logger s_logger = Logger.getLogger(ApiDispatcher.class.getName());
Long _createSnapshotQueueSizeLimit;
Long migrateQueueSizeLimit;
@Inject
AsyncJobManager _asyncMgr;
@ -79,6 +80,9 @@ public class ApiDispatcher {
_createSnapshotQueueSizeLimit = snapshotLimit;
}
public void setMigrateQueueSizeLimit(final Long migrateLimit) {
migrateQueueSizeLimit = migrateLimit;
}
public void dispatchCreateCmd(final BaseAsyncCreateCmd cmd, final Map<String, String> params) throws Exception {
asyncCreationDispatchChain.dispatch(new DispatchTask(cmd, params));
@ -124,6 +128,8 @@ public class ApiDispatcher {
Long queueSizeLimit = null;
if (asyncCmd.getSyncObjType() != null && asyncCmd.getSyncObjType().equalsIgnoreCase(BaseAsyncCmd.snapshotHostSyncObject)) {
queueSizeLimit = _createSnapshotQueueSizeLimit;
} else if (asyncCmd.getSyncObjType() != null && asyncCmd.getSyncObjType().equalsIgnoreCase(BaseAsyncCmd.migrationSyncObject)) {
queueSizeLimit = migrateQueueSizeLimit;
} else {
queueSizeLimit = 1L;
}
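// Illustration only, not part of this commit: the limit selection above, condensed.
// The sync-object names are the BaseAsyncCmd constants quoted here as literals.
static Long queueLimitFor(String syncObjType, Long snapshotLimit, Long migrateLimit) {
    if ("snapshothost".equalsIgnoreCase(syncObjType)) {
        return snapshotLimit; // concurrent.snapshots.threshold.perhost
    }
    if ("migration".equalsIgnoreCase(syncObjType)) {
        return migrateLimit; // concurrent.migrations.per.target.datastore
    }
    return 1L; // everything else serializes at one queued job per sync object
}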

View File

@ -19,7 +19,6 @@ package com.cloud.api;
import com.cloud.api.dispatch.DispatchChainFactory;
import com.cloud.api.dispatch.DispatchTask;
import com.cloud.api.response.ApiResponseSerializer;
import com.cloud.configuration.Config;
import com.cloud.domain.Domain;
import com.cloud.domain.DomainVO;
import com.cloud.domain.dao.DomainDao;
@ -35,6 +34,7 @@ import com.cloud.exception.RequestLimitException;
import com.cloud.exception.ResourceAllocationException;
import com.cloud.exception.ResourceUnavailableException;
import com.cloud.exception.UnavailableCommandException;
import com.cloud.storage.VolumeApiService;
import com.cloud.user.Account;
import com.cloud.user.AccountManager;
import com.cloud.user.DomainManager;
@ -44,7 +44,6 @@ import com.cloud.user.UserVO;
import com.cloud.utils.ConstantTimeComparator;
import com.cloud.utils.DateUtil;
import com.cloud.utils.HttpUtils;
import com.cloud.utils.NumbersUtil;
import com.cloud.utils.Pair;
import com.cloud.utils.ReflectUtil;
import com.cloud.utils.StringUtils;
@ -54,7 +53,6 @@ import com.cloud.utils.component.ManagerBase;
import com.cloud.utils.component.PluggableService;
import com.cloud.utils.concurrency.NamedThreadFactory;
import com.cloud.utils.db.EntityManager;
import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.TransactionLegacy;
import com.cloud.utils.db.UUIDManager;
import com.cloud.utils.exception.CloudRuntimeException;
@ -100,8 +98,6 @@ import org.apache.cloudstack.config.ApiServiceConfiguration;
import org.apache.cloudstack.context.CallContext;
import org.apache.cloudstack.framework.config.ConfigKey;
import org.apache.cloudstack.framework.config.Configurable;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
import org.apache.cloudstack.framework.config.impl.ConfigurationVO;
import org.apache.cloudstack.framework.events.EventBus;
import org.apache.cloudstack.framework.events.EventBusException;
import org.apache.cloudstack.framework.jobs.AsyncJob;
@ -209,8 +205,6 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer
@Inject
private AsyncJobManager asyncMgr;
@Inject
private ConfigurationDao configDao;
@Inject
private EntityManager entityMgr;
@Inject
private APIAuthenticationManager authManager;
@ -228,14 +222,60 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer
private static ExecutorService s_executor = new ThreadPoolExecutor(10, 150, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(), new NamedThreadFactory(
"ApiServer"));
static final ConfigKey<Boolean> EnableSecureSessionCookie = new ConfigKey<Boolean>("Advanced", Boolean.class, "enable.secure.session.cookie", "false",
"Session cookie is marked as secure if this is enabled. Secure cookies only work when HTTPS is used.", false);
static final ConfigKey<String> JSONcontentType = new ConfigKey<String>(String.class, "json.content.type", "Advanced", "application/json; charset=UTF-8",
"Http response content type for .js files (default is text/javascript)", false, ConfigKey.Scope.Global, null);
@Inject
private MessageBus messageBus;
private static final ConfigKey<Integer> IntegrationAPIPort = new ConfigKey<Integer>("Advanced"
, Integer.class
, "integration.api.port"
, "8096"
, "Default API port"
, false
, ConfigKey.Scope.Global);
private static final ConfigKey<Long> ConcurrentSnapshotsThresholdPerHost = new ConfigKey<Long>("Advanced"
, Long.class
, "concurrent.snapshots.threshold.perhost"
, null
, "Limits number of snapshots that can be handled by the host concurrently; default is NULL - unlimited"
, true // not sure if this is to be dynamic
, ConfigKey.Scope.Global);
private static final ConfigKey<Boolean> EncodeApiResponse = new ConfigKey<Boolean>("Advanced"
, Boolean.class
, "encode.api.response"
, "false"
, "Do URL encoding for the api response, false by default"
, false
, ConfigKey.Scope.Global);
static final ConfigKey<String> JSONcontentType = new ConfigKey<String>( "Advanced"
, String.class
, "json.content.type"
, "application/json; charset=UTF-8"
, "Http response content type for .js files (default is text/javascript)"
, false
, ConfigKey.Scope.Global);
static final ConfigKey<Boolean> EnableSecureSessionCookie = new ConfigKey<Boolean>("Advanced"
, Boolean.class
, "enable.secure.session.cookie"
, "false"
, "Session cookie is marked as secure if this is enabled. Secure cookies only work when HTTPS is used."
, false
, ConfigKey.Scope.Global);
private static final ConfigKey<String> JSONDefaultContentType = new ConfigKey<String>("Advanced"
, String.class
, "json.content.type"
, "application/json; charset=UTF-8"
, "Http response content type for JSON"
, false
, ConfigKey.Scope.Global);
private static final ConfigKey<Boolean> UseEventAccountInfo = new ConfigKey<Boolean>("Advanced"
, Boolean.class
, "event.accountinfo"
, "false"
, "use account info in event logging"
, true
, ConfigKey.Scope.Global);
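// For orientation (not part of this commit): the positional arguments of the ConfigKey
// constructors above are category, value type, key name, default value (as a String),
// description, dynamic flag and scope. "example.worker.timeout" below is a hypothetical
// key used purely for illustration.
private static final ConfigKey<Integer> ExampleWorkerTimeout = new ConfigKey<Integer>("Advanced" // settings category
        , Integer.class // value type
        , "example.worker.timeout" // global setting name (hypothetical)
        , "30" // default value, always given as a String
        , "Seconds to wait for a worker VM before giving up (illustration only)"
        , true // dynamic: changeable without a management server restart
        , ConfigKey.Scope.Global); // scope
// read at runtime with ExampleWorkerTimeout.value() and expose it via getConfigKeys()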
@Override
public boolean configure(final String name, final Map<String, Object> params) throws ConfigurationException {
messageBus.subscribe(AsyncJob.Topics.JOB_EVENT_PUBLISH, MessageDispatcher.getDispatcher(this));
@ -305,8 +345,7 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer
eventDescription.put("cmdInfo", job.getCmdInfo());
eventDescription.put("status", "" + job.getStatus() );
// If the event.accountinfo boolean value is set, get the human readable value for the username / domainname
Map<String, String> configs = configDao.getConfiguration("management-server", new HashMap<String, String>());
if (Boolean.valueOf(configs.get("event.accountinfo"))) {
if (UseEventAccountInfo.value()) {
DomainVO domain = domainDao.findById(jobOwner.getDomainId());
eventDescription.put("username", userJobOwner.getUsername());
eventDescription.put("accountname", jobOwner.getAccountName());
@ -325,27 +364,20 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer
@Override
public boolean start() {
Security.addProvider(new BouncyCastleProvider());
Integer apiPort = null; // api port, null by default
final SearchCriteria<ConfigurationVO> sc = configDao.createSearchCriteria();
sc.addAnd("name", SearchCriteria.Op.EQ, Config.IntegrationAPIPort.key());
final List<ConfigurationVO> values = configDao.search(sc, null);
if ((values != null) && (values.size() > 0)) {
final ConfigurationVO apiPortConfig = values.get(0);
if (apiPortConfig.getValue() != null) {
apiPort = Integer.parseInt(apiPortConfig.getValue());
apiPort = (apiPort <= 0) ? null : apiPort;
}
}
Integer apiPort = IntegrationAPIPort.value(); // api port, 8096 by default; 0 or negative disables the listener
final Map<String, String> configs = configDao.getConfiguration();
final String strSnapshotLimit = configs.get(Config.ConcurrentSnapshotsThresholdPerHost.key());
if (strSnapshotLimit != null) {
final Long snapshotLimit = NumbersUtil.parseLong(strSnapshotLimit, 1L);
if (snapshotLimit.longValue() <= 0) {
s_logger.debug("Global config parameter " + Config.ConcurrentSnapshotsThresholdPerHost.toString() + " is less or equal 0; defaulting to unlimited");
final Long snapshotLimit = ConcurrentSnapshotsThresholdPerHost.value();
if (snapshotLimit == null || snapshotLimit.longValue() <= 0) {
s_logger.debug("Global concurrent snapshot config parameter " + ConcurrentSnapshotsThresholdPerHost.value() + " is less or equal 0; defaulting to unlimited");
} else {
dispatcher.setCreateSnapshotQueueSizeLimit(snapshotLimit);
}
final Long migrationLimit = VolumeApiService.ConcurrentMigrationsThresholdPerDatastore.value();
if (migrationLimit == null || migrationLimit.longValue() <= 0) {
s_logger.debug("Global concurrent migration config parameter " + VolumeApiService.ConcurrentMigrationsThresholdPerDatastore.value() + " is less or equal 0; defaulting to unlimited");
} else {
dispatcher.setMigrateQueueSizeLimit(migrationLimit);
}
final Set<Class<?>> cmdClasses = new HashSet<Class<?>>();
@ -372,7 +404,7 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer
}
setEncodeApiResponse(Boolean.valueOf(configDao.getValue(Config.EncodeApiResponse.key())));
setEncodeApiResponse(EncodeApiResponse.value());
if (apiPort != null && apiPort > 0) {
final ListenerThread listenerThread = new ListenerThread(this, apiPort);
@ -1200,16 +1232,6 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer
}
}
@Override
public String getConfigComponentName() {
return ApiServer.class.getSimpleName();
}
@Override
public ConfigKey<?>[] getConfigKeys() {
return new ConfigKey<?>[] { EnableSecureSessionCookie, JSONcontentType };
}
// FIXME: the following two threads are copied from
// http://svn.apache.org/repos/asf/httpcomponents/httpcore/trunk/httpcore/src/examples/org/apache/http/examples/ElementalHttpServer.java
// we have to cite a license if we are using this code directly, so we need to add the appropriate citation or
@ -1413,4 +1435,19 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer
ApiServer.encodeApiResponse = encodeApiResponse;
}
@Override
public String getConfigComponentName() {
return ApiServer.class.getSimpleName();
}
@Override
public ConfigKey<?>[] getConfigKeys() {
return new ConfigKey<?>[] {
IntegrationAPIPort,
ConcurrentSnapshotsThresholdPerHost,
EncodeApiResponse,
EnableSecureSessionCookie,
JSONDefaultContentType
};
}
}

View File

@ -566,7 +566,6 @@ public enum Config {
"The interval (in milliseconds) when host stats are retrieved from agents.",
null),
HostRetry("Advanced", AgentManager.class, Integer.class, "host.retry", "2", "Number of times to retry hosts for creating a volume", null),
IntegrationAPIPort("Advanced", ManagementServer.class, Integer.class, "integration.api.port", null, "Default API port. To disable set it to 0 or negative.", null),
InvestigateRetryInterval(
"Advanced",
HighAvailabilityManager.class,
@ -1439,7 +1438,6 @@ public enum Config {
"true",
"Allow subdomains to use networks dedicated to their parent domain(s)",
null),
EncodeApiResponse("Advanced", ManagementServer.class, Boolean.class, "encode.api.response", "false", "Do URL encoding for the api response, false by default", null),
DnsBasicZoneUpdates(
"Advanced",
NetworkOrchestrationService.class,
@ -1693,14 +1691,6 @@ public enum Config {
null),
VpcMaxNetworks("Advanced", ManagementServer.class, Integer.class, "vpc.max.networks", "3", "Maximum number of networks per vpc", null),
DetailBatchQuerySize("Advanced", ManagementServer.class, Integer.class, "detail.batch.query.size", "2000", "Default entity detail batch query size for listing", null),
ConcurrentSnapshotsThresholdPerHost(
"Advanced",
ManagementServer.class,
Long.class,
"concurrent.snapshots.threshold.perhost",
null,
"Limits number of snapshots that can be handled by the host concurrently; default is NULL - unlimited",
null),
NetworkIPv6SearchRetryMax(
"Network",
ManagementServer.class,

View File

@ -1342,7 +1342,8 @@ StateListener<State, VirtualMachine.Event, VirtualMachine> {
// There should be at least the ROOT volume of the VM in usable state
if (volumesTobeCreated.isEmpty()) {
throw new CloudRuntimeException("Unable to create deployment, no usable volumes found for the VM");
// OfflineVmwareMigration: find out what is wrong with the id of the vm we try to start
throw new CloudRuntimeException("Unable to create deployment, no usable volumes found for the VM: " + vmProfile.getId());
}
// don't allow to start vm that doesn't have a root volume

View File

@ -40,6 +40,7 @@ import com.cloud.resource.ResourceManager;
import com.cloud.service.ServiceOfferingDetailsVO;
import com.cloud.service.dao.ServiceOfferingDao;
import com.cloud.service.dao.ServiceOfferingDetailsDao;
import com.cloud.storage.StoragePool;
import com.cloud.utils.Pair;
import com.cloud.utils.component.AdapterBase;
import com.cloud.vm.NicProfile;
@ -225,4 +226,8 @@ public abstract class HypervisorGuruBase extends AdapterBase implements Hypervis
return null;
}
@Override
public List<Command> finalizeMigrate(VirtualMachine vm, StoragePool destination) {
return null;
}
}

View File

@ -37,6 +37,7 @@ import javax.crypto.spec.SecretKeySpec;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import com.cloud.storage.ScopeType;
import org.apache.cloudstack.acl.ControlledEntity;
import org.apache.cloudstack.affinity.AffinityGroupProcessor;
import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao;
@ -1103,6 +1104,32 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
return new Pair<List<? extends Cluster>, Integer>(result.first(), result.second());
}
private HypervisorType getHypervisorType(VMInstanceVO vm, StoragePool srcVolumePool, VirtualMachineProfile profile) {
HypervisorType type = null;
if (vm == null) {
StoragePoolVO poolVo = _poolDao.findById(srcVolumePool.getId());
if (ScopeType.CLUSTER.equals(poolVo.getScope())) {
Long clusterId = poolVo.getClusterId();
if (clusterId != null) {
ClusterVO cluster = _clusterDao.findById(clusterId);
type = cluster.getHypervisorType();
}
} else if (ScopeType.ZONE.equals(poolVo.getScope())) {
// a zone-wide pool has no cluster to derive a hypervisor type from;
// the null check below falls through to the pool's own hypervisor
}
if (null == type) {
type = srcVolumePool.getHypervisor();
}
} else {
type = profile.getHypervisorType();
}
return type;
}
@Override
public Pair<List<? extends Host>, Integer> searchForServers(final ListHostsCmd cmd) {
@ -1433,10 +1460,12 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
DataCenterDeployment plan = new DataCenterDeployment(volume.getDataCenterId(), srcVolumePool.getPodId(), srcVolumePool.getClusterId(), null, null, null);
VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm);
// OfflineVmwareMigration: vm might be null here; deal!
HypervisorType type = getHypervisorType(vm, srcVolumePool, profile);
DiskOfferingVO diskOffering = _diskOfferingDao.findById(volume.getDiskOfferingId());
//This is an override mechanism so we can list the possible local storage pools that a volume in a shared pool might be able to be migrated to
DiskProfile diskProfile = new DiskProfile(volume, diskOffering, profile.getHypervisorType());
DiskProfile diskProfile = new DiskProfile(volume, diskOffering, type);
diskProfile.setUseLocalStorage(true);
for (StoragePoolAllocator allocator : _storagePoolAllocators) {

View File

@ -522,7 +522,12 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
@Override
public String getStoragePoolTags(long poolId) {
return com.cloud.utils.StringUtils.listToCsvTags(_storagePoolDao.searchForStoragePoolTags(poolId));
return StringUtils.listToCsvTags(getStoragePoolTagList(poolId));
}
@Override
public List<String> getStoragePoolTagList(long poolId) {
return _storagePoolDao.searchForStoragePoolTags(poolId);
}
@Override

View File

@ -56,6 +56,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService.VolumeApiResult;
import org.apache.cloudstack.framework.async.AsyncCallFuture;
import org.apache.cloudstack.framework.config.ConfigKey;
import org.apache.cloudstack.framework.config.Configurable;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
import org.apache.cloudstack.framework.jobs.AsyncJob;
import org.apache.cloudstack.framework.jobs.AsyncJobExecutionContext;
@ -178,7 +179,7 @@ import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import com.google.gson.JsonParseException;
public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiService, VmWorkJobHandler {
public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiService, VmWorkJobHandler, Configurable {
private final static Logger s_logger = Logger.getLogger(VolumeApiServiceImpl.class);
public static final String VM_WORK_JOB_HANDLER = VolumeApiServiceImpl.class.getSimpleName();
@ -2028,10 +2029,12 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
}
// Check that Vm to which this volume is attached does not have VM Snapshots
// OfflineVmwareMigration: consider if this is needed and desirable
if (vm != null && _vmSnapshotDao.findByVm(vm.getId()).size() > 0) {
throw new InvalidParameterValueException("Volume cannot be migrated, please remove all VM snapshots for VM to which this volume is attached");
}
// OfflineVmwareMigration: extract this block as method and check if it is subject to regression
if (vm != null && vm.getState() == State.Running) {
// Check if the VM is GPU enabled.
if (_serviceOfferingDetailsDao.findDetail(vm.getServiceOfferingId(), GPU.Keys.pciDevice.toString()) != null) {
@ -2073,6 +2076,16 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
throw new CloudRuntimeException("Storage pool " + destPool.getName() + " does not have enough space to migrate volume " + vol.getName());
}
// OfflineVmwareMigration: check storage tags on disk(offering)s in comparison to destination storage pool
// OfflineVmwareMigration: if no match return a proper error now
DiskOfferingVO diskOffering = _diskOfferingDao.findById(vol.getDiskOfferingId());
if (diskOffering == null) {
throw new CloudRuntimeException("Volume '" + vol.getUuid() + "' has no disk offering; the migration target cannot be checked.");
}
if (!doesTargetStorageSupportDiskOffering(destPool, diskOffering)) {
throw new CloudRuntimeException("Migration target has no matching tags for volume '" + vol.getName() + " (" + vol.getUuid() + ")'");
}
if (liveMigrateVolume && destPool.getClusterId() != null && srcClusterId != null) {
if (!srcClusterId.equals(destPool.getClusterId())) {
throw new InvalidParameterValueException("Cannot migrate a volume of a virtual machine to a storage pool in a different cluster");
@ -2191,7 +2204,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
if ((destPool.isShared() && newDiskOffering.isUseLocalStorage()) || destPool.isLocal() && newDiskOffering.isShared()) {
throw new InvalidParameterValueException("You cannot move the volume to a shared storage and assing a disk offering for local storage and vice versa.");
}
if (!doesTargetStorageSupportNewDiskOffering(destPool, newDiskOffering)) {
if (!doesTargetStorageSupportDiskOffering(destPool, newDiskOffering)) {
throw new InvalidParameterValueException(String.format("Target Storage [id=%s] tags [%s] does not match new disk offering [id=%s] tags [%s].", destPool.getUuid(),
getStoragePoolTags(destPool), newDiskOffering.getUuid(), newDiskOffering.getTags()));
}
@ -2236,9 +2249,9 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
* </body>
* </table>
*/
protected boolean doesTargetStorageSupportNewDiskOffering(StoragePool destPool, DiskOfferingVO newDiskOffering) {
String newDiskOfferingTags = newDiskOffering.getTags();
return doesTargetStorageSupportDiskOffering(destPool, newDiskOfferingTags);
protected boolean doesTargetStorageSupportDiskOffering(StoragePool destPool, DiskOfferingVO diskOffering) {
String diskOfferingTags = diskOffering.getTags();
return doesTargetStorageSupportDiskOffering(destPool, diskOfferingTags);
}
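// Hypothetical sketch, not the project's implementation: the overload taking the raw tag
// string is not shown in this hunk; the idea is a CSV tag-subset check where every tag the
// disk offering demands must be present on the target pool.
static boolean poolCoversOfferingTags(String poolTagsCsv, String offeringTagsCsv) {
    if (offeringTagsCsv == null || offeringTagsCsv.trim().isEmpty()) {
        return true; // an untagged offering fits any pool
    }
    java.util.Set<String> poolTags = new java.util.HashSet<>();
    if (poolTagsCsv != null) {
        for (String tag : poolTagsCsv.split(",")) {
            poolTags.add(tag.trim());
        }
    }
    for (String wanted : offeringTagsCsv.split(",")) {
        if (!poolTags.contains(wanted.trim())) {
            return false;
        }
    }
    return true;
}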
@Override
@ -3350,4 +3363,13 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
return workJob;
}
@Override
public String getConfigComponentName() {
return VolumeApiService.class.getSimpleName();
}
@Override
public ConfigKey<?>[] getConfigKeys() {
return new ConfigKey<?>[] {ConcurrentMigrationsThresholdPerDatastore};
}
}

View File

@ -5065,19 +5065,33 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
}
if (vm.getType() != VirtualMachine.Type.User) {
// OffLineVmwareMigration: *WHY* ?
throw new InvalidParameterValueException("can only do storage migration on user vm");
}
List<VolumeVO> vols = _volsDao.findByInstance(vm.getId());
if (vols.size() > 1) {
// OffLineVmwareMigration: data disks are not permitted here!
if (vols.size() > 1 &&
// OffLineVmwareMigration: allow multiple disks for vmware
!HypervisorType.VMware.equals(vm.getHypervisorType())) {
throw new InvalidParameterValueException("Data disks attached to the vm, can not migrate. Need to detach data disks first");
}
}
// Check that Vm does not have VM Snapshots
if (_vmSnapshotDao.findByVm(vmId).size() > 0) {
throw new InvalidParameterValueException("VM's disk cannot be migrated, please remove all the VM Snapshots for this VM");
}
checkDestinationHypervisorType(destPool, vm);
_itMgr.storageMigration(vm.getUuid(), destPool);
return _vmDao.findById(vm.getId());
}
private void checkDestinationHypervisorType(StoragePool destPool, VMInstanceVO vm) {
HypervisorType destHypervisorType = destPool.getHypervisor();
if (destHypervisorType == null) {
destHypervisorType = _clusterDao.findById(
@ -5087,8 +5101,6 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
if (vm.getHypervisorType() != destHypervisorType && destHypervisorType != HypervisorType.Any) {
throw new InvalidParameterValueException("hypervisor is not compatible: dest: " + destHypervisorType.toString() + ", vm: " + vm.getHypervisorType().toString());
}
_itMgr.storageMigration(vm.getUuid(), destPool);
return _vmDao.findById(vm.getId());
}
@ -5144,12 +5156,9 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
throw new InvalidParameterValueException("Live Migration of GPU enabled VM is not supported");
}
if (!vm.getHypervisorType().equals(HypervisorType.XenServer) && !vm.getHypervisorType().equals(HypervisorType.VMware) && !vm.getHypervisorType().equals(HypervisorType.KVM)
&& !vm.getHypervisorType().equals(HypervisorType.Ovm) && !vm.getHypervisorType().equals(HypervisorType.Hyperv)
&& !vm.getHypervisorType().equals(HypervisorType.LXC) && !vm.getHypervisorType().equals(HypervisorType.Simulator)
&& !vm.getHypervisorType().equals(HypervisorType.Ovm3)) {
if (!isOnSupportedHypervisorForMigration(vm)) {
if (s_logger.isDebugEnabled()) {
s_logger.debug(vm + " is not XenServer/VMware/KVM/Ovm/Hyperv, cannot migrate this VM.");
s_logger.debug(vm + " is not XenServer/VMware/KVM/Ovm/Hyperv, cannot migrate this VM form hypervisor type " + vm.getHypervisorType());
}
throw new InvalidParameterValueException("Unsupported Hypervisor Type for VM migration, we support XenServer/VMware/KVM/Ovm/Hyperv/Ovm3 only");
}
@ -5227,6 +5236,17 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
}
}
private boolean isOnSupportedHypervisorForMigration(VMInstanceVO vm) {
return (vm.getHypervisorType().equals(HypervisorType.XenServer) ||
vm.getHypervisorType().equals(HypervisorType.VMware) ||
vm.getHypervisorType().equals(HypervisorType.KVM) ||
vm.getHypervisorType().equals(HypervisorType.Ovm) ||
vm.getHypervisorType().equals(HypervisorType.Hyperv) ||
vm.getHypervisorType().equals(HypervisorType.LXC) ||
vm.getHypervisorType().equals(HypervisorType.Simulator) ||
vm.getHypervisorType().equals(HypervisorType.Ovm3));
}
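The chained equals() calls above work, but since HypervisorType is an enum, an EnumSet reads more compactly; a sketch of an equivalent variant (not part of this change):

    import java.util.EnumSet;
    import java.util.Set;

    private static final Set<HypervisorType> MIGRATION_SUPPORTED_HYPERVISORS = EnumSet.of(
            HypervisorType.XenServer, HypervisorType.VMware, HypervisorType.KVM,
            HypervisorType.Ovm, HypervisorType.Hyperv, HypervisorType.LXC,
            HypervisorType.Simulator, HypervisorType.Ovm3);

    private boolean isOnSupportedHypervisorForMigration(VMInstanceVO vm) {
        // Same semantics as the chained comparisons above.
        return MIGRATION_SUPPORTED_HYPERVISORS.contains(vm.getHypervisorType());
    }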
private boolean checkIfHostIsDedicated(HostVO host) {
long hostId = host.getId();
DedicatedResourceVO dedicatedHost = _dedicatedDao.findByHostId(hostId);
@ -5469,7 +5489,9 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
throw new InvalidParameterValueException("Unable to find the vm by id " + vmId);
}
// OfflineVmwareMigration: this would be it ;) if multiple paths exist: unify
if (vm.getState() != State.Running) {
// OfflineVmwareMigration: and not vmware
if (s_logger.isDebugEnabled()) {
s_logger.debug("VM is not Running, unable to migrate the vm " + vm);
}
@ -5482,6 +5504,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
throw new InvalidParameterValueException("Live Migration of GPU enabled VM is not supported");
}
// OfflineVmwareMigration: this condition is too complicated. (already a method somewhere)
if (!vm.getHypervisorType().equals(HypervisorType.XenServer) && !vm.getHypervisorType().equals(HypervisorType.VMware) && !vm.getHypervisorType().equals(HypervisorType.KVM)
&& !vm.getHypervisorType().equals(HypervisorType.Ovm) && !vm.getHypervisorType().equals(HypervisorType.Hyperv)
&& !vm.getHypervisorType().equals(HypervisorType.Simulator)) {

View File

@ -1004,7 +1004,7 @@ public class VolumeApiServiceImplTest {
StoragePool storagePoolMock = Mockito.mock(StoragePool.class);
Mockito.doReturn("A").when(volumeApiServiceImpl).getStoragePoolTags(storagePoolMock);
boolean result = volumeApiServiceImpl.doesTargetStorageSupportNewDiskOffering(storagePoolMock, diskOfferingVoMock);
boolean result = volumeApiServiceImpl.doesTargetStorageSupportDiskOffering(storagePoolMock, diskOfferingVoMock);
Assert.assertFalse(result);
}
@ -1017,7 +1017,7 @@ public class VolumeApiServiceImplTest {
StoragePool storagePoolMock = Mockito.mock(StoragePool.class);
Mockito.doReturn("A,B,C,D,X,Y").when(volumeApiServiceImpl).getStoragePoolTags(storagePoolMock);
boolean result = volumeApiServiceImpl.doesTargetStorageSupportNewDiskOffering(storagePoolMock, diskOfferingVoMock);
boolean result = volumeApiServiceImpl.doesTargetStorageSupportDiskOffering(storagePoolMock, diskOfferingVoMock);
Assert.assertTrue(result);
}
@ -1030,7 +1030,7 @@ public class VolumeApiServiceImplTest {
StoragePool storagePoolMock = Mockito.mock(StoragePool.class);
Mockito.doReturn("A,B,C,D,X,Y").when(volumeApiServiceImpl).getStoragePoolTags(storagePoolMock);
boolean result = volumeApiServiceImpl.doesTargetStorageSupportNewDiskOffering(storagePoolMock, diskOfferingVoMock);
boolean result = volumeApiServiceImpl.doesTargetStorageSupportDiskOffering(storagePoolMock, diskOfferingVoMock);
Assert.assertTrue(result);
}
@ -1043,7 +1043,7 @@ public class VolumeApiServiceImplTest {
StoragePool storagePoolMock = Mockito.mock(StoragePool.class);
Mockito.doReturn("").when(volumeApiServiceImpl).getStoragePoolTags(storagePoolMock);
boolean result = volumeApiServiceImpl.doesTargetStorageSupportNewDiskOffering(storagePoolMock, diskOfferingVoMock);
boolean result = volumeApiServiceImpl.doesTargetStorageSupportDiskOffering(storagePoolMock, diskOfferingVoMock);
Assert.assertFalse(result);
}
@ -1056,7 +1056,7 @@ public class VolumeApiServiceImplTest {
StoragePool storagePoolMock = Mockito.mock(StoragePool.class);
Mockito.doReturn("").when(volumeApiServiceImpl).getStoragePoolTags(storagePoolMock);
boolean result = volumeApiServiceImpl.doesTargetStorageSupportNewDiskOffering(storagePoolMock, diskOfferingVoMock);
boolean result = volumeApiServiceImpl.doesTargetStorageSupportDiskOffering(storagePoolMock, diskOfferingVoMock);
Assert.assertTrue(result);
}
@ -1069,7 +1069,7 @@ public class VolumeApiServiceImplTest {
StoragePool storagePoolMock = Mockito.mock(StoragePool.class);
Mockito.doReturn("C,D").when(volumeApiServiceImpl).getStoragePoolTags(storagePoolMock);
boolean result = volumeApiServiceImpl.doesTargetStorageSupportNewDiskOffering(storagePoolMock, diskOfferingVoMock);
boolean result = volumeApiServiceImpl.doesTargetStorageSupportDiskOffering(storagePoolMock, diskOfferingVoMock);
Assert.assertFalse(result);
}
@ -1082,7 +1082,7 @@ public class VolumeApiServiceImplTest {
StoragePool storagePoolMock = Mockito.mock(StoragePool.class);
Mockito.doReturn("A").when(volumeApiServiceImpl).getStoragePoolTags(storagePoolMock);
boolean result = volumeApiServiceImpl.doesTargetStorageSupportNewDiskOffering(storagePoolMock, diskOfferingVoMock);
boolean result = volumeApiServiceImpl.doesTargetStorageSupportDiskOffering(storagePoolMock, diskOfferingVoMock);
Assert.assertTrue(result);
}

View File

@ -16,21 +16,19 @@
# under the License.
""" BVT tests for Primary Storage
"""
#Import Local Modules
import marvin
# Import System modules
# Import Local Modules
from marvin.cloudstackTestCase import *
from marvin.cloudstackAPI import *
from marvin.lib.utils import *
from marvin.lib.base import *
from marvin.lib.common import *
from nose.plugins.attrib import attr
import logging
from marvin.lib.decoratorGenerators import skipTestIf
from marvin.lib.utils import *
from nose.plugins.attrib import attr
#Import System modules
import time
_multiprocess_shared_ = True
class TestPrimaryStorageServices(cloudstackTestCase):
def setUp(self):
@ -49,14 +47,14 @@ class TestPrimaryStorageServices(cloudstackTestCase):
def tearDown(self):
try:
#Clean up, terminate the created templates
# Clean up, terminate the created templates
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags = ["advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false")
@attr(tags=["advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false")
def test_01_primary_storage_nfs(self):
"""Test primary storage pools - XEN, KVM, VMWare. Not Supported for hyperv
"""
@ -64,24 +62,21 @@ class TestPrimaryStorageServices(cloudstackTestCase):
if self.hypervisor.lower() in ["hyperv"]:
raise self.skipTest("NFS primary storage not supported for Hyper-V")
# Validate the following:
# 1. List Clusters
# 2. verify that the cluster is in 'Enabled' allocation state
# 3. verify that the host is added successfully and
# in Up state with listHosts api response
#Create NFS storage pools with on XEN/KVM/VMWare clusters
# Create NFS storage pools on XEN/KVM/VMWare clusters
clusters = list_clusters(
self.apiclient,
zoneid=self.zone.id
)
assert isinstance(clusters,list) and len(clusters)>0
assert isinstance(clusters, list) and len(clusters) > 0
for cluster in clusters:
#Host should be present before adding primary storage
# Host should be present before adding primary storage
list_hosts_response = list_hosts(
self.apiclient,
clusterid=cluster.id
@ -120,7 +115,7 @@ class TestPrimaryStorageServices(cloudstackTestCase):
"Check storage pool type "
)
#Verify List Storage pool Response has newly added storage pool
# Verify List Storage pool Response has newly added storage pool
storage_pools_response = list_storage_pools(
self.apiclient,
id=storage.id,
@ -152,13 +147,12 @@ class TestPrimaryStorageServices(cloudstackTestCase):
self.cleanup = []
return
@attr(tags = ["advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="true")
@attr(tags=["advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="true")
def test_01_primary_storage_iscsi(self):
"""Test primary storage pools - XEN. Not Supported for kvm,hyperv,vmware
"""
if self.hypervisor.lower() in ["kvm","hyperv", "vmware", "lxc"]:
if self.hypervisor.lower() in ["kvm", "hyperv", "vmware", "lxc"]:
raise self.skipTest("iscsi primary storage not supported on kvm, VMWare, Hyper-V, or LXC")
if not self.services["configurableData"]["iscsi"]["url"]:
@ -175,10 +169,9 @@ class TestPrimaryStorageServices(cloudstackTestCase):
self.apiclient,
zoneid=self.zone.id
)
assert isinstance(clusters,list) and len(clusters)>0
assert isinstance(clusters, list) and len(clusters) > 0
for cluster in clusters:
#Host should be present before adding primary storage
# Host should be present before adding primary storage
list_hosts_response = list_hosts(
self.apiclient,
clusterid=cluster.id
@ -195,7 +188,6 @@ class TestPrimaryStorageServices(cloudstackTestCase):
"Check list Hosts in the cluster: " + cluster.name
)
storage = StoragePool.create(self.apiclient,
self.services["configurableData"]["iscsi"],
clusterid=cluster.id,
@ -218,7 +210,7 @@ class TestPrimaryStorageServices(cloudstackTestCase):
"Check storage pool type "
)
#Verify List Storage pool Response has newly added storage pool
# Verify List Storage pool Response has newly added storage pool
storage_pools_response = list_storage_pools(
self.apiclient,
id=storage.id,
@ -251,17 +243,17 @@ class TestPrimaryStorageServices(cloudstackTestCase):
return
@attr(tags = ["advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false")
@attr(tags=["advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false")
def test_01_add_primary_storage_disabled_host(self):
"""Test add primary storage pool with disabled host
"""
#Disable a host
# Disable a host
clusters = list_clusters(
self.apiclient,
zoneid=self.zone.id
)
assert isinstance(clusters,list) and len(clusters)>0
assert isinstance(clusters, list) and len(clusters) > 0
for cluster in clusters:
list_hosts_response = list_hosts(
@ -269,15 +261,14 @@ class TestPrimaryStorageServices(cloudstackTestCase):
clusterid=cluster.id,
type="Routing"
)
assert isinstance(list_hosts_response,list)
assert isinstance(list_hosts_response, list)
if len(list_hosts_response) < 2:
continue
selected_cluster = cluster
selected_host = list_hosts_response[0]
Host.update(self.apiclient, id=selected_host.id, allocationstate="Disable")
#create a pool
# create a pool
storage_pool_2 = StoragePool.create(
self.apiclient,
self.services["nfs2"],
@ -285,24 +276,23 @@ class TestPrimaryStorageServices(cloudstackTestCase):
zoneid=self.zone.id,
podid=self.pod.id
)
#self.cleanup.append(storage_pool_2)
# self.cleanup.append(storage_pool_2)
#Enable host and disable others
# Enable host and disable others
Host.update(self.apiclient, id=selected_host.id, allocationstate="Enable")
for host in list_hosts_response :
if(host.id == selected_host.id) :
for host in list_hosts_response:
if (host.id == selected_host.id):
continue
Host.update(self.apiclient, id=host.id, allocationstate="Disable")
#put other pools in maintenance
storage_pool_list = StoragePool.list(self.apiclient, zoneid = self.zone.id)
for pool in storage_pool_list :
if(pool.id == storage_pool_2.id) :
# put other pools in maintenance
storage_pool_list = StoragePool.list(self.apiclient, zoneid=self.zone.id)
for pool in storage_pool_list:
if (pool.id == storage_pool_2.id):
continue
StoragePool.update(self.apiclient,id=pool.id, enabled=False)
StoragePool.update(self.apiclient, id=pool.id, enabled=False)
#deployvm
# deployvm
try:
# Create Account
account = Account.create(
@ -329,20 +319,20 @@ class TestPrimaryStorageServices(cloudstackTestCase):
self.cleanup.append(self.virtual_machine)
self.cleanup.append(account)
finally:
#cancel maintenance
for pool in storage_pool_list :
if(pool.id == storage_pool_2.id) :
# cancel maintenance
for pool in storage_pool_list:
if (pool.id == storage_pool_2.id):
continue
StoragePool.update(self.apiclient,id=pool.id, enabled=True)
#Enable all hosts
for host in list_hosts_response :
if(host.id == selected_host.id) :
StoragePool.update(self.apiclient, id=pool.id, enabled=True)
# Enable all hosts
for host in list_hosts_response:
if (host.id == selected_host.id):
continue
Host.update(self.apiclient, id=host.id, allocationstate="Enable")
cleanup_resources(self.apiclient, self.cleanup)
self.cleanup = []
StoragePool.enableMaintenance(self.apiclient,storage_pool_2.id)
StoragePool.enableMaintenance(self.apiclient, storage_pool_2.id)
time.sleep(30)
cmd = deleteStoragePool.deleteStoragePoolCmd()
cmd.id = storage_pool_2.id
@ -355,12 +345,14 @@ class TestPrimaryStorageServices(cloudstackTestCase):
class StorageTagsServices:
"""Test Storage Tags Data Class.
"""
def __init__(self):
self.storage_tags = {
"a" : "NFS-A",
"b" : "NFS-B"
"a": "NFS-A",
"b": "NFS-B"
}
class TestStorageTags(cloudstackTestCase):
@classmethod
@ -390,7 +382,6 @@ class TestStorageTags(cloudstackTestCase):
cls._cleanup = []
if not cls.hypervisorNotSupported:
cls.clusters = list_clusters(
cls.apiclient,
zoneid=cls.zone.id
@ -405,7 +396,7 @@ class TestStorageTags(cloudstackTestCase):
podid=cls.pod.id,
tags=cls.services["storage_tags"]["a"]
)
#PS not appended to _cleanup, it is removed on tearDownClass before cleaning up resources
# PS not appended to _cleanup, it is removed on tearDownClass before cleaning up resources
assert cls.storage_pool_1.state == 'Up'
storage_pools_response = list_storage_pools(cls.apiclient,
id=cls.storage_pool_1.id)
@ -564,7 +555,7 @@ class TestStorageTags(cloudstackTestCase):
self.assertEquals(None, vm_1_volumes, "Check that volume V-2 has not been attached to VM-1")
# Attach V_1 to VM_1
self.virtual_machine_1.attach_volume(self.apiclient,self.volume_1)
self.virtual_machine_1.attach_volume(self.apiclient, self.volume_1)
vm_1_volumes = Volume.list(
self.apiclient,
virtualmachineid=self.virtual_machine_1.id,
@ -667,7 +658,7 @@ class TestStorageTags(cloudstackTestCase):
self.apiclient,
id=vol.id
)
pools_suitable = filter(lambda p : p.suitableformigration, pools_response)
pools_suitable = filter(lambda p: p.suitableformigration, pools_response)
self.debug("Suitable storage pools found: %s" % len(pools_suitable))
self.assertEquals(1, len(pools_suitable), "Check that there is only one item on the list")
@ -685,7 +676,7 @@ class TestStorageTags(cloudstackTestCase):
self.apiclient,
id=vol.id
)
pools_suitable = filter(lambda p : p.suitableformigration, pools_response)
pools_suitable = filter(lambda p: p.suitableformigration, pools_response)
self.debug("Suitable storage pools found: %s" % len(pools_suitable))
self.assertEquals(0, len(pools_suitable), "Check that there is no migration option for volume")

View File

@ -16,16 +16,18 @@
# under the License.
""" BVT tests for Virtual Machine Life Cycle
"""
#Import Local Modules
# Import Local Modules
from marvin.cloudstackTestCase import cloudstackTestCase
from marvin.cloudstackAPI import (recoverVirtualMachine,
destroyVirtualMachine,
attachIso,
detachIso,
provisionCertificate,
updateConfiguration)
from marvin.lib.utils import *
updateConfiguration,
migrateVirtualMachine)
from marvin.lib.utils import (cleanup_resources,
validateList,
SshClient)
from marvin.lib.base import (Account,
ServiceOffering,
VirtualMachine,
@ -33,6 +35,7 @@ from marvin.lib.base import (Account,
Iso,
Router,
Configurations,
StoragePool,
Volume,
DiskOffering)
from marvin.lib.common import (get_domain,
@ -41,11 +44,12 @@ from marvin.lib.common import (get_domain,
list_hosts)
from marvin.codes import FAILED, PASS
from nose.plugins.attrib import attr
#Import System modules
# Import System modules
import time
import re
_multiprocess_shared_ = True
class TestDeployVM(cloudstackTestCase):
@classmethod
@ -59,8 +63,8 @@ class TestDeployVM(cloudstackTestCase):
cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
cls.services['mode'] = cls.zone.networktype
#If local storage is enabled, alter the offerings to use localstorage
#this step is needed for devcloud
# If local storage is enabled, alter the offerings to use localstorage
# this step is needed for devcloud
if cls.zone.localstorageenabled == True:
cls.services["service_offerings"]["tiny"]["storagetype"] = 'local'
cls.services["service_offerings"]["small"]["storagetype"] = 'local'
@ -118,8 +122,7 @@ class TestDeployVM(cloudstackTestCase):
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
@attr(tags = ["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false")
@attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false")
def test_deploy_vm(self):
"""Test Deploy Virtual Machine
"""
@ -164,10 +167,9 @@ class TestDeployVM(cloudstackTestCase):
)
return
@attr(tags = ["advanced"], required_hardware="false")
@attr(tags=["advanced"], required_hardware="false")
def test_advZoneVirtualRouter(self):
#TODO: SIMENH: duplicate test, remove it
# TODO: SIMENH: duplicate test, remove it
"""
Test advanced zone virtual router
1. Is Running
@ -176,21 +178,20 @@ class TestDeployVM(cloudstackTestCase):
@return:
"""
routers = Router.list(self.apiclient, account=self.account.name)
self.assertTrue(len(routers) > 0, msg = "No virtual router found")
self.assertTrue(len(routers) > 0, msg="No virtual router found")
router = routers[0]
self.assertEqual(router.state, 'Running', msg="Router is not in running state")
self.assertEqual(router.account, self.account.name, msg="Router does not belong to the account")
#Has linklocal, public and guest ips
# Has linklocal, public and guest ips
self.assertIsNotNone(router.linklocalip, msg="Router has no linklocal ip")
self.assertIsNotNone(router.publicip, msg="Router has no public ip")
self.assertIsNotNone(router.guestipaddress, msg="Router has no guest ip")
@attr(mode = ["basic"], required_hardware="false")
@attr(mode=["basic"], required_hardware="false")
def test_basicZoneVirtualRouter(self):
#TODO: SIMENH: duplicate test, remove it
# TODO: SIMENH: duplicate test, remove it
"""
Tests for basic zone virtual router
1. Is Running
@ -198,13 +199,13 @@ class TestDeployVM(cloudstackTestCase):
@return:
"""
routers = Router.list(self.apiclient, account=self.account.name)
self.assertTrue(len(routers) > 0, msg = "No virtual router found")
self.assertTrue(len(routers) > 0, msg="No virtual router found")
router = routers[0]
self.assertEqual(router.state, 'Running', msg="Router is not in running state")
self.assertEqual(router.account, self.account.name, msg="Router does not belong to the account")
@attr(tags = ['advanced','basic','sg'], required_hardware="false")
@attr(tags=['advanced', 'basic', 'sg'], required_hardware="false")
def test_deploy_vm_multiple(self):
"""Test Multiple Deploy Virtual Machine
@ -236,7 +237,8 @@ class TestDeployVM(cloudstackTestCase):
list_vms = VirtualMachine.list(self.apiclient, ids=[virtual_machine1.id, virtual_machine2.id], listAll=True)
self.debug(
"Verify listVirtualMachines response for virtual machines: %s, %s" % (virtual_machine1.id, virtual_machine2.id)
"Verify listVirtualMachines response for virtual machines: %s, %s" % (
virtual_machine1.id, virtual_machine2.id)
)
self.assertEqual(
isinstance(list_vms, list),
@ -271,8 +273,8 @@ class TestVMLifeCycle(cloudstackTestCase):
cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
cls.services['mode'] = cls.zone.networktype
#if local storage is enabled, alter the offerings to use localstorage
#this step is needed for devcloud
# if local storage is enabled, alter the offerings to use localstorage
# this step is needed for devcloud
if cls.zone.localstorageenabled == True:
cls.services["service_offerings"]["tiny"]["storagetype"] = 'local'
cls.services["service_offerings"]["small"]["storagetype"] = 'local'
@ -308,7 +310,7 @@ class TestVMLifeCycle(cloudstackTestCase):
cls.apiclient,
cls.services["service_offerings"]["medium"]
)
#create small and large virtual machines
# create small and large virtual machines
cls.small_virtual_machine = VirtualMachine.create(
cls.apiclient,
cls.services["small"],
@ -355,14 +357,13 @@ class TestVMLifeCycle(cloudstackTestCase):
def tearDown(self):
try:
#Clean up, terminate the created ISOs
# Clean up, terminate the created ISOs
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags = ["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false")
@attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false")
def test_01_stop_vm(self):
"""Test Stop Virtual Machine
"""
@ -377,8 +378,7 @@ class TestVMLifeCycle(cloudstackTestCase):
self.fail("Failed to stop VM: %s" % e)
return
@attr(tags = ["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false")
@attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false")
def test_01_stop_vm_forced(self):
"""Test Force Stop Virtual Machine
"""
@ -410,8 +410,7 @@ class TestVMLifeCycle(cloudstackTestCase):
)
return
@attr(tags = ["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false")
@attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false")
def test_02_start_vm(self):
"""Test Start Virtual Machine
"""
@ -449,7 +448,7 @@ class TestVMLifeCycle(cloudstackTestCase):
)
return
@attr(tags = ["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false")
@attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false")
def test_03_reboot_vm(self):
"""Test Reboot Virtual Machine
"""
@ -475,7 +474,7 @@ class TestVMLifeCycle(cloudstackTestCase):
self.assertNotEqual(
len(list_vm_response),
0,
"Check VM avaliable in List Virtual Machines"
"Check VM available in List Virtual Machines"
)
self.assertEqual(
@ -485,8 +484,7 @@ class TestVMLifeCycle(cloudstackTestCase):
)
return
@attr(tags = ["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false")
@attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false")
def test_06_destroy_vm(self):
"""Test destroy Virtual Machine
"""
@ -522,9 +520,9 @@ class TestVMLifeCycle(cloudstackTestCase):
)
return
@attr(tags = ["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false")
@attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false")
def test_07_restore_vm(self):
#TODO: SIMENH: add another test the data on the restored VM.
# TODO: SIMENH: add another test for the data on the restored VM.
"""Test recover Virtual Machine
"""
@ -563,7 +561,7 @@ class TestVMLifeCycle(cloudstackTestCase):
return
@attr(tags = ["advanced", "advancedns", "smoke", "basic", "sg", "multihost"], required_hardware="false")
@attr(tags=["advanced", "advancedns", "smoke", "basic", "sg", "multihost"], required_hardware="false")
def test_08_migrate_vm(self):
"""Test migrate VM
"""
@ -591,10 +589,10 @@ class TestVMLifeCycle(cloudstackTestCase):
# For XenServer and VMware, migration is possible between hosts belonging to different clusters
# with the help of XenMotion and Vmotion respectively.
if self.hypervisor.lower() in ["kvm","simulator"]:
#identify suitable host
if self.hypervisor.lower() in ["kvm", "simulator"]:
# identify suitable host
clusters = [h.clusterid for h in hosts]
#find hosts withe same clusterid
# find hosts with the same clusterid
clusters = [cluster for index, cluster in enumerate(clusters) if clusters.count(cluster) > 1]
if len(clusters) <= 1:
@ -607,8 +605,8 @@ class TestVMLifeCycle(cloudstackTestCase):
target_host = suitable_hosts[0]
migrate_host = suitable_hosts[1]
#deploy VM on target host
self.vm_to_migrate = VirtualMachine.create(
# deploy VM on target host
vm_to_migrate = VirtualMachine.create(
self.apiclient,
self.services["small"],
accountid=self.account.name,
@ -618,30 +616,30 @@ class TestVMLifeCycle(cloudstackTestCase):
hostid=target_host.id
)
self.debug("Migrating VM-ID: %s to Host: %s" % (
self.vm_to_migrate.id,
vm_to_migrate.id,
migrate_host.id
))
self.vm_to_migrate.migrate(self.apiclient, migrate_host.id)
vm_to_migrate.migrate(self.apiclient, migrate_host.id)
retries_cnt = 3
while retries_cnt >=0:
while retries_cnt >= 0:
list_vm_response = VirtualMachine.list(self.apiclient,
id=self.vm_to_migrate.id)
id=vm_to_migrate.id)
self.assertNotEqual(
list_vm_response,
None,
"Check virtual machine is listed"
)
vm_response = list_vm_response[0]
self.assertEqual(vm_response.id,self.vm_to_migrate.id,"Check virtual machine ID of migrated VM")
self.assertEqual(vm_response.hostid,migrate_host.id,"Check destination hostID of migrated VM")
self.assertEqual(vm_response.id, vm_to_migrate.id, "Check virtual machine ID of migrated VM")
self.assertEqual(vm_response.hostid, migrate_host.id, "Check destination hostID of migrated VM")
retries_cnt = retries_cnt - 1
return
@attr(configuration = "expunge.interval")
@attr(configuration = "expunge.delay")
@attr(tags = ["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false")
@attr(configuration="expunge.interval")
@attr(configuration="expunge.delay")
@attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false")
def test_09_expunge_vm(self):
"""Test destroy(expunge) Virtual Machine
"""
@ -662,8 +660,8 @@ class TestVMLifeCycle(cloudstackTestCase):
expunge_delay = int(config[0].value)
time.sleep(expunge_delay * 2)
#VM should be destroyed unless expunge thread hasn't run
#Wait for two cycles of the expunge thread
# VM should be destroyed unless expunge thread hasn't run
# Wait for two cycles of the expunge thread
config = Configurations.list(
self.apiclient,
name='expunge.interval'
@ -683,10 +681,10 @@ class TestVMLifeCycle(cloudstackTestCase):
self.debug("listVirtualMachines response: %s" % list_vm_response)
self.assertEqual(list_vm_response,None,"Check Expunged virtual machine is in listVirtualMachines response")
self.assertEqual(list_vm_response, None, "Check expunged virtual machine is not present in listVirtualMachines response")
return
@attr(tags = ["advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="true")
@attr(tags=["advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="true")
def test_10_attachAndDetach_iso(self):
"""Test for attach and detach ISO to virtual machine"""
@ -712,14 +710,14 @@ class TestVMLifeCycle(cloudstackTestCase):
try:
iso.download(self.apiclient)
except Exception as e:
self.fail("Exception while downloading ISO %s: %s"\
self.fail("Exception while downloading ISO %s: %s" \
% (iso.id, e))
self.debug("Attach ISO with ID: %s to VM ID: %s" % (
iso.id,
self.virtual_machine.id
))
#Attach ISO to virtual machine
# Attach ISO to virtual machine
cmd = attachIso.attachIsoCmd()
cmd.id = iso.id
cmd.virtualmachineid = self.virtual_machine.id
@ -760,14 +758,14 @@ class TestVMLifeCycle(cloudstackTestCase):
)
try:
#Unmount ISO
# Unmount ISO
command = "umount %s" % mount_dir
ssh_client.execute(command)
except Exception as e:
self.fail("SSH failed for virtual machine: %s - %s" %
(self.virtual_machine.ipaddress, e))
#Detach from VM
# Detach from VM
cmd = detachIso.detachIsoCmd()
cmd.virtualmachineid = self.virtual_machine.id
self.apiclient.detachIso(cmd)
@ -825,6 +823,7 @@ class TestVMLifeCycle(cloudstackTestCase):
self.assertEqual(Volume.list(self.apiclient, id=vol1.id), None, "List response contains records when it should not")
class TestSecuredVmMigration(cloudstackTestCase):
@classmethod
@ -842,7 +841,8 @@ class TestSecuredVmMigration(cloudstackTestCase):
domain = get_domain(cls.apiclient)
cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
cls.services['mode'] = cls.zone.networktype
cls.hostConfig = cls.config.__dict__["zones"][0].__dict__["pods"][0].__dict__["clusters"][0].__dict__["hosts"][0].__dict__
cls.hostConfig = cls.config.__dict__["zones"][0].__dict__["pods"][0].__dict__["clusters"][0].__dict__["hosts"][
0].__dict__
cls.management_ip = cls.config.__dict__["mgtSvr"][0].__dict__["mgtSvrIp"]
template = get_template(
@ -916,19 +916,20 @@ class TestSecuredVmMigration(cloudstackTestCase):
target_hosts = Host.listForMigration(self.apiclient,
virtualmachineid=virtualmachineid)
for host in target_hosts:
h = list_hosts(self.apiclient,type='Routing', id=host.id)[0]
h = list_hosts(self.apiclient, type='Routing', id=host.id)[0]
if h.details.secured == secured:
return h
cloudstackTestCase.skipTest(self, "No target hosts available, skipping test.")
def check_migration_protocol(self, protocol, host):
resp = SshClient(host.ipaddress, port=22, user=self.hostConfig["username"],passwd=self.hostConfig["password"])\
resp = SshClient(host.ipaddress, port=22, user=self.hostConfig["username"], passwd=self.hostConfig["password"]) \
.execute("grep -a listen_%s=1 /etc/libvirt/libvirtd.conf | tail -1" % protocol)
if protocol not in resp[0]:
cloudstackTestCase.fail(self, "Libvirt listen protocol expected: '" + protocol + "\n"
"does not match actual: " + resp[0])
"does not match actual: " +
resp[0])
def migrate_and_check(self, vm, src_host, dest_host, proto='tls'):
"""
@ -940,7 +941,7 @@ class TestSecuredVmMigration(cloudstackTestCase):
self.assertEqual(vm_response.hostid, dest_host.id, "Check destination host ID of migrated VM")
def unsecure_host(self, host):
SshClient(host.ipaddress, port=22, user=self.hostConfig["username"], passwd=self.hostConfig["password"])\
SshClient(host.ipaddress, port=22, user=self.hostConfig["username"], passwd=self.hostConfig["password"]) \
.execute("rm -f /etc/cloudstack/agent/cloud* && \
sed -i 's/listen_tls.*/listen_tls=0/g' /etc/libvirt/libvirtd.conf && \
sed -i 's/listen_tcp.*/listen_tcp=1/g' /etc/libvirt/libvirtd.conf && \
@ -1051,7 +1052,8 @@ class TestSecuredVmMigration(cloudstackTestCase):
self.migrate_and_check(vm, secure_host, unsecure_host, proto='tls')
except Exception:
pass
else: self.fail("Migration succeeded, instead it should fail")
else:
self.fail("Migration succeeded, instead it should fail")
@attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg", "security"], required_hardware="false")
def test_04_nonsecured_to_secured_vm_migration(self):
@ -1072,5 +1074,217 @@ class TestSecuredVmMigration(cloudstackTestCase):
self.migrate_and_check(vm, unsecure_host, secure_host, proto='tcp')
except Exception:
pass
else: self.fail("Migration succeeded, instead it should fail")
else:
self.fail("Migration succeeded, instead it should fail")
class TestMigrateVMwithVolume(cloudstackTestCase):
@classmethod
def setUpClass(cls):
testClient = super(TestMigrateVMwithVolume, cls).getClsTestClient()
cls.apiclient = testClient.getApiClient()
cls.services = testClient.getParsedTestDataConfig()
cls.hypervisor = testClient.getHypervisorInfo()
cls._cleanup = []
# Get Zone, Domain and templates
domain = get_domain(cls.apiclient)
cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
cls.services['mode'] = cls.zone.networktype
cls.hostConfig = cls.config.__dict__["zones"][0].__dict__["pods"][0].__dict__["clusters"][0].__dict__["hosts"][
0].__dict__
cls.management_ip = cls.config.__dict__["mgtSvr"][0].__dict__["mgtSvrIp"]
template = get_template(
cls.apiclient,
cls.zone.id,
cls.services["ostype"]
)
if template == FAILED:
assert False, "get_template() failed to return template with description %s" % cls.services["ostype"]
# Set Zones and disk offerings
cls.services["small"]["zoneid"] = cls.zone.id
cls.services["small"]["template"] = template.id
cls.services["iso1"]["zoneid"] = cls.zone.id
# Create VMs, NAT Rules etc
cls.account = Account.create(
cls.apiclient,
cls.services["account"],
domainid=domain.id
)
cls.small_offering = ServiceOffering.create(
cls.apiclient,
cls.services["service_offerings"]["small"]
)
cls._cleanup = [
cls.small_offering,
cls.account
]
@classmethod
def tearDownClass(cls):
cls.apiclient = super(TestMigrateVMwithVolume, cls).getClsTestClient().getApiClient()
try:
cleanup_resources(cls.apiclient, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
if self.hypervisor.lower() not in ["vmware"]:
self.skipTest("VM Migration with Volumes is not supported on other than VMware")
self.hosts = Host.list(
self.apiclient,
zoneid=self.zone.id,
type='Routing',
hypervisor='VMware')
if len(self.hosts) < 2:
self.skipTest("Requires at least two hosts for performing migration related tests")
def tearDown(self):
try:
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
def get_target_host(self, virtualmachineid):
target_hosts = Host.listForMigration(self.apiclient,
virtualmachineid=virtualmachineid)
if len(target_hosts) < 1:
self.skipTest("No target hosts found")
return target_hosts[0]
def get_target_pool(self, volid):
target_pools = StoragePool.listForMigration(self.apiclient, id=volid)
if len(target_pools) < 1:
self.skipTest("Not enough storage pools found")
return target_pools[0]
def get_vm_volumes(self, id):
return Volume.list(self.apiclient, virtualmachineid=id, listall=True)
def deploy_vm(self):
return VirtualMachine.create(
self.apiclient,
self.services["small"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.small_offering.id,
mode=self.services["mode"])
def migrate_vm_with_pools(self, target_pool, id):
cmd = migrateVirtualMachine.migrateVirtualMachineCmd()
cmd.storageid = target_pool.id
cmd.virtualmachineid = id
return self.apiclient.migrateVirtualMachine(cmd)
def create_volume(self):
small_disk_offering = DiskOffering.list(self.apiclient, name='Small')[0]
return Volume.create(
self.apiclient,
self.services,
account=self.account.name,
diskofferingid=small_disk_offering.id,
domainid=self.account.domainid,
zoneid=self.zone.id
)
"""
BVT for Vmware Offline VM and Volume Migration
"""
@attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg", "security"], required_hardware="false")
def test_01_migrate_VM_and_root_volume(self):
"""Test VM will be migrated with it's root volume"""
# Validate the following
# 1. Deploys a VM
# 2. Finds suitable host for migration
# 3. Finds suitable storage pool for root volume
# 4. Migrate the VM to new host and storage pool and assert migration successful
vm = self.deploy_vm()
root_volume = self.get_vm_volumes(vm.id)[0]
target_pool = self.get_target_pool(root_volume.id)
vm.stop(self.apiclient)
self.migrate_vm_with_pools(target_pool, vm.id)
root_volume = self.get_vm_volumes(vm.id)[0]
self.assertEqual(root_volume.storageid, target_pool.id, "Pool ID was not as expected")
@attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg", "security"], required_hardware="false")
def test_02_migrate_VM_with_two_data_disks(self):
"""Test VM will be migrated with it's root volume"""
# Validate the following
# 1. Deploys a VM and attaches 2 data disks
# 2. Finds suitable host for migration
# 3. Finds suitable storage pool for volumes
# 4. Migrate the VM to new host and storage pool and assert migration successful
vm = self.deploy_vm()
volume1 = self.create_volume()
volume2 = self.create_volume()
vm.attach_volume(self.apiclient, volume1)
vm.attach_volume(self.apiclient, volume2)
root_volume = self.get_vm_volumes(vm.id)[0]
target_pool = self.get_target_pool(root_volume.id)
vm.stop(self.apiclient)
self.migrate_vm_with_pools(target_pool, vm.id)
volume1 = Volume.list(self.apiclient, id=volume1.id)[0]
volume2 = Volume.list(self.apiclient, id=volume2.id)[0]
root_volume = self.get_vm_volumes(vm.id)[0]
self.assertEqual(root_volume.storageid, target_pool.id, "Pool ID was not as expected")
self.assertEqual(volume1.storageid, target_pool.id, "Pool ID was not as expected")
self.assertEqual(volume2.storageid, target_pool.id, "Pool ID was not as expected")
@attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg", "security"], required_hardware="false")
def test_03_migrate_detached_volume(self):
"""Test VM will be migrated with it's root volume"""
# Validate the following
# 1. Deploys a VM and attaches 1 data disk
# 2. Detaches the Disk
# 3. Finds suitable storage pool for the Disk
# 4. Migrate the storage pool and assert migration successful
vm = self.deploy_vm()
volume1 = self.create_volume()
vm.attach_volume(self.apiclient, volume1)
vm.detach_volume(self.apiclient, volume1)
target_pool = self.get_target_pool(volume1.id)
Volume.migrate(self.apiclient, storageid=target_pool.id, volumeid=volume1.id)
vol = Volume.list(self.apiclient, id=volume1.id)[0]
self.assertEqual(vol.storageid, target_pool.id, "Storage pool was not the same as expected")

View File

@ -73,6 +73,14 @@ public class StringUtils {
public static String join(final String delimiter, final Object... components) {
return org.apache.commons.lang.StringUtils.join(components, delimiter);
}
/**
* @deprecated
* Please use org.apache.commons.lang.StringUtils.isBlank() as a replacement
*/
@Deprecated
public static boolean isBlank(String str) {
return org.apache.commons.lang.StringUtils.isBlank(str);
}
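Call sites can switch to the Apache utility directly; the semantics are identical ('name' below stands in for any String):

    // Drop-in replacement for the deprecated wrapper above:
    // returns true for null, "" and whitespace-only input.
    boolean blank = org.apache.commons.lang.StringUtils.isBlank(name);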
/**
* @deprecated

View File

@ -447,6 +447,23 @@ public class VirtualMachineMO extends BaseMO {
return false;
}
public boolean changeDatastore(ManagedObjectReference morDataStore) throws Exception {
VirtualMachineRelocateSpec relocateSpec = new VirtualMachineRelocateSpec();
relocateSpec.setDatastore(morDataStore);
ManagedObjectReference morTask = _context.getService().relocateVMTask(_mor, relocateSpec, null);
boolean result = _context.getVimClient().waitForTask(morTask);
if (result) {
_context.waitForTaskProgressDone(morTask);
return true;
} else {
s_logger.error("VMware change datastore relocateVM_Task failed due to " + TaskMO.getTaskFailureInfo(_context, morTask));
}
return false;
}
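A hedged sketch of how a resource-side handler might drive changeDatastore() during an offline migration; the lookup helpers and wiring below are illustrative assumptions, not necessarily the classes this commit adds:

    // Illustrative only: resolve the VM and the target datastore, then relocate.
    VirtualMachineMO vmMo = hyperHost.findVmOnHyperHost(vmName); // assumed lookup on the hypervisor host
    ManagedObjectReference morTargetDs = targetDsMo.getMor();    // assumed DatastoreMO for the destination pool
    if (!vmMo.changeDatastore(morTargetDs)) {
        throw new CloudRuntimeException("Unable to relocate " + vmName + " to the target datastore");
    }
    // On success the management server can follow up with a disk consolidation,
    // matching the consolidation call this PR adds after successful migrations.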
public boolean relocate(ManagedObjectReference morTargetHost) throws Exception {
VirtualMachineRelocateSpec relocateSpec = new VirtualMachineRelocateSpec();
relocateSpec.setHost(morTargetHost);