Merge remote-tracking branch 'apache/4.18' into HEAD

This commit is contained in:
Wei Zhou 2023-06-21 15:08:56 +02:00
commit 09a4a252d7
26 changed files with 1972 additions and 109 deletions

View File

@ -86,7 +86,8 @@ public class StartVMCmd extends BaseAsyncCmd implements UserCmd {
type = CommandType.BOOLEAN, type = CommandType.BOOLEAN,
description = "True by default, CloudStack will firstly try to start the VM on the last host where it run on before stopping, if destination host is not specified. " + description = "True by default, CloudStack will firstly try to start the VM on the last host where it run on before stopping, if destination host is not specified. " +
"If false, CloudStack will not consider the last host and start the VM by normal process.", "If false, CloudStack will not consider the last host and start the VM by normal process.",
since = "4.18.0") since = "4.18.0",
authorized = {RoleType.Admin})
private Boolean considerLastHost; private Boolean considerLastHost;
@Parameter(name = ApiConstants.DEPLOYMENT_PLANNER, type = CommandType.STRING, description = "Deployment planner to use for vm allocation. Available to ROOT admin only", since = "4.4", authorized = { RoleType.Admin }) @Parameter(name = ApiConstants.DEPLOYMENT_PLANNER, type = CommandType.STRING, description = "Deployment planner to use for vm allocation. Available to ROOT admin only", since = "4.4", authorized = { RoleType.Admin })

View File

@ -264,6 +264,8 @@ public interface VirtualMachineManager extends Manager {
Pair<Long, Long> findClusterAndHostIdForVm(long vmId); Pair<Long, Long> findClusterAndHostIdForVm(long vmId);
Pair<Long, Long> findClusterAndHostIdForVm(VirtualMachine vm, boolean skipCurrentHostForStartingVm);
/** /**
* Obtains statistics for a list of VMs; CPU and network utilization * Obtains statistics for a list of VMs; CPU and network utilization
* @param hostId ID of the host * @param hostId ID of the host

View File

@ -212,6 +212,7 @@ import com.cloud.service.dao.ServiceOfferingDao;
import com.cloud.storage.DiskOfferingVO; import com.cloud.storage.DiskOfferingVO;
import com.cloud.storage.ScopeType; import com.cloud.storage.ScopeType;
import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.Storage.ImageFormat;
import com.cloud.storage.Storage;
import com.cloud.storage.StorageManager; import com.cloud.storage.StorageManager;
import com.cloud.storage.StoragePool; import com.cloud.storage.StoragePool;
import com.cloud.storage.VMTemplateVO; import com.cloud.storage.VMTemplateVO;
@ -1054,6 +1055,26 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
} }
} }
/**
 * When starting a VMware VM whose prepared volume pools point at a different
 * cluster than the VM's previous host, attempt a hypervisor-level migration
 * from that previous host so the start can proceed on the destination cluster.
 * No-op for non-VMware VMs, VMs without a last host, same-cluster starts, or
 * when no target pool belongs to the destination cluster.
 */
protected void checkAndAttemptMigrateVmAcrossCluster(final VMInstanceVO vm, final Long destinationClusterId, final Map<Volume, StoragePool> volumePoolMap) {
    final boolean vmwareWithLastHost = HypervisorType.VMware.equals(vm.getHypervisorType()) && vm.getLastHostId() != null;
    if (!vmwareWithLastHost) {
        return;
    }
    final Host previousHost = _hostDao.findById(vm.getLastHostId());
    if (destinationClusterId.equals(previousHost.getClusterId())) {
        // Starting on the same cluster as before; nothing to migrate
        return;
    }
    final boolean anyPoolOnDestinationCluster = volumePoolMap.values().stream()
            .anyMatch(pool -> destinationClusterId.equals(pool.getClusterId()));
    if (!anyPoolOnDestinationCluster) {
        return;
    }
    final Answer[] migrationAnswers = attemptHypervisorMigration(vm, volumePoolMap, previousHost.getId());
    if (migrationAnswers == null) {
        s_logger.warn("Hypervisor inter-cluster migration during VM start failed");
        return;
    }
    // Other network related updates will be done using caller
    markVolumesInPool(vm, migrationAnswers);
}
@Override @Override
public void orchestrateStart(final String vmUuid, final Map<VirtualMachineProfile.Param, Object> params, final DeploymentPlan planToDeploy, final DeploymentPlanner planner) public void orchestrateStart(final String vmUuid, final Map<VirtualMachineProfile.Param, Object> params, final DeploymentPlan planToDeploy, final DeploymentPlanner planner)
throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException { throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException {
@ -1227,6 +1248,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
resetVmNicsDeviceId(vm.getId()); resetVmNicsDeviceId(vm.getId());
_networkMgr.prepare(vmProfile, dest, ctx); _networkMgr.prepare(vmProfile, dest, ctx);
if (vm.getHypervisorType() != HypervisorType.BareMetal) { if (vm.getHypervisorType() != HypervisorType.BareMetal) {
checkAndAttemptMigrateVmAcrossCluster(vm, cluster_id, dest.getStorageForDisks());
volumeMgr.prepare(vmProfile, dest); volumeMgr.prepare(vmProfile, dest);
} }
@ -2355,7 +2377,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
try { try {
return _agentMgr.send(hostId, commandsContainer); return _agentMgr.send(hostId, commandsContainer);
} catch (AgentUnavailableException | OperationTimedoutException e) { } catch (AgentUnavailableException | OperationTimedoutException e) {
throw new CloudRuntimeException(String.format("Failed to migrate VM: %s", vm.getUuid()),e); s_logger.warn(String.format("Hypervisor migration failed for the VM: %s", vm), e);
} }
} }
return null; return null;
@ -2904,7 +2926,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
* </ul> * </ul>
*/ */
protected void executeManagedStorageChecksWhenTargetStoragePoolProvided(StoragePoolVO currentPool, VolumeVO volume, StoragePoolVO targetPool) { protected void executeManagedStorageChecksWhenTargetStoragePoolProvided(StoragePoolVO currentPool, VolumeVO volume, StoragePoolVO targetPool) {
if (!currentPool.isManaged()) { if (!currentPool.isManaged() || currentPool.getPoolType().equals(Storage.StoragePoolType.PowerFlex)) {
return; return;
} }
if (currentPool.getId() == targetPool.getId()) { if (currentPool.getId() == targetPool.getId()) {
@ -5712,8 +5734,12 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
return new Pair<>(clusterId, hostId); return new Pair<>(clusterId, hostId);
} }
private Pair<Long, Long> findClusterAndHostIdForVm(VirtualMachine vm) { @Override
Long hostId = vm.getHostId(); public Pair<Long, Long> findClusterAndHostIdForVm(VirtualMachine vm, boolean skipCurrentHostForStartingVm) {
Long hostId = null;
if (!skipCurrentHostForStartingVm || !State.Starting.equals(vm.getState())) {
hostId = vm.getHostId();
}
Long clusterId = null; Long clusterId = null;
if(hostId == null) { if(hostId == null) {
hostId = vm.getLastHostId(); hostId = vm.getLastHostId();
@ -5731,6 +5757,10 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
return new Pair<>(clusterId, hostId); return new Pair<>(clusterId, hostId);
} }
/**
 * Convenience overload of {@link #findClusterAndHostIdForVm(VirtualMachine, boolean)}
 * that never skips the VM's current host (i.e. preserves the pre-existing lookup
 * behavior for callers that are not handling a starting VM specially).
 */
private Pair<Long, Long> findClusterAndHostIdForVm(VirtualMachine vm) {
    return findClusterAndHostIdForVm(vm, false);
}
@Override @Override
public Pair<Long, Long> findClusterAndHostIdForVm(long vmId) { public Pair<Long, Long> findClusterAndHostIdForVm(long vmId) {
VMInstanceVO vm = _vmDao.findById(vmId); VMInstanceVO vm = _vmDao.findById(vmId);

View File

@ -32,6 +32,8 @@ import java.util.Arrays;
import java.util.HashMap; import java.util.HashMap;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.Random;
import java.util.stream.Collectors;
import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator; import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.ConfigKey;
@ -46,6 +48,7 @@ import org.mockito.InjectMocks;
import org.mockito.Mock; import org.mockito.Mock;
import org.mockito.Mockito; import org.mockito.Mockito;
import org.mockito.Spy; import org.mockito.Spy;
import org.mockito.stubbing.Answer;
import org.mockito.runners.MockitoJUnitRunner; import org.mockito.runners.MockitoJUnitRunner;
import com.cloud.agent.AgentManager; import com.cloud.agent.AgentManager;
@ -68,6 +71,7 @@ import com.cloud.service.ServiceOfferingVO;
import com.cloud.service.dao.ServiceOfferingDao; import com.cloud.service.dao.ServiceOfferingDao;
import com.cloud.storage.DiskOfferingVO; import com.cloud.storage.DiskOfferingVO;
import com.cloud.storage.ScopeType; import com.cloud.storage.ScopeType;
import com.cloud.storage.Storage;
import com.cloud.storage.StorageManager; import com.cloud.storage.StorageManager;
import com.cloud.storage.StoragePool; import com.cloud.storage.StoragePool;
import com.cloud.storage.StoragePoolHostVO; import com.cloud.storage.StoragePoolHostVO;
@ -373,9 +377,26 @@ public class VirtualMachineManagerImplTest {
Mockito.verify(storagePoolVoMock, Mockito.times(0)).getId(); Mockito.verify(storagePoolVoMock, Mockito.times(0)).getId();
} }
/**
 * Managed-storage checks must be skipped entirely for PowerFlex pools: the
 * method should return before ever comparing pool ids (getId never invoked).
 */
@Test
public void allowVolumeMigrationsForPowerFlexStorage() {
    Mockito.doReturn(Storage.StoragePoolType.PowerFlex).when(storagePoolVoMock).getPoolType();
    Mockito.doReturn(true).when(storagePoolVoMock).isManaged();
    StoragePoolVO targetPoolMock = Mockito.mock(StoragePoolVO.class);
    virtualMachineManagerImpl.executeManagedStorageChecksWhenTargetStoragePoolProvided(storagePoolVoMock, volumeVoMock, targetPoolMock);
    Mockito.verify(storagePoolVoMock).isManaged();
    Mockito.verify(storagePoolVoMock, Mockito.times(0)).getId();
}
@Test @Test
public void executeManagedStorageChecksWhenTargetStoragePoolProvidedTestCurrentStoragePoolEqualsTargetPool() { public void executeManagedStorageChecksWhenTargetStoragePoolProvidedTestCurrentStoragePoolEqualsTargetPool() {
Mockito.doReturn(true).when(storagePoolVoMock).isManaged(); Mockito.doReturn(true).when(storagePoolVoMock).isManaged();
// return any storage type except powerflex/scaleio
List<Storage.StoragePoolType> values = Arrays.asList(Storage.StoragePoolType.values());
when(storagePoolVoMock.getPoolType()).thenAnswer((Answer<Storage.StoragePoolType>) invocation -> {
List<Storage.StoragePoolType> filteredValues = values.stream().filter(v -> v != Storage.StoragePoolType.PowerFlex).collect(Collectors.toList());
int randomIndex = new Random().nextInt(filteredValues.size());
return filteredValues.get(randomIndex); });
virtualMachineManagerImpl.executeManagedStorageChecksWhenTargetStoragePoolProvided(storagePoolVoMock, volumeVoMock, storagePoolVoMock); virtualMachineManagerImpl.executeManagedStorageChecksWhenTargetStoragePoolProvided(storagePoolVoMock, volumeVoMock, storagePoolVoMock);
@ -386,6 +407,12 @@ public class VirtualMachineManagerImplTest {
@Test(expected = CloudRuntimeException.class) @Test(expected = CloudRuntimeException.class)
public void executeManagedStorageChecksWhenTargetStoragePoolProvidedTestCurrentStoragePoolNotEqualsTargetPool() { public void executeManagedStorageChecksWhenTargetStoragePoolProvidedTestCurrentStoragePoolNotEqualsTargetPool() {
Mockito.doReturn(true).when(storagePoolVoMock).isManaged(); Mockito.doReturn(true).when(storagePoolVoMock).isManaged();
// return any storage type except powerflex/scaleio
List<Storage.StoragePoolType> values = Arrays.asList(Storage.StoragePoolType.values());
when(storagePoolVoMock.getPoolType()).thenAnswer((Answer<Storage.StoragePoolType>) invocation -> {
List<Storage.StoragePoolType> filteredValues = values.stream().filter(v -> v != Storage.StoragePoolType.PowerFlex).collect(Collectors.toList());
int randomIndex = new Random().nextInt(filteredValues.size());
return filteredValues.get(randomIndex); });
virtualMachineManagerImpl.executeManagedStorageChecksWhenTargetStoragePoolProvided(storagePoolVoMock, volumeVoMock, Mockito.mock(StoragePoolVO.class)); virtualMachineManagerImpl.executeManagedStorageChecksWhenTargetStoragePoolProvided(storagePoolVoMock, volumeVoMock, Mockito.mock(StoragePoolVO.class));
} }
@ -838,4 +865,34 @@ public class VirtualMachineManagerImplTest {
Mockito.when(templateZoneDao.findByZoneTemplate(dcId, templateId)).thenReturn(Mockito.mock(VMTemplateZoneVO.class)); Mockito.when(templateZoneDao.findByZoneTemplate(dcId, templateId)).thenReturn(Mockito.mock(VMTemplateZoneVO.class));
virtualMachineManagerImpl.checkIfTemplateNeededForCreatingVmVolumes(vm); virtualMachineManagerImpl.checkIfTemplateNeededForCreatingVmVolumes(vm);
} }
/**
 * Exercises every early-return guard of checkAndAttemptMigrateVmAcrossCluster.
 * None of these calls should reach the migration path; the mocks are re-stubbed
 * between calls, so the statement order below is significant.
 */
@Test
public void checkAndAttemptMigrateVmAcrossClusterNonValid() {
    // Below scenarios shouldn't result in VM migration
    VMInstanceVO vm = Mockito.mock(VMInstanceVO.class);
    // Scenario 1: non-VMware hypervisor -> guard returns immediately
    Mockito.when(vm.getHypervisorType()).thenReturn(HypervisorType.KVM);
    virtualMachineManagerImpl.checkAndAttemptMigrateVmAcrossCluster(vm, 1L, new HashMap<>());
    // Scenario 2: VMware but no last host id -> guard returns
    Mockito.when(vm.getHypervisorType()).thenReturn(HypervisorType.VMware);
    Mockito.when(vm.getLastHostId()).thenReturn(null);
    virtualMachineManagerImpl.checkAndAttemptMigrateVmAcrossCluster(vm, 1L, new HashMap<>());
    // Scenario 3: last host already belongs to the destination cluster -> no migration
    Long destinationClusterId = 10L;
    Mockito.when(vm.getLastHostId()).thenReturn(1L);
    HostVO hostVO = Mockito.mock(HostVO.class);
    Mockito.when(hostVO.getClusterId()).thenReturn(destinationClusterId);
    Mockito.when(hostDaoMock.findById(1L)).thenReturn(hostVO);
    virtualMachineManagerImpl.checkAndAttemptMigrateVmAcrossCluster(vm, destinationClusterId, new HashMap<>());
    // Scenario 4: no volume pool is on the destination cluster (pools on cluster
    // 10 and null, destination is 20) -> no migration
    destinationClusterId = 20L;
    Map<Volume, StoragePool> map = new HashMap<>();
    StoragePool pool1 = Mockito.mock(StoragePool.class);
    Mockito.when(pool1.getClusterId()).thenReturn(10L);
    map.put(Mockito.mock(Volume.class), pool1);
    StoragePool pool2 = Mockito.mock(StoragePool.class);
    Mockito.when(pool2.getClusterId()).thenReturn(null);
    map.put(Mockito.mock(Volume.class), pool2);
    virtualMachineManagerImpl.checkAndAttemptMigrateVmAcrossCluster(vm, destinationClusterId, map);
}
} }

View File

@ -1648,6 +1648,10 @@ public class VolumeServiceImpl implements VolumeService {
newVol.setPoolType(pool.getPoolType()); newVol.setPoolType(pool.getPoolType());
newVol.setLastPoolId(lastPoolId); newVol.setLastPoolId(lastPoolId);
newVol.setPodId(pool.getPodId()); newVol.setPodId(pool.getPodId());
if (volume.getPassphraseId() != null) {
newVol.setPassphraseId(volume.getPassphraseId());
newVol.setEncryptFormat(volume.getEncryptFormat());
}
return volDao.persist(newVol); return volDao.persist(newVol);
} }

View File

@ -21,6 +21,7 @@ import java.util.Map;
import org.apache.log4j.Logger; import org.apache.log4j.Logger;
import org.libvirt.Connect; import org.libvirt.Connect;
import org.libvirt.Library;
import org.libvirt.LibvirtException; import org.libvirt.LibvirtException;
import com.cloud.hypervisor.Hypervisor; import com.cloud.hypervisor.Hypervisor;
@ -44,6 +45,7 @@ public class LibvirtConnection {
if (conn == null) { if (conn == null) {
s_logger.info("No existing libvirtd connection found. Opening a new one"); s_logger.info("No existing libvirtd connection found. Opening a new one");
conn = new Connect(hypervisorURI, false); conn = new Connect(hypervisorURI, false);
Library.initEventLoop();
s_logger.debug("Successfully connected to libvirt at: " + hypervisorURI); s_logger.debug("Successfully connected to libvirt at: " + hypervisorURI);
s_connections.put(hypervisorURI, conn); s_connections.put(hypervisorURI, conn);
} else { } else {

View File

@ -29,27 +29,255 @@ import com.cloud.hypervisor.kvm.storage.KVMStoragePool;
import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager; import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager;
import com.cloud.resource.CommandWrapper; import com.cloud.resource.CommandWrapper;
import com.cloud.resource.ResourceWrapper; import com.cloud.resource.ResourceWrapper;
import com.cloud.storage.Storage;
import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.IOException;
import java.io.StringWriter;
import java.util.Map; import java.util.Map;
import java.util.UUID; import java.util.UUID;
import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient;
import org.apache.cloudstack.storage.datastore.util.ScaleIOUtil;
import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; import org.apache.cloudstack.storage.to.PrimaryDataStoreTO;
import org.apache.cloudstack.storage.to.VolumeObjectTO; import org.apache.cloudstack.storage.to.VolumeObjectTO;
import org.apache.commons.lang3.ArrayUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.log4j.Logger; import org.apache.log4j.Logger;
import org.libvirt.Connect;
import org.libvirt.Domain;
import org.libvirt.DomainBlockJobInfo;
import org.libvirt.DomainInfo;
import org.libvirt.TypedParameter;
import org.libvirt.TypedUlongParameter;
import org.libvirt.LibvirtException;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.NodeList;
import org.xml.sax.SAXException;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
import javax.xml.transform.OutputKeys;
import javax.xml.transform.Transformer;
import javax.xml.transform.TransformerException;
import javax.xml.transform.TransformerFactory;
import javax.xml.transform.dom.DOMSource;
import javax.xml.transform.stream.StreamResult;
@ResourceWrapper(handles = MigrateVolumeCommand.class) @ResourceWrapper(handles = MigrateVolumeCommand.class)
public final class LibvirtMigrateVolumeCommandWrapper extends CommandWrapper<MigrateVolumeCommand, Answer, LibvirtComputingResource> { public class LibvirtMigrateVolumeCommandWrapper extends CommandWrapper<MigrateVolumeCommand, Answer, LibvirtComputingResource> {
private static final Logger LOGGER = Logger.getLogger(LibvirtMigrateVolumeCommandWrapper.class); private static final Logger LOGGER = Logger.getLogger(LibvirtMigrateVolumeCommandWrapper.class);
@Override @Override
public Answer execute(final MigrateVolumeCommand command, final LibvirtComputingResource libvirtComputingResource) { public Answer execute(final MigrateVolumeCommand command, final LibvirtComputingResource libvirtComputingResource) {
VolumeObjectTO srcVolumeObjectTO = (VolumeObjectTO)command.getSrcData();
PrimaryDataStoreTO srcPrimaryDataStore = (PrimaryDataStoreTO)srcVolumeObjectTO.getDataStore();
MigrateVolumeAnswer answer;
if (srcPrimaryDataStore.getPoolType().equals(Storage.StoragePoolType.PowerFlex)) {
answer = migratePowerFlexVolume(command, libvirtComputingResource);
} else {
answer = migrateRegularVolume(command, libvirtComputingResource);
}
return answer;
}
/**
 * Live-migrates a PowerFlex (ScaleIO) volume attached to a running VM using a
 * libvirt block copy job: connects the destination disk on this host, rewrites
 * the domain's disk definition to point at the destination device, starts
 * dm.blockCopy with REUSE_EXT, and then polls the job via checkBlockJobStatus.
 *
 * On any failure after the block copy started, the job is aborted (ASYNC) and a
 * failed answer is returned. The Domain handle is always freed in the finally
 * block.
 */
protected MigrateVolumeAnswer migratePowerFlexVolume(final MigrateVolumeCommand command, final LibvirtComputingResource libvirtComputingResource) {
    // Source Details
    VolumeObjectTO srcVolumeObjectTO = (VolumeObjectTO)command.getSrcData();
    String srcPath = srcVolumeObjectTO.getPath();
    final String srcVolumeId = ScaleIOUtil.getVolumePath(srcVolumeObjectTO.getPath());
    final String vmName = srcVolumeObjectTO.getVmName();
    // Destination Details
    VolumeObjectTO destVolumeObjectTO = (VolumeObjectTO)command.getDestData();
    String destPath = destVolumeObjectTO.getPath();
    final String destVolumeId = ScaleIOUtil.getVolumePath(destVolumeObjectTO.getPath());
    Map<String, String> destDetails = command.getDestDetails();
    final String destSystemId = destDetails.get(ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID);
    String destDiskLabel = null;
    // Host-local device path of the destination ScaleIO disk (prefix-systemId-volumeId)
    final String destDiskFileName = ScaleIOUtil.DISK_NAME_PREFIX + destSystemId + "-" + destVolumeId;
    final String diskFilePath = ScaleIOUtil.DISK_PATH + File.separator + destDiskFileName;
    Domain dm = null;
    try {
        final LibvirtUtilitiesHelper libvirtUtilitiesHelper = libvirtComputingResource.getLibvirtUtilitiesHelper();
        Connect conn = libvirtUtilitiesHelper.getConnection();
        dm = libvirtComputingResource.getDomain(conn, vmName);
        if (dm == null) {
            return new MigrateVolumeAnswer(command, false, "Migrate volume failed due to can not find vm: " + vmName, null);
        }
        // blockCopy requires a running domain
        DomainInfo.DomainState domainState = dm.getInfo().state ;
        if (domainState != DomainInfo.DomainState.VIR_DOMAIN_RUNNING) {
            return new MigrateVolumeAnswer(command, false, "Migrate volume failed due to VM is not running: " + vmName + " with domainState = " + domainState, null);
        }
        // Make the destination volume visible as a physical disk on this host
        final KVMStoragePoolManager storagePoolMgr = libvirtComputingResource.getStoragePoolMgr();
        PrimaryDataStoreTO spool = (PrimaryDataStoreTO)destVolumeObjectTO.getDataStore();
        KVMStoragePool pool = storagePoolMgr.getStoragePool(spool.getPoolType(), spool.getUuid());
        pool.connectPhysicalDisk(destVolumeObjectTO.getPath(), null);
        // For encrypted volumes, register libvirt secrets for both sides; the
        // destination secret uuid is embedded into the generated disk XML
        String srcSecretUUID = null;
        String destSecretUUID = null;
        if (ArrayUtils.isNotEmpty(destVolumeObjectTO.getPassphrase())) {
            srcSecretUUID = libvirtComputingResource.createLibvirtVolumeSecret(conn, srcVolumeObjectTO.getPath(), srcVolumeObjectTO.getPassphrase());
            destSecretUUID = libvirtComputingResource.createLibvirtVolumeSecret(conn, destVolumeObjectTO.getPath(), destVolumeObjectTO.getPassphrase());
        }
        String diskdef = generateDestinationDiskXML(dm, srcVolumeId, diskFilePath, destSecretUUID);
        // NOTE(review): diskdef is null when no disk matches srcVolumeId; the
        // resulting NPE in generateDestinationDiskLabel is handled by the catch below
        destDiskLabel = generateDestinationDiskLabel(diskdef);
        // bandwidth = 0 means unlimited copy rate
        TypedUlongParameter parameter = new TypedUlongParameter("bandwidth", 0);
        TypedParameter[] parameters = new TypedParameter[1];
        parameters[0] = parameter;
        dm.blockCopy(destDiskLabel, diskdef, parameters, Domain.BlockCopyFlags.REUSE_EXT);
        LOGGER.info(String.format("Block copy has started for the volume %s : %s ", destDiskLabel, srcPath));
        return checkBlockJobStatus(command, dm, destDiskLabel, srcPath, destPath, libvirtComputingResource, conn, srcSecretUUID);
    } catch (Exception e) {
        String msg = "Migrate volume failed due to " + e.toString();
        LOGGER.warn(msg, e);
        if (destDiskLabel != null) {
            // A label implies blockCopy may have started; abort the job best-effort
            try {
                dm.blockJobAbort(destDiskLabel, Domain.BlockJobAbortFlags.ASYNC);
            } catch (LibvirtException ex) {
                LOGGER.error("Migrate volume failed while aborting the block job due to " + ex.getMessage());
            }
        }
        return new MigrateVolumeAnswer(command, false, msg, null);
    } finally {
        if (dm != null) {
            try {
                dm.free();
            } catch (LibvirtException l) {
                LOGGER.trace("Ignoring libvirt error.", l);
            };
        }
    }
}
/**
 * Polls the libvirt block copy job for the given disk once per second until it
 * completes or the command's wait budget (in seconds) is exhausted.
 *
 * On completion the job is pivoted (BlockJobAbortFlags.PIVOT) so the domain
 * switches to the destination disk, and the source volume's libvirt secret (if
 * any) is removed. On timeout the job is aborted asynchronously and a failed
 * answer is returned.
 *
 * @param command       the migrate command (provides the wait budget)
 * @param dm            running libvirt domain owning the block job
 * @param diskLabel     target device label of the disk being copied (e.g. "vdb")
 * @param srcPath       source volume path, used for logging only
 * @param destPath      destination volume path, returned in the success answer
 * @param srcSecretUUID libvirt secret uuid of the source volume, removed after pivot; may be null/empty
 * @return success answer with destPath, or a failed answer on timeout
 * @throws LibvirtException if querying or aborting the job fails outside the guarded calls
 */
protected MigrateVolumeAnswer checkBlockJobStatus(MigrateVolumeCommand command, Domain dm, String diskLabel, String srcPath, String destPath, LibvirtComputingResource libvirtComputingResource, Connect conn, String srcSecretUUID) throws LibvirtException {
    int timeBetweenTries = 1000; // Try more frequently (every sec) and return early if disk is found
    int waitTimeInSec = command.getWait();
    while (waitTimeInSec > 0) {
        DomainBlockJobInfo blockJobInfo = dm.getBlockJobInfo(diskLabel, 0);
        if (blockJobInfo != null) {
            LOGGER.debug(String.format("Volume %s : %s block copy progress: %s%% current value:%s end value:%s", diskLabel, srcPath, (blockJobInfo.end == 0)? 0 : 100*(blockJobInfo.cur / (double) blockJobInfo.end), blockJobInfo.cur, blockJobInfo.end));
            // NOTE(review): cur == end also holds for a just-started job reporting
            // 0/0 — assumes libvirt only reports equal values once the copy is
            // actually complete; confirm against libvirt semantics.
            if (blockJobInfo.cur == blockJobInfo.end) {
                LOGGER.info(String.format("Block copy completed for the volume %s : %s", diskLabel, srcPath));
                dm.blockJobAbort(diskLabel, Domain.BlockJobAbortFlags.PIVOT);
                if (StringUtils.isNotEmpty(srcSecretUUID)) {
                    libvirtComputingResource.removeLibvirtVolumeSecret(conn, srcSecretUUID);
                }
                break;
            }
        } else {
            // No job info: try to abort, then keep polling until the budget runs out
            LOGGER.info("Failed to get the block copy status, trying to abort the job");
            dm.blockJobAbort(diskLabel, Domain.BlockJobAbortFlags.ASYNC);
        }
        waitTimeInSec--;
        try {
            Thread.sleep(timeBetweenTries);
        } catch (InterruptedException ex) {
            // Restore the interrupt status instead of swallowing it, so the
            // calling thread (agent executor) can still observe the interruption.
            Thread.currentThread().interrupt();
        }
    }
    if (waitTimeInSec <= 0) {
        String msg = "Block copy is taking long time, failing the job";
        LOGGER.error(msg);
        try {
            dm.blockJobAbort(diskLabel, Domain.BlockJobAbortFlags.ASYNC);
        } catch (LibvirtException ex) {
            LOGGER.error("Migrate volume failed while aborting the block job due to " + ex.getMessage());
        }
        return new MigrateVolumeAnswer(command, false, msg, null);
    }
    return new MigrateVolumeAnswer(command, true, null, destPath);
}
/**
 * Extracts the target device label (the "dev" attribute of the "target"
 * element, e.g. "vdb") from a disk definition XML snippet.
 */
private String generateDestinationDiskLabel(String diskXml) throws ParserConfigurationException, IOException, SAXException {
    DocumentBuilder builder = DocumentBuilderFactory.newInstance().newDocumentBuilder();
    Document document = builder.parse(new ByteArrayInputStream(diskXml.getBytes("UTF-8")));
    document.getDocumentElement().normalize();
    Element diskElement = document.getDocumentElement();
    return getAttrValue("target", "dev", diskElement);
}
/**
 * Builds the destination disk XML for a block copy: scans the domain's disk
 * definitions for the non-network disk whose source "dev" path contains the
 * given source volume id, rewrites that path to diskFilePath (and the
 * encryption secret uuid, when provided), and returns the modified disk
 * element serialized as XML. Returns null when no matching disk is found.
 */
protected String generateDestinationDiskXML(Domain dm, String srcVolumeId, String diskFilePath, String destSecretUUID) throws LibvirtException, ParserConfigurationException, IOException, TransformerException, SAXException {
    DocumentBuilder builder = DocumentBuilderFactory.newInstance().newDocumentBuilder();
    Document domainDoc = builder.parse(new ByteArrayInputStream(dm.getXMLDesc(0).getBytes("UTF-8")));
    domainDoc.getDocumentElement().normalize();
    NodeList diskNodes = domainDoc.getElementsByTagName("disk");
    for (int index = 0; index < diskNodes.getLength(); index++) {
        Element diskElement = (Element)diskNodes.item(index);
        if ("network".equalsIgnoreCase(diskElement.getAttribute("type"))) {
            continue;
        }
        String sourceDev = getAttrValue("source", "dev", diskElement);
        if (StringUtils.isEmpty(sourceDev) || !sourceDev.contains(srcVolumeId)) {
            continue;
        }
        setAttrValue("source", "dev", diskFilePath, diskElement);
        if (StringUtils.isNotEmpty(destSecretUUID)) {
            setAttrValue("secret", "uuid", destSecretUUID, diskElement);
        }
        // Serialize just this disk element, without an XML declaration
        StringWriter serialized = new StringWriter();
        Transformer transformer = TransformerFactory.newInstance().newTransformer();
        transformer.setOutputProperty(OutputKeys.OMIT_XML_DECLARATION, "yes");
        transformer.transform(new DOMSource(diskElement), new StreamResult(serialized));
        return serialized.toString();
    }
    return null;
}
/**
 * Returns the given attribute of the first descendant element with the given
 * tag name, or null when no such element exists. Note DOM's getAttribute
 * returns an empty string (not null) for a missing attribute.
 */
private static String getAttrValue(String tag, String attr, Element eElement) {
    NodeList matches = eElement.getElementsByTagName(tag);
    if (matches.getLength() > 0) {
        return ((Element)matches.item(0)).getAttribute(attr);
    }
    return null;
}
/**
 * Sets the given attribute on the first descendant element with the given tag
 * name; silently does nothing when no such element exists.
 */
private static void setAttrValue(String tag, String attr, String newValue, Element eElement) {
    NodeList matches = eElement.getElementsByTagName(tag);
    if (matches.getLength() > 0) {
        ((Element)matches.item(0)).setAttribute(attr, newValue);
    }
}
protected MigrateVolumeAnswer migrateRegularVolume(final MigrateVolumeCommand command, final LibvirtComputingResource libvirtComputingResource) {
KVMStoragePoolManager storagePoolManager = libvirtComputingResource.getStoragePoolMgr(); KVMStoragePoolManager storagePoolManager = libvirtComputingResource.getStoragePoolMgr();
VolumeObjectTO srcVolumeObjectTO = (VolumeObjectTO)command.getSrcData(); VolumeObjectTO srcVolumeObjectTO = (VolumeObjectTO)command.getSrcData();
PrimaryDataStoreTO srcPrimaryDataStore = (PrimaryDataStoreTO)srcVolumeObjectTO.getDataStore(); PrimaryDataStoreTO srcPrimaryDataStore = (PrimaryDataStoreTO)srcVolumeObjectTO.getDataStore();
Map<String, String> srcDetails = command.getSrcDetails(); Map<String, String> srcDetails = command.getSrcDetails();
String srcPath = srcDetails != null ? srcDetails.get(DiskTO.IQN) : srcVolumeObjectTO.getPath(); String srcPath = srcDetails != null ? srcDetails.get(DiskTO.IQN) : srcVolumeObjectTO.getPath();
VolumeObjectTO destVolumeObjectTO = (VolumeObjectTO)command.getDestData(); VolumeObjectTO destVolumeObjectTO = (VolumeObjectTO)command.getDestData();

View File

@ -37,6 +37,7 @@ import java.util.UUID;
import javax.naming.ConfigurationException; import javax.naming.ConfigurationException;
import com.cloud.storage.ScopeType; import com.cloud.storage.ScopeType;
import com.cloud.storage.Volume;
import org.apache.cloudstack.agent.directdownload.DirectDownloadAnswer; import org.apache.cloudstack.agent.directdownload.DirectDownloadAnswer;
import org.apache.cloudstack.agent.directdownload.DirectDownloadCommand; import org.apache.cloudstack.agent.directdownload.DirectDownloadCommand;
import org.apache.cloudstack.agent.directdownload.HttpDirectDownloadCommand; import org.apache.cloudstack.agent.directdownload.HttpDirectDownloadCommand;
@ -2448,7 +2449,12 @@ public class KVMStorageProcessor implements StorageProcessor {
destPool = storagePoolMgr.getStoragePool(destPrimaryStore.getPoolType(), destPrimaryStore.getUuid()); destPool = storagePoolMgr.getStoragePool(destPrimaryStore.getPoolType(), destPrimaryStore.getUuid());
try { try {
storagePoolMgr.copyPhysicalDisk(volume, destVolumeName, destPool, cmd.getWaitInMillSeconds()); if (srcVol.getPassphrase() != null && srcVol.getVolumeType().equals(Volume.Type.ROOT)) {
volume.setQemuEncryptFormat(QemuObject.EncryptFormat.LUKS);
storagePoolMgr.copyPhysicalDisk(volume, destVolumeName, destPool, cmd.getWaitInMillSeconds(), srcVol.getPassphrase(), destVol.getPassphrase(), srcVol.getProvisioningType());
} else {
storagePoolMgr.copyPhysicalDisk(volume, destVolumeName, destPool, cmd.getWaitInMillSeconds());
}
} catch (Exception e) { // Any exceptions while copying the disk, should send failed answer with the error message } catch (Exception e) { // Any exceptions while copying the disk, should send failed answer with the error message
String errMsg = String.format("Failed to copy volume: %s to dest storage: %s, due to %s", srcVol.getName(), destPrimaryStore.getName(), e.toString()); String errMsg = String.format("Failed to copy volume: %s to dest storage: %s, due to %s", srcVol.getName(), destPrimaryStore.getName(), e.toString());
s_logger.debug(errMsg, e); s_logger.debug(errMsg, e);
@ -2467,6 +2473,7 @@ public class KVMStorageProcessor implements StorageProcessor {
String path = destPrimaryStore.isManaged() ? destVolumeName : destVolumePath + File.separator + destVolumeName; String path = destPrimaryStore.isManaged() ? destVolumeName : destVolumePath + File.separator + destVolumeName;
newVol.setPath(path); newVol.setPath(path);
newVol.setFormat(destFormat); newVol.setFormat(destFormat);
newVol.setEncryptFormat(destVol.getEncryptFormat());
return new CopyCmdAnswer(newVol); return new CopyCmdAnswer(newVol);
} catch (final CloudRuntimeException e) { } catch (final CloudRuntimeException e) {
s_logger.debug("Failed to copyVolumeFromPrimaryToPrimary: ", e); s_logger.debug("Failed to copyVolumeFromPrimaryToPrimary: ", e);

View File

@ -387,6 +387,7 @@ public class ScaleIOStorageAdaptor implements StorageAdaptor {
boolean forceSourceFormat = srcQemuFile.getFormat() == QemuImg.PhysicalDiskFormat.RAW; boolean forceSourceFormat = srcQemuFile.getFormat() == QemuImg.PhysicalDiskFormat.RAW;
LOGGER.debug(String.format("Starting copy from source disk %s(%s) to PowerFlex volume %s(%s), forcing source format is %b", srcQemuFile.getFileName(), srcQemuFile.getFormat(), destQemuFile.getFileName(), destQemuFile.getFormat(), forceSourceFormat)); LOGGER.debug(String.format("Starting copy from source disk %s(%s) to PowerFlex volume %s(%s), forcing source format is %b", srcQemuFile.getFileName(), srcQemuFile.getFormat(), destQemuFile.getFileName(), destQemuFile.getFormat(), forceSourceFormat));
qemuImageOpts.setImageOptsFlag(true);
qemu.convert(srcQemuFile, destQemuFile, options, qemuObjects, qemuImageOpts,null, forceSourceFormat); qemu.convert(srcQemuFile, destQemuFile, options, qemuObjects, qemuImageOpts,null, forceSourceFormat);
LOGGER.debug("Successfully converted source disk image " + srcQemuFile.getFileName() + " to PowerFlex volume: " + destDisk.getPath()); LOGGER.debug("Successfully converted source disk image " + srcQemuFile.getFileName() + " to PowerFlex volume: " + destDisk.getPath());

View File

@ -30,6 +30,7 @@ public class QemuImageOptions {
private static final String LUKS_KEY_SECRET_PARAM_KEY = "key-secret"; private static final String LUKS_KEY_SECRET_PARAM_KEY = "key-secret";
private static final String QCOW2_KEY_SECRET_PARAM_KEY = "encrypt.key-secret"; private static final String QCOW2_KEY_SECRET_PARAM_KEY = "encrypt.key-secret";
private static final String DRIVER = "driver"; private static final String DRIVER = "driver";
private boolean addImageOpts = false;
private QemuImg.PhysicalDiskFormat format; private QemuImg.PhysicalDiskFormat format;
private static final List<QemuImg.PhysicalDiskFormat> DISK_FORMATS_THAT_SUPPORT_OPTION_IMAGE_OPTS = Arrays.asList(QemuImg.PhysicalDiskFormat.QCOW2, QemuImg.PhysicalDiskFormat.LUKS); private static final List<QemuImg.PhysicalDiskFormat> DISK_FORMATS_THAT_SUPPORT_OPTION_IMAGE_OPTS = Arrays.asList(QemuImg.PhysicalDiskFormat.QCOW2, QemuImg.PhysicalDiskFormat.LUKS);
@ -71,13 +72,19 @@ public class QemuImageOptions {
} }
} }
public void setImageOptsFlag(boolean addImageOpts) {
this.addImageOpts = addImageOpts;
}
/** /**
* Converts QemuImageOptions into the command strings required by qemu-img flags * Converts QemuImageOptions into the command strings required by qemu-img flags
* @return array of strings representing command flag and value (--image-opts) * @return array of strings representing command flag and value (--image-opts)
*/ */
public String[] toCommandFlag() { public String[] toCommandFlag() {
if (format == null || !DISK_FORMATS_THAT_SUPPORT_OPTION_IMAGE_OPTS.contains(format)) { if (!addImageOpts) {
return new String[] { params.get(FILENAME_PARAM_KEY) }; if (format == null || !DISK_FORMATS_THAT_SUPPORT_OPTION_IMAGE_OPTS.contains(format)) {
return new String[] { params.get(FILENAME_PARAM_KEY) };
}
} }
Map<String, String> sorted = new TreeMap<>(params); Map<String, String> sorted = new TreeMap<>(params);
String paramString = Joiner.on(",").withKeyValueSeparator("=").join(sorted); String paramString = Joiner.on(",").withKeyValueSeparator("=").join(sorted);

View File

@ -0,0 +1,388 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package com.cloud.hypervisor.kvm.resource.wrapper;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.storage.MigrateVolumeAnswer;
import com.cloud.agent.api.storage.MigrateVolumeCommand;
import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
import com.cloud.hypervisor.kvm.storage.KVMStoragePool;
import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager;
import com.cloud.storage.Storage;
import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient;
import org.apache.cloudstack.storage.to.PrimaryDataStoreTO;
import org.apache.cloudstack.storage.to.VolumeObjectTO;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.libvirt.Connect;
import org.libvirt.Domain;
import org.libvirt.DomainBlockJobInfo;
import org.libvirt.DomainInfo;
import org.libvirt.LibvirtException;
import org.libvirt.TypedParameter;
import org.mockito.InjectMocks;
import org.mockito.Matchers;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.Spy;
import org.mockito.runners.MockitoJUnitRunner;
import org.xml.sax.SAXException;
import javax.xml.parsers.ParserConfigurationException;
import javax.xml.transform.TransformerException;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
/**
 * Unit tests for {@code LibvirtMigrateVolumeCommandWrapper}.
 *
 * Covers:
 * - dispatch in {@code execute()} between the PowerFlex-specific path and the
 *   regular (non-PowerFlex) volume migration path, based on the source pool type;
 * - the PowerFlex live migration flow ({@code migratePowerFlexVolume}), both the
 *   success case and the failure case when libvirt's blockCopy throws;
 * - the block-job polling logic ({@code checkBlockJobStatus}) for a finished and
 *   an unfinished copy job.
 */
@RunWith(MockitoJUnitRunner.class)
public class LibvirtMigrateVolumeCommandWrapperTest {

    // Spy so individual migrate/poll methods can be stubbed while execute() runs for real.
    @Spy
    @InjectMocks
    private LibvirtMigrateVolumeCommandWrapper libvirtMigrateVolumeCommandWrapper;

    @Mock
    MigrateVolumeCommand command;

    @Mock
    LibvirtComputingResource libvirtComputingResource;

    @Mock
    LibvirtUtilitiesHelper libvirtUtilitiesHelper;

    // Libvirt domain XML of a running VM (i-2-27-VM) with two PowerFlex-backed
    // virtio disks (vda, vdb). The wrapper parses this XML to find the disk
    // label whose source device matches the volume path being migrated.
    private String domxml = "<domain type='kvm' id='1'>\n" +
            " <name>i-2-27-VM</name>\n" +
            " <uuid>2d37fe1a-621a-4903-9ab5-5c9544c733f8</uuid>\n" +
            " <description>Ubuntu 18.04 LTS</description>\n" +
            " <memory unit='KiB'>524288</memory>\n" +
            " <currentMemory unit='KiB'>524288</currentMemory>\n" +
            " <vcpu placement='static'>1</vcpu>\n" +
            " <cputune>\n" +
            " <shares>256</shares>\n" +
            " </cputune>\n" +
            " <resource>\n" +
            " <partition>/machine</partition>\n" +
            " </resource>\n" +
            " <sysinfo type='smbios'>\n" +
            " <system>\n" +
            " <entry name='manufacturer'>Apache Software Foundation</entry>\n" +
            " <entry name='product'>CloudStack KVM Hypervisor</entry>\n" +
            " <entry name='uuid'>2d37fe1a-621a-4903-9ab5-5c9544c733f8</entry>\n" +
            " </system>\n" +
            " </sysinfo>\n" +
            " <os>\n" +
            " <type arch='x86_64' machine='pc-i440fx-rhel7.6.0'>hvm</type>\n" +
            " <boot dev='cdrom'/>\n" +
            " <boot dev='hd'/>\n" +
            " <smbios mode='sysinfo'/>\n" +
            " </os>\n" +
            " <features>\n" +
            " <acpi/>\n" +
            " <apic/>\n" +
            " <pae/>\n" +
            " </features>\n" +
            " <cpu mode='custom' match='exact' check='full'>\n" +
            " <model fallback='forbid'>qemu64</model>\n" +
            " <feature policy='require' name='x2apic'/>\n" +
            " <feature policy='require' name='hypervisor'/>\n" +
            " <feature policy='require' name='lahf_lm'/>\n" +
            " <feature policy='disable' name='svm'/>\n" +
            " </cpu>\n" +
            " <clock offset='utc'>\n" +
            " <timer name='kvmclock'/>\n" +
            " </clock>\n" +
            " <on_poweroff>destroy</on_poweroff>\n" +
            " <on_reboot>restart</on_reboot>\n" +
            " <on_crash>destroy</on_crash>\n" +
            " <devices>\n" +
            " <emulator>/usr/libexec/qemu-kvm</emulator>\n" +
            " <disk type='block' device='disk'>\n" +
            " <driver name='qemu' type='raw' cache='none'/>\n" +
            " <source dev='/dev/disk/by-id/emc-vol-610204d03e3ad60f-bec108c400000018' index='4'/>\n" +
            " <backingStore/>\n" +
            " <target dev='vda' bus='virtio'/>\n" +
            " <serial>38a54bf719f24af6b070</serial>\n" +
            " <alias name='virtio-disk0'/>\n" +
            " <address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>\n" +
            " </disk>\n" +
            " <disk type='block' device='disk'>\n" +
            " <driver name='qemu' type='raw' cache='none'/>\n" +
            " <source dev='/dev/disk/by-id/emc-vol-7332760565f6340f-01b381820000001c' index='2'/>\n" +
            " <backingStore/>\n" +
            " <target dev='vdb' bus='virtio'/>\n" +
            " <serial>0ceeb7c643b447aba5ce</serial>\n" +
            " <alias name='virtio-disk1'/>\n" +
            " <address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>\n" +
            " </disk>\n" +
            " <disk type='file' device='cdrom'>\n" +
            " <driver name='qemu'/>\n" +
            " <target dev='hdc' bus='ide'/>\n" +
            " <readonly/>\n" +
            " <alias name='ide0-1-0'/>\n" +
            " <address type='drive' controller='0' bus='1' target='0' unit='0'/>\n" +
            " </disk>\n" +
            " <controller type='usb' index='0' model='piix3-uhci'>\n" +
            " <alias name='usb'/>\n" +
            " <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x2'/>\n" +
            " </controller>\n" +
            " <controller type='pci' index='0' model='pci-root'>\n" +
            " <alias name='pci.0'/>\n" +
            " </controller>\n" +
            " <controller type='ide' index='0'>\n" +
            " <alias name='ide'/>\n" +
            " <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x1'/>\n" +
            " </controller>\n" +
            " <controller type='virtio-serial' index='0'>\n" +
            " <alias name='virtio-serial0'/>\n" +
            " <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>\n" +
            " </controller>\n" +
            " <interface type='bridge'>\n" +
            " <mac address='02:00:23:fd:00:17'/>\n" +
            " <source bridge='breth1-1640'/>\n" +
            " <bandwidth>\n" +
            " <inbound average='25600' peak='25600'/>\n" +
            " <outbound average='25600' peak='25600'/>\n" +
            " </bandwidth>\n" +
            " <target dev='vnet0'/>\n" +
            " <model type='virtio'/>\n" +
            " <link state='up'/>\n" +
            " <alias name='net0'/>\n" +
            " <address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>\n" +
            " </interface>\n" +
            " <serial type='pty'>\n" +
            " <source path='/dev/pts/1'/>\n" +
            " <target type='isa-serial' port='0'>\n" +
            " <model name='isa-serial'/>\n" +
            " </target>\n" +
            " <alias name='serial0'/>\n" +
            " </serial>\n" +
            " <console type='pty' tty='/dev/pts/1'>\n" +
            " <source path='/dev/pts/1'/>\n" +
            " <target type='serial' port='0'/>\n" +
            " <alias name='serial0'/>\n" +
            " </console>\n" +
            " <channel type='unix'>\n" +
            " <source mode='bind' path='/var/lib/libvirt/qemu/i-2-27-VM.org.qemu.guest_agent.0'/>\n" +
            " <target type='virtio' name='org.qemu.guest_agent.0' state='connected'/>\n" +
            " <alias name='channel0'/>\n" +
            " <address type='virtio-serial' controller='0' bus='0' port='1'/>\n" +
            " </channel>\n" +
            " <input type='tablet' bus='usb'>\n" +
            " <alias name='input0'/>\n" +
            " <address type='usb' bus='0' port='1'/>\n" +
            " </input>\n" +
            " <input type='mouse' bus='ps2'>\n" +
            " <alias name='input1'/>\n" +
            " </input>\n" +
            " <input type='keyboard' bus='ps2'>\n" +
            " <alias name='input2'/>\n" +
            " </input>\n" +
            " <graphics type='vnc' port='5900' autoport='yes' listen='10.0.32.170'>\n" +
            " <listen type='address' address='10.0.32.170'/>\n" +
            " </graphics>\n" +
            " <audio id='1' type='none'/>\n" +
            " <video>\n" +
            " <model type='cirrus' vram='16384' heads='1' primary='yes'/>\n" +
            " <alias name='video0'/>\n" +
            " <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>\n" +
            " </video>\n" +
            " <watchdog model='i6300esb' action='none'>\n" +
            " <alias name='watchdog0'/>\n" +
            " <address type='pci' domain='0x0000' bus='0x00' slot='0x08' function='0x0'/>\n" +
            " </watchdog>\n" +
            " <memballoon model='virtio'>\n" +
            " <alias name='balloon0'/>\n" +
            " <address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>\n" +
            " </memballoon>\n" +
            " </devices>\n" +
            " <seclabel type='dynamic' model='dac' relabel='yes'>\n" +
            " <label>+0:+0</label>\n" +
            " <imagelabel>+0:+0</imagelabel>\n" +
            " </seclabel>\n" +
            "</domain>\n";

    /**
     * A PowerFlex source pool must route execute() to migratePowerFlexVolume();
     * the answer produced by that path is returned as-is.
     */
    @Test
    public void testPowerFlexMigrateVolumeMethod() {
        VolumeObjectTO srcVolumeObjectTO = Mockito.mock(VolumeObjectTO.class);
        Mockito.doReturn(srcVolumeObjectTO).when(command).getSrcData();
        PrimaryDataStoreTO srcPrimaryDataStore = Mockito.mock(PrimaryDataStoreTO.class);
        Mockito.doReturn(srcPrimaryDataStore).when(srcVolumeObjectTO).getDataStore();
        Mockito.doReturn(Storage.StoragePoolType.PowerFlex).when(srcPrimaryDataStore).getPoolType();

        MigrateVolumeAnswer powerFlexAnswer = Mockito.mock(MigrateVolumeAnswer.class);
        Mockito.doReturn(true).when(powerFlexAnswer).getResult();
        // Stub the PowerFlex path on the spy; execute() should pick it based on the pool type.
        Mockito.doReturn(powerFlexAnswer).when(libvirtMigrateVolumeCommandWrapper).migratePowerFlexVolume(command, libvirtComputingResource);

        Answer answer = libvirtMigrateVolumeCommandWrapper.execute(command, libvirtComputingResource);
        Assert.assertTrue(answer.getResult());
    }

    /**
     * A non-PowerFlex source pool (NFS here) must route execute() to
     * migrateRegularVolume(); the answer produced by that path is returned as-is.
     */
    @Test
    public void testRegularMigrateVolumeMethod() {
        VolumeObjectTO srcVolumeObjectTO = Mockito.mock(VolumeObjectTO.class);
        Mockito.doReturn(srcVolumeObjectTO).when(command).getSrcData();
        PrimaryDataStoreTO srcPrimaryDataStore = Mockito.mock(PrimaryDataStoreTO.class);
        Mockito.doReturn(srcPrimaryDataStore).when(srcVolumeObjectTO).getDataStore();
        Mockito.doReturn(Storage.StoragePoolType.NetworkFilesystem).when(srcPrimaryDataStore).getPoolType();

        MigrateVolumeAnswer regularVolumeAnswer = Mockito.mock(MigrateVolumeAnswer.class);
        Mockito.doReturn(false).when(regularVolumeAnswer).getResult();
        // Stub the regular path on the spy; execute() should pick it based on the pool type.
        Mockito.doReturn(regularVolumeAnswer).when(libvirtMigrateVolumeCommandWrapper).migrateRegularVolume(command, libvirtComputingResource);

        Answer answer = libvirtMigrateVolumeCommandWrapper.execute(command, libvirtComputingResource);
        Assert.assertFalse(answer.getResult());
    }

    /**
     * Happy path of migratePowerFlexVolume(): running domain, destination disk
     * connected, blockCopy succeeds, and the (stubbed) block-job poll reports
     * completion, so the final answer is successful.
     */
    @Test
    public void testMigratePowerFlexVolume() throws LibvirtException, ParserConfigurationException, IOException, TransformerException, SAXException {
        VolumeObjectTO srcVolumeObjectTO = Mockito.mock(VolumeObjectTO.class);
        Mockito.doReturn(srcVolumeObjectTO).when(command).getSrcData();
        // Paths are "<scaleio-volume-id>:<cloudstack-volume-name>"; the volume id half
        // matches the device suffix in domxml so the disk label (vda) can be resolved.
        String srcPath = "bec108c400000018:vol-60-7acb-9e22";
        Mockito.doReturn(srcPath).when(srcVolumeObjectTO).getPath();
        String vmName = "i-2-27-VM";
        Mockito.doReturn(vmName).when(srcVolumeObjectTO).getVmName();

        VolumeObjectTO destVolumeObjectTO = Mockito.mock(VolumeObjectTO.class);
        Mockito.doReturn(destVolumeObjectTO).when(command).getDestData();
        String destPath = "01b381820000001c:vol-60-ec76-b7dc";
        Mockito.doReturn(destPath).when(destVolumeObjectTO).getPath();

        Mockito.doReturn(libvirtUtilitiesHelper).when(libvirtComputingResource).getLibvirtUtilitiesHelper();
        Connect conn = Mockito.mock(Connect.class);
        Domain dm = Mockito.mock(Domain.class);
        Mockito.doReturn(conn).when(libvirtUtilitiesHelper).getConnection();
        Mockito.doReturn(dm).when(libvirtComputingResource).getDomain(conn, vmName);

        // Live block copy is only attempted on a running domain.
        DomainInfo domainInfo = Mockito.mock(DomainInfo.class);
        domainInfo.state = DomainInfo.DomainState.VIR_DOMAIN_RUNNING;
        Mockito.doReturn(domainInfo).when(dm).getInfo();

        KVMStoragePoolManager storagePoolMgr = Mockito.mock(KVMStoragePoolManager.class);
        Mockito.doReturn(storagePoolMgr).when(libvirtComputingResource).getStoragePoolMgr();
        PrimaryDataStoreTO spool = Mockito.mock(PrimaryDataStoreTO.class);
        Mockito.doReturn(spool).when(destVolumeObjectTO).getDataStore();
        KVMStoragePool pool = Mockito.mock(KVMStoragePool.class);
        Mockito.doReturn(pool).when(storagePoolMgr).getStoragePool(Mockito.any(), Mockito.any());
        Mockito.doReturn(true).when(pool).connectPhysicalDisk(Mockito.any(), Mockito.any());
        Mockito.doReturn(null).when(destVolumeObjectTO).getPassphrase();

        Mockito.doReturn(domxml).when(dm).getXMLDesc(0);
        Mockito.doNothing().when(dm).blockCopy(Matchers.anyString(), Matchers.anyString(), Matchers.any(TypedParameter[].class), Matchers.anyInt());

        // Short-circuit the poll loop: pretend the block job already completed.
        MigrateVolumeAnswer answer = new MigrateVolumeAnswer(command, true, null, destPath);
        Mockito.doReturn(answer).when(libvirtMigrateVolumeCommandWrapper).checkBlockJobStatus(Mockito.any(), Mockito.any(), Mockito.any(), Mockito.any(), Mockito.any(), Mockito.any(), Mockito.any(), Mockito.any());

        Answer migrateVolumeAnswer = libvirtMigrateVolumeCommandWrapper.migratePowerFlexVolume(command, libvirtComputingResource);
        Assert.assertTrue(migrateVolumeAnswer.getResult());
    }

    /**
     * Failure path of migratePowerFlexVolume(): blockCopy throws a
     * LibvirtException, which must be translated into a failed answer
     * instead of propagating.
     */
    @Test
    public void testMigratePowerFlexVolumeFailure() throws LibvirtException, ParserConfigurationException, IOException, TransformerException, SAXException {
        VolumeObjectTO srcVolumeObjectTO = Mockito.mock(VolumeObjectTO.class);
        Mockito.doReturn(srcVolumeObjectTO).when(command).getSrcData();
        String srcPath = "bec108c400000018:vol-60-7acb-9e22";
        Mockito.doReturn(srcPath).when(srcVolumeObjectTO).getPath();
        String vmName = "i-2-27-VM";
        Mockito.doReturn(vmName).when(srcVolumeObjectTO).getVmName();

        VolumeObjectTO destVolumeObjectTO = Mockito.mock(VolumeObjectTO.class);
        Mockito.doReturn(destVolumeObjectTO).when(command).getDestData();
        String destPath = "01b381820000001c:vol-60-ec76-b7dc";
        Mockito.doReturn(destPath).when(destVolumeObjectTO).getPath();

        Mockito.doReturn(libvirtUtilitiesHelper).when(libvirtComputingResource).getLibvirtUtilitiesHelper();
        Connect conn = Mockito.mock(Connect.class);
        Domain dm = Mockito.mock(Domain.class);
        Mockito.doReturn(conn).when(libvirtUtilitiesHelper).getConnection();
        Mockito.doReturn(dm).when(libvirtComputingResource).getDomain(conn, vmName);

        DomainInfo domainInfo = Mockito.mock(DomainInfo.class);
        domainInfo.state = DomainInfo.DomainState.VIR_DOMAIN_RUNNING;
        Mockito.doReturn(domainInfo).when(dm).getInfo();

        KVMStoragePoolManager storagePoolMgr = Mockito.mock(KVMStoragePoolManager.class);
        Mockito.doReturn(storagePoolMgr).when(libvirtComputingResource).getStoragePoolMgr();
        PrimaryDataStoreTO spool = Mockito.mock(PrimaryDataStoreTO.class);
        Mockito.doReturn(spool).when(destVolumeObjectTO).getDataStore();
        KVMStoragePool pool = Mockito.mock(KVMStoragePool.class);
        Mockito.doReturn(pool).when(storagePoolMgr).getStoragePool(Mockito.any(), Mockito.any());
        Mockito.doReturn(true).when(pool).connectPhysicalDisk(Mockito.any(), Mockito.any());
        Mockito.doReturn(null).when(destVolumeObjectTO).getPassphrase();

        Mockito.doReturn(domxml).when(dm).getXMLDesc(0);
        // Simulate libvirt rejecting the block copy.
        Mockito.doThrow(LibvirtException.class).when(dm).blockCopy(Matchers.anyString(), Matchers.anyString(), Matchers.any(TypedParameter[].class), Matchers.anyInt());

        Answer migrateVolumeAnswer = libvirtMigrateVolumeCommandWrapper.migratePowerFlexVolume(command, libvirtComputingResource);
        Assert.assertFalse(migrateVolumeAnswer.getResult());
    }

    /**
     * checkBlockJobStatus() returns success once the block job reports
     * cur == end (copy fully mirrored).
     */
    @Test
    public void testCheckBlockJobStatus() throws LibvirtException {
        Connect conn = Mockito.mock(Connect.class);
        Domain dm = Mockito.mock(Domain.class);
        String destDiskLabel = "vda";
        String srcPath = "bec108c400000018:vol-60-7acb-9e22";
        String destPath = "01b381820000001c:vol-60-ec76-b7dc";
        Mockito.doReturn(60).when(command).getWait();
        DomainBlockJobInfo blockJobInfo = Mockito.mock(DomainBlockJobInfo.class);
        Mockito.doReturn(blockJobInfo).when(dm).getBlockJobInfo(destDiskLabel, 0);
        // Job complete: cursor has reached the end offset.
        blockJobInfo.cur = 100;
        blockJobInfo.end = 100;
        MigrateVolumeAnswer answer = libvirtMigrateVolumeCommandWrapper.checkBlockJobStatus(command, dm, destDiskLabel, srcPath, destPath, libvirtComputingResource, conn, null);
        Assert.assertTrue(answer.getResult());
    }

    /**
     * checkBlockJobStatus() returns failure when the wait timeout (1s) elapses
     * while the block job is still incomplete (cur < end).
     */
    @Test
    public void testCheckBlockJobStatusFailure() throws LibvirtException {
        Connect conn = Mockito.mock(Connect.class);
        Domain dm = Mockito.mock(Domain.class);
        String destDiskLabel = "vda";
        String srcPath = "bec108c400000018:vol-60-7acb-9e22";
        String destPath = "01b381820000001c:vol-60-ec76-b7dc";
        Mockito.doReturn(1).when(command).getWait();
        DomainBlockJobInfo blockJobInfo = Mockito.mock(DomainBlockJobInfo.class);
        Mockito.doReturn(blockJobInfo).when(dm).getBlockJobInfo(destDiskLabel, 0);
        // Job stalled: cursor never reaches the end offset before the timeout.
        blockJobInfo.cur = 10;
        blockJobInfo.end = 100;
        MigrateVolumeAnswer answer = libvirtMigrateVolumeCommandWrapper.checkBlockJobStatus(command, dm, destDiskLabel, srcPath, destPath, libvirtComputingResource, conn, null);
        Assert.assertFalse(answer.getResult());
    }
}

View File

@ -27,9 +27,6 @@ import java.util.UUID;
import javax.inject.Inject; import javax.inject.Inject;
import com.cloud.storage.StorageManager;
import com.cloud.storage.StoragePoolHostVO;
import com.cloud.storage.dao.StoragePoolHostDao;
import org.apache.cloudstack.acl.ControlledEntity; import org.apache.cloudstack.acl.ControlledEntity;
import org.apache.cloudstack.backup.Backup; import org.apache.cloudstack.backup.Backup;
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine;
@ -112,7 +109,9 @@ import com.cloud.storage.DataStoreRole;
import com.cloud.storage.DiskOfferingVO; import com.cloud.storage.DiskOfferingVO;
import com.cloud.storage.GuestOSVO; import com.cloud.storage.GuestOSVO;
import com.cloud.storage.Storage; import com.cloud.storage.Storage;
import com.cloud.storage.StorageManager;
import com.cloud.storage.StoragePool; import com.cloud.storage.StoragePool;
import com.cloud.storage.StoragePoolHostVO;
import com.cloud.storage.VMTemplateStoragePoolVO; import com.cloud.storage.VMTemplateStoragePoolVO;
import com.cloud.storage.VMTemplateStorageResourceAssoc; import com.cloud.storage.VMTemplateStorageResourceAssoc;
import com.cloud.storage.VMTemplateVO; import com.cloud.storage.VMTemplateVO;
@ -120,6 +119,7 @@ import com.cloud.storage.Volume;
import com.cloud.storage.VolumeVO; import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.DiskOfferingDao; import com.cloud.storage.dao.DiskOfferingDao;
import com.cloud.storage.dao.GuestOSDao; import com.cloud.storage.dao.GuestOSDao;
import com.cloud.storage.dao.StoragePoolHostDao;
import com.cloud.storage.dao.VMTemplateDao; import com.cloud.storage.dao.VMTemplateDao;
import com.cloud.storage.dao.VMTemplatePoolDao; import com.cloud.storage.dao.VMTemplatePoolDao;
import com.cloud.storage.dao.VolumeDao; import com.cloud.storage.dao.VolumeDao;
@ -1149,10 +1149,10 @@ public class VMwareGuru extends HypervisorGuruBase implements HypervisorGuru, Co
@Override @Override
public List<Command> finalizeMigrate(VirtualMachine vm, Map<Volume, StoragePool> volumeToPool) { public List<Command> finalizeMigrate(VirtualMachine vm, Map<Volume, StoragePool> volumeToPool) {
List<Command> commands = new ArrayList<Command>(); List<Command> commands = new ArrayList<>();
// OfflineVmwareMigration: specialised migration command // OfflineVmwareMigration: specialised migration command
List<Pair<VolumeTO, StorageFilerTO>> volumeToFilerTo = new ArrayList<Pair<VolumeTO, StorageFilerTO>>(); List<Pair<VolumeTO, StorageFilerTO>> volumeToFilerTo = new ArrayList<>();
Long poolClusterId = null; Long poolClusterId = null;
StoragePool targetLocalPoolForVM = null; StoragePool targetLocalPoolForVM = null;
for (Map.Entry<Volume, StoragePool> entry : volumeToPool.entrySet()) { for (Map.Entry<Volume, StoragePool> entry : volumeToPool.entrySet()) {
@ -1166,10 +1166,10 @@ public class VMwareGuru extends HypervisorGuruBase implements HypervisorGuru, Co
if (volume.getVolumeType().equals(Volume.Type.ROOT) && pool.isLocal()) { if (volume.getVolumeType().equals(Volume.Type.ROOT) && pool.isLocal()) {
targetLocalPoolForVM = pool; targetLocalPoolForVM = pool;
} }
volumeToFilerTo.add(new Pair<VolumeTO, StorageFilerTO>(volumeTo, filerTo)); volumeToFilerTo.add(new Pair<>(volumeTo, filerTo));
} }
final Long destClusterId = poolClusterId; final Long destClusterId = poolClusterId;
final Long srcClusterId = vmManager.findClusterAndHostIdForVm(vm.getId()).first(); final Long srcClusterId = vmManager.findClusterAndHostIdForVm(vm, true).first();
final boolean isInterClusterMigration = isInterClusterMigration(destClusterId, srcClusterId); final boolean isInterClusterMigration = isInterClusterMigration(destClusterId, srcClusterId);
String targetHostGuid = getTargetHostGuid(targetLocalPoolForVM, destClusterId, isInterClusterMigration); String targetHostGuid = getTargetHostGuid(targetLocalPoolForVM, destClusterId, isInterClusterMigration);

View File

@ -16,20 +16,13 @@
// under the License. // under the License.
package com.cloud.hypervisor.guru; package com.cloud.hypervisor.guru;
import com.cloud.agent.api.Command; import static org.junit.Assert.assertEquals;
import com.cloud.agent.api.MigrateVmToPoolCommand;
import com.cloud.dc.ClusterDetailsDao; import java.util.ArrayList;
import com.cloud.host.HostVO; import java.util.HashMap;
import com.cloud.host.dao.HostDao; import java.util.List;
import com.cloud.storage.Storage.ProvisioningType; import java.util.Map;
import com.cloud.storage.StoragePool;
import com.cloud.storage.StoragePoolHostVO;
import com.cloud.storage.Volume;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.StoragePoolHostDao;
import com.cloud.utils.Pair;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.VirtualMachineManager;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.junit.Assert; import org.junit.Assert;
@ -46,12 +39,20 @@ import org.powermock.modules.junit4.PowerMockRunner;
import org.springframework.test.context.ContextConfiguration; import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.support.AnnotationConfigContextLoader; import org.springframework.test.context.support.AnnotationConfigContextLoader;
import static org.junit.Assert.assertEquals; import com.cloud.agent.api.Command;
import com.cloud.agent.api.MigrateVmToPoolCommand;
import java.util.ArrayList; import com.cloud.dc.ClusterDetailsDao;
import java.util.HashMap; import com.cloud.host.HostVO;
import java.util.List; import com.cloud.host.dao.HostDao;
import java.util.Map; import com.cloud.storage.Storage.ProvisioningType;
import com.cloud.storage.StoragePool;
import com.cloud.storage.StoragePoolHostVO;
import com.cloud.storage.Volume;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.StoragePoolHostDao;
import com.cloud.utils.Pair;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.VirtualMachineManager;
@RunWith(PowerMockRunner.class) @RunWith(PowerMockRunner.class)
@PrepareForTest({VMwareGuru.class}) @PrepareForTest({VMwareGuru.class})
@ -105,7 +106,7 @@ public class VMwareGuruTest {
Mockito.when(localStorage.isLocal()).thenReturn(true); Mockito.when(localStorage.isLocal()).thenReturn(true);
Pair<Long, Long> clusterAndHost = new Pair<>(1L, 1L); Pair<Long, Long> clusterAndHost = new Pair<>(1L, 1L);
Mockito.when(vmManager.findClusterAndHostIdForVm(1L)).thenReturn(clusterAndHost); Mockito.when(vmManager.findClusterAndHostIdForVm(vm, true)).thenReturn(clusterAndHost);
List<StoragePoolHostVO> storagePoolHostVOS = new ArrayList<>(); List<StoragePoolHostVO> storagePoolHostVOS = new ArrayList<>();
storagePoolHostVOS.add(storagePoolHostVO); storagePoolHostVOS.add(storagePoolHostVO);

View File

@ -761,7 +761,7 @@ public class ScaleIOGatewayClientImpl implements ScaleIOGatewayClient {
} }
String srcPoolId = volume.getStoragePoolId(); String srcPoolId = volume.getStoragePoolId();
LOG.debug("Migrating the volume: " + srcVolumeId + " on the src pool: " + srcPoolId + " to the dest pool: " + destPoolId + LOG.info("Migrating the volume: " + srcVolumeId + " on the src pool: " + srcPoolId + " to the dest pool: " + destPoolId +
" in the same PowerFlex cluster"); " in the same PowerFlex cluster");
post("/instances/Volume::" + srcVolumeId + "/action/migrateVTree", post("/instances/Volume::" + srcVolumeId + "/action/migrateVTree",

View File

@ -22,6 +22,7 @@ import java.util.Map;
import javax.inject.Inject; import javax.inject.Inject;
import com.cloud.agent.api.storage.MigrateVolumeCommand;
import com.cloud.agent.api.storage.ResizeVolumeCommand; import com.cloud.agent.api.storage.ResizeVolumeCommand;
import com.cloud.agent.api.to.StorageFilerTO; import com.cloud.agent.api.to.StorageFilerTO;
import com.cloud.host.HostVO; import com.cloud.host.HostVO;
@ -41,6 +42,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService;
import org.apache.cloudstack.framework.async.AsyncCompletionCallback; import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
import org.apache.cloudstack.storage.RemoteHostEndPoint; import org.apache.cloudstack.storage.RemoteHostEndPoint;
@ -70,6 +72,7 @@ import com.cloud.agent.api.Answer;
import com.cloud.agent.api.to.DataObjectType; import com.cloud.agent.api.to.DataObjectType;
import com.cloud.agent.api.to.DataStoreTO; import com.cloud.agent.api.to.DataStoreTO;
import com.cloud.agent.api.to.DataTO; import com.cloud.agent.api.to.DataTO;
import com.cloud.agent.api.to.DiskTO;
import com.cloud.alert.AlertManager; import com.cloud.alert.AlertManager;
import com.cloud.configuration.Config; import com.cloud.configuration.Config;
import com.cloud.host.Host; import com.cloud.host.Host;
@ -126,12 +129,14 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
private HostDao hostDao; private HostDao hostDao;
@Inject @Inject
private VMInstanceDao vmInstanceDao; private VMInstanceDao vmInstanceDao;
@Inject
private VolumeService volumeService;
public ScaleIOPrimaryDataStoreDriver() { public ScaleIOPrimaryDataStoreDriver() {
} }
private ScaleIOGatewayClient getScaleIOClient(final Long storagePoolId) throws Exception { public ScaleIOGatewayClient getScaleIOClient(final Long storagePoolId) throws Exception {
return ScaleIOGatewayClientConnectionPool.getInstance().getClient(storagePoolId, storagePoolDetailsDao); return ScaleIOGatewayClientConnectionPool.getInstance().getClient(storagePoolId, storagePoolDetailsDao);
} }
@ -253,12 +258,33 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
} }
} }
public void revokeVolumeAccess(String volumePath, Host host, DataStore dataStore) {
if (host == null) {
LOGGER.warn("Declining to revoke access to PowerFlex volume when a host is not provided");
return;
}
try {
LOGGER.debug("Revoking access for PowerFlex volume: " + volumePath);
final String sdcId = getConnectedSdc(dataStore.getId(), host.getId());
if (StringUtils.isBlank(sdcId)) {
throw new CloudRuntimeException("Unable to revoke access for volume: " + volumePath + ", no Sdc connected with host ip: " + host.getPrivateIpAddress());
}
final ScaleIOGatewayClient client = getScaleIOClient(dataStore.getId());
client.unmapVolumeFromSdc(ScaleIOUtil.getVolumePath(volumePath), sdcId);
} catch (Exception e) {
LOGGER.warn("Failed to revoke access due to: " + e.getMessage(), e);
}
}
private void revokeAccess(DataObject dataObject, EndPoint ep, DataStore dataStore) { private void revokeAccess(DataObject dataObject, EndPoint ep, DataStore dataStore) {
Host host = hostDao.findById(ep.getId()); Host host = hostDao.findById(ep.getId());
revokeAccess(dataObject, host, dataStore); revokeAccess(dataObject, host, dataStore);
} }
private String getConnectedSdc(long poolId, long hostId) { public String getConnectedSdc(long poolId, long hostId) {
try { try {
StoragePoolHostVO poolHostVO = storagePoolHostDao.findByPoolHost(poolId, hostId); StoragePoolHostVO poolHostVO = storagePoolHostDao.findByPoolHost(poolId, hostId);
if (poolHostVO == null) { if (poolHostVO == null) {
@ -443,7 +469,11 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
} }
} }
private CreateObjectAnswer createVolume(VolumeInfo volumeInfo, long storagePoolId) { public CreateObjectAnswer createVolume(VolumeInfo volumeInfo, long storagePoolId) {
return createVolume(volumeInfo, storagePoolId, false);
}
public CreateObjectAnswer createVolume(VolumeInfo volumeInfo, long storagePoolId, boolean migrationInvolved) {
LOGGER.debug("Creating PowerFlex volume"); LOGGER.debug("Creating PowerFlex volume");
StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId); StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId);
@ -474,7 +504,11 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
volume.setFolder(scaleIOVolume.getVtreeId()); volume.setFolder(scaleIOVolume.getVtreeId());
volume.setSize(scaleIOVolume.getSizeInKb() * 1024); volume.setSize(scaleIOVolume.getSizeInKb() * 1024);
volume.setPoolType(Storage.StoragePoolType.PowerFlex); volume.setPoolType(Storage.StoragePoolType.PowerFlex);
volume.setFormat(Storage.ImageFormat.RAW); if (volumeInfo.getVolumeType().equals(Volume.Type.ROOT)) {
volume.setFormat(volumeInfo.getFormat());
} else {
volume.setFormat(Storage.ImageFormat.RAW);
}
volume.setPoolId(storagePoolId); volume.setPoolId(storagePoolId);
VolumeObject createdObject = VolumeObject.getVolumeObject(volumeInfo.getDataStore(), volume); VolumeObject createdObject = VolumeObject.getVolumeObject(volumeInfo.getDataStore(), volume);
createdObject.update(); createdObject.update();
@ -488,7 +522,7 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
CreateObjectAnswer answer = new CreateObjectAnswer(createdObject.getTO()); CreateObjectAnswer answer = new CreateObjectAnswer(createdObject.getTO());
// if volume needs to be set up with encryption, do it now if it's not a root disk (which gets done during template copy) // if volume needs to be set up with encryption, do it now if it's not a root disk (which gets done during template copy)
if (anyVolumeRequiresEncryption(volumeInfo) && !volumeInfo.getVolumeType().equals(Volume.Type.ROOT)) { if (anyVolumeRequiresEncryption(volumeInfo) && (!volumeInfo.getVolumeType().equals(Volume.Type.ROOT) || migrationInvolved)) {
LOGGER.debug(String.format("Setting up encryption for volume %s", volumeInfo.getId())); LOGGER.debug(String.format("Setting up encryption for volume %s", volumeInfo.getId()));
VolumeObjectTO prepVolume = (VolumeObjectTO) createdObject.getTO(); VolumeObjectTO prepVolume = (VolumeObjectTO) createdObject.getTO();
prepVolume.setPath(volumePath); prepVolume.setPath(volumePath);
@ -682,7 +716,12 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
if (isSameScaleIOStorageInstance(srcStore, destStore)) { if (isSameScaleIOStorageInstance(srcStore, destStore)) {
answer = migrateVolume(srcData, destData); answer = migrateVolume(srcData, destData);
} else { } else {
answer = copyVolume(srcData, destData, destHost); String vmName = ((VolumeInfo) srcData).getAttachedVmName();
if (vmName == null || !vmInstanceDao.findVMByInstanceName(vmName).getState().equals(VirtualMachine.State.Running)) {
answer = copyOfflineVolume(srcData, destData, destHost);
} else {
answer = liveMigrateVolume(srcData, destData);
}
} }
} else { } else {
errMsg = "Unsupported copy operation from src object: (" + srcData.getType() + ", " + srcData.getDataStore() + "), dest object: (" errMsg = "Unsupported copy operation from src object: (" + srcData.getType() + ", " + srcData.getDataStore() + "), dest object: ("
@ -702,6 +741,9 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
} }
result = new CopyCommandResult(null, answer); result = new CopyCommandResult(null, answer);
if (answer != null && !answer.getResult()) {
result.setResult(answer.getDetails());
}
callback.complete(result); callback.complete(result);
} }
@ -753,7 +795,7 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
return answer; return answer;
} }
private Answer copyVolume(DataObject srcData, DataObject destData, Host destHost) { protected Answer copyOfflineVolume(DataObject srcData, DataObject destData, Host destHost) {
// Copy PowerFlex/ScaleIO volume // Copy PowerFlex/ScaleIO volume
LOGGER.debug(String.format("Initiating copy from PowerFlex template volume on host %s", destHost != null ? destHost.getId() : "<not specified>")); LOGGER.debug(String.format("Initiating copy from PowerFlex template volume on host %s", destHost != null ? destHost.getId() : "<not specified>"));
String value = configDao.getValue(Config.CopyVolumeWait.key()); String value = configDao.getValue(Config.CopyVolumeWait.key());
@ -775,6 +817,227 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
return answer; return answer;
} }
/**
 * Migrates a volume that is attached to a running VM to a storage pool on a
 * different PowerFlex/ScaleIO cluster (block copy via the hypervisor host).
 *
 * Flow: create the destination volume, grant the endpoint host access to it,
 * send a MigrateVolumeCommand to the host, then either finalize (update volume
 * and snapshot records, delete the source volume) or revert everything.
 *
 * @param srcData  source volume (must be on a managed PowerFlex pool)
 * @param destData destination volume placeholder on the target pool
 * @return answer whose result flag reflects migration success
 */
public Answer liveMigrateVolume(DataObject srcData, DataObject destData) {
    // Volume migration across different PowerFlex/ScaleIO clusters
    final long srcVolumeId = srcData.getId();
    DataStore srcStore = srcData.getDataStore();
    Map<String, String> srcDetails = getVolumeDetails((VolumeInfo) srcData, srcStore);
    DataStore destStore = destData.getDataStore();
    final long destPoolId = destStore.getId();
    Map<String, String> destDetails = getVolumeDetails((VolumeInfo) destData, destStore);
    VolumeObjectTO destVolTO = (VolumeObjectTO) destData.getTO();
    String destVolumePath = null;
    Host host = findEndpointForVolumeOperation(srcData);
    EndPoint ep = RemoteHostEndPoint.getHypervisorHostEndPoint(host);
    Answer answer = null;
    try {
        // Destination volume is created up-front; its path is persisted by createVolume().
        CreateObjectAnswer createAnswer = createVolume((VolumeInfo) destData, destStore.getId(), true);
        destVolumePath = createAnswer.getData().getPath();
        destVolTO.setPath(destVolumePath);
        grantAccess(destData, host, destData.getDataStore());
        int waitInterval = NumbersUtil.parseInt(configDao.getValue(Config.MigrateWait.key()), Integer.parseInt(Config.MigrateWait.getDefaultValue()));
        MigrateVolumeCommand migrateVolumeCommand = new MigrateVolumeCommand(srcData.getTO(), destVolTO,
                srcDetails, destDetails, waitInterval);
        answer = ep.sendMessage(migrateVolumeCommand);
        boolean migrateStatus = answer.getResult();
        if (migrateStatus) {
            // Finalize: swap DB records over to the destination, then clean up the source.
            updateVolumeAfterCopyVolume(srcData, destData);
            updateSnapshotsAfterCopyVolume(srcData, destData);
            deleteSourceVolumeAfterSuccessfulBlockCopy(srcData, host);
            LOGGER.debug(String.format("Successfully migrated PowerFlex volume %d to storage pool %d", srcVolumeId, destPoolId));
            answer = new Answer(null, true, null);
        } else {
            String errorMsg = "Failed to migrate PowerFlex volume: " + srcVolumeId + " to storage pool " + destPoolId;
            LOGGER.debug(errorMsg);
            answer = new Answer(null, false, errorMsg);
        }
    } catch (Exception e) {
        LOGGER.error("Failed to migrate PowerFlex volume: " + srcVolumeId + " due to: " + e.getMessage());
        answer = new Answer(null, false, e.getMessage());
    }
    // A destination volume was created but migration failed: undo everything.
    if (destVolumePath != null && !answer.getResult()) {
        revertBlockCopyVolumeOperations(srcData, destData, host, destVolumePath);
    }
    return answer;
}
/**
 * Adjusts the volume DB records after a successful cross-cluster copy.
 * The destination volume's path is already persisted by createVolume(), so only
 * the source side needs attention here.
 */
protected void updateVolumeAfterCopyVolume(DataObject srcData, DataObject destData) {
    final long sourceId = srcData.getId();
    if (sourceId == destData.getId()) {
        // Live migration reuses the same volume record: remember the pool we left.
        VolumeVO migrated = volumeDao.findById(sourceId);
        migrated.setLastPoolId(migrated.getPoolId());
        volumeDao.update(sourceId, migrated);
    } else {
        // Offline copy created a separate record: blank out storage paths on the source.
        VolumeVO source = volumeDao.findById(sourceId);
        source.set_iScsiName(null);
        source.setPath(null);
        source.setFolder(null);
        volumeDao.update(sourceId, source);
    }
}
/**
 * Picks the host that should execute a migrate-volume command: prefer the host
 * of the running VM the volume is attached to; otherwise let the endpoint
 * selector choose any suitable host.
 *
 * @throws CloudRuntimeException when no usable host can be resolved
 */
private Host findEndpointForVolumeOperation(DataObject srcData) {
    long hostId = 0;
    VMInstanceVO instance = vmInstanceDao.findVMByInstanceName(((VolumeInfo) srcData).getAttachedVmName());
    // Guard against a detached volume (no VM record) and against a VM that is
    // Running in the DB but has a null host id (would NPE on unboxing otherwise).
    if (instance != null && VirtualMachine.State.Running.equals(instance.getState()) && instance.getHostId() != null) {
        hostId = instance.getHostId();
    }
    if (hostId == 0) {
        hostId = selector.select(srcData, true).getId();
    }
    HostVO host = hostDao.findById(hostId);
    if (host == null) {
        throw new CloudRuntimeException("Found no hosts to run migrate volume command on");
    }
    return host;
}
/**
 * Re-points the primary-store snapshot records of the source volume to the
 * destination pool after a successful cross-cluster copy, and renames the
 * PowerFlex snapshot volumes so the new names embed the destination pool.
 *
 * The rename is done via the SOURCE pool's gateway client; install paths are
 * updated only when the rename succeeded, to keep DB and array consistent.
 *
 * @throws Exception propagated from the ScaleIO gateway client
 */
public void updateSnapshotsAfterCopyVolume(DataObject srcData, DataObject destData) throws Exception {
    final long srcVolumeId = srcData.getId();
    DataStore srcStore = srcData.getDataStore();
    final long srcPoolId = srcStore.getId();
    final ScaleIOGatewayClient client = getScaleIOClient(srcPoolId);
    DataStore destStore = destData.getDataStore();
    final long destPoolId = destStore.getId();
    final StoragePoolVO destStoragePool = storagePoolDao.findById(destPoolId);
    List<SnapshotVO> snapshots = snapshotDao.listByVolumeId(srcVolumeId);
    if (CollectionUtils.isNotEmpty(snapshots)) {
        for (SnapshotVO snapshot : snapshots) {
            SnapshotDataStoreVO snapshotStore = snapshotDataStoreDao.findBySnapshot(snapshot.getId(), DataStoreRole.Primary);
            // Skip snapshots that have no primary-storage record to move.
            if (snapshotStore == null) {
                continue;
            }
            String snapshotVolumeId = ScaleIOUtil.getVolumePath(snapshotStore.getInstallPath());
            // New name pattern: <prefix>-<snapshotId>-<dest pool uuid fragment>-<CS identifier>.
            String newSnapshotName = String.format("%s-%s-%s-%s", ScaleIOUtil.SNAPSHOT_PREFIX, snapshot.getId(),
                    destStoragePool.getUuid().split("-")[0].substring(4), ManagementServerImpl.customCsIdentifier.value());
            boolean renamed = client.renameVolume(snapshotVolumeId, newSnapshotName);
            snapshotStore.setDataStoreId(destPoolId);
            // Snapshot Id in the PowerFlex/ScaleIO pool remains the same after the migration
            // Update PowerFlex snapshot name only after it is renamed, to maintain the consistency
            if (renamed) {
                snapshotStore.setInstallPath(ScaleIOUtil.updatedPathWithVolumeName(snapshotVolumeId, newSnapshotName));
            }
            snapshotDataStoreDao.update(snapshotStore.getId(), snapshotStore);
        }
    }
}
/**
 * Revokes host access to and deletes the source PowerFlex volume after its
 * data has been copied to the destination. Failures are logged and swallowed:
 * the migration itself already succeeded, so cleanup is best-effort.
 */
public void deleteSourceVolumeAfterSuccessfulBlockCopy(DataObject srcData, Host host) {
    DataStore srcStore = srcData.getDataStore();
    String srcVolumePath = srcData.getTO().getPath();
    revokeVolumeAccess(srcVolumePath, host, srcData.getDataStore());
    try {
        String scaleIOVolumeId = ScaleIOUtil.getVolumePath(srcVolumePath);
        final ScaleIOGatewayClient client = getScaleIOClient(srcStore.getId());
        if (!client.deleteVolume(scaleIOVolumeId)) {
            LOGGER.warn("Failed to delete source PowerFlex volume with id: " + scaleIOVolumeId);
        }
    } catch (Exception e) {
        // Best-effort cleanup: log and continue (was "LOGGER.warn(errMsg);;" with a stray semicolon).
        LOGGER.warn("Unable to delete source PowerFlex volume: " + srcVolumePath + " due to " + e.getMessage());
    }
}
/**
 * Rolls back a failed cross-cluster block copy: revokes access to and deletes
 * the destination PowerFlex volume, then restores the source volume's DB record
 * (path, iSCSI name, folder, pool) for the live-migration case where source and
 * destination share one volume record.
 *
 * Note: a failed delete (false return) is only logged, but an exception from
 * the gateway client aborts the revert and is rethrown as CloudRuntimeException.
 */
public void revertBlockCopyVolumeOperations(DataObject srcData, DataObject destData, Host host, String destVolumePath) {
    final String srcVolumePath = ((VolumeInfo) srcData).getPath();
    final String srcVolumeFolder = ((VolumeInfo) srcData).getFolder();
    DataStore destStore = destData.getDataStore();
    revokeAccess(destData, host, destData.getDataStore());
    String errMsg;
    try {
        String scaleIOVolumeId = ScaleIOUtil.getVolumePath(destVolumePath);
        final ScaleIOGatewayClient client = getScaleIOClient(destStore.getId());
        Boolean deleteResult = client.deleteVolume(scaleIOVolumeId);
        if (!deleteResult) {
            errMsg = "Failed to delete PowerFlex volume with id: " + scaleIOVolumeId;
            LOGGER.warn(errMsg);
        }
    } catch (Exception e) {
        errMsg = "Unable to delete destination PowerFlex volume: " + destVolumePath + " due to " + e.getMessage();
        LOGGER.warn(errMsg);
        throw new CloudRuntimeException(errMsg, e);
    }
    final long srcVolumeId = srcData.getId();
    // Same record means live migration: restore the source-side fields we changed.
    if (srcVolumeId == destData.getId()) {
        VolumeVO volume = volumeDao.findById(srcVolumeId);
        volume.set_iScsiName(srcVolumePath);
        volume.setPath(srcVolumePath);
        volume.setFolder(srcVolumeFolder);
        volume.setPoolId(((VolumeInfo) srcData).getPoolId());
        volumeDao.update(srcVolumeId, volume);
    }
}
/**
 * Builds the detail map (connection info, size, CHAP credentials, system id)
 * the agent needs to operate on a volume of a managed PowerFlex pool.
 * Returns null for unmanaged pools, which carry no such details.
 */
private Map<String, String> getVolumeDetails(VolumeInfo volumeInfo, DataStore dataStore) {
    final long poolId = dataStore.getId();
    final StoragePoolVO pool = storagePoolDao.findById(poolId);
    if (!pool.isManaged()) {
        return null;
    }
    final VolumeVO volume = volumeDao.findById(volumeInfo.getId());
    final Map<String, String> details = new HashMap<>();
    details.put(DiskTO.STORAGE_HOST, pool.getHostAddress());
    details.put(DiskTO.STORAGE_PORT, String.valueOf(pool.getPort()));
    details.put(DiskTO.IQN, volume.get_iScsiName());
    details.put(DiskTO.PROTOCOL_TYPE, (volume.getPoolType() != null) ? volume.getPoolType().toString() : null);
    details.put(StorageManager.STORAGE_POOL_DISK_WAIT.toString(), String.valueOf(StorageManager.STORAGE_POOL_DISK_WAIT.valueIn(pool.getId())));
    details.put(DiskTO.VOLUME_SIZE, String.valueOf(volume.getSize()));
    details.put(DiskTO.SCSI_NAA_DEVICE_ID, getVolumeProperty(volumeInfo.getId(), DiskTO.SCSI_NAA_DEVICE_ID));
    // CHAP credentials are optional; only add them when configured.
    ChapInfo chapInfo = volumeService.getChapInfo(volumeInfo, dataStore);
    if (chapInfo != null) {
        details.put(DiskTO.CHAP_INITIATOR_USERNAME, chapInfo.getInitiatorUsername());
        details.put(DiskTO.CHAP_INITIATOR_SECRET, chapInfo.getInitiatorSecret());
        details.put(DiskTO.CHAP_TARGET_USERNAME, chapInfo.getTargetUsername());
        details.put(DiskTO.CHAP_TARGET_SECRET, chapInfo.getTargetSecret());
    }
    String systemId = storagePoolDetailsDao.findDetail(poolId, ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID).getValue();
    details.put(ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID, systemId);
    return details;
}
/**
 * Looks up an optional per-volume detail value; absent details map to null.
 */
private String getVolumeProperty(long volumeId, String property) {
    final VolumeDetailVO detail = volumeDetailsDao.findDetail(volumeId, property);
    return (detail == null) ? null : detail.getValue();
}
private Answer migrateVolume(DataObject srcData, DataObject destData) { private Answer migrateVolume(DataObject srcData, DataObject destData) {
// Volume migration within same PowerFlex/ScaleIO cluster (with same System ID) // Volume migration within same PowerFlex/ScaleIO cluster (with same System ID)
DataStore srcStore = srcData.getDataStore(); DataStore srcStore = srcData.getDataStore();
@ -861,7 +1124,7 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
return answer; return answer;
} }
private boolean isSameScaleIOStorageInstance(DataStore srcStore, DataStore destStore) { public boolean isSameScaleIOStorageInstance(DataStore srcStore, DataStore destStore) {
long srcPoolId = srcStore.getId(); long srcPoolId = srcStore.getId();
String srcPoolSystemId = null; String srcPoolSystemId = null;
StoragePoolDetailVO srcPoolSystemIdDetail = storagePoolDetailsDao.findDetail(srcPoolId, ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID); StoragePoolDetailVO srcPoolSystemIdDetail = storagePoolDetailsDao.findDetail(srcPoolId, ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID);
@ -1148,7 +1411,7 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
/** /**
* Does any object require encryption support? * Does any object require encryption support?
*/ */
private boolean anyVolumeRequiresEncryption(DataObject ... objects) { protected boolean anyVolumeRequiresEncryption(DataObject ... objects) {
for (DataObject o : objects) { for (DataObject o : objects) {
if (o instanceof VolumeInfo && ((VolumeInfo) o).getPassphraseId() != null) { if (o instanceof VolumeInfo && ((VolumeInfo) o).getPassphraseId() != null) {
return true; return true;

View File

@ -0,0 +1,527 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package org.apache.cloudstack.storage.datastore.driver;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.storage.MigrateVolumeAnswer;
import com.cloud.agent.api.to.DataTO;
import com.cloud.agent.api.to.DiskTO;
import com.cloud.configuration.Config;
import com.cloud.host.Host;
import com.cloud.host.HostVO;
import com.cloud.host.dao.HostDao;
import com.cloud.storage.Storage;
import com.cloud.storage.Volume;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.storage.dao.VolumeDetailsDao;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.vm.VMInstanceVO;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.dao.VMInstanceDao;
import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
import org.apache.cloudstack.storage.RemoteHostEndPoint;
import org.apache.cloudstack.storage.command.CreateObjectAnswer;
import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.to.VolumeObjectTO;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.MockitoAnnotations;
import org.mockito.Spy;
import org.powermock.api.mockito.PowerMockito;
import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.modules.junit4.PowerMockRunner;
import java.util.Optional;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.when;
@RunWith(PowerMockRunner.class)
@PrepareForTest(RemoteHostEndPoint.class)
public class ScaleIOPrimaryDataStoreDriverTest {
@Spy
@InjectMocks
ScaleIOPrimaryDataStoreDriver scaleIOPrimaryDataStoreDriver = new ScaleIOPrimaryDataStoreDriver();
@Mock
StoragePoolDetailsDao storagePoolDetailsDao;
@Mock
PrimaryDataStoreDao storagePoolDao;
@Mock
VolumeDao volumeDao;
@Mock
VolumeDetailsDao volumeDetailsDao;
@Mock
VolumeService volumeService;
@Mock
VMInstanceDao vmInstanceDao;
@Mock
HostDao hostDao;
@Mock
ConfigurationDao configDao;
@Before
public void initMocks() {
    // Initializes @Mock/@Spy/@InjectMocks fields before each test.
    // NOTE(review): MockitoAnnotations.initMocks is deprecated in Mockito 2.x+;
    // openMocks(this) is the replacement — confirm the project's Mockito version.
    MockitoAnnotations.initMocks(this);
}
@Test
public void testSameScaleIOStorageInstance() {
    // Two pools whose PowerFlex system IDs match must be detected as the same storage instance.
    final String sharedSystemId = "610204d03e3ad60f";
    DataStore sourcePool = Mockito.mock(DataStore.class);
    DataStore targetPool = Mockito.mock(DataStore.class);
    when(sourcePool.getId()).thenReturn(1L);
    when(targetPool.getId()).thenReturn(2L);
    StoragePoolDetailVO sourceSystemIdDetail = Mockito.mock(StoragePoolDetailVO.class);
    when(sourceSystemIdDetail.getValue()).thenReturn(sharedSystemId);
    StoragePoolDetailVO targetSystemIdDetail = Mockito.mock(StoragePoolDetailVO.class);
    when(targetSystemIdDetail.getValue()).thenReturn(sharedSystemId);
    when(storagePoolDetailsDao.findDetail(1L, ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID)).thenReturn(sourceSystemIdDetail);
    when(storagePoolDetailsDao.findDetail(2L, ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID)).thenReturn(targetSystemIdDetail);
    Assert.assertTrue(scaleIOPrimaryDataStoreDriver.isSameScaleIOStorageInstance(sourcePool, targetPool));
}
@Test
public void testDifferentScaleIOStorageInstance() {
    // Pools with different PowerFlex system IDs belong to different storage instances.
    DataStore sourcePool = Mockito.mock(DataStore.class);
    DataStore targetPool = Mockito.mock(DataStore.class);
    when(sourcePool.getId()).thenReturn(1L);
    when(targetPool.getId()).thenReturn(2L);
    StoragePoolDetailVO sourceSystemIdDetail = Mockito.mock(StoragePoolDetailVO.class);
    when(sourceSystemIdDetail.getValue()).thenReturn("610204d03e3ad60f");
    StoragePoolDetailVO targetSystemIdDetail = Mockito.mock(StoragePoolDetailVO.class);
    when(targetSystemIdDetail.getValue()).thenReturn("7332760565f6340f");
    when(storagePoolDetailsDao.findDetail(1L, ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID)).thenReturn(sourceSystemIdDetail);
    when(storagePoolDetailsDao.findDetail(2L, ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID)).thenReturn(targetSystemIdDetail);
    Assert.assertFalse(scaleIOPrimaryDataStoreDriver.isSameScaleIOStorageInstance(sourcePool, targetPool));
}
@Test (expected = CloudRuntimeException.class)
public void testCheckVolumeOnDifferentScaleIOStorageInstanceSystemIdShouldNotBeNull() {
    // A pool whose system-id detail resolves to null must make the comparison
    // fail fast with CloudRuntimeException instead of silently mismatching.
    DataStore srcStore = Mockito.mock(DataStore.class);
    DataStore destStore = Mockito.mock(DataStore.class);
    when(srcStore.getId()).thenReturn(1L);
    when(destStore.getId()).thenReturn(2L);
    StoragePoolDetailVO srcPoolSystemIdDetail = Mockito.mock(StoragePoolDetailVO.class);
    String srcPoolSystemId = "610204d03e3ad60f";
    when(srcPoolSystemIdDetail.getValue()).thenReturn(srcPoolSystemId);
    StoragePoolDetailVO destPoolSystemIdDetail = Mockito.mock(StoragePoolDetailVO.class);
    when(destPoolSystemIdDetail.getValue()).thenReturn(null);
    when(storagePoolDetailsDao.findDetail(1L,ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID)).thenReturn(srcPoolSystemIdDetail);
    when(storagePoolDetailsDao.findDetail(2L,ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID)).thenReturn(destPoolSystemIdDetail);
    scaleIOPrimaryDataStoreDriver.isSameScaleIOStorageInstance(srcStore, destStore);
}
@Test
public void testMigrateVolumeWithinSameScaleIOClusterSuccess() throws Exception {
    // Happy path for liveMigrateVolume(): the endpoint reports success, so the
    // post-copy update/cleanup hooks run and a positive answer is returned.
    // NOTE(review): the method name says "WithinSameScaleIOCluster" but this
    // exercises liveMigrateVolume(), the cross-cluster path — consider renaming.
    VolumeInfo srcData = Mockito.mock(VolumeInfo.class);
    VolumeInfo destData = Mockito.mock(VolumeInfo.class);
    DataStore srcStore = Mockito.mock(DataStore.class);
    DataStore destStore = Mockito.mock(DataStore.class);
    when(srcData.getDataStore()).thenReturn(srcStore);
    when(destData.getDataStore()).thenReturn(destStore);
    fillSrcVolumeDetails(srcData, srcStore);
    fillDestVolumeDetails(destData, destStore);
    VolumeObjectTO destVolTO = Mockito.mock(VolumeObjectTO.class);
    when(destData.getTO()).thenReturn(destVolTO);
    Host host = prepareEndpointForVolumeOperation(srcData);
    PowerMockito.mockStatic(RemoteHostEndPoint.class);
    RemoteHostEndPoint ep = Mockito.mock(RemoteHostEndPoint.class);
    when(RemoteHostEndPoint.getHypervisorHostEndPoint(host)).thenReturn(ep);
    DataTO dataTO = Mockito.mock(DataTO.class);
    CreateObjectAnswer createAnswer = new CreateObjectAnswer(dataTO);
    // Stub out destination-volume creation and access grant on the spied driver.
    doReturn(createAnswer).when(scaleIOPrimaryDataStoreDriver).createVolume(destData, 2L, true);
    when(dataTO.getPath()).thenReturn("bec0ba7700000007:vol-11-6aef-10ee");
    doReturn(true).when(scaleIOPrimaryDataStoreDriver)
            .grantAccess(any(), any(), any());
    when(configDao.getValue(Config.MigrateWait.key())).thenReturn("3600");
    MigrateVolumeAnswer migrateVolumeAnswer = Mockito.mock(MigrateVolumeAnswer.class);
    when(ep.sendMessage(any())).thenReturn(migrateVolumeAnswer);
    when(migrateVolumeAnswer.getResult()).thenReturn(true);
    // Post-migration bookkeeping is covered by dedicated tests; neutralize it here.
    Mockito.doNothing().when(scaleIOPrimaryDataStoreDriver)
            .updateVolumeAfterCopyVolume(any(), any());
    Mockito.doNothing().when(scaleIOPrimaryDataStoreDriver)
            .updateSnapshotsAfterCopyVolume(any(), any());
    Mockito.doNothing().when(scaleIOPrimaryDataStoreDriver)
            .deleteSourceVolumeAfterSuccessfulBlockCopy(any(), any());
    Answer answer = scaleIOPrimaryDataStoreDriver.liveMigrateVolume(srcData, destData);
    Assert.assertTrue(answer.getResult());
}
@Test
public void testMigrateVolumeWithinSameScaleIOClusterFailure() throws Exception {
    // Failure path for liveMigrateVolume(): the endpoint reports failure, so the
    // driver must revert the block-copy operations and return a negative answer.
    VolumeInfo srcData = Mockito.mock(VolumeInfo.class);
    VolumeInfo destData = Mockito.mock(VolumeInfo.class);
    DataStore srcStore = Mockito.mock(DataStore.class);
    DataStore destStore = Mockito.mock(DataStore.class);
    when(srcData.getDataStore()).thenReturn(srcStore);
    when(destData.getDataStore()).thenReturn(destStore);
    fillSrcVolumeDetails(srcData, srcStore);
    fillDestVolumeDetails(destData, destStore);
    VolumeObjectTO destVolTO = Mockito.mock(VolumeObjectTO.class);
    when(destData.getTO()).thenReturn(destVolTO);
    Host host = prepareEndpointForVolumeOperation(srcData);
    PowerMockito.mockStatic(RemoteHostEndPoint.class);
    RemoteHostEndPoint ep = Mockito.mock(RemoteHostEndPoint.class);
    when(RemoteHostEndPoint.getHypervisorHostEndPoint(host)).thenReturn(ep);
    DataTO dataTO = Mockito.mock(DataTO.class);
    CreateObjectAnswer createAnswer = new CreateObjectAnswer(dataTO);
    doReturn(createAnswer).when(scaleIOPrimaryDataStoreDriver).createVolume(destData, 2L, true);
    when(dataTO.getPath()).thenReturn("bec0ba7700000007:vol-11-6aef-10ee");
    doReturn(true).when(scaleIOPrimaryDataStoreDriver)
            .grantAccess(any(), any(), any());
    when(configDao.getValue(Config.MigrateWait.key())).thenReturn("3600");
    MigrateVolumeAnswer migrateVolumeAnswer = Mockito.mock(MigrateVolumeAnswer.class);
    when(ep.sendMessage(any())).thenReturn(migrateVolumeAnswer);
    when(migrateVolumeAnswer.getResult()).thenReturn(false);
    // Revert logic is covered separately; stub it so only the failure answer is asserted.
    Mockito.doNothing().when(scaleIOPrimaryDataStoreDriver)
            .revertBlockCopyVolumeOperations(any(), any(), any(), any());
    Answer answer = scaleIOPrimaryDataStoreDriver.liveMigrateVolume(srcData, destData);
    Assert.assertFalse(answer.getResult());
}
// Stubs DAO/service lookups for the source volume (id 1) on the managed source
// pool (id 1), backed by PowerFlex system id 610204d03e3ad60f.
private void fillSrcVolumeDetails(VolumeInfo srcData, DataStore srcStore) {
    when(srcStore.getId()).thenReturn(1L);
    when(srcData.getId()).thenReturn(1L);
    StoragePoolVO storagePoolVO = Mockito.mock(StoragePoolVO.class);
    when(storagePoolDao.findById(1L)).thenReturn(storagePoolVO);
    when(storagePoolVO.isManaged()).thenReturn(true);
    VolumeVO volumeVO = Mockito.mock(VolumeVO.class);
    when(volumeDao.findById(1L)).thenReturn(volumeVO);
    // No SCSI NAA detail and no CHAP credentials configured for this volume.
    when(volumeDetailsDao.findDetail(1L, DiskTO.SCSI_NAA_DEVICE_ID)).thenReturn(null);
    when(volumeService.getChapInfo(srcData, srcStore)).thenReturn(null);
    StoragePoolDetailVO srcStoragePoolDetail = Mockito.mock(StoragePoolDetailVO.class);
    when(srcStoragePoolDetail.getValue()).thenReturn("610204d03e3ad60f");
    when(storagePoolDetailsDao.findDetail(1L, ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID)).thenReturn(srcStoragePoolDetail);
}
// Stubs DAO/service lookups for the destination volume (id 2) on the managed
// destination pool (id 2), backed by PowerFlex system id 7332760565f6340f.
// (Parameters were misleadingly named srcData/srcStore; renamed to dest*.)
private void fillDestVolumeDetails(VolumeInfo destData, DataStore destStore) {
    when(destStore.getId()).thenReturn(2L);
    when(destData.getId()).thenReturn(2L);
    StoragePoolVO storagePoolVO = Mockito.mock(StoragePoolVO.class);
    when(storagePoolDao.findById(2L)).thenReturn(storagePoolVO);
    when(storagePoolVO.isManaged()).thenReturn(true);
    VolumeVO volumeVO = Mockito.mock(VolumeVO.class);
    when(volumeDao.findById(2L)).thenReturn(volumeVO);
    // No SCSI NAA detail and no CHAP credentials configured for this volume.
    when(volumeDetailsDao.findDetail(2L, DiskTO.SCSI_NAA_DEVICE_ID)).thenReturn(null);
    when(volumeService.getChapInfo(destData, destStore)).thenReturn(null);
    StoragePoolDetailVO destStoragePoolDetail = Mockito.mock(StoragePoolDetailVO.class);
    when(destStoragePoolDetail.getValue()).thenReturn("7332760565f6340f");
    when(storagePoolDetailsDao.findDetail(2L, ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID)).thenReturn(destStoragePoolDetail);
}
// Stubs a running VM ("i-2-VM") attached to the volume so the driver resolves
// its host (id 4) as the command endpoint; returns that mocked host.
private Host prepareEndpointForVolumeOperation(VolumeInfo srcData) {
    VMInstanceVO runningVm = Mockito.mock(VMInstanceVO.class);
    when(runningVm.getState()).thenReturn(VirtualMachine.State.Running);
    when(runningVm.getHostId()).thenReturn(4L);
    when(srcData.getAttachedVmName()).thenReturn("i-2-VM");
    when(vmInstanceDao.findVMByInstanceName("i-2-VM")).thenReturn(runningVm);
    HostVO vmHost = Mockito.mock(HostVO.class);
    when(hostDao.findById(4L)).thenReturn(vmHost);
    return vmHost;
}
@Test
public void updateVolumeAfterCopyVolumeLiveMigrate() {
    // Live migration (src id == dest id): the previous pool id must be
    // preserved in lastPoolId on the shared volume record.
    VolumeInfo srcData = Mockito.mock(VolumeInfo.class);
    VolumeInfo destData = Mockito.mock(VolumeInfo.class);
    when(srcData.getId()).thenReturn(1L);
    when(destData.getId()).thenReturn(1L);
    VolumeVO volume = new VolumeVO("root", 1L, 1L, 1L, 1L, 1L, "root", "root", Storage.ProvisioningType.THIN, 1, null, null, "root", Volume.Type.ROOT);
    volume.setPoolId(2L);
    when(volumeDao.findById(1L)).thenReturn(volume);
    when(volumeDao.update(1L, volume)).thenReturn(true);
    scaleIOPrimaryDataStoreDriver.updateVolumeAfterCopyVolume(srcData, destData);
    Assert.assertEquals(Optional.of(2L), Optional.of(volume.getLastPoolId()));
}
@Test
public void updateVolumeAfterCopyVolumeOffline() {
    // Offline copy (src id != dest id): the source record's storage paths
    // (iSCSI name, path, folder) must be cleared.
    VolumeInfo srcData = Mockito.mock(VolumeInfo.class);
    VolumeInfo destData = Mockito.mock(VolumeInfo.class);
    when(srcData.getId()).thenReturn(1L);
    when(destData.getId()).thenReturn(2L);
    VolumeVO volume = new VolumeVO("root", 1L, 1L, 1L, 1L, 1L, "root", "root", Storage.ProvisioningType.THIN, 1, null, null, "root", Volume.Type.ROOT);
    when(volumeDao.findById(1L)).thenReturn(volume);
    when(volumeDao.update(1L, volume)).thenReturn(true);
    scaleIOPrimaryDataStoreDriver.updateVolumeAfterCopyVolume(srcData, destData);
    Assert.assertNull(volume.get_iScsiName());
    Assert.assertNull(volume.getPath());
    Assert.assertNull(volume.getFolder());
}
@Test
public void revertBlockCopyVolumeOperationsOnDeleteSuccess() throws Exception{
    //Either destination volume delete success or failure, DB operations should get revert
    VolumeInfo srcData = Mockito.mock(VolumeInfo.class);
    VolumeInfo destData = Mockito.mock(VolumeInfo.class);
    Host host = Mockito.mock(Host.class);
    String destVolumePath = "01b332b300000007:vol-11-b9e2-10ee";
    when(srcData.getId()).thenReturn(1L);
    when(srcData.getPoolId()).thenReturn(1L);
    when(destData.getId()).thenReturn(1L);
    when(srcData.getPath()).thenReturn("bec0ba7700000007:vol-11-6aef-10ee");
    when(srcData.getFolder()).thenReturn("921c364500000007");
    DataStore destStore = Mockito.mock(DataStore.class);
    when(destStore.getId()).thenReturn(2L);
    when(destData.getDataStore()).thenReturn(destStore);
    doNothing().when(scaleIOPrimaryDataStoreDriver)
            .revokeAccess(any(), any(), any());
    ScaleIOGatewayClient client = Mockito.mock(ScaleIOGatewayClient.class);
    doReturn(client).when(scaleIOPrimaryDataStoreDriver)
            .getScaleIOClient(any());
    // Destination volume deletion succeeds on the gateway.
    when(client.deleteVolume(any())).thenReturn(true);
    VolumeVO volume = new VolumeVO("root", 1L, 1L, 1L, 1L, 1L, "root", "root", Storage.ProvisioningType.THIN, 1, null, null, "root", Volume.Type.ROOT);
    when(volumeDao.findById(1L)).thenReturn(volume);
    when(volumeDao.update(1L, volume)).thenReturn(true);
    scaleIOPrimaryDataStoreDriver.revertBlockCopyVolumeOperations(srcData, destData, host, destVolumePath);
    // Source record must be restored to its pre-migration path/folder.
    Assert.assertEquals("bec0ba7700000007:vol-11-6aef-10ee", volume.get_iScsiName());
    Assert.assertEquals("bec0ba7700000007:vol-11-6aef-10ee", volume.getPath());
    Assert.assertEquals("921c364500000007", volume.getFolder());
}
@Test
public void revertBlockCopyVolumeOperationsOnDeleteFailure() throws Exception{
    //Either destination volume delete success or failure, DB operations should get revert
    VolumeInfo srcData = Mockito.mock(VolumeInfo.class);
    VolumeInfo destData = Mockito.mock(VolumeInfo.class);
    Host host = Mockito.mock(Host.class);
    String srcVolumePath = "bec0ba7700000007:vol-11-6aef-10ee";
    String destVolumePath = "01b332b300000007:vol-11-b9e2-10ee";
    when(srcData.getId()).thenReturn(1L);
    when(srcData.getPoolId()).thenReturn(1L);
    when(destData.getId()).thenReturn(1L);
    when(srcData.getPath()).thenReturn(srcVolumePath);
    when(srcData.getFolder()).thenReturn("921c364500000007");
    DataStore destStore = Mockito.mock(DataStore.class);
    when(destStore.getId()).thenReturn(2L);
    when(destData.getDataStore()).thenReturn(destStore);
    doNothing().when(scaleIOPrimaryDataStoreDriver).revokeAccess(any(), any(), any());
    ScaleIOGatewayClient client = Mockito.mock(ScaleIOGatewayClient.class);
    doReturn(client).when(scaleIOPrimaryDataStoreDriver)
            .getScaleIOClient(any());
    // Gateway reports delete failure (false, not an exception): revert must still run.
    when(client.deleteVolume(any())).thenReturn(false);
    VolumeVO volume = new VolumeVO("root", 1L, 1L, 1L, 1L, 1L, "root", "root", Storage.ProvisioningType.THIN, 1, null, null, "root", Volume.Type.ROOT);
    when(volumeDao.findById(1L)).thenReturn(volume);
    when(volumeDao.update(1L, volume)).thenReturn(true);
    scaleIOPrimaryDataStoreDriver.revertBlockCopyVolumeOperations(srcData, destData, host, destVolumePath);
    Assert.assertEquals(srcVolumePath, volume.get_iScsiName());
    Assert.assertEquals(srcVolumePath, volume.getPath());
    Assert.assertEquals("921c364500000007", volume.getFolder());
}
@Test
public void deleteSourceVolumeSuccessScenarioAfterSuccessfulBlockCopy() throws Exception {
    // Either Volume deletion success or failure method should complete
    // (cleanup is best-effort: no exception may escape).
    VolumeInfo srcData = Mockito.mock(VolumeInfo.class);
    Host host = Mockito.mock(Host.class);
    String srcVolumePath = "bec0ba7700000007:vol-11-6aef-10ee";
    DataStore srcStore = Mockito.mock(DataStore.class);
    DataTO volumeTO = Mockito.mock(DataTO.class);
    when(srcData.getDataStore()).thenReturn(srcStore);
    when(srcData.getTO()).thenReturn(volumeTO);
    when(volumeTO.getPath()).thenReturn(srcVolumePath);
    doNothing().when(scaleIOPrimaryDataStoreDriver).revokeVolumeAccess(any(), any(), any());
    ScaleIOGatewayClient client = Mockito.mock(ScaleIOGatewayClient.class);
    doReturn(client).when(scaleIOPrimaryDataStoreDriver)
            .getScaleIOClient(any());
    when(client.deleteVolume(any())).thenReturn(true);
    scaleIOPrimaryDataStoreDriver.deleteSourceVolumeAfterSuccessfulBlockCopy(srcData, host);
}
@Test
public void deleteSourceVolumeFailureScenarioAfterSuccessfulBlockCopy() throws Exception {
    // Either Volume deletion success or failure method should complete
    // (a failed gateway delete is only logged, never thrown).
    VolumeInfo srcData = Mockito.mock(VolumeInfo.class);
    Host host = Mockito.mock(Host.class);
    when(host.getId()).thenReturn(1L);
    String srcVolumePath = "bec0ba7700000007:vol-11-6aef-10ee";
    DataStore srcStore = Mockito.mock(DataStore.class);
    when(srcStore.getId()).thenReturn(1L);
    DataTO volumeTO = Mockito.mock(DataTO.class);
    when(srcData.getDataStore()).thenReturn(srcStore);
    when(srcData.getTO()).thenReturn(volumeTO);
    when(volumeTO.getPath()).thenReturn(srcVolumePath);
    // A connected SDC exists, so the unmap + delete path is exercised.
    String sdcId = "7332760565f6340f";
    doReturn(sdcId).when(scaleIOPrimaryDataStoreDriver).getConnectedSdc(1L, 1L);
    ScaleIOGatewayClient client = Mockito.mock(ScaleIOGatewayClient.class);
    doReturn(client).when(scaleIOPrimaryDataStoreDriver)
            .getScaleIOClient(any());
    doReturn(true).when(client).unmapVolumeFromSdc(any(), any());
    when(client.deleteVolume(any())).thenReturn(false);
    scaleIOPrimaryDataStoreDriver.deleteSourceVolumeAfterSuccessfulBlockCopy(srcData, host);
}
@Test
public void deleteSourceVolumeFailureScenarioWhenNoSDCisFound() {
    // When no connected SDC is found for the host, cleanup must still complete
    // without throwing. (Removed an unused local `sdcId` left over from a copy
    // of the previous test.)
    VolumeInfo srcData = Mockito.mock(VolumeInfo.class);
    Host host = Mockito.mock(Host.class);
    when(host.getId()).thenReturn(1L);
    String srcVolumePath = "bec0ba7700000007:vol-11-6aef-10ee";
    DataStore srcStore = Mockito.mock(DataStore.class);
    when(srcStore.getId()).thenReturn(1L);
    DataTO volumeTO = Mockito.mock(DataTO.class);
    when(srcData.getDataStore()).thenReturn(srcStore);
    when(srcData.getTO()).thenReturn(volumeTO);
    when(volumeTO.getPath()).thenReturn(srcVolumePath);
    doReturn(null).when(scaleIOPrimaryDataStoreDriver).getConnectedSdc(1L, 1L);
    scaleIOPrimaryDataStoreDriver.deleteSourceVolumeAfterSuccessfulBlockCopy(srcData, host);
}
@Test
public void testCopyOfflineVolume() {
    // Happy path: an endpoint is resolved for the destination host, the copy
    // command is dispatched to it, and the endpoint's Answer is returned as-is.
    when(configDao.getValue(Config.CopyVolumeWait.key())).thenReturn("3600");
    DataObject srcData = Mockito.mock(DataObject.class);
    DataTO srcDataTO = Mockito.mock(DataTO.class);
    when(srcData.getTO()).thenReturn(srcDataTO);
    DataObject destData = Mockito.mock(DataObject.class);
    DataTO destDataTO = Mockito.mock(DataTO.class);
    when(destData.getTO()).thenReturn(destDataTO);
    Host destHost = Mockito.mock(Host.class);
    doReturn(false).when(scaleIOPrimaryDataStoreDriver).anyVolumeRequiresEncryption(srcData, destData);
    PowerMockito.mockStatic(RemoteHostEndPoint.class);
    RemoteHostEndPoint ep = Mockito.mock(RemoteHostEndPoint.class);
    when(RemoteHostEndPoint.getHypervisorHostEndPoint(destHost)).thenReturn(ep);
    Answer expectedAnswer = Mockito.mock(Answer.class);
    when(ep.sendMessage(any())).thenReturn(expectedAnswer);
    Answer actualAnswer = scaleIOPrimaryDataStoreDriver.copyOfflineVolume(srcData, destData, destHost);
    // JUnit convention: expected value first, actual second. The original
    // named the actual result "expectedAnswer" and swapped the arguments,
    // which produces misleading failure messages.
    Assert.assertEquals(expectedAnswer, actualAnswer);
}
@Test
public void testCopyOfflineVolumeFailureWhenNoEndpointFound() {
    // When no remote endpoint can be resolved for the destination host, the
    // driver must return a failed Answer rather than throwing.
    when(configDao.getValue(Config.CopyVolumeWait.key())).thenReturn("3600");
    DataObject srcData = Mockito.mock(DataObject.class);
    DataTO srcDataTO = Mockito.mock(DataTO.class);
    when(srcData.getTO()).thenReturn(srcDataTO);
    DataObject destData = Mockito.mock(DataObject.class);
    DataTO destDataTO = Mockito.mock(DataTO.class);
    when(destData.getTO()).thenReturn(destDataTO);
    Host destHost = Mockito.mock(Host.class);
    doReturn(false).when(scaleIOPrimaryDataStoreDriver).anyVolumeRequiresEncryption(srcData, destData);
    PowerMockito.mockStatic(RemoteHostEndPoint.class);
    when(RemoteHostEndPoint.getHypervisorHostEndPoint(destHost)).thenReturn(null);
    Answer answer = scaleIOPrimaryDataStoreDriver.copyOfflineVolume(srcData, destData, destHost);
    // assertFalse states the intent directly, unlike assertEquals(false, ...).
    Assert.assertFalse(answer.getResult());
}
}

View File

@ -30,9 +30,9 @@ REV="X.Y"
CODENAME="" CODENAME=""
function get_from_redhat_release { function get_from_redhat_release {
DIST=`cat /etc/redhat-release | awk '{print $1}'` DIST=`cat /etc/redhat-release | awk -F 'release' '{print $1}'`
CODENAME=`cat /etc/redhat-release | sed s/.*\(// | sed s/\)//` CODENAME=`cat /etc/redhat-release | sed s/.*\(// | sed s/\)//`
REV=`cat /etc/redhat-release | awk '{print $3,$4}' | grep -o "[0-9.]*"` REV=`cat /etc/redhat-release | awk -F 'release' '{print $2}' | awk '{print $1}'`
} }
function get_from_lsb_release { function get_from_lsb_release {

View File

@ -471,8 +471,12 @@ public abstract class LibvirtServerDiscoverer extends DiscovererBase implements
String hostOsInCluster = oneHost.getDetail("Host.OS"); String hostOsInCluster = oneHost.getDetail("Host.OS");
String hostOs = ssCmd.getHostDetails().get("Host.OS"); String hostOs = ssCmd.getHostDetails().get("Host.OS");
if (!hostOsInCluster.equalsIgnoreCase(hostOs)) { if (!hostOsInCluster.equalsIgnoreCase(hostOs)) {
throw new IllegalArgumentException("Can't add host: " + firstCmd.getPrivateIpAddress() + " with hostOS: " + hostOs + " into a cluster," + String msg = String.format("host: %s with hostOS, \"%s\"into a cluster, in which there are \"%s\" hosts added", firstCmd.getPrivateIpAddress(), hostOs, hostOsInCluster);
"in which there are " + hostOsInCluster + " hosts added"); if (hostOs != null && hostOs.startsWith(hostOsInCluster)) {
s_logger.warn(String.format("Adding %s. This may or may not be ok!", msg));
} else {
throw new IllegalArgumentException(String.format("Can't add %s.", msg));
}
} }
} }

View File

@ -92,6 +92,7 @@ import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreDao; import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO; import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO;
import org.apache.cloudstack.storage.image.datastore.ImageStoreEntity; import org.apache.cloudstack.storage.image.datastore.ImageStoreEntity;
@ -326,6 +327,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
@Inject @Inject
protected ProjectManager projectManager; protected ProjectManager projectManager;
@Inject
protected StoragePoolDetailsDao storagePoolDetailsDao;
protected Gson _gson; protected Gson _gson;
@ -1098,8 +1101,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
if (isNotPossibleToResize(volume, diskOffering)) { if (isNotPossibleToResize(volume, diskOffering)) {
throw new InvalidParameterValueException( throw new InvalidParameterValueException(
"Failed to resize Root volume. The service offering of this Volume has been configured with a root disk size; " "Failed to resize Root volume. The service offering of this Volume has been configured with a root disk size; "
+ "on such case a Root Volume can only be resized when changing to another Service Offering with a Root disk size. " + "on such case a Root Volume can only be resized when changing to another Service Offering with a Root disk size. "
+ "For more details please check out the Official Resizing Volumes documentation."); + "For more details please check out the Official Resizing Volumes documentation.");
} }
// convert from bytes to GiB // convert from bytes to GiB
@ -1246,7 +1249,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
*/ */
if (currentSize > newSize && !shrinkOk) { if (currentSize > newSize && !shrinkOk) {
throw new InvalidParameterValueException("Going from existing size of " + currentSize + " to size of " + newSize + " would shrink the volume." throw new InvalidParameterValueException("Going from existing size of " + currentSize + " to size of " + newSize + " would shrink the volume."
+ "Need to sign off by supplying the shrinkok parameter with value of true."); + "Need to sign off by supplying the shrinkok parameter with value of true.");
} }
if (newSize > currentSize) { if (newSize > currentSize) {
@ -2966,10 +2969,6 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
vm = _vmInstanceDao.findById(instanceId); vm = _vmInstanceDao.findById(instanceId);
} }
if (vol.getPassphraseId() != null) {
throw new InvalidParameterValueException("Migration of encrypted volumes is unsupported");
}
// Check that Vm to which this volume is attached does not have VM Snapshots // Check that Vm to which this volume is attached does not have VM Snapshots
// OfflineVmwareMigration: consider if this is needed and desirable // OfflineVmwareMigration: consider if this is needed and desirable
if (vm != null && _vmSnapshotDao.findByVm(vm.getId()).size() > 0) { if (vm != null && _vmSnapshotDao.findByVm(vm.getId()).size() > 0) {
@ -2983,11 +2982,6 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
throw new InvalidParameterValueException("Live Migration of GPU enabled VM is not supported"); throw new InvalidParameterValueException("Live Migration of GPU enabled VM is not supported");
} }
StoragePoolVO storagePoolVO = _storagePoolDao.findById(vol.getPoolId());
if (storagePoolVO.getPoolType() == Storage.StoragePoolType.PowerFlex) {
throw new InvalidParameterValueException("Migrate volume of a running VM is unsupported on storage pool type " + storagePoolVO.getPoolType());
}
// Check if the underlying hypervisor supports storage motion. // Check if the underlying hypervisor supports storage motion.
Long hostId = vm.getHostId(); Long hostId = vm.getHostId();
if (hostId != null) { if (hostId != null) {
@ -3002,7 +2996,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
liveMigrateVolume = capabilities.isStorageMotionSupported(); liveMigrateVolume = capabilities.isStorageMotionSupported();
} }
if (liveMigrateVolume && HypervisorType.KVM.equals(host.getHypervisorType())) { StoragePoolVO storagePoolVO = _storagePoolDao.findById(vol.getPoolId());
if (liveMigrateVolume && HypervisorType.KVM.equals(host.getHypervisorType()) && !storagePoolVO.getPoolType().equals(Storage.StoragePoolType.PowerFlex)) {
StoragePoolVO destinationStoragePoolVo = _storagePoolDao.findById(storagePoolId); StoragePoolVO destinationStoragePoolVo = _storagePoolDao.findById(storagePoolId);
if (isSourceOrDestNotOnStorPool(storagePoolVO, destinationStoragePoolVo)) { if (isSourceOrDestNotOnStorPool(storagePoolVO, destinationStoragePoolVo)) {

View File

@ -6270,16 +6270,6 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
+ " hypervisors: [%s].", hypervisorType, HYPERVISORS_THAT_CAN_DO_STORAGE_MIGRATION_ON_NON_USER_VMS)); + " hypervisors: [%s].", hypervisorType, HYPERVISORS_THAT_CAN_DO_STORAGE_MIGRATION_ON_NON_USER_VMS));
} }
List<VolumeVO> vols = _volsDao.findByInstance(vm.getId());
if (vols.size() > 1) {
// OffLineVmwareMigration: data disks are not permitted, here!
if (vols.size() > 1 &&
// OffLineVmwareMigration: allow multiple disks for vmware
!HypervisorType.VMware.equals(hypervisorType)) {
throw new InvalidParameterValueException("Data disks attached to the vm, can not migrate. Need to detach data disks first");
}
}
// Check that Vm does not have VM Snapshots // Check that Vm does not have VM Snapshots
if (_vmSnapshotDao.findByVm(vmId).size() > 0) { if (_vmSnapshotDao.findByVm(vmId).size() > 0) {
throw new InvalidParameterValueException("VM's disk cannot be migrated, please remove all the VM Snapshots for this VM"); throw new InvalidParameterValueException("VM's disk cannot be migrated, please remove all the VM Snapshots for this VM");

View File

@ -26,6 +26,7 @@ import java.util.HashMap;
import java.util.LinkedHashMap; import java.util.LinkedHashMap;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.stream.Collectors;
import javax.inject.Inject; import javax.inject.Inject;
import javax.naming.ConfigurationException; import javax.naming.ConfigurationException;
@ -1065,11 +1066,12 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar
Map<String, String> details = _vmDetailsDao.listDetailsKeyPairs(vm.getId()); Map<String, String> details = _vmDetailsDao.listDetailsKeyPairs(vm.getId());
vm.setDetails(details); vm.setDetails(details);
DataStore secStore = _dataStoreMgr.getImageStoreWithFreeCapacity(dest.getDataCenter().getId()); List<DataStore> secStores= _dataStoreMgr.listImageStoresWithFreeCapacity(dest.getDataCenter().getId());
if (secStore == null) { if (CollectionUtils.isEmpty(secStores)) {
s_logger.warn(String.format("Unable to finalize virtual machine profile [%s] as it has no secondary storage available to satisfy storage needs for zone [%s].", profile.toString(), dest.getDataCenter().getUuid())); s_logger.warn(String.format("Unable to finalize virtual machine profile [%s] as it has no secondary storage available to satisfy storage needs for zone [%s].", profile.toString(), dest.getDataCenter().getUuid()));
return false; return false;
} }
Collections.shuffle(secStores);
final Map<String, String> sshAccessDetails = _networkMgr.getSystemVMAccessDetails(profile.getVirtualMachine()); final Map<String, String> sshAccessDetails = _networkMgr.getSystemVMAccessDetails(profile.getVirtualMachine());
final Map<String, String> ipAddressDetails = new HashMap<>(sshAccessDetails); final Map<String, String> ipAddressDetails = new HashMap<>(sshAccessDetails);
@ -1163,7 +1165,7 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar
if (dc.getDns2() != null) { if (dc.getDns2() != null) {
buf.append(" dns2=").append(dc.getDns2()); buf.append(" dns2=").append(dc.getDns2());
} }
String nfsVersion = imageStoreDetailsUtil != null ? imageStoreDetailsUtil.getNfsVersion(secStore.getId()) : null; String nfsVersion = imageStoreDetailsUtil != null ? imageStoreDetailsUtil.getNfsVersion(secStores.get(0).getId()) : null;
buf.append(" nfsVersion=").append(nfsVersion); buf.append(" nfsVersion=").append(nfsVersion);
buf.append(" keystore_password=").append(VirtualMachineGuru.getEncodedString(PasswordGenerator.generateRandomPassword(16))); buf.append(" keystore_password=").append(VirtualMachineGuru.getEncodedString(PasswordGenerator.generateRandomPassword(16)));
String bootArgs = buf.toString(); String bootArgs = buf.toString();
@ -1175,27 +1177,44 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar
s_logger.debug(String.format("Setting UseHttpsToUpload config on cmdline with [%s] value.", useHttpsToUpload)); s_logger.debug(String.format("Setting UseHttpsToUpload config on cmdline with [%s] value.", useHttpsToUpload));
buf.append(" useHttpsToUpload=").append(useHttpsToUpload); buf.append(" useHttpsToUpload=").append(useHttpsToUpload);
addSecondaryStorageServerAddressToBuffer(buf, secStore, vmName); addSecondaryStorageServerAddressToBuffer(buf, secStores, vmName);
return true; return true;
} }
/** /**
* Adds the secondary storage address to the buffer if it is in the following pattern: <protocol>//<address>/... * Adds the secondary storages address to the buffer if it is in the following pattern: <protocol>//<address>/...
*/ */
protected void addSecondaryStorageServerAddressToBuffer(StringBuilder buffer, DataStore dataStore, String vmName) { protected void addSecondaryStorageServerAddressToBuffer(StringBuilder buffer, List<DataStore> dataStores, String vmName) {
String url = dataStore.getTO().getUrl(); List<String> addresses = new ArrayList<>();
String[] urlArray = url.split("/"); for (DataStore dataStore: dataStores) {
String url = dataStore.getTO().getUrl();
String[] urlArray = url.split("/");
s_logger.debug(String.format("Found [%s] as secondary storage's URL for SSVM [%s].", url, vmName)); if (s_logger.isDebugEnabled()) {
if (ArrayUtils.getLength(urlArray) < 3) { s_logger.debug(String.format("Found [%s] as secondary storage [%s] URL for SSVM [%s].", dataStore.getName(), url, vmName));
s_logger.debug(String.format("Could not retrieve secondary storage address from URL [%s] of SSVM [%s].", url, vmName)); }
if (ArrayUtils.getLength(urlArray) < 3) {
if (s_logger.isDebugEnabled()) {
s_logger.debug(String.format("Could not retrieve secondary storage [%s] address from URL [%s] of SSVM [%s].", dataStore.getName(), url, vmName));
}
continue;
}
String address = urlArray[2];
s_logger.info(String.format("Using [%s] as address of secondary storage [%s] of SSVM [%s].", address, dataStore.getName(), vmName));
if (!addresses.contains(address)) {
addresses.add(address);
}
}
if (addresses.isEmpty()) {
if (s_logger.isDebugEnabled()) {
s_logger.debug(String.format("No address found for the secondary storages: [%s] of SSVM: [%s]", StringUtils.join(dataStores.stream().map(DataStore::getName).collect(Collectors.toList()), ","), vmName));
}
return; return;
} }
buffer.append(" secondaryStorageServerAddress=").append(StringUtils.join(addresses, ","));
String address = urlArray[2];
s_logger.info(String.format("Using [%s] as address of secondary storage of SSVM [%s].", address, vmName));
buffer.append(" secondaryStorageServerAddress=").append(address);
} }
@Override @Override

View File

@ -0,0 +1,89 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.secondarystorage;
import java.security.SecureRandom;
import java.util.ArrayList;
import java.util.List;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.commons.lang3.StringUtils;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.InjectMocks;
import org.mockito.Mockito;
import org.mockito.Spy;
import org.mockito.junit.MockitoJUnitRunner;
import com.cloud.agent.api.to.DataStoreTO;
import com.cloud.utils.net.NetUtils;
import com.google.common.net.InetAddresses;
@RunWith(MockitoJUnitRunner.class)
public class SecondaryStorageManagerImplTest {
private final SecureRandom secureRandom = new SecureRandom();
@Spy
@InjectMocks
private SecondaryStorageManagerImpl secondaryStorageManager;
private List<DataStore> mockDataStoresForTestAddSecondaryStorageServerAddressToBuffer(List<String> addresses) {
List<DataStore> dataStores = new ArrayList<>();
for (String address: addresses) {
DataStore dataStore = Mockito.mock(DataStore.class);
DataStoreTO dataStoreTO = Mockito.mock(DataStoreTO.class);
Mockito.when(dataStoreTO.getUrl()).thenReturn(NetUtils.isValidIp4(address) ? String.format("http://%s", address) : address);
Mockito.when(dataStore.getTO()).thenReturn(dataStoreTO);
dataStores.add(dataStore);
}
return dataStores;
}
private void runAddSecondaryStorageServerAddressToBufferTest(List<String> addresses, String expected) {
List<DataStore> dataStores = mockDataStoresForTestAddSecondaryStorageServerAddressToBuffer(addresses);
StringBuilder builder = new StringBuilder();
secondaryStorageManager.addSecondaryStorageServerAddressToBuffer(builder, dataStores, "VM");
String result = builder.toString();
result = result.contains("=") ? result.split("=")[1] : null;
Assert.assertEquals(expected, result);
}
@Test
public void testAddSecondaryStorageServerAddressToBufferDifferentAddress() {
String randomIp1 = InetAddresses.fromInteger(secureRandom.nextInt()).getHostAddress();
String randomIp2 = InetAddresses.fromInteger(secureRandom.nextInt()).getHostAddress();
List<String> addresses = List.of(randomIp1, randomIp2);
String expected = StringUtils.join(addresses, ",");
runAddSecondaryStorageServerAddressToBufferTest(addresses, expected);
}
@Test
public void testAddSecondaryStorageServerAddressToBufferSameAddress() {
String randomIp1 = InetAddresses.fromInteger(secureRandom.nextInt()).getHostAddress();
List<String> addresses = List.of(randomIp1, randomIp1);
runAddSecondaryStorageServerAddressToBufferTest(addresses, randomIp1);
}
@Test
public void testAddSecondaryStorageServerAddressToBufferInvalidAddress() {
String randomIp1 = InetAddresses.fromInteger(secureRandom.nextInt()).getHostAddress();
String randomIp2 = InetAddresses.fromInteger(secureRandom.nextInt()).getHostAddress();
List<String> addresses = List.of(randomIp1, "garbage", randomIp2);
runAddSecondaryStorageServerAddressToBufferTest(addresses, StringUtils.join(List.of(randomIp1, randomIp2), ","));
}
}

View File

@ -101,7 +101,7 @@ then
else else
echo "ERROR: Storage $storage is not currently mounted" echo "ERROR: Storage $storage is not currently mounted"
echo "Verifying if we can at least ping the storage" echo "Verifying if we can at least ping the storage"
STORAGE_ADDRESS=`grep "secondaryStorageServerAddress" $CMDLINE | sed -E 's/.*secondaryStorageServerAddress=([^ ]*).*/\1/g'` STORAGE_ADDRESSES=`grep "secondaryStorageServerAddress" $CMDLINE | sed -E 's/.*secondaryStorageServerAddress=([^ ]*).*/\1/g'`
if [[ -z "$STORAGE_ADDRESS" ]] if [[ -z "$STORAGE_ADDRESS" ]]
then then
@ -117,16 +117,21 @@ else
route -n route -n
fi fi
else else
echo "Storage address is $STORAGE_ADDRESS, trying to ping it" echo "Storage address(s): $STORAGE_ADDRESSES, trying to ping"
ping -c 2 $STORAGE_ADDRESS STORAGE_ADDRESS_LIST=$(echo $STORAGE_ADDRESSES | tr ",")
if [ $? -eq 0 ] for STORAGE_ADDRESS in $STORAGE_ADDRESS_LIST
then do
echo "Good: Can ping $storage storage address" echo "Pinging storage address: $STORAGE_ADDRESS"
else ping -c 2 $STORAGE_ADDRESS
echo "WARNING: Cannot ping $storage storage address" if [ $? -eq 0 ]
echo routing table follows then
route -n echo "Good: Can ping $storage storage address"
fi else
echo "WARNING: Cannot ping $storage storage address"
echo routing table follows
route -n
fi
done
fi fi
fi fi

View File

@ -1144,6 +1144,249 @@ class TestScaleIOVolumes(cloudstackTestCase):
test_virtual_machine.delete(self.apiClient, True) test_virtual_machine.delete(self.apiClient, True)
@attr(tags=['advanced', 'migration'], required_hardware=False)
def test_11_live_migrate_volume_to_same_instance_pool(self):
'''Migrate volume to the same instance pool'''
if not TestData.migrationTests:
self.skipTest("Volume migration tests not enabled, skipping test")
#######################################
# STEP 1: Create VM and Start VM #
#######################################
test_virtual_machine = VirtualMachine.create(
self.apiClient,
self.testdata[TestData.virtualMachine3],
accountid=self.account.name,
zoneid=self.zone.id,
serviceofferingid=self.compute_offering.id,
templateid=self.template.id,
domainid=self.domain.id,
startvm=False
)
TestScaleIOVolumes._start_vm(test_virtual_machine)
#######################################
# STEP 2: Create vol and attach to VM #
#######################################
new_volume = Volume.create(
self.apiClient,
self.testdata[TestData.volume_3],
account=self.account.name,
domainid=self.domain.id,
zoneid=self.zone.id,
diskofferingid=self.disk_offering_same_inst.id
)
volume_to_delete_later = new_volume
new_volume = test_virtual_machine.attach_volume(
self.apiClient,
new_volume
)
vm = self._get_vm(test_virtual_machine.id)
self.assertEqual(
new_volume.virtualmachineid,
vm.id,
"Check if attached to virtual machine"
)
self.assertEqual(
vm.state.lower(),
'running',
str(vm.state)
)
#######################################
# STEP 3: Migrate volume #
#######################################
pools = StoragePool.listForMigration(
self.apiClient,
id=new_volume.id
)
if not pools:
self.skipTest("No suitable storage pools found for volume migration, skipping test")
self.assertEqual(
validateList(pools)[0],
PASS,
"Invalid pool response from findStoragePoolsForMigration API"
)
pool = pools[0]
self.debug("Migrating Volume-ID: %s to Same Instance Pool: %s" % (new_volume.id, pool.id))
try:
Volume.migrate(
self.apiClient,
volumeid=new_volume.id,
storageid=pool.id
)
except Exception as e:
self.fail("Volume migration failed with error %s" % e)
#######################################
# STEP 4: Detach and delete volume #
#######################################
new_volume = test_virtual_machine.detach_volume(
self.apiClient,
new_volume
)
self.assertEqual(
new_volume.virtualmachineid,
None,
"Check if attached to virtual machine"
)
volume_to_delete_later.delete(self.apiClient)
list_volumes_response = list_volumes(
self.apiClient,
id=new_volume.id
)
self.assertEqual(
list_volumes_response,
None,
"Check volume was deleted"
)
#######################################
# STEP 4: Delete VM #
#######################################
test_virtual_machine.delete(self.apiClient, True)
@attr(tags=['advanced', 'migration'], required_hardware=False)
def test_12_migrate_volume_to_distinct_instance_pool(self):
'''Migrate volume to distinct instance pool'''
if not TestData.migrationTests:
self.skipTest("Volume migration tests not enabled, skipping test")
#######################################
# STEP 1: Create VM and Start VM #
#######################################
test_virtual_machine = VirtualMachine.create(
self.apiClient,
self.testdata[TestData.virtualMachine4],
accountid=self.account.name,
zoneid=self.zone.id,
serviceofferingid=self.compute_offering.id,
templateid=self.template.id,
domainid=self.domain.id,
startvm=False
)
TestScaleIOVolumes._start_vm(test_virtual_machine)
#######################################
# STEP 2: Create vol and attach to VM #
#######################################
new_volume = Volume.create(
self.apiClient,
self.testdata[TestData.volume_4],
account=self.account.name,
domainid=self.domain.id,
zoneid=self.zone.id,
diskofferingid=self.disk_offering_distinct_inst.id
)
volume_to_delete_later = new_volume
new_volume = test_virtual_machine.attach_volume(
self.apiClient,
new_volume
)
vm = self._get_vm(test_virtual_machine.id)
self.assertEqual(
new_volume.virtualmachineid,
vm.id,
"Check if attached to virtual machine"
)
self.assertEqual(
vm.state.lower(),
'running',
str(vm.state)
)
#######################################
# STEP 3: Migrate volume #
#######################################
pools = StoragePool.listForMigration(
self.apiClient,
id=new_volume.id
)
if not pools:
self.skipTest("No suitable storage pools found for volume migration, skipping test")
self.assertEqual(
validateList(pools)[0],
PASS,
"Invalid pool response from findStoragePoolsForMigration API"
)
pool = pools[0]
self.debug("Migrating Volume-ID: %s to Distinct Instance Pool: %s" % (new_volume.id, pool.id))
try:
Volume.migrate(
self.apiClient,
volumeid=new_volume.id,
storageid=pool.id
)
except Exception as e:
self.fail("Volume migration failed with error %s" % e)
#######################################
# STEP 4: Detach and delete volume #
#######################################
new_volume = test_virtual_machine.detach_volume(
self.apiClient,
new_volume
)
self.assertEqual(
new_volume.virtualmachineid,
None,
"Check if attached to virtual machine"
)
volume_to_delete_later.delete(self.apiClient)
list_volumes_response = list_volumes(
self.apiClient,
id=new_volume.id
)
self.assertEqual(
list_volumes_response,
None,
"Check volume was deleted"
)
#######################################
# STEP 4: Delete VM #
#######################################
test_virtual_machine.delete(self.apiClient, True)
def _create_vm_using_template_and_destroy_vm(self, template): def _create_vm_using_template_and_destroy_vm(self, template):
vm_name = "VM-%d" % random.randint(0, 100) vm_name = "VM-%d" % random.randint(0, 100)

View File

@ -88,6 +88,13 @@
</a-select-option> </a-select-option>
</a-select> </a-select>
</a-form-item> </a-form-item>
<a-form-item name="considerlasthost" ref="considerlasthost">
<template #label>
<tooltip-label :title="$t('label.considerlasthost')" :tooltip="apiParams.considerlasthost.description"/>
</template>
<a-switch v-model:checked="form.considerlasthost" />
</a-form-item>
</div> </div>
<a-form-item name="bootintosetup" ref="bootintosetup" v-if="resource.hypervisor === 'VMware'"> <a-form-item name="bootintosetup" ref="bootintosetup" v-if="resource.hypervisor === 'VMware'">
@ -97,13 +104,6 @@
<a-switch v-model:checked="form.bootintosetup" /> <a-switch v-model:checked="form.bootintosetup" />
</a-form-item> </a-form-item>
<a-form-item name="considerlasthost" ref="considerlasthost">
<template #label>
<tooltip-label :title="$t('label.considerlasthost')" :tooltip="apiParams.considerlasthost.description"/>
</template>
<a-switch v-model:checked="form.considerlasthost" />
</a-form-item>
<div :span="24" class="action-button"> <div :span="24" class="action-button">
<a-button @click="closeAction">{{ $t('label.cancel') }}</a-button> <a-button @click="closeAction">{{ $t('label.cancel') }}</a-button>
<a-button :loading="loading" ref="submit" type="primary" @click="handleSubmit">{{ $t('label.ok') }}</a-button> <a-button :loading="loading" ref="submit" type="primary" @click="handleSubmit">{{ $t('label.ok') }}</a-button>

View File

@ -416,7 +416,7 @@ public class UriUtils {
List<String> urls = metalinkUrls.get("url"); List<String> urls = metalinkUrls.get("url");
boolean validUrl = false; boolean validUrl = false;
for (String u : urls) { for (String u : urls) {
if (url.endsWith("torrent")) { if (u.endsWith("torrent")) {
continue; continue;
} }
try { try {