Volume encryption support for StorPool plug-in (#7539)

Supported Virtual machine operations:
- live migration of VM to another host
- virtual machine snapshots (group snapshot without memory)
- revert VM snapshot
- delete VM snapshot
Supported Volume operations:
- attach/detach volume
- live migrate volume between two StorPool primary storages
- volume snapshot
- delete snapshot
- revert snapshot
Author: slavkap, 2023-06-26 12:24:51 +03:00 (committed by GitHub)
Parent commit: c809201247
Commit: faaf72b1a4
12 changed files with 1126 additions and 27 deletions


@ -149,7 +149,7 @@ public class Storage {
ManagedNFS(true, false, false),
Linstor(true, true, false),
DatastoreCluster(true, true, false), // for VMware, to abstract pool of clusters
StorPool(true, true, false);
StorPool(true, true, true);
private final boolean shared;
private final boolean overprovisioning;


@ -85,6 +85,8 @@ public interface HostDao extends GenericDao<HostVO, Long>, StateDao<Status, Stat
List<HostVO> findByClusterId(Long clusterId);
List<HostVO> findByClusterIdAndEncryptionSupport(Long clusterId);
/**
* Returns hosts that are 'Up' and 'Enabled' from the given Data Center/Zone
*/


@ -1150,6 +1150,32 @@ public class HostDaoImpl extends GenericDaoBase<HostVO, Long> implements HostDao
return listBy(sc);
}
@Override
public List<HostVO> findByClusterIdAndEncryptionSupport(Long clusterId) {
SearchBuilder<DetailVO> hostCapabilitySearch = _detailsDao.createSearchBuilder();
DetailVO tagEntity = hostCapabilitySearch.entity();
hostCapabilitySearch.and("capability", tagEntity.getName(), SearchCriteria.Op.EQ);
hostCapabilitySearch.and("value", tagEntity.getValue(), SearchCriteria.Op.EQ);
SearchBuilder<HostVO> hostSearch = createSearchBuilder();
HostVO entity = hostSearch.entity();
hostSearch.and("cluster", entity.getClusterId(), SearchCriteria.Op.EQ);
hostSearch.and("status", entity.getStatus(), SearchCriteria.Op.EQ);
hostSearch.join("hostCapabilitySearch", hostCapabilitySearch, entity.getId(), tagEntity.getHostId(), JoinBuilder.JoinType.INNER);
SearchCriteria<HostVO> sc = hostSearch.create();
sc.setJoinParameters("hostCapabilitySearch", "value", Boolean.toString(true));
sc.setJoinParameters("hostCapabilitySearch", "capability", Host.HOST_VOLUME_ENCRYPTION);
if (clusterId != null) {
sc.setParameters("cluster", clusterId);
}
sc.setParameters("status", Status.Up.toString());
sc.setParameters("resourceState", ResourceState.Enabled.toString());
return listBy(sc);
}
@Override
public HostVO findByPublicIp(String publicIp) {
SearchCriteria<HostVO> sc = PublicIpAddressSearch.create();


@ -342,3 +342,12 @@ Max IOPS are kept in StorPool's volumes with the help of custom service offering
corresponding system disk offering.
CloudStack has no way to specify max BW; whether being able to specify only max BW would be sufficient is an open question.
## Supported operations for Volume encryption
Supported Virtual machine operations - live migration of VM to another host, virtual machine snapshots (group snapshot without memory), revert VM snapshot, delete VM snapshot
Supported Volume operations - attach/detach volume, live migrate volume between two StorPool primary storages, volume snapshot, delete snapshot, revert snapshot
Note: volume snapshots are allowed only when `sp.bypass.secondary.storage` is set to `true`, which means the snapshots are not backed up to secondary storage.
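
On the management-server side this note maps to a simple configuration check (see the VolumeApiServiceImpl hunk near the end of this diff). A minimal sketch of that gate, reusing the `_configDao` lookup and `StoragePoolType` check shown there (illustration only, not part of the commit):

// Snapshots of an encrypted volume attached to a running VM are only allowed when the
// volume lives on StorPool and snapshots stay on primary storage.
boolean bypassSecondary = BooleanUtils.toBoolean(_configDao.getValue("sp.bypass.secondary.storage"));
boolean snapshotOnStorPoolOnly = volume.getStoragePoolType() == StoragePoolType.StorPool && bypassSecondary;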


@ -0,0 +1,47 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package com.cloud.agent.api.storage;
import org.apache.cloudstack.storage.to.VolumeObjectTO;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.Command;
public class StorPoolSetVolumeEncryptionAnswer extends Answer {
private VolumeObjectTO volume;
public StorPoolSetVolumeEncryptionAnswer(Command command, boolean success, String details) {
super(command, success, details);
}
public StorPoolSetVolumeEncryptionAnswer(VolumeObjectTO volume) {
super();
this.volume = volume;
this.result = true;
}
public VolumeObjectTO getVolume() {
return volume;
}
public void setVolume(VolumeObjectTO volume) {
this.volume = volume;
}
}


@ -0,0 +1,70 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package com.cloud.agent.api.storage;
import org.apache.cloudstack.storage.command.StorageSubSystemCommand;
import org.apache.cloudstack.storage.to.VolumeObjectTO;
public class StorPoolSetVolumeEncryptionCommand extends StorageSubSystemCommand {
private boolean isDataDisk;
private VolumeObjectTO volumeObjectTO;
private String srcVolumeName;
public StorPoolSetVolumeEncryptionCommand(VolumeObjectTO volumeObjectTO, String srcVolumeName,
boolean isDataDisk) {
this.volumeObjectTO = volumeObjectTO;
this.srcVolumeName = srcVolumeName;
this.isDataDisk = isDataDisk;
}
public VolumeObjectTO getVolumeObjectTO() {
return volumeObjectTO;
}
public void setVolumeObjectTO(VolumeObjectTO volumeObjectTO) {
this.volumeObjectTO = volumeObjectTO;
}
public void setIsDataDisk(boolean isDataDisk) {
this.isDataDisk = isDataDisk;
}
public boolean isDataDisk() {
return isDataDisk;
}
public String getSrcVolumeName() {
return srcVolumeName;
}
public void setSrcVolumeName(String srcVolumeName) {
this.srcVolumeName = srcVolumeName;
}
@Override
public void setExecuteInSequence(boolean inSeq) {
inSeq = false;
}
@Override
public boolean executeInSequence() {
return false;
}
}
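
A brief usage sketch (illustration only, not part of the commit) showing how this command/answer pair is driven from the management server, mirroring the StorPool driver hunk further down; `hostDao`, `clusterId`, `volume` (a VolumeObjectTO) and `srcVolumeName` are assumed to be in scope as they are in the driver:

// Pick an Up/Enabled host that reports volume-encryption support and send the command to it.
List<HostVO> hosts = hostDao.findByClusterIdAndEncryptionSupport(clusterId);
if (CollectionUtils.isNotEmpty(hosts)) {
    EndPoint ep = RemoteHostEndPoint.getHypervisorHostEndPoint(hosts.get(0));
    StorPoolSetVolumeEncryptionCommand cmd = new StorPoolSetVolumeEncryptionCommand(volume, srcVolumeName, true);
    StorPoolSetVolumeEncryptionAnswer answer = (StorPoolSetVolumeEncryptionAnswer) ep.sendMessage(cmd);
    if (answer.getResult()) {
        // The returned VolumeObjectTO now carries the path and LUKS encrypt format of the prepared volume.
        VolumeObjectTO encrypted = answer.getVolume();
    }
}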


@ -0,0 +1,161 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package com.cloud.hypervisor.kvm.resource.wrapper;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.cloudstack.storage.to.PrimaryDataStoreTO;
import org.apache.cloudstack.storage.to.VolumeObjectTO;
import org.apache.cloudstack.utils.cryptsetup.CryptSetup;
import org.apache.cloudstack.utils.cryptsetup.CryptSetupException;
import org.apache.cloudstack.utils.cryptsetup.KeyFile;
import org.apache.cloudstack.utils.qemu.QemuImageOptions;
import org.apache.cloudstack.utils.qemu.QemuImg;
import org.apache.cloudstack.utils.qemu.QemuImgException;
import org.apache.cloudstack.utils.qemu.QemuImgFile;
import org.apache.cloudstack.utils.qemu.QemuObject;
import org.apache.commons.lang3.StringUtils;
import org.apache.log4j.Logger;
import org.libvirt.LibvirtException;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.storage.StorPoolSetVolumeEncryptionAnswer;
import com.cloud.agent.api.storage.StorPoolSetVolumeEncryptionCommand;
import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk;
import com.cloud.hypervisor.kvm.storage.KVMStoragePool;
import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager;
import com.cloud.hypervisor.kvm.storage.StorPoolStorageAdaptor;
import com.cloud.resource.CommandWrapper;
import com.cloud.resource.ResourceWrapper;
import com.cloud.utils.exception.CloudRuntimeException;
@ResourceWrapper(handles = StorPoolSetVolumeEncryptionCommand.class)
public class StorPoolSetVolumeEncryptionCommandWrapper extends
CommandWrapper<StorPoolSetVolumeEncryptionCommand, StorPoolSetVolumeEncryptionAnswer, LibvirtComputingResource> {
private static final Logger logger = Logger.getLogger(StorPoolSetVolumeEncryptionCommandWrapper.class);
@Override
public StorPoolSetVolumeEncryptionAnswer execute(StorPoolSetVolumeEncryptionCommand command,
LibvirtComputingResource serverResource) {
VolumeObjectTO volume = command.getVolumeObjectTO();
String srcVolumeName = command.getSrcVolumeName();
try {
StorPoolStorageAdaptor.attachOrDetachVolume("attach", "volume", volume.getPath());
KVMStoragePoolManager storagePoolMgr = serverResource.getStoragePoolMgr();
PrimaryDataStoreTO primaryStore = (PrimaryDataStoreTO) volume.getDataStore();
KVMStoragePool pool = storagePoolMgr.getStoragePool(primaryStore.getPoolType(), primaryStore.getUuid());
KVMPhysicalDisk disk = pool.getPhysicalDisk(volume.getPath());
if (command.isDataDisk()) {
encryptDataDisk(volume, disk);
} else {
disk = encryptRootDisk(command, volume, srcVolumeName, pool, disk);
}
logger.debug(String.format("StorPoolSetVolumeEncryptionCommandWrapper disk=%s", disk));
} catch (Exception e) {
logger.error("StorPoolSetVolumeEncryptionCommandWrapper failed: " + e.getMessage(), e);
return new StorPoolSetVolumeEncryptionAnswer(command, false, e.getMessage());
} finally {
StorPoolStorageAdaptor.attachOrDetachVolume("detach", "volume", volume.getPath());
volume.clearPassphrase();
}
return new StorPoolSetVolumeEncryptionAnswer(volume);
}
private KVMPhysicalDisk encryptRootDisk(StorPoolSetVolumeEncryptionCommand command, VolumeObjectTO volume,
String srcVolumeName, KVMStoragePool pool, KVMPhysicalDisk disk) {
StorPoolStorageAdaptor.attachOrDetachVolume("attach", "snapshot", srcVolumeName);
KVMPhysicalDisk srcVolume = pool.getPhysicalDisk(srcVolumeName);
disk = copyPhysicalDisk(srcVolume, disk, command.getWait() * 1000, null, volume.getPassphrase());
disk.setQemuEncryptFormat(QemuObject.EncryptFormat.LUKS);
disk.setFormat(QemuImg.PhysicalDiskFormat.RAW);
volume.setEncryptFormat(disk.getQemuEncryptFormat().toString());
StorPoolStorageAdaptor.attachOrDetachVolume("detach", "snapshot", srcVolumeName);
return disk;
}
private void encryptDataDisk(VolumeObjectTO volume, KVMPhysicalDisk disk) throws CryptSetupException {
CryptSetup crypt = new CryptSetup();
crypt.luksFormat(volume.getPassphrase(), CryptSetup.LuksType.LUKS, disk.getPath());
disk.setQemuEncryptFormat(QemuObject.EncryptFormat.LUKS);
disk.setFormat(QemuImg.PhysicalDiskFormat.RAW);
volume.setEncryptFormat(disk.getQemuEncryptFormat().toString());
}
private KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, KVMPhysicalDisk destDisk, int timeout,
byte[] srcPassphrase, byte[] dstPassphrase) {
logger.debug("Copy physical disk with size: " + disk.getSize() + ", virtualsize: " + disk.getVirtualSize()
+ ", format: " + disk.getFormat());
destDisk.setVirtualSize(disk.getVirtualSize());
destDisk.setSize(disk.getSize());
QemuImg qemu = null;
QemuImgFile srcQemuFile = null;
QemuImgFile destQemuFile = null;
String srcKeyName = "sec0";
String destKeyName = "sec1";
List<QemuObject> qemuObjects = new ArrayList<>();
Map<String, String> options = new HashMap<>();
try (KeyFile srcKey = new KeyFile(srcPassphrase); KeyFile dstKey = new KeyFile(dstPassphrase)) {
qemu = new QemuImg(timeout, true, false);
String srcPath = disk.getPath();
String destPath = destDisk.getPath();
QemuImageOptions qemuImageOpts = new QemuImageOptions(srcPath);
srcQemuFile = new QemuImgFile(srcPath, disk.getFormat());
destQemuFile = new QemuImgFile(destPath);
if (srcKey.isSet()) {
qemuObjects.add(QemuObject.prepareSecretForQemuImg(disk.getFormat(), disk.getQemuEncryptFormat(),
srcKey.toString(), srcKeyName, options));
qemuImageOpts = new QemuImageOptions(disk.getFormat(), srcPath, srcKeyName);
}
if (dstKey.isSet()) {
qemu.setSkipZero(false);
destDisk.setFormat(QemuImg.PhysicalDiskFormat.RAW);
destQemuFile.setFormat(QemuImg.PhysicalDiskFormat.LUKS);
qemuObjects.add(QemuObject.prepareSecretForQemuImg(destDisk.getFormat(), QemuObject.EncryptFormat.LUKS,
dstKey.toString(), destKeyName, options));
destDisk.setQemuEncryptFormat(QemuObject.EncryptFormat.LUKS);
}
qemu.convert(srcQemuFile, destQemuFile, options, qemuObjects, qemuImageOpts, null, true);
logger.debug("Successfully converted source disk image " + srcQemuFile.getFileName()
+ " to StorPool volume: " + destDisk.getPath());
} catch (QemuImgException | LibvirtException | IOException e) {
String errMsg = String.format("Unable to convert/copy from %s to %s, due to: %s", disk.getName(),
destDisk.getName(), ((StringUtils.isEmpty(e.getMessage())) ? "an unknown error" : e.getMessage()));
logger.error(errMsg);
throw new CloudRuntimeException(errMsg, e);
}
return destDisk;
}
}


@ -26,12 +26,15 @@ import com.cloud.agent.api.storage.StorPoolCopyVolumeToSecondaryCommand;
import com.cloud.agent.api.storage.StorPoolDownloadTemplateCommand;
import com.cloud.agent.api.storage.StorPoolDownloadVolumeCommand;
import com.cloud.agent.api.storage.StorPoolResizeVolumeCommand;
import com.cloud.agent.api.storage.StorPoolSetVolumeEncryptionAnswer;
import com.cloud.agent.api.storage.StorPoolSetVolumeEncryptionCommand;
import com.cloud.agent.api.to.DataObjectType;
import com.cloud.agent.api.to.DataStoreTO;
import com.cloud.agent.api.to.DataTO;
import com.cloud.agent.api.to.StorageFilerTO;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.host.Host;
import com.cloud.host.HostVO;
import com.cloud.host.dao.HostDao;
import com.cloud.hypervisor.kvm.storage.StorPoolStorageAdaptor;
import com.cloud.server.ResourceTag;
@ -93,9 +96,12 @@ import org.apache.cloudstack.storage.to.SnapshotObjectTO;
import org.apache.cloudstack.storage.to.TemplateObjectTO;
import org.apache.cloudstack.storage.to.VolumeObjectTO;
import org.apache.cloudstack.storage.volume.VolumeObject;
import org.apache.commons.collections4.CollectionUtils;
import org.apache.log4j.Logger;
import javax.inject.Inject;
import java.util.List;
import java.util.Map;
public class StorPoolPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
@ -217,12 +223,12 @@ public class StorPoolPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
@Override
public void createAsync(DataStore dataStore, DataObject data, AsyncCompletionCallback<CreateCmdResult> callback) {
String path = null;
String err = null;
Answer answer;
if (data.getType() == DataObjectType.VOLUME) {
try {
VolumeInfo vinfo = (VolumeInfo)data;
String name = vinfo.getUuid();
Long size = vinfo.getSize();
Long size = vinfo.getPassphraseId() == null ? vinfo.getSize() : vinfo.getSize() + 2097152;
SpConnectionDesc conn = StorPoolUtil.getSpConnection(dataStore.getUuid(), dataStore.getId(), storagePoolDetailsDao, primaryStoreDao);
StorPoolUtil.spLog("StorpoolPrimaryDataStoreDriver.createAsync volume: name=%s, uuid=%s, isAttached=%s vm=%s, payload=%s, template: %s", vinfo.getName(), vinfo.getUuid(), vinfo.isAttachedVM(), vinfo.getAttachedVmName(), vinfo.getpayload(), conn.getTemplateName());
@ -231,30 +237,66 @@ public class StorPoolPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
String volumeName = StorPoolUtil.getNameFromResponse(resp, false);
path = StorPoolUtil.devPath(volumeName);
VolumeVO volume = volumeDao.findById(vinfo.getId());
volume.setPoolId(dataStore.getId());
volume.setPath(path);
volumeDao.update(volume.getId(), volume);
updateVolume(dataStore, path, vinfo);
if (vinfo.getPassphraseId() != null) {
VolumeObjectTO volume = updateVolumeObjectTO(vinfo, resp);
answer = createEncryptedVolume(dataStore, data, vinfo, size, volume, null, true);
} else {
answer = new Answer(null, true, null);
}
updateStoragePool(dataStore.getId(), size);
StorPoolUtil.spLog("StorpoolPrimaryDataStoreDriver.createAsync volume: name=%s, uuid=%s, isAttached=%s vm=%s, payload=%s, template: %s", volumeName, vinfo.getUuid(), vinfo.isAttachedVM(), vinfo.getAttachedVmName(), vinfo.getpayload(), conn.getTemplateName());
} else {
err = String.format("Could not create StorPool volume %s. Error: %s", name, resp.getError());
answer = new Answer(null, false, String.format("Could not create StorPool volume %s. Error: %s", name, resp.getError()));
}
} catch (Exception e) {
err = String.format("Could not create volume due to %s", e.getMessage());
answer = new Answer(null, false, String.format("Could not create volume due to %s", e.getMessage()));
}
} else {
err = String.format("Invalid object type \"%s\" passed to createAsync", data.getType());
answer = new Answer(null, false, String.format("Invalid object type \"%s\" passed to createAsync", data.getType()));
}
CreateCmdResult res = new CreateCmdResult(path, new Answer(null, err == null, err));
res.setResult(err);
CreateCmdResult res = new CreateCmdResult(path, answer);
res.setResult(answer.getDetails());
if (callback != null) {
callback.complete(res);
}
}
private void updateVolume(DataStore dataStore, String path, VolumeInfo vinfo) {
VolumeVO volume = volumeDao.findById(vinfo.getId());
volume.setPoolId(dataStore.getId());
volume.setPath(path);
volume.setPoolType(StoragePoolType.StorPool);
volumeDao.update(volume.getId(), volume);
}
private StorPoolSetVolumeEncryptionAnswer createEncryptedVolume(DataStore dataStore, DataObject data, VolumeInfo vinfo, Long size, VolumeObjectTO volume, String parentName, boolean isDataDisk) {
StorPoolSetVolumeEncryptionAnswer ans;
EndPoint ep = null;
if (parentName == null) {
ep = selector.select(data, vinfo.getPassphraseId() != null);
} else {
Long clusterId = StorPoolHelper.findClusterIdByGlobalId(parentName, clusterDao);
if (clusterId == null) {
ep = selector.select(data, vinfo.getPassphraseId() != null);
} else {
List<HostVO> hosts = hostDao.findByClusterIdAndEncryptionSupport(clusterId);
ep = CollectionUtils.isNotEmpty(hosts) ? RemoteHostEndPoint.getHypervisorHostEndPoint(hosts.get(0)) : ep;
}
}
if (ep == null) {
ans = new StorPoolSetVolumeEncryptionAnswer(null, false, "Could not find a host with volume encryption");
} else {
StorPoolSetVolumeEncryptionCommand cmd = new StorPoolSetVolumeEncryptionCommand(volume, parentName, isDataDisk);
ans = (StorPoolSetVolumeEncryptionAnswer) ep.sendMessage(cmd);
if (ans.getResult()) {
updateStoragePool(dataStore.getId(), size);
}
}
return ans;
}
@Override
public void resize(DataObject data, AsyncCompletionCallback<CreateCmdResult> callback) {
String path = null;
@ -623,30 +665,42 @@ public class StorPoolPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
// create volume from template on Storpool PRIMARY
TemplateInfo tinfo = (TemplateInfo)srcData;
VolumeInfo vinfo = (VolumeInfo)dstData;
VMTemplateStoragePoolVO templStoragePoolVO = StorPoolHelper.findByPoolTemplate(vinfo.getPoolId(), tinfo.getId());
final String parentName = templStoragePoolVO.getLocalDownloadPath() !=null ? StorPoolStorageAdaptor.getVolumeNameFromPath(templStoragePoolVO.getLocalDownloadPath(), true) : StorPoolStorageAdaptor.getVolumeNameFromPath(templStoragePoolVO.getInstallPath(), true);
VolumeInfo vinfo = (VolumeInfo) dstData;
VMTemplateStoragePoolVO templStoragePoolVO = StorPoolHelper.findByPoolTemplate(vinfo.getPoolId(),
tinfo.getId());
final String parentName = templStoragePoolVO.getLocalDownloadPath() != null
? StorPoolStorageAdaptor.getVolumeNameFromPath(templStoragePoolVO.getLocalDownloadPath(), true)
: StorPoolStorageAdaptor.getVolumeNameFromPath(templStoragePoolVO.getInstallPath(), true);
final String name = vinfo.getUuid();
SpConnectionDesc conn = StorPoolUtil.getSpConnection(vinfo.getDataStore().getUuid(), vinfo.getDataStore().getId(), storagePoolDetailsDao, primaryStoreDao);
SpConnectionDesc conn = StorPoolUtil.getSpConnection(vinfo.getDataStore().getUuid(),
vinfo.getDataStore().getId(), storagePoolDetailsDao, primaryStoreDao);
Long snapshotSize = templStoragePoolVO.getTemplateSize();
long size = vinfo.getSize();
boolean withoutEncryption = vinfo.getPassphraseId() == null;
long size = withoutEncryption ? vinfo.getSize() : vinfo.getSize() + 2097152;
if (snapshotSize != null && size < snapshotSize) {
StorPoolUtil.spLog(String.format("provided size is too small for snapshot. Provided %d, snapshot %d. Using snapshot size", size, snapshotSize));
size = snapshotSize;
size = withoutEncryption ? snapshotSize : snapshotSize + 2097152;
}
StorPoolUtil.spLog(String.format("volume size is: %d", size));
Long vmId = vinfo.getInstanceId();
SpApiResponse resp = StorPoolUtil.volumeCreate(name, parentName, size, getVMInstanceUUID(vmId),
getVcPolicyTag(vmId), "volume", vinfo.getMaxIops(), conn);
SpApiResponse resp = StorPoolUtil.volumeCreate(name, parentName, size, getVMInstanceUUID(vmId), getVcPolicyTag(vmId),
"volume", vinfo.getMaxIops(), conn);
if (resp.getError() == null) {
updateStoragePool(dstData.getDataStore().getId(), vinfo.getSize());
updateVolumePoolType(vinfo);
VolumeObjectTO to = (VolumeObjectTO) vinfo.getTO();
to.setSize(vinfo.getSize());
to.setPath(StorPoolUtil.devPath(StorPoolUtil.getNameFromResponse(resp, false)));
answer = new CopyCmdAnswer(to);
if (withoutEncryption) {
VolumeObjectTO to = updateVolumeObjectTO(vinfo, resp);
answer = new CopyCmdAnswer(to);
} else {
VolumeObjectTO volume = updateVolumeObjectTO(vinfo, resp);
String snapshotPath = StorPoolUtil.devPath(parentName.split("~")[1]);
answer = createEncryptedVolume(dstData.getDataStore(), dstData, vinfo, size, volume, snapshotPath, false);
if (answer.getResult()) {
answer = new CopyCmdAnswer(((StorPoolSetVolumeEncryptionAnswer) answer).getVolume());
}
}
} else {
err = String.format("Could not create Storpool volume %s. Error: %s", name, resp.getError());
}
@ -775,6 +829,19 @@ public class StorPoolPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
callback.complete(res);
}
private void updateVolumePoolType(VolumeInfo vinfo) {
VolumeVO volumeVO = volumeDao.findById(vinfo.getId());
volumeVO.setPoolType(StoragePoolType.StorPool);
volumeDao.update(volumeVO.getId(), volumeVO);
}
private VolumeObjectTO updateVolumeObjectTO(VolumeInfo vinfo, SpApiResponse resp) {
VolumeObjectTO to = (VolumeObjectTO) vinfo.getTO();
to.setSize(vinfo.getSize());
to.setPath(StorPoolUtil.devPath(StorPoolUtil.getNameFromResponse(resp, false)));
return to;
}
/**
* Live migrate/copy volume from one StorPool storage to another
* @param srcData The source volume data


@ -292,6 +292,9 @@ public class StorPoolDataMotionStrategy implements DataMotionStrategy {
for (Map.Entry<VolumeInfo, DataStore> entry : volumeDataStoreMap.entrySet()) {
VolumeInfo srcVolumeInfo = entry.getKey();
if (srcVolumeInfo.getPassphraseId() != null) {
throw new CloudRuntimeException(String.format("Cannot live migrate encrypted volume [%s] to StorPool", srcVolumeInfo.getName()));
}
DataStore destDataStore = entry.getValue();
VolumeVO srcVolume = _volumeDao.findById(srcVolumeInfo.getId());


@ -160,6 +160,7 @@ import com.cloud.service.ServiceOfferingVO;
import com.cloud.service.dao.ServiceOfferingDao;
import com.cloud.service.dao.ServiceOfferingDetailsDao;
import com.cloud.storage.Storage.ImageFormat;
import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.storage.dao.DiskOfferingDao;
import com.cloud.storage.dao.SnapshotDao;
import com.cloud.storage.dao.StoragePoolTagsDao;
@ -2983,6 +2984,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
}
boolean liveMigrateVolume = false;
boolean srcAndDestOnStorPool = false;
Long instanceId = vol.getInstanceId();
Long srcClusterId = null;
VMInstanceVO vm = null;
@ -3026,6 +3028,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
"Therefore, to live migrate a volume between storage pools, one must migrate the VM to a different host as well to force the VM XML domain update. " +
"Use 'migrateVirtualMachineWithVolumes' instead.");
}
srcAndDestOnStorPool = isSourceAndDestOnStorPool(storagePoolVO, destinationStoragePoolVo);
}
}
@ -3039,6 +3042,10 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
}
}
if (vol.getPassphraseId() != null && !srcAndDestOnStorPool) {
throw new InvalidParameterValueException("Migration of encrypted volumes is unsupported");
}
if (vm != null &&
HypervisorType.VMware.equals(vm.getHypervisorType()) &&
State.Stopped.equals(vm.getState())) {
@ -3177,6 +3184,11 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
|| destinationStoragePoolVo.getPoolType() != Storage.StoragePoolType.StorPool;
}
private boolean isSourceAndDestOnStorPool(StoragePoolVO storagePoolVO, StoragePoolVO destinationStoragePoolVo) {
return storagePoolVO.getPoolType() == Storage.StoragePoolType.StorPool
&& destinationStoragePoolVo.getPoolType() == Storage.StoragePoolType.StorPool;
}
/**
* Retrieves the new disk offering UUID that might be sent to replace the current one in the volume being migrated.
* If no disk offering UUID is provided we return null. Otherwise, we perform the following checks.
@ -3476,7 +3488,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
throw new InvalidParameterValueException("VolumeId: " + volumeId + " is not in " + Volume.State.Ready + " state but " + volume.getState() + ". Cannot take snapshot.");
}
if (volume.getEncryptFormat() != null && volume.getAttachedVM() != null && volume.getAttachedVM().getState() != State.Stopped) {
boolean isSnapshotOnStorPoolOnly = volume.getStoragePoolType() == StoragePoolType.StorPool && BooleanUtils.toBoolean(_configDao.getValue("sp.bypass.secondary.storage"));
if (volume.getEncryptFormat() != null && volume.getAttachedVM() != null && volume.getAttachedVM().getState() != State.Stopped && !isSnapshotOnStorPoolOnly) {
s_logger.debug(String.format("Refusing to take snapshot of encrypted volume (%s) on running VM (%s)", volume, volume.getAttachedVM()));
throw new UnsupportedOperationException("Volume snapshots for encrypted volumes are not supported if VM is running");
}


@ -0,0 +1,681 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Import Local Modules
from marvin.codes import FAILED, KVM, PASS, XEN_SERVER, RUNNING
from nose.plugins.attrib import attr
from marvin.cloudstackTestCase import cloudstackTestCase
from marvin.lib.utils import random_gen, cleanup_resources, validateList, is_snapshot_on_nfs, isAlmostEqual
from marvin.lib.base import (Account,
Cluster,
Configurations,
ServiceOffering,
Snapshot,
StoragePool,
Template,
VirtualMachine,
VmSnapshot,
Volume,
SecurityGroup,
Role,
DiskOffering,
)
from marvin.lib.common import (get_zone,
get_domain,
get_template,
list_disk_offering,
list_hosts,
list_snapshots,
list_storage_pools,
list_volumes,
list_virtual_machines,
list_configurations,
list_service_offering,
list_clusters,
list_zones)
from marvin.cloudstackAPI import (listOsTypes,
listTemplates,
listHosts,
createTemplate,
createVolume,
getVolumeSnapshotDetails,
resizeVolume,
listZones,
migrateVirtualMachine,
findHostsForMigration,
revertSnapshot,
deleteSnapshot)
from marvin.sshClient import SshClient
import time
import pprint
import random
import subprocess
from storpool import spapi
from storpool import sptypes
import unittest
import uuid
from sp_util import (TestData, StorPoolHelper)
class TestEncryptedVolumes(cloudstackTestCase):
@classmethod
def setUpClass(cls):
super(TestEncryptedVolumes, cls).setUpClass()
try:
cls.setUpCloudStack()
except Exception:
cls.cleanUpCloudStack()
raise
@classmethod
def setUpCloudStack(cls):
testClient = super(TestEncryptedVolumes, cls).getClsTestClient()
cls._cleanup = []
config = cls.getClsConfig()
StorPoolHelper.logger = cls
cls.logger = StorPoolHelper.logger
cls.apiclient = testClient.getApiClient()
zone = config.zones[0]
assert zone is not None
cls.zone = list_zones(cls.apiclient, name=zone.name)[0]
cls.hostConfig = cls.config.__dict__["zones"][0].__dict__["pods"][0].__dict__["clusters"][0].__dict__["hosts"][0].__dict__
cls.spapi = spapi.Api(host=zone.spEndpoint, port=zone.spEndpointPort, auth=zone.spAuthToken, multiCluster=True)
cls.helper = StorPoolHelper()
cls.unsupportedHypervisor = False
cls.hypervisor = testClient.getHypervisorInfo()
if cls.hypervisor.lower() in ("hyperv", "lxc"):
cls.unsupportedHypervisor = True
return
cls.services = testClient.getParsedTestDataConfig()
# Get Zone, Domain and templates
cls.domain = get_domain(cls.apiclient)
td = TestData()
cls.testdata = td.testdata
cls.sp_template_1 = "ssd"
storpool_primary_storage = {
"name": cls.sp_template_1,
"zoneid": cls.zone.id,
"url": "SP_API_HTTP=%s:%s;SP_AUTH_TOKEN=%s;SP_TEMPLATE=%s" % (zone.spEndpoint, zone.spEndpointPort, zone.spAuthToken, cls.sp_template_1),
"scope": "zone",
"capacitybytes": 564325555333,
"capacityiops": 155466,
"hypervisor": "kvm",
"provider": "StorPool",
"tags": cls.sp_template_1
}
cls.storpool_primary_storage = storpool_primary_storage
storage_pool = list_storage_pools(
cls.apiclient,
name=storpool_primary_storage["name"]
)
if storage_pool is None:
newTemplate = sptypes.VolumeTemplateCreateDesc(name=storpool_primary_storage["name"], placeAll="virtual",
placeTail="virtual", placeHead="virtual", replication=1)
template_on_local = cls.spapi.volumeTemplateCreate(newTemplate)
storage_pool = StoragePool.create(cls.apiclient, storpool_primary_storage)
else:
storage_pool = storage_pool[0]
cls.primary_storage = storage_pool
storpool_service_offerings_ssd = {
"name": "ssd-encrypted",
"displaytext": "SP_CO_2 (Min IOPS = 10,000; Max IOPS = 15,000)",
"cpunumber": 1,
"cpuspeed": 500,
"memory": 512,
"storagetype": "shared",
"customizediops": False,
"hypervisorsnapshotreserve": 200,
"encryptroot": True,
"tags": cls.sp_template_1
}
service_offerings_ssd = list_service_offering(
cls.apiclient,
name=storpool_service_offerings_ssd["name"]
)
if service_offerings_ssd is None:
service_offerings_ssd = ServiceOffering.create(cls.apiclient, storpool_service_offerings_ssd, encryptroot=True)
else:
service_offerings_ssd = service_offerings_ssd[0]
cls.service_offering = service_offerings_ssd
cls.debug(pprint.pformat(cls.service_offering))
cls.sp_template_2 = "ssd2"
storpool_primary_storage2 = {
"name": cls.sp_template_2,
"zoneid": cls.zone.id,
"url": "SP_API_HTTP=%s:%s;SP_AUTH_TOKEN=%s;SP_TEMPLATE=%s" % (zone.spEndpoint, zone.spEndpointPort, zone.spAuthToken, cls.sp_template_2),
"scope": "zone",
"capacitybytes": 564325555333,
"capacityiops": 1554,
"hypervisor": "kvm",
"provider": "StorPool",
"tags": cls.sp_template_2
}
cls.storpool_primary_storage2 = storpool_primary_storage2
storage_pool = list_storage_pools(
cls.apiclient,
name=storpool_primary_storage2["name"]
)
if storage_pool is None:
newTemplate = sptypes.VolumeTemplateCreateDesc(name=storpool_primary_storage2["name"], placeAll="virtual",
placeTail="virtual", placeHead="virtual", replication=1)
template_on_local = cls.spapi.volumeTemplateCreate(newTemplate)
storage_pool = StoragePool.create(cls.apiclient, storpool_primary_storage2)
else:
storage_pool = storage_pool[0]
cls.primary_storage2 = storage_pool
storpool_service_offerings_ssd2 = {
"name": "ssd2-encrypted",
"displaytext": "SP_CO_2",
"cpunumber": 1,
"cpuspeed": 500,
"memory": 512,
"storagetype": "shared",
"customizediops": False,
"encryptroot": True,
"tags": cls.sp_template_2
}
service_offerings_ssd2 = list_service_offering(
cls.apiclient,
name=storpool_service_offerings_ssd2["name"]
)
if service_offerings_ssd2 is None:
service_offerings_ssd2 = ServiceOffering.create(cls.apiclient, storpool_service_offerings_ssd2, encryptroot=True)
else:
service_offerings_ssd2 = service_offerings_ssd2[0]
cls.service_offering2 = service_offerings_ssd2
cls.disk_offerings_ssd2_encrypted = list_disk_offering(
cls.apiclient,
name=cls.testdata[TestData.diskOfferingEncrypted2]["name"]
)
if cls.disk_offerings_ssd2_encrypted is None:
cls.disk_offerings_ssd2_encrypted = DiskOffering.create(cls.apiclient, cls.testdata[TestData.diskOfferingEncrypted2], encrypt=True)
else:
cls.disk_offerings_ssd2_encrypted = cls.disk_offerings_ssd2_encrypted[0]
cls.disk_offering_ssd_encrypted = list_disk_offering(
cls.apiclient,
name=cls.testdata[TestData.diskOfferingEncrypted]["name"]
)
if cls.disk_offering_ssd_encrypted is None:
cls.disk_offering_ssd_encrypted = DiskOffering.create(cls.apiclient, cls.testdata[TestData.diskOfferingEncrypted], encrypt=True)
else:
cls.disk_offering_ssd_encrypted = cls.disk_offering_ssd_encrypted[0]
template = get_template(
cls.apiclient,
cls.zone.id,
account="system"
)
if template == FAILED:
assert False, "get_template() failed to return template\
with description %s" % cls.services["ostype"]
cls.services["domainid"] = cls.domain.id
cls.services["small"]["zoneid"] = cls.zone.id
cls.services["templates"]["ostypeid"] = template.ostypeid
cls.services["zoneid"] = cls.zone.id
role = Role.list(cls.apiclient, name='Root Admin')
cls.account = Account.create(
cls.apiclient,
cls.services["account"],
domainid=cls.domain.id,
roleid=role[0].id
)
securitygroup = SecurityGroup.list(cls.apiclient, account=cls.account.name, domainid=cls.account.domainid)[0]
cls.helper.set_securityGroups(cls.apiclient, account=cls.account.name, domainid=cls.account.domainid,
id=securitygroup.id)
cls._cleanup.append(cls.account)
cls.volume_1 = Volume.create(
cls.apiclient,
{"diskname": "StorPoolEncryptedDiskLiveMigrate"},
zoneid=cls.zone.id,
diskofferingid=cls.disk_offering_ssd_encrypted.id,
account=cls.account.name,
domainid=cls.account.domainid,
)
cls.volume_2 = Volume.create(
cls.apiclient,
{"diskname": "StorPoolEncryptedDiskVMSnapshot"},
zoneid=cls.zone.id,
diskofferingid=cls.disk_offering_ssd_encrypted.id,
account=cls.account.name,
domainid=cls.account.domainid,
)
cls.virtual_machine = VirtualMachine.create(
cls.apiclient,
{"name": "StorPool-LiveMigrate-VM%s" % uuid.uuid4()},
accountid=cls.account.name,
domainid=cls.account.domainid,
zoneid=cls.zone.id,
templateid=template.id,
serviceofferingid=cls.service_offering.id,
hypervisor=cls.hypervisor,
rootdisksize=10
)
cls.virtual_machine2 = VirtualMachine.create(
cls.apiclient,
{"name": "StorPool-VMSnapshots%s" % uuid.uuid4()},
accountid=cls.account.name,
domainid=cls.account.domainid,
zoneid=cls.zone.id,
templateid=template.id,
serviceofferingid=cls.service_offering.id,
hypervisor=cls.hypervisor,
rootdisksize=10
)
cls.virtual_machine3 = VirtualMachine.create(
cls.apiclient,
{"name": "StorPool-VolumeSnapshots%s" % uuid.uuid4()},
accountid=cls.account.name,
domainid=cls.account.domainid,
zoneid=cls.zone.id,
templateid=template.id,
serviceofferingid=cls.service_offering.id,
hypervisor=cls.hypervisor,
rootdisksize=10
)
cls.template = template
cls.hostid = cls.virtual_machine.hostid
cls.random_data_0 = random_gen(size=100)
cls.test_dir = "/tmp"
cls.random_data = "random.data"
return
@classmethod
def tearDownClass(cls):
cls.cleanUpCloudStack()
@classmethod
def cleanUpCloudStack(cls):
try:
cleanup_resources(cls.apiclient, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
if self.unsupportedHypervisor:
self.skipTest("Skipping test because unsupported hypervisor\
%s" % self.hypervisor)
return
def tearDown(self):
return
# live migrate VM with encrypted volumes to another host
@attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
def test_01_live_migrate_vm(self):
'''
Live Migrate VM to another host with encrypted volumes
'''
self.virtual_machine.attach_volume(
self.apiclient,
self.volume_1
)
volumes = list_volumes(
self.apiclient,
virtualmachineid = self.virtual_machine.id,
)
vm_host = list_hosts(self.apiclient, id=self.virtual_machine.hostid)[0]
self.logger.debug(vm_host)
# sshc = SshClient(
# host=vm_host.name,
# port=22,
# user=None,
# passwd=None)
#
# for volume in volumes:
# cmd = 'blkid %s' % volume.path
# result = sshc.execute(cmd)
# if "LUKS" not in result:
# self.fail("The volume isn't encrypted %s" % volume)
dest_host_cmd = findHostsForMigration.findHostsForMigrationCmd()
dest_host_cmd.virtualmachineid = self.virtual_machine.id
host = self.apiclient.findHostsForMigration(dest_host_cmd)[0]
cmd = migrateVirtualMachine.migrateVirtualMachineCmd()
cmd.virtualmachineid = self.virtual_machine.id
cmd.hostid = host.id
self.apiclient.migrateVirtualMachine(cmd)
# VM snapshot
@attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
def test_02_vm_snapshot(self):
self.virtual_machine2.attach_volume(
self.apiclient,
self.volume_2
)
try:
ssh_client = self.virtual_machine2.get_ssh_client(reconnect=True)
cmds = [
"echo %s > %s/%s" %
(self.random_data_0, self.test_dir, self.random_data),
"sync",
"sleep 1",
"sync",
"sleep 1",
"cat %s/%s" %
(self.test_dir, self.random_data)
]
for c in cmds:
self.debug(c)
result = ssh_client.execute(c)
self.debug(result)
except Exception:
self.fail("SSH failed for Virtual machine: %s" %
self.virtual_machine2.ipaddress)
self.assertEqual(
self.random_data_0,
result[0],
"Check the random data has be write into temp file!"
)
time.sleep(30)
MemorySnapshot = False
vm_snapshot = VmSnapshot.create(
self.apiclient,
self.virtual_machine2.id,
MemorySnapshot,
"TestSnapshot",
"Display Text"
)
self.assertEqual(
vm_snapshot.state,
"Ready",
"Check the snapshot of vm is ready!"
)
# Revert VM snapshot
@attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
def test_03_revert_vm_snapshots(self):
"""Test to revert VM snapshots
"""
try:
ssh_client = self.virtual_machine2.get_ssh_client(reconnect=True)
cmds = [
"rm -rf %s/%s" % (self.test_dir, self.random_data),
"ls %s/%s" % (self.test_dir, self.random_data)
]
for c in cmds:
self.debug(c)
result = ssh_client.execute(c)
self.debug(result)
except Exception:
self.fail("SSH failed for Virtual machine: %s" %
self.virtual_machine2.ipaddress)
if "No such file or directory" not in str(result[0]):
self.fail("Check that the random data has been deleted from the temp file!")
time.sleep(30)
list_snapshot_response = VmSnapshot.list(
self.apiclient,
virtualmachineid=self.virtual_machine2.id,
listall=True)
self.assertEqual(
isinstance(list_snapshot_response, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
list_snapshot_response,
None,
"Check if snapshot exists in ListSnapshot"
)
self.assertEqual(
list_snapshot_response[0].state,
"Ready",
"Check the snapshot of vm is ready!"
)
self.virtual_machine2.stop(self.apiclient, forced=True)
VmSnapshot.revertToSnapshot(
self.apiclient,
list_snapshot_response[0].id
)
self.virtual_machine2.start(self.apiclient)
try:
ssh_client = self.virtual_machine2.get_ssh_client(reconnect=True)
cmds = [
"cat %s/%s" % (self.test_dir, self.random_data)
]
for c in cmds:
self.debug(c)
result = ssh_client.execute(c)
self.debug(result)
except Exception:
self.fail("SSH failed for Virtual machine: %s" %
self.virtual_machine2.ipaddress)
self.assertEqual(
self.random_data_0,
result[0],
"Check the random data is equal with the ramdom file!"
)
# Delete VM snapshot
@attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
def test_04_delete_vm_snapshots(self):
"""Test to delete vm snapshots
"""
list_snapshot_response = VmSnapshot.list(
self.apiclient,
virtualmachineid=self.virtual_machine2.id,
listall=True)
self.assertEqual(
isinstance(list_snapshot_response, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
list_snapshot_response,
None,
"Check if snapshot exists in ListSnapshot"
)
VmSnapshot.deleteVMSnapshot(
self.apiclient,
list_snapshot_response[0].id)
time.sleep(30)
list_snapshot_response = VmSnapshot.list(
self.apiclient,
#vmid=self.virtual_machine.id,
virtualmachineid=self.virtual_machine2.id,
listall=False)
self.debug('list_snapshot_response -------------------- %s' % list_snapshot_response)
self.assertIsNone(list_snapshot_response, "VM snapshot was not deleted")
# Take volume snapshot
@unittest.expectedFailure
@attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
def test_05_snapshot_volume_with_secondary(self):
'''
Test creating a snapshot backed up to secondary storage (expected to fail for encrypted volumes)
'''
backup_config = Configurations.update(self.apiclient,
name = "sp.bypass.secondary.storage",
value = "false")
volume = list_volumes(
self.apiclient,
virtualmachineid = self.virtual_machine3.id,
type = "ROOT",
listall = True,
)
snapshot = Snapshot.create(
self.apiclient,
volume_id = volume[0].id,
account=self.account.name,
domainid=self.account.domainid,
)
@attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
def test_06_snapshot_volume_on_primary(self):
'''
Test creating a snapshot kept on primary storage (secondary storage bypassed)
'''
backup_config = Configurations.update(self.apiclient,
name = "sp.bypass.secondary.storage",
value = "true")
volume = list_volumes(
self.apiclient,
virtualmachineid = self.virtual_machine3.id,
type = "ROOT",
listall = True,
)
snapshot = Snapshot.create(
self.apiclient,
volume_id = volume[0].id,
account=self.account.name,
domainid=self.account.domainid,
)
try:
cmd = getVolumeSnapshotDetails.getVolumeSnapshotDetailsCmd()
cmd.snapshotid = snapshot.id
snapshot_details = self.apiclient.getVolumeSnapshotDetails(cmd)
flag = False
for s in snapshot_details:
if s["snapshotDetailsName"] == snapshot.id:
name = s["snapshotDetailsValue"].split("/")[3]
sp_snapshot = self.spapi.snapshotList(snapshotName = "~" + name)
flag = True
if flag == False:
raise Exception("Could not find snapshot in snapshot_details")
except spapi.ApiError as err:
raise Exception(err)
self.assertIsNotNone(snapshot, "Could not create snapshot")
# Revert volume snapshot
@attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
def test_07_revert_volume_on_primary(self):
volume = list_volumes(
self.apiclient,
virtualmachineid = self.virtual_machine3.id,
type = "ROOT",
listall = True,
)[0]
snapshot = list_snapshots(
self.apiclient,
volumeid = volume.id,
listall=True
)[0]
self.virtual_machine3.stop(self.apiclient, forced=True)
cmd = revertSnapshot.revertSnapshotCmd()
cmd.id = snapshot.id
revertcmd = self.apiclient.revertSnapshot(cmd)
# Delete volume snapshot
@attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
def test_08_delete_volume_on_primary(self):
volume = list_volumes(
self.apiclient,
virtualmachineid = self.virtual_machine3.id,
type = "ROOT",
listall = True,
)[0]
snapshot = list_snapshots(
self.apiclient,
volumeid = volume.id,
listall=True
)[0]
cmd = deleteSnapshot.deleteSnapshotCmd()
cmd.id = snapshot.id
self.apiclient.deleteSnapshot(cmd)
# Live migrate encrypted volume
@attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
def test_09_live_migrate_volume(self):
volume = list_volumes(
self.apiclient,
virtualmachineid = self.virtual_machine.id,
type = "ROOT",
listall = True,
)[0]
Volume.migrate(self.apiclient, volumeid=volume.id, storageid=self.primary_storage2.id, livemigrate=True)


@ -75,6 +75,8 @@ class TestData():
diskName = "diskname"
diskOffering = "diskoffering"
diskOffering2 = "diskoffering2"
diskOfferingEncrypted = "diskOfferingEncrypted"
diskOfferingEncrypted2 = "diskOfferingEncrypted2"
cephDiskOffering = "cephDiskOffering"
nfsDiskOffering = "nfsDiskOffering"
domainId = "domainId"
@ -236,6 +238,24 @@ class TestData():
TestData.tags: sp_template_2,
"storagetype": "shared"
},
TestData.diskOfferingEncrypted: {
"name": "ssd-encrypted",
"displaytext": "ssd-encrypted",
"disksize": 5,
"hypervisorsnapshotreserve": 200,
"encrypt": True,
TestData.tags: sp_template_1,
"storagetype": "shared"
},
TestData.diskOfferingEncrypted2: {
"name": "ssd2-encrypted",
"displaytext": "ssd2-encrypted",
"disksize": 5,
"hypervisorsnapshotreserve": 200,
"encrypt": True,
TestData.tags: sp_template_2,
"storagetype": "shared"
},
TestData.cephDiskOffering: {
"name": "ceph",
"displaytext": "Ceph fixed disk offering",