Added support for storpool_qos service (#8755)

slavkap 2024-08-29 10:23:25 +03:00 committed by GitHub
parent 2a1db67eeb
commit 12d9c26747
11 changed files with 1026 additions and 65 deletions


@@ -24,6 +24,7 @@ import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
import org.apache.cloudstack.storage.command.CommandResult;
import com.cloud.host.Host;
import com.cloud.offering.DiskOffering;
import com.cloud.storage.StoragePool;
import com.cloud.storage.Volume;
import com.cloud.storage.Storage.StoragePoolType;
@@ -199,4 +200,9 @@ public interface PrimaryDataStoreDriver extends DataStoreDriver {
    default long getVolumeSizeRequiredOnPool(long volumeSize, Long templateSize, boolean isEncryptionRequired) {
        return volumeSize;
    }

    default boolean informStorageForDiskOfferingChange() {
        return false;
    }

    default void updateStorageWithTheNewDiskOffering(Volume volume, DiskOffering newDiskOffering) {}
}
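
For orientation, this is the call pattern intended for the two new hooks, mirroring the dispatch added to `VolumeApiServiceImpl` later in this commit. The wrapper class below is hypothetical, not part of the commit:

```java
import com.cloud.offering.DiskOffering;
import com.cloud.storage.Volume;

// Hypothetical helper sketching how callers use the new hooks: drivers that
// keep the default informStorageForDiskOfferingChange() == false are never
// asked to update the backend after a disk-offering change.
final class DiskOfferingChangeNotifier {
    static void notifyDriver(PrimaryDataStoreDriver driver, Volume volume, DiskOffering newOffering) {
        if (driver.informStorageForDiskOfferingChange()) {
            driver.updateStorageWithTheNewDiskOffering(volume, newOffering);
        }
    }
}
```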


@@ -345,6 +345,46 @@ corresponding system disk offering.
CloudStack has no way to specify max BW. Do they want to be able to specify max BW only is sufficient.
================================================================================

StorPool provides the storpool_qos service ([QoS user guide](https://kb.storpool.com/storpool_misc/qos.html#storpool-qos-user-guide)), which tracks and configures the storage tier for all volumes based on a `qc` tag that specifies the tier for each volume.

To manage the QoS limits with a `qc` tag, add a resource detail with the key `SP_QOSCLASS` to each disk offering to which a tier should be applied. The value is one of the tier names from the `storpool_qos` service's configuration file:

```
add resourcedetail resourceid={diskofferingid} details[0].key=SP_QOSCLASS details[0].value={the name of the tier from the config} resourcetype=DiskOffering
```

To change the tier via CloudStack, use the `changeOfferingForVolume` API call. The size is required, but the current volume size can be passed. Example:

```
change offeringforvolume id={The UUID of the Volume} diskofferingid={The UUID of the disk offering} size={The current or a new size for the volume}
```

Users who were changing the StorPool template through offerings with the `SP_TEMPLATE` detail keep that functionality, but should now use the `changeOfferingForVolume` API call instead of:
- the `resizeVolume` API call for DATA disks
- the `scaleVirtualMachine` API call for ROOT disks

If a disk offering has both `SP_TEMPLATE` and `SP_QOSCLASS` defined, the `SP_QOSCLASS` detail takes precedence and the volume's QoS is set from the respective `qc` tag value. If the QoS of a volume is changed manually, the storpool_qos service automatically resets the QoS limits to match the `qc` tag value once per minute.
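
In code terms, the precedence described above reduces to the following sketch. This is simplified: the real plugin resolves the details through the disk- and service-offering detail DAOs added in this commit, not a plain map:

```java
import java.util.Map;

// Simplified stand-in for the plugin's getTierFromOfferingDetail /
// getTemplateFromOfferingDetail lookups: SP_QOSCLASS wins, and SP_TEMPLATE
// is consulted only when no tier is defined on the offering.
final class OfferingDetailPrecedence {
    static void apply(Map<String, String> offeringDetails) {
        String tier = offeringDetails.get("SP_QOSCLASS");
        String template = (tier == null) ? offeringDetails.get("SP_TEMPLATE") : null;
        if (tier != null) {
            // The volume is tagged qc=<tier>; storpool_qos enforces the limits.
            System.out.println("tag volume with qc=" + tier);
        } else if (template != null) {
            // No tier defined: the volume is moved to the StorPool template instead.
            System.out.println("move volume to template " + template);
        }
    }
}
```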
<h4>Usage</h4>

Create a disk offering for each tier:

Go to Service Offerings > Disk Offering > Add disk offering.

Add the disk offering detail with an API call via the CloudStack CLI:

```
add resourcedetail resourcetype=diskoffering resourceid=$UUID details[0].key=SP_QOSCLASS details[0].value=$TIER_NAME
```

Creating a VM with QoS:

Deploy a virtual machine: go to Compute > Instances > Add Instance.
- For the ROOT volume, choose the option `Override disk offering`. This sets the required `qc` tag from the disk offering (DO) detail.

Creating a DATA disk with QoS:
- Create a volume via the GUI/CLI and choose a disk offering that has the required `SP_QOSCLASS` detail.

To update the tier of a ROOT/DATA volume, go to Storage > Volumes, select the volume, and click "Change disk offering for the volume" in the upper-right corner.

## Supported operations for Volume encryption

Supported Virtual machine operations - live migration of VM to another host, virtual machine snapshots (group snapshot without memory), revert VM snapshot, delete VM snapshot


@@ -0,0 +1,109 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.api;

import java.io.Serializable;
import java.util.Map;

// Request-body definition for StorPool volume create/update API calls;
// 'name' is transient because it is passed in the request URL rather than
// serialized into the JSON body.
public class StorPoolVolumeDef implements Serializable {
    private static final long serialVersionUID = 1L;
    private transient String name;
    private Long size;
    private Map<String, String> tags;
    private String parent;
    private Long iops;
    private String template;
    private String baseOn;
    private String rename;
    private Boolean shrinkOk;

    public StorPoolVolumeDef() {
    }

    public StorPoolVolumeDef(String name, Long size, Map<String, String> tags, String parent, Long iops, String template,
            String baseOn, String rename, Boolean shrinkOk) {
        super();
        this.name = name;
        this.size = size;
        this.tags = tags;
        this.parent = parent;
        this.iops = iops;
        this.template = template;
        this.baseOn = baseOn;
        this.rename = rename;
        this.shrinkOk = shrinkOk;
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    public Long getSize() {
        return size;
    }

    public void setSize(Long size) {
        this.size = size;
    }

    public Map<String, String> getTags() {
        return tags;
    }

    public void setTags(Map<String, String> tags) {
        this.tags = tags;
    }

    public String getParent() {
        return parent;
    }

    public void setParent(String parent) {
        this.parent = parent;
    }

    public Long getIops() {
        return iops;
    }

    public void setIops(Long iops) {
        this.iops = iops;
    }

    public String getTemplate() {
        return template;
    }

    public void setTemplate(String template) {
        this.template = template;
    }

    public String getBaseOn() {
        return baseOn;
    }

    public void setBaseOn(String baseOn) {
        this.baseOn = baseOn;
    }

    public String getRename() {
        return rename;
    }

    public void setRename(String rename) {
        this.rename = rename;
    }

    public Boolean getShrinkOk() {
        return shrinkOk;
    }

    public void setShrinkOk(Boolean shrinkOk) {
        this.shrinkOk = shrinkOk;
    }
}
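
As a usage sketch (assuming an already-initialized `SpConnectionDesc conn`; the tag key `qc` and the tier name `ssd` follow the QoS documentation above):

```java
import java.util.HashMap;
import java.util.Map;

// Retag a volume to tier "ssd" and grow it to 10 GiB in a single
// MultiCluster/VolumeUpdate call; the name goes into the URL, not the body.
Map<String, String> tags = new HashMap<>();
tags.put("qc", "ssd");
StorPoolVolumeDef spVolume = new StorPoolVolumeDef(
        "example-volume",           // name (transient, used only for the URL)
        10L * 1024 * 1024 * 1024,   // size in bytes
        tags,                       // qc tier tag picked up by storpool_qos
        null,                       // parent
        null,                       // iops
        null,                       // template (left unchanged)
        null,                       // baseOn
        null,                       // rename
        false);                     // shrinkOk
SpApiResponse resp = StorPoolUtil.volumeUpdate(spVolume, conn);
```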


@@ -39,12 +39,15 @@ import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
import org.apache.cloudstack.resourcedetail.DiskOfferingDetailVO;
import org.apache.cloudstack.resourcedetail.dao.DiskOfferingDetailsDao;
import org.apache.cloudstack.storage.RemoteHostEndPoint;
import org.apache.cloudstack.storage.command.CommandResult;
import org.apache.cloudstack.storage.command.CopyCmdAnswer;
import org.apache.cloudstack.storage.command.CreateObjectAnswer;
import org.apache.cloudstack.storage.command.StorageSubSystemCommand;
import org.apache.cloudstack.storage.datastore.api.StorPoolSnapshotDef;
import org.apache.cloudstack.storage.datastore.api.StorPoolVolumeDef;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO;
@@ -81,12 +84,18 @@ import com.cloud.agent.api.to.DataStoreTO;
import com.cloud.agent.api.to.DataTO;
import com.cloud.agent.api.to.StorageFilerTO;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.exception.StorageUnavailableException;
import com.cloud.host.Host;
import com.cloud.host.HostVO;
import com.cloud.host.dao.HostDao;
import com.cloud.hypervisor.kvm.storage.StorPoolStorageAdaptor;
import com.cloud.offering.DiskOffering;
import com.cloud.server.ResourceTag;
import com.cloud.server.ResourceTag.ResourceObjectType;
import com.cloud.service.ServiceOfferingDetailsVO;
import com.cloud.service.ServiceOfferingVO;
import com.cloud.service.dao.ServiceOfferingDao;
import com.cloud.service.dao.ServiceOfferingDetailsDao;
import com.cloud.storage.DataStoreRole;
import com.cloud.storage.ResizeVolumePayload;
import com.cloud.storage.Snapshot;
@@ -156,6 +165,12 @@ public class StorPoolPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
    private StoragePoolHostDao storagePoolHostDao;
    @Inject
    DataStoreManager dataStoreManager;
    @Inject
    private DiskOfferingDetailsDao diskOfferingDetailsDao;
    @Inject
    private ServiceOfferingDetailsDao serviceOfferingDetailDao;
    @Inject
    private ServiceOfferingDao serviceOfferingDao;

    private SnapshotDataStoreVO getSnapshotImageStoreRef(long snapshotId, long zoneId) {
        List<SnapshotDataStoreVO> snaps = snapshotDataStoreDao.listReadyBySnapshot(snapshotId, DataStoreRole.Image);
@@ -259,15 +274,25 @@ public class StorPoolPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
    public void createAsync(DataStore dataStore, DataObject data, AsyncCompletionCallback<CreateCmdResult> callback) {
        String path = null;
        Answer answer;
        String tier = null;
        String template = null;
        if (data.getType() == DataObjectType.VOLUME) {
            try {
                VolumeInfo vinfo = (VolumeInfo)data;
                String name = vinfo.getUuid();
                Long size = vinfo.getPassphraseId() == null ? vinfo.getSize() : vinfo.getSize() + 2097152;
                Long vmId = vinfo.getInstanceId();
                SpConnectionDesc conn = StorPoolUtil.getSpConnection(dataStore.getUuid(), dataStore.getId(), storagePoolDetailsDao, primaryStoreDao);
                if (vinfo.getDiskOfferingId() != null) {
                    tier = getTierFromOfferingDetail(vinfo.getDiskOfferingId());
                    if (tier == null) {
                        template = getTemplateFromOfferingDetail(vinfo.getDiskOfferingId());
                    }
                }
                SpApiResponse resp = createStorPoolVolume(template, tier, vinfo, name, size, vmId, conn);
                if (resp.getError() == null) {
                    String volumeName = StorPoolUtil.getNameFromResponse(resp, false);
                    path = StorPoolUtil.devPath(volumeName);
@@ -298,6 +323,26 @@
        }
    }

    private SpApiResponse createStorPoolVolume(String template, String tier, VolumeInfo vinfo, String name, Long size,
            Long vmId, SpConnectionDesc conn) {
        SpApiResponse resp = new SpApiResponse();
        Map<String, String> tags = StorPoolHelper.addStorPoolTags(name, getVMInstanceUUID(vmId), "volume", getVcPolicyTag(vmId), tier);
        if (tier != null || template != null) {
            StorPoolUtil.spLog(
                    "Creating volume [%s] with template [%s] or tier tags [%s] described in disk/service offerings details",
                    vinfo.getUuid(), template, tier);
            resp = StorPoolUtil.volumeCreate(size, null, template, tags, conn);
        } else {
            StorPoolUtil.spLog(
                    "StorpoolPrimaryDataStoreDriver.createAsync volume: name=%s, uuid=%s, isAttached=%s vm=%s, payload=%s, template: %s",
                    vinfo.getName(), vinfo.getUuid(), vinfo.isAttachedVM(), vinfo.getAttachedVmName(),
                    vinfo.getpayload(), conn.getTemplateName());
            resp = StorPoolUtil.volumeCreate(name, null, size, getVMInstanceUUID(vinfo.getInstanceId()), null,
                    "volume", vinfo.getMaxIops(), conn);
        }
        return resp;
    }

    private void updateVolume(DataStore dataStore, String path, VolumeInfo vinfo) {
        VolumeVO volume = volumeDao.findById(vinfo.getId());
        volume.setPoolId(dataStore.getId());
@@ -336,37 +381,98 @@ public class StorPoolPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
    public void resize(DataObject data, AsyncCompletionCallback<CreateCmdResult> callback) {
        String path = null;
        String err = null;
        if (data.getType() == DataObjectType.VOLUME) {
            VolumeObject vol = (VolumeObject)data;
            path = vol.getPath();
            err = resizeVolume(data, path, vol);
        } else {
            err = String.format("Invalid object type \"%s\" passed to resize", data.getType());
        }
        CreateCmdResult res = new CreateCmdResult(path, new Answer(null, err != null, err));
        res.setResult(err);
        callback.complete(res);
    }
    private String resizeVolume(DataObject data, String path, VolumeObject vol) {
        String err = null;
        ResizeVolumePayload payload = (ResizeVolumePayload)vol.getpayload();
        boolean needResize = vol.getSize() != payload.newSize;
        final String name = StorPoolStorageAdaptor.getVolumeNameFromPath(path, true);
        final long oldSize = vol.getSize();
        Long oldMaxIops = vol.getMaxIops();
        try {
            SpConnectionDesc conn = StorPoolUtil.getSpConnection(data.getDataStore().getUuid(), data.getDataStore().getId(), storagePoolDetailsDao, primaryStoreDao);
            err = updateStorPoolVolume(vol, payload, conn);
            if (err == null && needResize) {
                err = notifyQemuForTheNewSize(data, err, vol, payload);
            }
            if (err != null) {
                // try restoring volume to its initial size
                SpApiResponse response = StorPoolUtil.volumeUpdate(name, oldSize, true, oldMaxIops, conn);
                if (response.getError() != null) {
                    logger.debug(String.format("Could not resize StorPool volume %s back to its original size. Error: %s", name, response.getError()));
                }
            }
        } catch (Exception e) {
            logger.debug("sending resize command failed", e);
            err = e.toString();
        }
        return err;
    }
    private String notifyQemuForTheNewSize(DataObject data, String err, VolumeObject vol, ResizeVolumePayload payload)
            throws StorageUnavailableException {
        StoragePool pool = (StoragePool)data.getDataStore();
        StorPoolResizeVolumeCommand resizeCmd = new StorPoolResizeVolumeCommand(vol.getPath(), new StorageFilerTO(pool), vol.getSize(), payload.newSize, payload.shrinkOk,
                payload.instanceName, payload.hosts == null ? false : true);
        ResizeVolumeAnswer answer = (ResizeVolumeAnswer) storageMgr.sendToPool(pool, payload.hosts, resizeCmd);
        if (answer == null || !answer.getResult()) {
            err = answer != null ? answer.getDetails() : "return a null answer, resize failed for unknown reason";
        }
        return err;
    }
    private String updateStorPoolVolume(VolumeObject vol, ResizeVolumePayload payload, SpConnectionDesc conn) {
        String err = null;
        String name = StorPoolStorageAdaptor.getVolumeNameFromPath(vol.getPath(), true);
        Long newDiskOfferingId = payload.getNewDiskOfferingId();
        String tier = null;
        String template = null;
        if (newDiskOfferingId != null) {
            tier = getTierFromOfferingDetail(newDiskOfferingId);
            if (tier == null) {
                template = getTemplateFromOfferingDetail(newDiskOfferingId);
            }
        }
        SpApiResponse resp = new SpApiResponse();
        if (tier != null || template != null) {
            Map<String, String> tags = StorPoolHelper.addStorPoolTags(null, null, null, null, tier);
            StorPoolVolumeDef spVolume = new StorPoolVolumeDef(name, payload.newSize, tags, null, null, template, null, null,
                    payload.shrinkOk);
            resp = StorPoolUtil.volumeUpdate(spVolume, conn);
        } else {
            long maxIops = payload.newMaxIops == null ? Long.valueOf(0) : payload.newMaxIops;
            StorPoolVolumeDef spVolume = new StorPoolVolumeDef(name, payload.newSize, null, null, maxIops, null, null, null,
                    payload.shrinkOk);
            StorPoolUtil.spLog(
                    "StorpoolPrimaryDataStoreDriverImpl.resize: name=%s, uuid=%s, oldSize=%d, newSize=%s, shrinkOk=%s, maxIops=%s",
                    name, vol.getUuid(), vol.getSize(), payload.newSize, payload.shrinkOk, maxIops);
            resp = StorPoolUtil.volumeUpdate(spVolume, conn);
        }
        if (resp.getError() != null) {
            err = String.format("Could not resize StorPool volume %s. Error: %s", name, resp.getError());
        } else {
            vol.setSize(payload.newSize);
            vol.update();
            if (payload.newMaxIops != null) {
@@ -375,27 +481,9 @@ public class StorPoolPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
                volumeDao.update(volume.getId(), volume);
            }
            updateStoragePool(vol.getPoolId(), payload.newSize - vol.getSize());
        }
        return err;
    }
    @Override
@@ -772,8 +860,30 @@ public class StorPoolPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
        }
        StorPoolUtil.spLog(String.format("volume size is: %d", size));
        Long vmId = vinfo.getInstanceId();

        String template = null;
        String tier = null;
        SpApiResponse resp = new SpApiResponse();
        if (vinfo.getDiskOfferingId() != null) {
            tier = getTierFromOfferingDetail(vinfo.getDiskOfferingId());
            if (tier == null) {
                template = getTemplateFromOfferingDetail(vinfo.getDiskOfferingId());
            }
        }
        if (tier != null || template != null) {
            Map<String, String> tags = StorPoolHelper.addStorPoolTags(name, getVMInstanceUUID(vmId), "volume", getVcPolicyTag(vmId), tier);
            StorPoolUtil.spLog(
                    "Creating volume [%s] with template [%s] or tier tags [%s] described in disk/service offerings details",
                    vinfo.getUuid(), template, tier);
            resp = StorPoolUtil.volumeCreate(size, parentName, template, tags, conn);
        } else {
            resp = StorPoolUtil.volumeCreate(name, parentName, size, getVMInstanceUUID(vmId),
                    getVcPolicyTag(vmId), "volume", vinfo.getMaxIops(), conn);
        }
        if (resp.getError() == null) {
            updateStoragePool(dstData.getDataStore().getId(), vinfo.getSize());
            updateVolumePoolType(vinfo);
@@ -1255,4 +1365,67 @@ public class StorPoolPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
            StorPoolUtil.spLog("The volume [%s] is detach from all clusters [%s]", volName, resp);
        }
    }

    @Override
    public boolean informStorageForDiskOfferingChange() {
        return true;
    }

    @Override
    public void updateStorageWithTheNewDiskOffering(Volume volume, DiskOffering newDiskOffering) {
        if (newDiskOffering == null) {
            return;
        }
        StoragePoolVO pool = primaryStoreDao.findById(volume.getPoolId());
        if (pool == null) {
            return;
        }
        String tier = getTierFromOfferingDetail(newDiskOffering.getId());
        String template = null;
        if (tier == null) {
            template = getTemplateFromOfferingDetail(newDiskOffering.getId());
        }
        if (tier == null && template == null) {
            return;
        }
        SpConnectionDesc conn = StorPoolUtil.getSpConnection(pool.getUuid(), pool.getId(), storagePoolDetailsDao, primaryStoreDao);
        StorPoolUtil.spLog("Updating volume [%s] with tier tag [%s] or template [%s] from Disk offering", volume.getId(), tier, template);
        String volumeName = StorPoolStorageAdaptor.getVolumeNameFromPath(volume.getPath(), true);
        Map<String, String> tags = StorPoolHelper.addStorPoolTags(null, null, null, null, tier);
        StorPoolVolumeDef spVolume = new StorPoolVolumeDef(volumeName, null, tags, null, null, template, null, null, null);
        SpApiResponse response = StorPoolUtil.volumeUpdate(spVolume, conn);
        if (response.getError() != null) {
            StorPoolUtil.spLog("Could not update volume [%s] with tier tag [%s] or template [%s] from Disk offering due to [%s]", volume.getId(), tier, template, response.getError());
        }
    }

    private String getTemplateFromOfferingDetail(Long diskOfferingId) {
        String template = null;
        DiskOfferingDetailVO diskOfferingDetail = diskOfferingDetailsDao.findDetail(diskOfferingId, StorPoolUtil.SP_TEMPLATE);
        if (diskOfferingDetail == null) {
            ServiceOfferingVO serviceOffering = serviceOfferingDao.findServiceOfferingByComputeOnlyDiskOffering(diskOfferingId, true);
            if (serviceOffering != null) {
                ServiceOfferingDetailsVO serviceOfferingDetail = serviceOfferingDetailDao.findDetail(serviceOffering.getId(), StorPoolUtil.SP_TEMPLATE);
                if (serviceOfferingDetail != null) {
                    template = serviceOfferingDetail.getValue();
                }
            }
        } else {
            template = diskOfferingDetail.getValue();
        }
        return template;
    }

    private String getTierFromOfferingDetail(Long diskOfferingId) {
        String tier = null;
        DiskOfferingDetailVO diskOfferingDetail = diskOfferingDetailsDao.findDetail(diskOfferingId, StorPoolUtil.SP_TIER);
        if (diskOfferingDetail == null) {
            return tier;
        }
        tier = diskOfferingDetail.getValue();
        return tier;
    }
}


@@ -163,11 +163,12 @@ public class StorPoolHelper {
        return null;
    }

    public static Map<String, String> addStorPoolTags(String name, String vmUuid, String csTag, String vcPolicy, String qcTier) {
        Map<String, String> tags = new HashMap<>();
        tags.put("uuid", name);
        tags.put("cvm", vmUuid);
        tags.put(StorPoolUtil.SP_VC_POLICY, vcPolicy);
        tags.put("qc", qcTier);
        if (csTag != null) {
            tags.put("cs", csTag);
        }
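
For illustration, building the tags for a tier-1 volume attached to a VM would now look roughly like this (the UUID values are made up):

```java
import java.util.Map;

// Sketch: addStorPoolTags(name, vmUuid, csTag, vcPolicy, qcTier) now also
// emits the "qc" tag that the storpool_qos service consumes.
Map<String, String> tags = StorPoolHelper.addStorPoolTags(
        "4f1c2d3e-volume-uuid",  // "uuid" tag
        "a7b8c9d0-vm-uuid",      // "cvm" tag
        "volume",                // "cs" tag
        null,                    // VC policy tag (SP_VC_POLICY key)
        "ssd");                  // "qc" tag, the storage tier
```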


@@ -30,6 +30,7 @@ import com.google.gson.JsonParser;
import com.google.gson.JsonPrimitive;
import org.apache.cloudstack.storage.datastore.api.StorPoolSnapshotDef;
import org.apache.cloudstack.storage.datastore.api.StorPoolVolumeDef;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
@@ -135,6 +136,7 @@ public class StorPoolUtil {
    public static final String DELAY_DELETE = "delayDelete";
    public static final String SP_TIER = "SP_QOSCLASS";

    public static enum StorpoolRights {
        RO("ro"), RW("rw"), DETACH("detach");
@@ -499,7 +501,19 @@ public class StorPoolUtil {
        json.put("parent", parentName);
        json.put("size", size);
        json.put("template", conn.getTemplateName());
        Map<String, String> tags = StorPoolHelper.addStorPoolTags(name, vmUuid, csTag, vcPolicy, null);
        json.put("tags", tags);
        return POST("MultiCluster/VolumeCreate", json, conn);
    }

    public static SpApiResponse volumeCreate(Long size, String parentName, String template, Map<String, String> tags, SpConnectionDesc conn) {
        template = template != null ? template : conn.getTemplateName();
        Map<String, Object> json = new LinkedHashMap<>();
        json.put("name", "");
        json.put("parent", parentName);
        json.put("size", size);
        json.put("template", template);
        json.put("tags", tags);
        return POST("MultiCluster/VolumeCreate", json, conn);
    }
@@ -523,7 +537,7 @@ public class StorPoolUtil {
            json.put("iops", iops);
        }
        json.put("template", conn.getTemplateName());
        Map<String, String> tags = StorPoolHelper.addStorPoolTags(name, cvmTag, csTag, vcPolicyTag, null);
        json.put("tags", tags);
        return POST("MultiCluster/VolumeCreate", json, conn);
    }
@@ -551,7 +565,7 @@ public class StorPoolUtil {
    public static SpApiResponse volumeRemoveTags(String name, SpConnectionDesc conn) {
        Map<String, Object> json = new HashMap<>();
        Map<String, String> tags = StorPoolHelper.addStorPoolTags(null, "", null, "", null);
        json.put("tags", tags);
        return POST("MultiCluster/VolumeUpdate/" + name, json, conn);
    }
@@ -559,7 +573,7 @@ public class StorPoolUtil {
    public static SpApiResponse volumeUpdateIopsAndTags(final String name, final String uuid, Long iops,
            SpConnectionDesc conn, String vcPolicy) {
        Map<String, Object> json = new HashMap<>();
        Map<String, String> tags = StorPoolHelper.addStorPoolTags(null, uuid, null, vcPolicy, null);
        json.put("iops", iops);
        json.put("tags", tags);
        return POST("MultiCluster/VolumeUpdate/" + name, json, conn);
@@ -567,14 +581,14 @@ public class StorPoolUtil {
    public static SpApiResponse volumeUpdateCvmTags(final String name, final String uuid, SpConnectionDesc conn) {
        Map<String, Object> json = new HashMap<>();
        Map<String, String> tags = StorPoolHelper.addStorPoolTags(null, uuid, null, null, null);
        json.put("tags", tags);
        return POST("MultiCluster/VolumeUpdate/" + name, json, conn);
    }

    public static SpApiResponse volumeUpdateVCTags(final String name, SpConnectionDesc conn, String vcPolicy) {
        Map<String, Object> json = new HashMap<>();
        Map<String, String> tags = StorPoolHelper.addStorPoolTags(null, null, null, vcPolicy, null);
        json.put("tags", tags);
        return POST("MultiCluster/VolumeUpdate/" + name, json, conn);
    }
@@ -585,10 +599,14 @@ public class StorPoolUtil {
        return POST("MultiCluster/VolumeUpdate/" + name, json, conn);
    }

    public static SpApiResponse volumeUpdate(StorPoolVolumeDef volume, SpConnectionDesc conn) {
        return POST("MultiCluster/VolumeUpdate/" + volume.getName(), volume, conn);
    }

    public static SpApiResponse volumeSnapshot(final String volumeName, final String snapshotName, String vmUuid,
            String csTag, String vcPolicy, SpConnectionDesc conn) {
        Map<String, Object> json = new HashMap<>();
        Map<String, String> tags = StorPoolHelper.addStorPoolTags(snapshotName, vmUuid, csTag, vcPolicy, null);
        json.put("name", "");
        json.put("tags", tags);
@@ -602,7 +620,7 @@ public class StorPoolUtil {
    public static SpApiResponse volumesGroupSnapshot(final List<VolumeObjectTO> volumeTOs, final String vmUuid,
            final String snapshotName, String csTag, SpConnectionDesc conn) {
        Map<String, Object> json = new LinkedHashMap<>();
        Map<String, String> tags = StorPoolHelper.addStorPoolTags(snapshotName, vmUuid, csTag, null, null);
        List<Map<String, Object>> volumes = new ArrayList<>();
        for (VolumeObjectTO volumeTO : volumeTOs) {
            Map<String, Object> vol = new LinkedHashMap<>();


@@ -46,4 +46,12 @@ public class ResizeVolumePayload {
        this(newSize, newMinIops, newMaxIops, newHypervisorSnapshotReserve, shrinkOk, instanceName, hosts, isManaged);
        this.newDiskOfferingId = newDiskOfferingId;
    }

    public Long getNewDiskOfferingId() {
        return newDiskOfferingId;
    }

    public void setNewDiskOfferingId(Long newDiskOfferingId) {
        this.newDiskOfferingId = newDiskOfferingId;
    }
}


@@ -2062,6 +2062,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiService {
        if (!volumeMigrateRequired && !volumeResizeRequired) {
            _volsDao.updateDiskOffering(volume.getId(), newDiskOffering.getId());
            volume = _volsDao.findById(volume.getId());
            updateStorageWithTheNewDiskOffering(volume, newDiskOffering);

            return volume;
        }
@@ -2098,6 +2100,18 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiService {
        return volume;
    }

    private void updateStorageWithTheNewDiskOffering(VolumeVO volume, DiskOfferingVO newDiskOffering) {
        DataStore dataStore = dataStoreMgr.getDataStore(volume.getPoolId(), DataStoreRole.Primary);
        DataStoreDriver dataStoreDriver = dataStore != null ? dataStore.getDriver() : null;
        if (dataStoreDriver instanceof PrimaryDataStoreDriver) {
            PrimaryDataStoreDriver storageDriver = (PrimaryDataStoreDriver)dataStoreDriver;
            if (storageDriver.informStorageForDiskOfferingChange()) {
                storageDriver.updateStorageWithTheNewDiskOffering(volume, newDiskOffering);
            }
        }
    }

    /**
     * This method is to compare long values, in miniops and maxiops a or b can be null or 0.
     * Use this method to treat 0 and null as same
@@ -2331,7 +2345,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiService {
     * the actual disk size.
     */
        if (currentSize > newSize) {
            if (volume != null && ImageFormat.QCOW2.equals(volume.getFormat()) && !Volume.State.Allocated.equals(volume.getState()) && !StoragePoolType.StorPool.equals(volume.getPoolType())) {
                String message = "Unable to shrink volumes of type QCOW2";
                logger.warn(message);
                throw new InvalidParameterValueException(message);


@@ -79,6 +79,11 @@ class TestData():
    diskOfferingEncrypted2 = "diskOfferingEncrypted2"
    cephDiskOffering = "cephDiskOffering"
    nfsDiskOffering = "nfsDiskOffering"
    diskOfferingTier1Tag = "diskOfferingTier1Tag"
    diskOfferingTier2Tag = "diskOfferingTier2Tag"
    diskOfferingTier1Template = "diskOfferingTier1Template"
    diskOfferingTier2Template = "diskOfferingTier2Template"
    diskOfferingWithTagsAndTempl = "diskOfferingWithTagsAndTempl"
    domainId = "domainId"
    hypervisor = "hypervisor"
    login = "login"
@@ -278,6 +283,46 @@ class TestData():
            TestData.tags: "nfs",
            "storagetype": "shared"
        },
        TestData.diskOfferingTier1Template: {
            "name": "tier1-template",
            "displaytext": "Tier1 using different StorPool template",
            "custom": True,
            "hypervisorsnapshotreserve": 200,
            TestData.tags: sp_template_1,
            "storagetype": "shared"
        },
        TestData.diskOfferingTier2Template: {
            "name": "tier2-template",
            "displaytext": "Tier2 using different StorPool template",
            "custom": True,
            "hypervisorsnapshotreserve": 200,
            TestData.tags: sp_template_1,
            "storagetype": "shared"
        },
        TestData.diskOfferingTier1Tag: {
            "name": "tier1-tag",
            "displaytext": "Tier1 using QOS tags",
            "custom": True,
            "hypervisorsnapshotreserve": 200,
            TestData.tags: sp_template_1,
            "storagetype": "shared"
        },
        TestData.diskOfferingTier2Tag: {
            "name": "tier2-tag",
            "displaytext": "Tier2 using QOS tags",
            "custom": True,
            "hypervisorsnapshotreserve": 200,
            TestData.tags: sp_template_1,
            "storagetype": "shared"
        },
        TestData.diskOfferingWithTagsAndTempl: {
            "name": "tier2-tag-template",
            "displaytext": "Tier2 using QOS tags and template",
            "custom": True,
            "hypervisorsnapshotreserve": 200,
            TestData.tags: sp_template_1,
            "storagetype": "shared"
        },
        TestData.volume_1: {
            TestData.diskName: "test-volume-1",
        },


@@ -0,0 +1,544 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pprint
import uuid
from marvin.cloudstackAPI import (listResourceDetails, addResourceDetail, changeOfferingForVolume)
from marvin.cloudstackTestCase import cloudstackTestCase
from marvin.codes import FAILED
from marvin.lib.base import (DiskOffering,
                             ServiceOffering,
                             StoragePool,
                             VirtualMachine,
                             SecurityGroup,
                             ResourceDetails
                             )
from marvin.lib.common import (get_domain,
                               get_template,
                               list_disk_offering,
                               list_storage_pools,
                               list_volumes,
                               list_service_offering,
                               list_zones)
from marvin.lib.utils import random_gen, cleanup_resources
from nose.plugins.attrib import attr
from storpool import spapi
from sp_util import (TestData, StorPoolHelper)


class TestStorPoolTiers(cloudstackTestCase):
    @classmethod
    def setUpClass(cls):
        super(TestStorPoolTiers, cls).setUpClass()
        try:
            cls.setUpCloudStack()
        except Exception:
            raise

    @classmethod
    def setUpCloudStack(cls):
        config = cls.getClsConfig()
        StorPoolHelper.logger = cls

        zone = config.zones[0]
        assert zone is not None

        cls.spapi = spapi.Api(host=zone.spEndpoint, port=zone.spEndpointPort, auth=zone.spAuthToken, multiCluster=True)
        testClient = super(TestStorPoolTiers, cls).getClsTestClient()
        cls.apiclient = testClient.getApiClient()
        cls.unsupportedHypervisor = False
        cls.hypervisor = testClient.getHypervisorInfo()
        if cls.hypervisor.lower() in ("hyperv", "lxc"):
            cls.unsupportedHypervisor = True
            return

        cls._cleanup = []

        cls.services = testClient.getParsedTestDataConfig()

        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.apiclient)
        cls.zone = list_zones(cls.apiclient, name=zone.name)[0]

        td = TestData()
        cls.testdata = td.testdata
        cls.helper = StorPoolHelper()

        disk_offerings_tier1_tags = cls.testdata[TestData.diskOfferingTier1Tag]
        disk_offerings_tier2_tags = cls.testdata[TestData.diskOfferingTier2Tag]
        disk_offerings_tier1_template = cls.testdata[TestData.diskOfferingTier1Template]
        disk_offerings_tier2_template = cls.testdata[TestData.diskOfferingTier2Template]
        disk_offerings_tier2_tags_template = cls.testdata[TestData.diskOfferingWithTagsAndTempl]

        cls.qos = "SP_QOSCLASS"
        cls.spTemplate = "SP_TEMPLATE"

        cls.disk_offerings_tier1_tags = cls.getDiskOffering(disk_offerings_tier1_tags, cls.qos, "ssd")
        cls.disk_offerings_tier2_tags = cls.getDiskOffering(disk_offerings_tier2_tags, cls.qos, "virtual")
        cls.disk_offerings_tier1_template = cls.getDiskOffering(disk_offerings_tier1_template, cls.spTemplate, "ssd")
        cls.disk_offerings_tier2_template = cls.getDiskOffering(disk_offerings_tier2_template, cls.spTemplate,
                                                                "virtual")
        cls.disk_offerings_tier2_tags_template = cls.getDiskOffering(disk_offerings_tier2_tags_template, cls.spTemplate,
                                                                     "virtual")
        cls.resourceDetails(cls.qos, cls.disk_offerings_tier2_tags_template.id, "virtual")

        cls.account = cls.helper.create_account(
            cls.apiclient,
            cls.services["account"],
            accounttype=1,
            domainid=cls.domain.id,
            roleid=1
        )
        cls._cleanup.append(cls.account)

        securitygroup = SecurityGroup.list(cls.apiclient, account=cls.account.name, domainid=cls.account.domainid)[0]
        cls.helper.set_securityGroups(cls.apiclient, account=cls.account.name, domainid=cls.account.domainid,
                                      id=securitygroup.id)

        storpool_primary_storage = cls.testdata[TestData.primaryStorage]
        storpool_service_offerings = cls.testdata[TestData.serviceOffering]

        cls.template_name = storpool_primary_storage.get("name")

        storage_pool = list_storage_pools(
            cls.apiclient,
            name=cls.template_name
        )
        service_offerings = list_service_offering(
            cls.apiclient,
            name=cls.template_name
        )
        disk_offerings = list_disk_offering(
            cls.apiclient,
            name="ssd"
        )

        if storage_pool is None:
            storage_pool = StoragePool.create(cls.apiclient, storpool_primary_storage)
        else:
            storage_pool = storage_pool[0]
        cls.storage_pool = storage_pool
        cls.debug(pprint.pformat(storage_pool))

        if service_offerings is None:
            service_offerings = ServiceOffering.create(cls.apiclient, storpool_service_offerings)
        else:
            service_offerings = service_offerings[0]

        # The version of CentOS has to be supported
        template = get_template(
            cls.apiclient,
            cls.zone.id,
            account="system"
        )

        if template == FAILED:
            assert False, "get_template() failed to return template\
                    with description %s" % cls.services["ostype"]

        cls.services["domainid"] = cls.domain.id
        cls.services["small"]["zoneid"] = cls.zone.id
        cls.services["templates"]["ostypeid"] = template.ostypeid
        cls.services["zoneid"] = cls.zone.id

        cls.service_offering = service_offerings
        cls.debug(pprint.pformat(cls.service_offering))

        cls.template = template
        cls.random_data_0 = random_gen(size=100)
        cls.test_dir = "/tmp"
        cls.random_data = "random.data"
        return

    @classmethod
    def getDiskOffering(cls, dataDiskOffering, qos, resValue):
        disk_offerings = list_disk_offering(cls.apiclient, name=dataDiskOffering.get("name"))
        if disk_offerings is None:
            disk_offerings = DiskOffering.create(cls.apiclient, services=dataDiskOffering, custom=True)
            cls.resourceDetails(qos, disk_offerings.id, resValue)
        else:
            disk_offerings = disk_offerings[0]
            cls.resourceDetails(qos, disk_offerings.id, )
        return disk_offerings

    @classmethod
    def tearDownClass(cls):
        super(TestStorPoolTiers, cls).tearDownClass()

    def setUp(self):
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()

        if self.unsupportedHypervisor:
            self.skipTest("Skipping test because unsupported hypervisor\
                    %s" % self.hypervisor)
        return

    def tearDown(self):
        super(TestStorPoolTiers, self).tearDown()

    @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
    def test_01_check_tags_on_deployed_vm_and_datadisk(self):
        virtual_machine_tier1_tag = self.deploy_vm_and_check_tier_tag()
        virtual_machine_tier1_tag.stop(self.apiclient, forced=True)

    @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
    def test_02_change_offering_on_attached_root_disk(self):
        virtual_machine_tier1_tag = self.deploy_vm_and_check_tier_tag()
        root_volume = list_volumes(self.apiclient, virtualmachineid=virtual_machine_tier1_tag.id, type="ROOT",
                                   listall=True)
        self.changeOfferingForVolume(root_volume[0].id, self.disk_offerings_tier2_tags.id, root_volume[0].size)
        root_volume = list_volumes(self.apiclient, virtualmachineid=virtual_machine_tier1_tag.id, type="ROOT",
                                   listall=True)
        self.vc_policy_tags(volumes=root_volume, vm=virtual_machine_tier1_tag, qos_or_template=self.qos,
                            disk_offering_id=self.disk_offerings_tier2_tags.id, attached=True)
        virtual_machine_tier1_tag.stop(self.apiclient, forced=True)

    def test_03_change_offering_on_attached_data_disk(self):
        virtual_machine_tier1_tag = self.deploy_vm_and_check_tier_tag()
        root_volume = list_volumes(self.apiclient, virtualmachineid=virtual_machine_tier1_tag.id, type="DATADISK",
                                   listall=True)
        self.changeOfferingForVolume(root_volume[0].id, self.disk_offerings_tier2_tags.id, root_volume[0].size)
        root_volume = list_volumes(self.apiclient, virtualmachineid=virtual_machine_tier1_tag.id, type="DATADISK",
                                   listall=True)
        self.vc_policy_tags(volumes=root_volume, vm=virtual_machine_tier1_tag, qos_or_template=self.qos,
                            disk_offering_id=self.disk_offerings_tier2_tags.id, attached=True)
        virtual_machine_tier1_tag.stop(self.apiclient, forced=True)

    @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
    def test_04_check_templates_on_deployed_vm_and_datadisk(self):
        virtual_machine_template_tier1 = VirtualMachine.create(
            self.apiclient,
            {"name": "StorPool-%s" % uuid.uuid4()},
            zoneid=self.zone.id,
            templateid=self.template.id,
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
            overridediskofferingid=self.disk_offerings_tier1_template.id,
            diskofferingid=self.disk_offerings_tier1_template.id,
            size=2,
            hypervisor=self.hypervisor,
            rootdisksize=10
        )
        volumes = list_volumes(self.apiclient, virtualmachineid=virtual_machine_template_tier1.id, listall=True)
        for v in volumes:
            self.check_storpool_template(v, self.disk_offerings_tier1_template.id, self.spTemplate)
        virtual_machine_template_tier1.stop(self.apiclient, forced=True)

    @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
    def test_05_check_templates_on_deployed_vm_and_datadisk_tier2(self):
        virtual_machine_template_tier2 = VirtualMachine.create(
            self.apiclient,
            {"name": "StorPool-%s" % uuid.uuid4()},
            zoneid=self.zone.id,
            templateid=self.template.id,
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
            overridediskofferingid=self.disk_offerings_tier2_template.id,
            diskofferingid=self.disk_offerings_tier2_template.id,
            size=2,
            hypervisor=self.hypervisor,
            rootdisksize=10
        )
        volumes = list_volumes(self.apiclient, virtualmachineid=virtual_machine_template_tier2.id, listall=True)
        for v in volumes:
            self.check_storpool_template(v, self.disk_offerings_tier2_template.id, self.spTemplate)
        virtual_machine_template_tier2.stop(self.apiclient, forced=True)

    @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
    def test_06_change_offerings_with_tags_detached_volume(self):
        disk_off_id = self.disk_offerings_tier2_tags.id
        virtual_machine_tier2_tag = VirtualMachine.create(
            self.apiclient,
            {"name": "StorPool-%s" % uuid.uuid4()},
            zoneid=self.zone.id,
            templateid=self.template.id,
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
            overridediskofferingid=disk_off_id,
            diskofferingid=disk_off_id,
            size=2,
            hypervisor=self.hypervisor,
            rootdisksize=10
        )
        virtual_machine_tier2_tag.stop(self.apiclient, forced=True)
        volumes = list_volumes(self.apiclient, virtualmachineid=virtual_machine_tier2_tag.id, type="DATADISK",
                               listall=True)
        virtual_machine_tier2_tag.detach_volume(
            self.apiclient,
            volumes[0]
        )
        self.vc_policy_tags(volumes=volumes, vm=virtual_machine_tier2_tag, qos_or_template=self.qos,
                            disk_offering_id=disk_off_id, attached=True)
        self.changeOfferingForVolume(volumes[0].id, self.disk_offerings_tier1_tags.id, volumes[0].size)
        self.vc_policy_tags(volumes=volumes, vm=virtual_machine_tier2_tag, qos_or_template=self.qos,
                            disk_offering_id=self.disk_offerings_tier1_tags.id, attached=True)

    @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
    def test_07_change_offerings_with_template_detached_volume(self):
        disk_off_id = self.disk_offerings_tier2_template.id
        virtual_machine_tier2_template = VirtualMachine.create(
            self.apiclient,
            {"name": "StorPool-%s" % uuid.uuid4()},
            zoneid=self.zone.id,
            templateid=self.template.id,
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
            overridediskofferingid=disk_off_id,
            diskofferingid=disk_off_id,
            size=2,
            hypervisor=self.hypervisor,
            rootdisksize=10
        )
        virtual_machine_tier2_template.stop(self.apiclient, forced=True)
        volumes = list_volumes(self.apiclient, virtualmachineid=virtual_machine_tier2_template.id, type="DATADISK",
                               listall=True)
        virtual_machine_tier2_template.detach_volume(
            self.apiclient,
            volumes[0]
        )
        self.check_storpool_template(volume=volumes[0], disk_offering_id=disk_off_id, qos_or_template=self.spTemplate)
        self.changeOfferingForVolume(volumes[0].id, self.disk_offerings_tier1_template.id, volumes[0].size)
        self.check_storpool_template(volume=volumes[0], disk_offering_id=self.disk_offerings_tier1_template.id,
                                     qos_or_template=self.spTemplate)

    @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
    def test_08_deploy_vm_with_tags_and_template_in_offerings(self):
        """
        Deploy virtual machine with disk offering on which resource details is set tier2 template and tier2 qos tags
        """
        disk_off_id = self.disk_offerings_tier2_tags_template.id
        virtual_machine_tier2_template = VirtualMachine.create(
            self.apiclient,
            {"name": "StorPool-%s" % uuid.uuid4()},
            zoneid=self.zone.id,
            templateid=self.template.id,
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
            overridediskofferingid=disk_off_id,
            diskofferingid=disk_off_id,
            size=2,
            hypervisor=self.hypervisor,
            rootdisksize=10
        )
        virtual_machine_tier2_template.stop(self.apiclient, forced=True)
        volumes = list_volumes(self.apiclient, virtualmachineid=virtual_machine_tier2_template.id, type="DATADISK",
                               listall=True)
        virtual_machine_tier2_template.detach_volume(
            self.apiclient,
            volumes[0]
        )
        self.check_storpool_template(volume=volumes[0], disk_offering_id=disk_off_id, qos_or_template=self.spTemplate,
                                     diff_template=True)
        self.vc_policy_tags(volumes=volumes, vm=virtual_machine_tier2_template, qos_or_template=self.qos,
                            disk_offering_id=disk_off_id, attached=True)
        self.changeOfferingForVolume(volumes[0].id, self.disk_offerings_tier1_tags.id, volumes[0].size)
        self.vc_policy_tags(volumes=volumes, vm=virtual_machine_tier2_template, qos_or_template=self.qos,
                            disk_offering_id=self.disk_offerings_tier1_tags.id, attached=True)

    @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
    def test_09_resize_root_volume(self):
        '''
        Resize Root volume with changeOfferingForVolume
        '''
        virtual_machine_tier1_tag = self.deploy_vm_and_check_tier_tag()
        root_volume = list_volumes(self.apiclient, virtualmachineid=virtual_machine_tier1_tag.id, type="ROOT",
                                   listall=True)
        self.changeOfferingForVolume(root_volume[0].id, self.disk_offerings_tier2_tags.id, (root_volume[0].size + 1024))
        root_volume = list_volumes(self.apiclient, virtualmachineid=virtual_machine_tier1_tag.id, type="ROOT",
                                   listall=True)
        self.vc_policy_tags(volumes=root_volume, vm=virtual_machine_tier1_tag, qos_or_template=self.qos,
                            disk_offering_id=self.disk_offerings_tier2_tags.id, attached=True)
        virtual_machine_tier1_tag.stop(self.apiclient, forced=True)

    @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
    def test_10_shrink_root_volume(self):
        '''
        Shrink Root volume with changeOfferingForVolume
        '''
        virtual_machine_tier1_tag = self.deploy_vm_and_check_tier_tag()
        root_volume = list_volumes(self.apiclient, virtualmachineid=virtual_machine_tier1_tag.id, type="ROOT",
                                   listall=True)
        virtual_machine_tier1_tag.stop(self.apiclient, forced=True)
        self.changeOfferingForVolume(root_volume[0].id, self.disk_offerings_tier2_tags.id, (root_volume[0].size - 1024),
                                     True)
        root_volume = list_volumes(self.apiclient, virtualmachineid=virtual_machine_tier1_tag.id, type="ROOT",
                                   listall=True)
        self.vc_policy_tags(volumes=root_volume, vm=virtual_machine_tier1_tag, qos_or_template=self.qos,
                            disk_offering_id=self.disk_offerings_tier2_tags.id, attached=True)

    @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
    def test_11_resize_data_volume(self):
        '''
        Resize DATADISK volume with changeOfferingForVolume
        '''
        virtual_machine_tier1_tag = self.deploy_vm_and_check_tier_tag()
        root_volume = list_volumes(self.apiclient, virtualmachineid=virtual_machine_tier1_tag.id, type="DATADISK",
                                   listall=True)
        self.changeOfferingForVolume(root_volume[0].id, self.disk_offerings_tier2_tags.id, (root_volume[0].size + 1024))
        root_volume = list_volumes(self.apiclient, virtualmachineid=virtual_machine_tier1_tag.id, type="DATADISK",
                                   listall=True)
        self.vc_policy_tags(volumes=root_volume, vm=virtual_machine_tier1_tag, qos_or_template=self.qos,
                            disk_offering_id=self.disk_offerings_tier2_tags.id, attached=True)
        virtual_machine_tier1_tag.stop(self.apiclient, forced=True)

    @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
    def test_12_shrink_data_volume(self):
        '''
        Shrink DATADISK volume with changeOfferingForVolume
        '''
        virtual_machine_tier1_tag = self.deploy_vm_and_check_tier_tag()
        root_volume = list_volumes(self.apiclient, virtualmachineid=virtual_machine_tier1_tag.id, type="DATADISK",
                                   listall=True)
        self.changeOfferingForVolume(root_volume[0].id, self.disk_offerings_tier2_tags.id, (root_volume[0].size - 1024),
                                     True)
        root_volume = list_volumes(self.apiclient, virtualmachineid=virtual_machine_tier1_tag.id, type="DATADISK",
                                   listall=True)
        self.vc_policy_tags(volumes=root_volume, vm=virtual_machine_tier1_tag, qos_or_template=self.qos,
                            disk_offering_id=self.disk_offerings_tier2_tags.id, attached=True)
        virtual_machine_tier1_tag.stop(self.apiclient, forced=True)

    def deploy_vm_and_check_tier_tag(self):
        virtual_machine_tier1_tag = VirtualMachine.create(
            self.apiclient,
            {"name": "StorPool-%s" % uuid.uuid4()},
            zoneid=self.zone.id,
            templateid=self.template.id,
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
            overridediskofferingid=self.disk_offerings_tier1_tags.id,
            diskofferingid=self.disk_offerings_tier1_tags.id,
            size=2,
            hypervisor=self.hypervisor,
            rootdisksize=10
        )
        volumes = list_volumes(self.apiclient, virtualmachineid=virtual_machine_tier1_tag.id, listall=True)
        self.vc_policy_tags(volumes=volumes, vm=virtual_machine_tier1_tag, qos_or_template=self.qos,
                            disk_offering_id=self.disk_offerings_tier1_tags.id, attached=True)
        return virtual_machine_tier1_tag

    @classmethod
    def resourceDetails(cls, qos, id, resValue=None):
        listResourceDetailCmd = listResourceDetails.listResourceDetailsCmd()
        listResourceDetailCmd.resourceid = id
        listResourceDetailCmd.resourcetype = "DiskOffering"
        listResourceDetailCmd.key = qos
        details = cls.apiclient.listResourceDetails(listResourceDetailCmd)
        if details is None:
            resource = addResourceDetail.addResourceDetailCmd()
            resource.resourceid = id
            resource.resourcetype = "DiskOffering"
            resDet = {'key': qos, 'value': resValue}
            resource.details = [resDet]
            resource.fordisplay = True
            details = cls.apiclient.addResourceDetail(resource)

    @classmethod
    def getZone(cls):
        zones = list_zones(cls.apiclient)
        for z in zones:
            if z.name == cls.getClsConfig().mgtSvr[0].zone:
                cls.zone = z
        assert cls.zone is not None

    def vc_policy_tags(self, volumes, vm, qos_or_template, disk_offering_id, should_tags_exists=None, vm_tags=None,
                       attached=None):
        vc_policy_tag = False
        cvm_tag = False
        qs_tag = False
        id = vm.id
        for v in volumes:
            name = v.path.split("/")[3]
            volume = self.spapi.volumeList(volumeName="~" + name)
            tags = volume[0].tags
            resource_details_value = ResourceDetails.list(self.apiclient, resourcetype="DiskOffering",
                                                          resourceid=disk_offering_id, key=qos_or_template)
            for t in tags:
                self.debug("TAGS are %s" % t)
                if vm_tags:
                    for vm_tag in vm_tags:
                        if t == vm_tag.key:
                            vc_policy_tag = True
                            self.assertEqual(tags[t], vm_tag.value, "Tags are not equal")
                if t == 'cvm':
                    self.debug("CVM tag %s is not the same as vm UUID %s" % (tags[t], id))
                    self.debug(type(tags[t]))
                    self.debug(len(tags[t]))
                    self.debug(type(id))
                    self.debug(len(id))
                    cvm_tag = True
                    self.assertEqual(tags[t], id, "CVM tag is not the same as vm UUID ")
                if t == 'qc':
                    qs_tag = True
                    self.assertEqual(tags[t], resource_details_value[0].value, "QOS tags should be the same")
        if should_tags_exists:
            self.assertTrue(vc_policy_tag, "There aren't volumes with vm tags")
            self.assertTrue(cvm_tag, "There aren't volumes with vm tags")
        if attached:
            self.assertTrue(qs_tag, "The QOS tag isn't set")
        else:
            self.assertFalse(vc_policy_tag, "The tags should be removed")
            self.assertFalse(cvm_tag, "The tags should be removed")

    def check_storpool_template(self, volume, disk_offering_id, qos_or_template, diff_template=None):
        name = volume.path.split("/")[3]
        sp_volume = self.spapi.volumeList(volumeName="~" + name)
        template = sp_volume[0].templateName
        resource_details_value = ResourceDetails.list(self.apiclient, resourcetype="DiskOffering",
                                                      resourceid=disk_offering_id, key=qos_or_template)
        if diff_template:
            self.assertNotEqual(template, resource_details_value[0].value, "The templates should not be the same")
        else:
            self.assertEqual(template, resource_details_value[0].value)

    def changeOfferingForVolume(self, volume_id, disk_offering_id, size, shrinkok=None):
        size = int(size / 1024 / 1024 / 1024)
        change_offering_for_volume_cmd = changeOfferingForVolume.changeOfferingForVolumeCmd()
        change_offering_for_volume_cmd.id = volume_id
        change_offering_for_volume_cmd.diskofferingid = disk_offering_id
        change_offering_for_volume_cmd.size = size
        change_offering_for_volume_cmd.shrinkok = shrinkok

        return self.apiclient.changeOfferingForVolume(change_offering_for_volume_cmd)


@@ -527,7 +527,7 @@ class VirtualMachine:
                customcpuspeed=None, custommemory=None, rootdisksize=None,
                rootdiskcontroller=None, vpcid=None, macaddress=None, datadisktemplate_diskoffering_list={},
                properties=None, nicnetworklist=None, bootmode=None, boottype=None, dynamicscalingenabled=None,
                userdataid=None, userdatadetails=None, extraconfig=None, size=None, overridediskofferingid=None):
        """Create the instance"""

        cmd = deployVirtualMachine.deployVirtualMachineCmd()
@@ -537,6 +537,9 @@ class VirtualMachine:
        elif "serviceoffering" in services:
            cmd.serviceofferingid = services["serviceoffering"]

        if overridediskofferingid:
            cmd.overridediskofferingid = overridediskofferingid

        if zoneid:
            cmd.zoneid = zoneid
        elif "zoneid" in services: