CLOUDSTACK-8239 - Adding support for virtio-scsi on KVM hosts

This adds support for virtio-scsi on KVM hosts, either
for guests that are associated with a new os_type of 'Other PV Virtio-SCSI (64-bit)',
or when a VM or template is registered with the detail parameter rootDiskController=scsi.

Update the CloudStack add template dialog to allow selecting rootDiskController for KVM

Update CloudStack KVM virtio-scsi to enable discard=unmap
Nathan Johnson 2017-03-01 09:04:46 -06:00
parent 850c07cc8a
commit 5c476492b1
7 changed files with 636 additions and 54 deletions

View File

@@ -116,6 +116,7 @@ import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.CpuTuneDef;
import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.DevicesDef;
import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.DiskDef;
import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.DiskDef.DeviceType;
import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.DiskDef.DiscardType;
import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.DiskDef.DiskProtocol;
import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.FeaturesDef;
import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.FilesystemDef;
@@ -125,6 +126,7 @@ import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.GuestResourceDef;
import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.InputDef;
import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.InterfaceDef;
import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.InterfaceDef.GuestNetType;
import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.SCSIDef;
import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.SerialDef;
import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.TermPolicy;
import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.VideoDef;
@@ -162,6 +164,7 @@ import com.cloud.utils.script.Script;
import com.cloud.utils.ssh.SshHelper;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.VirtualMachine.PowerState;
import com.cloud.vm.VmDetailConstants;
/**
* LibvirtComputingResource executes requests on the computing/routing host using
@@ -2059,6 +2062,19 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
final InputDef input = new InputDef("tablet", "usb");
devices.addDevice(input);
DiskDef.DiskBus busT = getDiskModelFromVMDetail(vmTO);
if (busT == null) {
busT = getGuestDiskModel(vmTO.getPlatformEmulator());
}
// If we're using virtio scsi, then we need to add a virtual scsi controller
if (busT == DiskDef.DiskBus.SCSI) {
final SCSIDef sd = new SCSIDef((short)0, 0, 0, 9, 0);
devices.addDevice(sd);
}
vm.addComp(devices);
return vm;
@@ -2142,23 +2158,16 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
}
// if params contains a rootDiskController key, use its value (this is what other HVs are doing)
DiskDef.DiskBus diskBusType = null;
final Map <String, String> params = vmSpec.getDetails();
if (params != null && params.get("rootDiskController") != null && !params.get("rootDiskController").isEmpty()) {
final String rootDiskController = params.get("rootDiskController");
s_logger.debug("Passed custom disk bus " + rootDiskController);
for (final DiskDef.DiskBus bus : DiskDef.DiskBus.values()) {
if (bus.toString().equalsIgnoreCase(rootDiskController)) {
s_logger.debug("Found matching enum for disk bus " + rootDiskController);
diskBusType = bus;
break;
}
}
}
DiskDef.DiskBus diskBusType = getDiskModelFromVMDetail(vmSpec);
if (diskBusType == null) {
diskBusType = getGuestDiskModel(vmSpec.getPlatformEmulator());
}
// It is unclear why certain DATADISKs were previously hard-coded to VIRTIO while others were not; this
// maintains the existing behavior, except that SCSI now overrides VIRTIO.
DiskDef.DiskBus diskBusTypeData = (diskBusType == DiskDef.DiskBus.SCSI) ? diskBusType : DiskDef.DiskBus.VIRTIO;
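As a quick reference, a hedged sketch of how the selection above plays out (guest names are illustrative, not an exhaustive list):

// detail rootDiskController=scsi                   -> root disk: SCSI,   data disks: SCSI
// no detail, platformEmulator "Ubuntu 16.04"       -> root disk: VIRTIO, data disks: VIRTIO
// no detail, platformEmulator "Windows 7" (non-PV) -> root disk: IDE,    data disks: VIRTIO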
final DiskDef disk = new DiskDef();
if (volume.getType() == Volume.Type.ISO) {
if (volPath == null) {
@@ -2170,6 +2179,11 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
} else {
final int devId = volume.getDiskSeq().intValue();
if (diskBusType == DiskDef.DiskBus.SCSI) {
disk.setQemuDriver(true);
disk.setDiscard(DiscardType.UNMAP);
}
if (pool.getType() == StoragePoolType.RBD) {
/*
For RBD pools we use the secret mechanism in libvirt.
@@ -2188,7 +2202,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
disk.defBlockBasedDisk(physicalDisk.getPath(), devId, diskBusType);
} else {
if (volume.getType() == Volume.Type.DATADISK) {
disk.defFileBasedDisk(physicalDisk.getPath(), devId, DiskDef.DiskBus.VIRTIO, DiskDef.DiskFmtType.QCOW2);
disk.defFileBasedDisk(physicalDisk.getPath(), devId, diskBusTypeData, DiskDef.DiskFmtType.QCOW2);
} else {
disk.defFileBasedDisk(physicalDisk.getPath(), devId, diskBusType, DiskDef.DiskFmtType.QCOW2);
}
@@ -2216,6 +2230,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
disk.setCacheMode(DiskDef.DiskCacheMode.valueOf(volumeObjectTO.getCacheMode().toString().toUpperCase()));
}
}
vm.getDevices().addDevice(disk);
}
@@ -2334,13 +2349,13 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
DiskDef diskdef = null;
final KVMStoragePool attachingPool = attachingDisk.getPool();
try {
if (!attach) {
dm = conn.domainLookupByName(vmName);
final LibvirtDomainXMLParser parser = new LibvirtDomainXMLParser();
final String xml = dm.getXMLDesc(0);
parser.parseDomainXML(xml);
disks = parser.getDisks();
dm = conn.domainLookupByName(vmName);
final LibvirtDomainXMLParser parser = new LibvirtDomainXMLParser();
final String domXml = dm.getXMLDesc(0);
parser.parseDomainXML(domXml);
disks = parser.getDisks();
if (!attach) {
for (final DiskDef disk : disks) {
final String file = disk.getDiskPath();
if (file != null && file.equalsIgnoreCase(attachingDisk.getPath())) {
@@ -2352,17 +2367,31 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
throw new InternalErrorException("disk: " + attachingDisk.getPath() + " is not attached before");
}
} else {
DiskDef.DiskBus busT = DiskDef.DiskBus.VIRTIO;
for (final DiskDef disk : disks) {
if (disk.getDeviceType() == DeviceType.DISK) {
if (disk.getBusType() == DiskDef.DiskBus.SCSI) {
busT = DiskDef.DiskBus.SCSI;
}
break;
}
}
diskdef = new DiskDef();
if (busT == DiskDef.DiskBus.SCSI) {
diskdef.setQemuDriver(true);
diskdef.setDiscard(DiscardType.UNMAP);
}
if (attachingPool.getType() == StoragePoolType.RBD) {
diskdef.defNetworkBasedDisk(attachingDisk.getPath(), attachingPool.getSourceHost(), attachingPool.getSourcePort(), attachingPool.getAuthUserName(),
attachingPool.getUuid(), devId, DiskDef.DiskBus.VIRTIO, DiskProtocol.RBD, DiskDef.DiskFmtType.RAW);
attachingPool.getUuid(), devId, busT, DiskProtocol.RBD, DiskDef.DiskFmtType.RAW);
} else if (attachingPool.getType() == StoragePoolType.Gluster) {
diskdef.defNetworkBasedDisk(attachingDisk.getPath(), attachingPool.getSourceHost(), attachingPool.getSourcePort(), null,
null, devId, DiskDef.DiskBus.VIRTIO, DiskProtocol.GLUSTER, DiskDef.DiskFmtType.QCOW2);
null, devId, busT, DiskProtocol.GLUSTER, DiskDef.DiskFmtType.QCOW2);
} else if (attachingDisk.getFormat() == PhysicalDiskFormat.QCOW2) {
diskdef.defFileBasedDisk(attachingDisk.getPath(), devId, DiskDef.DiskBus.VIRTIO, DiskDef.DiskFmtType.QCOW2);
diskdef.defFileBasedDisk(attachingDisk.getPath(), devId, busT, DiskDef.DiskFmtType.QCOW2);
} else if (attachingDisk.getFormat() == PhysicalDiskFormat.RAW) {
diskdef.defBlockBasedDisk(attachingDisk.getPath(), devId, DiskDef.DiskBus.VIRTIO);
diskdef.defBlockBasedDisk(attachingDisk.getPath(), devId, busT);
}
if (bytesReadRate != null && bytesReadRate > 0) {
diskdef.setBytesReadRate(bytesReadRate);
@@ -2961,19 +2990,8 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
}
boolean isGuestPVEnabled(final String guestOSName) {
if (guestOSName == null) {
return false;
}
if (guestOSName.startsWith("Ubuntu") || guestOSName.startsWith("Fedora 13") || guestOSName.startsWith("Fedora 12") || guestOSName.startsWith("Fedora 11") ||
guestOSName.startsWith("Fedora 10") || guestOSName.startsWith("Fedora 9") || guestOSName.startsWith("CentOS 5.3") || guestOSName.startsWith("CentOS 5.4") ||
guestOSName.startsWith("CentOS 5.5") || guestOSName.startsWith("CentOS") || guestOSName.startsWith("Fedora") ||
guestOSName.startsWith("Red Hat Enterprise Linux 5.3") || guestOSName.startsWith("Red Hat Enterprise Linux 5.4") ||
guestOSName.startsWith("Red Hat Enterprise Linux 5.5") || guestOSName.startsWith("Red Hat Enterprise Linux 6") || guestOSName.startsWith("Debian GNU/Linux") ||
guestOSName.startsWith("FreeBSD 10") || guestOSName.startsWith("Oracle") || guestOSName.startsWith("Other PV")) {
return true;
} else {
return false;
}
DiskDef.DiskBus db = getGuestDiskModel(guestOSName);
return db != DiskDef.DiskBus.IDE;
}
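The check is now derived from the guest's disk model rather than a duplicated name list, so any guest that maps to a paravirtualized bus counts as PV-enabled. A hedged sketch of the resulting behaviour:

// isGuestPVEnabled("Other PV Virtio-SCSI (64-bit)") -> true  (maps to SCSI)
// isGuestPVEnabled("Ubuntu 16.04")                  -> true  (maps to VIRTIO)
// isGuestPVEnabled("Windows 7")                     -> false (falls back to IDE)
// isGuestPVEnabled(null)                            -> false (IDE)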
public boolean isCentosHost() {
@@ -2984,14 +3002,42 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
}
}
public DiskDef.DiskBus getDiskModelFromVMDetail(final VirtualMachineTO vmTO) {
Map<String, String> details = vmTO.getDetails();
if (details == null) {
return null;
}
final String rootDiskController = details.get(VmDetailConstants.ROOT_DISK_CONTROLLER);
if (StringUtils.isNotBlank(rootDiskController)) {
s_logger.debug("Passed custom disk bus " + rootDiskController);
for (final DiskDef.DiskBus bus : DiskDef.DiskBus.values()) {
if (bus.toString().equalsIgnoreCase(rootDiskController)) {
s_logger.debug("Found matching enum for disk bus " + rootDiskController);
return bus;
}
}
}
return null;
}
private DiskDef.DiskBus getGuestDiskModel(final String platformEmulator) {
if (isGuestPVEnabled(platformEmulator)) {
if (platformEmulator == null) {
return DiskDef.DiskBus.IDE;
} else if (platformEmulator.startsWith("Other PV Virtio-SCSI")) {
return DiskDef.DiskBus.SCSI;
} else if (platformEmulator.startsWith("Ubuntu") || platformEmulator.startsWith("Fedora 13") || platformEmulator.startsWith("Fedora 12") || platformEmulator.startsWith("Fedora 11") ||
platformEmulator.startsWith("Fedora 10") || platformEmulator.startsWith("Fedora 9") || platformEmulator.startsWith("CentOS 5.3") || platformEmulator.startsWith("CentOS 5.4") ||
platformEmulator.startsWith("CentOS 5.5") || platformEmulator.startsWith("CentOS") || platformEmulator.startsWith("Fedora") ||
platformEmulator.startsWith("Red Hat Enterprise Linux 5.3") || platformEmulator.startsWith("Red Hat Enterprise Linux 5.4") ||
platformEmulator.startsWith("Red Hat Enterprise Linux 5.5") || platformEmulator.startsWith("Red Hat Enterprise Linux 6") || platformEmulator.startsWith("Debian GNU/Linux") ||
platformEmulator.startsWith("FreeBSD 10") || platformEmulator.startsWith("Oracle") || platformEmulator.startsWith("Other PV")) {
return DiskDef.DiskBus.VIRTIO;
} else {
return DiskDef.DiskBus.IDE;
}
}
}
private void cleanupVMNetworks(final Connect conn, final List<InterfaceDef> nics) {
if (nics != null) {
for (final InterfaceDef nic : nics) {

View File

@@ -545,6 +545,23 @@ public class LibvirtVMDef {
}
}
public enum DiscardType {
IGNORE("ignore"), UNMAP("unmap");
String _discardType;
DiscardType(String discardType) {
_discardType = discardType;
}
@Override
public String toString() {
if (_discardType == null) {
return "ignore";
}
return _discardType;
}
}
private DeviceType _deviceType; /* floppy, disk, cdrom */
private DiskType _diskType;
private DiskProtocol _diskProtocol;
@@ -566,6 +583,15 @@ public class LibvirtVMDef {
private DiskCacheMode _diskCacheMode;
private String _serial;
private boolean qemuDriver = true;
private DiscardType _discard = DiscardType.IGNORE;
public DiscardType getDiscard() {
return _discard;
}
public void setDiscard(DiscardType discard) {
this._discard = discard;
}
public void setDeviceType(DeviceType deviceType) {
_deviceType = deviceType;
@@ -764,7 +790,11 @@ public class LibvirtVMDef {
diskBuilder.append(">\n");
if(qemuDriver) {
diskBuilder.append("<driver name='qemu'" + " type='" + _diskFmtType
+ "' cache='" + _diskCacheMode + "' " + "/>\n");
+ "' cache='" + _diskCacheMode + "' ");
if(_discard != null && _discard != DiscardType.IGNORE) {
diskBuilder.append("discard='" + _discard.toString() + "' ");
}
diskBuilder.append("/>\n");
}
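With discard enabled, the guest's TRIM/UNMAP requests are passed through to the backing storage so freed blocks can be reclaimed (sparse QCOW2 files, thin-provisioned LUNs). A minimal sketch of the driver emission above, assuming a QCOW2 volume; the path and device id are illustrative:

DiskDef disk = new DiskDef();
disk.setDiscard(DiskDef.DiscardType.UNMAP);   // qemuDriver already defaults to true
disk.defFileBasedDisk("/var/lib/libvirt/images/data.qcow2", 1,
        DiskDef.DiskBus.SCSI, DiskDef.DiskFmtType.QCOW2);
// disk.toString() then renders the driver element roughly as:
//   <driver name='qemu' type='qcow2' cache='none' discard='unmap'/>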
if (_diskType == DiskType.FILE) {
@@ -1345,6 +1375,37 @@ public class LibvirtVMDef {
}
}
public static class SCSIDef {
private short index = 0;
private int domain = 0;
private int bus = 0;
private int slot = 9;
private int function = 0;
public SCSIDef(short index, int domain, int bus, int slot, int function) {
this.index = index;
this.domain = domain;
this.bus = bus;
this.slot = slot;
this.function = function;
}
public SCSIDef() {
}
@Override
public String toString() {
StringBuilder scsiBuilder = new StringBuilder();
scsiBuilder.append(String.format("<controller type='scsi' index='%d' mode='virtio-scsi'>\n", this.index ));
scsiBuilder.append(String.format("<address type='pci' domain='0x%04X' bus='0x%02X' slot='0x%02X' function='0x%01X'/>\n",
this.domain, this.bus, this.slot, this.function ) );
scsiBuilder.append("</controller>");
return scsiBuilder.toString();
}
}
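With the values used by the VM-definition path above (index 0, PCI slot 9), this renders:

// new SCSIDef((short) 0, 0, 0, 9, 0).toString() yields:
//   <controller type='scsi' index='0' model='virtio-scsi'>
//     <address type='pci' domain='0x0000' bus='0x00' slot='0x09' function='0x0'/>
//   </controller>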
public static class InputDef {
private final String _type; /* tablet, mouse */
private final String _bus; /* ps2, usb, xen */

View File

@@ -88,6 +88,8 @@ import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
import com.cloud.hypervisor.kvm.resource.LibvirtConnection;
import com.cloud.hypervisor.kvm.resource.LibvirtDomainXMLParser;
import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.DiskDef;
import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.DiskDef.DeviceType;
import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.DiskDef.DiscardType;
import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.DiskDef.DiskProtocol;
import com.cloud.storage.JavaStorageLayer;
import com.cloud.storage.Storage.ImageFormat;
@@ -972,13 +974,12 @@ public class KVMStorageProcessor implements StorageProcessor {
DiskDef diskdef = null;
final KVMStoragePool attachingPool = attachingDisk.getPool();
try {
dm = conn.domainLookupByName(vmName);
final LibvirtDomainXMLParser parser = new LibvirtDomainXMLParser();
final String domXml = dm.getXMLDesc(0);
parser.parseDomainXML(domXml);
disks = parser.getDisks();
if (!attach) {
dm = conn.domainLookupByName(vmName);
final LibvirtDomainXMLParser parser = new LibvirtDomainXMLParser();
final String xml = dm.getXMLDesc(0);
parser.parseDomainXML(xml);
disks = parser.getDisks();
if (attachingPool.getType() == StoragePoolType.RBD) {
if (resource.getHypervisorType() == Hypervisor.HypervisorType.LXC) {
final String device = resource.mapRbdDevice(attachingDisk);
@@ -1000,7 +1001,20 @@ public class KVMStorageProcessor implements StorageProcessor {
throw new InternalErrorException("disk: " + attachingDisk.getPath() + " is not attached before");
}
} else {
DiskDef.DiskBus busT = DiskDef.DiskBus.VIRTIO;
for (final DiskDef disk : disks) {
if (disk.getDeviceType() == DeviceType.DISK) {
if (disk.getBusType() == DiskDef.DiskBus.SCSI) {
busT = DiskDef.DiskBus.SCSI;
}
break;
}
}
diskdef = new DiskDef();
if (busT == DiskDef.DiskBus.SCSI) {
diskdef.setQemuDriver(true);
diskdef.setDiscard(DiscardType.UNMAP);
}
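In short, a newly attached volume inherits the bus of the VM's first existing disk. A hedged trace, assuming a VM whose root disk already sits on the virtio-scsi bus (device names illustrative):

// existing root disk:  <target dev='sda' bus='scsi'/>
// attach new volume -> busT = SCSI, so the new DiskDef gets the qemu driver
//                      with discard='unmap' and lands on the same controller,
//                      e.g. <target dev='sdb' bus='scsi'/>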
diskdef.setSerial(serial);
if (attachingPool.getType() == StoragePoolType.RBD) {
if(resource.getHypervisorType() == Hypervisor.HypervisorType.LXC){
@@ -1008,24 +1022,24 @@ public class KVMStorageProcessor implements StorageProcessor {
final String device = resource.mapRbdDevice(attachingDisk);
if (device != null) {
s_logger.debug("RBD device on host is: "+device);
diskdef.defBlockBasedDisk(device, devId, DiskDef.DiskBus.VIRTIO);
diskdef.defBlockBasedDisk(device, devId, busT);
} else {
throw new InternalErrorException("Error while mapping disk "+attachingDisk.getPath()+" on host");
}
} else {
diskdef.defNetworkBasedDisk(attachingDisk.getPath(), attachingPool.getSourceHost(), attachingPool.getSourcePort(), attachingPool.getAuthUserName(),
attachingPool.getUuid(), devId, DiskDef.DiskBus.VIRTIO, DiskProtocol.RBD, DiskDef.DiskFmtType.RAW);
attachingPool.getUuid(), devId, busT, DiskProtocol.RBD, DiskDef.DiskFmtType.RAW);
}
} else if (attachingPool.getType() == StoragePoolType.Gluster) {
final String mountpoint = attachingPool.getLocalPath();
final String path = attachingDisk.getPath();
final String glusterVolume = attachingPool.getSourceDir().replace("/", "");
diskdef.defNetworkBasedDisk(glusterVolume + path.replace(mountpoint, ""), attachingPool.getSourceHost(), attachingPool.getSourcePort(), null,
null, devId, DiskDef.DiskBus.VIRTIO, DiskProtocol.GLUSTER, DiskDef.DiskFmtType.QCOW2);
null, devId, busT, DiskProtocol.GLUSTER, DiskDef.DiskFmtType.QCOW2);
} else if (attachingDisk.getFormat() == PhysicalDiskFormat.QCOW2) {
diskdef.defFileBasedDisk(attachingDisk.getPath(), devId, DiskDef.DiskBus.VIRTIO, DiskDef.DiskFmtType.QCOW2);
diskdef.defFileBasedDisk(attachingDisk.getPath(), devId, busT, DiskDef.DiskFmtType.QCOW2);
} else if (attachingDisk.getFormat() == PhysicalDiskFormat.RAW) {
diskdef.defBlockBasedDisk(attachingDisk.getPath(), devId, DiskDef.DiskBus.VIRTIO);
diskdef.defBlockBasedDisk(attachingDisk.getPath(), devId, busT);
}
if ((bytesReadRate != null) && (bytesReadRate > 0)) {

View File

@@ -39,6 +39,7 @@ INSERT IGNORE INTO `cloud`.`guest_os` (id, uuid, category_id, display_name, crea
INSERT IGNORE INTO `cloud`.`guest_os` (id, uuid, category_id, display_name, created) VALUES (272, UUID(), 4, 'Red Hat Enterprise Linux 7.1', now());
INSERT IGNORE INTO `cloud`.`guest_os` (id, uuid, category_id, display_name, created) VALUES (273, UUID(), 4, 'Red Hat Enterprise Linux 7.2', now());
INSERT IGNORE INTO `cloud`.`guest_os` (id, uuid, category_id, display_name, created) VALUES (274, UUID(), 1, 'CentOS 7.2', now());
INSERT IGNORE INTO `cloud`.`guest_os` (id, uuid, category_id, display_name, created) VALUES (275, UUID(), 6, 'Other PV Virtio-SCSI (64-bit)', now());
INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '6.5.0', 'Windows 10 (32-bit)', 257, now(), 0);
INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.0', 'windows9Guest', 257, now(), 0);
@@ -108,6 +109,7 @@ INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervi
INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '5.5', 'centos64Guest', 274, now(), 0);
INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.0', 'centos64Guest', 274, now(), 0);
INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'CentOS 7.2', 274, now(), 0);
INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'Other PV Virtio-SCSI (64-bit)', 275, now(), 0);
CREATE TABLE `cloud`.`vlan_details` (
`id` bigint unsigned NOT NULL auto_increment,

View File

@@ -0,0 +1,380 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Test from the Marvin - Testing in Python wiki
# All tests inherit from cloudstackTestCase
from marvin.cloudstackTestCase import cloudstackTestCase
# Import Integration Libraries
# base - contains all resources as entities and defines create, delete,
# list operations on them
from marvin.lib.base import (Account,
VirtualMachine,
ServiceOffering,
NetworkOffering,
Network,
Template,
DiskOffering,
StoragePool,
Volume,
Host,
GuestOs)
# utils - utility classes for common cleanup, external library wrappers etc
from marvin.lib.utils import cleanup_resources, get_hypervisor_type, validateList
# common - commonly used methods for all tests are listed here
from marvin.lib.common import get_zone, get_domain, get_template, list_hosts, get_pod
from marvin.sshClient import SshClient
from marvin.codes import FAILED, PASS
from nose.plugins.attrib import attr
import xml.etree.ElementTree as ET
import code
import logging
class Templates:
"""Test data for templates
"""
def __init__(self):
self.templates = {
"kvmvirtioscsi": {
"kvm": {
"name": "tiny-kvm-scsi",
"displaytext": "virtio-scsi kvm",
"format": "qcow2",
"hypervisor": "kvm",
"ostype": "Other PV Virtio-SCSI (64-bit)",
"url": "http://dl.openvm.eu/cloudstack/ubuntu/vanilla/16.04/x86_64/ubuntu-16.04-server-cloudimg-amd64-disk1-kvm.qcow2.bz2",
"requireshvm": True,
"ispublic": True,
"passwordenabled": True
}
}
}
class TestDeployVirtioSCSIVM(cloudstackTestCase):
"""
Test deploy a kvm virtio scsi template
"""
@classmethod
def setUpClass(cls):
cls.logger = logging.getLogger('TestDeployVirtioSCSIVM')
cls.stream_handler = logging.StreamHandler()
cls.logger.setLevel(logging.DEBUG)
cls.logger.addHandler(cls.stream_handler)
testClient = super(TestDeployVirtioSCSIVM, cls).getClsTestClient()
cls.apiclient = testClient.getApiClient()
cls.services = cls.testClient.getParsedTestDataConfig()
cls.hostConfig = cls.config.__dict__["zones"][0].__dict__["pods"][0].__dict__["clusters"][0].__dict__["hosts"][0].__dict__
cls.hypervisorNotSupported = False
cls.hypervisor = testClient.getHypervisorInfo()
# Get Zone, Domain and templates
cls.domain = get_domain(cls.apiclient)
cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
cls.pod = get_pod(cls.apiclient, cls.zone.id)
cls.services['mode'] = cls.zone.networktype
if cls.hypervisor.lower() not in ['kvm']:
cls.hypervisorNotSupported = True
return
cls._cleanup = []
kvmvirtioscsi = Templates().templates["kvmvirtioscsi"]
cls.template = Template.register(
cls.apiclient,
kvmvirtioscsi[cls.hypervisor.lower()],
cls.zone.id,
hypervisor=cls.hypervisor.lower(),
domainid=cls.domain.id)
cls.template.download(cls.apiclient)
if cls.template == FAILED:
assert False, "get_template() failed to return template"
cls.services["domainid"] = cls.domain.id
cls.services["small"]["zoneid"] = cls.zone.id
cls.services["zoneid"] = cls.zone.id
cls.account = Account.create(
cls.apiclient,
cls.services["account"],
domainid=cls.domain.id
)
cls.service_offering = ServiceOffering.create(
cls.apiclient,
cls.services["service_offerings"]["tiny"]
)
cls.sparse_disk_offering = DiskOffering.create(
cls.apiclient,
cls.services["sparse_disk_offering"]
)
cls.virtual_machine = VirtualMachine.create(
cls.apiclient,
cls.services["small"],
templateid=cls.template.id,
accountid=cls.account.name,
domainid=cls.account.domainid,
zoneid=cls.zone.id,
serviceofferingid=cls.service_offering.id,
diskofferingid=cls.sparse_disk_offering.id,
mode=cls.zone.networktype
)
hosts = Host.list(cls.apiclient, id=cls.virtual_machine.hostid)
if len(hosts) != 1:
assert False, "Could not find host with id " + cls.virtual_machine.hostid
cls.vmhost = hosts[0]
password = cls.virtual_machine.resetPassword(cls.apiclient)
cls.virtual_machine.username = "ubuntu"
cls.virtual_machine.password = password
cls._cleanup = [
cls.template,
cls.service_offering,
cls.sparse_disk_offering,
cls.account
]
@classmethod
def tearDownClass(cls):
try:
# Cleanup resources used
cleanup_resources(cls.apiclient, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
# Clean up, terminate the created instance, volumes and snapshots
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def verifyVirshState(self, diskcount):
host = self.vmhost.ipaddress
instancename = self.virtual_machine.instancename
virshxml = self.getVirshXML(host, instancename)
root = ET.fromstring(virshxml)
scsis = root.findall("./devices/controller[@type='scsi']/alias[@name='scsi0']/..")
self.assertEqual(len(scsis), 1, "SCSI controller not found")
scsiindex = scsis[0].get('index')
self.assertNotEqual(scsiindex, None, "Could not find index of SCSI controller")
# find all scsi disks
disks = root.findall("./devices/disk[@device='disk']/target[@bus='scsi']/..")
self.assertEqual(len(disks), diskcount, "Could not find expected number of disks")
for disk in disks:
for child in disk:
if child.tag.lower() == "target":
dev = child.get("dev")
self.assertTrue(dev is not None and dev.startswith("sd"), "disk dev is invalid")
elif child.tag.lower() == "address":
con = child.get("controller")
self.assertEqual(con, scsiindex, "disk controller not equal to SCSI " \
"controller index")
elif child.tag.lower() == "driver":
discard = child.get("discard")
self.assertEqual(discard, "unmap", "discard settings not unmap")
def verifyGuestState(self, diskcount):
ssh = self.virtual_machine.get_ssh_client(reconnect=True)
output = ssh.execute("lspci | grep \"Virtio SCSI\"")
self.assertTrue(len(output) > 0, "Could not find virtio scsi controller")
output = ssh.execute("lsblk -rS | grep sd")
for disk in output:
self.logger.debug("disk " + disk + " found")
self.assertEqual(len(output), diskcount,
"Could not find appropriate number of scsi disks in guest")
def getVirshXML(self, host, instancename):
if host is None:
self.logger.debug("getVirshXML: host is none")
return ""
else:
self.logger.debug("host is: " + host)
if instancename is None:
self.logger.debug("getVirshXML: instancename is none")
return ""
else:
self.logger.debug("instancename is: " + instancename)
sshc = SshClient(
host=host,
port=self.services['configurableData']['host']["publicport"],
user=self.hostConfig['username'],
passwd=self.hostConfig['password'])
ssh = sshc.ssh
chan = ssh.get_transport().open_session()
chan.exec_command("virsh dumpxml " + instancename)
stdout = ""
while True:
b = chan.recv(10000)
if len(b) == 0:
break
stdout += b
stderr = ""
while True:
b = chan.recv_stderr(10000)
if len(b) == 0:
break
stderr += b
xstatus = chan.recv_exit_status()
chan.close()
if xstatus != 0:
raise CommandNonzeroException(xstatus, stderr)
# rely on sshClient to close ssh
self.logger.debug("xml is: \n\n%s\n\n" % (stdout))
return stdout
@attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
def test_01_verify_libvirt(self):
"""Test that libvirt properly created domain with scsi controller
"""
# Validate virsh dumpxml
if self.hypervisorNotSupported:
self.skipTest("Hypervisor not supported")
self.verifyVirshState(2)
@attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
def test_02_verify_libvirt_after_restart(self):
""" Verify that libvirt settings are as expected after a VM stop / start
"""
if self.hypervisorNotSupported:
self.skipTest("Hypervisor not supported")
self.virtual_machine.stop(self.apiclient)
self.virtual_machine.start(self.apiclient)
self.verifyVirshState(2)
@attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
def test_03_verify_libvirt_attach_disk(self):
""" Verify that libvirt settings are expected after a disk add
"""
if self.hypervisorNotSupported:
self.skipTest("Hypervisor not supported")
self.volume = Volume.create(
self.apiclient,
self.services,
zoneid=self.zone.id,
account=self.account.name,
domainid=self.account.domainid,
diskofferingid=self.sparse_disk_offering.id
)
self.virtual_machine.attach_volume(
self.apiclient,
self.volume
)
self.verifyVirshState(3)
@attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
def test_04_verify_guest_lspci(self):
""" Verify that guest sees scsi controller and disks
"""
if self.hypervisorNotSupported:
self.skipTest("Hypervisor not supported")
self.verifyGuestState(3)
@attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
def test_05_change_vm_ostype_restart(self):
""" Update os type to Ubuntu, change vm details rootdiskController
explicitly to scsi.
"""
if self.hypervisorNotSupported:
self.skipTest("Hypervisor not supported")
self.virtual_machine.stop(self.apiclient)
ostypes = GuestOs.listMapping(self.apiclient, hypervisor="kvm")
self.assertTrue(len(ostypes) > 0)
ostypeid = None
for ostype in ostypes:
if ostype.osdisplayname == "Ubuntu 16.04 (64-bit)":
ostypeid = ostype.ostypeid
break
self.assertIsNotNone(ostypeid,
"Could not find ostypeid for Ubuntu 16.0.4 (64-bit) mapped to kvm")
self.virtual_machine.update(self.apiclient, ostypeid=ostypeid,
details=[{"rootDiskController":"scsi"}])
self.virtual_machine.start(self.apiclient)
self.verifyVirshState(3)
@attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
def test_06_verify_guest_lspci_again(self):
""" Verify that guest sees scsi controller and disks after switching ostype and rdc
"""
if self.hypervisorNotSupported:
self.skipTest("Hypervisor not supported")
self.verifyGuestState(3)
class CommandNonzeroException(Exception):
def __init__(self, code, stderr):
self.code = code
self.stderr = stderr
def __str__(self):
return "Status code %d: %s" % (self.code, self.stderr)

View File

@@ -32,7 +32,6 @@ import time
import hashlib
import base64
class Domain:
""" Domain Life Cycle """
def __init__(self, items):
@@ -449,7 +448,8 @@ class VirtualMachine:
affinitygroupnames=None, affinitygroupids=None, group=None,
hostid=None, keypair=None, ipaddress=None, mode='default',
method='GET', hypervisor=None, customcpunumber=None,
customcpuspeed=None, custommemory=None, rootdisksize=None):
customcpuspeed=None, custommemory=None, rootdisksize=None,
rootdiskcontroller=None):
"""Create the instance"""
cmd = deployVirtualMachine.deployVirtualMachineCmd()
@@ -553,6 +553,9 @@ class VirtualMachine:
if rootdisksize >= 0:
cmd.details[0]["rootdisksize"] = rootdisksize
if rootdiskcontroller:
cmd.details[0]["rootDiskController"] = rootdiskcontroller
if group:
cmd.group = group
@@ -2264,6 +2267,33 @@ class SnapshotPolicy:
cmd.listall = True
return(apiclient.listSnapshotPolicies(cmd))
class GuestOs:
"""Guest OS calls (currently read-only implemented)"""
def __init__(self, items):
self.__dict__.update(items)
@classmethod
def listMapping(cls, apiclient, **kwargs):
"""List all Guest Os Mappings matching criteria"""
cmd = listGuestOsMapping.listGuestOsMappingCmd()
[setattr(cmd, k, v) for k, v in kwargs.items()]
return (apiclient.listGuestOsMapping(cmd))
@classmethod
def listCategories(cls, apiclient, **kwargs):
"""List all Os Categories"""
cmd = listOsCategories.listOsCategoriesCmd()
[setattr(cmd, k, v) for k, v in kwargs.items()]
return (apiclient.listOsCategories(cmd))
@classmethod
def list(cls, apiclient, **kwargs):
"""List all Os Types matching criteria"""
cmd = listOsTypes.listOsTypesCmd()
[setattr(cmd, k, v) for k, v in kwargs.items()]
return(apiclient.listOsTypes(cmd))
class Hypervisor:
"""Manage Hypervisor"""

View File

@@ -217,21 +217,30 @@
$form.find('.form-item[rel=rootDiskControllerType]').css('display', 'inline-block');
$form.find('.form-item[rel=nicAdapterType]').css('display', 'inline-block');
$form.find('.form-item[rel=keyboardType]').css('display', 'inline-block');
$form.find('.form-item[rel=xenserverToolsVersion61plus]').hide();
$form.find('.form-item[rel=rootDiskControllerTypeKVM]').hide();
} else if ($(this).val() == "XenServer") {
$form.find('.form-item[rel=rootDiskControllerType]').hide();
$form.find('.form-item[rel=nicAdapterType]').hide();
$form.find('.form-item[rel=keyboardType]').hide();
$form.find('.form-item[rel=rootDiskControllerTypeKVM]').hide();
if (isAdmin())
$form.find('.form-item[rel=xenserverToolsVersion61plus]').css('display', 'inline-block');
} else if ($(this).val() == "KVM") {
$form.find('.form-item[rel=rootDiskControllerType]').hide();
$form.find('.form-item[rel=nicAdapterType]').hide();
$form.find('.form-item[rel=keyboardType]').hide();
$form.find('.form-item[rel=xenserverToolsVersion61plus]').hide();
$form.find('.form-item[rel=rootDiskControllerTypeKVM]').css('display', 'inline-block');
} else {
$form.find('.form-item[rel=rootDiskControllerType]').hide();
$form.find('.form-item[rel=nicAdapterType]').hide();
$form.find('.form-item[rel=keyboardType]').hide();
$form.find('.form-item[rel=xenserverToolsVersion61plus]').hide();
$form.find('.form-item[rel=rootDiskControllerTypeKVM]').hide();
}
});
@@ -263,6 +272,38 @@
isHidden: true
},
//fields for hypervisor == "KVM" (starts here)
rootDiskControllerTypeKVM: {
label: 'label.root.disk.controller',
isHidden: true,
select: function(args) {
var items = [];
items.push({
id: "",
description: ""
});
items.push({
id: "ide",
description: "ide"
});
items.push({
id: "osdefault",
description: "osdefault"
});
items.push({
id: "scsi",
description: "virtio-scsi"
});
items.push({
id: "virtio",
description: "virtio"
});
args.response.success({
data: items
});
}
},
//fields for hypervisor == "VMware" (starts here)
rootDiskControllerType: {
label: 'label.root.disk.controller',
@@ -549,6 +590,14 @@
}
//XenServer only (ends here)
// KVM only (starts here)
if (args.$form.find('.form-item[rel=rootDiskControllerTypeKVM]').css("display") != "none" && args.data.rootDiskControllerTypeKVM != "") {
$.extend(data, {
'details[0].rootDiskController': args.data.rootDiskControllerTypeKVM
});
}
// KVM only (ends here)
//VMware only (starts here)
if (args.$form.find('.form-item[rel=rootDiskControllerType]').css("display") != "none" && args.data.rootDiskControllerType != "") {
$.extend(data, {