Making use of the new SolidFire SDK for Java

Commit 3475ef29f0 (parent 054a7178e0)
Author: Mike Tutkowski
Date: 2016-09-14 14:03:54 -06:00
10 changed files with 1005 additions and 1787 deletions

View File

@ -20,6 +20,11 @@
<relativePath>../../../pom.xml</relativePath>
</parent>
<dependencies>
<dependency>
<groupId>com.solidfire</groupId>
<artifactId>solidfire-sdk-java</artifactId>
<version>1.2.0.29</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-plugin-storage-volume-default</artifactId>
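The dependency added above is the published SolidFire SDK for Java that the rest of this commit switches the plugin's SolidFireUtil helpers over to. For orientation only (this is not code from the commit), the SDK is driven through an ElementFactory entry point that hands back a SolidFireElement on which API requests are made. A minimal connection sketch, assuming the entry points and signatures shown in the SDK's published 1.x examples, with placeholder endpoint and credentials:

// Minimal sketch, not part of this commit. ElementFactory/SolidFireElement
// follow the entry points shown in solidfire-sdk-java 1.x examples; the
// MVIP, credentials, and API version here are placeholders.
import com.solidfire.client.ElementFactory;
import com.solidfire.element.api.SolidFireElement;

public class SolidFireSdkExample {
    public static void main(String[] args) {
        // Connect to the cluster's management virtual IP (MVIP).
        SolidFireElement sfe = ElementFactory.create("192.168.139.112", "admin", "password", "8.0");

        // Issue a simple request to confirm the connection works.
        System.out.println(sfe.getClusterInfo().getClusterInfo().getName());
    }
}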

View File

@ -133,9 +133,9 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
}
private SolidFireUtil.SolidFireAccount createSolidFireAccount(SolidFireUtil.SolidFireConnection sfConnection, String sfAccountName) {
long accountNumber = SolidFireUtil.createSolidFireAccount(sfConnection, sfAccountName);
long accountNumber = SolidFireUtil.createAccount(sfConnection, sfAccountName);
return SolidFireUtil.getSolidFireAccountById(sfConnection, accountNumber);
return SolidFireUtil.getAccountById(sfConnection, accountNumber);
}
@Override
@ -189,11 +189,11 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
SolidFireUtil.SolidFireConnection sfConnection = SolidFireUtil.getSolidFireConnection(storagePoolId, storagePoolDetailsDao);
if (vagId != null) {
SolidFireUtil.SolidFireVag sfVag = SolidFireUtil.getSolidFireVag(sfConnection, Long.parseLong(vagId));
SolidFireUtil.SolidFireVag sfVag = SolidFireUtil.getVag(sfConnection, Long.parseLong(vagId));
long[] volumeIds = SolidFireUtil.getNewVolumeIds(sfVag.getVolumeIds(), sfVolumeId, true);
SolidFireUtil.modifySolidFireVag(sfConnection, sfVag.getId(), sfVag.getInitiators(), volumeIds);
SolidFireUtil.modifyVag(sfConnection, sfVag.getId(), sfVag.getInitiators(), volumeIds);
}
else {
SolidFireUtil.placeVolumeInVolumeAccessGroup(sfConnection, sfVolumeId, storagePoolId, cluster.getUuid(), hosts, clusterDetailsDao);
@ -241,11 +241,11 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
if (vagId != null) {
SolidFireUtil.SolidFireConnection sfConnection = SolidFireUtil.getSolidFireConnection(storagePoolId, storagePoolDetailsDao);
SolidFireUtil.SolidFireVag sfVag = SolidFireUtil.getSolidFireVag(sfConnection, Long.parseLong(vagId));
SolidFireUtil.SolidFireVag sfVag = SolidFireUtil.getVag(sfConnection, Long.parseLong(vagId));
long[] volumeIds = SolidFireUtil.getNewVolumeIds(sfVag.getVolumeIds(), sfVolumeId, false);
SolidFireUtil.modifySolidFireVag(sfConnection, sfVag.getId(), sfVag.getInitiators(), volumeIds);
SolidFireUtil.modifyVag(sfConnection, sfVag.getId(), sfVag.getInitiators(), volumeIds);
}
}
finally {
@ -370,10 +370,10 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
final Iops iops = getIops(minIops, maxIops, storagePoolId);
long sfVolumeId = SolidFireUtil.createSolidFireVolume(sfConnection, SolidFireUtil.getSolidFireVolumeName(volumeName), sfAccountId,
long sfVolumeId = SolidFireUtil.createVolume(sfConnection, SolidFireUtil.getSolidFireVolumeName(volumeName), sfAccountId,
volumeSize, true, mapAttributes, iops.getMinIops(), iops.getMaxIops(), iops.getBurstIops());
return SolidFireUtil.getSolidFireVolume(sfConnection, sfVolumeId);
return SolidFireUtil.getVolume(sfConnection, sfVolumeId);
}
private Iops getIops(Long minIops, Long maxIops, long storagePoolId) {
@ -415,7 +415,7 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
try {
long lVolumeId = Long.parseLong(volume.getFolder());
SolidFireUtil.SolidFireVolume sfVolume = SolidFireUtil.getSolidFireVolume(sfConnection, lVolumeId);
SolidFireUtil.SolidFireVolume sfVolume = SolidFireUtil.getVolume(sfConnection, lVolumeId);
long volumeSize = sfVolume.getTotalSize();
@ -626,7 +626,7 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
if (accountDetail == null || accountDetail.getValue() == null) {
AccountVO account = accountDao.findById(csAccountId);
String sfAccountName = SolidFireUtil.getSolidFireAccountName(account.getUuid(), account.getAccountId());
SolidFireUtil.SolidFireAccount sfAccount = SolidFireUtil.getSolidFireAccount(sfConnection, sfAccountName);
SolidFireUtil.SolidFireAccount sfAccount = SolidFireUtil.getAccount(sfConnection, sfAccountName);
if (sfAccount == null) {
sfAccount = createSolidFireAccount(sfConnection, sfAccountName);
@ -778,14 +778,14 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
" and data-object type: " + dataObjectType);
}
final long newSfVolumeId = SolidFireUtil.createSolidFireClone(sfConnection, sfVolumeId, sfSnapshotId, sfAccountId, sfNewVolumeName,
final long newSfVolumeId = SolidFireUtil.createClone(sfConnection, sfVolumeId, sfSnapshotId, sfAccountId, sfNewVolumeName,
getVolumeAttributes(volumeInfo));
final Iops iops = getIops(volumeInfo.getMinIops(), volumeInfo.getMaxIops(), storagePoolId);
SolidFireUtil.modifySolidFireVolume(sfConnection, newSfVolumeId, null, null, iops.getMinIops(), iops.getMaxIops(), iops.getBurstIops());
SolidFireUtil.modifyVolume(sfConnection, newSfVolumeId, null, null, iops.getMinIops(), iops.getMaxIops(), iops.getBurstIops());
return SolidFireUtil.getSolidFireVolume(sfConnection, newSfVolumeId);
return SolidFireUtil.getVolume(sfConnection, newSfVolumeId);
}
private Map<String, String> getVolumeAttributes(VolumeInfo volumeInfo) {
@ -824,9 +824,9 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
long sfSnapshotId = Long.parseLong(snapshotDetails.getValue());
SolidFireUtil.SolidFireSnapshot sfSnapshot = SolidFireUtil.getSolidFireSnapshot(sfConnection, sfVolumeId, sfSnapshotId);
SolidFireUtil.SolidFireSnapshot sfSnapshot = SolidFireUtil.getSnapshot(sfConnection, sfVolumeId, sfSnapshotId);
long newSfVolumeId = SolidFireUtil.createSolidFireClone(sfConnection, sfVolumeId, sfSnapshotId, sfAccountId, sfSnapshot.getName(), null);
long newSfVolumeId = SolidFireUtil.createClone(sfConnection, sfVolumeId, sfSnapshotId, sfAccountId, sfSnapshot.getName(), null);
snapshotDetails = snapshotDetailsDao.findDetail(csSnapshotId, SolidFireUtil.STORAGE_POOL_ID);
@ -834,9 +834,9 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
final Iops iops = getIops(MIN_IOPS_FOR_TEMP_VOLUME, MAX_IOPS_FOR_TEMP_VOLUME, storagePoolId);
SolidFireUtil.modifySolidFireVolume(sfConnection, newSfVolumeId, null, null, iops.getMinIops(), iops.getMaxIops(), iops.getBurstIops());
SolidFireUtil.modifyVolume(sfConnection, newSfVolumeId, null, null, iops.getMinIops(), iops.getMaxIops(), iops.getBurstIops());
return SolidFireUtil.getSolidFireVolume(sfConnection, newSfVolumeId);
return SolidFireUtil.getVolume(sfConnection, newSfVolumeId);
}
private void updateVolumeDetails(long volumeId, long sfVolumeSize) {
@ -900,7 +900,7 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
SolidFireUtil.SolidFireConnection sfConnection = SolidFireUtil.getSolidFireConnection(storagePoolId, storagePoolDetailsDao);
SolidFireUtil.SolidFireVolume sfVolume = SolidFireUtil.getSolidFireVolume(sfConnection, sfVolumeId);
SolidFireUtil.SolidFireVolume sfVolume = SolidFireUtil.getVolume(sfConnection, sfVolumeId);
StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId);
@ -928,7 +928,7 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
String sfNewSnapshotName = volumeInfo.getName() + "-" + snapshotInfo.getUuid();
long sfNewSnapshotId = SolidFireUtil.createSolidFireSnapshot(sfConnection, sfVolumeId, sfNewSnapshotName, getSnapshotAttributes(snapshotInfo));
long sfNewSnapshotId = SolidFireUtil.createSnapshot(sfConnection, sfVolumeId, sfNewSnapshotName, getSnapshotAttributes(snapshotInfo));
updateSnapshotDetails(snapshotInfo.getId(), sfVolumeId, sfNewSnapshotId, storagePoolId, sfVolumeSize);
@ -941,10 +941,10 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
final Iops iops = getIops(MIN_IOPS_FOR_SNAPSHOT_VOLUME, MAX_IOPS_FOR_SNAPSHOT_VOLUME, storagePoolId);
long sfNewVolumeId = SolidFireUtil.createSolidFireVolume(sfConnection, sfNewVolumeName, sfVolume.getAccountId(), sfVolumeSize,
long sfNewVolumeId = SolidFireUtil.createVolume(sfConnection, sfNewVolumeName, sfVolume.getAccountId(), sfVolumeSize,
sfVolume.isEnable512e(), getSnapshotAttributes(snapshotInfo), iops.getMinIops(), iops.getMaxIops(), iops.getBurstIops());
SolidFireUtil.SolidFireVolume sfNewVolume = SolidFireUtil.getSolidFireVolume(sfConnection, sfNewVolumeId);
SolidFireUtil.SolidFireVolume sfNewVolume = SolidFireUtil.getVolume(sfConnection, sfNewVolumeId);
updateSnapshotDetails(snapshotInfo.getId(), sfNewVolumeId, storagePoolId, sfVolumeSize, sfNewVolume.getIqn());
@ -1080,11 +1080,11 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
if (volumeSize > sfVolume.getTotalSize()) {
// Expand the volume to include HSR.
SolidFireUtil.modifySolidFireVolume(sfConnection, sfVolume.getId(), volumeSize, getVolumeAttributes(volumeInfo),
SolidFireUtil.modifyVolume(sfConnection, sfVolume.getId(), volumeSize, getVolumeAttributes(volumeInfo),
sfVolume.getMinIops(), sfVolume.getMaxIops(), sfVolume.getBurstIops());
// Get the SolidFire volume from the SAN again because we just updated its size.
sfVolume = SolidFireUtil.getSolidFireVolume(sfConnection, sfVolume.getId());
sfVolume = SolidFireUtil.getVolume(sfConnection, sfVolume.getId());
}
}
else {
@ -1143,7 +1143,7 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
else if (snapshotDetails != null && snapshotDetails.getValue() != null && snapshotDetails.getValue().equalsIgnoreCase("delete")) {
snapshotDetails = snapshotDetailsDao.findDetail(csSnapshotId, SolidFireUtil.VOLUME_ID);
SolidFireUtil.deleteSolidFireVolume(sfConnection, Long.parseLong(snapshotDetails.getValue()));
SolidFireUtil.deleteVolume(sfConnection, Long.parseLong(snapshotDetails.getValue()));
removeTempVolumeId(csSnapshotId);
@ -1200,7 +1200,7 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
VolumeVO volumeVO = volumeDao.findById(volumeId);
SolidFireUtil.deleteSolidFireVolume(sfConnection, Long.parseLong(volumeVO.getFolder()));
SolidFireUtil.deleteVolume(sfConnection, Long.parseLong(volumeVO.getFolder()));
volumeVO.setFolder(String.valueOf(sfVolumeId));
volumeVO.set_iScsiName(iqn);
@ -1217,7 +1217,7 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
Preconditions.checkNotNull(sfVolumeId, "'sfVolumeId' should not be 'null'.");
SolidFireUtil.deleteSolidFireVolume(sfConnection, sfVolumeId);
SolidFireUtil.deleteVolume(sfConnection, sfVolumeId);
volumeDetailsDao.removeDetail(volumeId, BASIC_SF_ID);
volumeDetailsDao.removeDetail(volumeId, BASIC_IQN);
@ -1279,7 +1279,7 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
long sfVolumeId = Long.parseLong(snapshotDetails.getValue());
SolidFireUtil.deleteSolidFireVolume(sfConnection, sfVolumeId);
SolidFireUtil.deleteVolume(sfConnection, sfVolumeId);
}
snapshotDetailsDao.removeDetails(csSnapshotId);
@ -1306,7 +1306,7 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
long sfTemplateVolumeId = getVolumeIdFrom_iScsiPath(template.getInstallPath());
SolidFireUtil.deleteSolidFireVolume(sfConnection, sfTemplateVolumeId);
SolidFireUtil.deleteVolume(sfConnection, sfTemplateVolumeId);
VMTemplateStoragePoolVO templatePoolRef = tmpltPoolDao.findByPoolTemplate(storagePoolId, template.getId());
@ -1346,7 +1346,7 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
ResizeVolumePayload payload = (ResizeVolumePayload)volumeInfo.getpayload();
SolidFireUtil.SolidFireConnection sfConnection = SolidFireUtil.getSolidFireConnection(storagePoolId, storagePoolDetailsDao);
SolidFireUtil.SolidFireVolume sfVolume = SolidFireUtil.getSolidFireVolume(sfConnection, sfVolumeId);
SolidFireUtil.SolidFireVolume sfVolume = SolidFireUtil.getVolume(sfConnection, sfVolumeId);
verifySufficientIopsForStoragePool(storagePoolId, volumeInfo.getId(), payload.newMinIops);
verifySufficientBytesForStoragePool(storagePoolId, volumeInfo.getId(), payload.newSize, payload.newHypervisorSnapshotReserve);
@ -1375,7 +1375,7 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
mapAttributes.put(SolidFireUtil.CloudStackVolumeId, String.valueOf(volumeInfo.getId()));
mapAttributes.put(SolidFireUtil.CloudStackVolumeSize, NumberFormat.getInstance().format(payload.newSize));
SolidFireUtil.modifySolidFireVolume(sfConnection, sfVolumeId, sfNewVolumeSize, mapAttributes,
SolidFireUtil.modifyVolume(sfConnection, sfVolumeId, sfNewVolumeSize, mapAttributes,
payload.newMinIops, payload.newMaxIops, getDefaultBurstIops(storagePoolId, payload.newMaxIops));
VolumeVO volume = volumeDao.findById(volumeInfo.getId());
@ -1481,12 +1481,12 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
}
if (deleteVolume) {
SolidFireUtil.deleteSolidFireVolume(sfConnection, sfVolumeId);
SolidFireUtil.deleteVolume(sfConnection, sfVolumeId);
}
}
private void deleteSolidFireSnapshot(SolidFireUtil.SolidFireConnection sfConnection, long csSnapshotId, long sfSnapshotId) {
SolidFireUtil.deleteSolidFireSnapshot(sfConnection, sfSnapshotId);
SolidFireUtil.deleteSnapshot(sfConnection, sfSnapshotId);
SnapshotVO snapshot = snapshotDao.findById(csSnapshotId);
VolumeVO volume = volumeDao.findById(snapshot.getVolumeId());
@ -1513,7 +1513,7 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
if (lstSnapshots2.isEmpty()) {
volume = volumeDao.findByIdIncludingRemoved(snapshot.getVolumeId());
SolidFireUtil.deleteSolidFireVolume(sfConnection, Long.parseLong(volume.getFolder()));
SolidFireUtil.deleteVolume(sfConnection, Long.parseLong(volume.getFolder()));
}
}
}
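Every change in this file follows one pattern: the SolidFireUtil helpers keep their signatures and call sites and only drop the redundant "SolidFire" infix from their names (createSolidFireVolume becomes createVolume, getSolidFireVag becomes getVag, and so on). Assembled from the new lines above, with the plugin's surrounding fields and locals elided, the volume lifecycle now reads:

// Illustrative fragment assembled from the new lines in this diff; not new
// code. storagePoolId, volumeName, sfAccountId, volumeSize, mapAttributes,
// iops, and sfNewVolumeSize come from the surrounding plugin code.
SolidFireUtil.SolidFireConnection sfConnection =
        SolidFireUtil.getSolidFireConnection(storagePoolId, storagePoolDetailsDao);

long sfVolumeId = SolidFireUtil.createVolume(sfConnection, SolidFireUtil.getSolidFireVolumeName(volumeName),
        sfAccountId, volumeSize, true, mapAttributes, iops.getMinIops(), iops.getMaxIops(), iops.getBurstIops());

SolidFireUtil.SolidFireVolume sfVolume = SolidFireUtil.getVolume(sfConnection, sfVolumeId);

SolidFireUtil.modifyVolume(sfConnection, sfVolumeId, sfNewVolumeSize, mapAttributes,
        iops.getMinIops(), iops.getMaxIops(), iops.getBurstIops());

SolidFireUtil.deleteVolume(sfConnection, sfVolumeId);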

View File

@ -261,7 +261,7 @@ public class SolidFirePrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeC
SolidFireUtil.SolidFireConnection sfConnection = SolidFireUtil.getSolidFireConnection(storagePoolId, _storagePoolDetailsDao);
long sfTemplateVolumeId = Long.parseLong(templatePoolRef.getLocalDownloadPath());
SolidFireUtil.deleteSolidFireVolume(sfConnection, sfTemplateVolumeId);
SolidFireUtil.deleteVolume(sfConnection, sfTemplateVolumeId);
}
catch (Exception ex) {
s_logger.error(ex.getMessage() != null ? ex.getMessage() : "Error deleting SolidFire template volume");

View File

@ -343,17 +343,17 @@ public class SolidFireSharedPrimaryDataStoreLifeCycle implements PrimaryDataStor
String sfAccountName = SolidFireUtil.getSolidFireAccountName(accountVo.getUuid(), csAccountId);
SolidFireUtil.SolidFireAccount sfAccount = SolidFireUtil.getSolidFireAccount(sfConnection, sfAccountName);
SolidFireUtil.SolidFireAccount sfAccount = SolidFireUtil.getAccount(sfConnection, sfAccountName);
if (sfAccount == null) {
long sfAccountNumber = SolidFireUtil.createSolidFireAccount(sfConnection, sfAccountName);
long sfAccountNumber = SolidFireUtil.createAccount(sfConnection, sfAccountName);
sfAccount = SolidFireUtil.getSolidFireAccountById(sfConnection, sfAccountNumber);
sfAccount = SolidFireUtil.getAccountById(sfConnection, sfAccountNumber);
}
long sfVolumeId = SolidFireUtil.createSolidFireVolume(sfConnection, SolidFireUtil.getSolidFireVolumeName(volumeName), sfAccount.getId(), volumeSize,
long sfVolumeId = SolidFireUtil.createVolume(sfConnection, SolidFireUtil.getSolidFireVolumeName(volumeName), sfAccount.getId(), volumeSize,
true, null, minIops, maxIops, burstIops);
SolidFireUtil.SolidFireVolume sfVolume = SolidFireUtil.getSolidFireVolume(sfConnection, sfVolumeId);
SolidFireUtil.SolidFireVolume sfVolume = SolidFireUtil.getVolume(sfConnection, sfVolumeId);
return new SolidFireCreateVolume(sfVolume, sfAccount);
} catch (Throwable e) {
@ -596,11 +596,11 @@ public class SolidFireSharedPrimaryDataStoreLifeCycle implements PrimaryDataStor
if (vagId != null) {
SolidFireUtil.SolidFireConnection sfConnection = SolidFireUtil.getSolidFireConnection(storagePoolId, _storagePoolDetailsDao);
SolidFireUtil.SolidFireVag sfVag = SolidFireUtil.getSolidFireVag(sfConnection, Long.parseLong(vagId));
SolidFireUtil.SolidFireVag sfVag = SolidFireUtil.getVag(sfConnection, Long.parseLong(vagId));
long[] volumeIds = SolidFireUtil.getNewVolumeIds(sfVag.getVolumeIds(), sfVolumeId, false);
SolidFireUtil.modifySolidFireVag(sfConnection, sfVag.getId(), sfVag.getInitiators(), volumeIds);
SolidFireUtil.modifyVag(sfConnection, sfVag.getId(), sfVag.getInitiators(), volumeIds);
}
}
@ -609,7 +609,7 @@ public class SolidFireSharedPrimaryDataStoreLifeCycle implements PrimaryDataStor
long sfVolumeId = getVolumeId(storagePoolId);
SolidFireUtil.deleteSolidFireVolume(sfConnection, sfVolumeId);
SolidFireUtil.deleteVolume(sfConnection, sfVolumeId);
}
private long getVolumeId(long storagePoolId) {
@ -691,7 +691,7 @@ public class SolidFireSharedPrimaryDataStoreLifeCycle implements PrimaryDataStor
}
}
SolidFireUtil.modifySolidFireVolume(sfConnection, getVolumeId(storagePool.getId()), size, null, minIops, maxIops, burstIops);
SolidFireUtil.modifyVolume(sfConnection, getVolumeId(storagePool.getId()), size, null, minIops, maxIops, burstIops);
SolidFireUtil.updateCsDbWithSolidFireIopsInfo(storagePool.getId(), _primaryDataStoreDao, _storagePoolDetailsDao, minIops, maxIops, burstIops);
}
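Because the createSolidFireVolume() hunk at the top of this file interleaves removed and added lines, the resulting control flow is easier to read in isolation: look up the SolidFire account by name, create it on demand, then create the volume and fetch it back.

// The new sequence from createSolidFireVolume() above, with the removed
// lines dropped for readability; nothing here is new code.
SolidFireUtil.SolidFireAccount sfAccount = SolidFireUtil.getAccount(sfConnection, sfAccountName);

if (sfAccount == null) {
    long sfAccountNumber = SolidFireUtil.createAccount(sfConnection, sfAccountName);
    sfAccount = SolidFireUtil.getAccountById(sfConnection, sfAccountNumber);
}

long sfVolumeId = SolidFireUtil.createVolume(sfConnection, SolidFireUtil.getSolidFireVolumeName(volumeName),
        sfAccount.getId(), volumeSize, true, null, minIops, maxIops, burstIops);

SolidFireUtil.SolidFireVolume sfVolume = SolidFireUtil.getVolume(sfConnection, sfVolumeId);

return new SolidFireCreateVolume(sfVolume, sfAccount);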

View File

@ -42,14 +42,19 @@ from marvin.lib.utils import cleanup_resources
# Prerequisites:
# Only one zone
# Only one pod
# Only one cluster (two hosts with another added/removed during the tests)
# Only one cluster (two hosts for XenServer / one host for KVM with another added/removed during the tests)
#
# Running the tests:
# Set a breakpoint on each test after the first one. When the breakpoint is hit, reset the third
# Change the "hypervisor_type" variable to control which hypervisor type to test.
# If using XenServer, set a breakpoint on each test after the first one. When the breakpoint is hit, reset the added/removed
# host to a snapshot state and re-start it. Once it's up and running, run the test code.
# Check that ip_address_of_new_xenserver_host / ip_address_of_new_kvm_host is correct.
# If using XenServer, verify the "xen_server_master_hostname" variable is correct.
# If using KVM, verify the "kvm_1_ip_address" variable is correct.
class TestData:
#constants
account = "account"
capacityBytes = "capacitybytes"
capacityIops = "capacityiops"
@ -59,9 +64,11 @@ class TestData:
diskSize = "disksize"
domainId = "domainId"
hypervisor = "hypervisor"
kvm = "kvm"
mvip = "mvip"
name = "name"
newHost = "newHost"
newXenServerHost = "newXenServerHost"
newKvmHost = "newKvmHost"
newHostDisplayName = "newHostDisplayName"
password = "password"
podId = "podid"
@ -75,7 +82,6 @@ class TestData:
storageTag2 = "SolidFire_Volume_1"
tags = "tags"
url = "url"
urlOfNewHost = "urlOfNewHost"
user = "user"
username = "username"
virtualMachine = "virtualmachine"
@ -83,6 +89,13 @@ class TestData:
xenServer = "xenserver"
zoneId = "zoneid"
# modify to control which hypervisor type to test
hypervisor_type = xenServer
xen_server_master_hostname = "XenServer-6.5-1"
kvm_1_ip_address = "192.168.129.84"
ip_address_of_new_xenserver_host = "192.168.129.243"
ip_address_of_new_kvm_host = "192.168.129.3"
def __init__(self):
self.testdata = {
TestData.solidFire: {
@ -92,11 +105,14 @@ class TestData:
TestData.port: 443,
TestData.url: "https://192.168.139.112:443"
},
TestData.kvm: {
TestData.username: "root",
TestData.password: "solidfire"
},
TestData.xenServer: {
TestData.username: "root",
TestData.password: "solidfire"
},
TestData.urlOfNewHost: "https://192.168.129.243",
TestData.account: {
"email": "test@test.com",
"firstname": "John",
@ -111,10 +127,17 @@ class TestData:
TestData.username: "testuser",
TestData.password: "password"
},
TestData.newHost: {
TestData.newXenServerHost: {
TestData.username: "root",
TestData.password: "solidfire",
TestData.url: "http://192.168.129.243",
TestData.url: "http://" + TestData.ip_address_of_new_xenserver_host,
TestData.podId : "1",
TestData.zoneId: "1"
},
TestData.newKvmHost: {
TestData.username: "root",
TestData.password: "solidfire",
TestData.url: "http://" + TestData.ip_address_of_new_kvm_host,
TestData.podId : "1",
TestData.zoneId: "1"
},
@ -164,9 +187,6 @@ class TestData:
TestData.volume_1: {
"diskname": "testvolume",
},
"volume2": {
"diskname": "testvolume2",
},
TestData.newHostDisplayName: "XenServer-6.5-3",
TestData.zoneId: 1,
TestData.clusterId: 1,
@ -190,16 +210,10 @@ class TestAddRemoveHosts(cloudstackTestCase):
cls.testdata = TestData().testdata
cls.xs_pool_master_ip = list_hosts(cls.apiClient, clusterid=cls.testdata[TestData.clusterId], name="XenServer-6.5-1")[0].ipaddress
if TestData.hypervisor_type == TestData.xenServer:
cls.xs_pool_master_ip = list_hosts(cls.apiClient, clusterid=cls.testdata[TestData.clusterId], name=TestData.xen_server_master_hostname)[0].ipaddress
# Set up XenAPI connection
host_ip = "https://" + cls.xs_pool_master_ip
cls.xen_session = XenAPI.Session(host_ip)
xenserver = cls.testdata[TestData.xenServer]
cls.xen_session.xenapi.login_with_password(xenserver[TestData.username], xenserver[TestData.password])
cls._connect_to_hypervisor()
# Set up SolidFire connection
solidfire = cls.testdata[TestData.solidFire]
@ -296,13 +310,20 @@ class TestAddRemoveHosts(cloudstackTestCase):
startvm=True
)
root_volume = self._get_root_volume(self.virtual_machine)
if TestData.hypervisor_type == TestData.xenServer:
root_volume = self._get_root_volume(self.virtual_machine)
sf_iscsi_name = sf_util.get_iqn(self.cs_api, root_volume, self)
self._perform_add_remove_host(primary_storage.id, sf_iscsi_name)
sf_iscsi_name = sf_util.get_iqn(self.cs_api, root_volume, self)
self._perform_add_remove_xenserver_host(primary_storage.id, sf_iscsi_name)
elif TestData.hypervisor_type == TestData.kvm:
self._perform_add_remove_kvm_host(primary_storage.id)
else:
self.assertTrue(False, "Invalid hypervisor type")
def test_add_remove_host_with_solidfire_plugin_2(self):
if TestData.hypervisor_type != TestData.xenServer:
return
primarystorage2 = self.testdata[TestData.primaryStorage2]
primary_storage_2 = StoragePool.create(
@ -322,9 +343,12 @@ class TestAddRemoveHosts(cloudstackTestCase):
sf_iscsi_name = self._get_iqn_2(primary_storage_2)
self._perform_add_remove_host(primary_storage_2.id, sf_iscsi_name)
self._perform_add_remove_xenserver_host(primary_storage_2.id, sf_iscsi_name)
def test_add_remove_host_with_solidfire_plugin_3(self):
if TestData.hypervisor_type != TestData.xenServer:
return
primarystorage = self.testdata[TestData.primaryStorage]
primary_storage = StoragePool.create(
@ -373,9 +397,12 @@ class TestAddRemoveHosts(cloudstackTestCase):
self.cleanup.append(primary_storage_2)
self._perform_add_remove_host(primary_storage.id, sf_iscsi_name)
self._perform_add_remove_xenserver_host(primary_storage.id, sf_iscsi_name)
def test_add_remove_host_with_solidfire_plugin_4(self):
if TestData.hypervisor_type != TestData.xenServer:
return
primarystorage2 = self.testdata[TestData.primaryStorage2]
primary_storage_2 = StoragePool.create(
@ -422,9 +449,9 @@ class TestAddRemoveHosts(cloudstackTestCase):
startvm=True
)
self._perform_add_remove_host(primary_storage_2.id, sf_iscsi_name)
self._perform_add_remove_xenserver_host(primary_storage_2.id, sf_iscsi_name)
def _perform_add_remove_host(self, primary_storage_id, sr_name):
def _perform_add_remove_xenserver_host(self, primary_storage_id, sr_name):
xen_sr = self.xen_session.xenapi.SR.get_by_name_label(sr_name)[0]
pbds = self.xen_session.xenapi.SR.get_PBDs(xen_sr)
@ -435,7 +462,7 @@ class TestAddRemoveHosts(cloudstackTestCase):
sf_vag_id = self._get_sf_vag_id(self.cluster.id, primary_storage_id)
host_iscsi_iqns = self._get_host_iscsi_iqns()
host_iscsi_iqns = self._get_xenserver_host_iscsi_iqns()
sf_vag = self._get_sf_vag(sf_vag_id)
@ -445,7 +472,7 @@ class TestAddRemoveHosts(cloudstackTestCase):
sf_vag_initiators_len_orig = len(sf_vag_initiators)
xen_session = XenAPI.Session(self.testdata[TestData.urlOfNewHost])
xen_session = XenAPI.Session("https://" + TestData.ip_address_of_new_xenserver_host)
xenserver = self.testdata[TestData.xenServer]
@ -482,7 +509,7 @@ class TestAddRemoveHosts(cloudstackTestCase):
host = Host.create(
self.apiClient,
self.cluster,
self.testdata[TestData.newHost],
self.testdata[TestData.newXenServerHost],
hypervisor="XenServer"
)
@ -501,7 +528,7 @@ class TestAddRemoveHosts(cloudstackTestCase):
self._verify_all_pbds_attached(pbds)
host_iscsi_iqns = self._get_host_iscsi_iqns()
host_iscsi_iqns = self._get_xenserver_host_iscsi_iqns()
sf_vag = self._get_sf_vag(sf_vag_id)
@ -529,7 +556,7 @@ class TestAddRemoveHosts(cloudstackTestCase):
self._verify_all_pbds_attached(pbds)
host_iscsi_iqns = self._get_host_iscsi_iqns()
host_iscsi_iqns = self._get_xenserver_host_iscsi_iqns()
sf_vag = self._get_sf_vag(sf_vag_id)
@ -557,7 +584,81 @@ class TestAddRemoveHosts(cloudstackTestCase):
self._verify_all_pbds_attached(pbds)
host_iscsi_iqns = self._get_host_iscsi_iqns()
host_iscsi_iqns = self._get_xenserver_host_iscsi_iqns()
sf_vag = self._get_sf_vag(sf_vag_id)
sf_vag_initiators = self._get_sf_vag_initiators(sf_vag)
self._verifyVag(host_iscsi_iqns, sf_vag_initiators)
sf_vag_initiators_len_new = len(sf_vag_initiators)
self.assertEqual(
sf_vag_initiators_len_new,
sf_vag_initiators_len_orig,
"sf_vag_initiators_len_new' != sf_vag_initiators_len_orig"
)
def _perform_add_remove_kvm_host(self, primary_storage_id):
sf_vag_id = self._get_sf_vag_id(self.cluster.id, primary_storage_id)
kvm_login = self.testdata[TestData.kvm]
kvm_hosts = []
kvm_hosts.append(TestData.kvm_1_ip_address)
host_iscsi_iqns = self._get_kvm_host_iscsi_iqns(kvm_hosts, kvm_login[TestData.username], kvm_login[TestData.password])
sf_vag = self._get_sf_vag(sf_vag_id)
sf_vag_initiators = self._get_sf_vag_initiators(sf_vag)
self._verifyVag(host_iscsi_iqns, sf_vag_initiators)
sf_vag_initiators_len_orig = len(sf_vag_initiators)
host = Host.create(
self.apiClient,
self.cluster,
self.testdata[TestData.newKvmHost],
hypervisor="KVM"
)
self.assertTrue(
isinstance(host, Host),
"'host' is not a 'Host'."
)
kvm_hosts = []
kvm_hosts.append(TestData.kvm_1_ip_address)
kvm_hosts.append(TestData.ip_address_of_new_kvm_host)
host_iscsi_iqns = self._get_kvm_host_iscsi_iqns(kvm_hosts, kvm_login[TestData.username], kvm_login[TestData.password])
sf_vag = self._get_sf_vag(sf_vag_id)
sf_vag_initiators = self._get_sf_vag_initiators(sf_vag)
self._verifyVag(host_iscsi_iqns, sf_vag_initiators)
sf_vag_initiators_len_new = len(sf_vag_initiators)
self.assertEqual(
sf_vag_initiators_len_new,
sf_vag_initiators_len_orig + 1,
"sf_vag_initiators_len_new' != sf_vag_initiators_len_orig + 1"
)
host.delete(self.apiClient)
kvm_hosts = []
kvm_hosts.append(TestData.kvm_1_ip_address)
host_iscsi_iqns = self._get_kvm_host_iscsi_iqns(kvm_hosts, kvm_login[TestData.username], kvm_login[TestData.password])
sf_vag = self._get_sf_vag(sf_vag_id)
@ -616,7 +717,7 @@ class TestAddRemoveHosts(cloudstackTestCase):
return sql_result[0][0]
def _get_host_iscsi_iqns(self):
def _get_xenserver_host_iscsi_iqns(self):
hosts = self.xen_session.xenapi.host.get_all()
self.assertEqual(
@ -628,15 +729,41 @@ class TestAddRemoveHosts(cloudstackTestCase):
host_iscsi_iqns = []
for host in hosts:
host_iscsi_iqns.append(self._get_host_iscsi_iqn(host))
host_iscsi_iqns.append(self._get_xenserver_host_iscsi_iqn(host))
return host_iscsi_iqns
def _get_host_iscsi_iqn(self, host):
def _get_xenserver_host_iscsi_iqn(self, host):
other_config = self.xen_session.xenapi.host.get_other_config(host)
return other_config["iscsi_iqn"]
def _get_kvm_host_iscsi_iqns(self, kvm_ip_addresses, common_username, common_password):
host_iscsi_iqns = []
for kvm_ip_address in kvm_ip_addresses:
host_iscsi_iqn = self._get_kvm_iqn(kvm_ip_address, common_username, common_password)
host_iscsi_iqns.append(host_iscsi_iqn)
return host_iscsi_iqns
def _get_kvm_iqn(self, ip_address, username, password):
ssh_connection = sf_util.get_ssh_connection(ip_address, username, password)
searchFor = "InitiatorName="
stdin, stdout, stderr = ssh_connection.exec_command("sudo grep " + searchFor + " /etc/iscsi/initiatorname.iscsi")
result = stdout.read()
ssh_connection.close()
self.assertFalse(result is None, "Unable to locate the IQN of the KVM host (None)")
self.assertFalse(len(result.strip()) == 0, "Unable to locate the IQN of the KVM host (Zero-length string)")
return result[len(searchFor):].strip()
def _get_sf_vag_id(self, cluster_id, primary_storage_id):
# Get SF Volume Access Group ID
sf_vag_id_request = {'clusterid': cluster_id, 'storageid': primary_storage_id}
@ -679,3 +806,18 @@ class TestAddRemoveHosts(cloudstackTestCase):
for host_iscsi_iqn in host_iscsi_iqns:
# an error should occur if host_iscsi_iqn is not in sf_vag_initiators
sf_vag_initiators.index(host_iscsi_iqn)
@classmethod
def _connect_to_hypervisor(cls):
if TestData.hypervisor_type == TestData.kvm:
pass
elif TestData.hypervisor_type == TestData.xenServer:
host_ip = "https://" + \
list_hosts(cls.apiClient, clusterid=cls.testdata[TestData.clusterId], name=TestData.xen_server_master_hostname)[0].ipaddress
cls.xen_session = XenAPI.Session(host_ip)
xen_server = cls.testdata[TestData.xenServer]
cls.xen_session.xenapi.login_with_password(xen_server[TestData.username], xen_server[TestData.password])

View File

@ -29,8 +29,6 @@ from marvin.cloudstackAPI import destroySystemVm
# All tests inherit from cloudstackTestCase
from marvin.cloudstackTestCase import cloudstackTestCase
from nose.plugins.attrib import attr
# Import Integration Libraries
# base - contains all resources as entities and defines create, delete, list operations on them
@ -50,12 +48,17 @@ from marvin.lib.utils import cleanup_resources, wait_until
# * Only one secondary storage VM and one console proxy VM running on NFS (no virtual router or user VMs exist)
# * Only one pod
# * Only one cluster
# * Set storage.cleanup.enabled to true
# * Set storage.cleanup.interval to 150
# * Set storage.cleanup.delay to 60
#
# Running the tests:
# Change the "hypervisor_type" variable to control which hypervisor type to test.
# If using XenServer, verify the "xen_server_hostname" variable is correct.
# Set the Global Setting "storage.cleanup.enabled" to true.
# Set the Global Setting "storage.cleanup.interval" to 150.
# Set the Global Setting "storage.cleanup.delay" to 60.
class TestData():
# constants
account = "account"
capacityBytes = "capacitybytes"
capacityIops = "capacityiops"
@ -66,6 +69,7 @@ class TestData():
email = "email"
firstname = "firstname"
hypervisor = "hypervisor"
kvm = "kvm"
lastname = "lastname"
max_iops = "maxiops"
min_iops = "miniops"
@ -88,6 +92,10 @@ class TestData():
xenServer = "xenserver"
zoneId = "zoneid"
# modify to control which hypervisor type to test
hypervisor_type = kvm
xen_server_hostname = "XenServer-6.5-1"
def __init__(self):
self.testdata = {
TestData.solidFire: {
@ -97,6 +105,10 @@ class TestData():
TestData.port: 443,
TestData.url: "https://192.168.139.112:443"
},
TestData.kvm: {
TestData.username: "root",
TestData.password: "solidfire"
},
TestData.xenServer: {
TestData.username: "root",
TestData.password: "solidfire"
@ -203,16 +215,7 @@ class TestManagedSystemVMs(cloudstackTestCase):
cls.testdata = TestData().testdata
# Set up xenAPI connection
host_ip = "https://" + \
list_hosts(cls.apiClient, clusterid=cls.testdata[TestData.clusterId], name="XenServer-6.5-1")[0].ipaddress
# Set up XenAPI connection
cls.xen_session = XenAPI.Session(host_ip)
xenserver = cls.testdata[TestData.xenServer]
cls.xen_session.xenapi.login_with_password(xenserver[TestData.username], xenserver[TestData.password])
cls._connect_to_hypervisor()
# Set up SolidFire connection
solidfire = cls.testdata[TestData.solidFire]
@ -306,7 +309,6 @@ class TestManagedSystemVMs(cloudstackTestCase):
except Exception as e:
logging.debug("Exception in tearDownClass(self): %s" % e)
@attr(hypervisor='XenServer')
def test_01_create_system_vms_on_managed_storage(self):
self._disable_zone_and_delete_system_vms(None, False)
@ -387,7 +389,6 @@ class TestManagedSystemVMs(cloudstackTestCase):
self._wait_for_and_get_running_system_vms(2)
@attr(hypervisor='XenServer')
def test_02_failure_to_create_service_offering_with_customized_iops(self):
try:
ServiceOffering.create(
@ -477,9 +478,19 @@ class TestManagedSystemVMs(cloudstackTestCase):
"The volume should not be in a volume access group."
)
sr_name = sf_util.format_iqn(sf_root_volume.iqn)
if TestData.hypervisor_type == TestData.xenServer:
sr_name = sf_util.format_iqn(sf_root_volume.iqn)
sf_util.check_xen_sr(sr_name, self.xen_session, self, False)
sf_util.check_xen_sr(sr_name, self.xen_session, self, False)
elif TestData.hypervisor_type == TestData.kvm:
list_hosts_response = list_hosts(
self.apiClient,
type="Routing"
)
sf_util.check_kvm_access_to_volume(sf_root_volume.iqn, list_hosts_response, self.testdata[TestData.kvm], self, False)
else:
self.assertTrue(False, "Invalid hypervisor type")
def _wait_for_and_get_running_system_vms(self, expected_number_of_system_vms):
retry_interval = 60
@ -523,9 +534,19 @@ class TestManagedSystemVMs(cloudstackTestCase):
sf_util.check_vag(sf_root_volume, sf_vag_id, self)
sr_name = sf_util.format_iqn(sf_root_volume.iqn)
if TestData.hypervisor_type == TestData.xenServer:
sr_name = sf_util.format_iqn(sf_root_volume.iqn)
sf_util.check_xen_sr(sr_name, self.xen_session, self)
sf_util.check_xen_sr(sr_name, self.xen_session, self)
elif TestData.hypervisor_type == TestData.kvm:
list_hosts_response = list_hosts(
self.apiClient,
type="Routing"
)
sf_util.check_kvm_access_to_volume(sf_root_volume.iqn, list_hosts_response, self.testdata[TestData.kvm], self)
else:
self.assertTrue(False, "Invalid hypervisor type")
def _check_iops_against_iops_of_system_offering(self, cs_volume, system_offering):
self.assertEqual(
@ -586,3 +607,17 @@ class TestManagedSystemVMs(cloudstackTestCase):
# make sure you can connect to MySQL: https://teamtreehouse.com/community/cant-connect-remotely-to-mysql-server-with-mysql-workbench
self.dbConnection.execute(sql_query)
@classmethod
def _connect_to_hypervisor(cls):
if TestData.hypervisor_type == TestData.kvm:
pass
elif TestData.hypervisor_type == TestData.xenServer:
host_ip = "https://" + \
list_hosts(cls.apiClient, clusterid=cls.testdata[TestData.clusterId], name=TestData.xen_server_hostname)[0].ipaddress
cls.xen_session = XenAPI.Session(host_ip)
xen_server = cls.testdata[TestData.xenServer]
cls.xen_session.xenapi.login_with_password(xen_server[TestData.username], xen_server[TestData.password])

View File

@ -45,9 +45,14 @@ from marvin.lib.utils import cleanup_resources, wait_until
# Only one zone
# Only one pod
# Only one cluster
#
# Running the tests:
# Change the "hypervisor_type" variable to control which hypervisor type to test.
# If using KVM, set the Global Setting "kvm.snapshot.enabled" equal to true.
class TestData():
# constants
account = "account"
capacityBytes = "capacitybytes"
capacityIops = "capacityiops"
@ -57,6 +62,7 @@ class TestData():
diskOffering = "diskoffering"
domainId = "domainId"
hypervisor = "hypervisor"
kvm = "kvm"
mvip = "mvip"
password = "password"
port = "port"
@ -75,6 +81,9 @@ class TestData():
xenServer = "xenserver"
zoneId = "zoneId"
# modify to control which hypervisor type to test
hypervisor_type = xenServer
def __init__(self):
self.testdata = {
TestData.solidFire: {
@ -84,31 +93,6 @@ class TestData():
TestData.port: 443,
TestData.url: "https://192.168.139.112:443"
},
TestData.xenServer: {
TestData.username: "root",
TestData.password: "solidfire"
},
TestData.account: {
"email": "test@test.com",
"firstname": "John",
"lastname": "Doe",
"username": "test",
"password": "test"
},
"testaccount": {
"email": "test2@test2.com",
"firstname": "Jane",
"lastname": "Doe",
TestData.username: "test2",
TestData.password: "test"
},
TestData.user: {
"email": "user@test.com",
"firstname": "Jane",
"lastname": "Doe",
TestData.username: "testuser",
TestData.password: "password"
},
TestData.primaryStorage: {
"name": "SolidFire-%d" % random.randint(0, 100),
TestData.scope: "ZONE",
@ -122,6 +106,20 @@ class TestData():
TestData.capacityBytes: 2251799813685248,
TestData.hypervisor: "Any"
},
TestData.account: {
"email": "test@test.com",
"firstname": "John",
"lastname": "Doe",
"username": "test",
"password": "test"
},
TestData.user: {
"email": "user@test.com",
"firstname": "Jane",
"lastname": "Doe",
TestData.username: "testuser",
TestData.password: "password"
},
TestData.virtualMachine: {
"name": "TestVM",
"displayname": "Test VM"
@ -150,71 +148,6 @@ class TestData():
TestData.tags: TestData.storageTag,
"storagetype": "shared"
},
"testdiskofferings": {
"customiopsdo": {
"name": "SF_Custom_Iops_DO",
"displaytext": "Customized Iops DO",
"disksize": 128,
"customizediops": True,
"miniops": 500,
"maxiops": 1000,
"hypervisorsnapshotreserve": 200,
TestData.tags: TestData.storageTag,
"storagetype": "shared"
},
"customsizedo": {
"name": "SF_Custom_Size_DO",
"displaytext": "Customized Size DO",
"disksize": 175,
"customizediops": False,
"miniops": 500,
"maxiops": 1000,
"hypervisorsnapshotreserve": 200,
TestData.tags: TestData.storageTag,
"storagetype": "shared"
},
"customsizeandiopsdo": {
"name": "SF_Custom_Iops_Size_DO",
"displaytext": "Customized Size and Iops DO",
"disksize": 200,
"customizediops": True,
"miniops": 400,
"maxiops": 800,
"hypervisorsnapshotreserve": 200,
TestData.tags: TestData.storageTag,
"storagetype": "shared"
},
"newiopsdo": {
"name": "SF_New_Iops_DO",
"displaytext": "New Iops (min=350, max = 700)",
"disksize": 128,
"miniops": 350,
"maxiops": 700,
"hypervisorsnapshotreserve": 200,
TestData.tags: TestData.storageTag,
"storagetype": "shared"
},
"newsizedo": {
"name": "SF_New_Size_DO",
"displaytext": "New Size: 175",
"disksize": 175,
"miniops": 400,
"maxiops": 800,
"hypervisorsnapshotreserve": 200,
TestData.tags: TestData.storageTag,
"storagetype": "shared"
},
"newsizeandiopsdo": {
"name": "SF_New_Size_Iops_DO",
"displaytext": "New Size and Iops",
"disksize": 200,
"miniops": 200,
"maxiops": 400,
"hypervisorsnapshotreserve": 200,
TestData.tags: TestData.storageTag,
"storagetype": "shared"
}
},
TestData.volume_1: {
TestData.diskName: "test-volume",
},
@ -256,17 +189,6 @@ class TestSnapshots(cloudstackTestCase):
cls.testdata = TestData().testdata
# Set up xenAPI connection
host_ip = "https://" + \
list_hosts(cls.apiClient, clusterid=cls.testdata[TestData.clusterId], name="XenServer-6.5-1")[0].ipaddress
# Set up XenAPI connection
cls.xen_session = XenAPI.Session(host_ip)
xenserver = cls.testdata[TestData.xenServer]
cls.xen_session.xenapi.login_with_password(xenserver[TestData.username], xenserver[TestData.password])
# Set up SolidFire connection
solidfire = cls.testdata[TestData.solidFire]
@ -349,9 +271,9 @@ class TestSnapshots(cloudstackTestCase):
def tearDown(self):
cleanup_resources(self.apiClient, self.cleanup)
@attr(hypervisor='XenServer')
def test_01_create_volume_snapshot_using_sf_snapshot(self):
sf_util.set_supports_resign(True, self.dbConnection)
if TestData.hypervisor_type == TestData.xenServer:
sf_util.set_supports_resign(True, self.dbConnection)
virtual_machine = VirtualMachine.create(
self.apiClient,
@ -599,6 +521,9 @@ class TestSnapshots(cloudstackTestCase):
@attr(hypervisor='XenServer')
def test_02_create_volume_snapshot_using_sf_volume(self):
if TestData.hypervisor_type != TestData.xenServer:
return
sf_util.set_supports_resign(False, self.dbConnection)
virtual_machine = VirtualMachine.create(
@ -955,6 +880,9 @@ class TestSnapshots(cloudstackTestCase):
@attr(hypervisor='XenServer')
def test_03_create_volume_snapshot_using_sf_volume_and_sf_snapshot(self):
if TestData.hypervisor_type != TestData.xenServer:
return
sf_util.set_supports_resign(False, self.dbConnection)
virtual_machine = VirtualMachine.create(
@ -1185,6 +1113,9 @@ class TestSnapshots(cloudstackTestCase):
@attr(hypervisor='XenServer')
def test_04_create_volume_snapshot_using_sf_snapshot_and_archiving(self):
if TestData.hypervisor_type != TestData.xenServer:
return
sf_util.set_supports_resign(True, self.dbConnection)
virtual_machine = VirtualMachine.create(
@ -1439,6 +1370,72 @@ class TestSnapshots(cloudstackTestCase):
sf_util.check_list(sf_volumes, 0, self, TestSnapshots._should_be_zero_volumes_in_list_err_msg)
@attr(hypervisor='KVM')
def test_05_create_volume_snapshot_using_sf_snapshot_and_revert_volume_to_snapshot(self):
if TestData.hypervisor_type != TestData.kvm:
return
virtual_machine = VirtualMachine.create(
self.apiClient,
self.testdata[TestData.virtualMachine],
accountid=self.account.name,
zoneid=self.zone.id,
serviceofferingid=self.compute_offering.id,
templateid=self.template.id,
domainid=self.domain.id,
startvm=True
)
list_volumes_response = list_volumes(
self.apiClient,
virtualmachineid=virtual_machine.id,
listall=True
)
sf_util.check_list(list_volumes_response, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
vm_1_root_volume = list_volumes_response[0]
vm_1_root_volume_name = vm_1_root_volume.name
sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestSnapshots._sf_account_id_should_be_non_zero_int_err_msg)
# Get volume information from SolidFire cluster
sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id)
sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name)
# Get snapshot information for volume from SolidFire cluster
sf_snapshots = self.sfe.list_snapshots(volume_id=sf_volume.volume_id).snapshots
sf_util.check_list(sf_snapshots, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg)
primary_storage_db_id = self._get_cs_storage_pool_db_id(self.primary_storage)
vol_snap_1 = self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 1, TestSnapshots._should_only_be_one_snapshot_in_list_err_msg)
vol_snap_2 = self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 2, TestSnapshots._should_be_two_snapshots_in_list_err_msg)
virtual_machine.stop(self.apiClient, False)
Volume.revertToSnapshot(self.apiClient, vol_snap_1.id)
virtual_machine.start(self.apiClient)
try:
Volume.revertToSnapshot(self.apiClient, vol_snap_1.id)
self.assertTrue(False, "An exception should have been thrown when trying to revert a volume to a snapshot and the volume is attached to a running VM.")
except:
pass
self._delete_and_test_snapshot(vol_snap_2)
self._delete_and_test_snapshot(vol_snap_1)
virtual_machine.delete(self.apiClient, True)
def _check_list_not_empty(self, in_list):
self.assertEqual(
isinstance(in_list, list),
@ -1636,7 +1633,7 @@ class TestSnapshots(cloudstackTestCase):
vol_snap = Snapshot.create(
self.apiClient,
volume_id=volume_id_for_snapshot,
locationtype=2
locationtype="secondary"
)
self._wait_for_snapshot_state(vol_snap.id, Snapshot.BACKED_UP)
@ -1745,3 +1742,4 @@ class TestSnapshots(cloudstackTestCase):
sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id)
sf_util.check_list(sf_volumes, expected_num_volumes, self, volume_err_msg)

View File

@ -18,6 +18,7 @@
import logging
import random
import SignedAPICall
import time
import XenAPI
from solidfire.factory import ElementFactory
@ -27,8 +28,6 @@ from util import sf_util
# All tests inherit from cloudstackTestCase
from marvin.cloudstackTestCase import cloudstackTestCase
from nose.plugins.attrib import attr
# Import Integration Libraries
# base - contains all resources as entities and defines create, delete, list operations on them
@ -36,7 +35,7 @@ from marvin.lib.base import Account, DiskOffering, ServiceOffering, StoragePool,
# common - commonly used methods for all tests are listed here
from marvin.lib.common import get_domain, get_template, get_zone, list_clusters, list_hosts, list_virtual_machines, \
list_volumes
list_volumes, list_hosts
# utils - utility classes for common cleanup, external library wrappers, etc.
from marvin.lib.utils import cleanup_resources
@ -47,10 +46,13 @@ from marvin.lib.utils import cleanup_resources
# Only one cluster
#
# Running the tests:
# Change the "supports_resign" variable to True or False as desired.
# Change the "hypervisor_type" variable to control which hypervisor type to test.
# If using XenServer, verify the "xen_server_hostname" variable is correct.
# If using XenServer, change the "supports_cloning" variable to True or False as desired.
class TestData():
# constants
account = "account"
capacityBytes = "capacitybytes"
capacityIops = "capacityiops"
@ -60,6 +62,7 @@ class TestData():
diskOffering = "diskoffering"
domainId = "domainId"
hypervisor = "hypervisor"
kvm = "kvm"
login = "login"
mvip = "mvip"
password = "password"
@ -70,7 +73,8 @@ class TestData():
solidFire = "solidfire"
storageTag = "SolidFire_SAN_1"
tags = "tags"
templateCacheName = "centos56-x86-64-xen"
templateCacheNameKvm = "centos55-x86-64"
templateCacheNameXenServer = "centos56-x86-64-xen"
testAccount = "testaccount"
url = "url"
user = "user"
@ -82,6 +86,10 @@ class TestData():
xenServer = "xenserver"
zoneId = "zoneId"
# modify to control which hypervisor type to test
hypervisor_type = xenServer
xen_server_hostname = "XenServer-6.5-1"
def __init__(self):
self.testdata = {
TestData.solidFire: {
@ -91,6 +99,10 @@ class TestData():
TestData.port: 443,
TestData.url: "https://192.168.139.112:443"
},
TestData.kvm: {
TestData.username: "root",
TestData.password: "solidfire"
},
TestData.xenServer: {
TestData.username: "root",
TestData.password: "solidfire"
@ -177,6 +189,7 @@ class TestData():
class TestVolumes(cloudstackTestCase):
_should_only_be_one_vm_in_list_err_msg = "There should only be one VM in this list."
_should_only_be_one_volume_in_list_err_msg = "There should only be one volume in this list."
_should_only_be_one_host_in_list_err_msg = "There should only be one host in this list."
_sf_account_id_should_be_non_zero_int_err_msg = "The SolidFire account ID should be a non-zero integer."
_volume_size_should_be_non_zero_int_err_msg = "The SolidFire volume size should be a non-zero integer."
_volume_vm_id_and_vm_id_do_not_match_err_msg = "The volume's VM ID and the VM's ID do not match."
@ -189,26 +202,16 @@ class TestVolumes(cloudstackTestCase):
def setUpClass(cls):
# Set up API client
testclient = super(TestVolumes, cls).getClsTestClient()
cls.apiClient = testclient.getApiClient()
cls.configData = testclient.getParsedTestDataConfig()
cls.dbConnection = testclient.getDbConnection()
cls.testdata = TestData().testdata
cls.supports_resign = True
cls._handle_supports_cloning()
sf_util.set_supports_resign(cls.supports_resign, cls.dbConnection)
# Set up xenAPI connection
host_ip = "https://" + \
list_hosts(cls.apiClient, clusterid=cls.testdata[TestData.clusterId], name="XenServer-6.5-1")[0].ipaddress
# Set up XenAPI connection
cls.xen_session = XenAPI.Session(host_ip)
xenserver = cls.testdata[TestData.xenServer]
cls.xen_session.xenapi.login_with_password(xenserver[TestData.username], xenserver[TestData.password])
cls._connect_to_hypervisor()
# Set up SolidFire connection
solidfire = cls.testdata[TestData.solidFire]
@ -276,9 +279,11 @@ class TestVolumes(cloudstackTestCase):
serviceofferingid=cls.compute_offering.id,
templateid=cls.template.id,
domainid=cls.domain.id,
startvm=True
startvm=False
)
TestVolumes._start_vm(cls.virtual_machine)
cls.volume = Volume.create(
cls.apiClient,
cls.testdata[TestData.volume_1],
@ -319,14 +324,13 @@ class TestVolumes(cloudstackTestCase):
cleanup_resources(self.apiClient, self.cleanup)
@attr(hypervisor='XenServer')
def test_00_check_template_cache(self):
if self.supports_resign == False:
if self._supports_cloning == False:
return
sf_volumes = self._get_active_sf_volumes()
sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, TestData.templateCacheName, self)
sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, self._get_template_cache_name(), self)
self.assertEqual(
len(sf_volume.volume_access_groups),
@ -346,7 +350,6 @@ class TestVolumes(cloudstackTestCase):
"The template cache volume's account does not end with '_1'."
)
@attr(hypervisor='XenServer')
def test_01_attach_new_volume_to_stopped_VM(self):
'''Attach a volume to a stopped virtual machine, then start VM'''
@ -372,7 +375,7 @@ class TestVolumes(cloudstackTestCase):
newvolume = self._check_and_get_cs_volume(new_volume.id, self.testdata[TestData.volume_2][TestData.diskName])
self.virtual_machine.start(self.apiClient)
TestVolumes._start_vm(self.virtual_machine)
vm = self._get_vm(self.virtual_machine.id)
@ -406,7 +409,7 @@ class TestVolumes(cloudstackTestCase):
sf_util.check_vag(sf_volume, sf_vag_id, self)
self._check_xen_sr(sf_iscsi_name)
self._check_host_side(sf_iscsi_name, vm.hostid)
# Detach volume
new_volume = self.virtual_machine.detach_volume(
@ -414,11 +417,10 @@ class TestVolumes(cloudstackTestCase):
new_volume
)
@attr(hypervisor='XenServer')
def test_02_attach_detach_attach_volume(self):
'''Attach, detach, and attach volume to a running VM'''
self.virtual_machine.start(self.apiClient)
TestVolumes._start_vm(self.virtual_machine)
sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestVolumes._sf_account_id_should_be_non_zero_int_err_msg)
@ -469,7 +471,7 @@ class TestVolumes(cloudstackTestCase):
sf_util.check_vag(sf_volume, sf_vag_id, self)
self._check_xen_sr(sf_iscsi_name)
self._check_host_side(sf_iscsi_name, vm.hostid)
#########################################
#########################################
@ -510,7 +512,7 @@ class TestVolumes(cloudstackTestCase):
"The volume should not be in a VAG."
)
self._check_xen_sr(sf_iscsi_name, False)
self._check_host_side(sf_iscsi_name, vm.hostid, False)
#######################################
#######################################
@ -547,13 +549,12 @@ class TestVolumes(cloudstackTestCase):
sf_util.check_vag(sf_volume, sf_vag_id, self)
self._check_xen_sr(sf_iscsi_name)
self._check_host_side(sf_iscsi_name, vm.hostid)
@attr(hypervisor='XenServer')
def test_03_attached_volume_reboot_VM(self):
'''Attach volume to running VM, then reboot.'''
self.virtual_machine.start(self.apiClient)
TestVolumes._start_vm(self.virtual_machine)
sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestVolumes._sf_account_id_should_be_non_zero_int_err_msg)
@ -604,14 +605,14 @@ class TestVolumes(cloudstackTestCase):
sf_util.check_vag(sf_volume, sf_vag_id, self)
self._check_xen_sr(sf_iscsi_name)
self._check_host_side(sf_iscsi_name, vm.hostid)
#######################################
#######################################
# STEP 2: Reboot VM with attached vol #
#######################################
#######################################
self.virtual_machine.reboot(self.apiClient)
TestVolumes._reboot_vm(self.virtual_machine)
vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName])
@ -631,13 +632,12 @@ class TestVolumes(cloudstackTestCase):
sf_util.check_vag(sf_volume, sf_vag_id, self)
self._check_xen_sr(sf_iscsi_name)
self._check_host_side(sf_iscsi_name, vm.hostid)
@attr(hypervisor='XenServer')
def test_04_detach_volume_reboot(self):
'''Detach volume from a running VM, then reboot.'''
self.virtual_machine.start(self.apiClient)
TestVolumes._start_vm(self.virtual_machine)
sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestVolumes._sf_account_id_should_be_non_zero_int_err_msg)
@ -688,7 +688,7 @@ class TestVolumes(cloudstackTestCase):
sf_util.check_vag(sf_volume, sf_vag_id, self)
self._check_xen_sr(sf_iscsi_name)
self._check_host_side(sf_iscsi_name, vm.hostid)
#########################################
#########################################
@ -729,7 +729,7 @@ class TestVolumes(cloudstackTestCase):
TestVolumes._volume_should_not_be_in_a_vag
)
self._check_xen_sr(sf_iscsi_name, False)
self._check_host_side(sf_iscsi_name, vm.hostid, False)
#######################################
#######################################
@ -753,13 +753,12 @@ class TestVolumes(cloudstackTestCase):
TestVolumes._volume_should_not_be_in_a_vag
)
self._check_xen_sr(sf_iscsi_name, False)
self._check_host_side(sf_iscsi_name, vm.hostid, False)
@attr(hypervisor='XenServer')
def test_05_detach_vol_stopped_VM_start(self):
'''Detach volume from a stopped VM, then start.'''
self.virtual_machine.start(self.apiClient)
TestVolumes._start_vm(self.virtual_machine)
sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestVolumes._sf_account_id_should_be_non_zero_int_err_msg)
@ -810,7 +809,9 @@ class TestVolumes(cloudstackTestCase):
sf_util.check_vag(sf_volume, sf_vag_id, self)
self._check_xen_sr(sf_iscsi_name)
self._check_host_side(sf_iscsi_name, vm.hostid)
hostid = vm.hostid
#########################################
#########################################
@ -853,7 +854,7 @@ class TestVolumes(cloudstackTestCase):
TestVolumes._volume_should_not_be_in_a_vag
)
self._check_xen_sr(sf_iscsi_name, False)
self._check_host_side(sf_iscsi_name, hostid, False)
#######################################
#######################################
@ -861,7 +862,7 @@ class TestVolumes(cloudstackTestCase):
#######################################
#######################################
self.virtual_machine.start(self.apiClient)
TestVolumes._start_vm(self.virtual_machine)
vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName])
@ -877,9 +878,8 @@ class TestVolumes(cloudstackTestCase):
TestVolumes._volume_should_not_be_in_a_vag
)
self._check_xen_sr(sf_iscsi_name, False)
self._check_host_side(sf_iscsi_name, vm.hostid, False)
@attr(hypervisor='XenServer')
def test_06_attach_volume_to_stopped_VM(self):
'''Attach a volume to a stopped virtual machine, then start VM'''
@ -934,9 +934,12 @@ class TestVolumes(cloudstackTestCase):
sf_util.check_vag(sf_volume, sf_vag_id, self)
self._check_xen_sr(sf_iscsi_name)
if TestData.hypervisor_type == TestData.kvm:
self._check_host_side(sf_iscsi_name, None, False)
elif TestData.hypervisor_type == TestData.xenServer:
self._check_host_side(sf_iscsi_name)
self.virtual_machine.start(self.apiClient)
TestVolumes._start_vm(self.virtual_machine)
vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName])
@ -968,9 +971,8 @@ class TestVolumes(cloudstackTestCase):
sf_util.check_vag(sf_volume, sf_vag_id, self)
self._check_xen_sr(sf_iscsi_name)
self._check_host_side(sf_iscsi_name, vm.hostid)
@attr(hypervisor='XenServer')
def test_07_destroy_expunge_VM_with_volume(self):
'''Destroy and expunge VM with attached volume'''
@ -988,9 +990,11 @@ class TestVolumes(cloudstackTestCase):
serviceofferingid=self.compute_offering.id,
templateid=self.template.id,
domainid=self.domain.id,
startvm=True
startvm=False
)
TestVolumes._start_vm(test_virtual_machine)
self.volume = test_virtual_machine.attach_volume(
self.apiClient,
self.volume
@ -1032,7 +1036,9 @@ class TestVolumes(cloudstackTestCase):
sf_util.check_vag(sf_volume, sf_vag_id, self)
self._check_xen_sr(sf_iscsi_name)
self._check_host_side(sf_iscsi_name, vm.hostid)
hostid = vm.hostid
#######################################
#######################################
@ -1081,13 +1087,12 @@ class TestVolumes(cloudstackTestCase):
TestVolumes._volume_should_not_be_in_a_vag
)
self._check_xen_sr(sf_iscsi_name, False)
self._check_host_side(sf_iscsi_name, hostid, False)
@attr(hypervisor='XenServer')
def test_08_delete_volume_was_attached(self):
'''Delete volume that was attached to a VM and is detached now'''
self.virtual_machine.start(self.apiClient)
TestVolumes._start_vm(self.virtual_machine)
#######################################
#######################################
@ -1147,7 +1152,7 @@ class TestVolumes(cloudstackTestCase):
sf_util.check_vag(sf_volume, sf_vag_id, self)
self._check_xen_sr(sf_iscsi_name)
self._check_host_side(sf_iscsi_name, vm.hostid)
#######################################
#######################################
@ -1188,7 +1193,7 @@ class TestVolumes(cloudstackTestCase):
TestVolumes._volume_should_not_be_in_a_vag
)
self._check_xen_sr(sf_iscsi_name, False)
self._check_host_side(sf_iscsi_name, vm.hostid, False)
volume_to_delete_later.delete(self.apiClient)
@ -1207,11 +1212,10 @@ class TestVolumes(cloudstackTestCase):
sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self, False)
@attr(hypervisor='XenServer')
def test_09_attach_volumes_multiple_accounts(self):
'''Attach a data disk to a VM in one account and attach another data disk to a VM in another account'''
self.virtual_machine.start(self.apiClient)
TestVolumes._start_vm(self.virtual_machine)
#######################################
#######################################
@ -1235,9 +1239,11 @@ class TestVolumes(cloudstackTestCase):
serviceofferingid=self.compute_offering.id,
templateid=self.template.id,
domainid=self.domain.id,
startvm=True
startvm=False
)
TestVolumes._start_vm(test_virtual_machine)
test_volume = Volume.create(
self.apiClient,
self.testdata[TestData.volume_2],
@ -1315,7 +1321,7 @@ class TestVolumes(cloudstackTestCase):
sf_iscsi_name = sf_util.get_iqn(self.cs_api, self.volume, self)
self._check_xen_sr(sf_iscsi_name)
self._check_host_side(sf_iscsi_name, vm.hostid)
sf_util.check_vag(sf_volume, sf_vag_id, self)
@ -1333,15 +1339,14 @@ class TestVolumes(cloudstackTestCase):
sf_test_iscsi_name = sf_util.get_iqn(self.cs_api, test_volume, self)
self._check_xen_sr(sf_test_iscsi_name)
self._check_host_side(sf_test_iscsi_name, test_vm.hostid)
sf_util.check_vag(sf_test_volume, sf_vag_id, self)
@attr(hypervisor='XenServer')
def test_10_attach_more_than_one_disk_to_VM(self):
'''Attach more than one disk to a VM'''
self.virtual_machine.start(self.apiClient)
TestVolumes._start_vm(self.virtual_machine)
volume_2 = Volume.create(
self.apiClient,
@ -1398,7 +1403,9 @@ class TestVolumes(cloudstackTestCase):
sf_iscsi_name = sf_util.get_iqn(self.cs_api, self.volume, self)
self._check_xen_sr(sf_iscsi_name)
vm = self._get_vm(self.virtual_machine.id)
self._check_host_side(sf_iscsi_name, vm.hostid)
sf_util.check_vag(sf_volume, sf_vag_id, self)
@ -1408,20 +1415,12 @@ class TestVolumes(cloudstackTestCase):
sf_iscsi_name_2 = sf_util.get_iqn(self.cs_api, volume_2, self)
self._check_xen_sr(sf_iscsi_name_2)
self._check_host_side(sf_iscsi_name_2, vm.hostid)
sf_util.check_vag(sf_volume_2, sf_vag_id, self)
self.virtual_machine.detach_volume(self.apiClient, volume_2)
'''
@attr(hypervisor = 'XenServer')
def test_11_attach_disk_to_running_vm_change_iops(self):
Attach a disk to a running VM, then change iops
self.custom_iops_disk_offering = DiskOffering.create(
)'''
def _check_volume(self, volume, volume_name):
self.assertTrue(
volume.name.startswith(volume_name),
@ -1469,6 +1468,9 @@ class TestVolumes(cloudstackTestCase):
);
def _get_cs_volume_size_including_hsr_in_bytes(self, cs_volume_size_in_gb, hsr):
if TestData.hypervisor_type == TestData.kvm:
return self._get_bytes_from_gb(cs_volume_size_in_gb)
lowest_hsr = 10
if hsr < lowest_hsr:
@ -1499,3 +1501,98 @@ class TestVolumes(cloudstackTestCase):
)
return sf_volumes
def _get_template_cache_name(self):
if TestData.hypervisor_type == TestData.kvm:
return TestData.templateCacheNameKvm
elif TestData.hypervisor_type == TestData.xenServer:
return TestData.templateCacheNameXenServer
self.fail("Invalid hypervisor type")
def _get_modified_iscsi_name(self, sf_iscsi_name):
sf_iscsi_name = sf_iscsi_name.replace("/", "")
return sf_iscsi_name[:-1]
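# Hedged note, not part of the commit: sf_util.get_iqn appears to return the
# SolidFire target path in the form "/<iqn>/0" (leading slash, trailing LUN
# number). Stripping the slashes and the final character is assumed to leave
# the bare IQN that udev embeds in the /dev/disk/by-path entry name.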
def _check_host_side(self, sf_iscsi_name, vm_hostid=None, should_exist=True):
if TestData.hypervisor_type == TestData.kvm:
self._check_kvm_host_side(self._get_modified_iscsi_name(sf_iscsi_name), vm_hostid, should_exist)
elif TestData.hypervisor_type == TestData.xenServer:
self._check_xen_sr(sf_iscsi_name, should_exist)
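# Hedged note, not part of the commit: vm_hostid appears to matter only on
# KVM, where the by-path check is run against a specific host; the XenServer
# SR check is cluster-scoped, so the host id is intentionally ignored there.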
def _check_kvm_host_side(self, sf_iscsi_name, vm_hostid, should_exist=True):
if vm_hostid is None:
list_hosts_response = list_hosts(
self.apiClient,
type="Routing"
)
else:
list_hosts_response = list_hosts(
self.apiClient,
id=vm_hostid
)
sf_util.check_list(list_hosts_response, 1, self, TestVolumes._should_only_be_one_host_in_list_err_msg)
kvm_login = self.testdata[TestData.kvm]
for cs_host in list_hosts_response:
ssh_connection = sf_util.get_ssh_connection(cs_host.ipaddress, kvm_login[TestData.username], kvm_login[TestData.password])
stdin, stdout, stderr = ssh_connection.exec_command("ls /dev/disk/by-path | grep " + sf_iscsi_name)
result = stdout.read()
ssh_connection.close()
if should_exist:
self.assertFalse(result is None, "Unable to locate the 'by-path' entry on the KVM host (no grep output)")
self.assertFalse(len(result.strip()) <= len(sf_iscsi_name), "Unable to locate the 'by-path' entry on the KVM host (grep output too short)")
else:
self.assertTrue(result is None or len(result.strip()) == 0, "Found the 'by-path' entry on the KVM host, but did not expect to")
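# Illustrative only, not from the commit: on a typical Linux host the udev
# by-path entry for an iSCSI LUN is assumed to look like
#   ip-192.0.2.50:3260-iscsi-iqn.2010-01.com.solidfire:abcd.vol-1.17-lun-0
# so grepping /dev/disk/by-path for the bare IQN is enough to show the host
# has logged in to the target (the IP, cluster name, and volume id above are
# made-up placeholders).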
@classmethod
def _start_vm(cls, vm):
vm.start(cls.apiClient)
# Libvirt appears to have an issue detaching a volume from a VM while the VM is booting up.
# The XML sent to update the VM seems correct, but the domain XML that describes the VM does not appear to change.
# For KVM, just give the VM 90 seconds to boot up.
if TestData.hypervisor_type == TestData.kvm:
time.sleep(90)
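# A sketch, not part of this commit: polling Marvin for the "Running" state
# could bound the fixed delay, though the libvirt detach issue noted above
# can persist even after the VM reports Running, so some settle time may
# still be needed. Assumes VirtualMachine is imported from marvin.lib.base,
# as is typical in these tests.
@classmethod
def _wait_for_vm_state(cls, vm, desired_state="Running", timeout=90, interval=5):
    for _ in range(max(timeout // interval, 1)):
        vms = VirtualMachine.list(cls.apiClient, id=vm.id)
        if vms and vms[0].state == desired_state:
            return
        time.sleep(interval)
    raise AssertionError("VM " + vm.id + " never reached the " + desired_state + " state")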
@classmethod
def _reboot_vm(cls, vm):
vm.reboot(cls.apiClient)
# Libvirt appears to have the same issue detaching a volume from a VM while the VM is coming back up.
# The XML sent to update the VM seems correct, but the domain XML that describes the VM does not appear to change.
# For KVM, just give the VM 90 seconds to come back up.
if TestData.hypervisor_type == TestData.kvm:
time.sleep(90)
@classmethod
def _handle_supports_cloning(cls):
if TestData.hypervisor_type == TestData.kvm:
cls._supports_cloning = True
elif TestData.hypervisor_type == TestData.xenServer:
# For XenServer, it is OK to set this to True or False depending on what you'd like tested
cls._supports_cloning = True
sf_util.set_supports_resign(cls._supports_cloning, cls.dbConnection)
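# Hedged note, not part of the commit: sf_util.set_supports_resign is assumed
# to flip the database flag that lets managed storage clone new volumes from a
# cached template and re-signature them instead of copying the template into
# every root volume; the code above enables it unconditionally on KVM, while
# for XenServer it depends on what the tester wants exercised.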
@classmethod
def _connect_to_hypervisor(cls):
if TestData.hypervisor_type == TestData.kvm:
pass
elif TestData.hypervisor_type == TestData.xenServer:
host_ip = "https://" + \
list_hosts(cls.apiClient, clusterid=cls.testdata[TestData.clusterId], name=TestData.xen_server_hostname)[0].ipaddress
cls.xen_session = XenAPI.Session(host_ip)
xen_server = cls.testdata[TestData.xenServer]
cls.xen_session.xenapi.login_with_password(xen_server[TestData.username], xen_server[TestData.password])
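# Hedged note, not part of the commit: a matching teardown would normally call
# cls.xen_session.xenapi.session.logout() for XenServer; the KVM branch is a
# no-op because the KVM checks open short-lived SSH sessions instead of
# keeping a hypervisor session around.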
View File
@ -15,6 +15,8 @@
# specific language governing permissions and limitations
# under the License.
import paramiko
def check_list(in_list, expected_size_of_list, obj_assert, err_msg):
obj_assert.assertEqual(
isinstance(in_list, list),
@ -153,6 +155,26 @@ def check_xen_sr(xen_sr_name, xen_session, obj_assert, should_exist=True):
else:
check_list(xen_sr, 0, obj_assert, "SR " + xen_sr_name + " exists, but shouldn't.")
def check_kvm_access_to_volume(iscsi_name, kvm_hosts, kvm_login, obj_assert, should_exist=True):
count = 0
for kvm_host in kvm_hosts:
ssh_connection = get_ssh_connection(kvm_host.ipaddress, kvm_login[TestData.username], kvm_login[TestData.password])
stdin, stdout, stderr = ssh_connection.exec_command("ls /dev/disk/by-path | grep " + iscsi_name)
result = stdout.read()
ssh_connection.close()
if result is not None and len(result.strip()) > len(iscsi_name):
count = count + 1
if should_exist:
obj_assert.assertTrue(count == 1, "Exactly one KVM host should be connected to the following IQN: " + iscsi_name)
else:
obj_assert.assertTrue(count == 0, "No KVM host should be connected to the following IQN: " + iscsi_name)
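# Hypothetical usage from a test (illustrative, not in this commit); the host
# list and login dictionary are assumed to come from Marvin's list_hosts and
# the test's testdata, respectively:
#
#   hosts = list_hosts(self.apiClient, type="Routing")
#   sf_util.check_kvm_access_to_volume(iqn, hosts, self.testdata[TestData.kvm], self)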
def check_vag(sf_volume, sf_vag_id, obj_assert):
obj_assert.assertEqual(
len(sf_volume.volume_access_groups),
@ -215,3 +237,13 @@ def get_volume_size_with_hsr(cs_api, cs_volume, obj_assert):
)
return sf_volume_size
def get_ssh_connection(ip_address, username, password):
ssh_client = paramiko.SSHClient()
ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh_client.connect(ip_address, username=username, password=password)
return ssh_client
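# A possible convenience wrapper (a sketch, not part of the commit) so callers
# cannot leak the paramiko connection; run_ssh_command is a hypothetical name.
# Note that AutoAddPolicy above trusts unknown host keys, which is assumed to
# be acceptable for lab hosts but not for production use.
def run_ssh_command(ip_address, username, password, command):
    ssh_client = get_ssh_connection(ip_address, username, password)
    try:
        # exec_command returns (stdin, stdout, stderr) file-like objects
        stdin, stdout, stderr = ssh_client.exec_command(command)
        return stdout.read()
    finally:
        # always close the transport, even if the command raised
        ssh_client.close()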