From d4ae1ab6b642270cfd0bfb7d0c550b4486bef7f1 Mon Sep 17 00:00:00 2001 From: Mike Tutkowski Date: Fri, 23 Sep 2016 22:47:04 -0700 Subject: [PATCH] Switched to the official SolidFire SDK for Python --- requirements.txt | 3 + .../plugins/solidfire/TestAddRemoveHosts.py | 27 +- .../plugins/solidfire/TestManagedSystemVMs.py | 588 ++++++++++++++++++ .../plugins/solidfire/TestSnapshots.py | 201 +++--- .../solidfire/TestVMMigrationWithStorage.py | 54 +- .../plugins/solidfire/TestVMSnapshots.py | 18 +- .../plugins/solidfire/TestVolumes.py | 41 +- .../plugins/solidfire/util/sf_util.py | 42 +- 8 files changed, 787 insertions(+), 187 deletions(-) create mode 100644 test/integration/plugins/solidfire/TestManagedSystemVMs.py diff --git a/requirements.txt b/requirements.txt index f5c76662311..4485f5d73f9 100644 --- a/requirements.txt +++ b/requirements.txt @@ -19,3 +19,6 @@ cloudmonkey # Marvin dependencies are installed via its bundle + +# Install the SolidFire SDK for Python +solidfire-sdk-python \ No newline at end of file diff --git a/test/integration/plugins/solidfire/TestAddRemoveHosts.py b/test/integration/plugins/solidfire/TestAddRemoveHosts.py index a13c61a8e7c..4c3d261f6e1 100644 --- a/test/integration/plugins/solidfire/TestAddRemoveHosts.py +++ b/test/integration/plugins/solidfire/TestAddRemoveHosts.py @@ -21,6 +21,8 @@ import SignedAPICall import time import XenAPI +from solidfire.factory import ElementFactory + from util import sf_util # All tests inherit from cloudstackTestCase @@ -37,8 +39,6 @@ from marvin.lib.common import get_domain, get_template, get_zone, list_hosts, li # utils - utility classes for common cleanup, external library wrappers, etc. from marvin.lib.utils import cleanup_resources -from solidfire import solidfire_element_api as sf_api - # Prerequisites: # Only one zone # Only one pod @@ -59,12 +59,10 @@ class TestData: diskSize = "disksize" domainId = "domainId" hypervisor = "hypervisor" - login = "login" mvip = "mvip" name = "name" newHost = "newHost" newHostDisplayName = "newHostDisplayName" - osType = "ostype" password = "password" podId = "podid" port = "port" @@ -89,7 +87,7 @@ class TestData: self.testdata = { TestData.solidFire: { TestData.mvip: "192.168.139.112", - TestData.login: "admin", + TestData.username: "admin", TestData.password: "admin", TestData.port: 443, TestData.url: "https://192.168.139.112:443" @@ -170,7 +168,6 @@ class TestData: "diskname": "testvolume2", }, TestData.newHostDisplayName: "XenServer-6.5-3", - TestData.osType: "CentOS 5.6(64-bit) no GUI (XenServer)", TestData.zoneId: 1, TestData.clusterId: 1, TestData.domainId: 1, @@ -186,7 +183,9 @@ class TestAddRemoveHosts(cloudstackTestCase): def setUpClass(cls): # Set up API client testclient = super(TestAddRemoveHosts, cls).getClsTestClient() + cls.apiClient = testclient.getApiClient() + cls.configData = testclient.getParsedTestDataConfig() cls.dbConnection = testclient.getDbConnection() cls.testdata = TestData().testdata @@ -203,12 +202,14 @@ class TestAddRemoveHosts(cloudstackTestCase): cls.xen_session.xenapi.login_with_password(xenserver[TestData.username], xenserver[TestData.password]) # Set up SolidFire connection - cls.sf_client = sf_api.SolidFireAPI(endpoint_dict=cls.testdata[TestData.solidFire]) + solidfire = cls.testdata[TestData.solidFire] + + cls.sfe = ElementFactory.create(solidfire[TestData.mvip], solidfire[TestData.username], solidfire[TestData.password]) # Get Resources from Cloud Infrastructure cls.zone = get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId]) cls.cluster = 
list_clusters(cls.apiClient)[0] - cls.template = get_template(cls.apiClient, cls.zone.id, cls.testdata[TestData.osType]) + cls.template = get_template(cls.apiClient, cls.zone.id, cls.configData["ostype"]) cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId]) # Create test account @@ -249,7 +250,7 @@ class TestAddRemoveHosts(cloudstackTestCase): try: cleanup_resources(cls.apiClient, cls._cleanup) - sf_util.purge_solidfire_volumes(cls.sf_client) + sf_util.purge_solidfire_volumes(cls.sfe) except Exception as e: logging.debug("Exception in tearDownClass(cls): %s" % e) @@ -423,8 +424,8 @@ class TestAddRemoveHosts(cloudstackTestCase): self._perform_add_remove_host(primary_storage_2.id, sf_iscsi_name) - def _perform_add_remove_host(self, primary_storage_id, sf_iscsi_name): - xen_sr = self.xen_session.xenapi.SR.get_by_name_label(sf_iscsi_name)[0] + def _perform_add_remove_host(self, primary_storage_id, sr_name): + xen_sr = self.xen_session.xenapi.SR.get_by_name_label(sr_name)[0] pbds = self.xen_session.xenapi.SR.get_PBDs(xen_sr) @@ -651,10 +652,10 @@ class TestAddRemoveHosts(cloudstackTestCase): return sf_vag_id def _get_sf_vag(self, sf_vag_id): - return self.sf_client.list_volume_access_groups(sf_vag_id, 1)["volumeAccessGroups"][0] + return self.sfe.list_volume_access_groups(sf_vag_id, 1).volume_access_groups[0] def _get_sf_vag_initiators(self, sf_vag): - return sf_vag["initiators"] + return sf_vag.initiators def _verifyVag(self, host_iscsi_iqns, sf_vag_initiators): self.assertEqual( diff --git a/test/integration/plugins/solidfire/TestManagedSystemVMs.py b/test/integration/plugins/solidfire/TestManagedSystemVMs.py new file mode 100644 index 00000000000..994266c2033 --- /dev/null +++ b/test/integration/plugins/solidfire/TestManagedSystemVMs.py @@ -0,0 +1,588 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import logging +import random +import SignedAPICall +import XenAPI + +from solidfire.factory import ElementFactory + +from util import sf_util + +from marvin.cloudstackAPI import destroySystemVm + +# All tests inherit from cloudstackTestCase +from marvin.cloudstackTestCase import cloudstackTestCase + +from nose.plugins.attrib import attr + +# Import Integration Libraries + +# base - contains all resources as entities and defines create, delete, list operations on them +from marvin.lib.base import Account, Router, ServiceOffering, StoragePool, User, VirtualMachine, Zone + +# common - commonly used methods for all tests are listed here +from marvin.lib.common import get_domain, get_template, get_zone, list_clusters, list_hosts, list_ssvms, list_routers + +# utils - utility classes for common cleanup, external library wrappers, etc. 
+from marvin.lib.utils import cleanup_resources, wait_until + +# Prerequisites: +# * Only use one SolidFire cluster for the two primary storages based on the "SolidFire" storage plug-in. +# * Do not run other workloads on the SolidFire cluster while running this test as this test checks at a certain +# point to make sure no active SolidFire volumes exist. +# * Only one zone +# * Only one secondary storage VM and one console proxy VM running on NFS (no virtual router or user VMs exist) +# * Only one pod +# * Only one cluster +# * Set storage.cleanup.enabled to true +# * Set storage.cleanup.interval to 150 +# * Set storage.cleanup.delay to 60 + + +class TestData(): + account = "account" + capacityBytes = "capacitybytes" + capacityIops = "capacityiops" + clusterId = "clusterid" + computeOffering = "computeoffering" + diskOffering = "diskoffering" + domainId = "domainid" + email = "email" + firstname = "firstname" + hypervisor = "hypervisor" + lastname = "lastname" + max_iops = "maxiops" + min_iops = "miniops" + mvip = "mvip" + name = "name" + password = "password" + port = "port" + primaryStorage = "primarystorage" + provider = "provider" + scope = "scope" + solidFire = "solidfire" + storageTag = "SolidFire_SAN_1" + systemOffering = "systemoffering" + systemOfferingFailure = "systemofferingFailure" + tags = "tags" + url = "url" + user = "user" + username = "username" + virtualMachine = "virtualmachine" + xenServer = "xenserver" + zoneId = "zoneid" + + def __init__(self): + self.testdata = { + TestData.solidFire: { + TestData.mvip: "192.168.139.112", + TestData.username: "admin", + TestData.password: "admin", + TestData.port: 443, + TestData.url: "https://192.168.139.112:443" + }, + TestData.xenServer: { + TestData.username: "root", + TestData.password: "solidfire" + }, + TestData.account: { + TestData.email: "test@test.com", + TestData.firstname: "John", + TestData.lastname: "Doe", + TestData.username: "test", + TestData.password: "test" + }, + TestData.user: { + TestData.email: "user@test.com", + TestData.firstname: "Jane", + TestData.lastname: "Doe", + TestData.username: "testuser", + TestData.password: "password" + }, + TestData.primaryStorage: { + TestData.name: TestData.get_name_for_solidfire_storage(), + TestData.scope: "ZONE", + TestData.url: "MVIP=192.168.139.112;SVIP=10.10.8.112;" + + "clusterAdminUsername=admin;clusterAdminPassword=admin;" + + "clusterDefaultMinIops=10000;clusterDefaultMaxIops=15000;" + + "clusterDefaultBurstIopsPercentOfMaxIops=1.5;", + TestData.provider: "SolidFire", + TestData.tags: TestData.storageTag, + TestData.capacityIops: 4500000, + TestData.capacityBytes: 2251799813685248, + TestData.hypervisor: "Any", + TestData.zoneId: 1 + }, + TestData.virtualMachine: { + TestData.name: "TestVM", + "displayname": "Test VM" + }, + TestData.computeOffering: { + TestData.name: "SF_CO_1", + "displaytext": "SF_CO_1 (Min IOPS = 10,000; Max IOPS = 15,000)", + "cpunumber": 1, + "cpuspeed": 100, + "memory": 128, + "storagetype": "shared", + "customizediops": False, + TestData.min_iops: 10000, + TestData.max_iops: 15000, + "hypervisorsnapshotreserve": 200, + TestData.tags: TestData.storageTag + }, + TestData.systemOffering: { + TestData.name: "SF_SO_1", + "displaytext": "Managed SO (Min IOPS = 4,000; Max IOPS = 8,000)", + "cpunumber": 1, + "cpuspeed": 100, + "memory": 128, + "storagetype": "shared", + TestData.min_iops: 4000, + TestData.max_iops: 8000, + TestData.tags: TestData.storageTag, + "issystem": True + }, + TestData.systemOfferingFailure: { + TestData.name: "SF_SO_2", + 
"displaytext": "Managed SO (Customized IOPS)", + "cpunumber": 1, + "cpuspeed": 100, + "memory": 128, + "storagetype": "shared", + "customizediops": True, + TestData.tags: TestData.storageTag, + "issystem": True + }, + TestData.zoneId: 1, + TestData.clusterId: 1, + TestData.domainId: 1, + TestData.url: "192.168.129.50" + } + + @staticmethod + def get_name_for_solidfire_storage(): + return "SolidFire-%d" % random.randint(0, 100) + + +class TestManagedSystemVMs(cloudstackTestCase): + _unique_name_suffix = "-Temp" + + _secondary_storage_unique_name = "Cloud.com-SecondaryStorage" + _secondary_storage_temp_unique_name = _secondary_storage_unique_name + _unique_name_suffix + + _console_proxy_unique_name = "Cloud.com-ConsoleProxy" + _console_proxy_temp_unique_name = _console_proxy_unique_name + _unique_name_suffix + + _virtual_router_unique_name = "Cloud.com-SoftwareRouter" + _virtual_router_temp_unique_name = _virtual_router_unique_name + _unique_name_suffix + + @classmethod + def setUpClass(cls): + # Set up API client + testclient = super(TestManagedSystemVMs, cls).getClsTestClient() + + cls.apiClient = testclient.getApiClient() + cls.configData = testclient.getParsedTestDataConfig() + cls.dbConnection = testclient.getDbConnection() + + cls.testdata = TestData().testdata + + # Set up xenAPI connection + host_ip = "https://" + \ + list_hosts(cls.apiClient, clusterid=cls.testdata[TestData.clusterId], name="XenServer-6.5-1")[0].ipaddress + + # Set up XenAPI connection + cls.xen_session = XenAPI.Session(host_ip) + + xenserver = cls.testdata[TestData.xenServer] + + cls.xen_session.xenapi.login_with_password(xenserver[TestData.username], xenserver[TestData.password]) + + # Set up SolidFire connection + solidfire = cls.testdata[TestData.solidFire] + + cls.sfe = ElementFactory.create(solidfire[TestData.mvip], solidfire[TestData.username], solidfire[TestData.password]) + + # Get Resources from Cloud Infrastructure + cls.zone = Zone(get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId]).__dict__) + cls.cluster = list_clusters(cls.apiClient)[0] + cls.template = get_template(cls.apiClient, cls.zone.id, cls.configData["ostype"]) + cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId]) + + # Create test account + cls.account = Account.create( + cls.apiClient, + cls.testdata["account"], + admin=1 + ) + + # Set up connection to make customized API calls + cls.user = User.create( + cls.apiClient, + cls.testdata["user"], + account=cls.account.name, + domainid=cls.domain.id + ) + + url = cls.testdata[TestData.url] + + api_url = "http://" + url + ":8080/client/api" + userkeys = User.registerUserKeys(cls.apiClient, cls.user.id) + + cls.cs_api = SignedAPICall.CloudStack(api_url, userkeys.apikey, userkeys.secretkey) + + cls.compute_offering = ServiceOffering.create( + cls.apiClient, + cls.testdata[TestData.computeOffering] + ) + + systemoffering = cls.testdata[TestData.systemOffering] + + systemoffering[TestData.name] = "Managed SSVM" + systemoffering['systemvmtype'] = "secondarystoragevm" + + cls.secondary_storage_offering = ServiceOffering.create( + cls.apiClient, + systemoffering + ) + + systemoffering[TestData.name] = "Managed CPVM" + systemoffering['systemvmtype'] = "consoleproxy" + + cls.console_proxy_offering = ServiceOffering.create( + cls.apiClient, + systemoffering + ) + + systemoffering[TestData.name] = "Managed VR" + systemoffering['systemvmtype'] = "domainrouter" + + cls.virtual_router_offering = ServiceOffering.create( + cls.apiClient, + systemoffering + ) + + # Resources that 
are to be destroyed + cls._cleanup = [ + cls.secondary_storage_offering, + cls.console_proxy_offering, + cls.virtual_router_offering, + cls.compute_offering, + cls.user, + cls.account + ] + + @classmethod + def tearDownClass(cls): + try: + cleanup_resources(cls.apiClient, cls._cleanup) + except Exception as e: + logging.debug("Exception in tearDownClass(cls): %s" % e) + + def setUp(self): + self.cleanup = [] + + def tearDown(self): + try: + cleanup_resources(self.apiClient, self.cleanup) + + sf_util.purge_solidfire_volumes(self.sfe) + except Exception as e: + logging.debug("Exception in tearDown(self): %s" % e) + + @attr(hypervisor='XenServer') + def test_01_create_system_vms_on_managed_storage(self): + self._disable_zone_and_delete_system_vms(None, False) + + primary_storage = self.testdata[TestData.primaryStorage] + + primary_storage_1 = StoragePool.create( + self.apiClient, + primary_storage + ) + + self._prepare_to_use_managed_storage_for_system_vms() + + enabled = "Enabled" + + self.zone.update(self.apiClient, id=self.zone.id, allocationstate=enabled) + + system_vms = self._wait_for_and_get_running_system_vms(2) + + virtual_machine = VirtualMachine.create( + self.apiClient, + self.testdata[TestData.virtualMachine], + accountid=self.account.name, + zoneid=self.zone.id, + serviceofferingid=self.compute_offering.id, + templateid=self.template.id, + domainid=self.domain.id, + startvm=True + ) + + # This virtual machine is created and started only so that a virtual router gets created and started. + # Delete the virtual machine right away; only the virtual router is needed from this point on. + virtual_machine.delete(self.apiClient, True) + + virtual_router = list_routers(self.apiClient, listall=True, state="Running")[0] + + system_vms.append(virtual_router) + + self._check_system_vms(system_vms, primary_storage_1.id) + + primary_storage[TestData.name] = TestData.get_name_for_solidfire_storage() + + primary_storage_2 = StoragePool.create( + self.apiClient, + primary_storage + ) + + StoragePool.enableMaintenance(self.apiClient, primary_storage_1.id) + + self._wait_for_storage_cleanup_thread(system_vms) + + sf_util.purge_solidfire_volumes(self.sfe) + + system_vms = self._wait_for_and_get_running_system_vms(2) + + virtual_router = list_routers(self.apiClient, listall=True, state="Running")[0] + + system_vms.append(virtual_router) + + self._check_system_vms(system_vms, primary_storage_2.id) + + StoragePool.cancelMaintenance(self.apiClient, primary_storage_1.id) + + primary_storage_1.delete(self.apiClient) + + self._disable_zone_and_delete_system_vms(virtual_router) + + self._wait_for_storage_cleanup_thread(system_vms) + + sf_util.purge_solidfire_volumes(self.sfe) + + primary_storage_2.delete(self.apiClient) + + self._verify_no_active_solidfire_volumes() + + self._prepare_to_stop_using_managed_storage_for_system_vms() + + self.zone.update(self.apiClient, id=self.zone.id, allocationstate=enabled) + + self._wait_for_and_get_running_system_vms(2) + + @attr(hypervisor='XenServer') + def test_02_failure_to_create_service_offering_with_customized_iops(self): + try: + ServiceOffering.create( + self.apiClient, + self.testdata[TestData.systemOfferingFailure] + ) + offering_created = True + except: + offering_created = False + self.assertFalse(offering_created, "The service offering was created, but should not have been.") + + def _prepare_to_use_managed_storage_for_system_vms(self): + self._update_system_vm_unique_name(TestManagedSystemVMs._secondary_storage_unique_name, TestManagedSystemVMs._secondary_storage_temp_unique_name) +
self._update_system_vm_unique_name(TestManagedSystemVMs._console_proxy_unique_name, TestManagedSystemVMs._console_proxy_temp_unique_name) + self._update_system_vm_unique_name(TestManagedSystemVMs._virtual_router_unique_name, TestManagedSystemVMs._virtual_router_temp_unique_name) + + self._update_system_vm_unique_name_based_on_uuid(self.secondary_storage_offering.id, TestManagedSystemVMs._secondary_storage_unique_name) + self._update_system_vm_unique_name_based_on_uuid(self.console_proxy_offering.id, TestManagedSystemVMs._console_proxy_unique_name) + self._update_system_vm_unique_name_based_on_uuid(self.virtual_router_offering.id, TestManagedSystemVMs._virtual_router_unique_name) + + def _prepare_to_stop_using_managed_storage_for_system_vms(self): + self._update_system_vm_unique_name_based_on_uuid(self.secondary_storage_offering.id, None) + self._update_system_vm_unique_name_based_on_uuid(self.console_proxy_offering.id, None) + self._update_system_vm_unique_name_based_on_uuid(self.virtual_router_offering.id, None) + + self._update_system_vm_unique_name(TestManagedSystemVMs._secondary_storage_temp_unique_name, TestManagedSystemVMs._secondary_storage_unique_name) + self._update_system_vm_unique_name(TestManagedSystemVMs._console_proxy_temp_unique_name, TestManagedSystemVMs._console_proxy_unique_name) + self._update_system_vm_unique_name(TestManagedSystemVMs._virtual_router_temp_unique_name, TestManagedSystemVMs._virtual_router_unique_name) + + def _wait_for_storage_cleanup_thread(self, system_vms): + retry_interval = 60 + num_tries = 10 + + wait_result, return_val = wait_until(retry_interval, num_tries, self._check_resource_state, system_vms) + + if not wait_result: + raise Exception(return_val) + + def _check_resource_state(self, system_vms): + try: + self._verify_system_vms_deleted(system_vms) + + return True, None + except: + return False, "The system is not in the necessary state." + + def _verify_system_vms_deleted(self, system_vms): + for system_vm in system_vms: + cs_root_volume = self._get_root_volume_for_system_vm(system_vm.id, 'Expunged') + + self._verify_managed_system_vm_deleted(cs_root_volume.name) + + def _disable_zone_and_delete_system_vms(self, virtual_router, verify_managed_system_vm_deleted=True): + self.zone.update(self.apiClient, id=self.zone.id, allocationstate="Disabled") + + if virtual_router is not None: + Router.destroy(self.apiClient, virtual_router.id) + + if verify_managed_system_vm_deleted: + cs_root_volume = self._get_root_volume_for_system_vm(virtual_router.id, 'Expunged') + + self._verify_managed_system_vm_deleted(cs_root_volume.name) + + # list_ssvms lists the secondary storage VM and the console proxy VM + system_vms = list_ssvms(self.apiClient) + + for system_vm in system_vms: + destroy_ssvm_cmd = destroySystemVm.destroySystemVmCmd() + + destroy_ssvm_cmd.id = system_vm.id + + self.apiClient.destroySystemVm(destroy_ssvm_cmd) + + if verify_managed_system_vm_deleted: + cs_root_volume = self._get_root_volume_for_system_vm(system_vm.id, 'Expunged') + + self._verify_managed_system_vm_deleted(cs_root_volume.name) + + def _verify_managed_system_vm_deleted(self, cs_root_volume_name): + sf_not_active_volumes = sf_util.get_not_active_sf_volumes(self.sfe) + + sf_root_volume = sf_util.check_and_get_sf_volume(sf_not_active_volumes, cs_root_volume_name, self) + + self.assertEqual( + len(sf_root_volume.volume_access_groups), + 0, + "The volume should not be in a volume access group." 
+ ) + + sr_name = sf_util.format_iqn(sf_root_volume.iqn) + + sf_util.check_xen_sr(sr_name, self.xen_session, self, False) + + def _wait_for_and_get_running_system_vms(self, expected_number_of_system_vms): + retry_interval = 60 + num_tries = 10 + + wait_result, return_val = wait_until(retry_interval, num_tries, self._check_number_of_running_system_vms, expected_number_of_system_vms) + + if not wait_result: + raise Exception(return_val) + + return return_val + + def _check_number_of_running_system_vms(self, expected_number_of_system_vms): + # list_ssvms lists the secondary storage VM and the console proxy VM + system_vms = list_ssvms(self.apiClient, state="Running") + + if system_vms is not None and len(system_vms) == expected_number_of_system_vms: + return True, system_vms + + return False, "Timed out waiting for running system VMs" + + def _verify_no_active_solidfire_volumes(self): + sf_active_volumes = sf_util.get_active_sf_volumes(self.sfe) + + sf_util.check_list(sf_active_volumes, 0, self, "There should be no active SolidFire volumes in the cluster.") + + def _check_system_vms(self, system_vms, primary_storage_id): + sf_active_volumes = sf_util.get_active_sf_volumes(self.sfe) + + sf_vag_id = sf_util.get_vag_id(self.cs_api, self.cluster.id, primary_storage_id, self) + + for system_vm in system_vms: + cs_root_volume = self._get_root_volume_for_system_vm(system_vm.id, 'Ready') + sf_root_volume = sf_util.check_and_get_sf_volume(sf_active_volumes, cs_root_volume.name, self) + + sf_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, cs_root_volume, self) + + sf_util.check_size_and_iops(sf_root_volume, cs_root_volume, sf_volume_size, self) + + self._check_iops_against_iops_of_system_offering(cs_root_volume, self.testdata[TestData.systemOffering]) + + sf_util.check_vag(sf_root_volume, sf_vag_id, self) + + sr_name = sf_util.format_iqn(sf_root_volume.iqn) + + sf_util.check_xen_sr(sr_name, self.xen_session, self) + + def _check_iops_against_iops_of_system_offering(self, cs_volume, system_offering): + self.assertEqual( + system_offering[TestData.min_iops], + cs_volume.miniops, + "Check QoS - Min IOPS of " + cs_volume.name + " should be " + str(system_offering[TestData.min_iops]) + ) + + self.assertEqual( + system_offering[TestData.max_iops], + cs_volume.maxiops, + "Check QoS - Max IOPS of " + cs_volume.name + " should be " + str(system_offering[TestData.max_iops]) + ) + + def _get_root_volume_for_system_vm(self, system_vm_id, state): + sql_query = "Select id From vm_instance Where uuid = '" + system_vm_id + "'" + + # make sure you can connect to MySQL: https://teamtreehouse.com/community/cant-connect-remotely-to-mysql-server-with-mysql-workbench + sql_result = self.dbConnection.execute(sql_query) + + instance_id = sql_result[0][0] + + sql_query = "Select uuid, name, min_iops, max_iops From volumes Where instance_id = " + str(instance_id) + \ + " and state = '" + state + "' Order by removed desc" + + # make sure you can connect to MySQL: https://teamtreehouse.com/community/cant-connect-remotely-to-mysql-server-with-mysql-workbench + sql_result = self.dbConnection.execute(sql_query) + + uuid = sql_result[0][0] + name = sql_result[0][1] + min_iops = sql_result[0][2] + max_iops = sql_result[0][3] + + class CloudStackVolume(object): + pass + + cs_volume = CloudStackVolume() + + cs_volume.id = uuid + cs_volume.name = name + cs_volume.miniops = min_iops + cs_volume.maxiops = max_iops + + return cs_volume + + def _update_system_vm_unique_name(self, unique_name, new_unique_name): + sql_query =
"Update disk_offering set unique_name = '" + new_unique_name + "' Where unique_name = '" + unique_name + "'" + + # make sure you can connect to MySQL: https://teamtreehouse.com/community/cant-connect-remotely-to-mysql-server-with-mysql-workbench + self.dbConnection.execute(sql_query) + + def _update_system_vm_unique_name_based_on_uuid(self, uuid, new_unique_name): + if (new_unique_name is None): + sql_query = "Update disk_offering set unique_name = NULL Where uuid = '" + uuid + "'" + else: + sql_query = "Update disk_offering set unique_name = '" + new_unique_name + "' Where uuid = '" + uuid + "'" + + # make sure you can connect to MySQL: https://teamtreehouse.com/community/cant-connect-remotely-to-mysql-server-with-mysql-workbench + self.dbConnection.execute(sql_query) + diff --git a/test/integration/plugins/solidfire/TestSnapshots.py b/test/integration/plugins/solidfire/TestSnapshots.py index 9ae10f335fe..df45c6134d1 100644 --- a/test/integration/plugins/solidfire/TestSnapshots.py +++ b/test/integration/plugins/solidfire/TestSnapshots.py @@ -21,6 +21,8 @@ import SignedAPICall import time import XenAPI +from solidfire.factory import ElementFactory + from util import sf_util # All tests inherit from cloudstackTestCase @@ -39,8 +41,6 @@ from marvin.lib.common import get_domain, get_template, get_zone, list_clusters, # utils - utility classes for common cleanup, external library wrappers, etc. from marvin.lib.utils import cleanup_resources, wait_until -from solidfire import solidfire_element_api as sf_api - # Prerequisites: # Only one zone # Only one pod @@ -57,7 +57,6 @@ class TestData(): diskOffering = "diskoffering" domainId = "domainId" hypervisor = "hypervisor" - login = "login" mvip = "mvip" password = "password" port = "port" @@ -67,7 +66,6 @@ class TestData(): solidFire = "solidfire" storageTag = "SolidFire_SAN_1" tags = "tags" - templateName = "templatename" url = "url" user = "user" username = "username" @@ -81,7 +79,7 @@ class TestData(): self.testdata = { TestData.solidFire: { TestData.mvip: "192.168.139.112", - TestData.login: "admin", + TestData.username: "admin", TestData.password: "admin", TestData.port: 443, TestData.url: "https://192.168.139.112:443" @@ -223,7 +221,6 @@ class TestData(): TestData.volume_2: { TestData.diskName: "test-volume-2", }, - TestData.templateName: "CentOS 5.6(64-bit) no GUI (XenServer)", TestData.zoneId: 1, TestData.clusterId: 1, TestData.domainId: 1, @@ -252,7 +249,9 @@ class TestSnapshots(cloudstackTestCase): def setUpClass(cls): # Set up API client testclient = super(TestSnapshots, cls).getClsTestClient() + cls.apiClient = testclient.getApiClient() + cls.configData = testclient.getParsedTestDataConfig() cls.dbConnection = testclient.getDbConnection() cls.testdata = TestData().testdata @@ -269,12 +268,14 @@ class TestSnapshots(cloudstackTestCase): cls.xen_session.xenapi.login_with_password(xenserver[TestData.username], xenserver[TestData.password]) # Set up SolidFire connection - cls.sf_client = sf_api.SolidFireAPI(endpoint_dict=cls.testdata[TestData.solidFire]) + solidfire = cls.testdata[TestData.solidFire] + + cls.sfe = ElementFactory.create(solidfire[TestData.mvip], solidfire[TestData.username], solidfire[TestData.password]) # Get Resources from Cloud Infrastructure cls.zone = get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId]) cls.cluster = list_clusters(cls.apiClient)[0] - cls.template = get_template(cls.apiClient, cls.zone.id, template_name=cls.testdata[TestData.templateName]) + cls.template = get_template(cls.apiClient, 
cls.zone.id, cls.configData["ostype"]) cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId]) # Create test account @@ -338,7 +339,7 @@ class TestSnapshots(cloudstackTestCase): cls.primary_storage.delete(cls.apiClient) - sf_util.purge_solidfire_volumes(cls.sf_client) + sf_util.purge_solidfire_volumes(cls.sfe) except Exception as e: logging.debug("Exception in tearDownClass(cls): %s" % e) @@ -377,14 +378,14 @@ class TestSnapshots(cloudstackTestCase): sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestSnapshots._sf_account_id_should_be_non_zero_int_err_msg) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg) sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name) # Get snapshot information for volume from SolidFire cluster - sf_snapshots = self.sf_client.list_snapshots(volume_id=sf_volume['volumeID']) + sf_snapshots = self.sfe.list_snapshots(volume_id=sf_volume.volume_id).snapshots sf_util.check_list(sf_snapshots, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg) @@ -409,7 +410,7 @@ class TestSnapshots(cloudstackTestCase): virtual_machine.delete(self.apiClient, True) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg) @@ -418,7 +419,7 @@ class TestSnapshots(cloudstackTestCase): self._delete_and_test_snapshot(vol_snap_1) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg) @@ -427,7 +428,7 @@ class TestSnapshots(cloudstackTestCase): self._delete_and_test_snapshot(vol_snap_2) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 0, self, TestSnapshots._should_be_zero_volumes_in_list_err_msg) @@ -454,14 +455,14 @@ class TestSnapshots(cloudstackTestCase): vm_1_root_volume_name = vm_1_root_volume.name # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg) sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name) # Get snapshot information for volume from SolidFire cluster - sf_snapshots = self.sf_client.list_snapshots(volume_id=sf_volume['volumeID']) + sf_snapshots = self.sfe.list_snapshots(volume_id=sf_volume.volume_id).snapshots sf_util.check_list(sf_snapshots, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg) @@ -502,14 +503,14 @@ class TestSnapshots(cloudstackTestCase): vm_2_root_volume_name = vm_2_root_volume.name # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes 
= sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 2, self, TestSnapshots._should_be_two_volumes_in_list_err_msg) sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name) # Get snapshot information for volume from SolidFire cluster - sf_snapshots_2 = self.sf_client.list_snapshots(volume_id=sf_volume_2['volumeID']) + sf_snapshots_2 = self.sfe.list_snapshots(volume_id=sf_volume_2.volume_id).snapshots sf_util.check_list(sf_snapshots_2, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg) @@ -522,7 +523,7 @@ class TestSnapshots(cloudstackTestCase): volume_created_from_snapshot_name = volume_created_from_snapshot.name # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 3, self, TestSnapshots._should_be_three_volumes_in_list_err_msg) @@ -530,7 +531,7 @@ class TestSnapshots(cloudstackTestCase): sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name) sf_volume_3 = self._get_sf_volume_by_name(sf_volumes, volume_created_from_snapshot_name) - sf_util.check_list(sf_volume_3['volumeAccessGroups'], 0, self, TestSnapshots._should_be_zero_volume_access_groups_in_list_err_msg) + sf_util.check_list(sf_volume_3.volume_access_groups, 0, self, TestSnapshots._should_be_zero_volume_access_groups_in_list_err_msg) volume_created_from_snapshot = virtual_machine.attach_volume( self.apiClient, @@ -542,7 +543,7 @@ class TestSnapshots(cloudstackTestCase): virtual_machine.delete(self.apiClient, True) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 3, self, TestSnapshots._should_be_three_volumes_in_list_err_msg) @@ -562,7 +563,7 @@ class TestSnapshots(cloudstackTestCase): self._delete_and_test_snapshot(vol_snap_1) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 2, self, TestSnapshots._should_be_two_volumes_in_list_err_msg) @@ -572,7 +573,7 @@ class TestSnapshots(cloudstackTestCase): virtual_machine_2.delete(self.apiClient, True) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg) @@ -592,7 +593,7 @@ class TestSnapshots(cloudstackTestCase): data_volume.delete(self.apiClient) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 0, self, TestSnapshots._should_be_zero_volumes_in_list_err_msg) @@ -625,21 +626,21 @@ class TestSnapshots(cloudstackTestCase): sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestSnapshots._sf_account_id_should_be_non_zero_int_err_msg) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) 
sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg) sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name) # Get snapshot information for volume from SolidFire cluster - sf_snapshots = self.sf_client.list_snapshots(volume_id=sf_volume['volumeID']) + sf_snapshots = self.sfe.list_snapshots(volume_id=sf_volume.volume_id).snapshots sf_util.check_list(sf_snapshots, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg) primary_storage_db_id = self._get_cs_storage_pool_db_id(self.primary_storage) - sf_volume_id = sf_volume['volumeID'] - sf_volume_size = sf_volume['totalSize'] + sf_volume_id = sf_volume.volume_id + sf_volume_size = sf_volume.total_size vol_snap_1 = self._create_and_test_snapshot_2(vm_1_root_volume.id, sf_volume_id, sf_volume_id + 1, primary_storage_db_id, sf_volume_size, sf_account_id, 2, TestSnapshots._should_be_two_volumes_in_list_err_msg) @@ -665,7 +666,7 @@ class TestSnapshots(cloudstackTestCase): virtual_machine.delete(self.apiClient, True) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 2, self, TestSnapshots._should_be_two_volumes_in_list_err_msg) @@ -696,19 +697,19 @@ class TestSnapshots(cloudstackTestCase): vm_1_root_volume_name = vm_1_root_volume.name # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg) sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name) # Get snapshot information for volume from SolidFire cluster - sf_snapshots = self.sf_client.list_snapshots(volume_id=sf_volume['volumeID']) + sf_snapshots = self.sfe.list_snapshots(volume_id=sf_volume.volume_id).snapshots sf_util.check_list(sf_snapshots, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg) - sf_volume_id = sf_volume['volumeID'] - sf_volume_size = sf_volume['totalSize'] + sf_volume_id = sf_volume.volume_id + sf_volume_size = sf_volume.total_size vol_snap_1 = self._create_and_test_snapshot_2(vm_1_root_volume.id, sf_volume_id, sf_volume_id + 1, primary_storage_db_id, sf_volume_size, sf_account_id, 2, TestSnapshots._should_be_two_volumes_in_list_err_msg) @@ -750,19 +751,19 @@ class TestSnapshots(cloudstackTestCase): vm_2_root_volume_name = vm_2_root_volume.name # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 5, self, TestSnapshots._should_be_five_volumes_in_list_err_msg) sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name) # Get snapshot information for volume from SolidFire cluster - sf_snapshots_2 = self.sf_client.list_snapshots(volume_id=sf_volume_2['volumeID']) + sf_snapshots_2 = self.sfe.list_snapshots(volume_id=sf_volume_2.volume_id).snapshots sf_util.check_list(sf_snapshots_2, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg) - sf_volume_id_2 = sf_volume_2['volumeID'] - sf_volume_size_2 = sf_volume_2['totalSize'] + sf_volume_id_2 = sf_volume_2.volume_id + sf_volume_size_2 = sf_volume_2.total_size vol_snap_a = self._create_and_test_snapshot_2(vm_2_root_volume.id, 
sf_volume_id_2, sf_volume_id + 5, primary_storage_db_id, sf_volume_size_2, sf_account_id, 6, TestSnapshots._should_be_six_volumes_in_list_err_msg) @@ -774,7 +775,7 @@ class TestSnapshots(cloudstackTestCase): volume_created_from_snapshot_name = volume_created_from_snapshot.name # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 7, self, TestSnapshots._should_be_seven_volumes_in_list_err_msg) @@ -782,7 +783,7 @@ class TestSnapshots(cloudstackTestCase): sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name) sf_volume_3 = self._get_sf_volume_by_name(sf_volumes, volume_created_from_snapshot_name) - sf_util.check_list(sf_volume_3['volumeAccessGroups'], 0, self, TestSnapshots._should_be_zero_volume_access_groups_in_list_err_msg) + sf_util.check_list(sf_volume_3.volume_access_groups, 0, self, TestSnapshots._should_be_zero_volume_access_groups_in_list_err_msg) volume_created_from_snapshot = virtual_machine.attach_volume( self.apiClient, @@ -794,7 +795,7 @@ class TestSnapshots(cloudstackTestCase): virtual_machine.delete(self.apiClient, True) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 5, self, TestSnapshots._should_be_five_volumes_in_list_err_msg) @@ -815,7 +816,7 @@ class TestSnapshots(cloudstackTestCase): virtual_machine_2.delete(self.apiClient, True) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg) @@ -833,7 +834,7 @@ class TestSnapshots(cloudstackTestCase): data_volume.delete(self.apiClient) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 0, self, TestSnapshots._should_be_zero_volumes_in_list_err_msg) @@ -860,7 +861,7 @@ class TestSnapshots(cloudstackTestCase): vm_1_root_volume_name = vm_1_root_volume.name # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg) @@ -870,14 +871,14 @@ class TestSnapshots(cloudstackTestCase): sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name) - sf_volume_id = sf_volume['volumeID'] - sf_volume_size = sf_volume['totalSize'] + sf_volume_id = sf_volume.volume_id + sf_volume_size = sf_volume.total_size vol_snap_1 = self._create_and_test_snapshot_2(vm_1_root_volume.id, sf_volume_id, sf_volume_id + 1, primary_storage_db_id, sf_volume_size, sf_account_id, 2, TestSnapshots._should_be_two_volumes_in_list_err_msg) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 2, self, TestSnapshots._should_be_two_volumes_in_list_err_msg) @@ -888,21 +889,21 @@ class 
TestSnapshots(cloudstackTestCase): volume_created_from_snapshot_name = volume_created_from_snapshot.name # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 3, self, TestSnapshots._should_be_three_volumes_in_list_err_msg) sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, volume_created_from_snapshot_name) - sf_util.check_list(sf_volume_2['volumeAccessGroups'], 0, self, TestSnapshots._should_be_zero_volume_access_groups_in_list_err_msg) + sf_util.check_list(sf_volume_2.volume_access_groups, 0, self, TestSnapshots._should_be_zero_volume_access_groups_in_list_err_msg) volume_created_from_snapshot = virtual_machine.attach_volume( self.apiClient, volume_created_from_snapshot ) - sf_volume_id_2 = sf_volume_2['volumeID'] - sf_volume_size_2 = sf_volume_2['totalSize'] + sf_volume_id_2 = sf_volume_2.volume_id + sf_volume_size_2 = sf_volume_2.total_size vol_snap_a = self._create_and_test_snapshot_2(volume_created_from_snapshot.id, sf_volume_id_2, sf_volume_id + 3, primary_storage_db_id, sf_volume_size_2, sf_account_id, 4, TestSnapshots._should_be_four_volumes_in_list_err_msg) @@ -914,7 +915,7 @@ class TestSnapshots(cloudstackTestCase): virtual_machine.delete(self.apiClient, True) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg) @@ -935,7 +936,7 @@ class TestSnapshots(cloudstackTestCase): data_volume.delete(self.apiClient) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg) @@ -981,21 +982,21 @@ class TestSnapshots(cloudstackTestCase): sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestSnapshots._sf_account_id_should_be_non_zero_int_err_msg) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg) sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name) # Get snapshot information for volume from SolidFire cluster - sf_snapshots = self.sf_client.list_snapshots(volume_id=sf_volume['volumeID']) + sf_snapshots = self.sfe.list_snapshots(volume_id=sf_volume.volume_id).snapshots sf_util.check_list(sf_snapshots, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg) primary_storage_db_id = self._get_cs_storage_pool_db_id(self.primary_storage) - sf_volume_id = sf_volume['volumeID'] - sf_volume_size = sf_volume['totalSize'] + sf_volume_id = sf_volume.volume_id + sf_volume_size = sf_volume.total_size vol_snap_1 = self._create_and_test_snapshot_2(vm_1_root_volume.id, sf_volume_id, sf_volume_id + 1, primary_storage_db_id, sf_volume_size, sf_account_id, 2, TestSnapshots._should_be_two_volumes_in_list_err_msg) @@ -1040,14 +1041,14 @@ class TestSnapshots(cloudstackTestCase): vm_2_root_volume_name = vm_2_root_volume.name # Get volume information from 
SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 4, self, TestSnapshots._should_be_four_volumes_in_list_err_msg) sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name) # Get snapshot information for volume from SolidFire cluster - sf_snapshots = self.sf_client.list_snapshots(volume_id=sf_volume_2['volumeID']) + sf_snapshots = self.sfe.list_snapshots(volume_id=sf_volume_2.volume_id).snapshots sf_util.check_list(sf_snapshots, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg) @@ -1056,7 +1057,7 @@ class TestSnapshots(cloudstackTestCase): volume_created_from_snapshot_1 = Volume.create_from_snapshot(self.apiClient, vol_snap_2.id, services, account=self.account.name, domainid=self.domain.id) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 5, self, TestSnapshots._should_be_five_volumes_in_list_err_msg) @@ -1096,14 +1097,14 @@ class TestSnapshots(cloudstackTestCase): vm_3_root_volume_name = vm_3_root_volume.name # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 6, self, TestSnapshots._should_be_six_volumes_in_list_err_msg) sf_volume_3 = self._get_sf_volume_by_name(sf_volumes, vm_3_root_volume_name) # Get snapshot information for volume from SolidFire cluster - sf_snapshots = self.sf_client.list_snapshots(volume_id=sf_volume_3['volumeID']) + sf_snapshots = self.sfe.list_snapshots(volume_id=sf_volume_3.volume_id).snapshots sf_util.check_list(sf_snapshots, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg) @@ -1112,7 +1113,7 @@ class TestSnapshots(cloudstackTestCase): volume_created_from_snapshot_a = Volume.create_from_snapshot(self.apiClient, vol_snap_b.id, services, account=self.account.name, domainid=self.domain.id) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 7, self, TestSnapshots._should_be_seven_volumes_in_list_err_msg) @@ -1124,7 +1125,7 @@ class TestSnapshots(cloudstackTestCase): virtual_machine.delete(self.apiClient, True) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) # should still be 7 volumes because the SolidFire volume for the root disk of the VM just destroyed # is still needed for the SolidFire snapshots @@ -1133,14 +1134,14 @@ class TestSnapshots(cloudstackTestCase): virtual_machine_2.delete(self.apiClient, True) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 6, self, TestSnapshots._should_be_six_volumes_in_list_err_msg) virtual_machine_3.delete(self.apiClient, True) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, 
sf_account_id) sf_util.check_list(sf_volumes, 5, self, TestSnapshots._should_be_five_volumes_in_list_err_msg) @@ -1149,7 +1150,7 @@ class TestSnapshots(cloudstackTestCase): data_volume.delete(self.apiClient) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 4, self, TestSnapshots._should_be_four_volumes_in_list_err_msg) @@ -1158,7 +1159,7 @@ class TestSnapshots(cloudstackTestCase): data_volume.delete(self.apiClient) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 3, self, TestSnapshots._should_be_three_volumes_in_list_err_msg) @@ -1167,7 +1168,7 @@ class TestSnapshots(cloudstackTestCase): self._delete_and_test_snapshot(vol_snap_b) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) # should still be 2 volumes because the SolidFire volume for the root disk of the VM just destroyed # is still needed for the SolidFire snapshots @@ -1176,7 +1177,7 @@ class TestSnapshots(cloudstackTestCase): self._delete_and_test_snapshot(vol_snap_a) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg) @@ -1211,14 +1212,14 @@ class TestSnapshots(cloudstackTestCase): sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestSnapshots._sf_account_id_should_be_non_zero_int_err_msg) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg) sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name) # Get snapshot information for volume from SolidFire cluster - sf_snapshots = self.sf_client.list_snapshots(volume_id=sf_volume['volumeID']) + sf_snapshots = self.sfe.list_snapshots(volume_id=sf_volume.volume_id).snapshots sf_util.check_list(sf_snapshots, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg) @@ -1247,7 +1248,7 @@ class TestSnapshots(cloudstackTestCase): virtual_machine.delete(self.apiClient, True) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg) @@ -1256,7 +1257,7 @@ class TestSnapshots(cloudstackTestCase): self._delete_and_test_archive_snapshot(vol_snap_1_archive) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg) @@ -1265,7 +1266,7 @@ class TestSnapshots(cloudstackTestCase): 
self._delete_and_test_snapshot(vol_snap_2) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 0, self, TestSnapshots._should_be_zero_volumes_in_list_err_msg) @@ -1292,14 +1293,14 @@ class TestSnapshots(cloudstackTestCase): vm_1_root_volume_name = vm_1_root_volume.name # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg) sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name) # Get snapshot information for volume from SolidFire cluster - sf_snapshots = self.sf_client.list_snapshots(volume_id=sf_volume['volumeID']) + sf_snapshots = self.sfe.list_snapshots(volume_id=sf_volume.volume_id).snapshots sf_util.check_list(sf_snapshots, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg) @@ -1342,14 +1343,14 @@ class TestSnapshots(cloudstackTestCase): vm_2_root_volume_name = vm_2_root_volume.name # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 2, self, TestSnapshots._should_be_two_volumes_in_list_err_msg) sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name) # Get snapshot information for volume from SolidFire cluster - sf_snapshots_2 = self.sf_client.list_snapshots(volume_id=sf_volume_2['volumeID']) + sf_snapshots_2 = self.sfe.list_snapshots(volume_id=sf_volume_2.volume_id).snapshots sf_util.check_list(sf_snapshots_2, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg) @@ -1362,7 +1363,7 @@ class TestSnapshots(cloudstackTestCase): volume_created_from_snapshot_name = volume_created_from_snapshot.name # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 3, self, TestSnapshots._should_be_three_volumes_in_list_err_msg) @@ -1370,7 +1371,7 @@ class TestSnapshots(cloudstackTestCase): sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name) sf_volume_3 = self._get_sf_volume_by_name(sf_volumes, volume_created_from_snapshot_name) - sf_util.check_list(sf_volume_3['volumeAccessGroups'], 0, self, TestSnapshots._should_be_zero_volume_access_groups_in_list_err_msg) + sf_util.check_list(sf_volume_3.volume_access_groups, 0, self, TestSnapshots._should_be_zero_volume_access_groups_in_list_err_msg) volume_created_from_snapshot = virtual_machine.attach_volume( self.apiClient, @@ -1382,7 +1383,7 @@ class TestSnapshots(cloudstackTestCase): virtual_machine.delete(self.apiClient, True) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 3, self, TestSnapshots._should_be_three_volumes_in_list_err_msg) @@ -1404,7 +1405,7 @@ class TestSnapshots(cloudstackTestCase): self._delete_and_test_snapshot(vol_snap_3) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, 
sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 2, self, TestSnapshots._should_be_two_volumes_in_list_err_msg) @@ -1414,7 +1415,7 @@ class TestSnapshots(cloudstackTestCase): virtual_machine_2.delete(self.apiClient, True) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg) @@ -1434,7 +1435,7 @@ class TestSnapshots(cloudstackTestCase): data_volume.delete(self.apiClient) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 0, self, TestSnapshots._should_be_zero_volumes_in_list_err_msg) @@ -1511,10 +1512,10 @@ class TestSnapshots(cloudstackTestCase): sf_snapshot_to_return = None for sf_snapshot in sf_snapshots: - if (sf_snapshot['snapshotID'] > most_recent_id): + if (sf_snapshot.snapshot_id > most_recent_id): sf_snapshot_to_return = sf_snapshot - most_recent_id = sf_snapshot['snapshotID'] + most_recent_id = sf_snapshot.snapshot_id if (sf_snapshot_to_return == None): raise Exception("Unable to find the most recent SolidFire snapshot in the provided list") @@ -1541,7 +1542,7 @@ class TestSnapshots(cloudstackTestCase): sf_volume = None for volume in sf_volumes: - if volume['name'] == sf_volume_name: + if volume.name == sf_volume_name: sf_volume = volume break @@ -1560,7 +1561,7 @@ class TestSnapshots(cloudstackTestCase): sf_volume = None for volume in sf_volumes: - if volume['volumeID'] == sf_volume_id: + if volume.volume_id == sf_volume_id: sf_volume = volume break @@ -1582,7 +1583,7 @@ class TestSnapshots(cloudstackTestCase): def _check_sf_snapshot_does_not_exist(self, sf_snapshots, sf_snapshot_id): for sf_snapshot in sf_snapshots: - if sf_snapshot["snapshotID"] == sf_snapshot: + if sf_snapshot.snapshot_id == sf_snapshot_id: raise Exception("The following SolidFire snapshot ID should not exist: " + sf_snapshot_id) def _check_snapshot_details_do_not_exist(self, vol_snap_db_id): @@ -1606,10 +1607,10 @@ class TestSnapshots(cloudstackTestCase): self._wait_for_snapshot_state(vol_snap.id, Snapshot.BACKED_UP) - sf_volume_id = sf_volume['volumeID'] + sf_volume_id = sf_volume.volume_id # Get snapshot information for volume from SolidFire cluster - sf_snapshots = self.sf_client.list_snapshots(volume_id=sf_volume_id) + sf_snapshots = self.sfe.list_snapshots(volume_id=sf_volume_id).snapshots sf_util.check_list(sf_snapshots, expected_num_snapshots, self, snapshot_err_msg) @@ -1621,16 +1622,16 @@ class TestSnapshots(cloudstackTestCase): vol_snap_db_id = self._get_cs_volume_snapshot_db_id(vol_snap) - self._check_snapshot_details(sf_snapshot_details, vol_snap_db_id, sf_volume_id, sf_snapshot['snapshotID'], primary_storage_db_id, sf_volume['totalSize']) + self._check_snapshot_details(sf_snapshot_details, vol_snap_db_id, sf_volume_id, sf_snapshot.snapshot_id, primary_storage_db_id, sf_volume.total_size) return vol_snap + # used when SolidFire snapshots are being used for CloudStack volume snapshots to create a backup on secondary storage + def _create_and_test_archive_snapshot(self, volume_id_for_snapshot, sf_volume): - sf_volume_id = sf_volume['volumeID'] + sf_volume_id = sf_volume.volume_id # Get snapshot information for volume from SolidFire
-        sf_snapshots_orig = self.sf_client.list_snapshots(sf_volume_id)
+        sf_snapshots_orig = self.sfe.list_snapshots(sf_volume_id).snapshots
 
         vol_snap = Snapshot.create(
             self.apiClient,
@@ -1641,7 +1642,7 @@ class TestSnapshots(cloudstackTestCase):
         self._wait_for_snapshot_state(vol_snap.id, Snapshot.BACKED_UP)
 
         # Get snapshot information for volume from SolidFire cluster
-        sf_snapshots = self.sf_client.list_snapshots(sf_volume_id)
+        sf_snapshots = self.sfe.list_snapshots(sf_volume_id).snapshots
 
         sf_util.check_list(sf_snapshots, len(sf_snapshots_orig), self, "A new SolidFire snapshot was detected.")
@@ -1662,7 +1663,7 @@ class TestSnapshots(cloudstackTestCase):
         self._wait_for_snapshot_state(vol_snap.id, Snapshot.BACKED_UP)
 
         # Get snapshot information for volume from SolidFire cluster
-        sf_snapshots = self.sf_client.list_snapshots(volume_id=sf_volume_id)
+        sf_snapshots = self.sfe.list_snapshots(volume_id=sf_volume_id).snapshots
 
         sf_util.check_list(sf_snapshots, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg)
@@ -1675,13 +1676,13 @@ class TestSnapshots(cloudstackTestCase):
         self._check_snapshot_details_2(sf_snapshot_details, vol_snap_db_id, sf_volume_id_for_volume_snapshot, primary_storage_db_id, sf_volume_size)
 
         # Get volume information from SolidFire cluster
-        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
+        sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id)
 
         sf_util.check_list(sf_volumes, expected_num_volumes, self, volume_err_msg)
 
         sf_volume_for_snapshot = self._get_sf_volume_by_id(sf_volumes, sf_volume_id_for_volume_snapshot)
 
-        sf_util.check_list(sf_volume_for_snapshot['volumeAccessGroups'], 0, self, TestSnapshots._should_be_zero_volume_access_groups_in_list_err_msg)
+        sf_util.check_list(sf_volume_for_snapshot.volume_access_groups, 0, self, TestSnapshots._should_be_zero_volume_access_groups_in_list_err_msg)
 
         return vol_snap
@@ -1722,7 +1723,7 @@ class TestSnapshots(cloudstackTestCase):
         vol_snap.delete(self.apiClient)
 
         # Get snapshot information for volume from SolidFire cluster
-        sf_snapshots = self.sf_client.list_snapshots(volume_id=sf_volume_id)
+        sf_snapshots = self.sfe.list_snapshots(volume_id=sf_volume_id).snapshots
 
         self._check_sf_snapshot_does_not_exist(sf_snapshots, sf_snapshot_id)
@@ -1741,6 +1742,6 @@ class TestSnapshots(cloudstackTestCase):
         self._check_snapshot_details_do_not_exist(vol_snap_db_id)
 
         # Get volume information from SolidFire cluster
-        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
+        sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id)
 
         sf_util.check_list(sf_volumes, expected_num_volumes, self, volume_err_msg)
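The TestSnapshots changes above all follow one mechanical pattern: the retired solidfire_element_api wrapper returned plain dictionaries keyed by camelCase names, while the official SDK returns typed result objects whose payload hangs off an attribute and whose fields are snake_case. A minimal sketch of that pattern, using the management VIP and credentials from the test data in this patch and an arbitrary example volume_id:

    from solidfire.factory import ElementFactory

    # Build the Element OS connection the same way setUpClass() now does.
    sfe = ElementFactory.create("192.168.139.112", "admin", "admin")

    # list_snapshots() returns a ListSnapshotsResult; the actual list lives
    # on its .snapshots attribute instead of under a ["snapshots"] key.
    for sf_snapshot in sfe.list_snapshots(volume_id=1).snapshots:
        print(sf_snapshot.snapshot_id)  # formerly sf_snapshot['snapshotID']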
diff --git a/test/integration/plugins/solidfire/TestVMMigrationWithStorage.py b/test/integration/plugins/solidfire/TestVMMigrationWithStorage.py
index 255df07a54a..adbb44be950 100644
--- a/test/integration/plugins/solidfire/TestVMMigrationWithStorage.py
+++ b/test/integration/plugins/solidfire/TestVMMigrationWithStorage.py
@@ -20,6 +20,8 @@ import random
 import SignedAPICall
 import XenAPI
 
+from solidfire.factory import ElementFactory
+
 from util import sf_util
 
 # All tests inherit from cloudstackTestCase
@@ -34,8 +36,6 @@ from marvin.lib.common import get_domain, get_template, get_zone, list_clusters,
 # utils - utility classes for common cleanup, external library wrappers, etc.
 from marvin.lib.utils import cleanup_resources
 
-from solidfire import solidfire_element_api as sf_api
-
 # Prerequisites:
 # Only one zone
 # Only one pod
@@ -71,7 +71,6 @@ class TestData():
     storageTag2 = "SolidFire_Volume_1"
     tags = "tags"
     templateCacheName = "centos56-x86-64-xen"
-    templateName = "templatename"
     testAccount = "testaccount"
     url = "url"
     user = "user"
@@ -86,7 +85,7 @@ class TestData():
         self.testdata = {
             TestData.solidFire: {
                 TestData.mvip: "192.168.139.112",
-                TestData.login: "admin",
+                TestData.username: "admin",
                 TestData.password: "admin",
                 TestData.port: 443,
                 TestData.url: "https://192.168.139.112:443"
@@ -208,7 +207,6 @@ class TestData():
             TestData.volume_1: {
                 TestData.diskName: "test-volume",
             },
-            TestData.templateName: "CentOS 5.6(64-bit) no GUI (XenServer)",
             TestData.zoneId: 1,
             TestData.clusterId1: 1,
             TestData.clusterId2: 2,
@@ -224,7 +222,9 @@ class TestVMMigrationWithStorage(cloudstackTestCase):
     def setUpClass(cls):
         # Set up API client
        testclient = super(TestVMMigrationWithStorage, cls).getClsTestClient()
+
         cls.apiClient = testclient.getApiClient()
+        cls.configData = testclient.getParsedTestDataConfig()
         cls.dbConnection = testclient.getDbConnection()
 
         cls.testdata = TestData().testdata
@@ -250,13 +250,15 @@ class TestVMMigrationWithStorage(cloudstackTestCase):
         cls.xen_session_2.xenapi.login_with_password(xenserver[TestData.username], xenserver[TestData.password])
 
         # Set up SolidFire connection
-        cls.sf_client = sf_api.SolidFireAPI(endpoint_dict=cls.testdata[TestData.solidFire])
+        solidfire = cls.testdata[TestData.solidFire]
+
+        cls.sfe = ElementFactory.create(solidfire[TestData.mvip], solidfire[TestData.username], solidfire[TestData.password])
 
         # Get Resources from Cloud Infrastructure
         cls.zone = get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId])
         cls.cluster_1 = list_clusters(cls.apiClient, id=cls.testdata[TestData.clusterId1])[0]
         cls.cluster_2 = list_clusters(cls.apiClient, id=cls.testdata[TestData.clusterId2])[0]
-        cls.template = get_template(cls.apiClient, cls.zone.id, template_name=cls.testdata[TestData.templateName])
+        cls.template = get_template(cls.apiClient, cls.zone.id, cls.configData["ostype"])
         cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId])
 
         # Create test account
@@ -330,6 +332,8 @@ class TestVMMigrationWithStorage(cloudstackTestCase):
             cleanup_resources(cls.apiClient, cls._cleanup)
 
             cls.primary_storage.delete(cls.apiClient)
+
+            sf_util.purge_solidfire_volumes(cls.sfe)
         except Exception as e:
             logging.debug("Exception in tearDownClass(cls): %s" % e)
@@ -340,7 +344,7 @@ class TestVMMigrationWithStorage(cloudstackTestCase):
         try:
             cleanup_resources(self.apiClient, self.cleanup)
 
-            sf_util.purge_solidfire_volumes(self.sf_client)
+            sf_util.purge_solidfire_volumes(self.sfe)
         except Exception as e:
             logging.debug("Exception in tearDownClass(self): %s" % e)
@@ -366,7 +370,7 @@ class TestVMMigrationWithStorage(cloudstackTestCase):
         sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestVMMigrationWithStorage._sf_account_id_should_be_non_zero_int_err_msg)
 
-        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
+        sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id)
 
         sf_root_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_root_volume.name, self)
@@ -386,7 +390,7 @@ class TestVMMigrationWithStorage(cloudstackTestCase):
             cs_data_volume
         )
 
-        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
+        sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id)
 
         sf_data_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_data_volume.name, self)
@@ -451,7 +455,7 @@ class TestVMMigrationWithStorage(cloudstackTestCase):
         sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestVMMigrationWithStorage._sf_account_id_should_be_non_zero_int_err_msg)
 
-        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
+        sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id)
 
         sf_data_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_data_volume.name, self)
@@ -497,7 +501,7 @@ class TestVMMigrationWithStorage(cloudstackTestCase):
         sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestVMMigrationWithStorage._sf_account_id_should_be_non_zero_int_err_msg)
 
-        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
+        sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id)
 
         sf_root_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_root_volume.name, self)
@@ -517,7 +521,7 @@ class TestVMMigrationWithStorage(cloudstackTestCase):
             cs_data_volume
         )
 
-        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
+        sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id)
 
         sf_data_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_data_volume.name, self)
@@ -549,7 +553,7 @@ class TestVMMigrationWithStorage(cloudstackTestCase):
         cs_root_volume = self._get_updated_cs_volume(cs_root_volume.id)
         cs_data_volume = self._get_updated_cs_volume(cs_data_volume.id)
 
-        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
+        sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id)
 
         dest_sf_root_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_root_volume.name, self)
         dest_sf_data_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_data_volume.name, self)
@@ -580,7 +584,7 @@ class TestVMMigrationWithStorage(cloudstackTestCase):
         cs_volume = self._get_updated_cs_volume(cs_volume.id)
 
-        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
+        sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id)
 
         dest_sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_volume.name, self)
@@ -624,7 +628,7 @@ class TestVMMigrationWithStorage(cloudstackTestCase):
         self._verifyFields(cs_root_volume_refreshed, src_sf_root_volume)
         self._verifyFields(cs_data_volume_refreshed, src_sf_data_volume)
 
-        sf_volumes = sf_util.get_not_active_sf_volumes(self.sf_client, sf_account_id)
+        sf_volumes = sf_util.get_not_active_sf_volumes(self.sfe, sf_account_id)
 
         dest_sf_root_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_root_volume.name, self)
         dest_sf_data_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_data_volume.name, self)
@@ -633,11 +637,11 @@ class TestVMMigrationWithStorage(cloudstackTestCase):
         self._verify_xenserver_state(dest_xen_session, dest_sf_data_volume, src_xen_session, src_sf_data_volume)
 
     def _verify_different_volume_access_groups(self, src_sf_volume, dest_sf_volume):
-        src_vags = src_sf_volume['volumeAccessGroups']
+        src_vags = src_sf_volume.volume_access_groups
 
         sf_util.check_list(src_vags, 1, self, "'src_vags' should be a list with only one element in it.")
 
-        dest_vags = dest_sf_volume['volumeAccessGroups']
+        dest_vags = dest_sf_volume.volume_access_groups
 
         sf_util.check_list(dest_vags, 1, self, "'dest_vags' should be a list with only one element in it.")
@@ -647,23 +651,23 @@ class TestVMMigrationWithStorage(cloudstackTestCase):
         return list_volumes(self.apiClient, listall=True, id=cs_volume_id)[0]
 
     def _verify_same_account(self, src_sf_volume, dest_sf_volume):
-        self.assertEqual(src_sf_volume['accountID'], dest_sf_volume['accountID'], "The source and destination volumes should be in the same SolidFire account.")
+        self.assertEqual(src_sf_volume.account_id, dest_sf_volume.account_id, "The source and destination volumes should be in the same SolidFire account.")
 
     def _verifySfVolumeIds(self, src_sf_volume, dest_sf_volume):
-        self.assert_(src_sf_volume['volumeID'] < dest_sf_volume['volumeID'],
+        self.assert_(src_sf_volume.volume_id < dest_sf_volume.volume_id,
                      "The destination SolidFire root volume's ID should be greater than the id of the source one.")
 
     # verify the name, folder, and iscsi_name
     def _verifyFields(self, cs_volume, sf_volume):
-        self.assert_(cs_volume.name == sf_volume['name'], "The CloudStack volume name does not match the SolidFire volume name.")
+        self.assert_(cs_volume.name == sf_volume.name, "The CloudStack volume name does not match the SolidFire volume name.")
 
         cs_volume_folder = self._get_cs_volume_folder(cs_volume.id)
 
-        self.assert_(int(cs_volume_folder) == sf_volume['volumeID'], "The CloudStack folder name does not match the SolidFire volume ID.")
+        self.assert_(int(cs_volume_folder) == sf_volume.volume_id, "The CloudStack folder name does not match the SolidFire volume ID.")
 
         cs_volume_iscsi_name = self._get_cs_volume_iscsi_name(cs_volume.id)
 
-        self.assert_(cs_volume_iscsi_name == sf_util.format_iqn(sf_volume['iqn']), "The CloudStack volume iscsi_name does not match the SolidFire volume IQN.")
+        self.assert_(cs_volume_iscsi_name == sf_util.format_iqn(sf_volume.iqn), "The CloudStack volume iscsi_name does not match the SolidFire volume IQN.")
 
     def _get_cs_volume_property(self, cs_volume_id, volume_property):
         sql_query = "Select " + volume_property + " From volumes Where uuid = '" + cs_volume_id + "'"
@@ -688,10 +692,10 @@ class TestVMMigrationWithStorage(cloudstackTestCase):
         sf_util.check_list(sql_result, 0, self, "The cloud.volume_details table should not have any name fields that start with 'basic_'.")
 
     def _verify_xenserver_state(self, xen_session_1, sf_volume_1, xen_session_2, sf_volume_2):
-        sr_name = sf_util.format_iqn(sf_volume_1["iqn"])
+        sr_name = sf_util.format_iqn(sf_volume_1.iqn)
 
         sf_util.check_xen_sr(sr_name, xen_session_1, self, False)
 
-        sr_name = sf_util.format_iqn(sf_volume_2["iqn"])
+        sr_name = sf_util.format_iqn(sf_volume_2.iqn)
 
         sf_util.check_xen_sr(sr_name, xen_session_2, self)
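For reference, the Volume result objects consumed by the assertions above expose each former dictionary key as a snake_case attribute. An illustrative sketch, assuming sfe is the ElementFactory connection created in setUpClass():

    # ListActiveVolumesResult.volumes holds Volume objects.
    for v in sfe.list_active_volumes().volumes:
        print(v.volume_id, v.name, v.iqn)            # formerly volumeID / name / iqn keys
        print(v.account_id, v.volume_access_groups)  # formerly accountID / volumeAccessGroups
        print(v.total_size, v.qos.min_iops)          # formerly totalSize / qos.minIOPS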
diff --git a/test/integration/plugins/solidfire/TestVMSnapshots.py b/test/integration/plugins/solidfire/TestVMSnapshots.py
index 14e8e71f789..db2539025dd 100644
--- a/test/integration/plugins/solidfire/TestVMSnapshots.py
+++ b/test/integration/plugins/solidfire/TestVMSnapshots.py
@@ -20,6 +20,8 @@ import random
 import SignedAPICall
 import XenAPI
 
+from solidfire.factory import ElementFactory
+
 from util import sf_util
 
 # All tests inherit from cloudstackTestCase
@@ -36,8 +38,6 @@ from marvin.lib.common import get_domain, get_template, get_zone, list_hosts, li
 # utils - utility classes for common cleanup, external library wrappers, etc.
 from marvin.lib.utils import cleanup_resources
 
-from solidfire import solidfire_element_api as sf_api
-
 # Prerequisites:
 # Only one zone
 # Only one pod
@@ -63,7 +63,6 @@ class TestData:
     solidFire = "solidfire"
     storageTag = "SolidFire_SAN_1"
     tags = "tags"
-    templateName = "templatename"
     url = "url"
     user = "user"
     username = "username"
@@ -76,7 +75,7 @@ class TestData:
         self.testdata = {
             TestData.solidFire: {
                 TestData.mvip: "192.168.139.112",
-                TestData.login: "admin",
+                TestData.username: "admin",
                 TestData.password: "admin",
                 TestData.port: 443,
                 TestData.url: "https://192.168.139.112:443"
@@ -211,7 +210,6 @@ class TestData:
             "volume2": {
                 "diskname": "testvolume2",
             },
-            TestData.templateName: "CentOS 5.6(64-bit) no GUI (XenServer)",
             TestData.zoneId: 1,
             TestData.clusterId: 1,
             TestData.domainId: 1,
@@ -237,7 +235,9 @@ class TestVMSnapshots(cloudstackTestCase):
     def setUpClass(cls):
         # Set up API client
         testclient = super(TestVMSnapshots, cls).getClsTestClient()
+
         cls.apiClient = testclient.getApiClient()
+        cls.configData = testclient.getParsedTestDataConfig()
 
         cls.testdata = TestData().testdata
@@ -252,11 +252,13 @@ class TestVMSnapshots(cloudstackTestCase):
         cls.xen_session.xenapi.login_with_password(xenserver[TestData.username], xenserver[TestData.password])
 
         # Set up SolidFire connection
-        cls.sf_client = sf_api.SolidFireAPI(endpoint_dict=cls.testdata[TestData.solidFire])
+        solidfire = cls.testdata[TestData.solidFire]
+
+        cls.sfe = ElementFactory.create(solidfire[TestData.mvip], solidfire[TestData.username], solidfire[TestData.password])
 
         # Get Resources from Cloud Infrastructure
         cls.zone = get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId])
-        template = get_template(cls.apiClient, cls.zone.id, template_name=cls.testdata[TestData.templateName])
+        template = get_template(cls.apiClient, cls.zone.id, cls.configData["ostype"])
         cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId])
 
         # Create test account
@@ -332,7 +334,7 @@ class TestVMSnapshots(cloudstackTestCase):
             cls.primary_storage.delete(cls.apiClient)
 
-            sf_util.purge_solidfire_volumes(cls.sf_client)
+            sf_util.purge_solidfire_volumes(cls.sfe)
         except Exception as e:
             logging.debug("Exception in tearDownClass(cls): %s" % e)
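The other change repeated in each suite is the template lookup: instead of a template name hard-coded per file, get_template() now takes the OS type from Marvin's parsed test configuration. A condensed sketch of the new lookup, with names as they appear in the setUpClass() bodies above:

    testclient = super(TestVolumes, cls).getClsTestClient()

    cls.apiClient = testclient.getApiClient()
    # Marvin supplies the OS type, so the CentOS template no longer has to
    # match a string embedded in each test file.
    cls.configData = testclient.getParsedTestDataConfig()

    cls.template = get_template(cls.apiClient, cls.zone.id, cls.configData["ostype"])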
diff --git a/test/integration/plugins/solidfire/TestVolumes.py b/test/integration/plugins/solidfire/TestVolumes.py
index 63b9be11604..b70ac915bee 100644
--- a/test/integration/plugins/solidfire/TestVolumes.py
+++ b/test/integration/plugins/solidfire/TestVolumes.py
@@ -20,6 +20,8 @@ import random
 import SignedAPICall
 import XenAPI
 
+from solidfire.factory import ElementFactory
+
 from util import sf_util
 
 # All tests inherit from cloudstackTestCase
@@ -39,8 +41,6 @@ from marvin.lib.common import get_domain, get_template, get_zone, list_clusters,
 # utils - utility classes for common cleanup, external library wrappers, etc.
 from marvin.lib.utils import cleanup_resources
 
-from solidfire import solidfire_element_api as sf_api
-
 # Prerequisites:
 # Only one zone
 # Only one pod
@@ -71,7 +71,6 @@ class TestData():
     storageTag = "SolidFire_SAN_1"
     tags = "tags"
     templateCacheName = "centos56-x86-64-xen"
-    templateName = "templatename"
     testAccount = "testaccount"
     url = "url"
     user = "user"
@@ -87,7 +86,7 @@ class TestData():
         self.testdata = {
             TestData.solidFire: {
                 TestData.mvip: "192.168.139.112",
-                TestData.login: "admin",
+                TestData.username: "admin",
                 TestData.password: "admin",
                 TestData.port: 443,
                 TestData.url: "https://192.168.139.112:443"
@@ -168,7 +167,6 @@ class TestData():
             TestData.volume_2: {
                 TestData.diskName: "test-volume-2",
             },
-            TestData.templateName: "CentOS 5.6(64-bit) no GUI (XenServer)",
             TestData.zoneId: 1,
             TestData.clusterId: 1,
             TestData.domainId: 1,
@@ -192,6 +190,7 @@ class TestVolumes(cloudstackTestCase):
         # Set up API client
         testclient = super(TestVolumes, cls).getClsTestClient()
         cls.apiClient = testclient.getApiClient()
+        cls.configData = testclient.getParsedTestDataConfig()
         cls.dbConnection = testclient.getDbConnection()
 
         cls.testdata = TestData().testdata
@@ -212,12 +211,14 @@ class TestVolumes(cloudstackTestCase):
         cls.xen_session.xenapi.login_with_password(xenserver[TestData.username], xenserver[TestData.password])
 
         # Set up SolidFire connection
-        cls.sf_client = sf_api.SolidFireAPI(endpoint_dict=cls.testdata[TestData.solidFire])
+        solidfire = cls.testdata[TestData.solidFire]
+
+        cls.sfe = ElementFactory.create(solidfire[TestData.mvip], solidfire[TestData.username], solidfire[TestData.password])
 
         # Get Resources from Cloud Infrastructure
         cls.zone = get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId])
         cls.cluster = list_clusters(cls.apiClient)[0]
-        cls.template = get_template(cls.apiClient, cls.zone.id, template_name=cls.testdata[TestData.templateName])
+        cls.template = get_template(cls.apiClient, cls.zone.id, cls.configData["ostype"])
         cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId])
 
         # Create test account
@@ -304,7 +305,7 @@ class TestVolumes(cloudstackTestCase):
             cls.primary_storage.delete(cls.apiClient)
 
-            sf_util.purge_solidfire_volumes(cls.sf_client)
+            sf_util.purge_solidfire_volumes(cls.sfe)
         except Exception as e:
             logging.debug("Exception in tearDownClass(cls): %s" % e)
@@ -328,16 +329,16 @@ class TestVolumes(cloudstackTestCase):
         sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, TestData.templateCacheName, self)
 
         self.assertEqual(
-            len(sf_volume['volumeAccessGroups']),
+            len(sf_volume.volume_access_groups),
            0,
            "The volume should not be in a VAG."
         )
 
-        sf_account_id = sf_volume["accountID"]
+        sf_account_id = sf_volume.account_id
 
-        sf_account = self.sf_client.get_account_by_id(sf_account_id)["account"]
+        sf_account = self.sfe.get_account_by_id(sf_account_id).account
 
-        sf_account_name = sf_account["username"]
+        sf_account_name = sf_account.username
 
         self.assertEqual(
             sf_account_name.endswith("_1"),
@@ -504,7 +505,7 @@ class TestVolumes(cloudstackTestCase):
         sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
 
         self.assertEqual(
-            len(sf_volume['volumeAccessGroups']),
+            len(sf_volume.volume_access_groups),
             0,
             "The volume should not be in a VAG."
         )
@@ -723,7 +724,7 @@ class TestVolumes(cloudstackTestCase):
         sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
 
         self.assertEqual(
-            len(sf_volume['volumeAccessGroups']),
+            len(sf_volume.volume_access_groups),
             0,
             TestVolumes._volume_should_not_be_in_a_vag
         )
@@ -747,7 +748,7 @@ class TestVolumes(cloudstackTestCase):
         sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
 
         self.assertEqual(
-            len(sf_volume['volumeAccessGroups']),
+            len(sf_volume.volume_access_groups),
             0,
             TestVolumes._volume_should_not_be_in_a_vag
         )
@@ -847,7 +848,7 @@ class TestVolumes(cloudstackTestCase):
         sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
 
         self.assertEqual(
-            len(sf_volume['volumeAccessGroups']),
+            len(sf_volume.volume_access_groups),
             0,
             TestVolumes._volume_should_not_be_in_a_vag
         )
@@ -871,7 +872,7 @@ class TestVolumes(cloudstackTestCase):
         sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
 
         self.assertEqual(
-            len(sf_volume['volumeAccessGroups']),
+            len(sf_volume.volume_access_groups),
             0,
             TestVolumes._volume_should_not_be_in_a_vag
         )
@@ -1075,7 +1076,7 @@ class TestVolumes(cloudstackTestCase):
         sf_util.check_size_and_iops(sf_volume, vol, sf_volume_size, self)
 
         self.assertEqual(
-            len(sf_volume['volumeAccessGroups']),
+            len(sf_volume.volume_access_groups),
             0,
             TestVolumes._volume_should_not_be_in_a_vag
         )
@@ -1182,7 +1183,7 @@ class TestVolumes(cloudstackTestCase):
         sf_util.check_size_and_iops(sf_volume, vol, sf_volume_size, self)
 
         self.assertEqual(
-            len(sf_volume['volumeAccessGroups']),
+            len(sf_volume.volume_access_groups),
             0,
             TestVolumes._volume_should_not_be_in_a_vag
         )
@@ -1489,7 +1490,7 @@ class TestVolumes(cloudstackTestCase):
         sf_util.check_xen_sr(xen_sr_name, self.xen_session, self, should_exist)
 
     def _get_active_sf_volumes(self, sf_account_id=None):
-        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
+        sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id)
 
         self.assertNotEqual(
             len(sf_volumes),
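The sf_util helpers below absorb the same result-object convention so their call sites in the suites stay unchanged. Standalone, the purge pattern they wrap looks like this (illustrative; sfe as in the sketches above):

    # ListDeletedVolumesResult.volumes holds the volumes awaiting purge.
    for deleted_volume in sfe.list_deleted_volumes().volumes:
        sfe.purge_deleted_volume(deleted_volume.volume_id)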
diff --git a/test/integration/plugins/solidfire/util/sf_util.py b/test/integration/plugins/solidfire/util/sf_util.py
index 66295710333..1b451d5639f 100644
--- a/test/integration/plugins/solidfire/util/sf_util.py
+++ b/test/integration/plugins/solidfire/util/sf_util.py
@@ -68,20 +68,20 @@ def _set_supports_resign_for_table(supports_resign, db_connection, table):
     # make sure you can connect to MySQL: https://teamtreehouse.com/community/cant-connect-remotely-to-mysql-server-with-mysql-workbench
     db_connection.execute(sql_query)
 
-def purge_solidfire_volumes(sf_client):
-    deleted_volumes = sf_client.list_deleted_volumes()
+def purge_solidfire_volumes(sfe):
+    deleted_volumes = sfe.list_deleted_volumes()
 
-    for deleted_volume in deleted_volumes:
-        sf_client.purge_deleted_volume(deleted_volume['volumeID'])
+    for deleted_volume in deleted_volumes.volumes:
+        sfe.purge_deleted_volume(deleted_volume.volume_id)
 
-def get_not_active_sf_volumes(sf_client, sf_account_id=None):
+def get_not_active_sf_volumes(sfe, sf_account_id=None):
     if sf_account_id is not None:
-        sf_volumes = sf_client.list_volumes_for_account(sf_account_id)
+        sf_volumes = sfe.list_volumes_for_account(sf_account_id).volumes
 
         if sf_volumes is not None and len(sf_volumes) > 0:
             sf_volumes = _get_not_active_sf_volumes_only(sf_volumes)
     else:
-        sf_volumes = sf_client.list_deleted_volumes()
+        sf_volumes = sfe.list_deleted_volumes().volumes
 
     return sf_volumes
 
@@ -89,19 +89,19 @@ def _get_not_active_sf_volumes_only(sf_volumes):
     not_active_sf_volumes_only = []
 
     for sf_volume in sf_volumes:
-        if sf_volume["status"] != "active":
+        if sf_volume.status != "active":
             not_active_sf_volumes_only.append(sf_volume)
 
     return not_active_sf_volumes_only
 
-def get_active_sf_volumes(sf_client, sf_account_id=None):
+def get_active_sf_volumes(sfe, sf_account_id=None):
     if sf_account_id is not None:
-        sf_volumes = sf_client.list_volumes_for_account(sf_account_id)
+        sf_volumes = sfe.list_volumes_for_account(sf_account_id).volumes
 
         if sf_volumes is not None and len(sf_volumes) > 0:
             sf_volumes = _get_active_sf_volumes_only(sf_volumes)
     else:
-        sf_volumes = sf_client.list_active_volumes()
+        sf_volumes = sfe.list_active_volumes().volumes
 
     return sf_volumes
 
@@ -109,7 +109,7 @@ def _get_active_sf_volumes_only(sf_volumes):
     active_sf_volumes_only = []
 
     for sf_volume in sf_volumes:
-        if sf_volume["status"] == "active":
+        if sf_volume.status == "active":
             active_sf_volumes_only.append(sf_volume)
 
     return active_sf_volumes_only
 
@@ -118,7 +118,7 @@ def check_and_get_sf_volume(sf_volumes, sf_volume_name, obj_assert, should_exist
     sf_volume = None
 
     for volume in sf_volumes:
-        if volume['name'] == sf_volume_name:
+        if volume.name == sf_volume_name:
             sf_volume = volume
 
             break
@@ -155,13 +155,13 @@ def check_xen_sr(xen_sr_name, xen_session, obj_assert, should_exist=True):
 def check_vag(sf_volume, sf_vag_id, obj_assert):
     obj_assert.assertEqual(
-        len(sf_volume['volumeAccessGroups']),
+        len(sf_volume.volume_access_groups),
         1,
         "The volume should only be in one VAG."
     )
 
     obj_assert.assertEqual(
-        sf_volume['volumeAccessGroups'][0],
+        sf_volume.volume_access_groups[0],
         sf_vag_id,
         "The volume is not in the VAG with the following ID: " + str(sf_vag_id) + "."
     )
@@ -185,21 +185,21 @@ def format_iqn(iqn):
 def check_size_and_iops(sf_volume, cs_volume, size, obj_assert):
     obj_assert.assertEqual(
-        sf_volume['qos']['minIOPS'],
+        sf_volume.qos.min_iops,
         cs_volume.miniops,
-        "Check QoS - Min IOPS: " + str(sf_volume['qos']['minIOPS'])
+        "Check QoS - Min IOPS: " + str(sf_volume.qos.min_iops)
     )
 
     obj_assert.assertEqual(
-        sf_volume['qos']['maxIOPS'],
+        sf_volume.qos.max_iops,
         cs_volume.maxiops,
-        "Check QoS - Max IOPS: " + str(sf_volume['qos']['maxIOPS'])
+        "Check QoS - Max IOPS: " + str(sf_volume.qos.max_iops)
    )
 
     obj_assert.assertEqual(
-        sf_volume['totalSize'],
+        sf_volume.total_size,
         size,
-        "Check SolidFire volume size: " + str(sf_volume['totalSize'])
+        "Check SolidFire volume size: " + str(sf_volume.total_size)
     )
 
 def get_volume_size_with_hsr(cs_api, cs_volume, obj_assert):