Merge pull request #2021 from nvazquez/CLOUDSTACK-9854
CLOUDSTACK-9854: Fix test_primary_storage test failure due to live migration

Fix for the test_primary_storage integration tests on the simulator. When finding storage pool migration options for a volume on a running vm, the API returns None because the hypervisor doesn't support live migration.

````
2017-03-28 06:07:55,958 - DEBUG - ========Sending GET Cmd : findStoragePoolsForMigration=======
2017-03-28 06:07:55,977 - DEBUG - Response : None
2017-03-28 06:07:55,983 - CRITICAL - EXCEPTION: test_03_migration_options_storage_tags: ['Traceback (most recent call last):\n', ' File "/opt/python/2.7.12/lib/python2.7/unittest/case.py", line 329, in run\n testMethod()\n', ' File "/home/travis/.local/lib/python2.7/site-packages/marvin/lib/decoratorGenerators.py", line 30, in test_wrapper\n return test(self, *args, **kwargs)\n', ' File "/home/travis/build/apache/cloudstack/test/integration/smoke/test_primary_storage.py", line 547, in test_03_migration_options_storage_tags\n pools_suitable = filter(lambda p : p.suitableformigration, pools_response)\n', "TypeError: 'NoneType' object is not iterable\n"]
````

So we simply stop the vm before sending the findStoragePoolsForMigration command.

* pr/2021:
  CLOUDSTACK-9854: Fix test_primary_storage test failure due to live migration

Signed-off-by: Rajani Karuturi <rajani.karuturi@accelerite.com>
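For context, a minimal sketch of the pattern the fix enforces in test_03_migration_options_storage_tags, assuming the Marvin helpers visible in the traceback and in the diff below; the `id=vol.id` keyword argument is an assumption for illustration and is not taken verbatim from this commit.

````
# Sketch only: how the test avoids the NoneType failure (assumes Marvin's
# VirtualMachine/StoragePool helpers as used elsewhere in this test; the
# id=vol.id keyword is an assumed parameter, not copied from the commit).
if self.hypervisor.lower() not in ["vmware", "xenserver"]:
    # The simulator (among others) cannot live-migrate storage, so
    # findStoragePoolsForMigration returns None while the VM is running.
    # Stop the VM first so the API can report candidate pools.
    self.virtual_machine_1.stop(self.apiclient)

pools_response = StoragePool.listForMigration(self.apiclient, id=vol.id)
# Without the stop above, pools_response is None and the next line raises
# "TypeError: 'NoneType' object is not iterable".
pools_suitable = filter(lambda p: p.suitableformigration, pools_response)
````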
commit 85d073b540
@@ -306,7 +306,7 @@ class TestStorageTags(cloudstackTestCase):
                                             podid=cls.pod.id,
                                             tags=cls.services["storage_tags"]["a"]
                                             )
-        cls._cleanup.append(cls.storage_pool_1)
+        #PS not appended to _cleanup, it is removed on tearDownClass before cleaning up resources
         assert cls.storage_pool_1.state == 'Up'
         storage_pools_response = list_storage_pools(cls.apiclient,
                                                     id=cls.storage_pool_1.id)
@@ -369,7 +369,18 @@ class TestStorageTags(cloudstackTestCase):
     def tearDownClass(cls):
         try:
-            cls.virtual_machine_1.delete(cls.apiclient)
+            # First expunge vm, so PS can be cleaned up
+            cls.virtual_machine_1.delete(cls.apiclient, expunge=True)
 
+            # Force delete primary storage
+            cmd = enableStorageMaintenance.enableStorageMaintenanceCmd()
+            cmd.id = cls.storage_pool_1.id
+            cls.apiclient.enableStorageMaintenance(cmd)
+            time.sleep(30)
+            cmd = deleteStoragePool.deleteStoragePoolCmd()
+            cmd.id = cls.storage_pool_1.id
+            cmd.forced = True
+            cls.apiclient.deleteStoragePool(cmd)
+
             cleanup_resources(cls.apiclient, cls._cleanup)
         except Exception as e:
             raise Exception("Cleanup failed with %s" % e)
@@ -539,6 +550,9 @@ class TestStorageTags(cloudstackTestCase):
                                          )
         vol = vm_1_volumes[0]
 
+        if self.hypervisor.lower() not in ["vmware", "xenserver"]:
+            self.virtual_machine_1.stop(self.apiclient)
+
         # Check migration options for volume
         pools_response = StoragePool.listForMigration(
             self.apiclient,