tests: Fix k8s test failures on VMware (#4896)

This PR fixes the Kubernetes test failures noticed on VMware.

On VMware, vmware.create.full.clone is now enabled globally and on every primary storage pool during test setup, and set back to false on teardown. The resizeNodeVolume call is removed from the node start path in KubernetesClusterResourceModifierActionWorker. getKubernetesTemplate() now also reports whether the CKS template already existed, so pre-registered templates are no longer queued for cleanup or deleted on teardown. The deploy/stop/start lifecycle checks move from test_01 to a new test_04_basic_lifecycle_kubernetes_cluster, the remaining tests are renumbered, and the invalid-upgrade test now deletes its cluster when done.
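For context, the full-clone toggle works roughly like this (a minimal sketch based on the updateVmwareSettings helper added below; the toggle_full_clone name and the surrounding Marvin session are assumptions, not part of this PR):

    # Sketch: flip vmware.create.full.clone for a test run, both globally
    # and on each primary storage pool (the pool-scoped value is the one
    # that takes effect for volumes on that pool).
    from marvin.lib.base import Configurations, StoragePool

    def toggle_full_clone(apiclient, enable):
        value = "true" if enable else "false"
        # Global setting first ...
        Configurations.update(apiclient, "vmware.create.full.clone", value)
        # ... then the storage-pool-scoped overrides.
        for pool in StoragePool.list(apiclient):
            Configurations.update(apiclient,
                                  storageid=pool.id,
                                  name="vmware.create.full.clone",
                                  value=value)

In the PR itself the helper takes a tearDown flag instead and is wired into setUpClass/tearDownClass, as the diff below shows.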

Co-authored-by: Pearl Dsilva <pearl.dsilva@shapeblue.com>
Co-authored-by: Abhishek Kumar <abhishek.mrt22@gmail.com>
Pearl Dsilva <pearl.dsilva@shapeblue.com> 2021-04-09 12:34:10 +05:30, committed by GitHub
parent 262285639e
commit cd3851946b
2 changed files with 63 additions and 44 deletions

@@ -307,7 +307,6 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu
         Field f = startVm.getClass().getDeclaredField("id");
         f.setAccessible(true);
         f.set(startVm, vm.getId());
-        resizeNodeVolume(vm);
         userVmService.startVirtualMachine(startVm);
         if (LOGGER.isInfoEnabled()) {
             LOGGER.info(String.format("Started VM : %s in the Kubernetes cluster : %s", vm.getDisplayName(), kubernetesCluster.getName()));

@@ -37,6 +37,7 @@ from marvin.codes import PASS, FAILED
 from marvin.lib.base import (Template,
                              ServiceOffering,
                              Account,
+                             StoragePool,
                              Configurations)
 from marvin.lib.utils import (cleanup_resources,
                               validateList,
@ -81,7 +82,7 @@ class TestKubernetesCluster(cloudstackTestCase):
"cloud.kubernetes.service.enabled", "cloud.kubernetes.service.enabled",
"true") "true")
cls.restartServer() cls.restartServer()
cls.updateVmwareSettings(False)
cls.cks_template = None cls.cks_template = None
cls.initial_configuration_cks_template_name = None cls.initial_configuration_cks_template_name = None
cls.cks_service_offering = None cls.cks_service_offering = None
@@ -120,12 +121,13 @@ class TestKubernetesCluster(cloudstackTestCase):
                 (cls.services["cks_kubernetes_versions"]["1.16.3"]["semanticversion"], cls.services["cks_kubernetes_versions"]["1.16.3"]["url"], e))
 
         if cls.setup_failed == False:
-            cls.cks_template = cls.getKubernetesTemplate()
+            cls.cks_template, existAlready = cls.getKubernetesTemplate()
             if cls.cks_template == FAILED:
                 assert False, "getKubernetesTemplate() failed to return template for hypervisor %s" % cls.hypervisor
                 cls.setup_failed = True
             else:
-                cls._cleanup.append(cls.cks_template)
+                if not existAlready:
+                    cls._cleanup.append(cls.cks_template)
 
         if cls.setup_failed == False:
             cls.initial_configuration_cks_template_name = Configurations.list(cls.apiclient,
@ -162,8 +164,6 @@ class TestKubernetesCluster(cloudstackTestCase):
cls.debug("Error: Exception during cleanup for added Kubernetes supported versions: %s" % e) cls.debug("Error: Exception during cleanup for added Kubernetes supported versions: %s" % e)
try: try:
# Restore original CKS template # Restore original CKS template
if cls.cks_template != None:
cls.cks_template.delete(cls.apiclient)
if cls.hypervisorNotSupported == False and cls.initial_configuration_cks_template_name != None: if cls.hypervisorNotSupported == False and cls.initial_configuration_cks_template_name != None:
Configurations.update(cls.apiclient, Configurations.update(cls.apiclient,
cls.cks_template_name_key, cls.cks_template_name_key,
@@ -176,6 +176,8 @@ class TestKubernetesCluster(cloudstackTestCase):
                                   "false")
             cls.restartServer()
+            cls.updateVmwareSettings(True)
+
             cleanup_resources(cls.apiclient, cls._cleanup)
         except Exception as e:
             raise Exception("Warning: Exception during cleanup : %s" % e)
@@ -183,6 +185,24 @@ class TestKubernetesCluster(cloudstackTestCase):
             raise Exception("Warning: Exception during cleanup, unable to delete Kubernetes supported versions")
         return
 
+    @classmethod
+    def updateVmwareSettings(cls, tearDown):
+        value = "false"
+        if not tearDown:
+            value = "true"
+        if cls.hypervisor.lower() == 'vmware':
+            Configurations.update(cls.apiclient,
+                                  "vmware.create.full.clone",
+                                  value)
+            allStoragePools = StoragePool.list(
+                cls.apiclient
+            )
+            for pool in allStoragePools:
+                Configurations.update(cls.apiclient,
+                                      storageid=pool.id,
+                                      name="vmware.create.full.clone",
+                                      value=value)
+
     @classmethod
     def restartServer(cls):
         """Restart management server"""
@@ -227,7 +247,7 @@ class TestKubernetesCluster(cloudstackTestCase):
         if hypervisor not in cks_templates.keys():
             cls.debug("Provided hypervisor has no CKS template")
-            return FAILED
+            return FAILED, False
 
         cks_template = cks_templates[hypervisor]
@@ -244,13 +264,13 @@ class TestKubernetesCluster(cloudstackTestCase):
             details = [{"keyboard": "us"}]
             template = Template.register(cls.apiclient, cks_template, zoneid=cls.zone.id, hypervisor=hypervisor.lower(), randomize_name=False, details=details)
             template.download(cls.apiclient)
-            return template
+            return template, False
 
         for template in templates:
             if template.isready and template.ispublic:
-                return Template(template.__dict__)
-        return FAILED
+                return Template(template.__dict__), True
+        return FAILED, False
 
     @classmethod
     def waitForKubernetesSupportedVersionIsoReadyState(cls, version_id, retries=30, interval=60):
@@ -313,38 +333,7 @@ class TestKubernetesCluster(cloudstackTestCase):
     @attr(tags=["advanced", "smoke"], required_hardware="true")
     @skipTestIf("hypervisorNotSupported")
-    def test_01_deploy_kubernetes_cluster(self):
-        """Test to deploy a new Kubernetes cluster
-
-        # Validate the following:
-        # 1. createKubernetesCluster should return valid info for new cluster
-        # 2. The Cloud Database contains the valid information
-        # 3. stopKubernetesCluster should stop the cluster
-        """
-        if self.setup_failed == True:
-            self.fail("Setup incomplete")
-        global k8s_cluster
-        k8s_cluster = self.getValidKubernetesCluster()
-
-        self.debug("Kubernetes cluster with ID: %s successfully deployed, now stopping it" % k8s_cluster.id)
-        self.stopAndVerifyKubernetesCluster(k8s_cluster.id)
-
-        self.debug("Kubernetes cluster with ID: %s successfully stopped, now starting it again" % k8s_cluster.id)
-        try:
-            k8s_cluster = self.startKubernetesCluster(k8s_cluster.id)
-        except Exception as e:
-            self.deleteKubernetesClusterAndVerify(k8s_cluster.id, False, True)
-            self.fail("Failed to start Kubernetes cluster due to: %s" % e)
-        self.verifyKubernetesClusterState(k8s_cluster, 'Running')
-        return
-
-    @attr(tags=["advanced", "smoke"], required_hardware="true")
-    @skipTestIf("hypervisorNotSupported")
-    def test_02_invalid_upgrade_kubernetes_cluster(self):
+    def test_01_invalid_upgrade_kubernetes_cluster(self):
         """Test to check for failure while tying to upgrade a Kubernetes cluster to a lower version
 
         # Validate the following:
@@ -364,12 +353,13 @@ class TestKubernetesCluster(cloudstackTestCase):
             self.fail("Kubernetes cluster upgraded to a lower Kubernetes supported version. Must be an error.")
         except Exception as e:
             self.debug("Upgrading Kubernetes cluster with invalid Kubernetes supported version check successful, API failure: %s" % e)
+            self.deleteKubernetesClusterAndVerify(k8s_cluster.id, False, True)
         return
 
     @attr(tags=["advanced", "smoke"], required_hardware="true")
     @skipTestIf("hypervisorNotSupported")
-    def test_03_deploy_and_upgrade_kubernetes_cluster(self):
+    def test_02_deploy_and_upgrade_kubernetes_cluster(self):
         """Test to deploy a new Kubernetes cluster and upgrade it to newer version
 
         # Validate the following:
@@ -395,7 +385,7 @@ class TestKubernetesCluster(cloudstackTestCase):
     @attr(tags=["advanced", "smoke"], required_hardware="true")
     @skipTestIf("hypervisorNotSupported")
-    def test_04_deploy_and_scale_kubernetes_cluster(self):
+    def test_03_deploy_and_scale_kubernetes_cluster(self):
         """Test to deploy a new Kubernetes cluster and check for failure while tying to scale it
 
         # Validate the following:
@@ -431,6 +421,36 @@ class TestKubernetesCluster(cloudstackTestCase):
         return
 
+    @attr(tags=["advanced", "smoke"], required_hardware="true")
+    @skipTestIf("hypervisorNotSupported")
+    def test_04_basic_lifecycle_kubernetes_cluster(self):
+        """Test to deploy a new Kubernetes cluster
+
+        # Validate the following:
+        # 1. createKubernetesCluster should return valid info for new cluster
+        # 2. The Cloud Database contains the valid information
+        # 3. stopKubernetesCluster should stop the cluster
+        """
+        if self.setup_failed == True:
+            self.fail("Setup incomplete")
+        global k8s_cluster
+        k8s_cluster = self.getValidKubernetesCluster()
+
+        self.debug("Kubernetes cluster with ID: %s successfully deployed, now stopping it" % k8s_cluster.id)
+        self.stopAndVerifyKubernetesCluster(k8s_cluster.id)
+
+        self.debug("Kubernetes cluster with ID: %s successfully stopped, now starting it again" % k8s_cluster.id)
+        try:
+            k8s_cluster = self.startKubernetesCluster(k8s_cluster.id)
+        except Exception as e:
+            self.deleteKubernetesClusterAndVerify(k8s_cluster.id, False, True)
+            self.fail("Failed to start Kubernetes cluster due to: %s" % e)
+        self.verifyKubernetesClusterState(k8s_cluster, 'Running')
+        return
+
     @attr(tags=["advanced", "smoke"], required_hardware="true")
     @skipTestIf("hypervisorNotSupported")
     def test_05_delete_kubernetes_cluster(self):