Merge remote-tracking branch 'origin/4.14'
commit b3bafffff3
@@ -95,6 +95,7 @@ public class VMTemplateDaoImpl extends GenericDaoBase<VMTemplateVO, Long> implem
     protected SearchBuilder<VMTemplateVO> AccountIdSearch;
     protected SearchBuilder<VMTemplateVO> NameSearch;
+    protected SearchBuilder<VMTemplateVO> ValidNameSearch;
     protected SearchBuilder<VMTemplateVO> TmpltsInZoneSearch;
     protected SearchBuilder<VMTemplateVO> ActiveTmpltSearch;
     private SearchBuilder<VMTemplateVO> PublicSearch;
@@ -138,8 +139,9 @@ public class VMTemplateDaoImpl extends GenericDaoBase<VMTemplateVO, Long> implem

     @Override
     public VMTemplateVO findValidByTemplateName(String templateName) {
-        SearchCriteria<VMTemplateVO> sc = NameSearch.create();
+        SearchCriteria<VMTemplateVO> sc = ValidNameSearch.create();
         sc.setParameters("name", templateName);
+        sc.setParameters("state", VirtualMachineTemplate.State.Active);
         return findOneBy(sc);
     }

@@ -319,6 +321,10 @@ public class VMTemplateDaoImpl extends GenericDaoBase<VMTemplateVO, Long> implem
         UniqueNameSearch.and("uniqueName", UniqueNameSearch.entity().getUniqueName(), SearchCriteria.Op.EQ);
         NameSearch = createSearchBuilder();
         NameSearch.and("name", NameSearch.entity().getName(), SearchCriteria.Op.EQ);
+        ValidNameSearch = createSearchBuilder();
+        ValidNameSearch.and("name", ValidNameSearch.entity().getName(), SearchCriteria.Op.EQ);
+        ValidNameSearch.and("state", ValidNameSearch.entity().getState(), SearchCriteria.Op.EQ);
+        ValidNameSearch.and("removed", ValidNameSearch.entity().getRemoved(), SearchCriteria.Op.NULL);

         NameAccountIdSearch = createSearchBuilder();
         NameAccountIdSearch.and("name", NameAccountIdSearch.entity().getName(), SearchCriteria.Op.EQ);
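Note: the new ValidNameSearch narrows the name lookup to rows that are both Active and not soft-deleted (removed IS NULL), which is what the Kubernetes service code below relies on when resolving its template by name. A self-contained model of the added filter conditions, in plain Java with hypothetical types standing in for the CloudStack entities (the real query is built by GenericDaoBase):

    import java.util.Date;
    import java.util.List;
    import java.util.Optional;

    // Self-contained sketch of what ValidNameSearch adds over NameSearch:
    // name must match, state must be Active, and removed must be NULL.
    public class ValidNameLookupSketch {
        enum State { Allocated, Active, Inactive }
        record TemplateRow(String name, State state, Date removed) {}

        static Optional<TemplateRow> findValidByName(List<TemplateRow> rows, String name) {
            return rows.stream()
                    .filter(t -> t.name().equals(name))      // NameSearch condition
                    .filter(t -> t.state() == State.Active)  // new: state EQ Active
                    .filter(t -> t.removed() == null)        // new: removed IS NULL
                    .findFirst();                            // findOneBy(sc)
        }
    }
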
@@ -294,7 +294,7 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
             LOGGER.warn(String.format("Global setting %s is empty. Template name need to be specified for Kubernetes service to function", templateKey));
             return false;
         }
-        final VMTemplateVO template = templateDao.findByTemplateName(templateName);
+        final VMTemplateVO template = templateDao.findValidByTemplateName(templateName);
         if (template == null) {
             LOGGER.warn(String.format("Unable to find the template %s to be used for provisioning Kubernetes cluster nodes", templateName));
             return false;
@@ -377,22 +377,22 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
     }

     private VMTemplateVO getKubernetesServiceTemplate(Hypervisor.HypervisorType hypervisorType) {
-        String tempalteName = null;
+        String templateName = null;
         switch (hypervisorType) {
             case Hyperv:
-                tempalteName = KubernetesClusterHyperVTemplateName.value();
+                templateName = KubernetesClusterHyperVTemplateName.value();
                 break;
             case KVM:
-                tempalteName = KubernetesClusterKVMTemplateName.value();
+                templateName = KubernetesClusterKVMTemplateName.value();
                 break;
             case VMware:
-                tempalteName = KubernetesClusterVMwareTemplateName.value();
+                templateName = KubernetesClusterVMwareTemplateName.value();
                 break;
             case XenServer:
-                tempalteName = KubernetesClusterXenserverTemplateName.value();
+                templateName = KubernetesClusterXenserverTemplateName.value();
                 break;
         }
-        return templateDao.findValidByTemplateName(tempalteName);
+        return templateDao.findValidByTemplateName(templateName);
     }

     private boolean validateIsolatedNetwork(Network network, int clusterTotalNodeCount) {
@@ -516,7 +516,7 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
         }
         boolean suitable_host_found = false;
         Cluster planCluster = null;
-        for (int i = 1; i <= nodesCount + 1; i++) {
+        for (int i = 1; i <= nodesCount; i++) {
             suitable_host_found = false;
             for (Map.Entry<String, Pair<HostVO, Integer>> hostEntry : hosts_with_resevered_capacity.entrySet()) {
                 Pair<HostVO, Integer> hp = hostEntry.getValue();
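Note: this loop, and the identical one in KubernetesClusterResourceModifierActionWorker further down, previously ran nodesCount + 1 times and so reserved host capacity for one VM more than requested; with the caller now passing the full node count the bound is exact. A trivial, self-contained illustration of the boundary change, with a hypothetical nodesCount of 3:

    // Hypothetical standalone illustration of the loop-bound fix.
    public class LoopBoundSketch {
        public static void main(String[] args) {
            int nodesCount = 3; // assumed example value
            int oldIterations = 0;
            for (int i = 1; i <= nodesCount + 1; i++) { // old bound: runs 4 times
                oldIterations++;
            }
            int newIterations = 0;
            for (int i = 1; i <= nodesCount; i++) {     // new bound: runs 3 times
                newIterations++;
            }
            System.out.println(oldIterations + " vs " + newIterations); // 4 vs 3
        }
    }
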
@@ -993,7 +993,7 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
         try {
             deployDestination = plan(totalNodeCount, zone, serviceOffering);
         } catch (InsufficientCapacityException e) {
-            logAndThrow(Level.ERROR, String.format("Creating Kubernetes cluster failed due to insufficient capacity for %d cluster nodes in zone ID: %s with service offering ID: %s", totalNodeCount, zone.getUuid(), serviceOffering.getUuid()));
+            logAndThrow(Level.ERROR, String.format("Creating Kubernetes cluster failed due to insufficient capacity for %d nodes cluster in zone ID: %s with service offering ID: %s", totalNodeCount, zone.getUuid(), serviceOffering.getUuid()));
         }
         if (deployDestination == null || deployDestination.getCluster() == null) {
             logAndThrow(Level.ERROR, String.format("Creating Kubernetes cluster failed due to error while finding suitable deployment plan for cluster in zone ID: %s", zone.getUuid()));
@@ -198,9 +198,7 @@ public class KubernetesClusterDestroyWorker extends KubernetesClusterResourceMod
         }
         if (cleanupNetwork) { // if network has additional VM, cannot proceed with cluster destroy
             NetworkVO network = networkDao.findById(kubernetesCluster.getNetworkId());
-            if (network == null) {
-                logAndThrow(Level.ERROR, String.format("Failed to find network for Kubernetes cluster ID: %s", kubernetesCluster.getUuid()));
-            }
+            if (network != null) {
                 List<VMInstanceVO> networkVMs = vmInstanceDao.listNonRemovedVmsByTypeAndNetwork(network.getId(), VirtualMachine.Type.User);
                 if (networkVMs.size() > clusterVMs.size()) {
                     logAndThrow(Level.ERROR, String.format("Network ID: %s for Kubernetes cluster ID: %s has instances using it which are not part of the Kubernetes cluster", network.getUuid(), kubernetesCluster.getUuid()));
@@ -217,6 +215,9 @@ public class KubernetesClusterDestroyWorker extends KubernetesClusterResourceMod
                         logAndThrow(Level.ERROR, String.format("VM ID: %s which is not a part of Kubernetes cluster ID: %s is using Kubernetes cluster network ID: %s", vm.getUuid(), kubernetesCluster.getUuid(), network.getUuid()));
                     }
                 }
+            } else {
+                LOGGER.error(String.format("Failed to find network for Kubernetes cluster ID: %s", kubernetesCluster.getUuid()));
+            }
         }
         if (LOGGER.isInfoEnabled()) {
             LOGGER.info(String.format("Destroying Kubernetes cluster ID: %s", kubernetesCluster.getUuid()));
@@ -207,7 +207,7 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu
             hosts_with_resevered_capacity.put(h.getUuid(), new Pair<HostVO, Integer>(h, 0));
         }
         boolean suitable_host_found = false;
-        for (int i = 1; i <= nodesCount + 1; i++) {
+        for (int i = 1; i <= nodesCount; i++) {
             suitable_host_found = false;
             for (Map.Entry<String, Pair<HostVO, Integer>> hostEntry : hosts_with_resevered_capacity.entrySet()) {
                 Pair<HostVO, Integer> hp = hostEntry.getValue();
@@ -76,10 +76,19 @@ import com.google.common.base.Strings;

 public class KubernetesClusterStartWorker extends KubernetesClusterResourceModifierActionWorker {

+    private KubernetesSupportedVersion kubernetesClusterVersion;
+
     public KubernetesClusterStartWorker(final KubernetesCluster kubernetesCluster, final KubernetesClusterManagerImpl clusterManager) {
         super(kubernetesCluster, clusterManager);
     }

+    public KubernetesSupportedVersion getKubernetesClusterVersion() {
+        if (kubernetesClusterVersion == null) {
+            kubernetesClusterVersion = kubernetesSupportedVersionDao.findById(kubernetesCluster.getKubernetesVersionId());
+        }
+        return kubernetesClusterVersion;
+    }
+
     private Pair<String, Map<Long, Network.IpAddresses>> getKubernetesMasterIpAddresses(final DataCenter zone, final Network network, final Account account) throws InsufficientAddressCapacityException {
         String masterIp = null;
         Map<Long, Network.IpAddresses> requestedIps = null;
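Note: getKubernetesClusterVersion() memoizes the version row, so the call sites touched below (isKubernetesVersionSupportsHA() and the kubeadm init-args builder) trigger at most one DAO lookup per worker instead of one per caller. A minimal self-contained sketch of the pattern, assuming the worker runs single-threaded, as the unsynchronized null check implies:

    // Self-contained sketch of the lazy, single-lookup getter pattern.
    public class LazyLookupSketch {
        interface VersionDao { String findById(long id); } // stand-in for the real DAO

        private final VersionDao dao;
        private final long versionId;
        private String cached; // null until first use

        LazyLookupSketch(VersionDao dao, long versionId) {
            this.dao = dao;
            this.versionId = versionId;
        }

        String get() {
            if (cached == null) {
                cached = dao.findById(versionId); // DAO cost paid once
            }
            return cached;                        // reused afterwards
        }
    }
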
@@ -105,7 +114,7 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif

     private boolean isKubernetesVersionSupportsHA() {
         boolean haSupported = false;
-        final KubernetesSupportedVersion version = kubernetesSupportedVersionDao.findById(kubernetesCluster.getKubernetesVersionId());
+        KubernetesSupportedVersion version = getKubernetesClusterVersion();
         if (version != null) {
             try {
                 if (KubernetesVersionManagerImpl.compareSemanticVersions(version.getSemanticVersion(), KubernetesClusterService.MIN_KUBERNETES_VERSION_HA_SUPPORT) >= 0) {
@@ -161,6 +170,7 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif
                 KubernetesClusterUtil.generateClusterHACertificateKey(kubernetesCluster));
         }
         initArgs += String.format("--apiserver-cert-extra-sans=%s", serverIp);
+        initArgs += String.format(" --kubernetes-version=%s", getKubernetesClusterVersion().getSemanticVersion());
         k8sMasterConfig = k8sMasterConfig.replace(clusterInitArgsKey, initArgs);
         k8sMasterConfig = k8sMasterConfig.replace(ejectIsoKey, String.valueOf(ejectIso));
         return k8sMasterConfig;
@@ -181,10 +181,10 @@ public class KubernetesVersionManagerImpl extends ManagerBase implements Kuberne
             throw new IllegalArgumentException(String.format("Invalid version comparision with versions %s, %s", v1, v2));
         }
         if(!isSemanticVersion(v1)) {
-            throw new IllegalArgumentException(String.format("Invalid version format, %s", v1));
+            throw new IllegalArgumentException(String.format("Invalid version format, %s. Semantic version should be specified in MAJOR.MINOR.PATCH format", v1));
         }
         if(!isSemanticVersion(v2)) {
-            throw new IllegalArgumentException(String.format("Invalid version format, %s", v2));
+            throw new IllegalArgumentException(String.format("Invalid version format, %s. Semantic version should be specified in MAJOR.MINOR.PATCH format", v2));
         }
         String[] thisParts = v1.split("\\.");
         String[] thatParts = v2.split("\\.");
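Note: for context on the improved messages above, compareSemanticVersions does a numeric, component-wise comparison of the dot-separated parts split out at the end of the hunk. A self-contained sketch of that comparison, assuming exactly three integer components (the real method validates the format first and is not reproduced here):

    // Self-contained sketch of a MAJOR.MINOR.PATCH comparison.
    public class SemverSketch {
        static int compareSemantic(String v1, String v2) {
            String[] a = v1.split("\\.");
            String[] b = v2.split("\\.");
            for (int i = 0; i < 3; i++) {
                int cmp = Integer.compare(Integer.parseInt(a[i]), Integer.parseInt(b[i]));
                if (cmp != 0) {
                    return cmp;
                }
            }
            return 0;
        }

        public static void main(String[] args) {
            System.out.println(compareSemantic("1.16.0", "1.15.0") > 0); // true
        }
    }
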
@@ -287,10 +287,10 @@ public class KubernetesVersionManagerImpl extends ManagerBase implements Kuberne
         final Integer minimumCpu = cmd.getMinimumCpu();
         final Integer minimumRamSize = cmd.getMinimumRamSize();
         if (minimumCpu == null || minimumCpu < KubernetesClusterService.MIN_KUBERNETES_CLUSTER_NODE_CPU) {
-            throw new InvalidParameterValueException(String.format("Invalid value for %s parameter", ApiConstants.MIN_CPU_NUMBER));
+            throw new InvalidParameterValueException(String.format("Invalid value for %s parameter. Minimum %d vCPUs required.", ApiConstants.MIN_CPU_NUMBER, KubernetesClusterService.MIN_KUBERNETES_CLUSTER_NODE_CPU));
         }
         if (minimumRamSize == null || minimumRamSize < KubernetesClusterService.MIN_KUBERNETES_CLUSTER_NODE_RAM_SIZE) {
-            throw new InvalidParameterValueException(String.format("Invalid value for %s parameter", ApiConstants.MIN_MEMORY));
+            throw new InvalidParameterValueException(String.format("Invalid value for %s parameter. Minimum %dMB memory required", ApiConstants.MIN_MEMORY, KubernetesClusterService.MIN_KUBERNETES_CLUSTER_NODE_RAM_SIZE));
         }
         if (compareSemanticVersions(semanticVersion, MIN_KUBERNETES_VERSION) < 0) {
             throw new InvalidParameterValueException(String.format("New supported Kubernetes version cannot be added as %s is minimum version supported by Kubernetes Service", MIN_KUBERNETES_VERSION));
@@ -61,7 +61,7 @@ public class AddKubernetesSupportedVersionCmd extends BaseCmd implements AdminCm
     private String name;

     @Parameter(name = ApiConstants.SEMANTIC_VERSION, type = CommandType.STRING, required = true,
-            description = "the semantic version of the Kubernetes version")
+            description = "the semantic version of the Kubernetes version. It needs to be specified in MAJOR.MINOR.PATCH format")
     private String semanticVersion;

     @Parameter(name = ApiConstants.ZONE_ID, type = CommandType.UUID,
@@ -83,7 +83,14 @@ public class DeleteKubernetesSupportedVersionCmd extends BaseAsyncCmd implements

     @Override
     public String getEventDescription() {
-        return "Deleting Kubernetes supported version " + getId();
+        String description = "Deleting Kubernetes supported version";
+        KubernetesSupportedVersion version = _entityMgr.findById(KubernetesSupportedVersion.class, getId());
+        if (version != null) {
+            description += String.format(" ID: %s", version.getUuid());
+        } else {
+            description += String.format(" ID: %d", getId());
+        }
+        return description;
     }

     /////////////////////////////////////////////////////
@@ -259,7 +259,7 @@ public class CreateKubernetesClusterCmd extends BaseAsyncCreateCmd {

     @Override
     public String getEventDescription() {
-        return "creating Kubernetes cluster. Cluster Id: " + getEntityId();
+        return "Creating Kubernetes cluster. Cluster Id: " + getEntityId();
     }

     @Override
@@ -102,8 +102,14 @@ public class DeleteKubernetesClusterCmd extends BaseAsyncCmd {

     @Override
     public String getEventDescription() {
+        String description = "Deleting Kubernetes cluster";
         KubernetesCluster cluster = _entityMgr.findById(KubernetesCluster.class, getId());
-        return String.format("Deleting Kubernetes cluster ID: %s", cluster.getUuid());
+        if (cluster != null) {
+            description += String.format(" ID: %s", cluster.getUuid());
+        } else {
+            description += String.format(" ID: %d", getId());
+        }
+        return description;
     }

 }
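Note: the same guarded-description rewrite is applied to the Scale, Start, Stop, and Upgrade commands that follow. The old one-liners dereferenced the lookup result unconditionally, so building the event description for a stale or invalid cluster ID threw a NullPointerException. A self-contained sketch of the hazard and the fix, with a hypothetical map standing in for _entityMgr.findById:

    import java.util.Map;

    // Hypothetical stand-in for the entity lookup; get() returns null when absent.
    public class EventDescriptionSketch {
        record Cluster(String uuid) {}

        static String unsafe(Map<Long, Cluster> db, long id) {
            Cluster cluster = db.get(id);
            return String.format("Deleting Kubernetes cluster ID: %s", cluster.uuid()); // NPE when absent
        }

        static String safe(Map<Long, Cluster> db, long id) {
            Cluster cluster = db.get(id);
            String description = "Deleting Kubernetes cluster";
            if (cluster != null) {
                description += String.format(" ID: %s", cluster.uuid()); // prefer the UUID
            } else {
                description += String.format(" ID: %d", id);             // fall back to the raw ID
            }
            return description;
        }

        public static void main(String[] args) {
            System.out.println(safe(Map.of(), 42L)); // "Deleting Kubernetes cluster ID: 42"
        }
    }
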
@@ -94,8 +94,14 @@ public class ScaleKubernetesClusterCmd extends BaseAsyncCmd {

     @Override
     public String getEventDescription() {
+        String description = "Scaling Kubernetes cluster";
         KubernetesCluster cluster = _entityMgr.findById(KubernetesCluster.class, getId());
-        return String.format("Scaling Kubernetes cluster ID: %s", cluster.getUuid());
+        if (cluster != null) {
+            description += String.format(" ID: %s", cluster.getUuid());
+        } else {
+            description += String.format(" ID: %d", getId());
+        }
+        return description;
     }

     @Override
@@ -73,8 +73,14 @@ public class StartKubernetesClusterCmd extends BaseAsyncCmd {

     @Override
     public String getEventDescription() {
+        String description = "Starting Kubernetes cluster";
         KubernetesCluster cluster = _entityMgr.findById(KubernetesCluster.class, getId());
-        return String.format("Starting Kubernetes cluster ID: %s", cluster.getUuid());
+        if (cluster != null) {
+            description += String.format(" ID: %s", cluster.getUuid());
+        } else {
+            description += String.format(" ID: %d", getId());
+        }
+        return description;
     }

     @Override
@@ -74,8 +74,14 @@ public class StopKubernetesClusterCmd extends BaseAsyncCmd {

     @Override
     public String getEventDescription() {
+        String description = "Stopping Kubernetes cluster";
         KubernetesCluster cluster = _entityMgr.findById(KubernetesCluster.class, getId());
-        return String.format("Stopping Kubernetes cluster ID: %s", cluster.getUuid());
+        if (cluster != null) {
+            description += String.format(" ID: %s", cluster.getUuid());
+        } else {
+            description += String.format(" ID: %d", getId());
+        }
+        return description;
     }

     @Override
@@ -84,8 +84,14 @@ public class UpgradeKubernetesClusterCmd extends BaseAsyncCmd {

     @Override
     public String getEventDescription() {
+        String description = "Upgrading Kubernetes cluster";
         KubernetesCluster cluster = _entityMgr.findById(KubernetesCluster.class, getId());
-        return String.format("Upgrading Kubernetes cluster ID: %s", cluster.getUuid());
+        if (cluster != null) {
+            description += String.format(" ID: %s", cluster.getUuid());
+        } else {
+            description += String.format(" ID: %d", getId());
+        }
+        return description;
     }

     @Override
@@ -86,7 +86,7 @@ if [ $? -ne 0 ]; then
     fi
 fi
 mkdir -p "${working_dir}/docker"
-output=`${k8s_dir}/kubeadm config images list`
+output=`${k8s_dir}/kubeadm config images list --kubernetes-version=${RELEASE}`
 while read -r line; do
   echo "Downloading docker image $line ---"
   sudo docker pull "$line"
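Note: without an explicit version, `kubeadm config images list` typically resolves image tags for kubeadm's own default target release, which may not match the binaries staged under `${k8s_dir}`. Pinning `--kubernetes-version=${RELEASE}` keeps the pre-pulled control-plane images in lockstep with the release the ISO actually ships; that is the apparent intent of the change (a hedged reading, since the rest of the script is not shown).
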
@@ -89,10 +89,12 @@ import com.cloud.exception.AffinityConflictException;
 import com.cloud.exception.ConnectionException;
 import com.cloud.exception.InsufficientServerCapacityException;
 import com.cloud.gpu.GPU;
+import com.cloud.host.DetailVO;
 import com.cloud.host.Host;
 import com.cloud.host.HostVO;
 import com.cloud.host.Status;
 import com.cloud.host.dao.HostDao;
+import com.cloud.host.dao.HostDetailsDao;
 import com.cloud.hypervisor.Hypervisor.HypervisorType;
 import com.cloud.offering.ServiceOffering;
 import com.cloud.org.Cluster;
@@ -102,6 +104,7 @@ import com.cloud.resource.ResourceState;
 import com.cloud.service.ServiceOfferingDetailsVO;
 import com.cloud.service.dao.ServiceOfferingDetailsDao;
 import com.cloud.storage.DiskOfferingVO;
+import com.cloud.storage.GuestOSVO;
 import com.cloud.storage.ScopeType;
 import com.cloud.storage.Storage;
 import com.cloud.storage.StorageManager;
@@ -167,6 +170,8 @@ StateListener<State, VirtualMachine.Event, VirtualMachine> {
     private long _hostReservationReleasePeriod = 60L * 60L * 1000L; // one hour by default
     @Inject
     protected VMReservationDao _reservationDao;
+    @Inject
+    HostDetailsDao _hostDetailsDao;

     private static final long INITIAL_RESERVATION_RELEASE_CHECKER_DELAY = 30L * 1000L; // thirty seconds expressed in milliseconds
     protected long _nodeId = -1;
@@ -413,14 +418,7 @@ StateListener<State, VirtualMachine.Event, VirtualMachine> {
             }
         } else {
             if (host.getStatus() == Status.Up && host.getResourceState() == ResourceState.Enabled) {
-                boolean hostTagsMatch = true;
-                if(offering.getHostTag() != null){
-                    _hostDao.loadHostTags(host);
-                    if (!(host.getHostTags() != null && host.getHostTags().contains(offering.getHostTag()))) {
-                        hostTagsMatch = false;
-                    }
-                }
-                if (hostTagsMatch) {
+                if (checkVmProfileAndHost(vmProfile, host)) {
                     long cluster_id = host.getClusterId();
                     ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id,
                             "cpuOvercommitRatio");
@@ -491,8 +489,6 @@ StateListener<State, VirtualMachine.Event, VirtualMachine> {
                     } else {
                         s_logger.debug("The last host of this VM does not have enough capacity");
                     }
-                } else {
-                    s_logger.debug("Service Offering host tag does not match the last host of this VM");
                 }
             } else {
                 s_logger.debug("The last host of this VM is not UP or is not enabled, host status is: " + host.getStatus().name() + ", host resource state is: " +
@@ -571,6 +567,31 @@ StateListener<State, VirtualMachine.Event, VirtualMachine> {
         return null;
     }

+    private boolean checkVmProfileAndHost(final VirtualMachineProfile vmProfile, final HostVO host) {
+        ServiceOffering offering = vmProfile.getServiceOffering();
+        if (offering.getHostTag() != null) {
+            _hostDao.loadHostTags(host);
+            if (!(host.getHostTags() != null && host.getHostTags().contains(offering.getHostTag()))) {
+                s_logger.debug("Service Offering host tag does not match the last host of this VM");
+                return false;
+            }
+        }
+        long guestOSId = vmProfile.getTemplate().getGuestOSId();
+        GuestOSVO guestOS = _guestOSDao.findById(guestOSId);
+        if (guestOS != null) {
+            long guestOSCategoryId = guestOS.getCategoryId();
+            DetailVO hostDetail = _hostDetailsDao.findDetail(host.getId(), "guest.os.category.id");
+            if (hostDetail != null) {
+                String guestOSCategoryIdString = hostDetail.getValue();
+                if (String.valueOf(guestOSCategoryId) != guestOSCategoryIdString) {
+                    s_logger.debug("The last host has different guest.os.category.id than guest os category of VM, skipping");
+                    return false;
+                }
+            }
+        }
+        return true;
+    }
+
     @Override
     public void checkForNonDedicatedResources(VirtualMachineProfile vmProfile, DataCenter dc, ExcludeList avoids) {
         boolean isExplicit = false;
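Note: one caveat worth flagging in the extracted checkVmProfileAndHost above: `String.valueOf(guestOSCategoryId) != guestOSCategoryIdString` compares object references, and String.valueOf always builds a fresh object, so the guard can reject the last host even when the category IDs match. A self-contained demonstration of the pitfall (the idiomatic content check would use equals; this is commentary, not part of the commit):

    public class StringCompareSketch {
        public static void main(String[] args) {
            long guestOSCategoryId = 7L;
            String fromHostDetail = "7"; // hypothetical value read from host details

            // Reference comparison: a freshly built "7" is a different object.
            System.out.println(String.valueOf(guestOSCategoryId) != fromHostDetail);      // true

            // Content comparison: what the guard presumably intends.
            System.out.println(String.valueOf(guestOSCategoryId).equals(fromHostDetail)); // true
        }
    }
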
@@ -19,29 +19,40 @@
 #Import Local Modules
 from marvin.cloudstackTestCase import cloudstackTestCase, unittest
 from marvin.cloudstackAPI import (listInfrastructure,
+                                  listTemplates,
                                   listKubernetesSupportedVersions,
                                   addKubernetesSupportedVersion,
                                   deleteKubernetesSupportedVersion,
                                   listKubernetesClusters,
                                   createKubernetesCluster,
                                   stopKubernetesCluster,
+                                  startKubernetesCluster,
                                   deleteKubernetesCluster,
                                   upgradeKubernetesCluster,
-                                  scaleKubernetesCluster)
+                                  scaleKubernetesCluster,
+                                  destroyVirtualMachine,
+                                  deleteNetwork)
 from marvin.cloudstackException import CloudstackAPIException
-from marvin.codes import FAILED
+from marvin.codes import PASS, FAILED
 from marvin.lib.base import (Template,
                              ServiceOffering,
                              Account,
                              Configurations)
 from marvin.lib.utils import (cleanup_resources,
                               validateList,
                               random_gen)
-from marvin.lib.common import (get_zone)
+from marvin.lib.common import (get_zone,
+                               get_domain)
 from marvin.sshClient import SshClient
 from nose.plugins.attrib import attr
+from marvin.lib.decoratorGenerators import skipTestIf

 import time

 _multiprocess_shared_ = True

+k8s_cluster = None
+
 class TestKubernetesCluster(cloudstackTestCase):

     @classmethod
@@ -54,8 +65,14 @@ class TestKubernetesCluster(cloudstackTestCase):
         cls.mgtSvrDetails = cls.config.__dict__["mgtSvr"][0].__dict__
         cls.cks_template_name_key = "cloud.kubernetes.cluster.template.name." + cls.hypervisor.lower()

+        cls.hypervisorNotSupported = False
+        if cls.hypervisor.lower() not in ["kvm", "vmware", "xenserver"]:
+            cls.hypervisorNotSupported = True
         cls.setup_failed = False
+        cls._cleanup = []
+        cls.kubernetes_version_ids = []

+        if cls.hypervisorNotSupported == False:
             cls.initial_configuration_cks_enabled = Configurations.list(cls.apiclient,
                                                                         name="cloud.kubernetes.service.enabled")[0].value
             if cls.initial_configuration_cks_enabled not in ["true", True]:
@@ -69,96 +86,68 @@ class TestKubernetesCluster(cloudstackTestCase):
             cls.initial_configuration_cks_template_name = None
             cls.cks_service_offering = None

-            cls.kubernetes_version_ids = []
             if cls.setup_failed == False:
                 try:
-                    cls.kubernetes_version_1 = cls.addKubernetesSupportedVersion('1.14.9', 'http://download.cloudstack.org/cks/setup-1.14.9.iso')
+                    cls.kubernetes_version_1 = cls.addKubernetesSupportedVersion(cls.services["cks_kubernetes_versions"]["1.14.9"])
                     cls.kubernetes_version_ids.append(cls.kubernetes_version_1.id)
                 except Exception as e:
                     cls.setup_failed = True
-                    cls.debug("Failed to get Kubernetes version ISO in ready state, http://download.cloudstack.org/cks/setup-1.14.9.iso, %s" % e)
+                    cls.debug("Failed to get Kubernetes version ISO in ready state, version=%s, url=%s, %s" %
+                              (cls.services["cks_kubernetes_versions"]["1.14.9"]["semanticversion"], cls.services["cks_kubernetes_versions"]["1.14.9"]["url"], e))
             if cls.setup_failed == False:
                 try:
-                    cls.kubernetes_version_2 = cls.addKubernetesSupportedVersion('1.15.0', 'http://download.cloudstack.org/cks/setup-1.15.0.iso')
+                    cls.kubernetes_version_2 = cls.addKubernetesSupportedVersion(cls.services["cks_kubernetes_versions"]["1.15.0"])
                     cls.kubernetes_version_ids.append(cls.kubernetes_version_2.id)
                 except Exception as e:
                     cls.setup_failed = True
-                    cls.debug("Failed to get Kubernetes version ISO in ready state, http://download.cloudstack.org/cks/setup-1.15.0.iso, %s" % e)
+                    cls.debug("Failed to get Kubernetes version ISO in ready state, version=%s, url=%s, %s" %
+                              (cls.services["cks_kubernetes_versions"]["1.15.0"]["semanticversion"], cls.services["cks_kubernetes_versions"]["1.15.0"]["url"], e))
             if cls.setup_failed == False:
                 try:
-                    cls.kubernetes_version_3 = cls.addKubernetesSupportedVersion('1.16.0', 'http://download.cloudstack.org/cks/setup-1.16.0.iso')
+                    cls.kubernetes_version_3 = cls.addKubernetesSupportedVersion(cls.services["cks_kubernetes_versions"]["1.16.0"])
                     cls.kubernetes_version_ids.append(cls.kubernetes_version_3.id)
                 except Exception as e:
                     cls.setup_failed = True
-                    cls.debug("Failed to get Kubernetes version ISO in ready state, http://download.cloudstack.org/cks/setup-1.16.0.iso, %s" % e)
+                    cls.debug("Failed to get Kubernetes version ISO in ready state, version=%s, url=%s, %s" %
+                              (cls.services["cks_kubernetes_versions"]["1.16.0"]["semanticversion"], cls.services["cks_kubernetes_versions"]["1.16.0"]["url"], e))
             if cls.setup_failed == False:
                 try:
-                    cls.kubernetes_version_4 = cls.addKubernetesSupportedVersion('1.16.3', 'http://download.cloudstack.org/cks/setup-1.16.3.iso')
+                    cls.kubernetes_version_4 = cls.addKubernetesSupportedVersion(cls.services["cks_kubernetes_versions"]["1.16.3"])
                     cls.kubernetes_version_ids.append(cls.kubernetes_version_4.id)
                 except Exception as e:
                     cls.setup_failed = True
-                    cls.debug("Failed to get Kubernetes version ISO in ready state, http://download.cloudstack.org/cks/setup-1.16.3.iso, %s" % e)
+                    cls.debug("Failed to get Kubernetes version ISO in ready state, version=%s, url=%s, %s" %
+                              (cls.services["cks_kubernetes_versions"]["1.16.3"]["semanticversion"], cls.services["cks_kubernetes_versions"]["1.16.3"]["url"], e))

-            cks_template_data = {
-                "name": "Kubernetes-Service-Template",
-                "displaytext": "Kubernetes-Service-Template",
-                "format": "qcow2",
-                "hypervisor": "kvm",
-                "ostype": "CoreOS",
-                "url": "http://dl.openvm.eu/cloudstack/coreos/x86_64/coreos_production_cloudstack_image-kvm.qcow2.bz2",
-                "ispublic": "True",
-                "isextractable": "True"
-            }
-            cks_template_data_details = []
-            if cls.hypervisor.lower() == "vmware":
-                cks_template_data["url"] = "http://dl.openvm.eu/cloudstack/coreos/x86_64/coreos_production_cloudstack_image-vmware.ova"
-                cks_template_data["format"] = "OVA"
-                cks_template_data_details = [{"keyboard":"us","nicAdapter":"Vmxnet3","rootDiskController":"pvscsi"}]
-            elif cls.hypervisor.lower() == "xenserver":
-                cks_template_data["url"] = "http://dl.openvm.eu/cloudstack/coreos/x86_64/coreos_production_cloudstack_image-xen.vhd.bz2"
-                cks_template_data["format"] = "VHD"
-            elif cls.hypervisor.lower() == "kvm":
-                cks_template_data["requireshvm"] = "True"
             if cls.setup_failed == False:
-                cls.cks_template = Template.register(
-                    cls.apiclient,
-                    cks_template_data,
-                    zoneid=cls.zone.id,
-                    hypervisor=cls.hypervisor,
-                    details=cks_template_data_details
-                )
-                cls.debug("Waiting for CKS template with ID %s to be ready" % cls.cks_template.id)
-                try:
-                    cls.waitForTemplateReadyState(cls.cks_template.id)
-                except Exception as e:
-                    cls.setup_failed = True
-                    cls.debug("Failed to get CKS template in ready state, {}, {}".format(cks_template_data["url"], e))
-                else:
-                    cls._cleanup.append(cls.cks_template)
+                cls.cks_template = cls.getKubernetesTemplate()
+                if cls.cks_template == FAILED:
+                    assert False, "getKubernetesTemplate() failed to return template for hypervisor %s" % cls.hypervisor

             if cls.setup_failed == False:
                 cls.initial_configuration_cks_template_name = Configurations.list(cls.apiclient,
                                                                                   name=cls.cks_template_name_key)[0].value
                 Configurations.update(cls.apiclient,
                                       cls.cks_template_name_key,
                                       cls.cks_template.name)

-            cks_offering_data = {
-                "name": "CKS-Instance",
-                "displaytext": "CKS Instance",
-                "cpunumber": 2,
-                "cpuspeed": 1000,
-                "memory": 2048,
-            }
-            cks_offering_data["name"] = cks_offering_data["name"] + '-' + random_gen()
             if cls.setup_failed == False:
+                cks_offering_data = cls.services["cks_service_offering"]
+                cks_offering_data["name"] = 'CKS-Instance-' + random_gen()
                 cls.cks_service_offering = ServiceOffering.create(
                     cls.apiclient,
                     cks_offering_data
                 )

-            cls._cleanup = []
-            if cls.cks_template != None:
-                cls._cleanup.append(cls.cks_template)
-            if cls.cks_service_offering != None:
-                cls._cleanup.append(cls.cks_service_offering)
+            cls.domain = get_domain(cls.apiclient)
+            cls.account = Account.create(
+                cls.apiclient,
+                cls.services["account"],
+                domainid=cls.domain.id
+            )
+            cls._cleanup.append(cls.account)
             return

     @classmethod
@@ -173,14 +162,12 @@ class TestKubernetesCluster(cloudstackTestCase):
             cls.debug("Error: Exception during cleanup for added Kubernetes supported versions: %s" % e)
         try:
             # Restore original CKS template
-            if cls.initial_configuration_cks_template_name != None:
+            if cls.cks_template != None:
+                cls.cks_template.delete(cls.apiclient)
+            if cls.hypervisorNotSupported == False and cls.initial_configuration_cks_template_name != None:
                 Configurations.update(cls.apiclient,
                                       cls.cks_template_name_key,
                                       cls.initial_configuration_cks_template_name)
-            # Delete created CKS template
-            if cls.setup_failed == False and cls.cks_template != None:
-                cls.cks_template.delete(cls.apiclient,
-                                        cls.zone.id)
             # Restore CKS enabled
             if cls.initial_configuration_cks_enabled not in ["true", True]:
                 cls.debug("Restoring Kubernetes Service enabled value")
@@ -231,28 +218,39 @@ class TestKubernetesCluster(cloudstackTestCase):
         return False

     @classmethod
-    def waitForTemplateReadyState(cls, template_id, retries=30, interval=60):
-        """Check if template download will finish"""
-        while retries > 0:
-            time.sleep(interval)
-            template_response = Template.list(
-                cls.apiclient,
-                id=template_id,
-                zoneid=cls.zone.id,
-                templatefilter='self'
-            )
-            if isinstance(template_response, list):
-                template = template_response[0]
-                if not hasattr(template, 'status') or not template or not template.status:
-                    retries = retries - 1
-                    continue
-                if 'Failed' == template.status:
-                    raise Exception("Failed to download template: status - %s" % template.status)
-                elif template.status == 'Download Complete' and template.isready:
-                    return
-            retries = retries - 1
-        raise Exception("Template download timed out")
+    def getKubernetesTemplate(cls, cks_templates=None):
+
+        if cks_templates is None:
+            cks_templates = cls.services["cks_templates"]
+
+        hypervisor = cls.hypervisor.lower()
+
+        if hypervisor not in cks_templates.keys():
+            cls.debug("Provided hypervisor has no CKS template")
+            return FAILED
+
+        cks_template = cks_templates[hypervisor]
+
+        cmd = listTemplates.listTemplatesCmd()
+        cmd.name = cks_template['name']
+        cmd.templatefilter = 'all'
+        cmd.zoneid = cls.zone.id
+        cmd.hypervisor = hypervisor
+        templates = cls.apiclient.listTemplates(cmd)
+
+        if validateList(templates)[0] != PASS:
+            details = None
+            if hypervisor in ["vmware"] and "details" in cks_template:
+                details = cks_template["details"]
+            template = Template.register(cls.apiclient, cks_template, zoneid=cls.zone.id, hypervisor=hypervisor.lower(), randomize_name=False, details=details)
+            template.download(cls.apiclient)
+            return template
+
+        for template in templates:
+            if template.isready and template.ispublic:
+                return Template(template.__dict__)
+
+        return FAILED

     @classmethod
     def waitForKubernetesSupportedVersionIsoReadyState(cls, version_id, retries=30, interval=60):
@@ -279,13 +277,13 @@ class TestKubernetesCluster(cloudstackTestCase):
         return versionResponse[0]

     @classmethod
-    def addKubernetesSupportedVersion(cls, semantic_version, iso_url):
+    def addKubernetesSupportedVersion(cls, version_service):
         addKubernetesSupportedVersionCmd = addKubernetesSupportedVersion.addKubernetesSupportedVersionCmd()
-        addKubernetesSupportedVersionCmd.semanticversion = semantic_version
-        addKubernetesSupportedVersionCmd.name = 'v' + semantic_version + '-' + random_gen()
-        addKubernetesSupportedVersionCmd.url = iso_url
-        addKubernetesSupportedVersionCmd.mincpunumber = 2
-        addKubernetesSupportedVersionCmd.minmemory = 2048
+        addKubernetesSupportedVersionCmd.semanticversion = version_service["semanticversion"]
+        addKubernetesSupportedVersionCmd.name = 'v' + version_service["semanticversion"] + '-' + random_gen()
+        addKubernetesSupportedVersionCmd.url = version_service["url"]
+        addKubernetesSupportedVersionCmd.mincpunumber = version_service["mincpunumber"]
+        addKubernetesSupportedVersionCmd.minmemory = version_service["minmemory"]
         kubernetes_version = cls.apiclient.addKubernetesSupportedVersion(addKubernetesSupportedVersionCmd)
         cls.debug("Waiting for Kubernetes version with ID %s to be ready" % kubernetes_version.id)
         cls.waitForKubernetesSupportedVersionIsoReadyState(kubernetes_version.id)
@@ -308,14 +306,13 @@ class TestKubernetesCluster(cloudstackTestCase):

     def tearDown(self):
         try:
             #Clean up, terminate the created templates
             cleanup_resources(self.apiclient, self.cleanup)
-
         except Exception as e:
             raise Exception("Warning: Exception during cleanup : %s" % e)
         return

     @attr(tags=["advanced", "smoke"], required_hardware="true")
+    @skipTestIf("hypervisorNotSupported")
     def test_01_deploy_kubernetes_cluster(self):
         """Test to deploy a new Kubernetes cluster
@@ -324,75 +321,156 @@ class TestKubernetesCluster(cloudstackTestCase):
         # 2. The Cloud Database contains the valid information
         # 3. stopKubernetesCluster should stop the cluster
         """
-        if self.hypervisor.lower() not in ["kvm", "vmware", "xenserver"]:
-            self.skipTest("CKS not supported for hypervisor: %s" % self.hypervisor.lower())
         if self.setup_failed == True:
-            self.skipTest("Setup incomplete")
-        name = 'testcluster-' + random_gen()
-        self.debug("Creating for Kubernetes cluster with name %s" % name)
+            self.fail("Setup incomplete")
+        global k8s_cluster
+        k8s_cluster = self.getValidKubernetesCluster()

-        cluster_response = self.createKubernetesCluster(name, self.kubernetes_version_2.id)
+        self.debug("Kubernetes cluster with ID: %s successfully deployed, now stopping it" % k8s_cluster.id)

-        self.verifyKubernetesCluster(cluster_response, name, self.kubernetes_version_2.id)
+        self.stopAndVerifyKubernetesCluster(k8s_cluster.id)

-        self.debug("Kubernetes cluster with ID: %s successfully deployed, now stopping it" % cluster_response.id)
+        self.debug("Kubernetes cluster with ID: %s successfully stopped, now starting it again" % k8s_cluster.id)

-        self.stopAndVerifyKubernetesCluster(cluster_response.id)
-
-        self.debug("Kubernetes cluster with ID: %s successfully stopped, now deleting it" % cluster_response.id)
-
-        self.deleteAndVerifyKubernetesCluster(cluster_response.id)
-
-        self.debug("Kubernetes cluster with ID: %s successfully deleted" % cluster_response.id)
+        try:
+            k8s_cluster = self.startKubernetesCluster(k8s_cluster.id)
+        except Exception as e:
+            self.deleteKubernetesClusterAndVerify(k8s_cluster.id, False, True)
+            self.fail("Failed to start Kubernetes cluster due to: %s" % e)
+
+        self.verifyKubernetesClusterState(k8s_cluster, 'Running')

         return

     @attr(tags=["advanced", "smoke"], required_hardware="true")
-    def test_02_deploy_kubernetes_ha_cluster(self):
-        """Test to deploy a new Kubernetes cluster
+    @skipTestIf("hypervisorNotSupported")
+    def test_02_invalid_upgrade_kubernetes_cluster(self):
+        """Test to check for failure while tying to upgrade a Kubernetes cluster to a lower version

         # Validate the following:
-        # 1. createKubernetesCluster should return valid info for new cluster
-        # 2. The Cloud Database contains the valid information
+        # 1. upgradeKubernetesCluster should fail
         """
-        if self.hypervisor.lower() not in ["kvm", "vmware", "xenserver"]:
-            self.skipTest("CKS not supported for hypervisor: %s" % self.hypervisor.lower())
         if self.setup_failed == True:
-            self.skipTest("Setup incomplete")
-        name = 'testcluster-' + random_gen()
-        self.debug("Creating for Kubernetes cluster with name %s" % name)
-
-        cluster_response = self.createKubernetesCluster(name, self.kubernetes_version_3.id, 1, 2)
-
-        self.verifyKubernetesCluster(cluster_response, name, self.kubernetes_version_3.id, 1, 2)
-
-        self.debug("Kubernetes cluster with ID: %s successfully deployed, now deleting it" % cluster_response.id)
-
-        self.deleteAndVerifyKubernetesCluster(cluster_response.id)
-
-        self.debug("Kubernetes cluster with ID: %s successfully deleted" % cluster_response.id)
+            self.fail("Setup incomplete")
+        global k8s_cluster
+        k8s_cluster = self.getValidKubernetesCluster()
+
+        self.debug("Upgrading Kubernetes cluster with ID: %s to a lower version" % k8s_cluster.id)
+
+        try:
+            k8s_cluster = self.upgradeKubernetesCluster(k8s_cluster.id, self.kubernetes_version_1.id)
+            self.debug("Invalid CKS Kubernetes HA cluster deployed with ID: %s. Deleting it and failing test." % kubernetes_version_1.id)
+            self.deleteKubernetesClusterAndVerify(k8s_cluster.id, False, True)
+            self.fail("Kubernetes cluster upgraded to a lower Kubernetes supported version. Must be an error.")
+        except Exception as e:
+            self.debug("Upgrading Kubernetes cluster with invalid Kubernetes supported version check successful, API failure: %s" % e)

         return

     @attr(tags=["advanced", "smoke"], required_hardware="true")
-    def test_03_deploy_invalid_kubernetes_ha_cluster(self):
-        """Test to deploy a new Kubernetes cluster
+    @skipTestIf("hypervisorNotSupported")
+    def test_03_deploy_and_upgrade_kubernetes_cluster(self):
+        """Test to deploy a new Kubernetes cluster and upgrade it to newer version

         # Validate the following:
-        # 1. createKubernetesCluster should return valid info for new cluster
-        # 2. The Cloud Database contains the valid information
+        # 1. upgradeKubernetesCluster should return valid info for the cluster
         """
-        if self.hypervisor.lower() not in ["kvm", "vmware", "xenserver"]:
-            self.skipTest("CKS not supported for hypervisor: %s" % self.hypervisor.lower())
         if self.setup_failed == True:
-            self.skipTest("Setup incomplete")
-        name = 'testcluster-' + random_gen()
-        self.debug("Creating for Kubernetes cluster with name %s" % name)
+            self.fail("Setup incomplete")
+        global k8s_cluster
+        k8s_cluster = self.getValidKubernetesCluster()
+        time.sleep(self.services["sleep"])
+
+        self.debug("Upgrading Kubernetes cluster with ID: %s" % k8s_cluster.id)

         try:
+            k8s_cluster = self.upgradeKubernetesCluster(k8s_cluster.id, self.kubernetes_version_3.id)
         except Exception as e:
+            self.deleteKubernetesClusterAndVerify(k8s_cluster.id, False, True)
             self.fail("Failed to upgrade Kubernetes cluster due to: %s" % e)

+        self.verifyKubernetesClusterUpgrade(k8s_cluster, self.kubernetes_version_3.id)

         return

     @attr(tags=["advanced", "smoke"], required_hardware="true")
+    @skipTestIf("hypervisorNotSupported")
+    def test_04_deploy_and_scale_kubernetes_cluster(self):
+        """Test to deploy a new Kubernetes cluster and check for failure while tying to scale it
+
+        # Validate the following:
+        # 1. scaleKubernetesCluster should return valid info for the cluster when it is scaled up
+        # 2. scaleKubernetesCluster should return valid info for the cluster when it is scaled down
+        """
+        if self.setup_failed == True:
+            self.fail("Setup incomplete")
+        global k8s_cluster
+        k8s_cluster = self.getValidKubernetesCluster()
+
+        self.debug("Upscaling Kubernetes cluster with ID: %s" % k8s_cluster.id)
+
+        try:
+            k8s_cluster = self.scaleKubernetesCluster(k8s_cluster.id, 2)
+        except Exception as e:
+            self.deleteKubernetesClusterAndVerify(k8s_cluster.id, False, True)
+            self.fail("Failed to upscale Kubernetes cluster due to: %s" % e)
+
+        self.verifyKubernetesClusterScale(k8s_cluster, 2)
+
+        self.debug("Kubernetes cluster with ID: %s successfully upscaled, now downscaling it" % k8s_cluster.id)
+
+        try:
+            k8s_cluster = self.scaleKubernetesCluster(k8s_cluster.id, 1)
+        except Exception as e:
+            self.deleteKubernetesClusterAndVerify(k8s_cluster.id, False, True)
+            self.fail("Failed to downscale Kubernetes cluster due to: %s" % e)
+
+        self.verifyKubernetesClusterScale(k8s_cluster)
+
+        self.debug("Kubernetes cluster with ID: %s successfully downscaled" % k8s_cluster.id)
+
+        return
+
+    @attr(tags=["advanced", "smoke"], required_hardware="true")
+    @skipTestIf("hypervisorNotSupported")
+    def test_05_delete_kubernetes_cluster(self):
+        """Test to delete an existing Kubernetes cluster
+
+        # Validate the following:
+        # 1. deleteKubernetesCluster should delete an existing Kubernetes cluster
+        """
+        if self.setup_failed == True:
+            self.fail("Setup incomplete")
+        global k8s_cluster
+        k8s_cluster = self.getValidKubernetesCluster()
+
+        self.debug("Deleting Kubernetes cluster with ID: %s" % k8s_cluster.id)
+
+        self.deleteKubernetesClusterAndVerify(k8s_cluster.id)
+
+        self.debug("Kubernetes cluster with ID: %s successfully deleted" % k8s_cluster.id)
+
+        k8s_cluster = None
+
+        return
+
+    @attr(tags=["advanced", "smoke"], required_hardware="true")
+    @skipTestIf("hypervisorNotSupported")
+    def test_06_deploy_invalid_kubernetes_ha_cluster(self):
+        """Test to deploy an invalid HA Kubernetes cluster

         # Validate the following:
+        # 1. createKubernetesCluster should fail as version doesn't support HA
         """
         if self.setup_failed == True:
+            self.fail("Setup incomplete")
         name = 'testcluster-' + random_gen()
         self.debug("Creating for Kubernetes cluster with name %s" % name)

         try:
             cluster_response = self.createKubernetesCluster(name, self.kubernetes_version_2.id, 1, 2)
-            self.debug("Invslid CKS Kubernetes HA cluster deployed with ID: %s. Deleting it and failing test." % cluster_response.id)
-            self.deleteKubernetesCluster(cluster_response.id)
+            self.debug("Invalid CKS Kubernetes HA cluster deployed with ID: %s. Deleting it and failing test." % cluster_response.id)
+            self.deleteKubernetesClusterAndVerify(cluster_response.id, False, True)
             self.fail("HA Kubernetes cluster deployed with Kubernetes supported version below version 1.16.0. Must be an error.")
         except CloudstackAPIException as e:
             self.debug("HA Kubernetes cluster with invalid Kubernetes supported version check successful, API failure: %s" % e)
@@ -400,174 +478,79 @@ class TestKubernetesCluster(cloudstackTestCase):
         return

     @attr(tags=["advanced", "smoke"], required_hardware="true")
-    def test_04_deploy_and_upgrade_kubernetes_cluster(self):
-        """Test to deploy a new Kubernetes cluster and upgrade it to newer version
+    @skipTestIf("hypervisorNotSupported")
+    def test_07_deploy_kubernetes_ha_cluster(self):
+        """Test to deploy a new Kubernetes cluster

         # Validate the following:
         # 1. createKubernetesCluster should return valid info for new cluster
         # 2. The Cloud Database contains the valid information
-        # 3. upgradeKubernetesCluster should return valid info for the cluster
         """
-        if self.hypervisor.lower() not in ["kvm", "vmware", "xenserver"]:
-            self.skipTest("CKS not supported for hypervisor: %s" % self.hypervisor.lower())
         if self.setup_failed == True:
-            self.skipTest("Setup incomplete")
-        name = 'testcluster-' + random_gen()
-        self.debug("Creating for Kubernetes cluster with name %s" % name)
-
-        cluster_response = self.createKubernetesCluster(name, self.kubernetes_version_2.id)
-
-        self.verifyKubernetesCluster(cluster_response, name, self.kubernetes_version_2.id)
-
-        self.debug("Kubernetes cluster with ID: %s successfully deployed, now upgrading it" % cluster_response.id)
-
-        try:
-            cluster_response = self.upgradeKubernetesCluster(cluster_response.id, self.kubernetes_version_3.id)
-        except Exception as e:
-            self.deleteKubernetesCluster(cluster_response.id)
-            self.fail("Failed to upgrade Kubernetes cluster due to: %s" % e)
-
-        self.verifyKubernetesClusterUpgrade(cluster_response, self.kubernetes_version_3.id)
-
-        self.debug("Kubernetes cluster with ID: %s successfully upgraded, now deleting it" % cluster_response.id)
-
-        self.deleteAndVerifyKubernetesCluster(cluster_response.id)
-
-        self.debug("Kubernetes cluster with ID: %s successfully deleted" % cluster_response.id)
+            self.fail("Setup incomplete")
+        global k8s_cluster
+        k8s_cluster = self.getValidKubernetesCluster(1, 2)
+
+        self.debug("HA Kubernetes cluster with ID: %s successfully deployed" % k8s_cluster.id)

         return

     @attr(tags=["advanced", "smoke"], required_hardware="true")
-    def test_05_deploy_and_upgrade_kubernetes_ha_cluster(self):
+    @skipTestIf("hypervisorNotSupported")
+    def test_08_deploy_and_upgrade_kubernetes_ha_cluster(self):
         """Test to deploy a new HA Kubernetes cluster and upgrade it to newer version

         # Validate the following:
-        # 1. createKubernetesCluster should return valid info for new cluster
-        # 2. The Cloud Database contains the valid information
-        # 3. upgradeKubernetesCluster should return valid info for the cluster
+        # 1. upgradeKubernetesCluster should return valid info for the cluster
         """
-        if self.hypervisor.lower() not in ["kvm", "vmware", "xenserver"]:
-            self.skipTest("CKS not supported for hypervisor: %s" % self.hypervisor.lower())
         if self.setup_failed == True:
-            self.skipTest("Setup incomplete")
-        name = 'testcluster-' + random_gen()
-        self.debug("Creating for Kubernetes cluster with name %s" % name)
-
-        cluster_response = self.createKubernetesCluster(name, self.kubernetes_version_3.id, 1, 2)
-
-        self.verifyKubernetesCluster(cluster_response, name, self.kubernetes_version_3.id, 1, 2)
-
-        self.debug("Kubernetes cluster with ID: %s successfully deployed, now upgrading it" % cluster_response.id)
+            self.fail("Setup incomplete")
+        global k8s_cluster
+        k8s_cluster = self.getValidKubernetesCluster(1, 2)
+        time.sleep(self.services["sleep"])
+
+        self.debug("Upgrading HA Kubernetes cluster with ID: %s" % k8s_cluster.id)
         try:
-            cluster_response = self.upgradeKubernetesCluster(cluster_response.id, self.kubernetes_version_4.id)
+            k8s_cluster = self.upgradeKubernetesCluster(k8s_cluster.id, self.kubernetes_version_4.id)
         except Exception as e:
-            self.deleteKubernetesCluster(cluster_response.id)
+            self.deleteKubernetesClusterAndVerify(k8s_cluster.id, False, True)
             self.fail("Failed to upgrade Kubernetes HA cluster due to: %s" % e)

-        self.verifyKubernetesClusterUpgrade(cluster_response, self.kubernetes_version_4.id)
-
-        self.debug("Kubernetes cluster with ID: %s successfully upgraded, now deleting it" % cluster_response.id)
-
-        self.deleteAndVerifyKubernetesCluster(cluster_response.id)
-
-        self.debug("Kubernetes cluster with ID: %s successfully deleted" % cluster_response.id)
+        self.verifyKubernetesClusterUpgrade(k8s_cluster, self.kubernetes_version_4.id)
+
+        self.debug("Kubernetes cluster with ID: %s successfully upgraded" % k8s_cluster.id)

         return

     @attr(tags=["advanced", "smoke"], required_hardware="true")
-    def test_06_deploy_and_invalid_upgrade_kubernetes_cluster(self):
-        """Test to deploy a new Kubernetes cluster and check for failure while tying to upgrade it to a lower version
+    @skipTestIf("hypervisorNotSupported")
+    def test_09_delete_kubernetes_ha_cluster(self):
+        """Test to delete a HA Kubernetes cluster

         # Validate the following:
-        # 1. createKubernetesCluster should return valid info for new cluster
-        # 2. The Cloud Database contains the valid information
-        # 3. upgradeKubernetesCluster should fail
+        # 1. deleteKubernetesCluster should delete an existing HA Kubernetes cluster
         """
-        if self.hypervisor.lower() not in ["kvm", "vmware", "xenserver"]:
-            self.skipTest("CKS not supported for hypervisor: %s" % self.hypervisor.lower())
         if self.setup_failed == True:
-            self.skipTest("Setup incomplete")
-        name = 'testcluster-' + random_gen()
-        self.debug("Creating for Kubernetes cluster with name %s" % name)
-
-        cluster_response = self.createKubernetesCluster(name, self.kubernetes_version_2.id)
-
-        self.verifyKubernetesCluster(cluster_response, name, self.kubernetes_version_2.id)
-
-        self.debug("Kubernetes cluster with ID: %s successfully deployed, now scaling it" % cluster_response.id)
-
-        try:
-            cluster_response = self.upgradeKubernetesCluster(cluster_response.id, self.kubernetes_version_1.id)
-            self.debug("Invalid CKS Kubernetes HA cluster deployed with ID: %s. Deleting it and failing test." % kubernetes_version_1.id)
-            self.deleteKubernetesCluster(cluster_response.id)
-            self.fail("Kubernetes cluster upgraded to a lower Kubernetes supported version. Must be an error.")
-        except Exception as e:
-            self.debug("Upgrading Kubernetes cluster with invalid Kubernetes supported version check successful, API failure: %s" % e)
-
-        self.debug("Deleting Kubernetes cluster with ID: %s" % cluster_response.id)
-
-        self.deleteAndVerifyKubernetesCluster(cluster_response.id)
-
-        self.debug("Kubernetes cluster with ID: %s successfully deleted" % cluster_response.id)
-
-        return
-
-    @attr(tags=["advanced", "smoke"], required_hardware="true")
-    def test_07_deploy_and_scale_kubernetes_cluster(self):
-        """Test to deploy a new Kubernetes cluster and check for failure while tying to scale it
-
-        # Validate the following:
-        # 1. createKubernetesCluster should return valid info for new cluster
-        # 2. The Cloud Database contains the valid information
-        # 3. scaleKubernetesCluster should return valid info for the cluster when it is scaled up
-        # 4. scaleKubernetesCluster should return valid info for the cluster when it is scaled down
-        """
-        if self.hypervisor.lower() not in ["kvm", "vmware", "xenserver"]:
-            self.skipTest("CKS not supported for hypervisor: %s" % self.hypervisor.lower())
-        if self.setup_failed == True:
-            self.skipTest("Setup incomplete")
-        name = 'testcluster-' + random_gen()
-        self.debug("Creating for Kubernetes cluster with name %s" % name)
-
-        cluster_response = self.createKubernetesCluster(name, self.kubernetes_version_2.id)
-
-        self.verifyKubernetesCluster(cluster_response, name, self.kubernetes_version_2.id)
-
-        self.debug("Kubernetes cluster with ID: %s successfully deployed, now upscaling it" % cluster_response.id)
-
-        try:
-            cluster_response = self.scaleKubernetesCluster(cluster_response.id, 2)
-        except Exception as e:
-            self.deleteKubernetesCluster(cluster_response.id)
-            self.fail("Failed to upscale Kubernetes cluster due to: %s" % e)
-
-        self.verifyKubernetesClusterScale(cluster_response, 2)
-
-        self.debug("Kubernetes cluster with ID: %s successfully upscaled, now downscaling it" % cluster_response.id)
-
-        try:
-            cluster_response = self.scaleKubernetesCluster(cluster_response.id, 1)
-        except Exception as e:
-            self.deleteKubernetesCluster(cluster_response.id)
-            self.fail("Failed to downscale Kubernetes cluster due to: %s" % e)
-
-        self.verifyKubernetesClusterScale(cluster_response)
-
-        self.debug("Kubernetes cluster with ID: %s successfully downscaled, now deleting it" % cluster_response.id)
-
-        self.deleteAndVerifyKubernetesCluster(cluster_response.id)
-
-        self.debug("Kubernetes cluster with ID: %s successfully deleted" % cluster_response.id)
+            self.fail("Setup incomplete")
+        global k8s_cluster
+        k8s_cluster = self.getValidKubernetesCluster(1, 2)
+
+        self.debug("Deleting Kubernetes cluster with ID: %s" % k8s_cluster.id)
+
+        self.deleteKubernetesClusterAndVerify(k8s_cluster.id)
+
+        self.debug("Kubernetes cluster with ID: %s successfully deleted" % k8s_cluster.id)

         return

-    def listKubernetesCluster(self, cluster_id):
+    def listKubernetesCluster(self, cluster_id = None):
         listKubernetesClustersCmd = listKubernetesClusters.listKubernetesClustersCmd()
-        listKubernetesClustersCmd.id = cluster_id
+        if cluster_id != None:
+            listKubernetesClustersCmd.id = cluster_id
         clusterResponse = self.apiclient.listKubernetesClusters(listKubernetesClustersCmd)
-        return clusterResponse[0]
+        if cluster_id != None and clusterResponse != None:
+            return clusterResponse[0]
+        return clusterResponse

     def createKubernetesCluster(self, name, version_id, size=1, master_nodes=1):
         createKubernetesClusterCmd = createKubernetesCluster.createKubernetesClusterCmd()
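Note: the reworked tests share one module-level k8s_cluster across ordered test methods instead of deploying and destroying a cluster per test. getValidKubernetesCluster() (added in the hunk below) re-validates the cached cluster through the list API and only redeploys, after deleteAllLeftoverClusters(), a helper not shown in this excerpt, when validation fails. This trades strict test isolation for a large runtime saving, so the tests depend on their declared ordering.
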
@@ -579,6 +562,8 @@ class TestKubernetesCluster(cloudstackTestCase):
         createKubernetesClusterCmd.serviceofferingid = self.cks_service_offering.id
         createKubernetesClusterCmd.zoneid = self.zone.id
         createKubernetesClusterCmd.noderootdisksize = 10
+        createKubernetesClusterCmd.account = self.account.name
+        createKubernetesClusterCmd.domainid = self.domain.id
         clusterResponse = self.apiclient.createKubernetesCluster(createKubernetesClusterCmd)
         if not clusterResponse:
             self.cleanup.append(clusterResponse)
@@ -590,6 +575,12 @@ class TestKubernetesCluster(cloudstackTestCase):
         response = self.apiclient.stopKubernetesCluster(stopKubernetesClusterCmd)
         return response

+    def startKubernetesCluster(self, cluster_id):
+        startKubernetesClusterCmd = startKubernetesCluster.startKubernetesClusterCmd()
+        startKubernetesClusterCmd.id = cluster_id
+        response = self.apiclient.startKubernetesCluster(startKubernetesClusterCmd)
+        return response
+
     def deleteKubernetesCluster(self, cluster_id):
         deleteKubernetesClusterCmd = deleteKubernetesCluster.deleteKubernetesClusterCmd()
         deleteKubernetesClusterCmd.id = cluster_id
@ -610,17 +601,57 @@ class TestKubernetesCluster(cloudstackTestCase):
|
||||
response = self.apiclient.scaleKubernetesCluster(scaleKubernetesClusterCmd)
|
||||
return response
|
||||
|
||||
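# The head of scaleKubernetesCluster is cut off by the hunk boundary above;
# a hypothetical reconstruction of the assumed cmd construction, mirroring
# the other helpers in this file (not the committed implementation):
#
#     def scaleKubernetesCluster(self, cluster_id, size):
#         scaleKubernetesClusterCmd = scaleKubernetesCluster.scaleKubernetesClusterCmd()
#         scaleKubernetesClusterCmd.id = cluster_id
#         scaleKubernetesClusterCmd.size = size
#         response = self.apiclient.scaleKubernetesCluster(scaleKubernetesClusterCmd)
#         return response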
    def verifyKubernetesCluster(self, cluster_response, name, version_id, size=1, master_nodes=1):
    def getValidKubernetesCluster(self, size=1, master_nodes=1):
        cluster = k8s_cluster
        version = self.kubernetes_version_2
        if master_nodes != 1:
            version = self.kubernetes_version_3
        valid = True
        if cluster == None:
            valid = False
            self.debug("No existing cluster available, k8s_cluster: %s" % cluster)
        if valid == True and cluster.id == None:
            valid = False
            self.debug("ID for existing cluster not found, k8s_cluster ID: %s" % cluster.id)
        if valid == True:
            cluster_id = cluster.id
            cluster = self.listKubernetesCluster(cluster_id)
            if cluster == None:
                valid = False
                self.debug("Existing cluster, k8s_cluster ID: %s not returned by list API" % cluster_id)
        if valid == True:
            try:
                self.verifyKubernetesCluster(cluster, cluster.name, None, size, master_nodes)
                self.debug("Existing Kubernetes cluster available with name %s" % cluster.name)
            except AssertionError as error:
                valid = False
                self.debug("Existing cluster failed verification due to %s, need to deploy a new one" % error)
        if valid == False:
            name = 'testcluster-' + random_gen()
            self.debug("Creating Kubernetes cluster with name %s" % name)
            try:
                self.deleteAllLeftoverClusters()
                cluster = self.createKubernetesCluster(name, version.id, size, master_nodes)
                self.verifyKubernetesCluster(cluster, name, version.id, size, master_nodes)
            # AssertionError must be caught before the generic Exception handler,
            # otherwise the verification branch is unreachable
            except AssertionError as err:
                self.fail("Kubernetes cluster deployment failed during cluster verification: %s" % err)
            except Exception as ex:
                self.fail("Kubernetes cluster deployment failed: %s" % ex)
        return cluster
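# A hedged usage sketch (not part of the diff): tests are assumed to reuse the
# module-level k8s_cluster through this helper instead of deploying a fresh
# cluster per test, falling back to a new deployment when verification fails:
#
#     global k8s_cluster
#     k8s_cluster = self.getValidKubernetesCluster()
#     self.debug("Running test against cluster %s" % k8s_cluster.name)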
    def verifyKubernetesCluster(self, cluster_response, name, version_id=None, size=1, master_nodes=1):
        """Check if Kubernetes cluster is valid"""

        self.verifyKubernetesClusterState(cluster_response, 'Running')

        if name != None:
            self.assertEqual(
                cluster_response.name,
                name,
                "Check KubernetesCluster name {}, {}".format(cluster_response.name, name)
            )

        if version_id != None:
            self.verifyKubernetesClusterVersion(cluster_response, version_id)

        self.assertEqual(
@@ -703,11 +734,35 @@ class TestKubernetesCluster(cloudstackTestCase):
            "KubernetesCluster not stopped in DB, {}".format(db_cluster_state)
        )
    def deleteAndVerifyKubernetesCluster(self, cluster_id):
    def deleteKubernetesClusterAndVerify(self, cluster_id, verify=True, forced=False):
        """Delete Kubernetes cluster and check if it is really deleted"""

        forceDeleted = False
        try:
            delete_response = self.deleteKubernetesCluster(cluster_id)
        except Exception as e:
            if forced:
                cluster = self.listKubernetesCluster(cluster_id)
                if cluster != None:
                    if cluster.state in ['Starting', 'Running', 'Upgrading', 'Scaling']:
                        # a busy cluster is stopped first, then deleted normally
                        self.stopKubernetesCluster(cluster_id)
                        self.deleteKubernetesCluster(cluster_id)
                    else:
                        # last-resort cleanup: expunge the member VMs, force-delete
                        # the cluster network and mark the DB row as destroyed
                        forceDeleted = True
                        for cluster_vm_id in cluster.virtualmachineids:
                            cmd = destroyVirtualMachine.destroyVirtualMachineCmd()
                            cmd.id = cluster_vm_id
                            cmd.expunge = True
                            self.apiclient.destroyVirtualMachine(cmd)
                        cmd = deleteNetwork.deleteNetworkCmd()
                        cmd.id = cluster.networkid
                        cmd.forced = True
                        self.apiclient.deleteNetwork(cmd)
                        self.dbclient.execute("update kubernetes_cluster set state='Destroyed', removed=now() where uuid = '%s';" % cluster.id)
            else:
                raise Exception("Error: Exception during delete cluster : %s" % e)

        if verify == True and forceDeleted == False:
            self.assertEqual(
                delete_response.success,
                True,
@@ -721,3 +776,9 @@ class TestKubernetesCluster(cloudstackTestCase):
                None,
                "KubernetesCluster not removed in DB, {}".format(db_cluster_removed)
            )

    def deleteAllLeftoverClusters(self):
        clusters = self.listKubernetesCluster()
        if clusters != None:
            for cluster in clusters:
                self.deleteKubernetesClusterAndVerify(cluster.id, False, True)
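# A hedged usage sketch (not part of the diff): forced deletion is the
# teardown path of last resort, skipping response verification:
#
#     self.deleteKubernetesClusterAndVerify(cluster.id, verify=False, forced=True)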
@@ -130,12 +130,12 @@ class TestKubernetesSupportedVersion(cloudstackTestCase):
        # 2. The Cloud Database contains the valid information when listKubernetesSupportedVersions is called
        """

        version = '1.16.3'
        name = 'v' + version + '-' + random_gen()
        version = self.services["cks_kubernetes_versions"]["1.16.3"]
        name = 'v' + version["semanticversion"] + '-' + random_gen()

        self.debug("Adding Kubernetes supported version with name: %s" % name)

        version_response = self.addKubernetesSupportedVersion(version, name, self.zone.id, self.kubernetes_version_iso_url)
        version_response = self.addKubernetesSupportedVersion(version["semanticversion"], name, self.zone.id, version["url"], version["mincpunumber"], version["minmemory"])

        list_versions_response = self.listKubernetesSupportedVersion(version_response.id)

@@ -147,8 +147,8 @@ class TestKubernetesSupportedVersion(cloudstackTestCase):

        self.assertEqual(
            list_versions_response.semanticversion,
            version,
            "Check KubernetesSupportedVersion version {}, {}".format(list_versions_response.semanticversion, version)
            version["semanticversion"],
            "Check KubernetesSupportedVersion version {}, {}".format(list_versions_response.semanticversion, version["semanticversion"])
        )
        self.assertEqual(
            list_versions_response.zoneid,
@@ -228,14 +228,14 @@ class TestKubernetesSupportedVersion(cloudstackTestCase):
        self.debug("Unsupported version error check successful, API failure: %s" % e)
        return

    def addKubernetesSupportedVersion(self, version, name, zoneId, isoUrl):
    def addKubernetesSupportedVersion(self, version, name, zoneId, isoUrl, mincpunumber=2, minmemory=2048):
        addKubernetesSupportedVersionCmd = addKubernetesSupportedVersion.addKubernetesSupportedVersionCmd()
        addKubernetesSupportedVersionCmd.semanticversion = version
        addKubernetesSupportedVersionCmd.name = name
        addKubernetesSupportedVersionCmd.zoneid = zoneId
        addKubernetesSupportedVersionCmd.url = isoUrl
        addKubernetesSupportedVersionCmd.mincpunumber = 2
        addKubernetesSupportedVersionCmd.minmemory = 2048
        addKubernetesSupportedVersionCmd.mincpunumber = mincpunumber
        addKubernetesSupportedVersionCmd.minmemory = minmemory
        versionResponse = self.apiclient.addKubernetesSupportedVersion(addKubernetesSupportedVersionCmd)
        # register for teardown only when the call returned a response
        if versionResponse:
            self.cleanup.append(versionResponse)
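# listKubernetesSupportedVersion is not shown in these hunks; a minimal sketch
# of its assumed shape, mirroring the other list helpers in this file
# (hypothetical, not the committed implementation):
#
#     def listKubernetesSupportedVersion(self, version_id):
#         cmd = listKubernetesSupportedVersions.listKubernetesSupportedVersionsCmd()
#         cmd.id = version_id
#         return self.apiclient.listKubernetesSupportedVersions(cmd)[0]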
@@ -1961,5 +1961,73 @@ test_data = {
            "ostype": 'CentOS 5.3 (64-bit)',
            "mode": 'HTTP_DOWNLOAD'
        }
    },
    "cks_kubernetes_versions": {
        "1.14.9": {
            "semanticversion": "1.14.9",
            "url": "http://download.cloudstack.org/cks/setup-1.14.9.iso",
            "mincpunumber": 2,
            "minmemory": 2048
        },
        "1.15.0": {
            "semanticversion": "1.15.0",
            "url": "http://download.cloudstack.org/cks/setup-1.15.0.iso",
            "mincpunumber": 2,
            "minmemory": 2048
        },
        "1.16.0": {
            "semanticversion": "1.16.0",
            "url": "http://download.cloudstack.org/cks/setup-1.16.0.iso",
            "mincpunumber": 2,
            "minmemory": 2048
        },
        "1.16.3": {
            "semanticversion": "1.16.3",
            "url": "http://download.cloudstack.org/cks/setup-1.16.3.iso",
            "mincpunumber": 2,
            "minmemory": 2048
        }
    },
    "cks_templates": {
        "kvm": {
            "name": "Kubernetes-Service-Template-kvm",
            "displaytext": "Kubernetes-Service-Template kvm",
            "format": "qcow2",
            "hypervisor": "kvm",
            "ostype": "CoreOS",
            "url": "http://dl.openvm.eu/cloudstack/coreos/x86_64/coreos_production_cloudstack_image-kvm.qcow2.bz2",
            "requireshvm": "True",
            "ispublic": "True",
            "isextractable": "True"
        },
        "xenserver": {
            "name": "Kubernetes-Service-Template-xen",
            "displaytext": "Kubernetes-Service-Template xen",
            "format": "vhd",
            "hypervisor": "xenserver",
            "ostype": "CoreOS",
            "url": "http://dl.openvm.eu/cloudstack/coreos/x86_64/coreos_production_cloudstack_image-xen.vhd.bz2",
            "requireshvm": "True",
            "ispublic": "True",
            "isextractable": "True"
        },
        "vmware": {
            "name": "Kubernetes-Service-Template-vmware",
            "displaytext": "Kubernetes-Service-Template vmware",
            "format": "ova",
            "hypervisor": "vmware",
            "ostype": "CoreOS",
            "url": "http://dl.openvm.eu/cloudstack/coreos/x86_64/coreos_production_cloudstack_image-vmware.ova",
            "requireshvm": "True",
            "ispublic": "True",
            "details": [{"keyboard": "us", "nicAdapter": "Vmxnet3", "rootDiskController": "pvscsi"}]
        }
    },
    "cks_service_offering": {
        "name": "CKS-Instance",
        "displaytext": "CKS Instance",
        "cpunumber": 2,
        "cpuspeed": 1000,
        "memory": 2048
    }
}
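# A hedged usage sketch (not part of the diff): the test classes above are
# assumed to select entries from this data by hypervisor and by version key:
#
#     cls.cks_template = cls.services["cks_templates"][cls.hypervisor.lower()]
#     version_data = cls.services["cks_kubernetes_versions"]["1.16.3"]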