diff --git a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java index 1c6fd8b9e94..384293f4e08 100644 --- a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java +++ b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java @@ -225,8 +225,8 @@ public class ApiConstants { public static final String LOCK = "lock"; public static final String LUN = "lun"; public static final String LBID = "lbruleid"; - public static final String MAX = "max"; public static final String MAC_ADDRESS = "macaddress"; + public static final String MAX = "max"; public static final String MAX_SNAPS = "maxsnaps"; public static final String MAX_CPU_NUMBER = "maxcpunumber"; public static final String MAX_MEMORY = "maxmemory"; @@ -782,6 +782,23 @@ public class ApiConstants { public static final String LAST_UPDATED = "lastupdated"; public static final String PERFORM_FRESH_CHECKS = "performfreshchecks"; + public static final String CONSOLE_END_POINT = "consoleendpoint"; + public static final String EXTERNAL_LOAD_BALANCER_IP_ADDRESS = "externalloadbalanceripaddress"; + public static final String DOCKER_REGISTRY_USER_NAME = "dockerregistryusername"; + public static final String DOCKER_REGISTRY_PASSWORD = "dockerregistrypassword"; + public static final String DOCKER_REGISTRY_URL = "dockerregistryurl"; + public static final String DOCKER_REGISTRY_EMAIL = "dockerregistryemail"; + public static final String ISO_NAME = "isoname"; + public static final String ISO_STATE = "isostate"; + public static final String SEMANTIC_VERSION = "semanticversion"; + public static final String KUBERNETES_VERSION_ID = "kubernetesversionid"; + public static final String KUBERNETES_VERSION_NAME = "kubernetesversionname"; + public static final String MASTER_NODES = "masternodes"; + public static final String MIN_SEMANTIC_VERSION = "minimumsemanticversion"; + public static final String MIN_KUBERNETES_VERSION_ID = "minimumkubernetesversionid"; 
+ public static final String NODE_ROOT_DISK_SIZE = "noderootdisksize"; + public static final String SUPPORTS_HA = "supportsha"; + public enum HostDetails { all, capacity, events, stats, min; } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/config/ListCapabilitiesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/config/ListCapabilitiesCmd.java index 3f1b9a23de3..566be64cc94 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/config/ListCapabilitiesCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/config/ListCapabilitiesCmd.java @@ -61,6 +61,8 @@ public class ListCapabilitiesCmd extends BaseCmd { response.setAllowUserExpungeRecoverVM((Boolean)capabilities.get("allowUserExpungeRecoverVM")); response.setAllowUserExpungeRecoverVolume((Boolean)capabilities.get("allowUserExpungeRecoverVolume")); response.setAllowUserViewAllDomainAccounts((Boolean)capabilities.get("allowUserViewAllDomainAccounts")); + response.setKubernetesServiceEnabled((Boolean)capabilities.get("kubernetesServiceEnabled")); + response.setKubernetesClusterExperimentalFeaturesEnabled((Boolean)capabilities.get("kubernetesClusterExperimentalFeaturesEnabled")); if (capabilities.containsKey("apiLimitInterval")) { response.setApiLimitInterval((Integer)capabilities.get("apiLimitInterval")); } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/iso/DeleteIsoCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/iso/DeleteIsoCmd.java index 103e9227c24..b38a24f8ad7 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/iso/DeleteIsoCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/iso/DeleteIsoCmd.java @@ -61,6 +61,10 @@ public class DeleteIsoCmd extends BaseAsyncCmd { return id; } + public void setId(Long id) { + this.id = id; + } + public Long getZoneId() { return zoneId; } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/iso/RegisterIsoCmd.java 
b/api/src/main/java/org/apache/cloudstack/api/command/user/iso/RegisterIsoCmd.java index a06b54f1a01..1c1a767aa3b 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/iso/RegisterIsoCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/iso/RegisterIsoCmd.java @@ -127,10 +127,18 @@ public class RegisterIsoCmd extends BaseCmd implements UserCmd { return bootable; } + public void setBootable(Boolean bootable) { + this.bootable = bootable; + } + public String getDisplayText() { return displayText; } + public void setDisplayText(String displayText) { + this.displayText = displayText; + } + public Boolean isFeatured() { return featured; } @@ -139,6 +147,10 @@ public class RegisterIsoCmd extends BaseCmd implements UserCmd { return publicIso; } + public void setPublic(Boolean publicIso) { + this.publicIso = publicIso; + } + public Boolean isExtractable() { return extractable; } @@ -147,6 +159,10 @@ public class RegisterIsoCmd extends BaseCmd implements UserCmd { return isoName; } + public void setIsoName(String isoName) { + this.isoName = isoName; + } + public Long getOsTypeId() { return osTypeId; } @@ -155,22 +171,42 @@ public class RegisterIsoCmd extends BaseCmd implements UserCmd { return url; } + public void setUrl(String url) { + this.url = url; + } + public Long getZoneId() { return zoneId; } + public void setZoneId(Long zoneId) { + this.zoneId = zoneId; + } + public Long getDomainId() { return domainId; } + public void setDomainId(Long domainId) { + this.domainId = domainId; + } + public String getAccountName() { return accountName; } + public void setAccountName(String accountName) { + this.accountName = accountName; + } + public String getChecksum() { return checksum; } + public void setChecksum(String checksum) { + this.checksum = checksum; + } + public String getImageStoreUuid() { return imageStoreUuid; } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/CapabilitiesResponse.java 
b/api/src/main/java/org/apache/cloudstack/api/response/CapabilitiesResponse.java index 352f559125f..26b3fd53d54 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/CapabilitiesResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/CapabilitiesResponse.java @@ -92,6 +92,14 @@ public class CapabilitiesResponse extends BaseResponse { @Param(description = "true if users can see all accounts within the same domain, false otherwise") private boolean allowUserViewAllDomainAccounts; + @SerializedName("kubernetesserviceenabled") + @Param(description = "true if Kubernetes Service plugin is enabled, false otherwise") + private boolean kubernetesServiceEnabled; + + @SerializedName("kubernetesclusterexperimentalfeaturesenabled") + @Param(description = "true if experimental features for Kubernetes cluster such as Docker private registry are enabled, false otherwise") + private boolean kubernetesClusterExperimentalFeaturesEnabled; + public void setSecurityGroupsEnabled(boolean securityGroupsEnabled) { this.securityGroupsEnabled = securityGroupsEnabled; } @@ -159,4 +167,12 @@ public class CapabilitiesResponse extends BaseResponse { public void setAllowUserViewAllDomainAccounts(boolean allowUserViewAllDomainAccounts) { this.allowUserViewAllDomainAccounts = allowUserViewAllDomainAccounts; } + + public void setKubernetesServiceEnabled(boolean kubernetesServiceEnabled) { + this.kubernetesServiceEnabled = kubernetesServiceEnabled; + } + + public void setKubernetesClusterExperimentalFeaturesEnabled(boolean kubernetesClusterExperimentalFeaturesEnabled) { + this.kubernetesClusterExperimentalFeaturesEnabled = kubernetesClusterExperimentalFeaturesEnabled; + } } diff --git a/client/pom.xml b/client/pom.xml index 29ecdece278..bd58b05cdd2 100644 --- a/client/pom.xml +++ b/client/pom.xml @@ -483,6 +483,11 @@ cloud-plugin-backup-dummy ${project.version} + + org.apache.cloudstack + cloud-plugin-integrations-kubernetes-service + ${project.version} + diff --git 
a/engine/components-api/src/main/java/com/cloud/network/IpAddressManager.java b/engine/components-api/src/main/java/com/cloud/network/IpAddressManager.java index 229248ac3f2..d4c5bf61487 100644 --- a/engine/components-api/src/main/java/com/cloud/network/IpAddressManager.java +++ b/engine/components-api/src/main/java/com/cloud/network/IpAddressManager.java @@ -175,6 +175,9 @@ public interface IpAddressManager { PublicIp assignPublicIpAddressFromVlans(long dcId, Long podId, Account owner, VlanType type, List vlanDbIds, Long networkId, String requestedIp, boolean isSystem) throws InsufficientAddressCapacityException; + PublicIp getAvailablePublicIpAddressFromVlans(long dcId, Long podId, Account owner, VlanType type, List vlanDbIds, Long networkId, String requestedIp, boolean isSystem) + throws InsufficientAddressCapacityException; + @DB void allocateNicValues(NicProfile nic, DataCenter dc, VirtualMachineProfile vm, Network network, String requestedIpv4, String requestedIpv6) throws InsufficientVirtualNetworkCapacityException, InsufficientAddressCapacityException; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41300to41400.sql b/engine/schema/src/main/resources/META-INF/db/schema-41300to41400.sql index db7482b5eed..2c9c7ab42d4 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-41300to41400.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-41300to41400.sql @@ -304,3 +304,74 @@ CREATE TABLE `cloud`.`router_health_check` ( UNIQUE `i_router_health_checks__router_id__check_name__check_type`(`router_id`, `check_name`, `check_type`), INDEX `i_router_health_checks__router_id`(`router_id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; + +-- Kubernetes service +CREATE TABLE IF NOT EXISTS `cloud`.`kubernetes_supported_version` ( + `id` bigint unsigned NOT NULL auto_increment, + `uuid` varchar(40) DEFAULT NULL, + `name` varchar(255) NOT NULL COMMENT 'the name of this Kubernetes version', + `semantic_version` varchar(32) NOT NULL 
COMMENT 'the semantic version for this Kubernetes version', + `iso_id` bigint unsigned NOT NULL COMMENT 'the ID of the binaries ISO for this Kubernetes version', + `zone_id` bigint unsigned DEFAULT NULL COMMENT 'the ID of the zone for which this Kubernetes version is made available', + `state` char(32) DEFAULT NULL COMMENT 'the enabled or disabled state for this Kubernetes version', + `min_cpu` int(10) unsigned NOT NULL COMMENT 'the minimum CPU needed by cluster nodes for using this Kubernetes version', + `min_ram_size` bigint(20) unsigned NOT NULL COMMENT 'the minimum RAM in MB needed by cluster nodes for this Kubernetes version', + `created` datetime NOT NULL COMMENT 'date created', + `removed` datetime COMMENT 'date removed or null, if still present', + + PRIMARY KEY(`id`), + CONSTRAINT `fk_kubernetes_supported_version__iso_id` FOREIGN KEY `fk_kubernetes_supported_version__iso_id`(`iso_id`) REFERENCES `vm_template`(`id`) ON DELETE CASCADE, + CONSTRAINT `fk_kubernetes_supported_version__zone_id` FOREIGN KEY `fk_kubernetes_supported_version__zone_id`(`zone_id`) REFERENCES `data_center` (`id`) ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `cloud`.`kubernetes_cluster` ( + `id` bigint unsigned NOT NULL auto_increment, + `uuid` varchar(40) DEFAULT NULL, + `name` varchar(255) NOT NULL, + `description` varchar(4096) COMMENT 'display text for this Kubernetes cluster', + `zone_id` bigint unsigned NOT NULL COMMENT 'the ID of the zone in which this Kubernetes cluster is deployed', + `kubernetes_version_id` bigint unsigned NOT NULL COMMENT 'the ID of the Kubernetes version of this Kubernetes cluster', + `service_offering_id` bigint unsigned COMMENT 'service offering id for the cluster VM', + `template_id` bigint unsigned COMMENT 'the ID of the template used by this Kubernetes cluster', + `network_id` bigint unsigned COMMENT 'the ID of the network used by this Kubernetes cluster', + `master_node_count` bigint NOT NULL default '0' 
COMMENT 'the number of the master nodes deployed for this Kubernetes cluster', + `node_count` bigint NOT NULL default '0' COMMENT 'the number of the worker nodes deployed for this Kubernetes cluster', + `account_id` bigint unsigned NOT NULL COMMENT 'the ID of owner account of this Kubernetes cluster', + `domain_id` bigint unsigned NOT NULL COMMENT 'the ID of the domain of this cluster', + `state` char(32) NOT NULL COMMENT 'the current state of this Kubernetes cluster', + `key_pair` varchar(40), + `cores` bigint unsigned NOT NULL COMMENT 'total number of CPU cores used by this Kubernetes cluster', + `memory` bigint unsigned NOT NULL COMMENT 'total memory used by this Kubernetes cluster', + `node_root_disk_size` bigint(20) unsigned DEFAULT 0 COMMENT 'root disk size of root disk for each node', + `endpoint` varchar(255) COMMENT 'url endpoint of the Kubernetes cluster manager api access', + `created` datetime NOT NULL COMMENT 'date created', + `removed` datetime COMMENT 'date removed or null, if still present', + `gc` tinyint unsigned NOT NULL DEFAULT 1 COMMENT 'gc this Kubernetes cluster or not', + + PRIMARY KEY(`id`), + CONSTRAINT `fk_cluster__zone_id` FOREIGN KEY `fk_cluster__zone_id`(`zone_id`) REFERENCES `data_center` (`id`) ON DELETE CASCADE, + CONSTRAINT `fk_cluster__kubernetes_version_id` FOREIGN KEY `fk_cluster__kubernetes_version_id`(`kubernetes_version_id`) REFERENCES `kubernetes_supported_version` (`id`) ON DELETE CASCADE, + CONSTRAINT `fk_cluster__service_offering_id` FOREIGN KEY `fk_cluster__service_offering_id`(`service_offering_id`) REFERENCES `service_offering`(`id`) ON DELETE CASCADE, + CONSTRAINT `fk_cluster__template_id` FOREIGN KEY `fk_cluster__template_id`(`template_id`) REFERENCES `vm_template`(`id`) ON DELETE CASCADE, + CONSTRAINT `fk_cluster__network_id` FOREIGN KEY `fk_cluster__network_id`(`network_id`) REFERENCES `networks`(`id`) ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS 
`cloud`.`kubernetes_cluster_vm_map` ( + `id` bigint unsigned NOT NULL auto_increment, + `cluster_id` bigint unsigned NOT NULL COMMENT 'the ID of the Kubernetes cluster', + `vm_id` bigint unsigned NOT NULL COMMENT 'the ID of the VM', + + PRIMARY KEY(`id`), + CONSTRAINT `fk_kubernetes_cluster_vm_map__cluster_id` FOREIGN KEY `fk_kubernetes_cluster_vm_map__cluster_id`(`cluster_id`) REFERENCES `kubernetes_cluster`(`id`) ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `cloud`.`kubernetes_cluster_details` ( + `id` bigint unsigned NOT NULL auto_increment, + `cluster_id` bigint unsigned NOT NULL COMMENT 'the ID of the Kubernetes cluster', + `name` varchar(255) NOT NULL, + `value` varchar(10240) NOT NULL, + `display` tinyint(1) NOT NULL DEFAULT '1' COMMENT 'True if the detail can be displayed to the end user else false', + + PRIMARY KEY(`id`), + CONSTRAINT `fk_kubernetes_cluster_details__cluster_id` FOREIGN KEY `fk_kubernetes_cluster_details__cluster_id`(`cluster_id`) REFERENCES `kubernetes_cluster`(`id`) ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8; diff --git a/plugins/integrations/kubernetes-service/pom.xml b/plugins/integrations/kubernetes-service/pom.xml new file mode 100644 index 00000000000..9fb2a4391a9 --- /dev/null +++ b/plugins/integrations/kubernetes-service/pom.xml @@ -0,0 +1,135 @@ + + + + 4.0.0 + cloud-plugin-integrations-kubernetes-service + Apache CloudStack Plugin - Kubernetes Service + + org.apache.cloudstack + cloudstack-plugins + 4.14.0.0-SNAPSHOT + ../../pom.xml + + + + org.apache.cloudstack + cloud-core + ${project.version} + + + org.apache.cloudstack + cloud-framework-db + ${project.version} + + + org.apache.cloudstack + cloud-framework-ca + ${project.version} + + + org.apache.cloudstack + cloud-framework-security + ${project.version} + + + org.apache.cloudstack + cloud-engine-schema + ${project.version} + + + org.apache.cloudstack + cloud-engine-api + ${project.version} + + + org.apache.cloudstack + 
cloud-engine-components-api + ${project.version} + + + org.apache.cloudstack + cloud-framework-managed-context + ${project.version} + + + org.eclipse.persistence + javax.persistence + ${cs.jpa.version} + + + com.google.code.gson + gson + ${cs.gson.version} + + + com.google.guava + guava + ${cs.guava.version} + + + log4j + log4j + ${cs.log4j.version} + + + org.springframework + spring-context + ${org.springframework.version} + + + org.springframework + spring-aop + ${org.springframework.version} + + + org.springframework + spring-beans + ${org.springframework.version} + + + org.springframework + spring-test + ${org.springframework.version} + + + commons-codec + commons-codec + ${cs.codec.version} + + + org.hamcrest + hamcrest-library + ${cs.hamcrest.version} + test + + + org.bouncycastle + bcprov-jdk15on + ${cs.bcprov.version} + + + joda-time + joda-time + ${cs.joda-time.version} + + + diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesCluster.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesCluster.java new file mode 100644 index 00000000000..aef304a03d3 --- /dev/null +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesCluster.java @@ -0,0 +1,134 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.kubernetes.cluster; + +import java.util.Date; + +import org.apache.cloudstack.acl.ControlledEntity; +import org.apache.cloudstack.api.Displayable; +import org.apache.cloudstack.api.Identity; +import org.apache.cloudstack.api.InternalIdentity; + +import com.cloud.utils.fsm.StateMachine2; + +/** + * KubernetesCluster describes the properties of a Kubernetes cluster + * StateMachine maintains its states. + * + */ +public interface KubernetesCluster extends ControlledEntity, com.cloud.utils.fsm.StateObject, Identity, InternalIdentity, Displayable { + + enum Event { + StartRequested, + StopRequested, + DestroyRequested, + RecoveryRequested, + ScaleUpRequested, + ScaleDownRequested, + UpgradeRequested, + OperationSucceeded, + OperationFailed, + CreateFailed, + FaultsDetected; + } + + enum State { + Created("Initial State of Kubernetes cluster. 
At this state its just a logical/DB entry with no resources consumed"), + Starting("Resources needed for Kubernetes cluster are being provisioned"), + Running("Necessary resources are provisioned and Kubernetes cluster is in operational ready state to launch Kubernetes"), + Stopping("Resources for the Kubernetes cluster are being destroyed"), + Stopped("All resources for the Kubernetes cluster are destroyed, Kubernetes cluster may still have ephemeral resource like persistent volumes provisioned"), + Scaling("Transient state in which resources are either getting scaled up/down"), + Upgrading("Transient state in which cluster is getting upgraded"), + Alert("State to represent Kubernetes clusters which are not in expected desired state (operationally in active control place, stopped cluster VM's etc)."), + Recovering("State in which Kubernetes cluster is recovering from alert state"), + Destroyed("End state of Kubernetes cluster in which all resources are destroyed, cluster will not be usable further"), + Destroying("State in which resources for the Kubernetes cluster is getting cleaned up or yet to be cleaned up by garbage collector"), + Error("State of the failed to create Kubernetes clusters"); + + protected static final StateMachine2 s_fsm = new StateMachine2(); + + public static StateMachine2 getStateMachine() { return s_fsm; } + + static { + s_fsm.addTransition(State.Created, Event.StartRequested, State.Starting); + + s_fsm.addTransition(State.Starting, Event.OperationSucceeded, State.Running); + s_fsm.addTransition(State.Starting, Event.OperationFailed, State.Alert); + s_fsm.addTransition(State.Starting, Event.CreateFailed, State.Error); + s_fsm.addTransition(State.Starting, Event.StopRequested, State.Stopping); + + s_fsm.addTransition(State.Running, Event.StopRequested, State.Stopping); + s_fsm.addTransition(State.Alert, Event.StopRequested, State.Stopping); + s_fsm.addTransition(State.Stopping, Event.OperationSucceeded, State.Stopped); + 
s_fsm.addTransition(State.Stopping, Event.OperationFailed, State.Alert); + + s_fsm.addTransition(State.Stopped, Event.StartRequested, State.Starting); + + s_fsm.addTransition(State.Running, Event.FaultsDetected, State.Alert); + + s_fsm.addTransition(State.Running, Event.ScaleUpRequested, State.Scaling); + s_fsm.addTransition(State.Running, Event.ScaleDownRequested, State.Scaling); + s_fsm.addTransition(State.Scaling, Event.OperationSucceeded, State.Running); + s_fsm.addTransition(State.Scaling, Event.OperationFailed, State.Alert); + + s_fsm.addTransition(State.Running, Event.UpgradeRequested, State.Upgrading); + s_fsm.addTransition(State.Upgrading, Event.OperationSucceeded, State.Running); + s_fsm.addTransition(State.Upgrading, Event.OperationFailed, State.Alert); + + s_fsm.addTransition(State.Alert, Event.RecoveryRequested, State.Recovering); + s_fsm.addTransition(State.Recovering, Event.OperationSucceeded, State.Running); + s_fsm.addTransition(State.Recovering, Event.OperationFailed, State.Alert); + + s_fsm.addTransition(State.Running, Event.DestroyRequested, State.Destroying); + s_fsm.addTransition(State.Stopped, Event.DestroyRequested, State.Destroying); + s_fsm.addTransition(State.Alert, Event.DestroyRequested, State.Destroying); + s_fsm.addTransition(State.Error, Event.DestroyRequested, State.Destroying); + + s_fsm.addTransition(State.Destroying, Event.OperationSucceeded, State.Destroyed); + + } + String _description; + + State(String description) { + _description = description; + } + } + + long getId(); + String getName(); + String getDescription(); + long getZoneId(); + long getKubernetesVersionId(); + long getServiceOfferingId(); + long getTemplateId(); + long getNetworkId(); + long getDomainId(); + long getAccountId(); + long getMasterNodeCount(); + long getNodeCount(); + long getTotalNodeCount(); + String getKeyPair(); + long getCores(); + long getMemory(); + long getNodeRootDiskSize(); + String getEndpoint(); + boolean isCheckForGc(); + @Override + 
State getState(); + Date getCreated(); +} diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterDetailsVO.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterDetailsVO.java new file mode 100644 index 00000000000..30b28646376 --- /dev/null +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterDetailsVO.java @@ -0,0 +1,84 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.kubernetes.cluster; + + +import javax.persistence.Column; + +import javax.persistence.Entity; +import javax.persistence.Id; +import javax.persistence.Table; +import javax.persistence.GeneratedValue; +import javax.persistence.GenerationType; + +import org.apache.cloudstack.api.ResourceDetail; + +@Entity +@Table(name = "kubernetes_cluster_details") +public class KubernetesClusterDetailsVO implements ResourceDetail { + @Id + @GeneratedValue(strategy = GenerationType.IDENTITY) + @Column(name = "id") + private long id; + + @Column(name = "cluster_id") + private long resourceId; + + @Column(name = "name") + private String name; + + @Column(name = "value", length = 10240) + private String value; + + @Column(name = "display") + private boolean display; + + public KubernetesClusterDetailsVO() { + } + + public KubernetesClusterDetailsVO(long id, String name, String value, boolean display) { + this.resourceId = id; + this.name = name; + this.value = value; + this.display = display; + } + + @Override + public long getId() { + return id; + } + + @Override + public String getName() { + return name; + } + + @Override + public String getValue() { + return value; + } + + @Override + public long getResourceId() { + return resourceId; + } + + @Override + public boolean isDisplay() { + return display; + } +} diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterEventTypes.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterEventTypes.java new file mode 100755 index 00000000000..a947e4273be --- /dev/null +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterEventTypes.java @@ -0,0 +1,26 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.kubernetes.cluster; + +public class KubernetesClusterEventTypes { + public static final String EVENT_KUBERNETES_CLUSTER_CREATE = "KUBERNETES.CLUSTER.CREATE"; + public static final String EVENT_KUBERNETES_CLUSTER_DELETE = "KUBERNETES.CLUSTER.DELETE"; + public static final String EVENT_KUBERNETES_CLUSTER_START = "KUBERNETES.CLUSTER.START"; + public static final String EVENT_KUBERNETES_CLUSTER_STOP = "KUBERNETES.CLUSTER.STOP"; + public static final String EVENT_KUBERNETES_CLUSTER_SCALE = "KUBERNETES.CLUSTER.SCALE"; + public static final String EVENT_KUBERNETES_CLUSTER_UPGRADE = "KUBERNETES.CLUSTER.UPGRADE"; +} diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java new file mode 100644 index 00000000000..358fa034a8b --- /dev/null +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java @@ -0,0 +1,1500 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.kubernetes.cluster; + +import java.math.BigInteger; +import java.net.MalformedURLException; +import java.net.URL; +import java.security.SecureRandom; +import java.util.ArrayList; +import java.util.Date; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import javax.inject.Inject; +import javax.naming.ConfigurationException; + +import org.apache.cloudstack.acl.ControlledEntity; +import org.apache.cloudstack.acl.SecurityChecker; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.command.user.kubernetes.cluster.CreateKubernetesClusterCmd; +import org.apache.cloudstack.api.command.user.kubernetes.cluster.DeleteKubernetesClusterCmd; +import org.apache.cloudstack.api.command.user.kubernetes.cluster.GetKubernetesClusterConfigCmd; +import org.apache.cloudstack.api.command.user.kubernetes.cluster.ListKubernetesClustersCmd; +import org.apache.cloudstack.api.command.user.kubernetes.cluster.ScaleKubernetesClusterCmd; +import 
org.apache.cloudstack.api.command.user.kubernetes.cluster.StartKubernetesClusterCmd; +import org.apache.cloudstack.api.command.user.kubernetes.cluster.StopKubernetesClusterCmd; +import org.apache.cloudstack.api.command.user.kubernetes.cluster.UpgradeKubernetesClusterCmd; +import org.apache.cloudstack.api.response.KubernetesClusterConfigResponse; +import org.apache.cloudstack.api.response.KubernetesClusterResponse; +import org.apache.cloudstack.api.response.ListResponse; +import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; +import org.apache.cloudstack.framework.config.ConfigKey; +import org.apache.cloudstack.managed.context.ManagedContextRunnable; +import org.apache.commons.codec.binary.Base64; +import org.apache.commons.collections.CollectionUtils; +import org.apache.log4j.Level; +import org.apache.log4j.Logger; + +import com.cloud.api.ApiDBUtils; +import com.cloud.api.query.dao.NetworkOfferingJoinDao; +import com.cloud.api.query.dao.TemplateJoinDao; +import com.cloud.api.query.vo.NetworkOfferingJoinVO; +import com.cloud.api.query.vo.TemplateJoinVO; +import com.cloud.capacity.CapacityManager; +import com.cloud.dc.ClusterDetailsDao; +import com.cloud.dc.ClusterDetailsVO; +import com.cloud.dc.ClusterVO; +import com.cloud.dc.DataCenter; +import com.cloud.dc.DataCenterVO; +import com.cloud.dc.dao.ClusterDao; +import com.cloud.dc.dao.DataCenterDao; +import com.cloud.deploy.DeployDestination; +import com.cloud.domain.Domain; +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.InsufficientServerCapacityException; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.exception.PermissionDeniedException; +import com.cloud.exception.ResourceAllocationException; +import 
com.cloud.host.Host.Type; +import com.cloud.host.HostVO; +import com.cloud.hypervisor.Hypervisor; +import com.cloud.kubernetes.cluster.actionworkers.KubernetesClusterActionWorker; +import com.cloud.kubernetes.cluster.actionworkers.KubernetesClusterDestroyWorker; +import com.cloud.kubernetes.cluster.actionworkers.KubernetesClusterScaleWorker; +import com.cloud.kubernetes.cluster.actionworkers.KubernetesClusterStartWorker; +import com.cloud.kubernetes.cluster.actionworkers.KubernetesClusterStopWorker; +import com.cloud.kubernetes.cluster.actionworkers.KubernetesClusterUpgradeWorker; +import com.cloud.kubernetes.cluster.dao.KubernetesClusterDao; +import com.cloud.kubernetes.cluster.dao.KubernetesClusterDetailsDao; +import com.cloud.kubernetes.cluster.dao.KubernetesClusterVmMapDao; +import com.cloud.kubernetes.version.KubernetesSupportedVersion; +import com.cloud.kubernetes.version.KubernetesSupportedVersionVO; +import com.cloud.kubernetes.version.KubernetesVersionManagerImpl; +import com.cloud.kubernetes.version.dao.KubernetesSupportedVersionDao; +import com.cloud.network.IpAddress; +import com.cloud.network.Network; +import com.cloud.network.Network.Service; +import com.cloud.network.NetworkModel; +import com.cloud.network.NetworkService; +import com.cloud.network.Networks; +import com.cloud.network.PhysicalNetwork; +import com.cloud.network.dao.FirewallRulesDao; +import com.cloud.network.dao.NetworkDao; +import com.cloud.network.dao.NetworkVO; +import com.cloud.network.dao.PhysicalNetworkDao; +import com.cloud.network.rules.FirewallRule; +import com.cloud.network.rules.FirewallRuleVO; +import com.cloud.offering.NetworkOffering; +import com.cloud.offering.ServiceOffering; +import com.cloud.offerings.NetworkOfferingServiceMapVO; +import com.cloud.offerings.NetworkOfferingVO; +import com.cloud.offerings.dao.NetworkOfferingDao; +import com.cloud.offerings.dao.NetworkOfferingServiceMapDao; +import com.cloud.org.Cluster; +import com.cloud.org.Grouping; +import 
com.cloud.projects.Project; +import com.cloud.resource.ResourceManager; +import com.cloud.service.ServiceOfferingVO; +import com.cloud.service.dao.ServiceOfferingDao; +import com.cloud.storage.VMTemplateVO; +import com.cloud.storage.VMTemplateZoneVO; +import com.cloud.storage.dao.VMTemplateDao; +import com.cloud.storage.dao.VMTemplateZoneDao; +import com.cloud.user.Account; +import com.cloud.user.AccountManager; +import com.cloud.user.AccountService; +import com.cloud.user.SSHKeyPairVO; +import com.cloud.user.dao.SSHKeyPairDao; +import com.cloud.utils.Pair; +import com.cloud.utils.Ternary; +import com.cloud.utils.component.ComponentContext; +import com.cloud.utils.component.ManagerBase; +import com.cloud.utils.concurrency.NamedThreadFactory; +import com.cloud.utils.db.Filter; +import com.cloud.utils.db.GlobalLock; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; +import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallback; +import com.cloud.utils.db.TransactionCallbackNoReturn; +import com.cloud.utils.db.TransactionStatus; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.fsm.NoTransitionException; +import com.cloud.utils.fsm.StateMachine2; +import com.cloud.utils.net.NetUtils; +import com.cloud.vm.UserVmVO; +import com.cloud.vm.VMInstanceVO; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.dao.UserVmDao; +import com.cloud.vm.dao.VMInstanceDao; +import com.google.common.base.Strings; + +public class KubernetesClusterManagerImpl extends ManagerBase implements KubernetesClusterService { + + private static final Logger LOGGER = Logger.getLogger(KubernetesClusterManagerImpl.class); + private static final String DEFAULT_NETWORK_OFFERING_FOR_KUBERNETES_SERVICE_NAME = "DefaultNetworkOfferingforKubernetesService"; + + protected StateMachine2 _stateMachine = KubernetesCluster.State.getStateMachine(); + + ScheduledExecutorService _gcExecutor; + ScheduledExecutorService 
_stateScanner; + + @Inject + public KubernetesClusterDao kubernetesClusterDao; + @Inject + public KubernetesClusterVmMapDao kubernetesClusterVmMapDao; + @Inject + public KubernetesClusterDetailsDao kubernetesClusterDetailsDao; + @Inject + public KubernetesSupportedVersionDao kubernetesSupportedVersionDao; + @Inject + protected SSHKeyPairDao sshKeyPairDao; + @Inject + protected DataCenterDao dataCenterDao; + @Inject + protected ClusterDao clusterDao; + @Inject + protected ClusterDetailsDao clusterDetailsDao; + @Inject + protected ServiceOfferingDao serviceOfferingDao; + @Inject + protected VMTemplateDao templateDao; + @Inject + protected VMTemplateZoneDao templateZoneDao; + @Inject + protected TemplateJoinDao templateJoinDao; + @Inject + protected AccountService accountService; + @Inject + protected AccountManager accountManager; + @Inject + protected VMInstanceDao vmInstanceDao; + @Inject + protected UserVmDao userVmDao; + @Inject + protected NetworkOfferingDao networkOfferingDao; + @Inject + protected NetworkOfferingJoinDao networkOfferingJoinDao; + @Inject + protected NetworkOfferingServiceMapDao networkOfferingServiceMapDao; + @Inject + protected NetworkService networkService; + @Inject + protected NetworkModel networkModel; + @Inject + protected PhysicalNetworkDao physicalNetworkDao; + @Inject + protected NetworkOrchestrationService networkMgr; + @Inject + protected NetworkDao networkDao; + @Inject + protected CapacityManager capacityManager; + @Inject + protected ResourceManager resourceManager; + @Inject + protected FirewallRulesDao firewallRulesDao; + + private void logMessage(final Level logLevel, final String message, final Exception e) { + if (logLevel == Level.WARN) { + if (e != null) { + LOGGER.warn(message, e); + } else { + LOGGER.warn(message); + } + } else { + if (e != null) { + LOGGER.error(message, e); + } else { + LOGGER.error(message); + } + } + } + + private void logTransitStateAndThrow(final Level logLevel, final String message, final Long 
kubernetesClusterId, final KubernetesCluster.Event event, final Exception e) throws CloudRuntimeException { + logMessage(logLevel, message, e); + if (kubernetesClusterId != null && event != null) { + stateTransitTo(kubernetesClusterId, event); + } + if (e == null) { + throw new CloudRuntimeException(message); + } + throw new CloudRuntimeException(message, e); + } + + private void logAndThrow(final Level logLevel, final String message) throws CloudRuntimeException { + logTransitStateAndThrow(logLevel, message, null, null, null); + } + + private void logAndThrow(final Level logLevel, final String message, final Exception ex) throws CloudRuntimeException { + logTransitStateAndThrow(logLevel, message, null, null, ex); + } + + private boolean isKubernetesServiceTemplateConfigured(DataCenter zone) { + // Check Kubernetes VM template for zone + boolean isHyperVAvailable = false; + boolean isKVMAvailable = false; + boolean isVMwareAvailable = false; + boolean isXenserverAvailable = false; + List clusters = clusterDao.listByZoneId(zone.getId()); + for (ClusterVO clusterVO : clusters) { + if (Hypervisor.HypervisorType.Hyperv.equals(clusterVO.getHypervisorType())) { + isHyperVAvailable = true; + } + if (Hypervisor.HypervisorType.KVM.equals(clusterVO.getHypervisorType())) { + isKVMAvailable = true; + } + if (Hypervisor.HypervisorType.VMware.equals(clusterVO.getHypervisorType())) { + isVMwareAvailable = true; + } + if (Hypervisor.HypervisorType.XenServer.equals(clusterVO.getHypervisorType())) { + isXenserverAvailable = true; + } + } + List> templatePairs = new ArrayList<>(); + if (isHyperVAvailable) { + templatePairs.add(new Pair<>(KubernetesClusterHyperVTemplateName.key(), KubernetesClusterHyperVTemplateName.value())); + } + if (isKVMAvailable) { + templatePairs.add(new Pair<>(KubernetesClusterKVMTemplateName.key(), KubernetesClusterKVMTemplateName.value())); + } + if (isVMwareAvailable) { + templatePairs.add(new Pair<>(KubernetesClusterVMwareTemplateName.key(), 
KubernetesClusterVMwareTemplateName.value())); + } + if (isXenserverAvailable) { + templatePairs.add(new Pair<>(KubernetesClusterXenserverTemplateName.key(), KubernetesClusterXenserverTemplateName.value())); + } + for (Pair templatePair : templatePairs) { + String templateKey = templatePair.first(); + String templateName = templatePair.second(); + if (Strings.isNullOrEmpty(templateName)) { + LOGGER.warn(String.format("Global setting %s is empty. Template name need to be specified for Kubernetes service to function", templateKey)); + return false; + } + final VMTemplateVO template = templateDao.findByTemplateName(templateName); + if (template == null) { + LOGGER.warn(String.format("Unable to find the template %s to be used for provisioning Kubernetes cluster nodes", templateName)); + return false; + } + List listZoneTemplate = templateZoneDao.listByZoneTemplate(zone.getId(), template.getId()); + if (listZoneTemplate == null || listZoneTemplate.isEmpty()) { + LOGGER.warn(String.format("The template ID: %s, name: %s is not available for use in zone ID: %s provisioning Kubernetes cluster nodes", template.getUuid(), templateName, zone.getUuid())); + return false; + } + } + return true; + } + + private boolean isKubernetesServiceNetworkOfferingConfigured(DataCenter zone) { + // Check network offering + String networkOfferingName = KubernetesClusterNetworkOffering.value(); + if (networkOfferingName == null || networkOfferingName.isEmpty()) { + LOGGER.warn(String.format("Global setting %s is empty. 
Admin has not yet specified the network offering to be used for provisioning isolated network for the cluster", KubernetesClusterNetworkOffering.key())); + return false; + } + NetworkOfferingVO networkOffering = networkOfferingDao.findByUniqueName(networkOfferingName); + if (networkOffering == null) { + LOGGER.warn(String.format("Unable to find the network offering %s to be used for provisioning Kubernetes cluster", networkOfferingName)); + return false; + } + if (networkOffering.getState() == NetworkOffering.State.Disabled) { + LOGGER.warn(String.format("Network offering ID: %s is not enabled", networkOffering.getUuid())); + return false; + } + List services = networkOfferingServiceMapDao.listServicesForNetworkOffering(networkOffering.getId()); + if (services == null || services.isEmpty() || !services.contains("SourceNat")) { + LOGGER.warn(String.format("Network offering ID: %s does not have necessary services to provision Kubernetes cluster", networkOffering.getUuid())); + return false; + } + if (!networkOffering.isEgressDefaultPolicy()) { + LOGGER.warn(String.format("Network offering ID: %s has egress default policy turned off should be on to provision Kubernetes cluster", networkOffering.getUuid())); + return false; + } + boolean offeringAvailableForZone = false; + List networkOfferingJoinVOs = networkOfferingJoinDao.findByZoneId(zone.getId(), true); + for (NetworkOfferingJoinVO networkOfferingJoinVO : networkOfferingJoinVOs) { + if (networkOffering.getId() == networkOfferingJoinVO.getId()) { + offeringAvailableForZone = true; + break; + } + } + if (!offeringAvailableForZone) { + LOGGER.warn(String.format("Network offering ID: %s is not available for zone ID: %s", networkOffering.getUuid(), zone.getUuid())); + return false; + } + long physicalNetworkId = networkModel.findPhysicalNetworkId(zone.getId(), networkOffering.getTags(), networkOffering.getTrafficType()); + PhysicalNetwork physicalNetwork = physicalNetworkDao.findById(physicalNetworkId); + if 
(physicalNetwork == null) { + LOGGER.warn(String.format("Unable to find physical network with tag: %s", networkOffering.getTags())); + return false; + } + return true; + } + + private boolean isKubernetesServiceConfigured(DataCenter zone) { + if (!isKubernetesServiceTemplateConfigured(zone)) { + return false; + } + if (!isKubernetesServiceNetworkOfferingConfigured(zone)) { + return false; + } + return true; + } + + private IpAddress getSourceNatIp(Network network) { + List addresses = networkModel.listPublicIpsAssignedToGuestNtwk(network.getId(), true); + if (CollectionUtils.isEmpty(addresses)) { + return null; + } + for (IpAddress address : addresses) { + if (address.isSourceNat()) { + return address; + } + } + return null; + } + + private VMTemplateVO getKubernetesServiceTemplate(Hypervisor.HypervisorType hypervisorType) { + String tempalteName = null; + switch (hypervisorType) { + case Hyperv: + tempalteName = KubernetesClusterHyperVTemplateName.value(); + break; + case KVM: + tempalteName = KubernetesClusterKVMTemplateName.value(); + break; + case VMware: + tempalteName = KubernetesClusterVMwareTemplateName.value(); + break; + case XenServer: + tempalteName = KubernetesClusterXenserverTemplateName.value(); + break; + } + return templateDao.findByTemplateName(tempalteName); + } + + private boolean validateIsolatedNetwork(Network network, int clusterTotalNodeCount) { + if (Network.GuestType.Isolated.equals(network.getGuestType())) { + if (Network.State.Allocated.equals(network.getState())) { // Allocated networks won't have IP and rules + return true; + } + IpAddress sourceNatIp = getSourceNatIp(network); + if (sourceNatIp == null) { + throw new InvalidParameterValueException(String.format("Network ID: %s does not have a source NAT IP associated with it. 
To provision a Kubernetes Cluster, source NAT IP is required", network.getUuid())); + } + List rules = firewallRulesDao.listByIpAndPurposeAndNotRevoked(sourceNatIp.getId(), FirewallRule.Purpose.Firewall); + for (FirewallRuleVO rule : rules) { + Integer startPort = rule.getSourcePortStart(); + Integer endPort = rule.getSourcePortEnd(); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Network rule : " + startPort + " " + endPort); + } + if (startPort <= KubernetesClusterActionWorker.CLUSTER_API_PORT && KubernetesClusterActionWorker.CLUSTER_API_PORT <= endPort) { + throw new InvalidParameterValueException(String.format("Network ID: %s has conflicting firewall rules to provision Kubernetes cluster for API access", network.getUuid())); + } + if (startPort <= KubernetesClusterActionWorker.CLUSTER_NODES_DEFAULT_START_SSH_PORT && KubernetesClusterActionWorker.CLUSTER_NODES_DEFAULT_START_SSH_PORT + clusterTotalNodeCount <= endPort) { + throw new InvalidParameterValueException(String.format("Network ID: %s has conflicting firewall rules to provision Kubernetes cluster for node VM SSH access", network.getUuid())); + } + } + rules = firewallRulesDao.listByIpAndPurposeAndNotRevoked(sourceNatIp.getId(), FirewallRule.Purpose.PortForwarding); + for (FirewallRuleVO rule : rules) { + Integer startPort = rule.getSourcePortStart(); + Integer endPort = rule.getSourcePortEnd(); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Network rule : " + startPort + " " + endPort); + } + if (startPort <= KubernetesClusterActionWorker.CLUSTER_API_PORT && KubernetesClusterActionWorker.CLUSTER_API_PORT <= endPort) { + throw new InvalidParameterValueException(String.format("Network ID: %s has conflicting port forwarding rules to provision Kubernetes cluster for API access", network.getUuid())); + } + if (startPort <= KubernetesClusterActionWorker.CLUSTER_NODES_DEFAULT_START_SSH_PORT && KubernetesClusterActionWorker.CLUSTER_NODES_DEFAULT_START_SSH_PORT + clusterTotalNodeCount <= endPort) { + throw new 
InvalidParameterValueException(String.format("Network ID: %s has conflicting port forwarding rules to provision Kubernetes cluster for node VM SSH access", network.getUuid())); + } + } + } + return true; + } + + private boolean validateNetwork(Network network, int clusterTotalNodeCount) { + NetworkOffering networkOffering = networkOfferingDao.findById(network.getNetworkOfferingId()); + if (networkOffering.isSystemOnly()) { + throw new InvalidParameterValueException(String.format("Network ID: %s is for system use only", network.getUuid())); + } + if (!networkModel.areServicesSupportedInNetwork(network.getId(), Service.UserData)) { + throw new InvalidParameterValueException(String.format("Network ID: %s does not support userdata that is required for Kubernetes cluster", network.getUuid())); + } + if (!networkModel.areServicesSupportedInNetwork(network.getId(), Service.Firewall)) { + throw new InvalidParameterValueException(String.format("Network ID: %s does not support firewall that is required for Kubernetes cluster", network.getUuid())); + } + if (!networkModel.areServicesSupportedInNetwork(network.getId(), Service.PortForwarding)) { + throw new InvalidParameterValueException(String.format("Network ID: %s does not support port forwarding that is required for Kubernetes cluster", network.getUuid())); + } + if (!networkModel.areServicesSupportedInNetwork(network.getId(), Service.Dhcp)) { + throw new InvalidParameterValueException(String.format("Network ID: %s does not support DHCP that is required for Kubernetes cluster", network.getUuid())); + } + validateIsolatedNetwork(network, clusterTotalNodeCount); + return true; + } + + private boolean validateServiceOffering(final ServiceOffering serviceOffering, final KubernetesSupportedVersion version) { + if (serviceOffering.isDynamic()) { + throw new InvalidParameterValueException(String.format("Custom service offerings are not supported for creating clusters, service offering ID: %s", serviceOffering.getUuid())); + } + 
if (serviceOffering.getCpu() < MIN_KUBERNETES_CLUSTER_NODE_CPU || serviceOffering.getRamSize() < MIN_KUBERNETES_CLUSTER_NODE_RAM_SIZE) { + throw new InvalidParameterValueException(String.format("Kubernetes cluster cannot be created with service offering ID: %s, Kubernetes cluster template(CoreOS) needs minimum %d vCPUs and %d MB RAM", serviceOffering.getUuid(), MIN_KUBERNETES_CLUSTER_NODE_CPU, MIN_KUBERNETES_CLUSTER_NODE_RAM_SIZE)); + } + if (serviceOffering.getCpu() < version.getMinimumCpu()) { + throw new InvalidParameterValueException(String.format("Kubernetes cluster cannot be created with service offering ID: %s, Kubernetes version ID: %s needs minimum %d vCPUs", serviceOffering.getUuid(), version.getUuid(), version.getMinimumCpu())); + } + if (serviceOffering.getRamSize() < version.getMinimumRamSize()) { + throw new InvalidParameterValueException(String.format("Kubernetes cluster cannot be created with service offering ID: %s, associated Kubernetes version ID: %s needs minimum %d MB RAM", serviceOffering.getUuid(), version.getUuid(), version.getMinimumRamSize())); + } + return true; + } + + private void validateDockerRegistryParams(final String dockerRegistryUserName, + final String dockerRegistryPassword, + final String dockerRegistryUrl, + final String dockerRegistryEmail) { + // if no params related to docker registry specified then nothing to validate so return true + if ((dockerRegistryUserName == null || dockerRegistryUserName.isEmpty()) && + (dockerRegistryPassword == null || dockerRegistryPassword.isEmpty()) && + (dockerRegistryUrl == null || dockerRegistryUrl.isEmpty()) && + (dockerRegistryEmail == null || dockerRegistryEmail.isEmpty())) { + return; + } + + // all params related to docker registry must be specified or nothing + if (!((dockerRegistryUserName != null && !dockerRegistryUserName.isEmpty()) && + (dockerRegistryPassword != null && !dockerRegistryPassword.isEmpty()) && + (dockerRegistryUrl != null && !dockerRegistryUrl.isEmpty()) && + 
(dockerRegistryEmail != null && !dockerRegistryEmail.isEmpty()))) { + throw new InvalidParameterValueException("All the docker private registry parameters (username, password, url, email) required are specified"); + } + + try { + URL url = new URL(dockerRegistryUrl); + } catch (MalformedURLException e) { + throw new InvalidParameterValueException("Invalid docker registry url specified"); + } + + Pattern VALID_EMAIL_ADDRESS_REGEX = Pattern.compile("^[A-Z0-9._%+-]+@[A-Z0-9.-]+\\.[A-Z]{2,6}$", Pattern.CASE_INSENSITIVE); + Matcher matcher = VALID_EMAIL_ADDRESS_REGEX.matcher(dockerRegistryEmail); + if (!matcher.find()) { + throw new InvalidParameterValueException("Invalid docker registry email specified"); + } + } + + private DeployDestination plan(final long nodesCount, final DataCenter zone, final ServiceOffering offering) throws InsufficientServerCapacityException { + final int cpu_requested = offering.getCpu() * offering.getSpeed(); + final long ram_requested = offering.getRamSize() * 1024L * 1024L; + List hosts = resourceManager.listAllHostsInOneZoneByType(Type.Routing, zone.getId()); + final Map> hosts_with_resevered_capacity = new ConcurrentHashMap>(); + for (HostVO h : hosts) { + hosts_with_resevered_capacity.put(h.getUuid(), new Pair(h, 0)); + } + boolean suitable_host_found = false; + Cluster planCluster = null; + for (int i = 1; i <= nodesCount + 1; i++) { + suitable_host_found = false; + for (Map.Entry> hostEntry : hosts_with_resevered_capacity.entrySet()) { + Pair hp = hostEntry.getValue(); + HostVO h = hp.first(); + int reserved = hp.second(); + reserved++; + ClusterVO cluster = clusterDao.findById(h.getClusterId()); + ClusterDetailsVO cluster_detail_cpu = clusterDetailsDao.findDetail(cluster.getId(), "cpuOvercommitRatio"); + ClusterDetailsVO cluster_detail_ram = clusterDetailsDao.findDetail(cluster.getId(), "memoryOvercommitRatio"); + Float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue()); + Float memoryOvercommitRatio = 
Float.parseFloat(cluster_detail_ram.getValue()); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(String.format("Checking host ID: %s for capacity already reserved %d", h.getUuid(), reserved)); + } + if (capacityManager.checkIfHostHasCapacity(h.getId(), cpu_requested * reserved, ram_requested * reserved, false, cpuOvercommitRatio, memoryOvercommitRatio, true)) { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(String.format("Found host ID: %s for with enough capacity, CPU=%d RAM=%d", h.getUuid(), cpu_requested * reserved, ram_requested * reserved)); + } + hostEntry.setValue(new Pair(h, reserved)); + suitable_host_found = true; + planCluster = cluster; + break; + } + } + if (!suitable_host_found) { + if (LOGGER.isInfoEnabled()) { + LOGGER.info(String.format("Suitable hosts not found in datacenter ID: %s for node %d", zone.getUuid(), i)); + } + break; + } + } + if (suitable_host_found) { + if (LOGGER.isInfoEnabled()) { + LOGGER.info(String.format("Suitable hosts found in datacenter ID: %s, creating deployment destination", zone.getUuid())); + } + return new DeployDestination(zone, null, planCluster, null); + } + String msg = String.format("Cannot find enough capacity for Kubernetes cluster(requested cpu=%1$s memory=%2$s)", + cpu_requested * nodesCount, ram_requested * nodesCount); + LOGGER.warn(msg); + throw new InsufficientServerCapacityException(msg, DataCenter.class, zone.getId()); + } + + @Override + public KubernetesClusterResponse createKubernetesClusterResponse(long kubernetesClusterId) { + KubernetesClusterVO kubernetesCluster = kubernetesClusterDao.findById(kubernetesClusterId); + KubernetesClusterResponse response = new KubernetesClusterResponse(); + response.setObjectName(KubernetesCluster.class.getSimpleName().toLowerCase()); + response.setId(kubernetesCluster.getUuid()); + response.setName(kubernetesCluster.getName()); + response.setDescription(kubernetesCluster.getDescription()); + DataCenterVO zone = 
ApiDBUtils.findZoneById(kubernetesCluster.getZoneId()); + response.setZoneId(zone.getUuid()); + response.setZoneName(zone.getName()); + response.setMasterNodes(kubernetesCluster.getMasterNodeCount()); + response.setClusterSize(kubernetesCluster.getNodeCount()); + VMTemplateVO template = ApiDBUtils.findTemplateById(kubernetesCluster.getTemplateId()); + response.setTemplateId(template.getUuid()); + ServiceOfferingVO offering = serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId()); + response.setServiceOfferingId(offering.getUuid()); + response.setServiceOfferingName(offering.getName()); + KubernetesSupportedVersionVO version = kubernetesSupportedVersionDao.findById(kubernetesCluster.getKubernetesVersionId()); + if (version != null) { + response.setKubernetesVersionId(version.getUuid()); + response.setKubernetesVersionName(version.getName()); + } + Account account = ApiDBUtils.findAccountById(kubernetesCluster.getAccountId()); + if (account.getType() == Account.ACCOUNT_TYPE_PROJECT) { + Project project = ApiDBUtils.findProjectByProjectAccountId(account.getId()); + response.setProjectId(project.getUuid()); + response.setProjectName(project.getName()); + } else { + response.setAccountName(account.getAccountName()); + } + Domain domain = ApiDBUtils.findDomainById(kubernetesCluster.getDomainId()); + response.setDomainId(domain.getUuid()); + response.setDomainName(domain.getName()); + response.setKeypair(kubernetesCluster.getKeyPair()); + response.setState(kubernetesCluster.getState().toString()); + response.setCores(String.valueOf(kubernetesCluster.getCores())); + response.setMemory(String.valueOf(kubernetesCluster.getMemory())); + NetworkVO ntwk = networkDao.findByIdIncludingRemoved(kubernetesCluster.getNetworkId()); + response.setEndpoint(kubernetesCluster.getEndpoint()); + response.setNetworkId(ntwk.getUuid()); + response.setAssociatedNetworkName(ntwk.getName()); + List vmIds = new ArrayList(); + List vmList = 
kubernetesClusterVmMapDao.listByClusterId(kubernetesCluster.getId()); + if (vmList != null && !vmList.isEmpty()) { + for (KubernetesClusterVmMapVO vmMapVO : vmList) { + UserVmVO userVM = userVmDao.findById(vmMapVO.getVmId()); + if (userVM != null) { + vmIds.add(userVM.getUuid()); + } + } + } + response.setVirtualMachineIds(vmIds); + return response; + } + + private void validateKubernetesClusterCreateParameters(final CreateKubernetesClusterCmd cmd) throws CloudRuntimeException { + final String name = cmd.getName(); + final Long zoneId = cmd.getZoneId(); + final Long kubernetesVersionId = cmd.getKubernetesVersionId(); + final Long serviceOfferingId = cmd.getServiceOfferingId(); + final Account owner = accountService.getActiveAccountById(cmd.getEntityOwnerId()); + final Long networkId = cmd.getNetworkId(); + final String sshKeyPair = cmd.getSSHKeyPairName(); + final Long masterNodeCount = cmd.getMasterNodes(); + final Long clusterSize = cmd.getClusterSize(); + final String dockerRegistryUserName = cmd.getDockerRegistryUserName(); + final String dockerRegistryPassword = cmd.getDockerRegistryPassword(); + final String dockerRegistryUrl = cmd.getDockerRegistryUrl(); + final String dockerRegistryEmail = cmd.getDockerRegistryEmail(); + final Long nodeRootDiskSize = cmd.getNodeRootDiskSize(); + final String externalLoadBalancerIpAddress = cmd.getExternalLoadBalancerIpAddress(); + + if (name == null || name.isEmpty()) { + throw new InvalidParameterValueException("Invalid name for the Kubernetes cluster name:" + name); + } + + if (masterNodeCount < 1 || masterNodeCount > 100) { + throw new InvalidParameterValueException("Invalid cluster master nodes count: " + masterNodeCount); + } + + if (clusterSize < 1 || clusterSize > 100) { + throw new InvalidParameterValueException("Invalid cluster size: " + clusterSize); + } + + DataCenter zone = dataCenterDao.findById(zoneId); + if (zone == null) { + throw new InvalidParameterValueException("Unable to find zone by ID: " + zoneId); + 
}

        if (Grouping.AllocationState.Disabled == zone.getAllocationState()) {
            throw new PermissionDeniedException(String.format("Cannot perform this operation, zone ID: %s is currently disabled", zone.getUuid()));
        }

        if (!isKubernetesServiceConfigured(zone)) {
            throw new CloudRuntimeException("Kubernetes service has not been configured properly to provision Kubernetes clusters");
        }

        final KubernetesSupportedVersion clusterKubernetesVersion = kubernetesSupportedVersionDao.findById(kubernetesVersionId);
        if (clusterKubernetesVersion == null) {
            throw new InvalidParameterValueException("Unable to find given Kubernetes version in supported versions");
        }
        if (!KubernetesSupportedVersion.State.Enabled.equals(clusterKubernetesVersion.getState())) {
            throw new InvalidParameterValueException(String.format("Kubernetes version ID: %s is in %s state", clusterKubernetesVersion.getUuid(), clusterKubernetesVersion.getState()));
        }
        // Zone-restricted versions may only be used in their own zone.
        if (clusterKubernetesVersion.getZoneId() != null && !clusterKubernetesVersion.getZoneId().equals(zone.getId())) {
            throw new InvalidParameterValueException(String.format("Kubernetes version ID: %s is not available for zone ID: %s", clusterKubernetesVersion.getUuid(), zone.getUuid()));
        }
        if (masterNodeCount > 1) {
            try {
                if (KubernetesVersionManagerImpl.compareSemanticVersions(clusterKubernetesVersion.getSemanticVersion(), MIN_KUBERNETES_VERSION_HA_SUPPORT) < 0) {
                    throw new InvalidParameterValueException(String.format("HA support is available only for Kubernetes version %s and above. Given version ID: %s is %s", MIN_KUBERNETES_VERSION_HA_SUPPORT, clusterKubernetesVersion.getUuid(), clusterKubernetesVersion.getSemanticVersion()));
                }
            } catch (IllegalArgumentException e) {
                logAndThrow(Level.WARN, String.format("Unable to compare Kubernetes version for given version ID: %s with %s", clusterKubernetesVersion.getUuid(), MIN_KUBERNETES_VERSION_HA_SUPPORT), e);
            }
        }
        // Fix: a second, redundant zone-availability check (previously comparing the
        // boxed Long zone id with '!=') has been removed; the equals() check above covers it.

        TemplateJoinVO iso = templateJoinDao.findById(clusterKubernetesVersion.getIsoId());
        if (iso == null) {
            throw new InvalidParameterValueException(String.format("Invalid ISO associated with version ID: %s", clusterKubernetesVersion.getUuid()));
        }
        if (!ObjectInDataStoreStateMachine.State.Ready.equals(iso.getState())) {
            throw new InvalidParameterValueException(String.format("ISO associated with version ID: %s is not in Ready state", clusterKubernetesVersion.getUuid()));
        }

        ServiceOffering serviceOffering = serviceOfferingDao.findById(serviceOfferingId);
        if (serviceOffering == null) {
            throw new InvalidParameterValueException("No service offering with ID: " + serviceOfferingId);
        }

        if (sshKeyPair != null && !sshKeyPair.isEmpty()) {
            SSHKeyPairVO sshKeyPairVO = sshKeyPairDao.findByName(owner.getAccountId(), owner.getDomainId(), sshKeyPair);
            if (sshKeyPairVO == null) {
                throw new InvalidParameterValueException(String.format("Given SSH key pair with name: %s was not found for the account %s", sshKeyPair, owner.getAccountName()));
            }
        }

        if (nodeRootDiskSize != null && nodeRootDiskSize <= 0) {
            throw new InvalidParameterValueException(String.format("Invalid value for %s", ApiConstants.NODE_ROOT_DISK_SIZE));
        }

        if (!validateServiceOffering(serviceOffering, clusterKubernetesVersion)) {
            // Fix: the offering UUID is now substituted; previously the literal "%s" was thrown.
            throw new InvalidParameterValueException(String.format("Given service offering ID: %s is not suitable for Kubernetes cluster", serviceOffering.getUuid()));
        }

        validateDockerRegistryParams(dockerRegistryUserName, dockerRegistryPassword, dockerRegistryUrl, dockerRegistryEmail);

        Network network = null;
        if (networkId != null) {
            network = networkService.getNetwork(networkId);
            if (network == null) {
                throw new InvalidParameterValueException("Unable to find network with given ID");
            }
        }

        if (!Strings.isNullOrEmpty(externalLoadBalancerIpAddress)) {
            if (!NetUtils.isValidIp4(externalLoadBalancerIpAddress) && !NetUtils.isValidIp6(externalLoadBalancerIpAddress)) {
                throw new InvalidParameterValueException("Invalid external load balancer IP address");
            }
            if (network == null) {
                throw new InvalidParameterValueException(String.format("%s parameter must be specified along with %s parameter", ApiConstants.EXTERNAL_LOAD_BALANCER_IP_ADDRESS, ApiConstants.NETWORK_ID));
            }
            if (Network.GuestType.Shared.equals(network.getGuestType())) {
                throw new InvalidParameterValueException(String.format("%s parameter must be specified along with %s type of network", ApiConstants.EXTERNAL_LOAD_BALANCER_IP_ADDRESS, Network.GuestType.Shared.toString()));
            }
        }

        if (!KubernetesClusterExperimentalFeaturesEnabled.value() && (!Strings.isNullOrEmpty(dockerRegistryUrl) ||
                !Strings.isNullOrEmpty(dockerRegistryUserName) || !Strings.isNullOrEmpty(dockerRegistryEmail) || !Strings.isNullOrEmpty(dockerRegistryPassword))) {
            throw new CloudRuntimeException(String.format("Private registry for the Kubernetes cluster is an experimental feature. Use %s configuration for enabling experimental features", KubernetesClusterExperimentalFeaturesEnabled.key()));
        }
    }

    /**
     * Returns the network the cluster VMs will be placed in: validates a
     * user-supplied network (isolated networks must be unused and suitable;
     * shared HA clusters require an external LB IP), or creates a fresh guest
     * network from the configured Kubernetes network offering.
     */
    private Network getKubernetesClusterNetworkIfMissing(final String clusterName, final DataCenter zone, final Account owner, final int masterNodesCount,
            final int nodesCount, final String externalLoadBalancerIpAddress, final Long networkId) throws CloudRuntimeException {
        Network network = null;
        if (networkId != null) {
            network = networkDao.findById(networkId);
            if (Network.GuestType.Isolated.equals(network.getGuestType())) {
                if (kubernetesClusterDao.listByNetworkId(network.getId()).isEmpty()) {
                    if (!validateNetwork(network, masterNodesCount + nodesCount)) {
                        throw new InvalidParameterValueException(String.format("Network ID: %s is not suitable for Kubernetes cluster", network.getUuid()));
                    }
                    networkModel.checkNetworkPermissions(owner, network);
                } else {
                    throw new InvalidParameterValueException(String.format("Network ID: %s is already under use by another Kubernetes cluster", network.getUuid()));
                }
            } else if (Network.GuestType.Shared.equals(network.getGuestType())) {
                if (masterNodesCount > 1 && Strings.isNullOrEmpty(externalLoadBalancerIpAddress)) {
                    throw new InvalidParameterValueException(String.format("Multi-master, HA Kubernetes cluster with %s network ID: %s needs an external load balancer IP address. %s parameter can be used",
                            network.getGuestType().toString(), network.getUuid(), ApiConstants.EXTERNAL_LOAD_BALANCER_IP_ADDRESS));
                }
            }
        } else { // no network given: create a dedicated isolated network for the cluster
            NetworkOfferingVO networkOffering = networkOfferingDao.findByUniqueName(KubernetesClusterNetworkOffering.value());

            long physicalNetworkId = networkModel.findPhysicalNetworkId(zone.getId(), networkOffering.getTags(), networkOffering.getTrafficType());
            PhysicalNetwork physicalNetwork = physicalNetworkDao.findById(physicalNetworkId);

            if (LOGGER.isInfoEnabled()) {
                LOGGER.info(String.format("Creating network for account ID: %s from the network offering ID: %s as part of Kubernetes cluster: %s deployment process", owner.getUuid(), networkOffering.getUuid(), clusterName));
            }

            try {
                network = networkMgr.createGuestNetwork(networkOffering.getId(), clusterName + "-network", owner.getAccountName() + "-network",
                        null, null, null, false, null, owner, null, physicalNetwork, zone.getId(), ControlledEntity.ACLType.Account, null, null, null, null, true, null, null, null);
            } catch (ConcurrentOperationException | InsufficientCapacityException | ResourceAllocationException e) {
                // Fix: propagate the underlying cause instead of discarding it.
                logAndThrow(Level.ERROR, String.format("Unable to create network for the Kubernetes cluster: %s", clusterName), e);
            }
        }
        return network;
    }

    /**
     * Persists cluster details (registry credentials, external LB IP, generated
     * dashboard credentials, network-cleanup flag); the transactional body follows.
     */
    private void addKubernetesClusterDetails(final KubernetesCluster kubernetesCluster, final Network network, final CreateKubernetesClusterCmd cmd) {
        final String externalLoadBalancerIpAddress = cmd.getExternalLoadBalancerIpAddress();
        final String dockerRegistryUserName = cmd.getDockerRegistryUserName();
        final String dockerRegistryPassword = cmd.getDockerRegistryPassword();
        final String dockerRegistryUrl = cmd.getDockerRegistryUrl();
        final String dockerRegistryEmail = cmd.getDockerRegistryEmail();
        // Networks created by this manager (no user-supplied id) are cleaned up with the cluster.
        final boolean networkCleanup = cmd.getNetworkId() == null;
Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + List details = new ArrayList<>(); + if (Network.GuestType.Shared.equals(network.getGuestType()) && !Strings.isNullOrEmpty(externalLoadBalancerIpAddress)) { + details.add(new KubernetesClusterDetailsVO(kubernetesCluster.getId(), ApiConstants.EXTERNAL_LOAD_BALANCER_IP_ADDRESS, externalLoadBalancerIpAddress, true)); + } + if (!Strings.isNullOrEmpty(dockerRegistryUserName)) { + details.add(new KubernetesClusterDetailsVO(kubernetesCluster.getId(), ApiConstants.DOCKER_REGISTRY_USER_NAME, dockerRegistryUserName, true)); + } + if (!Strings.isNullOrEmpty(dockerRegistryPassword)) { + details.add(new KubernetesClusterDetailsVO(kubernetesCluster.getId(), ApiConstants.DOCKER_REGISTRY_PASSWORD, dockerRegistryPassword, false)); + } + if (!Strings.isNullOrEmpty(dockerRegistryUrl)) { + details.add(new KubernetesClusterDetailsVO(kubernetesCluster.getId(), ApiConstants.DOCKER_REGISTRY_URL, dockerRegistryUrl, true)); + } + if (!Strings.isNullOrEmpty(dockerRegistryEmail)) { + details.add(new KubernetesClusterDetailsVO(kubernetesCluster.getId(), ApiConstants.DOCKER_REGISTRY_EMAIL, dockerRegistryEmail, true)); + } + details.add(new KubernetesClusterDetailsVO(kubernetesCluster.getId(), ApiConstants.USERNAME, "admin", true)); + SecureRandom random = new SecureRandom(); + String randomPassword = new BigInteger(130, random).toString(32); + details.add(new KubernetesClusterDetailsVO(kubernetesCluster.getId(), ApiConstants.PASSWORD, randomPassword, false)); + details.add(new KubernetesClusterDetailsVO(kubernetesCluster.getId(), "networkCleanup", String.valueOf(networkCleanup), true)); + kubernetesClusterDetailsDao.saveDetails(details); + } + }); + } + + private void validateKubernetesClusterScaleParameters(ScaleKubernetesClusterCmd cmd) { + final Long kubernetesClusterId = cmd.getId(); + final Long serviceOfferingId = cmd.getServiceOfferingId(); + final 
Long clusterSize = cmd.getClusterSize();
        if (kubernetesClusterId == null || kubernetesClusterId < 1L) {
            throw new InvalidParameterValueException("Invalid Kubernetes cluster ID");
        }
        KubernetesClusterVO kubernetesCluster = kubernetesClusterDao.findById(kubernetesClusterId);
        if (kubernetesCluster == null || kubernetesCluster.getRemoved() != null) {
            throw new InvalidParameterValueException("Invalid Kubernetes cluster ID");
        }
        final DataCenter zone = dataCenterDao.findById(kubernetesCluster.getZoneId());
        if (zone == null) {
            logAndThrow(Level.WARN, String.format("Unable to find zone for Kubernetes cluster ID: %s", kubernetesCluster.getUuid()));
        }

        Account caller = CallContext.current().getCallingAccount();
        accountManager.checkAccess(caller, SecurityChecker.AccessType.OperateEntry, false, kubernetesCluster);

        // At least one scale dimension (new offering or new size) must be supplied.
        if (serviceOfferingId == null && clusterSize == null) {
            throw new InvalidParameterValueException(String.format("Kubernetes cluster ID: %s cannot be scaled, either a new service offering or a new cluster size must be passed", kubernetesCluster.getUuid()));
        }

        final KubernetesSupportedVersion clusterVersion = kubernetesSupportedVersionDao.findById(kubernetesCluster.getKubernetesVersionId());
        if (clusterVersion == null) {
            throw new CloudRuntimeException(String.format("Invalid Kubernetes version associated with Kubernetes cluster ID: %s", kubernetesCluster.getUuid()));
        }

        ServiceOffering serviceOffering = null;
        if (serviceOfferingId != null) {
            serviceOffering = serviceOfferingDao.findById(serviceOfferingId);
            if (serviceOffering == null) {
                throw new InvalidParameterValueException("Failed to find service offering ID: " + serviceOfferingId);
            } else {
                // Dynamic (custom) offerings have no fixed CPU/RAM to validate against.
                if (serviceOffering.isDynamic()) {
                    throw new InvalidParameterValueException(String.format("Custom service offerings are not supported for Kubernetes clusters. Kubernetes cluster ID: %s, service offering ID: %s", kubernetesCluster.getUuid(), serviceOffering.getUuid()));
                }
                // The offering must satisfy the template's global minimum resources...
                if (serviceOffering.getCpu() < MIN_KUBERNETES_CLUSTER_NODE_CPU || serviceOffering.getRamSize() < MIN_KUBERNETES_CLUSTER_NODE_RAM_SIZE) {
                    throw new InvalidParameterValueException(String.format("Kubernetes cluster ID: %s cannot be scaled with service offering ID: %s, Kubernetes cluster template(CoreOS) needs minimum %d vCPUs and %d MB RAM",
                        kubernetesCluster.getUuid(), serviceOffering.getUuid(), MIN_KUBERNETES_CLUSTER_NODE_CPU, MIN_KUBERNETES_CLUSTER_NODE_RAM_SIZE));
                }
                // ...and the minimums declared by the cluster's Kubernetes version.
                if (serviceOffering.getCpu() < clusterVersion.getMinimumCpu()) {
                    throw new InvalidParameterValueException(String.format("Kubernetes cluster ID: %s cannot be scaled with service offering ID: %s, associated Kubernetes version ID: %s needs minimum %d vCPUs",
                        kubernetesCluster.getUuid(), serviceOffering.getUuid(), clusterVersion.getUuid(), clusterVersion.getMinimumCpu()));
                }
                if (serviceOffering.getRamSize() < clusterVersion.getMinimumRamSize()) {
                    throw new InvalidParameterValueException(String.format("Kubernetes cluster ID: %s cannot be scaled with service offering ID: %s, associated Kubernetes version ID: %s needs minimum %d MB RAM",
                        kubernetesCluster.getUuid(), serviceOffering.getUuid(), clusterVersion.getUuid(), clusterVersion.getMinimumRamSize()));
                }
            }
            // Scaling down (less RAM, or less aggregate CPU capacity = cores * speed) is rejected.
            final ServiceOffering existingServiceOffering = serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId());
            if (serviceOffering.getRamSize() < existingServiceOffering.getRamSize() ||
                serviceOffering.getCpu() * serviceOffering.getSpeed() < existingServiceOffering.getCpu() * existingServiceOffering.getSpeed()) {
                logAndThrow(Level.WARN, String.format("Kubernetes cluster cannot be scaled down for service offering. Service offering ID: %s offers lesser resources as compared to service offering ID: %s of Kubernetes cluster ID: %s",
                    serviceOffering.getUuid(), existingServiceOffering.getUuid(), kubernetesCluster.getUuid()));
            }
        }

        // Scaling is only meaningful for Created, Running or Stopped clusters.
        if (!(kubernetesCluster.getState().equals(KubernetesCluster.State.Created) ||
            kubernetesCluster.getState().equals(KubernetesCluster.State.Running) ||
            kubernetesCluster.getState().equals(KubernetesCluster.State.Stopped))) {
            throw new PermissionDeniedException(String.format("Kubernetes cluster ID: %s is in %s state", kubernetesCluster.getUuid(), kubernetesCluster.getState().toString()));
        }

        if (clusterSize != null) {
            if (kubernetesCluster.getState().equals(KubernetesCluster.State.Stopped)) { // Cannot scale stopped cluster currently for cluster size
                throw new PermissionDeniedException(String.format("Kubernetes cluster ID: %s is in %s state", kubernetesCluster.getUuid(), kubernetesCluster.getState().toString()));
            }
            if (clusterSize < 1) {
                throw new InvalidParameterValueException(String.format("Kubernetes cluster ID: %s cannot be scaled for size, %d", kubernetesCluster.getUuid(), clusterSize));
            }
        }
    }

    /**
     * Validates an upgrade request: both cluster and target version IDs must reference
     * live rows, the caller must have operate access, the cluster must be Running, and
     * the target version must be Enabled and satisfiable by the cluster's offering.
     */
    private void validateKubernetesClusterUpgradeParameters(UpgradeKubernetesClusterCmd cmd) {
        // Validate parameters
        final Long kubernetesClusterId = cmd.getId();
        final Long upgradeVersionId = cmd.getKubernetesVersionId();
        if (kubernetesClusterId == null || kubernetesClusterId < 1L) {
            throw new InvalidParameterValueException("Invalid Kubernetes cluster ID");
        }
        if (upgradeVersionId == null || upgradeVersionId < 1L) {
            throw new InvalidParameterValueException("Invalid Kubernetes version ID");
        }
        KubernetesClusterVO kubernetesCluster = kubernetesClusterDao.findById(kubernetesClusterId);
        if (kubernetesCluster == null || kubernetesCluster.getRemoved() != null) {
            throw new InvalidParameterValueException("Invalid Kubernetes cluster ID");
        }
accountManager.checkAccess(CallContext.current().getCallingAccount(), SecurityChecker.AccessType.OperateEntry, false, kubernetesCluster);
        // Upgrades are only performed in-place on a running cluster.
        if (!KubernetesCluster.State.Running.equals(kubernetesCluster.getState())) {
            throw new InvalidParameterValueException(String.format("Kubernetes cluster ID: %s is not in running state", kubernetesCluster.getUuid()));
        }
        KubernetesSupportedVersionVO upgradeVersion = kubernetesSupportedVersionDao.findById(upgradeVersionId);
        if (upgradeVersion == null || upgradeVersion.getRemoved() != null) {
            throw new InvalidParameterValueException("Invalid Kubernetes version ID");
        }
        if (!KubernetesSupportedVersion.State.Enabled.equals(upgradeVersion.getState())) {
            throw new InvalidParameterValueException(String.format("Kubernetes version ID: %s for upgrade is in %s state", upgradeVersion.getUuid(), upgradeVersion.getState()));
        }
        KubernetesSupportedVersionVO clusterVersion = kubernetesSupportedVersionDao.findById(kubernetesCluster.getKubernetesVersionId());
        if (clusterVersion == null || clusterVersion.getRemoved() != null) {
            throw new InvalidParameterValueException(String.format("Invalid Kubernetes version associated with cluster ID: %s",
                kubernetesCluster.getUuid()));
        }
        // Include removed offerings: the cluster may reference an offering deleted since creation.
        final ServiceOffering serviceOffering = serviceOfferingDao.findByIdIncludingRemoved(kubernetesCluster.getServiceOfferingId());
        if (serviceOffering == null) {
            throw new CloudRuntimeException(String.format("Invalid service offering associated with Kubernetes cluster ID: %s", kubernetesCluster.getUuid()));
        }
        // The existing offering must meet the target version's minimum CPU and RAM.
        if (serviceOffering.getCpu() < upgradeVersion.getMinimumCpu()) {
            throw new InvalidParameterValueException(String.format("Kubernetes cluster ID: %s cannot be upgraded with Kubernetes version ID: %s which needs minimum %d vCPUs while associated service offering ID: %s offers only %d vCPUs",
                kubernetesCluster.getUuid(), upgradeVersion.getUuid(), upgradeVersion.getMinimumCpu(), serviceOffering.getUuid(), serviceOffering.getCpu()));
        }
        if (serviceOffering.getRamSize() < upgradeVersion.getMinimumRamSize()) {
            throw new InvalidParameterValueException(String.format("Kubernetes cluster ID: %s cannot be upgraded with Kubernetes version ID: %s which needs minimum %d MB RAM while associated service offering ID: %s offers only %d MB RAM",
                kubernetesCluster.getUuid(), upgradeVersion.getUuid(), upgradeVersion.getMinimumRamSize(), serviceOffering.getUuid(), serviceOffering.getRamSize()));
        }
        // Check upgradeVersion is either patch upgrade or immediate minor upgrade
        try {
            KubernetesVersionManagerImpl.canUpgradeKubernetesVersion(clusterVersion.getSemanticVersion(), upgradeVersion.getSemanticVersion());
        } catch (IllegalArgumentException e) {
            throw new InvalidParameterValueException(e.getMessage());
        }

        // The target version's binaries ISO must exist and be fully downloaded (Ready).
        TemplateJoinVO iso = templateJoinDao.findById(upgradeVersion.getIsoId());
        if (iso == null) {
            throw new InvalidParameterValueException(String.format("Invalid ISO associated with version ID: %s", upgradeVersion.getUuid()));
        }
        if (!ObjectInDataStoreStateMachine.State.Ready.equals(iso.getState())) {
            throw new InvalidParameterValueException(String.format("ISO associated with version ID: %s is not in Ready state", upgradeVersion.getUuid()));
        }
    }

    // Attempts the given state-machine event on the cluster; returns false (after logging)
    // when the transition is illegal for the cluster's current state.
    protected boolean stateTransitTo(long kubernetesClusterId, KubernetesCluster.Event e) {
        KubernetesClusterVO kubernetesCluster = kubernetesClusterDao.findById(kubernetesClusterId);
        try {
            return _stateMachine.transitTo(kubernetesCluster, e, null, kubernetesClusterDao);
        } catch (NoTransitionException nte) {
            LOGGER.warn(String.format("Failed to transition state of the Kubernetes cluster ID: %s in state %s on event %s", kubernetesCluster.getUuid(), kubernetesCluster.getState().toString(), e.toString()), nte);
            return false;
        }
    }

    @Override
    public KubernetesCluster createKubernetesCluster(CreateKubernetesClusterCmd cmd) throws CloudRuntimeException {
        if (!KubernetesServiceEnabled.value()) {
            logAndThrow(Level.ERROR, "Kubernetes
Service plugin is disabled"); + } + + validateKubernetesClusterCreateParameters(cmd); + + final DataCenter zone = dataCenterDao.findById(cmd.getZoneId()); + final long masterNodeCount = cmd.getMasterNodes(); + final long clusterSize = cmd.getClusterSize(); + final long totalNodeCount = masterNodeCount + clusterSize; + final ServiceOffering serviceOffering = serviceOfferingDao.findById(cmd.getServiceOfferingId()); + final Account owner = accountService.getActiveAccountById(cmd.getEntityOwnerId()); + final KubernetesSupportedVersion clusterKubernetesVersion = kubernetesSupportedVersionDao.findById(cmd.getKubernetesVersionId()); + + DeployDestination deployDestination = null; + try { + deployDestination = plan(totalNodeCount, zone, serviceOffering); + } catch (InsufficientCapacityException e) { + logAndThrow(Level.ERROR, String.format("Creating Kubernetes cluster failed due to insufficient capacity for %d cluster nodes in zone ID: %s with service offering ID: %s", totalNodeCount, zone.getUuid(), serviceOffering.getUuid())); + } + if (deployDestination == null || deployDestination.getCluster() == null) { + logAndThrow(Level.ERROR, String.format("Creating Kubernetes cluster failed due to error while finding suitable deployment plan for cluster in zone ID: %s", zone.getUuid())); + } + + final Network defaultNetwork = getKubernetesClusterNetworkIfMissing(cmd.getName(), zone, owner, (int)masterNodeCount, (int)clusterSize, cmd.getExternalLoadBalancerIpAddress(), cmd.getNetworkId()); + final VMTemplateVO finalTemplate = getKubernetesServiceTemplate(deployDestination.getCluster().getHypervisorType()); + final long cores = serviceOffering.getCpu() * (masterNodeCount + clusterSize); + final long memory = serviceOffering.getRamSize() * (masterNodeCount + clusterSize); + + final KubernetesClusterVO cluster = Transaction.execute(new TransactionCallback() { + @Override + public KubernetesClusterVO doInTransaction(TransactionStatus status) { + KubernetesClusterVO newCluster = new 
KubernetesClusterVO(cmd.getName(), cmd.getDisplayName(), zone.getId(), clusterKubernetesVersion.getId(), + serviceOffering.getId(), finalTemplate.getId(), defaultNetwork.getId(), owner.getDomainId(), + owner.getAccountId(), masterNodeCount, clusterSize, KubernetesCluster.State.Created, cmd.getSSHKeyPairName(), cores, memory, cmd.getNodeRootDiskSize(), ""); + kubernetesClusterDao.persist(newCluster); + return newCluster; + } + }); + + addKubernetesClusterDetails(cluster, defaultNetwork, cmd); + + if (LOGGER.isInfoEnabled()) { + LOGGER.info(String.format("Kubernetes cluster name: %s and ID: %s has been created", cluster.getName(), cluster.getUuid())); + } + return cluster; + } + + /** + * Start operation can be performed at two different life stages of Kubernetes cluster. First when a freshly created cluster + * in which case there are no resources provisioned for the Kubernetes cluster. So during start all the resources + * are provisioned from scratch. Second kind of start, happens on Stopped Kubernetes cluster, in which all resources + * are provisioned (like volumes, nics, networks etc). It just that VM's are not in running state. So just + * start the VM's (which can possibly implicitly start the network also). 
* @param kubernetesClusterId
     * @param onCreate
     * @return
     * @throws CloudRuntimeException
     */

    @Override
    public boolean startKubernetesCluster(long kubernetesClusterId, boolean onCreate) throws CloudRuntimeException {
        if (!KubernetesServiceEnabled.value()) {
            logAndThrow(Level.ERROR, "Kubernetes Service plugin is disabled");
        }
        final KubernetesClusterVO kubernetesCluster = kubernetesClusterDao.findById(kubernetesClusterId);
        if (kubernetesCluster == null) {
            throw new InvalidParameterValueException("Failed to find Kubernetes cluster with given ID");
        }
        if (kubernetesCluster.getRemoved() != null) {
            throw new InvalidParameterValueException(String.format("Kubernetes cluster ID: %s is already deleted", kubernetesCluster.getUuid()));
        }
        accountManager.checkAccess(CallContext.current().getCallingAccount(), SecurityChecker.AccessType.OperateEntry, false, kubernetesCluster);
        // Idempotent: a cluster already Running or Starting is treated as success.
        if (kubernetesCluster.getState().equals(KubernetesCluster.State.Running)) {
            if (LOGGER.isDebugEnabled()) {
                LOGGER.debug(String.format("Kubernetes cluster ID: %s is in running state", kubernetesCluster.getUuid()));
            }
            return true;
        }
        if (kubernetesCluster.getState().equals(KubernetesCluster.State.Starting)) {
            if (LOGGER.isDebugEnabled()) {
                LOGGER.debug(String.format("Kubernetes cluster ID: %s is already in starting state", kubernetesCluster.getUuid()));
            }
            return true;
        }
        final DataCenter zone = dataCenterDao.findById(kubernetesCluster.getZoneId());
        if (zone == null) {
            logAndThrow(Level.WARN, String.format("Unable to find zone for Kubernetes cluster ID: %s", kubernetesCluster.getUuid()));
        }
        KubernetesClusterStartWorker startWorker =
            new KubernetesClusterStartWorker(kubernetesCluster, this);
        startWorker = ComponentContext.inject(startWorker);
        if (onCreate) {
            // Start for Kubernetes cluster in 'Created' state
            return startWorker.startKubernetesClusterOnCreate();
        } else {
            // Start for Kubernetes cluster in 'Stopped' state. Resources are already provisioned, just need to be started
            return startWorker.startStoppedKubernetesCluster();
        }
    }

    /**
     * Stops a Kubernetes cluster's VMs via the stop worker. Idempotent for clusters
     * already Stopped or Stopping.
     */
    @Override
    public boolean stopKubernetesCluster(long kubernetesClusterId) throws CloudRuntimeException {
        if (!KubernetesServiceEnabled.value()) {
            logAndThrow(Level.ERROR, "Kubernetes Service plugin is disabled");
        }
        final KubernetesClusterVO kubernetesCluster = kubernetesClusterDao.findById(kubernetesClusterId);
        if (kubernetesCluster == null) {
            throw new InvalidParameterValueException("Failed to find Kubernetes cluster with given ID");
        }
        if (kubernetesCluster.getRemoved() != null) {
            throw new InvalidParameterValueException(String.format("Kubernetes cluster ID: %s is already deleted", kubernetesCluster.getUuid()));
        }
        accountManager.checkAccess(CallContext.current().getCallingAccount(), SecurityChecker.AccessType.OperateEntry, false, kubernetesCluster);
        if (kubernetesCluster.getState().equals(KubernetesCluster.State.Stopped)) {
            if (LOGGER.isDebugEnabled()) {
                LOGGER.debug(String.format("Kubernetes cluster ID: %s is already stopped", kubernetesCluster.getUuid()));
            }
            return true;
        }
        if (kubernetesCluster.getState().equals(KubernetesCluster.State.Stopping)) {
            if (LOGGER.isDebugEnabled()) {
                LOGGER.debug(String.format("Kubernetes cluster ID: %s is getting stopped", kubernetesCluster.getUuid()));
            }
            return true;
        }
        KubernetesClusterStopWorker stopWorker = new KubernetesClusterStopWorker(kubernetesCluster, this);
        stopWorker = ComponentContext.inject(stopWorker);
        return stopWorker.stop();
    }

    /**
     * Destroys a Kubernetes cluster and its resources via the destroy worker.
     */
    @Override
    public boolean deleteKubernetesCluster(Long kubernetesClusterId) throws CloudRuntimeException {
        if (!KubernetesServiceEnabled.value()) {
            logAndThrow(Level.ERROR, "Kubernetes Service plugin is disabled");
        }
        KubernetesClusterVO cluster = kubernetesClusterDao.findById(kubernetesClusterId);
        if (cluster == null) {
            throw new InvalidParameterValueException("Invalid cluster id specified");
        }
accountManager.checkAccess(CallContext.current().getCallingAccount(), SecurityChecker.AccessType.OperateEntry, false, cluster); + KubernetesClusterDestroyWorker destroyWorker = new KubernetesClusterDestroyWorker(cluster, this); + destroyWorker = ComponentContext.inject(destroyWorker); + return destroyWorker.destroy(); + } + + @Override + public ListResponse listKubernetesClusters(ListKubernetesClustersCmd cmd) { + if (!KubernetesServiceEnabled.value()) { + logAndThrow(Level.ERROR, "Kubernetes Service plugin is disabled"); + } + final CallContext ctx = CallContext.current(); + final Account caller = ctx.getCallingAccount(); + final Long clusterId = cmd.getId(); + final String state = cmd.getState(); + final String name = cmd.getName(); + final String keyword = cmd.getKeyword(); + List responsesList = new ArrayList(); + List permittedAccounts = new ArrayList(); + Ternary domainIdRecursiveListProject = new Ternary(cmd.getDomainId(), cmd.isRecursive(), null); + accountManager.buildACLSearchParameters(caller, clusterId, cmd.getAccountName(), cmd.getProjectId(), permittedAccounts, domainIdRecursiveListProject, cmd.listAll(), false); + Long domainId = domainIdRecursiveListProject.first(); + Boolean isRecursive = domainIdRecursiveListProject.second(); + Project.ListProjectResourcesCriteria listProjectResourcesCriteria = domainIdRecursiveListProject.third(); + Filter searchFilter = new Filter(KubernetesClusterVO.class, "id", true, cmd.getStartIndex(), cmd.getPageSizeVal()); + SearchBuilder sb = kubernetesClusterDao.createSearchBuilder(); + accountManager.buildACLSearchBuilder(sb, domainId, isRecursive, permittedAccounts, listProjectResourcesCriteria); + sb.and("id", sb.entity().getId(), SearchCriteria.Op.EQ); + sb.and("name", sb.entity().getName(), SearchCriteria.Op.EQ); + sb.and("keyword", sb.entity().getName(), SearchCriteria.Op.LIKE); + sb.and("state", sb.entity().getState(), SearchCriteria.Op.IN); + SearchCriteria sc = sb.create(); + 
accountManager.buildACLSearchCriteria(sc, domainId, isRecursive, permittedAccounts, listProjectResourcesCriteria); + if (state != null) { + sc.setParameters("state", state); + } + if(keyword != null){ + sc.setParameters("keyword", "%" + keyword + "%"); + } + if (clusterId != null) { + sc.setParameters("id", clusterId); + } + if (name != null) { + sc.setParameters("name", name); + } + List kubernetesClusters = kubernetesClusterDao.search(sc, searchFilter); + for (KubernetesClusterVO cluster : kubernetesClusters) { + KubernetesClusterResponse clusterResponse = createKubernetesClusterResponse(cluster.getId()); + responsesList.add(clusterResponse); + } + ListResponse response = new ListResponse(); + response.setResponses(responsesList); + return response; + } + + public KubernetesClusterConfigResponse getKubernetesClusterConfig(GetKubernetesClusterConfigCmd cmd) { + if (!KubernetesServiceEnabled.value()) { + logAndThrow(Level.ERROR, "Kubernetes Service plugin is disabled"); + } + final Long clusterId = cmd.getId(); + KubernetesCluster kubernetesCluster = kubernetesClusterDao.findById(clusterId); + if (kubernetesCluster == null) { + throw new InvalidParameterValueException("Invalid Kubernetes cluster ID specified"); + } + KubernetesClusterConfigResponse response = new KubernetesClusterConfigResponse(); + response.setId(kubernetesCluster.getUuid()); + response.setName(kubernetesCluster.getName()); + String configData = ""; + KubernetesClusterDetailsVO clusterDetailsVO = kubernetesClusterDetailsDao.findDetail(kubernetesCluster.getId(), "kubeConfigData"); + if (clusterDetailsVO != null && !Strings.isNullOrEmpty(clusterDetailsVO.getValue())) { + configData = new String(Base64.decodeBase64(clusterDetailsVO.getValue())); + } else { + if (KubernetesCluster.State.Starting.equals(kubernetesCluster.getState())) { + throw new CloudRuntimeException(String.format("Setup is in progress for Kubernetes cluster ID: %s, config not available at this moment", kubernetesCluster.getUuid())); 
+ } else { + throw new CloudRuntimeException((String.format("Config not found for Kubernetes cluster ID: %s", kubernetesCluster.getUuid()))); + } + } + response.setConfigData(configData); + response.setObjectName("clusterconfig"); + return response; + } + + @Override + public boolean scaleKubernetesCluster(ScaleKubernetesClusterCmd cmd) throws CloudRuntimeException { + if (!KubernetesServiceEnabled.value()) { + logAndThrow(Level.ERROR, "Kubernetes Service plugin is disabled"); + } + validateKubernetesClusterScaleParameters(cmd); + KubernetesClusterScaleWorker scaleWorker = + new KubernetesClusterScaleWorker(kubernetesClusterDao.findById(cmd.getId()), + serviceOfferingDao.findById(cmd.getServiceOfferingId()), cmd.getClusterSize(), this); + scaleWorker = ComponentContext.inject(scaleWorker); + return scaleWorker.scaleCluster(); + } + + @Override + public boolean upgradeKubernetesCluster(UpgradeKubernetesClusterCmd cmd) throws CloudRuntimeException { + if (!KubernetesServiceEnabled.value()) { + logAndThrow(Level.ERROR, "Kubernetes Service plugin is disabled"); + } + validateKubernetesClusterUpgradeParameters(cmd); + KubernetesClusterUpgradeWorker upgradeWorker = + new KubernetesClusterUpgradeWorker(kubernetesClusterDao.findById(cmd.getId()), + kubernetesSupportedVersionDao.findById(cmd.getKubernetesVersionId()), this); + upgradeWorker = ComponentContext.inject(upgradeWorker); + return upgradeWorker.upgradeCluster(); + } + + @Override + public List> getCommands() { + List> cmdList = new ArrayList>(); + if (!KubernetesServiceEnabled.value()) { + return cmdList; + } + cmdList.add(CreateKubernetesClusterCmd.class); + cmdList.add(StartKubernetesClusterCmd.class); + cmdList.add(StopKubernetesClusterCmd.class); + cmdList.add(DeleteKubernetesClusterCmd.class); + cmdList.add(ListKubernetesClustersCmd.class); + cmdList.add(GetKubernetesClusterConfigCmd.class); + cmdList.add(ScaleKubernetesClusterCmd.class); + cmdList.add(UpgradeKubernetesClusterCmd.class); + return cmdList; + } 
+ + @Override + public KubernetesCluster findById(final Long id) { + return kubernetesClusterDao.findById(id); + } + + // Garbage collector periodically run through the Kubernetes clusters marked for GC. For each Kubernetes cluster + // marked for GC, attempt is made to destroy cluster. + public class KubernetesClusterGarbageCollector extends ManagedContextRunnable { + @Override + protected void runInContext() { + GlobalLock gcLock = GlobalLock.getInternLock("KubernetesCluster.GC.Lock"); + try { + if (gcLock.lock(3)) { + try { + reallyRun(); + } finally { + gcLock.unlock(); + } + } + } finally { + gcLock.releaseRef(); + } + } + + public void reallyRun() { + try { + List kubernetesClusters = kubernetesClusterDao.findKubernetesClustersToGarbageCollect(); + for (KubernetesCluster kubernetesCluster : kubernetesClusters) { + if (LOGGER.isInfoEnabled()) { + LOGGER.info(String.format("Running Kubernetes cluster garbage collector on Kubernetes cluster ID: %s", kubernetesCluster.getUuid())); + } + try { + KubernetesClusterDestroyWorker destroyWorker = new KubernetesClusterDestroyWorker(kubernetesCluster, KubernetesClusterManagerImpl.this); + destroyWorker = ComponentContext.inject(destroyWorker); + if (destroyWorker.destroy()) { + if (LOGGER.isInfoEnabled()) { + LOGGER.info(String.format("Garbage collection complete for Kubernetes cluster ID: %s", kubernetesCluster.getUuid())); + } + } else { + LOGGER.warn(String.format("Garbage collection failed for Kubernetes cluster ID: %s, it will be attempted to garbage collected in next run", kubernetesCluster.getUuid())); + } + } catch (CloudRuntimeException e) { + LOGGER.warn(String.format("Failed to destroy Kubernetes cluster ID: %s during GC", kubernetesCluster.getUuid()), e); + // proceed further with rest of the Kubernetes cluster garbage collection + } + } + } catch (Exception e) { + LOGGER.warn("Caught exception while running Kubernetes cluster gc: ", e); + } + } + } + + /* Kubernetes cluster scanner checks if the Kubernetes 
cluster is in desired state. If it detects Kubernetes cluster + is not in desired state, it will trigger an event and marks the Kubernetes cluster to be 'Alert' state. For e.g a + Kubernetes cluster in 'Running' state should mean all the cluster of node VM's in the custer should be running and + number of the node VM's should be of cluster size, and the master node VM's is running. It is possible due to + out of band changes by user or hosts going down, we may end up one or more VM's in stopped state. in which case + scanner detects these changes and marks the cluster in 'Alert' state. Similarly cluster in 'Stopped' state means + all the cluster VM's are in stopped state any mismatch in states should get picked up by Kubernetes cluster and + mark the Kubernetes cluster to be 'Alert' state. Through recovery API, or reconciliation clusters in 'Alert' will + be brought back to known good state or desired state. + */ + public class KubernetesClusterStatusScanner extends ManagedContextRunnable { + private boolean firstRun = true; + @Override + protected void runInContext() { + GlobalLock gcLock = GlobalLock.getInternLock("KubernetesCluster.State.Scanner.Lock"); + try { + if (gcLock.lock(3)) { + try { + reallyRun(); + } finally { + gcLock.unlock(); + } + } + } finally { + gcLock.releaseRef(); + } + } + + public void reallyRun() { + try { + // run through Kubernetes clusters in 'Running' state and ensure all the VM's are Running in the cluster + List runningKubernetesClusters = kubernetesClusterDao.findKubernetesClustersInState(KubernetesCluster.State.Running); + for (KubernetesCluster kubernetesCluster : runningKubernetesClusters) { + if (LOGGER.isInfoEnabled()) { + LOGGER.info(String.format("Running Kubernetes cluster state scanner on Kubernetes cluster ID: %s", kubernetesCluster.getUuid())); + } + try { + if (!isClusterVMsInDesiredState(kubernetesCluster, VirtualMachine.State.Running)) { + stateTransitTo(kubernetesCluster.getId(), 
KubernetesCluster.Event.FaultsDetected); + } + } catch (Exception e) { + LOGGER.warn(String.format("Failed to run Kubernetes cluster Running state scanner on Kubernetes cluster ID: %s status scanner", kubernetesCluster.getUuid()), e); + } + } + + // run through Kubernetes clusters in 'Stopped' state and ensure all the VM's are Stopped in the cluster + List stoppedKubernetesClusters = kubernetesClusterDao.findKubernetesClustersInState(KubernetesCluster.State.Stopped); + for (KubernetesCluster kubernetesCluster : stoppedKubernetesClusters) { + if (LOGGER.isInfoEnabled()) { + LOGGER.info(String.format("Running Kubernetes cluster state scanner on Kubernetes cluster ID: %s for state: %s", kubernetesCluster.getUuid(), KubernetesCluster.State.Stopped.toString())); + } + try { + if (!isClusterVMsInDesiredState(kubernetesCluster, VirtualMachine.State.Stopped)) { + stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.FaultsDetected); + } + } catch (Exception e) { + LOGGER.warn(String.format("Failed to run Kubernetes cluster Stopped state scanner on Kubernetes cluster ID: %s status scanner", kubernetesCluster.getUuid()), e); + } + } + + // run through Kubernetes clusters in 'Alert' state and reconcile state as 'Running' if the VM's are running or 'Stopped' if VM's are stopped + List alertKubernetesClusters = kubernetesClusterDao.findKubernetesClustersInState(KubernetesCluster.State.Alert); + for (KubernetesClusterVO kubernetesCluster : alertKubernetesClusters) { + if (LOGGER.isInfoEnabled()) { + LOGGER.info(String.format("Running Kubernetes cluster state scanner on Kubernetes cluster ID: %s for state: %s", kubernetesCluster.getUuid(), KubernetesCluster.State.Alert.toString())); + } + try { + if (isClusterVMsInDesiredState(kubernetesCluster, VirtualMachine.State.Running)) { + KubernetesClusterStartWorker startWorker = + new KubernetesClusterStartWorker(kubernetesCluster, KubernetesClusterManagerImpl.this); + startWorker = ComponentContext.inject(startWorker); + 
startWorker.reconcileAlertCluster(); + } else if (isClusterVMsInDesiredState(kubernetesCluster, VirtualMachine.State.Stopped)) { + stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.StopRequested); + stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationSucceeded); + } + } catch (Exception e) { + LOGGER.warn(String.format("Failed to run Kubernetes cluster Alert state scanner on Kubernetes cluster ID: %s status scanner", kubernetesCluster.getUuid()), e); + } + } + + + if (firstRun) { + // run through Kubernetes clusters in 'Starting' state and reconcile state as 'Alert' or 'Error' if the VM's are running + List startingKubernetesClusters = kubernetesClusterDao.findKubernetesClustersInState(KubernetesCluster.State.Starting); + for (KubernetesCluster kubernetesCluster : startingKubernetesClusters) { + if ((new Date()).getTime() - kubernetesCluster.getCreated().getTime() < 10*60*1000) { + continue; + } + if (LOGGER.isInfoEnabled()) { + LOGGER.info(String.format("Running Kubernetes cluster state scanner on Kubernetes cluster ID: %s for state: %s", kubernetesCluster.getUuid(), KubernetesCluster.State.Starting.toString())); + } + try { + if (isClusterVMsInDesiredState(kubernetesCluster, VirtualMachine.State.Running)) { + stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.FaultsDetected); + } else { + stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); + } + } catch (Exception e) { + LOGGER.warn(String.format("Failed to run Kubernetes cluster Starting state scanner on Kubernetes cluster ID: %s status scanner", kubernetesCluster.getUuid()), e); + } + } + List destroyingKubernetesClusters = kubernetesClusterDao.findKubernetesClustersInState(KubernetesCluster.State.Destroying); + for (KubernetesCluster kubernetesCluster : destroyingKubernetesClusters) { + if (LOGGER.isInfoEnabled()) { + LOGGER.info(String.format("Running Kubernetes cluster state scanner on Kubernetes cluster ID: %s for state: 
%s", kubernetesCluster.getUuid(), KubernetesCluster.State.Destroying.toString())); + } + try { + KubernetesClusterDestroyWorker destroyWorker = new KubernetesClusterDestroyWorker(kubernetesCluster, KubernetesClusterManagerImpl.this); + destroyWorker = ComponentContext.inject(destroyWorker); + destroyWorker.destroy(); + } catch (Exception e) { + LOGGER.warn(String.format("Failed to run Kubernetes cluster Destroying state scanner on Kubernetes cluster ID: %s status scanner", kubernetesCluster.getUuid()), e); + } + } + } + } catch (Exception e) { + LOGGER.warn("Caught exception while running Kubernetes cluster state scanner", e); + } + firstRun = false; + } + } + + // checks if Kubernetes cluster is in desired state + boolean isClusterVMsInDesiredState(KubernetesCluster kubernetesCluster, VirtualMachine.State state) { + List clusterVMs = kubernetesClusterVmMapDao.listByClusterId(kubernetesCluster.getId()); + + // check cluster is running at desired capacity include master nodes as well + if (clusterVMs.size() < kubernetesCluster.getTotalNodeCount()) { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(String.format("Found only %d VMs in the Kubernetes cluster ID: %s while expected %d VMs to be in state: %s", + clusterVMs.size(), kubernetesCluster.getUuid(), kubernetesCluster.getTotalNodeCount(), state.toString())); + } + return false; + } + // check if all the VM's are in same state + for (KubernetesClusterVmMapVO clusterVm : clusterVMs) { + VMInstanceVO vm = vmInstanceDao.findByIdIncludingRemoved(clusterVm.getVmId()); + if (vm.getState() != state) { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(String.format("Found VM ID: %s in the Kubernetes cluster ID: %s in state: %s while expected to be in state: %s. 
So moving the cluster to Alert state for reconciliation", + vm.getUuid(), kubernetesCluster.getUuid(), vm.getState().toString(), state.toString())); + } + return false; + } + } + + return true; + } + + @Override + public boolean start() { + final Map defaultKubernetesServiceNetworkOfferingProviders = new HashMap(); + defaultKubernetesServiceNetworkOfferingProviders.put(Service.Dhcp, Network.Provider.VirtualRouter); + defaultKubernetesServiceNetworkOfferingProviders.put(Service.Dns, Network.Provider.VirtualRouter); + defaultKubernetesServiceNetworkOfferingProviders.put(Service.UserData, Network.Provider.VirtualRouter); + defaultKubernetesServiceNetworkOfferingProviders.put(Service.Firewall, Network.Provider.VirtualRouter); + defaultKubernetesServiceNetworkOfferingProviders.put(Service.Gateway, Network.Provider.VirtualRouter); + defaultKubernetesServiceNetworkOfferingProviders.put(Service.Lb, Network.Provider.VirtualRouter); + defaultKubernetesServiceNetworkOfferingProviders.put(Service.SourceNat, Network.Provider.VirtualRouter); + defaultKubernetesServiceNetworkOfferingProviders.put(Service.StaticNat, Network.Provider.VirtualRouter); + defaultKubernetesServiceNetworkOfferingProviders.put(Service.PortForwarding, Network.Provider.VirtualRouter); + defaultKubernetesServiceNetworkOfferingProviders.put(Service.Vpn, Network.Provider.VirtualRouter); + + NetworkOfferingVO defaultKubernetesServiceNetworkOffering = + new NetworkOfferingVO(DEFAULT_NETWORK_OFFERING_FOR_KUBERNETES_SERVICE_NAME, + "Network Offering used for CloudStack Kubernetes service", Networks.TrafficType.Guest, + false, false, null, null, true, + NetworkOffering.Availability.Required, null, Network.GuestType.Isolated, true, + true, false, false, false, false, + false, false, false, true, true, false, + false, true, false, false); + defaultKubernetesServiceNetworkOffering.setState(NetworkOffering.State.Enabled); + defaultKubernetesServiceNetworkOffering = 
networkOfferingDao.persistDefaultNetworkOffering(defaultKubernetesServiceNetworkOffering); + + for (Service service : defaultKubernetesServiceNetworkOfferingProviders.keySet()) { + NetworkOfferingServiceMapVO offService = + new NetworkOfferingServiceMapVO(defaultKubernetesServiceNetworkOffering.getId(), service, + defaultKubernetesServiceNetworkOfferingProviders.get(service)); + networkOfferingServiceMapDao.persist(offService); + LOGGER.trace("Added service for the network offering: " + offService); + } + + _gcExecutor.scheduleWithFixedDelay(new KubernetesClusterGarbageCollector(), 300, 300, TimeUnit.SECONDS); + _stateScanner.scheduleWithFixedDelay(new KubernetesClusterStatusScanner(), 300, 30, TimeUnit.SECONDS); + + return true; + } + + @Override + public boolean configure(String name, Map params) throws ConfigurationException { + _name = name; + _configParams = params; + _gcExecutor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("Kubernetes-Cluster-Scavenger")); + _stateScanner = Executors.newScheduledThreadPool(1, new NamedThreadFactory("Kubernetes-Cluster-State-Scanner")); + + return true; + } + + @Override + public String getConfigComponentName() { + return KubernetesClusterService.class.getSimpleName(); + } + + @Override + public ConfigKey[] getConfigKeys() { + return new ConfigKey[] { + KubernetesServiceEnabled, + KubernetesClusterHyperVTemplateName, + KubernetesClusterKVMTemplateName, + KubernetesClusterVMwareTemplateName, + KubernetesClusterXenserverTemplateName, + KubernetesClusterNetworkOffering, + KubernetesClusterStartTimeout, + KubernetesClusterScaleTimeout, + KubernetesClusterUpgradeTimeout, + KubernetesClusterExperimentalFeaturesEnabled + }; + } +} diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterService.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterService.java new file mode 100644 index 00000000000..db5ab91b3d1 
--- /dev/null +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterService.java @@ -0,0 +1,108 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.kubernetes.cluster; + +import org.apache.cloudstack.api.command.user.kubernetes.cluster.CreateKubernetesClusterCmd; +import org.apache.cloudstack.api.command.user.kubernetes.cluster.GetKubernetesClusterConfigCmd; +import org.apache.cloudstack.api.command.user.kubernetes.cluster.ListKubernetesClustersCmd; +import org.apache.cloudstack.api.command.user.kubernetes.cluster.ScaleKubernetesClusterCmd; +import org.apache.cloudstack.api.command.user.kubernetes.cluster.UpgradeKubernetesClusterCmd; +import org.apache.cloudstack.api.response.KubernetesClusterConfigResponse; +import org.apache.cloudstack.api.response.KubernetesClusterResponse; +import org.apache.cloudstack.api.response.ListResponse; +import org.apache.cloudstack.framework.config.ConfigKey; +import org.apache.cloudstack.framework.config.Configurable; + +import com.cloud.utils.component.PluggableService; +import com.cloud.utils.exception.CloudRuntimeException; + +public interface KubernetesClusterService extends PluggableService, Configurable { + 
static final String MIN_KUBERNETES_VERSION_HA_SUPPORT = "1.16.0"; + static final int MIN_KUBERNETES_CLUSTER_NODE_CPU = 2; + static final int MIN_KUBERNETES_CLUSTER_NODE_RAM_SIZE = 2048; + + static final ConfigKey KubernetesServiceEnabled = new ConfigKey("Advanced", Boolean.class, + "cloud.kubernetes.service.enabled", + "false", + "Indicates whether Kubernetes Service plugin is enabled or not. Management server restart needed on change", + false); + static final ConfigKey KubernetesClusterHyperVTemplateName = new ConfigKey("Advanced", String.class, + "cloud.kubernetes.cluster.template.name.hyperv", + "Kubernetes-Service-Template-HyperV", + "Name of the template to be used for creating Kubernetes cluster nodes on HyperV", + true); + static final ConfigKey KubernetesClusterKVMTemplateName = new ConfigKey("Advanced", String.class, + "cloud.kubernetes.cluster.template.name.kvm", + "Kubernetes-Service-Template-KVM", + "Name of the template to be used for creating Kubernetes cluster nodes on KVM", + true); + static final ConfigKey KubernetesClusterVMwareTemplateName = new ConfigKey("Advanced", String.class, + "cloud.kubernetes.cluster.template.name.vmware", + "Kubernetes-Service-Template-VMware", + "Name of the template to be used for creating Kubernetes cluster nodes on VMware", + true); + static final ConfigKey KubernetesClusterXenserverTemplateName = new ConfigKey("Advanced", String.class, + "cloud.kubernetes.cluster.template.name.xenserver", + "Kubernetes-Service-Template-Xenserver", + "Name of the template to be used for creating Kubernetes cluster nodes on Xenserver", + true); + static final ConfigKey KubernetesClusterNetworkOffering = new ConfigKey("Advanced", String.class, + "cloud.kubernetes.cluster.network.offering", + "DefaultNetworkOfferingforKubernetesService", + "Name of the network offering that will be used to create isolated network in which Kubernetes cluster VMs will be launched", + false); + static final ConfigKey KubernetesClusterStartTimeout = new 
ConfigKey("Advanced", Long.class, + "cloud.kubernetes.cluster.start.timeout", + "3600", + "Timeout interval (in seconds) in which start operation for a Kubernetes cluster should be completed", + true); + static final ConfigKey KubernetesClusterScaleTimeout = new ConfigKey("Advanced", Long.class, + "cloud.kubernetes.cluster.scale.timeout", + "3600", + "Timeout interval (in seconds) in which scale operation for a Kubernetes cluster should be completed", + true); + static final ConfigKey KubernetesClusterUpgradeTimeout = new ConfigKey("Advanced", Long.class, + "cloud.kubernetes.cluster.upgrade.timeout", + "3600", + "Timeout interval (in seconds) in which upgrade operation for a Kubernetes cluster should be completed. Not strictly obeyed while upgrade is in progress on a node", + true); + static final ConfigKey KubernetesClusterExperimentalFeaturesEnabled = new ConfigKey("Advanced", Boolean.class, + "cloud.kubernetes.cluster.experimental.features.enabled", + "false", + "Indicates whether experimental feature for Kubernetes cluster such as Docker private registry are enabled or not", + true); + + KubernetesCluster findById(final Long id); + + KubernetesCluster createKubernetesCluster(CreateKubernetesClusterCmd cmd) throws CloudRuntimeException; + + boolean startKubernetesCluster(long kubernetesClusterId, boolean onCreate) throws CloudRuntimeException; + + boolean stopKubernetesCluster(long kubernetesClusterId) throws CloudRuntimeException; + + boolean deleteKubernetesCluster(Long kubernetesClusterId) throws CloudRuntimeException; + + ListResponse listKubernetesClusters(ListKubernetesClustersCmd cmd); + + KubernetesClusterConfigResponse getKubernetesClusterConfig(GetKubernetesClusterConfigCmd cmd); + + KubernetesClusterResponse createKubernetesClusterResponse(long kubernetesClusterId); + + boolean scaleKubernetesCluster(ScaleKubernetesClusterCmd cmd) throws CloudRuntimeException; + + boolean upgradeKubernetesCluster(UpgradeKubernetesClusterCmd cmd) throws 
CloudRuntimeException; +} diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVO.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVO.java new file mode 100644 index 00000000000..9ff0be335f3 --- /dev/null +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVO.java @@ -0,0 +1,340 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.kubernetes.cluster; + +import java.util.Date; +import java.util.UUID; + + +import javax.persistence.Column; + +import javax.persistence.Entity; +import javax.persistence.Id; +import javax.persistence.Table; +import javax.persistence.GeneratedValue; +import javax.persistence.GenerationType; + +import com.cloud.utils.db.GenericDao; + +@Entity +@Table(name = "kubernetes_cluster") +public class KubernetesClusterVO implements KubernetesCluster { + + @Id + @GeneratedValue(strategy = GenerationType.IDENTITY) + @Column(name = "id") + private long id; + + @Column(name = "uuid") + private String uuid; + + @Column(name = "name") + private String name; + + @Column(name = "description", length = 4096) + private String description; + + @Column(name = "zone_id") + private long zoneId; + + @Column(name = "kubernetes_version_id") + private long kubernetesVersionId; + + @Column(name = "service_offering_id") + private long serviceOfferingId; + + @Column(name = "template_id") + private long templateId; + + @Column(name = "network_id") + private long networkId; + + @Column(name = "domain_id") + private long domainId; + + @Column(name = "account_id") + private long accountId; + + @Column(name = "master_node_count") + private long masterNodeCount; + + @Column(name = "node_count") + private long nodeCount; + + @Column(name = "cores") + private long cores; + + @Column(name = "memory") + private long memory; + + @Column(name = "node_root_disk_size") + private long nodeRootDiskSize; + + @Column(name = "state") + private State state; + + @Column(name = "key_pair") + private String keyPair; + + @Column(name = "endpoint") + private String endpoint; + + @Column(name = GenericDao.CREATED_COLUMN) + private Date created; + + @Column(name = GenericDao.REMOVED_COLUMN) + private Date removed; + + @Column(name = "gc") + private boolean checkForGc; + + @Override + public long getId() { + return id; + } + + public void setId(long id) { + this.id = id; + } + + @Override + public String 
getUuid() { + return uuid; + } + + public void setUuid(String uuid) { + this.uuid = uuid; + } + + @Override + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + @Override + public String getDescription() { + return description; + } + + public void setDescription(String description) { + this.description = description; + } + + @Override + public long getZoneId() { + return zoneId; + } + + public void setZoneId(long zoneId) { + this.zoneId = zoneId; + } + + @Override + public long getKubernetesVersionId() { + return kubernetesVersionId; + } + + public void setKubernetesVersionId(long kubernetesVersionId) { + this.kubernetesVersionId = kubernetesVersionId; + } + + @Override + public long getServiceOfferingId() { + return serviceOfferingId; + } + + public void setServiceOfferingId(long serviceOfferingId) { + this.serviceOfferingId = serviceOfferingId; + } + + @Override + public long getTemplateId() { + return templateId; + } + + public void setTemplateId(long templateId) { + this.templateId = templateId; + } + + @Override + public long getNetworkId() { + return networkId; + } + + public void setNetworkId(long networkId) { + this.networkId = networkId; + } + + @Override + public long getDomainId() { + return domainId; + } + + public void setDomainId(long domainId) { + this.domainId = domainId; + } + + @Override + public long getAccountId() { + return accountId; + } + + public void setAccountId(long accountId) { + this.accountId = accountId; + } + + @Override + public long getMasterNodeCount() { + return masterNodeCount; + } + + public void setMasterNodeCount(long masterNodeCount) { + this.masterNodeCount = masterNodeCount; + } + + @Override + public long getNodeCount() { + return nodeCount; + } + + public void setNodeCount(long nodeCount) { + this.nodeCount = nodeCount; + } + + @Override + public long getTotalNodeCount() { + return this.masterNodeCount + this.nodeCount; + } + + @Override + public long 
getCores() { + return cores; + } + + public void setCores(long cores) { + this.cores = cores; + } + + @Override + public long getMemory() { + return memory; + } + + public void setMemory(long memory) { + this.memory = memory; + } + + @Override + public long getNodeRootDiskSize() { + return nodeRootDiskSize; + } + + public void setNodeRootDiskSize(long nodeRootDiskSize) { + this.nodeRootDiskSize = nodeRootDiskSize; + } + + @Override + public State getState() { + return state; + } + + public void setState(State state) { + this.state = state; + } + + @Override + public String getEndpoint() { + return endpoint; + } + + public void setEndpoint(String endpoint) { + this.endpoint = endpoint; + } + + public String getKeyPair() { + return keyPair; + } + + public void setKeyPair(String keyPair) { + this.keyPair = keyPair; + } + + @Override + public boolean isDisplay() { + return true; + } + + + public Date getRemoved() { + if (removed == null) + return null; + return new Date(removed.getTime()); + } + + @Override + public boolean isCheckForGc() { + return checkForGc; + } + + public void setCheckForGc(boolean check) { + checkForGc = check; + } + + @Override + public Date getCreated() { + return created; + } + + public KubernetesClusterVO() { + this.uuid = UUID.randomUUID().toString(); + } + + public KubernetesClusterVO(String name, String description, long zoneId, long kubernetesVersionId, long serviceOfferingId, long templateId, + long networkId, long domainId, long accountId, long masterNodeCount, long nodeCount, State state, + String keyPair, long cores, long memory, Long nodeRootDiskSize, String endpoint) { + this.uuid = UUID.randomUUID().toString(); + this.name = name; + this.description = description; + this.zoneId = zoneId; + this.kubernetesVersionId = kubernetesVersionId; + this.serviceOfferingId = serviceOfferingId; + this.templateId = templateId; + this.networkId = networkId; + this.domainId = domainId; + this.accountId = accountId; + this.masterNodeCount = 
masterNodeCount; + this.nodeCount = nodeCount; + this.state = state; + this.keyPair = keyPair; + this.cores = cores; + this.memory = memory; + if (nodeRootDiskSize != null && nodeRootDiskSize > 0) { + this.nodeRootDiskSize = nodeRootDiskSize; + } + this.endpoint = endpoint; + this.checkForGc = false; + } + + @Override + public Class getEntityType() { + return KubernetesCluster.class; + } +} diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVmMap.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVmMap.java new file mode 100644 index 00000000000..c7399202348 --- /dev/null +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVmMap.java @@ -0,0 +1,30 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.kubernetes.cluster; + +/** + * KubernetesClusterVmMap will describe mapping of ID of KubernetesCuster + * and ID of its VirtualMachine. A KubernetesCluster can have multiple VMs + * deployed for it therefore a list of KubernetesClusterVmMap are associated + * with a KubernetesCluster. 
+ * A particular VM can be deployed only for a single KubernetesCluster. + */ +public interface KubernetesClusterVmMap { + long getId(); + long getClusterId(); + long getVmId(); +} diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVmMapVO.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVmMapVO.java new file mode 100644 index 00000000000..edb06e79534 --- /dev/null +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVmMapVO.java @@ -0,0 +1,76 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.kubernetes.cluster; + +import javax.persistence.Column; + +import javax.persistence.Entity; +import javax.persistence.Id; +import javax.persistence.Table; +import javax.persistence.GeneratedValue; +import javax.persistence.GenerationType; + +@Entity +@Table(name = "kubernetes_cluster_vm_map") +public class KubernetesClusterVmMapVO implements KubernetesClusterVmMap { + + @Override + public long getId() { + return id; + } + + @Override + public long getClusterId() { + return clusterId; + + } + + public void setClusterId(long clusterId) { + + this.clusterId = clusterId; + } + + @Override + public long getVmId() { + return vmId; + } + + public void setVmId(long vmId) { + + this.vmId = vmId; + } + + @Id + @GeneratedValue(strategy = GenerationType.IDENTITY) + @Column(name = "id") + long id; + + @Column(name = "cluster_id") + long clusterId; + + @Column(name = "vm_id") + long vmId; + + public KubernetesClusterVmMapVO() { + + } + + public KubernetesClusterVmMapVO(long clusterId, long vmId) { + this.vmId = vmId; + this.clusterId = clusterId; + } +} diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java new file mode 100644 index 00000000000..aad9a225a44 --- /dev/null +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java @@ -0,0 +1,380 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package com.cloud.kubernetes.cluster.actionworkers; + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Objects; + +import javax.inject.Inject; + +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.ca.CAManager; +import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; +import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.io.IOUtils; +import org.apache.log4j.Level; +import org.apache.log4j.Logger; + +import com.cloud.dc.dao.DataCenterDao; +import com.cloud.dc.dao.VlanDao; +import com.cloud.kubernetes.cluster.KubernetesCluster; +import com.cloud.kubernetes.cluster.KubernetesClusterDetailsVO; +import com.cloud.kubernetes.cluster.KubernetesClusterManagerImpl; +import com.cloud.kubernetes.cluster.KubernetesClusterVO; +import com.cloud.kubernetes.cluster.KubernetesClusterVmMapVO; +import com.cloud.kubernetes.cluster.dao.KubernetesClusterDao; +import com.cloud.kubernetes.cluster.dao.KubernetesClusterDetailsDao; +import com.cloud.kubernetes.cluster.dao.KubernetesClusterVmMapDao; +import com.cloud.kubernetes.version.KubernetesSupportedVersion; +import com.cloud.kubernetes.version.dao.KubernetesSupportedVersionDao; +import com.cloud.network.IpAddress; +import com.cloud.network.IpAddressManager; +import com.cloud.network.Network; +import com.cloud.network.NetworkModel; +import 
com.cloud.network.dao.NetworkDao; +import com.cloud.service.dao.ServiceOfferingDao; +import com.cloud.storage.Storage; +import com.cloud.storage.VMTemplateVO; +import com.cloud.storage.dao.VMTemplateDao; +import com.cloud.template.TemplateApiService; +import com.cloud.template.VirtualMachineTemplate; +import com.cloud.user.Account; +import com.cloud.user.dao.AccountDao; +import com.cloud.user.dao.SSHKeyPairDao; +import com.cloud.uservm.UserVm; +import com.cloud.utils.Pair; +import com.cloud.utils.StringUtils; +import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallback; +import com.cloud.utils.db.TransactionStatus; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.fsm.NoTransitionException; +import com.cloud.utils.fsm.StateMachine2; +import com.cloud.vm.UserVmService; +import com.cloud.vm.dao.UserVmDao; +import com.google.common.base.Strings; + +public class KubernetesClusterActionWorker { + + public static final String CLUSTER_NODE_VM_USER = "core"; + public static final int CLUSTER_API_PORT = 6443; + public static final int CLUSTER_NODES_DEFAULT_START_SSH_PORT = 2222; + + protected static final Logger LOGGER = Logger.getLogger(KubernetesClusterActionWorker.class); + + protected StateMachine2 _stateMachine = KubernetesCluster.State.getStateMachine(); + + @Inject + protected CAManager caManager; + @Inject + protected ConfigurationDao configurationDao; + @Inject + protected DataCenterDao dataCenterDao; + @Inject + protected AccountDao accountDao; + @Inject + protected IpAddressManager ipAddressManager; + @Inject + protected NetworkOrchestrationService networkMgr; + @Inject + protected NetworkDao networkDao; + @Inject + protected NetworkModel networkModel; + @Inject + protected ServiceOfferingDao serviceOfferingDao; + @Inject + protected SSHKeyPairDao sshKeyPairDao; + @Inject + protected VMTemplateDao templateDao; + @Inject + protected TemplateApiService templateService; + @Inject + protected UserVmDao 
userVmDao; + @Inject + protected UserVmService userVmService; + @Inject + protected VlanDao vlanDao; + + protected KubernetesClusterDao kubernetesClusterDao; + protected KubernetesClusterVmMapDao kubernetesClusterVmMapDao; + protected KubernetesClusterDetailsDao kubernetesClusterDetailsDao; + protected KubernetesSupportedVersionDao kubernetesSupportedVersionDao; + + protected KubernetesCluster kubernetesCluster; + protected Account owner; + protected File sshKeyFile; + protected String publicIpAddress; + protected int sshPort; + + protected KubernetesClusterActionWorker(final KubernetesCluster kubernetesCluster, final KubernetesClusterManagerImpl clusterManager) { + this.kubernetesCluster = kubernetesCluster; + this.kubernetesClusterDao = clusterManager.kubernetesClusterDao; + this.kubernetesClusterDetailsDao = clusterManager.kubernetesClusterDetailsDao; + this.kubernetesClusterVmMapDao = clusterManager.kubernetesClusterVmMapDao; + this.kubernetesSupportedVersionDao = clusterManager.kubernetesSupportedVersionDao; + } + + protected void init() { + this.owner = accountDao.findById(kubernetesCluster.getAccountId()); + this.sshKeyFile = getManagementServerSshPublicKeyFile(); + } + + protected String readResourceFile(String resource) throws IOException { + return IOUtils.toString(Objects.requireNonNull(Thread.currentThread().getContextClassLoader().getResourceAsStream(resource)), StringUtils.getPreferredCharset()); + } + + protected void logMessage(final Level logLevel, final String message, final Exception e) { + if (logLevel == Level.INFO) { + if (LOGGER.isInfoEnabled()) { + if (e != null) { + LOGGER.info(message, e); + } else { + LOGGER.info(message); + } + } + } else if (logLevel == Level.DEBUG) { + if (LOGGER.isDebugEnabled()) { + if (e != null) { + LOGGER.debug(message, e); + } else { + LOGGER.debug(message); + } + } + } else if (logLevel == Level.WARN) { + if (e != null) { + LOGGER.warn(message, e); + } else { + LOGGER.warn(message); + } + } else { + if (e != 
null) { + LOGGER.error(message, e); + } else { + LOGGER.error(message); + } + } + } + + protected void logTransitStateDetachIsoAndThrow(final Level logLevel, final String message, final KubernetesCluster kubernetesCluster, + final List clusterVMs, final KubernetesCluster.Event event, final Exception e) throws CloudRuntimeException { + logMessage(logLevel, message, e); + stateTransitTo(kubernetesCluster.getId(), event); + detachIsoKubernetesVMs(clusterVMs); + if (e == null) { + throw new CloudRuntimeException(message); + } + throw new CloudRuntimeException(message, e); + } + + protected void logTransitStateAndThrow(final Level logLevel, final String message, final Long kubernetesClusterId, final KubernetesCluster.Event event, final Exception e) throws CloudRuntimeException { + logMessage(logLevel, message, e); + if (kubernetesClusterId != null && event != null) { + stateTransitTo(kubernetesClusterId, event); + } + if (e == null) { + throw new CloudRuntimeException(message); + } + throw new CloudRuntimeException(message, e); + } + + protected void logTransitStateAndThrow(final Level logLevel, final String message, final Long kubernetesClusterId, final KubernetesCluster.Event event) throws CloudRuntimeException { + logTransitStateAndThrow(logLevel, message, kubernetesClusterId, event, null); + } + + protected void logAndThrow(final Level logLevel, final String message) throws CloudRuntimeException { + logTransitStateAndThrow(logLevel, message, null, null, null); + } + + protected void logAndThrow(final Level logLevel, final String message, final Exception ex) throws CloudRuntimeException { + logTransitStateAndThrow(logLevel, message, null, null, ex); + } + + protected File getManagementServerSshPublicKeyFile() { + boolean devel = Boolean.parseBoolean(configurationDao.getValue("developer")); + String keyFile = String.format("%s/.ssh/id_rsa", System.getProperty("user.home")); + if (devel) { + keyFile += ".cloud"; + } + return new File(keyFile); + } + + protected 
KubernetesClusterVmMapVO addKubernetesClusterVm(final long kubernetesClusterId, final long vmId) { + return Transaction.execute(new TransactionCallback() { + @Override + public KubernetesClusterVmMapVO doInTransaction(TransactionStatus status) { + KubernetesClusterVmMapVO newClusterVmMap = new KubernetesClusterVmMapVO(kubernetesClusterId, vmId); + kubernetesClusterVmMapDao.persist(newClusterVmMap); + return newClusterVmMap; + } + }); + } + + private UserVm fetchMasterVmIfMissing(final UserVm masterVm) { + if (masterVm != null) { + return masterVm; + } + List clusterVMs = kubernetesClusterVmMapDao.listByClusterId(kubernetesCluster.getId()); + if (CollectionUtils.isEmpty(clusterVMs)) { + LOGGER.warn(String.format("Unable to retrieve VMs for Kubernetes cluster ID: %s", kubernetesCluster.getUuid())); + return null; + } + List vmIds = new ArrayList<>(); + for (KubernetesClusterVmMapVO vmMap : clusterVMs) { + vmIds.add(vmMap.getVmId()); + } + Collections.sort(vmIds); + return userVmDao.findById(vmIds.get(0)); + } + + protected String getMasterVmPrivateIp() { + String ip = null; + UserVm vm = fetchMasterVmIfMissing(null); + if (vm != null) { + ip = vm.getPrivateIpAddress(); + } + return ip; + } + + protected Pair getKubernetesClusterServerIpSshPort(UserVm masterVm) { + int port = CLUSTER_NODES_DEFAULT_START_SSH_PORT; + KubernetesClusterDetailsVO detail = kubernetesClusterDetailsDao.findDetail(kubernetesCluster.getId(), ApiConstants.EXTERNAL_LOAD_BALANCER_IP_ADDRESS); + if (detail != null && !Strings.isNullOrEmpty(detail.getValue())) { + return new Pair<>(detail.getValue(), port); + } + Network network = networkDao.findById(kubernetesCluster.getNetworkId()); + if (network == null) { + LOGGER.warn(String.format("Network for Kubernetes cluster ID: %s cannot be found", kubernetesCluster.getUuid())); + return new Pair<>(null, port); + } + if (Network.GuestType.Isolated.equals(network.getGuestType())) { + List addresses = 
networkModel.listPublicIpsAssignedToGuestNtwk(network.getId(), true); + if (CollectionUtils.isEmpty(addresses)) { + LOGGER.warn(String.format("No public IP addresses found for network ID: %s, Kubernetes cluster ID: %s", network.getUuid(), kubernetesCluster.getUuid())); + return new Pair<>(null, port); + } + for (IpAddress address : addresses) { + if (address.isSourceNat()) { + return new Pair<>(address.getAddress().addr(), port); + } + } + LOGGER.warn(String.format("No source NAT IP addresses found for network ID: %s, Kubernetes cluster ID: %s", network.getUuid(), kubernetesCluster.getUuid())); + return new Pair<>(null, port); + } else if (Network.GuestType.Shared.equals(network.getGuestType())) { + port = 22; + masterVm = fetchMasterVmIfMissing(masterVm); + if (masterVm == null) { + LOGGER.warn(String.format("Unable to retrieve master VM for Kubernetes cluster ID: %s", kubernetesCluster.getUuid())); + return new Pair<>(null, port); + } + return new Pair<>(masterVm.getPrivateIpAddress(), port); + } + LOGGER.warn(String.format("Unable to retrieve server IP address for Kubernetes cluster ID: %s", kubernetesCluster.getUuid())); + return new Pair<>(null, port); + } + + protected void attachIsoKubernetesVMs(List clusterVMs, final KubernetesSupportedVersion kubernetesSupportedVersion) throws CloudRuntimeException { + KubernetesSupportedVersion version = kubernetesSupportedVersion; + if (kubernetesSupportedVersion == null) { + version = kubernetesSupportedVersionDao.findById(kubernetesCluster.getKubernetesVersionId()); + } + KubernetesCluster.Event failedEvent = KubernetesCluster.Event.OperationFailed; + KubernetesCluster cluster = kubernetesClusterDao.findById(kubernetesCluster.getId()); + if (cluster != null && cluster.getState() == KubernetesCluster.State.Starting) { + failedEvent = KubernetesCluster.Event.CreateFailed; + } + if (version == null) { + logTransitStateAndThrow(Level.ERROR, String .format("Unable to find Kubernetes version for cluster ID: %s", 
kubernetesCluster.getUuid()), kubernetesCluster.getId(), failedEvent); + } + VMTemplateVO iso = templateDao.findById(version.getIsoId()); + if (iso == null) { + logTransitStateAndThrow(Level.ERROR, String.format("Unable to attach ISO to Kubernetes cluster ID: %s. Binaries ISO not found.", kubernetesCluster.getUuid()), kubernetesCluster.getId(), failedEvent); + } + if (!iso.getFormat().equals(Storage.ImageFormat.ISO)) { + logTransitStateAndThrow(Level.ERROR, String.format("Unable to attach ISO to Kubernetes cluster ID: %s. Invalid Binaries ISO.", kubernetesCluster.getUuid()), kubernetesCluster.getId(), failedEvent); + } + if (!iso.getState().equals(VirtualMachineTemplate.State.Active)) { + logTransitStateAndThrow(Level.ERROR, String.format("Unable to attach ISO to Kubernetes cluster ID: %s. Binaries ISO not active.", kubernetesCluster.getUuid()), kubernetesCluster.getId(), failedEvent); + } + for (UserVm vm : clusterVMs) { + try { + templateService.attachIso(iso.getId(), vm.getId()); + if (LOGGER.isInfoEnabled()) { + LOGGER.info(String.format("Attached binaries ISO for VM: %s in cluster: %s", vm.getUuid(), kubernetesCluster.getName())); + } + } catch (CloudRuntimeException ex) { + logTransitStateAndThrow(Level.ERROR, String.format("Failed to attach binaries ISO for VM: %s in the Kubernetes cluster name: %s", vm.getDisplayName(), kubernetesCluster.getName()), kubernetesCluster.getId(), failedEvent, ex); + } + } + } + + protected void attachIsoKubernetesVMs(List clusterVMs) throws CloudRuntimeException { + attachIsoKubernetesVMs(clusterVMs, null); + } + + protected void detachIsoKubernetesVMs(List clusterVMs) { + for (UserVm vm : clusterVMs) { + boolean result = false; + try { + result = templateService.detachIso(vm.getId()); + } catch (CloudRuntimeException ex) { + LOGGER.warn(String.format("Failed to detach binaries ISO from VM ID: %s in the Kubernetes cluster ID: %s ", vm.getUuid(), kubernetesCluster.getUuid()), ex); + } + if (result) { + if 
(LOGGER.isInfoEnabled()) { + LOGGER.info(String.format("Detached Kubernetes binaries from VM ID: %s in the Kubernetes cluster ID: %s", vm.getUuid(), kubernetesCluster.getUuid())); + } + continue; + } + LOGGER.warn(String.format("Failed to detach binaries ISO from VM ID: %s in the Kubernetes cluster ID: %s ", vm.getUuid(), kubernetesCluster.getUuid())); + } + } + + protected List getKubernetesClusterVMMaps() { + List clusterVMs = kubernetesClusterVmMapDao.listByClusterId(kubernetesCluster.getId()); + if (!CollectionUtils.isEmpty(clusterVMs)) { + clusterVMs.sort((t1, t2) -> (int)((t1.getId() - t2.getId())/Math.abs(t1.getId() - t2.getId()))); + } + return clusterVMs; + } + + protected List getKubernetesClusterVMs() { + List vmList = new ArrayList<>(); + List clusterVMs = getKubernetesClusterVMMaps(); + if (!CollectionUtils.isEmpty(clusterVMs)) { + for (KubernetesClusterVmMapVO vmMap : clusterVMs) { + vmList.add(userVmDao.findById(vmMap.getVmId())); + } + } + return vmList; + } + + protected boolean stateTransitTo(long kubernetesClusterId, KubernetesCluster.Event e) { + KubernetesClusterVO kubernetesCluster = kubernetesClusterDao.findById(kubernetesClusterId); + try { + return _stateMachine.transitTo(kubernetesCluster, e, null, kubernetesClusterDao); + } catch (NoTransitionException nte) { + LOGGER.warn(String.format("Failed to transition state of the Kubernetes cluster ID: %s in state %s on event %s", kubernetesCluster.getUuid(), kubernetesCluster.getState().toString(), e.toString()), nte); + return false; + } + } +} diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterDestroyWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterDestroyWorker.java new file mode 100644 index 00000000000..8d7f42730aa --- /dev/null +++ 
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

package com.cloud.kubernetes.cluster.actionworkers;

import java.util.ArrayList;
import java.util.List;

import javax.inject.Inject;

import org.apache.cloudstack.context.CallContext;
import org.apache.commons.collections.CollectionUtils;
import org.apache.log4j.Level;

import com.cloud.exception.ConcurrentOperationException;
import com.cloud.exception.ManagementServerException;
import com.cloud.exception.PermissionDeniedException;
import com.cloud.exception.ResourceUnavailableException;
import com.cloud.kubernetes.cluster.KubernetesCluster;
import com.cloud.kubernetes.cluster.KubernetesClusterDetailsVO;
import com.cloud.kubernetes.cluster.KubernetesClusterManagerImpl;
import com.cloud.kubernetes.cluster.KubernetesClusterVO;
import com.cloud.kubernetes.cluster.KubernetesClusterVmMap;
import com.cloud.kubernetes.cluster.KubernetesClusterVmMapVO;
import com.cloud.network.IpAddress;
import com.cloud.network.Network;
import com.cloud.network.dao.NetworkVO;
import com.cloud.network.rules.FirewallRule;
import com.cloud.user.Account;
import com.cloud.user.AccountManager;
import com.cloud.user.User;
import com.cloud.uservm.UserVm;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.vm.ReservationContext;
import com.cloud.vm.ReservationContextImpl;
import com.cloud.vm.UserVmVO;

/**
 * Action worker that destroys a Kubernetes cluster: expunges its VMs,
 * tears down (or cleans the rules of) its network, and removes the
 * cluster database entry. Clusters that cannot be fully cleaned up are
 * flagged for the garbage collector.
 */
public class KubernetesClusterDestroyWorker extends KubernetesClusterResourceModifierActionWorker {

    @Inject
    protected AccountManager accountManager;

    // VM mappings of the cluster being destroyed; populated in destroy().
    private List<KubernetesClusterVmMapVO> clusterVMs;

    public KubernetesClusterDestroyWorker(final KubernetesCluster kubernetesCluster, final KubernetesClusterManagerImpl clusterManager) {
        super(kubernetesCluster, clusterManager);
    }

    /**
     * Rejects the delete operation unless the cluster is in a state from
     * which destruction is allowed. (Renamed from the misspelled
     * validateClusterSate; the method is private and only called below.)
     *
     * @throws PermissionDeniedException when the cluster state forbids deletion
     */
    private void validateClusterState() {
        if (!(kubernetesCluster.getState().equals(KubernetesCluster.State.Running)
                || kubernetesCluster.getState().equals(KubernetesCluster.State.Stopped)
                || kubernetesCluster.getState().equals(KubernetesCluster.State.Alert)
                || kubernetesCluster.getState().equals(KubernetesCluster.State.Error)
                || kubernetesCluster.getState().equals(KubernetesCluster.State.Destroying))) {
            String msg = String.format("Cannot perform delete operation on cluster ID: %s in state: %s", kubernetesCluster.getUuid(), kubernetesCluster.getState());
            LOGGER.warn(msg);
            throw new PermissionDeniedException(msg);
        }
    }

    /**
     * Destroys and expunges every VM mapped to the cluster, removing each
     * mapping as it goes. Returns false as soon as one VM fails to be
     * destroyed (remaining resources are still cleaned up by the caller).
     */
    private boolean destroyClusterVMs() {
        if (!CollectionUtils.isEmpty(clusterVMs)) {
            for (KubernetesClusterVmMapVO clusterVM : clusterVMs) {
                long vmID = clusterVM.getVmId();

                // delete only if VM exists and is not removed
                UserVmVO userVM = userVmDao.findById(vmID);
                if (userVM == null || userVM.isRemoved()) {
                    continue;
                }
                try {
                    UserVm vm = userVmService.destroyVm(vmID, true);
                    if (!userVmManager.expunge(userVM, CallContext.current().getCallingUserId(), CallContext.current().getCallingAccount())) {
                        LOGGER.warn(String.format("Unable to expunge VM '%s' ID: %s, destroying Kubernetes cluster will probably fail"
                                , vm.getInstanceName()
                                , vm.getUuid()));
                    }
                    kubernetesClusterVmMapDao.expunge(clusterVM.getId());
                    if (LOGGER.isInfoEnabled()) {
                        LOGGER.info(String.format("Destroyed VM ID: %s as part of Kubernetes cluster ID: %s cleanup", vm.getUuid(), kubernetesCluster.getUuid()));
                    }
                } catch (ResourceUnavailableException | ConcurrentOperationException e) {
                    LOGGER.warn(String.format("Failed to destroy VM ID: %s part of the Kubernetes cluster ID: %s cleanup. Moving on with destroying remaining resources provisioned for the Kubernetes cluster", userVM.getUuid(), kubernetesCluster.getUuid()), e);
                    return false;
                }
            }
        }
        return true;
    }

    /** Marks the cluster entry so the background garbage collector retries cleanup. */
    private boolean updateKubernetesClusterEntryForGC() {
        KubernetesClusterVO kubernetesClusterVO = kubernetesClusterDao.findById(kubernetesCluster.getId());
        kubernetesClusterVO.setCheckForGc(true);
        return kubernetesClusterDao.update(kubernetesCluster.getId(), kubernetesClusterVO);
    }

    /**
     * Destroys the cluster's network (if it still exists) on behalf of the
     * network owner.
     *
     * @throws ManagementServerException when the network cannot be destroyed
     */
    private void destroyKubernetesClusterNetwork() throws ManagementServerException {
        NetworkVO network = networkDao.findById(kubernetesCluster.getNetworkId());
        if (network != null && network.getRemoved() == null) {
            Account owner = accountManager.getAccount(network.getAccountId());
            User callerUser = accountManager.getActiveUser(CallContext.current().getCallingUserId());
            ReservationContext context = new ReservationContextImpl(null, null, callerUser, owner);
            boolean networkDestroyed = networkMgr.destroyNetwork(kubernetesCluster.getNetworkId(), context, true);
            if (!networkDestroyed) {
                String msg = String.format("Failed to destroy network ID: %s as part of Kubernetes cluster ID: %s cleanup", network.getUuid(), kubernetesCluster.getUuid());
                LOGGER.warn(msg);
                throw new ManagementServerException(msg);
            }
            if (LOGGER.isInfoEnabled()) {
                LOGGER.info(String.format("Destroyed network: %s as part of Kubernetes cluster ID: %s cleanup", network.getUuid(), kubernetesCluster.getUuid()));
            }
        }
    }

    /**
     * Removes the load balancing, firewall and port forwarding rules the
     * cluster provisioned on its isolated network's source NAT IP. No-op for
     * shared networks.
     *
     * @throws ManagementServerException when a rule cannot be removed
     */
    private void deleteKubernetesClusterNetworkRules() throws ManagementServerException {
        NetworkVO network = networkDao.findById(kubernetesCluster.getNetworkId());
        if (network == null || !Network.GuestType.Isolated.equals(network.getGuestType())) {
            return;
        }
        List<Long> removedVmIds = new ArrayList<>();
        if (!CollectionUtils.isEmpty(clusterVMs)) {
            for (KubernetesClusterVmMapVO clusterVM : clusterVMs) {
                removedVmIds.add(clusterVM.getVmId());
            }
        }
        IpAddress publicIp = getSourceNatIp(network);
        if (publicIp == null) {
            throw new ManagementServerException(String.format("No source NAT IP addresses found for network ID: %s", network.getUuid()));
        }
        try {
            removeLoadBalancingRule(publicIp, network, owner, CLUSTER_API_PORT);
        } catch (ResourceUnavailableException e) {
            // Fixed garbled message ("Failed to KubernetesCluster load balancing rule")
            throw new ManagementServerException(String.format("Failed to remove load balancing rule for network ID: %s", network.getUuid()));
        }
        FirewallRule firewallRule = removeApiFirewallRule(publicIp);
        if (firewallRule == null) {
            logMessage(Level.WARN, "Firewall rule for API access can't be removed", null);
        }
        firewallRule = removeSshFirewallRule(publicIp);
        if (firewallRule == null) {
            logMessage(Level.WARN, "Firewall rule for SSH access can't be removed", null);
        }
        try {
            removePortForwardingRules(publicIp, network, owner, removedVmIds);
        } catch (ResourceUnavailableException e) {
            throw new ManagementServerException(String.format("Failed to remove port forwarding rules for network ID: %s", network.getUuid()));
        }
    }

    /**
     * Waits (up to 3 x 10s) until every cluster VM is actually expunged, so
     * that the subsequent network destroy does not race the VM cleanup.
     */
    private void validateClusterVMsDestroyed() {
        if (clusterVMs != null && !clusterVMs.isEmpty()) { // Wait for few seconds to get all VMs really expunged
            final int maxRetries = 3;
            int retryCounter = 0;
            while (retryCounter < maxRetries) {
                boolean allVMsRemoved = true;
                for (KubernetesClusterVmMap clusterVM : clusterVMs) {
                    UserVmVO userVM = userVmDao.findById(clusterVM.getVmId());
                    if (userVM != null && !userVM.isRemoved()) {
                        allVMsRemoved = false;
                        break;
                    }
                }
                if (allVMsRemoved) {
                    break;
                }
                try {
                    Thread.sleep(10000);
                } catch (InterruptedException ie) {
                    // Fixed: restore the interrupt flag instead of swallowing it,
                    // and stop waiting so the caller can proceed with cleanup.
                    Thread.currentThread().interrupt();
                    break;
                }
                retryCounter++;
            }
        }
    }

    /**
     * Entry point: destroys the cluster's VMs, network (or network rules,
     * depending on the "networkCleanup" cluster detail) and finally the
     * cluster record. Flags the cluster for GC and throws on partial failure.
     *
     * @return true when the cluster entry was fully removed
     */
    public boolean destroy() throws CloudRuntimeException {
        init();
        validateClusterState();
        if (LOGGER.isInfoEnabled()) {
            LOGGER.info(String.format("Destroying Kubernetes cluster ID: %s", kubernetesCluster.getUuid()));
        }
        stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.DestroyRequested);
        this.clusterVMs = kubernetesClusterVmMapDao.listByClusterId(kubernetesCluster.getId());
        boolean vmsDestroyed = destroyClusterVMs();
        boolean cleanupNetwork = true;
        final KubernetesClusterDetailsVO clusterDetails = kubernetesClusterDetailsDao.findDetail(kubernetesCluster.getId(), "networkCleanup");
        if (clusterDetails != null) {
            cleanupNetwork = Boolean.parseBoolean(clusterDetails.getValue());
        }
        // if there are VM's that were not expunged, we can not delete the network
        if (vmsDestroyed) {
            if (cleanupNetwork) {
                validateClusterVMsDestroyed();
                try {
                    destroyKubernetesClusterNetwork();
                } catch (ManagementServerException e) {
                    String msg = String.format("Failed to destroy network of Kubernetes cluster ID: %s cleanup", kubernetesCluster.getUuid());
                    LOGGER.warn(msg, e);
                    updateKubernetesClusterEntryForGC();
                    throw new CloudRuntimeException(msg, e);
                }
            } else {
                try {
                    deleteKubernetesClusterNetworkRules();
                } catch (ManagementServerException e) {
                    String msg = String.format("Failed to remove network rules of Kubernetes cluster ID: %s", kubernetesCluster.getUuid());
                    LOGGER.warn(msg, e);
                    updateKubernetesClusterEntryForGC();
                    throw new CloudRuntimeException(msg, e);
                }
            }
        } else {
            String msg = String.format("Failed to destroy one or more VMs as part of Kubernetes cluster ID: %s cleanup", kubernetesCluster.getUuid());
            LOGGER.warn(msg);
            updateKubernetesClusterEntryForGC();
            throw new CloudRuntimeException(msg);
        }
        stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationSucceeded);
        boolean deleted = kubernetesClusterDao.remove(kubernetesCluster.getId());
        if (!deleted) {
            logMessage(Level.WARN, String.format("Failed to delete Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), null);
            updateKubernetesClusterEntryForGC();
            return false;
        }
        if (LOGGER.isInfoEnabled()) {
            LOGGER.info(String.format("Kubernetes cluster ID: %s is successfully deleted", kubernetesCluster.getUuid()));
        }
        return true;
    }
}
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.
+ +package com.cloud.kubernetes.cluster.actionworkers; + +import java.io.IOException; +import java.lang.reflect.Field; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +import javax.inject.Inject; + +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.command.user.firewall.CreateFirewallRuleCmd; +import org.apache.cloudstack.api.command.user.vm.StartVMCmd; +import org.apache.commons.codec.binary.Base64; +import org.apache.commons.collections.CollectionUtils; +import org.apache.log4j.Level; + +import com.cloud.capacity.CapacityManager; +import com.cloud.dc.ClusterDetailsDao; +import com.cloud.dc.ClusterDetailsVO; +import com.cloud.dc.ClusterVO; +import com.cloud.dc.DataCenter; +import com.cloud.dc.dao.ClusterDao; +import com.cloud.deploy.DeployDestination; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.InsufficientServerCapacityException; +import com.cloud.exception.ManagementServerException; +import com.cloud.exception.NetworkRuleConflictException; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.host.Host; +import com.cloud.host.HostVO; +import com.cloud.hypervisor.Hypervisor; +import com.cloud.kubernetes.cluster.KubernetesCluster; +import com.cloud.kubernetes.cluster.KubernetesClusterDetailsVO; +import com.cloud.kubernetes.cluster.KubernetesClusterManagerImpl; +import com.cloud.kubernetes.cluster.utils.KubernetesClusterUtil; +import com.cloud.network.IpAddress; +import com.cloud.network.Network; +import com.cloud.network.dao.FirewallRulesDao; +import com.cloud.network.dao.LoadBalancerDao; +import com.cloud.network.dao.LoadBalancerVO; +import com.cloud.network.firewall.FirewallService; +import com.cloud.network.lb.LoadBalancingRulesService; +import 
com.cloud.network.rules.FirewallRule; +import com.cloud.network.rules.FirewallRuleVO; +import com.cloud.network.rules.PortForwardingRuleVO; +import com.cloud.network.rules.RulesService; +import com.cloud.network.rules.dao.PortForwardingRulesDao; +import com.cloud.offering.ServiceOffering; +import com.cloud.resource.ResourceManager; +import com.cloud.template.VirtualMachineTemplate; +import com.cloud.user.Account; +import com.cloud.user.SSHKeyPairVO; +import com.cloud.uservm.UserVm; +import com.cloud.utils.Pair; +import com.cloud.utils.StringUtils; +import com.cloud.utils.component.ComponentContext; +import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallbackWithException; +import com.cloud.utils.db.TransactionStatus; +import com.cloud.utils.exception.ExecutionException; +import com.cloud.utils.net.Ip; +import com.cloud.utils.net.NetUtils; +import com.cloud.vm.Nic; +import com.cloud.vm.UserVmManager; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.dao.VMInstanceDao; +import com.google.common.base.Strings; + +public class KubernetesClusterResourceModifierActionWorker extends KubernetesClusterActionWorker { + + @Inject + protected CapacityManager capacityManager; + @Inject + protected ClusterDao clusterDao; + @Inject + protected ClusterDetailsDao clusterDetailsDao; + @Inject + protected FirewallRulesDao firewallRulesDao; + @Inject + protected FirewallService firewallService; + @Inject + protected LoadBalancingRulesService lbService; + @Inject + protected RulesService rulesService; + @Inject + protected PortForwardingRulesDao portForwardingRulesDao; + @Inject + protected ResourceManager resourceManager; + @Inject + protected LoadBalancerDao loadBalancerDao; + @Inject + protected VMInstanceDao vmInstanceDao; + @Inject + protected UserVmManager userVmManager; + + protected String kubernetesClusterNodeNamePrefix; + + protected KubernetesClusterResourceModifierActionWorker(final KubernetesCluster kubernetesCluster, final 
KubernetesClusterManagerImpl clusterManager) { + super(kubernetesCluster, clusterManager); + } + + protected void init() { + super.init(); + kubernetesClusterNodeNamePrefix = getKubernetesClusterNodeNamePrefix(); + } + + private String getKubernetesNodeConfig(final String joinIp, final boolean ejectIso) throws IOException { + String k8sNodeConfig = readResourceFile("/conf/k8s-node.yml"); + final String sshPubKey = "{{ k8s.ssh.pub.key }}"; + final String joinIpKey = "{{ k8s_master.join_ip }}"; + final String clusterTokenKey = "{{ k8s_master.cluster.token }}"; + final String ejectIsoKey = "{{ k8s.eject.iso }}"; + String pubKey = "- \"" + configurationDao.getValue("ssh.publickey") + "\""; + String sshKeyPair = kubernetesCluster.getKeyPair(); + if (!Strings.isNullOrEmpty(sshKeyPair)) { + SSHKeyPairVO sshkp = sshKeyPairDao.findByName(owner.getAccountId(), owner.getDomainId(), sshKeyPair); + if (sshkp != null) { + pubKey += "\n - \"" + sshkp.getPublicKey() + "\""; + } + } + k8sNodeConfig = k8sNodeConfig.replace(sshPubKey, pubKey); + k8sNodeConfig = k8sNodeConfig.replace(joinIpKey, joinIp); + k8sNodeConfig = k8sNodeConfig.replace(clusterTokenKey, KubernetesClusterUtil.generateClusterToken(kubernetesCluster)); + k8sNodeConfig = k8sNodeConfig.replace(ejectIsoKey, String.valueOf(ejectIso)); + /* genarate /.docker/config.json file on the nodes only if Kubernetes cluster is created to + * use docker private registry */ + String dockerUserName = null; + String dockerPassword = null; + String dockerRegistryUrl = null; + String dockerRegistryEmail = null; + List details = kubernetesClusterDetailsDao.listDetails(kubernetesCluster.getId()); + for (KubernetesClusterDetailsVO detail : details) { + if (detail.getName().equals(ApiConstants.DOCKER_REGISTRY_USER_NAME)) { + dockerUserName = detail.getValue(); + } + if (detail.getName().equals(ApiConstants.DOCKER_REGISTRY_PASSWORD)) { + dockerPassword = detail.getValue(); + } + if (detail.getName().equals(ApiConstants.DOCKER_REGISTRY_URL)) 
{ + dockerRegistryUrl = detail.getValue(); + } + if (detail.getName().equals(ApiConstants.DOCKER_REGISTRY_EMAIL)) { + dockerRegistryEmail = detail.getValue(); + } + } + if (!Strings.isNullOrEmpty(dockerUserName) && !Strings.isNullOrEmpty(dockerPassword)) { + // do write file for /.docker/config.json through the code instead of k8s-node.yml as we can no make a section + // optional or conditionally applied + String dockerConfigString = "write-files:\n" + + " - path: /.docker/config.json\n" + + " owner: core:core\n" + + " permissions: '0644'\n" + + " content: |\n" + + " {\n" + + " \"auths\": {\n" + + " {{docker.url}}: {\n" + + " \"auth\": {{docker.secret}},\n" + + " \"email\": {{docker.email}}\n" + + " }\n" + + " }\n" + + " }"; + k8sNodeConfig = k8sNodeConfig.replace("write-files:", dockerConfigString); + final String dockerUrlKey = "{{docker.url}}"; + final String dockerAuthKey = "{{docker.secret}}"; + final String dockerEmailKey = "{{docker.email}}"; + final String usernamePasswordKey = dockerUserName + ":" + dockerPassword; + String base64Auth = Base64.encodeBase64String(usernamePasswordKey.getBytes(StringUtils.getPreferredCharset())); + k8sNodeConfig = k8sNodeConfig.replace(dockerUrlKey, "\"" + dockerRegistryUrl + "\""); + k8sNodeConfig = k8sNodeConfig.replace(dockerAuthKey, "\"" + base64Auth + "\""); + k8sNodeConfig = k8sNodeConfig.replace(dockerEmailKey, "\"" + dockerRegistryEmail + "\""); + } + return k8sNodeConfig; + } + + protected DeployDestination plan(final long nodesCount, final DataCenter zone, final ServiceOffering offering) throws InsufficientServerCapacityException { + final int cpu_requested = offering.getCpu() * offering.getSpeed(); + final long ram_requested = offering.getRamSize() * 1024L * 1024L; + List hosts = resourceManager.listAllHostsInOneZoneByType(Host.Type.Routing, zone.getId()); + final Map> hosts_with_resevered_capacity = new ConcurrentHashMap>(); + for (HostVO h : hosts) { + hosts_with_resevered_capacity.put(h.getUuid(), new Pair(h, 
0)); + } + boolean suitable_host_found = false; + for (int i = 1; i <= nodesCount + 1; i++) { + suitable_host_found = false; + for (Map.Entry> hostEntry : hosts_with_resevered_capacity.entrySet()) { + Pair hp = hostEntry.getValue(); + HostVO h = hp.first(); + int reserved = hp.second(); + reserved++; + ClusterVO cluster = clusterDao.findById(h.getClusterId()); + ClusterDetailsVO cluster_detail_cpu = clusterDetailsDao.findDetail(cluster.getId(), "cpuOvercommitRatio"); + ClusterDetailsVO cluster_detail_ram = clusterDetailsDao.findDetail(cluster.getId(), "memoryOvercommitRatio"); + Float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue()); + Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue()); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(String.format("Checking host ID: %s for capacity already reserved %d", h.getUuid(), reserved)); + } + if (capacityManager.checkIfHostHasCapacity(h.getId(), cpu_requested * reserved, ram_requested * reserved, false, cpuOvercommitRatio, memoryOvercommitRatio, true)) { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(String.format("Found host ID: %s for with enough capacity, CPU=%d RAM=%d", h.getUuid(), cpu_requested * reserved, ram_requested * reserved)); + } + hostEntry.setValue(new Pair(h, reserved)); + suitable_host_found = true; + break; + } + } + if (!suitable_host_found) { + if (LOGGER.isInfoEnabled()) { + LOGGER.info(String.format("Suitable hosts not found in datacenter ID: %s for node %d", zone.getUuid(), i)); + } + break; + } + } + if (suitable_host_found) { + if (LOGGER.isInfoEnabled()) { + LOGGER.info(String.format("Suitable hosts found in datacenter ID: %s, creating deployment destination", zone.getUuid())); + } + return new DeployDestination(zone, null, null, null); + } + String msg = String.format("Cannot find enough capacity for Kubernetes cluster(requested cpu=%1$s memory=%2$s)", + cpu_requested * nodesCount, ram_requested * nodesCount); + LOGGER.warn(msg); + throw new 
InsufficientServerCapacityException(msg, DataCenter.class, zone.getId()); + } + + protected DeployDestination plan() throws InsufficientServerCapacityException { + ServiceOffering offering = serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId()); + DataCenter zone = dataCenterDao.findById(kubernetesCluster.getZoneId()); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(String.format("Checking deployment destination for Kubernetes cluster ID: %s in zone ID: %s", kubernetesCluster.getUuid(), zone.getUuid())); + } + return plan(kubernetesCluster.getTotalNodeCount(), zone, offering); + } + + protected void startKubernetesVM(final UserVm vm) throws ManagementServerException { + try { + StartVMCmd startVm = new StartVMCmd(); + startVm = ComponentContext.inject(startVm); + Field f = startVm.getClass().getDeclaredField("id"); + f.setAccessible(true); + f.set(startVm, vm.getId()); + userVmService.startVirtualMachine(startVm); + if (LOGGER.isInfoEnabled()) { + LOGGER.info(String.format("Started VM ID: %s in the Kubernetes cluster ID: %s", vm.getUuid(), kubernetesCluster.getUuid())); + } + } catch (IllegalAccessException | NoSuchFieldException | ExecutionException | + ResourceUnavailableException | ResourceAllocationException | InsufficientCapacityException ex) { + throw new ManagementServerException(String.format("Failed to start VM in the Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), ex); + } + + UserVm startVm = userVmDao.findById(vm.getId()); + if (!startVm.getState().equals(VirtualMachine.State.Running)) { + throw new ManagementServerException(String.format("Failed to start VM in the Kubernetes cluster ID: %s", kubernetesCluster.getUuid())); + } + } + + protected List provisionKubernetesClusterNodeVms(final long nodeCount, final int offset, final String publicIpAddress) throws ManagementServerException, + ResourceUnavailableException, InsufficientCapacityException { + List nodes = new ArrayList<>(); + for (int i = offset + 1; i <= nodeCount; i++) 
{ + UserVm vm = createKubernetesNode(publicIpAddress, i); + addKubernetesClusterVm(kubernetesCluster.getId(), vm.getId()); + startKubernetesVM(vm); + vm = userVmDao.findById(vm.getId()); + if (vm == null) { + throw new ManagementServerException(String.format("Failed to provision worker VM for Kubernetes cluster ID: %s" , kubernetesCluster.getUuid())); + } + nodes.add(vm); + if (LOGGER.isInfoEnabled()) { + LOGGER.info(String.format("Provisioned node VM ID: %s in to the Kubernetes cluster ID: %s", vm.getUuid(), kubernetesCluster.getUuid())); + } + } + return nodes; + } + + protected List provisionKubernetesClusterNodeVms(final long nodeCount, final String publicIpAddress) throws ManagementServerException, + ResourceUnavailableException, InsufficientCapacityException { + return provisionKubernetesClusterNodeVms(nodeCount, 0, publicIpAddress); + } + + protected UserVm createKubernetesNode(String joinIp, int nodeInstance) throws ManagementServerException, + ResourceUnavailableException, InsufficientCapacityException { + UserVm nodeVm = null; + DataCenter zone = dataCenterDao.findById(kubernetesCluster.getZoneId()); + ServiceOffering serviceOffering = serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId()); + VirtualMachineTemplate template = templateDao.findById(kubernetesCluster.getTemplateId()); + List networkIds = new ArrayList(); + networkIds.add(kubernetesCluster.getNetworkId()); + Account owner = accountDao.findById(kubernetesCluster.getAccountId()); + Network.IpAddresses addrs = new Network.IpAddresses(null, null); + long rootDiskSize = kubernetesCluster.getNodeRootDiskSize(); + Map customParameterMap = new HashMap(); + if (rootDiskSize > 0) { + customParameterMap.put("rootdisksize", String.valueOf(rootDiskSize)); + } + String hostName = getKubernetesClusterNodeAvailableName(String.format("%s-node-%s", kubernetesClusterNodeNamePrefix, nodeInstance)); + String k8sNodeConfig = null; + try { + k8sNodeConfig = getKubernetesNodeConfig(joinIp, 
Hypervisor.HypervisorType.VMware.equals(template.getHypervisorType())); + } catch (IOException e) { + logAndThrow(Level.ERROR, "Failed to read Kubernetes node configuration file", e); + } + String base64UserData = Base64.encodeBase64String(k8sNodeConfig.getBytes(StringUtils.getPreferredCharset())); + nodeVm = userVmService.createAdvancedVirtualMachine(zone, serviceOffering, template, networkIds, owner, + hostName, hostName, null, null, null, + null, BaseCmd.HTTPMethod.POST, base64UserData, kubernetesCluster.getKeyPair(), + null, addrs, null, null, null, customParameterMap, null, null, null, null); + if (LOGGER.isInfoEnabled()) { + LOGGER.info(String.format("Created node VM ID: %s, %s in the Kubernetes cluster ID: %s", nodeVm.getUuid(), hostName, kubernetesCluster.getUuid())); + } + return nodeVm; + } + + protected IpAddress getSourceNatIp(Network network) { + List addresses = networkModel.listPublicIpsAssignedToGuestNtwk(network.getId(), true); + if (CollectionUtils.isEmpty(addresses)) { + return null; + } + for (IpAddress address : addresses) { + if (address.isSourceNat()) { + return address; + } + } + return null; + } + + protected void provisionFirewallRules(final IpAddress publicIp, final Account account, int startPort, int endPort) throws NoSuchFieldException, + IllegalAccessException, ResourceUnavailableException, NetworkRuleConflictException { + List sourceCidrList = new ArrayList(); + sourceCidrList.add("0.0.0.0/0"); + + CreateFirewallRuleCmd rule = new CreateFirewallRuleCmd(); + rule = ComponentContext.inject(rule); + + Field addressField = rule.getClass().getDeclaredField("ipAddressId"); + addressField.setAccessible(true); + addressField.set(rule, publicIp.getId()); + + Field protocolField = rule.getClass().getDeclaredField("protocol"); + protocolField.setAccessible(true); + protocolField.set(rule, "TCP"); + + Field startPortField = rule.getClass().getDeclaredField("publicStartPort"); + startPortField.setAccessible(true); + startPortField.set(rule, 
startPort); + + Field endPortField = rule.getClass().getDeclaredField("publicEndPort"); + endPortField.setAccessible(true); + endPortField.set(rule, endPort); + + Field cidrField = rule.getClass().getDeclaredField("cidrlist"); + cidrField.setAccessible(true); + cidrField.set(rule, sourceCidrList); + + firewallService.createIngressFirewallRule(rule); + firewallService.applyIngressFwRules(publicIp.getId(), account); + } + + /** + * To provision SSH port forwarding rules for the given Kubernetes cluster + * for its given virtual machines + * @param publicIp + * @param network + * @param account + * @param List clusterVMIds (when empty then method must be called while + * down-scaling of the KubernetesCluster therefore no new rules + * to be added) + * @param firewallRuleSourcePortStart + * @throws ResourceUnavailableException + * @throws NetworkRuleConflictException + */ + protected void provisionSshPortForwardingRules(IpAddress publicIp, Network network, Account account, + List clusterVMIds, int firewallRuleSourcePortStart) throws ResourceUnavailableException, + NetworkRuleConflictException { + if (!CollectionUtils.isEmpty(clusterVMIds)) { + final long publicIpId = publicIp.getId(); + final long networkId = network.getId(); + final long accountId = account.getId(); + final long domainId = account.getDomainId(); + for (int i = 0; i < clusterVMIds.size(); ++i) { + long vmId = clusterVMIds.get(i); + Nic vmNic = networkModel.getNicInNetwork(vmId, networkId); + final Ip vmIp = new Ip(vmNic.getIPv4Address()); + final long vmIdFinal = vmId; + final int srcPortFinal = firewallRuleSourcePortStart + i; + + PortForwardingRuleVO pfRule = Transaction.execute(new TransactionCallbackWithException() { + @Override + public PortForwardingRuleVO doInTransaction(TransactionStatus status) throws NetworkRuleConflictException { + PortForwardingRuleVO newRule = + new PortForwardingRuleVO(null, publicIpId, + srcPortFinal, srcPortFinal, + vmIp, + 22, 22, + "tcp", networkId, accountId, 
domainId, vmIdFinal); + newRule.setDisplay(true); + newRule.setState(FirewallRule.State.Add); + newRule = portForwardingRulesDao.persist(newRule); + return newRule; + } + }); + rulesService.applyPortForwardingRules(publicIp.getId(), account); + if (LOGGER.isInfoEnabled()) { + LOGGER.info(String.format("Provisioned SSH port forwarding rule from port %d to 22 on %s to the VM IP : %s in Kubernetes cluster ID: %s", srcPortFinal, publicIp.getAddress().addr(), vmIp.toString(), kubernetesCluster.getUuid())); + } + } + } + } + + protected FirewallRule removeApiFirewallRule(final IpAddress publicIp) { + FirewallRule rule = null; + List firewallRules = firewallRulesDao.listByIpAndPurposeAndNotRevoked(publicIp.getId(), FirewallRule.Purpose.Firewall); + for (FirewallRuleVO firewallRule : firewallRules) { + if (firewallRule.getSourcePortStart() == CLUSTER_API_PORT && + firewallRule.getSourcePortEnd() == CLUSTER_API_PORT) { + rule = firewallRule; + firewallService.revokeIngressFwRule(firewallRule.getId(), true); + break; + } + } + return rule; + } + + protected FirewallRule removeSshFirewallRule(final IpAddress publicIp) { + FirewallRule rule = null; + List firewallRules = firewallRulesDao.listByIpAndPurposeAndNotRevoked(publicIp.getId(), FirewallRule.Purpose.Firewall); + for (FirewallRuleVO firewallRule : firewallRules) { + if (firewallRule.getSourcePortStart() == CLUSTER_NODES_DEFAULT_START_SSH_PORT) { + rule = firewallRule; + firewallService.revokeIngressFwRule(firewallRule.getId(), true); + break; + } + } + return rule; + } + + protected void removePortForwardingRules(final IpAddress publicIp, final Network network, final Account account, final List removedVMIds) throws ResourceUnavailableException { + if (!CollectionUtils.isEmpty(removedVMIds)) { + for (Long vmId : removedVMIds) { + List pfRules = portForwardingRulesDao.listByNetwork(network.getId()); + for (PortForwardingRuleVO pfRule : pfRules) { + if (pfRule.getVirtualMachineId() == vmId) { + 
portForwardingRulesDao.remove(pfRule.getId()); + break; + } + } + } + rulesService.applyPortForwardingRules(publicIp.getId(), account); + } + } + + protected void removeLoadBalancingRule(final IpAddress publicIp, final Network network, + final Account account, final int port) throws ResourceUnavailableException { + List rules = loadBalancerDao.listByIpAddress(publicIp.getId()); + for (LoadBalancerVO rule : rules) { + if (rule.getNetworkId() == network.getId() && + rule.getAccountId() == account.getId() && + rule.getSourcePortStart() == port && + rule.getSourcePortEnd() == port) { + lbService.deleteLoadBalancerRule(rule.getId(), true); + break; + } + } + } + + protected String getKubernetesClusterNodeNamePrefix() { + String prefix = kubernetesCluster.getName(); + if (!NetUtils.verifyDomainNameLabel(prefix, true)) { + prefix = prefix.replaceAll("[^a-zA-Z0-9-]", ""); + if (prefix.length() == 0) { + prefix = kubernetesCluster.getUuid(); + } + prefix = "k8s-" + prefix; + } + if (prefix.length() > 40) { + prefix = prefix.substring(0, 40); + } + return prefix; + } + + protected String getKubernetesClusterNodeAvailableName(final String hostName) { + String name = hostName; + int suffix = 1; + while (vmInstanceDao.findVMByHostName(name) != null) { + name = String.format("%s-%d", hostName, suffix); + suffix++; + } + return name; + } +} diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java new file mode 100644 index 00000000000..0d6a028c9a8 --- /dev/null +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java @@ -0,0 +1,431 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package com.cloud.kubernetes.cluster.actionworkers; + +import java.io.File; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; + +import javax.inject.Inject; + +import org.apache.cloudstack.context.CallContext; +import org.apache.commons.collections.CollectionUtils; +import org.apache.log4j.Level; + +import com.cloud.dc.DataCenter; +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.ManagementServerException; +import com.cloud.exception.NetworkRuleConflictException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.exception.VirtualMachineMigrationException; +import com.cloud.hypervisor.Hypervisor; +import com.cloud.kubernetes.cluster.KubernetesCluster; +import com.cloud.kubernetes.cluster.KubernetesClusterManagerImpl; +import com.cloud.kubernetes.cluster.KubernetesClusterService; +import com.cloud.kubernetes.cluster.KubernetesClusterVO; +import com.cloud.kubernetes.cluster.KubernetesClusterVmMapVO; +import com.cloud.kubernetes.cluster.utils.KubernetesClusterUtil; +import com.cloud.network.IpAddress; +import com.cloud.network.Network; +import com.cloud.network.rules.FirewallRule; +import com.cloud.offering.ServiceOffering; 
+import com.cloud.uservm.UserVm; +import com.cloud.utils.Pair; +import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallback; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.ssh.SshHelper; +import com.cloud.vm.UserVmVO; +import com.cloud.vm.VMInstanceVO; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.dao.VMInstanceDao; +import com.google.common.base.Strings; + +public class KubernetesClusterScaleWorker extends KubernetesClusterResourceModifierActionWorker { + + @Inject + protected VMInstanceDao vmInstanceDao; + + private ServiceOffering serviceOffering; + private Long clusterSize; + private KubernetesCluster.State originalState; + private Network network; + private long scaleTimeoutTime; + + public KubernetesClusterScaleWorker(final KubernetesCluster kubernetesCluster, + final ServiceOffering serviceOffering, + final Long clusterSize, + final KubernetesClusterManagerImpl clusterManager) { + super(kubernetesCluster, clusterManager); + this.serviceOffering = serviceOffering; + this.clusterSize = clusterSize; + this.originalState = kubernetesCluster.getState(); + } + + protected void init() { + super.init(); + this.network = networkDao.findById(kubernetesCluster.getNetworkId()); + } + + private void logTransitStateToFailedIfNeededAndThrow(final Level logLevel, final String message, final Exception e) throws CloudRuntimeException { + KubernetesCluster cluster = kubernetesClusterDao.findById(kubernetesCluster.getId()); + if (cluster != null && KubernetesCluster.State.Scaling.equals(cluster.getState())) { + logTransitStateAndThrow(logLevel, message, kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed, e); + } else { + logAndThrow(logLevel, message, e); + } + } + + private void logTransitStateToFailedIfNeededAndThrow(final Level logLevel, final String message) throws CloudRuntimeException { + logTransitStateToFailedIfNeededAndThrow(logLevel, message, null); + } + + /** + * Scale network 
rules for an existing Kubernetes cluster while scaling it + * Open up firewall for SSH access from port NODES_DEFAULT_START_SSH_PORT to NODES_DEFAULT_START_SSH_PORT+n. + * Also remove port forwarding rules for removed virtual machines and create port-forwarding rule + * to forward public IP traffic to all node VMs' private IP. + * @param clusterVMIds + * @param removedVMIds + * @throws ManagementServerException + */ + private void scaleKubernetesClusterNetworkRules(final List clusterVMIds, final List removedVMIds) throws ManagementServerException { + if (!Network.GuestType.Isolated.equals(network.getGuestType())) { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(String.format("Network ID: %s for Kubernetes cluster ID: %s is not an isolated network, therefore, no need for network rules", network.getUuid(), kubernetesCluster.getUuid())); + } + return; + } + IpAddress publicIp = getSourceNatIp(network); + if (publicIp == null) { + throw new ManagementServerException(String.format("No source NAT IP addresses found for network ID: %s, Kubernetes cluster ID: %s", network.getUuid(), kubernetesCluster.getUuid())); + } + + // Remove existing SSH firewall rules + FirewallRule firewallRule = removeSshFirewallRule(publicIp); + if (firewallRule == null) { + throw new ManagementServerException("Firewall rule for node SSH access can't be provisioned"); + } + int existingFirewallRuleSourcePortEnd = firewallRule.getSourcePortEnd(); + final int scaledTotalNodeCount = clusterSize == null ? 
(int)kubernetesCluster.getTotalNodeCount() : (int)(clusterSize + kubernetesCluster.getMasterNodeCount()); + // Provision new SSH firewall rules + try { + provisionFirewallRules(publicIp, owner, CLUSTER_NODES_DEFAULT_START_SSH_PORT, CLUSTER_NODES_DEFAULT_START_SSH_PORT + scaledTotalNodeCount - 1); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(String.format("Provisioned firewall rule to open up port %d to %d on %s in Kubernetes cluster ID: %s", + CLUSTER_NODES_DEFAULT_START_SSH_PORT, CLUSTER_NODES_DEFAULT_START_SSH_PORT + scaledTotalNodeCount - 1, publicIp.getAddress().addr(), kubernetesCluster.getUuid())); + } + } catch (NoSuchFieldException | IllegalAccessException | ResourceUnavailableException e) { + throw new ManagementServerException(String.format("Failed to activate SSH firewall rules for the Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), e); + } + + try { + removePortForwardingRules(publicIp, network, owner, removedVMIds); + } catch (ResourceUnavailableException e) { + throw new ManagementServerException(String.format("Failed to remove SSH port forwarding rules for removed VMs for the Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), e); + } + + try { + provisionSshPortForwardingRules(publicIp, network, owner, clusterVMIds, existingFirewallRuleSourcePortEnd + 1); + } catch (ResourceUnavailableException | NetworkRuleConflictException e) { + throw new ManagementServerException(String.format("Failed to activate SSH port forwarding rules for the Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), e); + } + } + + private KubernetesClusterVO updateKubernetesClusterEntry(final long cores, final long memory, + final Long size, final Long serviceOfferingId) { + return Transaction.execute((TransactionCallback) status -> { + KubernetesClusterVO updatedCluster = kubernetesClusterDao.createForUpdate(kubernetesCluster.getId()); + updatedCluster.setCores(cores); + updatedCluster.setMemory(memory); + if (size != null) { + 
updatedCluster.setNodeCount(size); + } + if (serviceOfferingId != null) { + updatedCluster.setServiceOfferingId(serviceOfferingId); + } + kubernetesClusterDao.persist(updatedCluster); + return updatedCluster; + }); + } + + private KubernetesClusterVO updateKubernetesClusterEntry(final Long newSize, final ServiceOffering newServiceOffering) throws CloudRuntimeException { + final ServiceOffering serviceOffering = newServiceOffering == null ? + serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId()) : newServiceOffering; + final Long serviceOfferingId = newServiceOffering == null ? null : serviceOffering.getId(); + final long size = newSize == null ? kubernetesCluster.getTotalNodeCount() : (newSize + kubernetesCluster.getMasterNodeCount()); + final long cores = serviceOffering.getCpu() * size; + final long memory = serviceOffering.getRamSize() * size; + KubernetesClusterVO kubernetesClusterVO = updateKubernetesClusterEntry(cores, memory, newSize, serviceOfferingId); + if (kubernetesClusterVO == null) { + logTransitStateAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster ID: %s failed, unable to update Kubernetes cluster", + kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); + } + return kubernetesClusterVO; + } + + private boolean removeKubernetesClusterNode(final String ipAddress, final int port, final UserVm userVm, final int retries, final int waitDuration) { + File pkFile = getManagementServerSshPublicKeyFile(); + int retryCounter = 0; + String hostName = userVm.getHostName(); + if (!Strings.isNullOrEmpty(hostName)) { + hostName = hostName.toLowerCase(); + } + while (retryCounter < retries) { + retryCounter++; + try { + Pair result = SshHelper.sshExecute(ipAddress, port, CLUSTER_NODE_VM_USER, + pkFile, null, String.format("sudo kubectl drain %s --ignore-daemonsets --delete-local-data", hostName), + 10000, 10000, 60000); + if (!result.first()) { + LOGGER.warn(String.format("Draining node: %s 
on VM ID: %s in Kubernetes cluster ID: %s unsuccessful", hostName, userVm.getUuid(), kubernetesCluster.getUuid())); + } else { + result = SshHelper.sshExecute(ipAddress, port, CLUSTER_NODE_VM_USER, + pkFile, null, String.format("sudo kubectl delete node %s", hostName), + 10000, 10000, 30000); + if (result.first()) { + return true; + } else { + LOGGER.warn(String.format("Deleting node: %s on VM ID: %s in Kubernetes cluster ID: %s unsuccessful", hostName, userVm.getUuid(), kubernetesCluster.getUuid())); + } + } + break; + } catch (Exception e) { + String msg = String.format("Failed to remove Kubernetes cluster ID: %s node: %s on VM ID: %s", kubernetesCluster.getUuid(), hostName, userVm.getUuid()); + LOGGER.warn(msg, e); + } + try { + Thread.sleep(waitDuration); + } catch (InterruptedException ie) { + LOGGER.error(String.format("Error while waiting for Kubernetes cluster ID: %s node: %s on VM ID: %s removal", kubernetesCluster.getUuid(), hostName, userVm.getUuid()), ie); + } + retryCounter++; + } + return false; + } + + private void validateKubernetesClusterScaleOfferingParameters() throws CloudRuntimeException { + if (KubernetesCluster.State.Created.equals(originalState)) { + return; + } + final long originalNodeCount = kubernetesCluster.getTotalNodeCount(); + List vmList = kubernetesClusterVmMapDao.listByClusterId(kubernetesCluster.getId()); + if (vmList == null || vmList.isEmpty() || vmList.size() < originalNodeCount) { + logTransitStateToFailedIfNeededAndThrow(Level.WARN, String.format("Scaling Kubernetes cluster ID: %s failed, it is in unstable state as not enough existing VM instances found!", kubernetesCluster.getUuid())); + } else { + for (KubernetesClusterVmMapVO vmMapVO : vmList) { + VMInstanceVO vmInstance = vmInstanceDao.findById(vmMapVO.getVmId()); + if (vmInstance != null && vmInstance.getState().equals(VirtualMachine.State.Running) && + vmInstance.getHypervisorType() != Hypervisor.HypervisorType.XenServer && + vmInstance.getHypervisorType() != 
Hypervisor.HypervisorType.VMware && + vmInstance.getHypervisorType() != Hypervisor.HypervisorType.Simulator) { + logTransitStateToFailedIfNeededAndThrow(Level.WARN, String.format("Scaling Kubernetes cluster ID: %s failed, scaling Kubernetes cluster with running VMs on hypervisor %s is not supported!", kubernetesCluster.getUuid(), vmInstance.getHypervisorType())); + } + } + } + } + + private void validateKubernetesClusterScaleSizeParameters() throws CloudRuntimeException { + final long originalClusterSize = kubernetesCluster.getNodeCount(); + if (network == null) { + logTransitStateToFailedIfNeededAndThrow(Level.WARN, String.format("Scaling failed for Kubernetes cluster ID: %s, cluster network not found", kubernetesCluster.getUuid())); + } + // Check capacity and transition state + final long newVmRequiredCount = clusterSize - originalClusterSize; + final ServiceOffering clusterServiceOffering = serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId()); + if (clusterServiceOffering == null) { + logTransitStateToFailedIfNeededAndThrow(Level.WARN, String.format("Scaling failed for Kubernetes cluster ID: %s, cluster service offering not found", kubernetesCluster.getUuid())); + } + if (newVmRequiredCount > 0) { + final DataCenter zone = dataCenterDao.findById(kubernetesCluster.getZoneId()); + try { + if (originalState.equals(KubernetesCluster.State.Running)) { + plan(newVmRequiredCount, zone, clusterServiceOffering); + } else { + plan(kubernetesCluster.getTotalNodeCount() + newVmRequiredCount, zone, clusterServiceOffering); + } + } catch (InsufficientCapacityException e) { + logTransitStateToFailedIfNeededAndThrow(Level.WARN, String.format("Scaling failed for Kubernetes cluster ID: %s in zone ID: %s, insufficient capacity", kubernetesCluster.getUuid(), zone.getUuid())); + } + } + List vmList = kubernetesClusterVmMapDao.listByClusterId(kubernetesCluster.getId()); + if (CollectionUtils.isEmpty(vmList) || vmList.size() < kubernetesCluster.getTotalNodeCount()) { 
+ logTransitStateToFailedIfNeededAndThrow(Level.ERROR, String.format("Scaling failed for Kubernetes cluster ID: %s, it is in unstable state as not enough existing VM instances found", kubernetesCluster.getUuid())); + } + } + + private void scaleKubernetesClusterOffering() throws CloudRuntimeException { + validateKubernetesClusterScaleOfferingParameters(); + if (!kubernetesCluster.getState().equals(KubernetesCluster.State.Scaling)) { + stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.ScaleUpRequested); + } + if (KubernetesCluster.State.Created.equals(originalState)) { + kubernetesCluster = updateKubernetesClusterEntry(null, serviceOffering); + return; + } + final long size = kubernetesCluster.getTotalNodeCount(); + List vmList = kubernetesClusterVmMapDao.listByClusterId(kubernetesCluster.getId()); + final long tobeScaledVMCount = Math.min(vmList.size(), size); + for (long i = 0; i < tobeScaledVMCount; i++) { + KubernetesClusterVmMapVO vmMapVO = vmList.get((int) i); + UserVmVO userVM = userVmDao.findById(vmMapVO.getVmId()); + boolean result = false; + try { + result = userVmManager.upgradeVirtualMachine(userVM.getId(), serviceOffering.getId(), new HashMap()); + } catch (ResourceUnavailableException | ManagementServerException | ConcurrentOperationException | VirtualMachineMigrationException e) { + logTransitStateAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster ID: %s failed, unable to scale cluster VM ID: %s", kubernetesCluster.getUuid(), userVM.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed, e); + } + if (!result) { + logTransitStateAndThrow(Level.WARN, String.format("Scaling Kubernetes cluster ID: %s failed, unable to scale cluster VM ID: %s", kubernetesCluster.getUuid(), userVM.getUuid()),kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); + } + if (System.currentTimeMillis() > scaleTimeoutTime) { + logTransitStateAndThrow(Level.WARN, String.format("Scaling Kubernetes cluster ID: %s 
failed, scaling action timed out", kubernetesCluster.getUuid()),kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); + } + } + kubernetesCluster = updateKubernetesClusterEntry(null, serviceOffering); + } + + private void scaleDownKubernetesClusterSize() throws CloudRuntimeException { + if (!kubernetesCluster.getState().equals(KubernetesCluster.State.Scaling)) { + stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.ScaleDownRequested); + } + final List originalVmList = getKubernetesClusterVMMaps(); + int i = originalVmList.size() - 1; + List removedVmIds = new ArrayList<>(); + while (i >= kubernetesCluster.getMasterNodeCount() + clusterSize) { + KubernetesClusterVmMapVO vmMapVO = originalVmList.get(i); + UserVmVO userVM = userVmDao.findById(vmMapVO.getVmId()); + if (!removeKubernetesClusterNode(publicIpAddress, sshPort, userVM, 3, 30000)) { + logTransitStateAndThrow(Level.ERROR, String.format("Scaling failed for Kubernetes cluster ID: %s, failed to remove Kubernetes node: %s running on VM ID: %s", kubernetesCluster.getUuid(), userVM.getHostName(), userVM.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); + } + // For removing port-forwarding network rules + removedVmIds.add(userVM.getId()); + try { + UserVm vm = userVmService.destroyVm(userVM.getId(), true); + if (!userVmManager.expunge(userVM, CallContext.current().getCallingUserId(), CallContext.current().getCallingAccount())) { + logTransitStateAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster ID: %s failed, unable to expunge VM '%s'." 
+ , kubernetesCluster.getUuid() + , vm.getInstanceName()), + kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); + } + } catch (ResourceUnavailableException e) { + logTransitStateAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster ID: %s failed, unable to remove VM ID: %s" + , kubernetesCluster.getUuid() , userVM.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed, e); + } + kubernetesClusterVmMapDao.expunge(vmMapVO.getId()); + if (System.currentTimeMillis() > scaleTimeoutTime) { + logTransitStateAndThrow(Level.WARN, String.format("Scaling Kubernetes cluster ID: %s failed, scaling action timed out", kubernetesCluster.getUuid()),kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); + } + i--; + } + // Scale network rules to update firewall rule + try { + scaleKubernetesClusterNetworkRules(null, removedVmIds); + } catch (ManagementServerException e) { + logTransitStateAndThrow(Level.ERROR, String.format("Scaling failed for Kubernetes cluster ID: %s, unable to update network rules", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed, e); + } + } + + private void scaleUpKubernetesClusterSize(final long newVmCount) throws CloudRuntimeException { + if (!kubernetesCluster.getState().equals(KubernetesCluster.State.Scaling)) { + stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.ScaleUpRequested); + } + List clusterVMs = new ArrayList<>(); + List clusterVMIds = new ArrayList<>(); + try { + clusterVMs = provisionKubernetesClusterNodeVms((int)(newVmCount + kubernetesCluster.getNodeCount()), (int)kubernetesCluster.getNodeCount(), publicIpAddress); + } catch (CloudRuntimeException | ManagementServerException | ResourceUnavailableException | InsufficientCapacityException e) { + logTransitStateToFailedIfNeededAndThrow(Level.ERROR, String.format("Scaling failed for Kubernetes cluster ID: %s, unable to provision node VM in the cluster", 
kubernetesCluster.getUuid()), e); + } + attachIsoKubernetesVMs(clusterVMs); + for (UserVm vm : clusterVMs) { + clusterVMIds.add(vm.getId()); + } + try { + scaleKubernetesClusterNetworkRules(clusterVMIds, null); + } catch (ManagementServerException e) { + logTransitStateToFailedIfNeededAndThrow(Level.ERROR, String.format("Scaling failed for Kubernetes cluster ID: %s, unable to update network rules", kubernetesCluster.getUuid()), e); + } + KubernetesClusterVO kubernetesClusterVO = kubernetesClusterDao.findById(kubernetesCluster.getId()); + kubernetesClusterVO.setNodeCount(clusterSize); + boolean readyNodesCountValid = KubernetesClusterUtil.validateKubernetesClusterReadyNodesCount(kubernetesClusterVO, publicIpAddress, sshPort, + CLUSTER_NODE_VM_USER, sshKeyFile, scaleTimeoutTime, 15000); + detachIsoKubernetesVMs(clusterVMs); + if (!readyNodesCountValid) { // Scaling failed + logTransitStateToFailedIfNeededAndThrow(Level.ERROR, String.format("Scaling unsuccessful for Kubernetes cluster ID: %s as it does not have desired number of nodes in ready state", kubernetesCluster.getUuid())); + } + } + + private void scaleKubernetesClusterSize() throws CloudRuntimeException { + validateKubernetesClusterScaleSizeParameters(); + final long originalClusterSize = kubernetesCluster.getNodeCount(); + final long newVmRequiredCount = clusterSize - originalClusterSize; + if (KubernetesCluster.State.Created.equals(originalState)) { + if (!kubernetesCluster.getState().equals(KubernetesCluster.State.Scaling)) { + stateTransitTo(kubernetesCluster.getId(), newVmRequiredCount > 0 ? 
KubernetesCluster.Event.ScaleUpRequested : KubernetesCluster.Event.ScaleDownRequested); + } + kubernetesCluster = updateKubernetesClusterEntry(null, serviceOffering); + return; + } + Pair publicIpSshPort = getKubernetesClusterServerIpSshPort(null); + publicIpAddress = publicIpSshPort.first(); + sshPort = publicIpSshPort.second(); + if (Strings.isNullOrEmpty(publicIpAddress)) { + logTransitStateToFailedIfNeededAndThrow(Level.ERROR, String.format("Scaling failed for Kubernetes cluster ID: %s, unable to retrieve associated public IP", kubernetesCluster.getUuid())); + } + if (newVmRequiredCount < 0) { // downscale + scaleDownKubernetesClusterSize(); + } else { // upscale, same node count handled above + scaleUpKubernetesClusterSize(newVmRequiredCount); + } + kubernetesCluster = updateKubernetesClusterEntry(clusterSize, null); + } + + public boolean scaleCluster() throws CloudRuntimeException { + init(); + if (LOGGER.isInfoEnabled()) { + LOGGER.info(String.format("Scaling Kubernetes cluster ID: %s", kubernetesCluster.getUuid())); + } + scaleTimeoutTime = System.currentTimeMillis() + KubernetesClusterService.KubernetesClusterScaleTimeout.value() * 1000; + final long originalClusterSize = kubernetesCluster.getNodeCount(); + final ServiceOffering existingServiceOffering = serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId()); + if (existingServiceOffering == null) { + logAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster ID: %s failed, service offering for the Kubernetes cluster not found!", kubernetesCluster.getUuid())); + } + final boolean serviceOfferingScalingNeeded = serviceOffering != null && serviceOffering.getId() != existingServiceOffering.getId(); + final boolean clusterSizeScalingNeeded = clusterSize != null && clusterSize != originalClusterSize; + final long newVMRequired = clusterSize == null ? 
0 : clusterSize - originalClusterSize; + if (serviceOfferingScalingNeeded && clusterSizeScalingNeeded) { + if (newVMRequired > 0) { + scaleKubernetesClusterOffering(); + scaleKubernetesClusterSize(); + } else { + scaleKubernetesClusterSize(); + scaleKubernetesClusterOffering(); + } + } else if (serviceOfferingScalingNeeded) { + scaleKubernetesClusterOffering(); + } else if (clusterSizeScalingNeeded) { + scaleKubernetesClusterSize(); + } + stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationSucceeded); + return true; + } +} diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java new file mode 100644 index 00000000000..d4525630196 --- /dev/null +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java @@ -0,0 +1,640 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package com.cloud.kubernetes.cluster.actionworkers; + +import java.io.IOException; +import java.net.InetAddress; +import java.net.MalformedURLException; +import java.net.URL; +import java.net.UnknownHostException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.framework.ca.Certificate; +import org.apache.cloudstack.utils.security.CertUtils; +import org.apache.commons.codec.binary.Base64; +import org.apache.commons.collections.CollectionUtils; +import org.apache.log4j.Level; + +import com.cloud.dc.DataCenter; +import com.cloud.dc.Vlan; +import com.cloud.dc.VlanVO; +import com.cloud.deploy.DeployDestination; +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.exception.InsufficientAddressCapacityException; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.ManagementServerException; +import com.cloud.exception.NetworkRuleConflictException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.hypervisor.Hypervisor; +import com.cloud.kubernetes.cluster.KubernetesCluster; +import com.cloud.kubernetes.cluster.KubernetesClusterDetailsVO; +import com.cloud.kubernetes.cluster.KubernetesClusterManagerImpl; +import com.cloud.kubernetes.cluster.KubernetesClusterService; +import com.cloud.kubernetes.cluster.KubernetesClusterVO; +import com.cloud.kubernetes.cluster.KubernetesClusterVmMapVO; +import com.cloud.kubernetes.cluster.utils.KubernetesClusterUtil; +import com.cloud.kubernetes.version.KubernetesSupportedVersion; +import com.cloud.kubernetes.version.KubernetesVersionManagerImpl; +import com.cloud.network.IpAddress; +import com.cloud.network.Network; +import com.cloud.network.addr.PublicIp; +import com.cloud.network.rules.LoadBalancer; +import com.cloud.offering.ServiceOffering; +import com.cloud.template.VirtualMachineTemplate; 
+import com.cloud.user.Account; +import com.cloud.user.SSHKeyPairVO; +import com.cloud.uservm.UserVm; +import com.cloud.utils.Pair; +import com.cloud.utils.StringUtils; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.net.Ip; +import com.cloud.utils.net.NetUtils; +import com.cloud.vm.Nic; +import com.cloud.vm.ReservationContext; +import com.cloud.vm.ReservationContextImpl; +import com.cloud.vm.VirtualMachine; +import com.google.common.base.Strings; + +public class KubernetesClusterStartWorker extends KubernetesClusterResourceModifierActionWorker { + + public KubernetesClusterStartWorker(final KubernetesCluster kubernetesCluster, final KubernetesClusterManagerImpl clusterManager) { + super(kubernetesCluster, clusterManager); + } + + private Pair> getKubernetesMasterIpAddresses(final DataCenter zone, final Network network, final Account account) throws InsufficientAddressCapacityException { + String masterIp = null; + Map requestedIps = null; + if (Network.GuestType.Shared.equals(network.getGuestType())) { + List vlanIds = new ArrayList<>(); + List vlans = vlanDao.listVlansByNetworkId(network.getId()); + for (VlanVO vlan : vlans) { + vlanIds.add(vlan.getId()); + } + PublicIp ip = ipAddressManager.getAvailablePublicIpAddressFromVlans(zone.getId(), null, account, Vlan.VlanType.DirectAttached, vlanIds,network.getId(), null, false); + if (ip != null) { + masterIp = ip.getAddress().toString(); + } + requestedIps = new HashMap<>(); + Ip ipAddress = ip.getAddress(); + boolean isIp6 = ipAddress.isIp6(); + requestedIps.put(network.getId(), new Network.IpAddresses(ipAddress.isIp4() ? 
ip.getAddress().addr() : null, null)); + } else { + masterIp = ipAddressManager.acquireGuestIpAddress(networkDao.findById(kubernetesCluster.getNetworkId()), null); + } + return new Pair<>(masterIp, requestedIps); + } + + private boolean isKubernetesVersionSupportsHA() { + boolean haSupported = false; + final KubernetesSupportedVersion version = kubernetesSupportedVersionDao.findById(kubernetesCluster.getKubernetesVersionId()); + if (version != null) { + try { + if (KubernetesVersionManagerImpl.compareSemanticVersions(version.getSemanticVersion(), KubernetesClusterService.MIN_KUBERNETES_VERSION_HA_SUPPORT) >= 0) { + haSupported = true; + } + } catch (IllegalArgumentException e) { + LOGGER.error(String.format("Unable to compare Kubernetes version for cluster version ID: %s with %s", version.getUuid(), KubernetesClusterService.MIN_KUBERNETES_VERSION_HA_SUPPORT), e); + } + } + return haSupported; + } + + private String getKubernetesMasterConfig(final String masterIp, final String serverIp, + final String hostName, final boolean haSupported, + final boolean ejectIso) throws IOException { + String k8sMasterConfig = readResourceFile("/conf/k8s-master.yml"); + final String apiServerCert = "{{ k8s_master.apiserver.crt }}"; + final String apiServerKey = "{{ k8s_master.apiserver.key }}"; + final String caCert = "{{ k8s_master.ca.crt }}"; + final String sshPubKey = "{{ k8s.ssh.pub.key }}"; + final String clusterToken = "{{ k8s_master.cluster.token }}"; + final String clusterInitArgsKey = "{{ k8s_master.cluster.initargs }}"; + final String ejectIsoKey = "{{ k8s.eject.iso }}"; + final List addresses = new ArrayList<>(); + addresses.add(masterIp); + if (!serverIp.equals(masterIp)) { + addresses.add(serverIp); + } + final Certificate certificate = caManager.issueCertificate(null, Arrays.asList(hostName, "kubernetes", + "kubernetes.default", "kubernetes.default.svc", "kubernetes.default.svc.cluster", "kubernetes.default.svc.cluster.local"), + addresses, 3650, null); + final String 
tlsClientCert = CertUtils.x509CertificateToPem(certificate.getClientCertificate()); + final String tlsPrivateKey = CertUtils.privateKeyToPem(certificate.getPrivateKey()); + final String tlsCaCert = CertUtils.x509CertificatesToPem(certificate.getCaCertificates()); + k8sMasterConfig = k8sMasterConfig.replace(apiServerCert, tlsClientCert.replace("\n", "\n ")); + k8sMasterConfig = k8sMasterConfig.replace(apiServerKey, tlsPrivateKey.replace("\n", "\n ")); + k8sMasterConfig = k8sMasterConfig.replace(caCert, tlsCaCert.replace("\n", "\n ")); + String pubKey = "- \"" + configurationDao.getValue("ssh.publickey") + "\""; + String sshKeyPair = kubernetesCluster.getKeyPair(); + if (!Strings.isNullOrEmpty(sshKeyPair)) { + SSHKeyPairVO sshkp = sshKeyPairDao.findByName(owner.getAccountId(), owner.getDomainId(), sshKeyPair); + if (sshkp != null) { + pubKey += "\n - \"" + sshkp.getPublicKey() + "\""; + } + } + k8sMasterConfig = k8sMasterConfig.replace(sshPubKey, pubKey); + k8sMasterConfig = k8sMasterConfig.replace(clusterToken, KubernetesClusterUtil.generateClusterToken(kubernetesCluster)); + String initArgs = ""; + if (haSupported) { + initArgs = String.format("--control-plane-endpoint %s:%d --upload-certs --certificate-key %s ", + serverIp, + CLUSTER_API_PORT, + KubernetesClusterUtil.generateClusterHACertificateKey(kubernetesCluster)); + } + initArgs += String.format("--apiserver-cert-extra-sans=%s", serverIp); + k8sMasterConfig = k8sMasterConfig.replace(clusterInitArgsKey, initArgs); + k8sMasterConfig = k8sMasterConfig.replace(ejectIsoKey, String.valueOf(ejectIso)); + return k8sMasterConfig; + } + + private UserVm createKubernetesMaster(final Network network, String serverIp) throws ManagementServerException, + ResourceUnavailableException, InsufficientCapacityException { + UserVm masterVm = null; + DataCenter zone = dataCenterDao.findById(kubernetesCluster.getZoneId()); + ServiceOffering serviceOffering = serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId()); + 
VirtualMachineTemplate template = templateDao.findById(kubernetesCluster.getTemplateId()); + List networkIds = new ArrayList(); + networkIds.add(kubernetesCluster.getNetworkId()); + Pair> ipAddresses = getKubernetesMasterIpAddresses(zone, network, owner); + String masterIp = ipAddresses.first(); + Map requestedIps = ipAddresses.second(); + if (Network.GuestType.Shared.equals(network.getGuestType()) && Strings.isNullOrEmpty(serverIp)) { + serverIp = masterIp; + } + Network.IpAddresses addrs = new Network.IpAddresses(masterIp, null); + long rootDiskSize = kubernetesCluster.getNodeRootDiskSize(); + Map customParameterMap = new HashMap(); + if (rootDiskSize > 0) { + customParameterMap.put("rootdisksize", String.valueOf(rootDiskSize)); + } + String hostName = kubernetesClusterNodeNamePrefix + "-master"; + if (kubernetesCluster.getMasterNodeCount() > 1) { + hostName += "-1"; + } + hostName = getKubernetesClusterNodeAvailableName(hostName); + boolean haSupported = isKubernetesVersionSupportsHA(); + String k8sMasterConfig = null; + try { + k8sMasterConfig = getKubernetesMasterConfig(masterIp, serverIp, hostName, haSupported, Hypervisor.HypervisorType.VMware.equals(template.getHypervisorType())); + } catch (IOException e) { + logAndThrow(Level.ERROR, "Failed to read Kubernetes master configuration file", e); + } + String base64UserData = Base64.encodeBase64String(k8sMasterConfig.getBytes(StringUtils.getPreferredCharset())); + masterVm = userVmService.createAdvancedVirtualMachine(zone, serviceOffering, template, networkIds, owner, + hostName, hostName, null, null, null, + null, BaseCmd.HTTPMethod.POST, base64UserData, kubernetesCluster.getKeyPair(), + requestedIps, addrs, null, null, null, customParameterMap, null, null, null, null); + if (LOGGER.isInfoEnabled()) { + LOGGER.info(String.format("Created master VM ID: %s, %s in the Kubernetes cluster ID: %s", masterVm.getUuid(), hostName, kubernetesCluster.getUuid())); + } + return masterVm; + } + + private String 
getKubernetesAdditionalMasterConfig(final String joinIp, final boolean ejectIso) throws IOException { + String k8sMasterConfig = readResourceFile("/conf/k8s-master-add.yml"); + final String joinIpKey = "{{ k8s_master.join_ip }}"; + final String clusterTokenKey = "{{ k8s_master.cluster.token }}"; + final String sshPubKey = "{{ k8s.ssh.pub.key }}"; + final String clusterHACertificateKey = "{{ k8s_master.cluster.ha.certificate.key }}"; + final String ejectIsoKey = "{{ k8s.eject.iso }}"; + String pubKey = "- \"" + configurationDao.getValue("ssh.publickey") + "\""; + String sshKeyPair = kubernetesCluster.getKeyPair(); + if (!Strings.isNullOrEmpty(sshKeyPair)) { + SSHKeyPairVO sshkp = sshKeyPairDao.findByName(owner.getAccountId(), owner.getDomainId(), sshKeyPair); + if (sshkp != null) { + pubKey += "\n - \"" + sshkp.getPublicKey() + "\""; + } + } + k8sMasterConfig = k8sMasterConfig.replace(sshPubKey, pubKey); + k8sMasterConfig = k8sMasterConfig.replace(joinIpKey, joinIp); + k8sMasterConfig = k8sMasterConfig.replace(clusterTokenKey, KubernetesClusterUtil.generateClusterToken(kubernetesCluster)); + k8sMasterConfig = k8sMasterConfig.replace(clusterHACertificateKey, KubernetesClusterUtil.generateClusterHACertificateKey(kubernetesCluster)); + k8sMasterConfig = k8sMasterConfig.replace(ejectIsoKey, String.valueOf(ejectIso)); + return k8sMasterConfig; + } + + private UserVm createKubernetesAdditionalMaster(final String joinIp, final int additionalMasterNodeInstance) throws ManagementServerException, + ResourceUnavailableException, InsufficientCapacityException { + UserVm additionalMasterVm = null; + DataCenter zone = dataCenterDao.findById(kubernetesCluster.getZoneId()); + ServiceOffering serviceOffering = serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId()); + VirtualMachineTemplate template = templateDao.findById(kubernetesCluster.getTemplateId()); + List networkIds = new ArrayList(); + networkIds.add(kubernetesCluster.getNetworkId()); + Network.IpAddresses 
addrs = new Network.IpAddresses(null, null); + long rootDiskSize = kubernetesCluster.getNodeRootDiskSize(); + Map customParameterMap = new HashMap(); + if (rootDiskSize > 0) { + customParameterMap.put("rootdisksize", String.valueOf(rootDiskSize)); + } + String hostName = getKubernetesClusterNodeAvailableName(String.format("%s-master-%d", kubernetesClusterNodeNamePrefix, additionalMasterNodeInstance + 1)); + String k8sMasterConfig = null; + try { + k8sMasterConfig = getKubernetesAdditionalMasterConfig(joinIp, Hypervisor.HypervisorType.VMware.equals(template.getHypervisorType())); + } catch (IOException e) { + logAndThrow(Level.ERROR, "Failed to read Kubernetes master configuration file", e); + } + String base64UserData = Base64.encodeBase64String(k8sMasterConfig.getBytes(StringUtils.getPreferredCharset())); + additionalMasterVm = userVmService.createAdvancedVirtualMachine(zone, serviceOffering, template, networkIds, owner, + hostName, hostName, null, null, null, + null, BaseCmd.HTTPMethod.POST, base64UserData, kubernetesCluster.getKeyPair(), + null, addrs, null, null, null, customParameterMap, null, null, null, null); + if (LOGGER.isInfoEnabled()) { + LOGGER.info(String.format("Created master VM ID: %s, %s in the Kubernetes cluster ID: %s", additionalMasterVm.getUuid(), hostName, kubernetesCluster.getUuid())); + } + return additionalMasterVm; + } + + private UserVm provisionKubernetesClusterMasterVm(final Network network, final String publicIpAddress) throws + ManagementServerException, InsufficientCapacityException, ResourceUnavailableException { + UserVm k8sMasterVM = null; + k8sMasterVM = createKubernetesMaster(network, publicIpAddress); + addKubernetesClusterVm(kubernetesCluster.getId(), k8sMasterVM.getId()); + startKubernetesVM(k8sMasterVM); + k8sMasterVM = userVmDao.findById(k8sMasterVM.getId()); + if (k8sMasterVM == null) { + throw new ManagementServerException(String.format("Failed to provision master VM for Kubernetes cluster ID: %s" , 
kubernetesCluster.getUuid())); + } + if (LOGGER.isInfoEnabled()) { + LOGGER.info(String.format("Provisioned the master VM ID: %s in to the Kubernetes cluster ID: %s", k8sMasterVM.getUuid(), kubernetesCluster.getUuid())); + } + return k8sMasterVM; + } + + private List provisionKubernetesClusterAdditionalMasterVms(final String publicIpAddress) throws + InsufficientCapacityException, ManagementServerException, ResourceUnavailableException { + List additionalMasters = new ArrayList<>(); + if (kubernetesCluster.getMasterNodeCount() > 1) { + for (int i = 1; i < kubernetesCluster.getMasterNodeCount(); i++) { + UserVm vm = null; + vm = createKubernetesAdditionalMaster(publicIpAddress, i); + addKubernetesClusterVm(kubernetesCluster.getId(), vm.getId()); + startKubernetesVM(vm); + vm = userVmDao.findById(vm.getId()); + if (vm == null) { + throw new ManagementServerException(String.format("Failed to provision additional master VM for Kubernetes cluster ID: %s" , kubernetesCluster.getUuid())); + } + additionalMasters.add(vm); + if (LOGGER.isInfoEnabled()) { + LOGGER.info(String.format("Provisioned additional master VM ID: %s in to the Kubernetes cluster ID: %s", vm.getUuid(), kubernetesCluster.getUuid())); + } + } + } + return additionalMasters; + } + + private Network startKubernetesClusterNetwork(final DeployDestination destination) throws ManagementServerException { + final ReservationContext context = new ReservationContextImpl(null, null, null, owner); + Network network = networkDao.findById(kubernetesCluster.getNetworkId()); + if (network == null) { + String msg = String.format("Network for Kubernetes cluster ID: %s not found", kubernetesCluster.getUuid()); + LOGGER.warn(msg); + stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed); + throw new ManagementServerException(msg); + } + try { + networkMgr.startNetwork(network.getId(), destination, context); + if (LOGGER.isInfoEnabled()) { + LOGGER.info(String.format("Network ID: %s is started for the 
Kubernetes cluster ID: %s", network.getUuid(), kubernetesCluster.getUuid())); + } + } catch (ConcurrentOperationException | ResourceUnavailableException |InsufficientCapacityException e) { + String msg = String.format("Failed to start Kubernetes cluster ID: %s as unable to start associated network ID: %s" , kubernetesCluster.getUuid(), network.getUuid()); + LOGGER.error(msg, e); + stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed); + throw new ManagementServerException(msg, e); + } + return network; + } + + private void provisionLoadBalancerRule(final IpAddress publicIp, final Network network, + final Account account, final List clusterVMIds, final int port) throws NetworkRuleConflictException, + InsufficientAddressCapacityException { + LoadBalancer lb = lbService.createPublicLoadBalancerRule(null, "api-lb", "LB rule for API access", + port, port, port, port, + publicIp.getId(), NetUtils.TCP_PROTO, "roundrobin", network.getId(), + account.getId(), false, NetUtils.TCP_PROTO, true); + + Map> vmIdIpMap = new HashMap<>(); + for (int i = 0; i < kubernetesCluster.getMasterNodeCount(); ++i) { + List ips = new ArrayList<>(); + Nic masterVmNic = networkModel.getNicInNetwork(clusterVMIds.get(i), kubernetesCluster.getNetworkId()); + ips.add(masterVmNic.getIPv4Address()); + vmIdIpMap.put(clusterVMIds.get(i), ips); + } + lbService.assignToLoadBalancer(lb.getId(), null, vmIdIpMap); + } + + /** + * Setup network rules for Kubernetes cluster + * Open up firewall port CLUSTER_API_PORT, secure port on which Kubernetes + * API server is running. Also create load balancing rule to forward public + * IP traffic to master VMs' private IP. + * Open up firewall ports NODES_DEFAULT_START_SSH_PORT to NODES_DEFAULT_START_SSH_PORT+n + * for SSH access. 
Also create port-forwarding rule to forward public IP traffic to all + * @param network + * @param clusterVMs + * @throws ManagementServerException + */ + private void setupKubernetesClusterNetworkRules(Network network, List clusterVMs) throws ManagementServerException { + if (!Network.GuestType.Isolated.equals(network.getGuestType())) { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(String.format("Network ID: %s for Kubernetes cluster ID: %s is not an isolated network, therefore, no need for network rules", network.getUuid(), kubernetesCluster.getUuid())); + } + return; + } + List clusterVMIds = new ArrayList<>(); + for (UserVm vm : clusterVMs) { + clusterVMIds.add(vm.getId()); + } + IpAddress publicIp = getSourceNatIp(network); + if (publicIp == null) { + throw new ManagementServerException(String.format("No source NAT IP addresses found for network ID: %s, Kubernetes cluster ID: %s", network.getUuid(), kubernetesCluster.getUuid())); + } + + try { + provisionFirewallRules(publicIp, owner, CLUSTER_API_PORT, CLUSTER_API_PORT); + if (LOGGER.isInfoEnabled()) { + LOGGER.info(String.format("Provisioned firewall rule to open up port %d on %s for Kubernetes cluster ID: %s", + CLUSTER_API_PORT, publicIp.getAddress().addr(), kubernetesCluster.getUuid())); + } + } catch (NoSuchFieldException | IllegalAccessException | ResourceUnavailableException | NetworkRuleConflictException e) { + throw new ManagementServerException(String.format("Failed to provision firewall rules for API access for the Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), e); + } + + try { + int endPort = CLUSTER_NODES_DEFAULT_START_SSH_PORT + clusterVMs.size() - 1; + provisionFirewallRules(publicIp, owner, CLUSTER_NODES_DEFAULT_START_SSH_PORT, endPort); + if (LOGGER.isInfoEnabled()) { + LOGGER.info(String.format("Provisioned firewall rule to open up port %d to %d on %s for Kubernetes cluster ID: %s", CLUSTER_NODES_DEFAULT_START_SSH_PORT, endPort, publicIp.getAddress().addr(), 
kubernetesCluster.getUuid())); + } + } catch (NoSuchFieldException | IllegalAccessException | ResourceUnavailableException | NetworkRuleConflictException e) { + throw new ManagementServerException(String.format("Failed to provision firewall rules for SSH access for the Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), e); + } + + // Load balancer rule fo API access for master node VMs + try { + provisionLoadBalancerRule(publicIp, network, owner, clusterVMIds, CLUSTER_API_PORT); + } catch (NetworkRuleConflictException | InsufficientAddressCapacityException e) { + throw new ManagementServerException(String.format("Failed to provision load balancer rule for API access for the Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), e); + } + + // Port forwarding rule fo SSH access on each node VM + try { + provisionSshPortForwardingRules(publicIp, network, owner, clusterVMIds, CLUSTER_NODES_DEFAULT_START_SSH_PORT); + } catch (ResourceUnavailableException | NetworkRuleConflictException e) { + throw new ManagementServerException(String.format("Failed to activate SSH port forwarding rules for the Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), e); + } + } + + private void startKubernetesClusterVMs() { + List clusterVms = getKubernetesClusterVMs(); + for (final UserVm vm : clusterVms) { + if (vm == null) { + logTransitStateAndThrow(Level.ERROR, String.format("Failed to start all VMs in Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); + } + try { + startKubernetesVM(vm); + } catch (ManagementServerException ex) { + LOGGER.warn(String.format("Failed to start VM ID: %s in Kubernetes cluster ID: %s due to ", vm.getUuid(), kubernetesCluster.getUuid()) + ex); + // dont bail out here. 
proceed further to stop the reset of the VM's + } + } + for (final UserVm userVm : clusterVms) { + UserVm vm = userVmDao.findById(userVm.getId()); + if (vm == null || !vm.getState().equals(VirtualMachine.State.Running)) { + logTransitStateAndThrow(Level.ERROR, String.format("Failed to start all VMs in Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); + } + } + } + + private boolean isKubernetesClusterKubeConfigAvailable(final long timeoutTime) { + if (Strings.isNullOrEmpty(publicIpAddress)) { + KubernetesClusterDetailsVO kubeConfigDetail = kubernetesClusterDetailsDao.findDetail(kubernetesCluster.getId(), "kubeConfigData"); + if (kubeConfigDetail != null && !Strings.isNullOrEmpty(kubeConfigDetail.getValue())) { + return true; + } + } + String kubeConfig = KubernetesClusterUtil.getKubernetesClusterConfig(kubernetesCluster, publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, sshKeyFile, timeoutTime); + if (!Strings.isNullOrEmpty(kubeConfig)) { + final String masterVMPrivateIpAddress = getMasterVmPrivateIp(); + if (!Strings.isNullOrEmpty(masterVMPrivateIpAddress)) { + kubeConfig = kubeConfig.replace(String.format("server: https://%s:%d", masterVMPrivateIpAddress, CLUSTER_API_PORT), + String.format("server: https://%s:%d", publicIpAddress, CLUSTER_API_PORT)); + } + kubernetesClusterDetailsDao.addDetail(kubernetesCluster.getId(), "kubeConfigData", Base64.encodeBase64String(kubeConfig.getBytes(StringUtils.getPreferredCharset())), false); + return true; + } + return false; + } + + private boolean isKubernetesClusterDashboardServiceRunning(final boolean onCreate, final Long timeoutTime) { + if (!onCreate) { + KubernetesClusterDetailsVO dashboardServiceRunningDetail = kubernetesClusterDetailsDao.findDetail(kubernetesCluster.getId(), "dashboardServiceRunning"); + if (dashboardServiceRunningDetail != null && Boolean.parseBoolean(dashboardServiceRunningDetail.getValue())) { + return true; + } + } + if 
(KubernetesClusterUtil.isKubernetesClusterDashboardServiceRunning(kubernetesCluster, publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, sshKeyFile, timeoutTime, 15000)) { + kubernetesClusterDetailsDao.addDetail(kubernetesCluster.getId(), "dashboardServiceRunning", String.valueOf(true), false); + return true; + } + return false; + } + + private void updateKubernetesClusterEntryEndpoint() { + KubernetesClusterVO kubernetesClusterVO = kubernetesClusterDao.findById(kubernetesCluster.getId()); + kubernetesClusterVO.setEndpoint(String.format("https://%s:%d/", publicIpAddress, CLUSTER_API_PORT)); + kubernetesClusterDao.update(kubernetesCluster.getId(), kubernetesClusterVO); + } + + public boolean startKubernetesClusterOnCreate() { + init(); + if (LOGGER.isInfoEnabled()) { + LOGGER.info(String.format("Starting Kubernetes cluster ID: %s", kubernetesCluster.getUuid())); + } + final long startTimeoutTime = System.currentTimeMillis() + KubernetesClusterService.KubernetesClusterStartTimeout.value() * 1000; + stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.StartRequested); + DeployDestination dest = null; + try { + dest = plan(); + } catch (InsufficientCapacityException e) { + logTransitStateAndThrow(Level.ERROR, String.format("Provisioning the cluster failed due to insufficient capacity in the Kubernetes cluster: %s", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed, e); + } + Network network = null; + try { + network = startKubernetesClusterNetwork(dest); + } catch (ManagementServerException e) { + logTransitStateAndThrow(Level.ERROR, String.format("Failed to start Kubernetes cluster ID: %s as its network cannot be started", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed, e); + } + Pair publicIpSshPort = getKubernetesClusterServerIpSshPort(null); + publicIpAddress = publicIpSshPort.first(); + if (Strings.isNullOrEmpty(publicIpAddress) && + 
(Network.GuestType.Isolated.equals(network.getGuestType()) || kubernetesCluster.getMasterNodeCount() > 1)) { // Shared network, single-master cluster won't have an IP yet + logTransitStateAndThrow(Level.ERROR, String.format("Failed to start Kubernetes cluster ID: %s as no public IP found for the cluster" , kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed); + } + List clusterVMs = new ArrayList<>(); + UserVm k8sMasterVM = null; + try { + k8sMasterVM = provisionKubernetesClusterMasterVm(network, publicIpAddress); + } catch (CloudRuntimeException | ManagementServerException | ResourceUnavailableException | InsufficientCapacityException e) { + logTransitStateAndThrow(Level.ERROR, String.format("Provisioning the master VM failed in the Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed, e); + } + clusterVMs.add(k8sMasterVM); + if (Strings.isNullOrEmpty(publicIpAddress)) { + publicIpSshPort = getKubernetesClusterServerIpSshPort(k8sMasterVM); + publicIpAddress = publicIpSshPort.first(); + if (Strings.isNullOrEmpty(publicIpAddress)) { + logTransitStateAndThrow(Level.WARN, String.format("Failed to start Kubernetes cluster ID: %s as no public IP found for the cluster", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed); + } + } + try { + List additionalMasterVMs = provisionKubernetesClusterAdditionalMasterVms(publicIpAddress); + clusterVMs.addAll(additionalMasterVMs); + } catch (CloudRuntimeException | ManagementServerException | ResourceUnavailableException | InsufficientCapacityException e) { + logTransitStateAndThrow(Level.ERROR, String.format("Provisioning additional master VM failed in the Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed, e); + } + try { + List nodeVMs = provisionKubernetesClusterNodeVms(kubernetesCluster.getNodeCount(), 
publicIpAddress); + clusterVMs.addAll(nodeVMs); + } catch (CloudRuntimeException | ManagementServerException | ResourceUnavailableException | InsufficientCapacityException e) { + logTransitStateAndThrow(Level.ERROR, String.format("Provisioning node VM failed in the Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed, e); + } + if (LOGGER.isInfoEnabled()) { + LOGGER.info(String.format("Kubernetes cluster ID: %s VMs successfully provisioned", kubernetesCluster.getUuid())); + } + try { + setupKubernetesClusterNetworkRules(network, clusterVMs); + } catch (ManagementServerException e) { + logTransitStateAndThrow(Level.ERROR, String.format("Failed to setup Kubernetes cluster ID: %s, unable to setup network rules", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed, e); + } + attachIsoKubernetesVMs(clusterVMs); + if (!KubernetesClusterUtil.isKubernetesClusterMasterVmRunning(kubernetesCluster, publicIpAddress, publicIpSshPort.second(), startTimeoutTime)) { + String msg = String.format("Failed to setup Kubernetes cluster ID: %s in usable state as unable to access master node VMs of the cluster", kubernetesCluster.getUuid()); + if (kubernetesCluster.getMasterNodeCount() > 1 && Network.GuestType.Shared.equals(network.getGuestType())) { + msg = String.format("%s. 
Make sure external load-balancer has port forwarding rules for SSH access on ports %d-%d and API access on port %d", + msg, + CLUSTER_NODES_DEFAULT_START_SSH_PORT, + CLUSTER_NODES_DEFAULT_START_SSH_PORT + kubernetesCluster.getTotalNodeCount() - 1, + CLUSTER_API_PORT); + } + logTransitStateDetachIsoAndThrow(Level.ERROR, msg, kubernetesCluster, clusterVMs, KubernetesCluster.Event.CreateFailed, null); + } + boolean k8sApiServerSetup = KubernetesClusterUtil.isKubernetesClusterServerRunning(kubernetesCluster, publicIpAddress, CLUSTER_API_PORT, startTimeoutTime, 15000); + if (!k8sApiServerSetup) { + logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to setup Kubernetes cluster ID: %s in usable state as unable to provision API endpoint for the cluster", kubernetesCluster.getUuid()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.CreateFailed, null); + } + sshPort = publicIpSshPort.second(); + updateKubernetesClusterEntryEndpoint(); + boolean readyNodesCountValid = KubernetesClusterUtil.validateKubernetesClusterReadyNodesCount(kubernetesCluster, publicIpAddress, sshPort, + CLUSTER_NODE_VM_USER, sshKeyFile, startTimeoutTime, 15000); + detachIsoKubernetesVMs(clusterVMs); + if (!readyNodesCountValid) { + logTransitStateAndThrow(Level.ERROR, String.format("Failed to setup Kubernetes cluster ID: %s as it does not have desired number of nodes in ready state", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed); + } + if (!isKubernetesClusterKubeConfigAvailable(startTimeoutTime)) { + logTransitStateAndThrow(Level.ERROR, String.format("Failed to setup Kubernetes cluster ID: %s in usable state as unable to retrieve kube-config for the cluster", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); + } + if (!isKubernetesClusterDashboardServiceRunning(true, startTimeoutTime)) { + logTransitStateAndThrow(Level.ERROR, String.format("Failed to setup Kubernetes cluster ID: %s 
in usable state as unable to get Dashboard service running for the cluster", kubernetesCluster.getUuid()), kubernetesCluster.getId(),KubernetesCluster.Event.OperationFailed); + } + stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationSucceeded); + return true; + } + + public boolean startStoppedKubernetesCluster() throws CloudRuntimeException { + init(); + if (LOGGER.isInfoEnabled()) { + LOGGER.info(String.format("Starting Kubernetes cluster ID: %s", kubernetesCluster.getUuid())); + } + final long startTimeoutTime = System.currentTimeMillis() + KubernetesClusterService.KubernetesClusterStartTimeout.value() * 1000; + stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.StartRequested); + startKubernetesClusterVMs(); + try { + InetAddress address = InetAddress.getByName(new URL(kubernetesCluster.getEndpoint()).getHost()); + } catch (MalformedURLException | UnknownHostException ex) { + logTransitStateAndThrow(Level.ERROR, String.format("Kubernetes cluster ID: %s has invalid API endpoint. 
Can not verify if cluster is in ready state", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); + } + Pair sshIpPort = getKubernetesClusterServerIpSshPort(null); + publicIpAddress = sshIpPort.first(); + sshPort = sshIpPort.second(); + if (Strings.isNullOrEmpty(publicIpAddress)) { + logTransitStateAndThrow(Level.ERROR, String.format("Failed to start Kubernetes cluster ID: %s as no public IP found for the cluster" , kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); + } + if (!KubernetesClusterUtil.isKubernetesClusterServerRunning(kubernetesCluster, publicIpAddress, CLUSTER_API_PORT, startTimeoutTime, 15000)) { + logTransitStateAndThrow(Level.ERROR, String.format("Failed to start Kubernetes cluster ID: %s in usable state", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); + } + if (!isKubernetesClusterKubeConfigAvailable(startTimeoutTime)) { + logTransitStateAndThrow(Level.ERROR, String.format("Failed to start Kubernetes cluster ID: %s in usable state as unable to retrieve kube-config for the cluster", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); + } + if (!isKubernetesClusterDashboardServiceRunning(false, startTimeoutTime)) { + logTransitStateAndThrow(Level.ERROR, String.format("Failed to start Kubernetes cluster ID: %s in usable state as unable to get Dashboard service running for the cluster", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); + } + stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationSucceeded); + if (LOGGER.isInfoEnabled()) { + LOGGER.info(String.format("Kubernetes cluster ID: %s successfully started", kubernetesCluster.getUuid())); + } + return true; + } + + public boolean reconcileAlertCluster() { + init(); + final long startTimeoutTime = System.currentTimeMillis() + 3 * 60 * 
1000; + List vmMapVOList = getKubernetesClusterVMMaps(); + if (CollectionUtils.isEmpty(vmMapVOList) || vmMapVOList.size() != kubernetesCluster.getTotalNodeCount()) { + return false; + } + Pair sshIpPort = getKubernetesClusterServerIpSshPort(null); + publicIpAddress = sshIpPort.first(); + sshPort = sshIpPort.second(); + if (Strings.isNullOrEmpty(publicIpAddress)) { + return false; + } + long actualNodeCount = 0; + try { + actualNodeCount = KubernetesClusterUtil.getKubernetesClusterReadyNodesCount(kubernetesCluster, publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, sshKeyFile); + } catch (Exception e) { + return false; + } + if (kubernetesCluster.getTotalNodeCount() != actualNodeCount) { + return false; + } + if (Strings.isNullOrEmpty(sshIpPort.first())) { + return false; + } + if (!KubernetesClusterUtil.isKubernetesClusterServerRunning(kubernetesCluster, sshIpPort.first(), + KubernetesClusterActionWorker.CLUSTER_API_PORT, startTimeoutTime, 0)) { + return false; + } + updateKubernetesClusterEntryEndpoint(); + if (!isKubernetesClusterKubeConfigAvailable(startTimeoutTime)) { + return false; + } + if (!isKubernetesClusterDashboardServiceRunning(false, startTimeoutTime)) { + return false; + } + // mark the cluster to be running + stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.RecoveryRequested); + stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationSucceeded); + return true; + } +} diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStopWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStopWorker.java new file mode 100644 index 00000000000..a8e1a2c4d84 --- /dev/null +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStopWorker.java @@ -0,0 +1,62 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// 
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

package com.cloud.kubernetes.cluster.actionworkers;

import java.util.List;

import org.apache.log4j.Level;

import com.cloud.exception.ConcurrentOperationException;
import com.cloud.kubernetes.cluster.KubernetesCluster;
import com.cloud.kubernetes.cluster.KubernetesClusterManagerImpl;
import com.cloud.uservm.UserVm;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.vm.VirtualMachine;

/**
 * Action worker that stops a Kubernetes cluster by stopping every VM that
 * belongs to it and then verifying that all of them reached the Stopped state.
 */
public class KubernetesClusterStopWorker extends KubernetesClusterActionWorker {
    public KubernetesClusterStopWorker(final KubernetesCluster kubernetesCluster, final KubernetesClusterManagerImpl clusterManager) {
        super(kubernetesCluster, clusterManager);
    }

    /**
     * Stops all VMs of the cluster.
     *
     * State machine: StopRequested is fired up front; OperationSucceeded only
     * after every VM is confirmed Stopped, OperationFailed (via
     * logTransitStateAndThrow, which throws) on any missing or still-running VM.
     *
     * @return true when every cluster VM is confirmed stopped
     * @throws CloudRuntimeException if a cluster VM cannot be found or fails to stop
     */
    public boolean stop() throws CloudRuntimeException {
        init();
        if (LOGGER.isInfoEnabled()) {
            LOGGER.info(String.format("Stopping Kubernetes cluster ID: %s", kubernetesCluster.getUuid()));
        }
        stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.StopRequested);
        List<UserVm> clusterVMs = getKubernetesClusterVMs();
        // Phase 1: request a (graceful) stop for each VM. A concurrent operation
        // on a single VM is only logged; the verification pass below decides
        // whether the overall stop succeeded.
        for (UserVm vm : clusterVMs) {
            if (vm == null) {
                logTransitStateAndThrow(Level.ERROR, String.format("Failed to find all VMs in Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed);
            }
            try {
                userVmService.stopVirtualMachine(vm.getId(), false);
            } catch (ConcurrentOperationException ex) {
                LOGGER.warn(String.format("Failed to stop VM ID: %s in Kubernetes cluster ID: %s", vm.getUuid(), kubernetesCluster.getUuid()), ex);
            }
        }
        // Phase 2: re-read each VM from the DB and require the Stopped state.
        for (final UserVm userVm : clusterVMs) {
            UserVm vm = userVmDao.findById(userVm.getId());
            if (vm == null || !vm.getState().equals(VirtualMachine.State.Stopped)) {
                logTransitStateAndThrow(Level.ERROR, String.format("Failed to stop all VMs in Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed);
            }
        }
        stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationSucceeded);
        return true;
    }
}
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

package com.cloud.kubernetes.cluster.actionworkers;

import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.commons.collections.CollectionUtils;
import org.apache.log4j.Level;

import com.cloud.hypervisor.Hypervisor;
import com.cloud.kubernetes.cluster.KubernetesCluster;
import com.cloud.kubernetes.cluster.KubernetesClusterManagerImpl;
import com.cloud.kubernetes.cluster.KubernetesClusterService;
import com.cloud.kubernetes.cluster.KubernetesClusterVO;
import com.cloud.kubernetes.cluster.utils.KubernetesClusterUtil;
import com.cloud.kubernetes.version.KubernetesSupportedVersion;
import com.cloud.kubernetes.version.KubernetesVersionManagerImpl;
import com.cloud.uservm.UserVm;
import com.cloud.utils.Pair;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.ssh.SshHelper;
import com.google.common.base.Strings;

/**
 * Action worker that upgrades a Kubernetes cluster to a new supported
 * Kubernetes version by draining, upgrading and uncordoning each node in
 * turn (node index 0 — the master — first).
 */
public class KubernetesClusterUpgradeWorker extends KubernetesClusterActionWorker {

    private List<UserVm> clusterVMs = new ArrayList<>();
    private final KubernetesSupportedVersion upgradeVersion;
    private File upgradeScriptFile;
    private long upgradeTimeoutTime;

    public KubernetesClusterUpgradeWorker(final KubernetesCluster kubernetesCluster,
                                          final KubernetesSupportedVersion upgradeVersion,
                                          final KubernetesClusterManagerImpl clusterManager) {
        super(kubernetesCluster, clusterManager);
        this.upgradeVersion = upgradeVersion;
    }

    /**
     * Copies the bundled upgrade script resource into a temporary file that
     * can later be SCPed to each node.
     * Uses try-with-resources so the writer is closed even when the write
     * fails (the original code leaked the FileWriter on exception), and marks
     * the temp file for deletion on JVM exit.
     */
    private void retrieveUpgradeScriptFile() {
        try {
            String upgradeScriptData = readResourceFile("/script/upgrade-kubernetes.sh");
            upgradeScriptFile = File.createTempFile("upgrade-kubernetes", ".sh");
            upgradeScriptFile.deleteOnExit(); // best-effort cleanup of the temp script
            try (BufferedWriter upgradeScriptFileWriter = new BufferedWriter(new FileWriter(upgradeScriptFile))) {
                upgradeScriptFileWriter.write(upgradeScriptData);
            }
        } catch (IOException e) {
            logAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster ID: %s, unable to prepare upgrade script", kubernetesCluster.getUuid()), e);
        }
    }

    /**
     * Copies the upgrade script to the node and executes it over SSH.
     *
     * NOTE(review): assumes that when the cluster is fronted by a shared
     * public IP, per-node SSH access uses port-forwarding at (sshPort + index),
     * while sshPort == 22 means direct access via the node's private IP —
     * confirm against getKubernetesClusterServerIpSshPort.
     *
     * @param vm    the node VM to upgrade
     * @param index position of the node in the cluster list; 0 is the master
     * @return pair of (success flag, command output)
     */
    private Pair<Boolean, String> runInstallScriptOnVM(final UserVm vm, final int index) throws Exception {
        int nodeSshPort = sshPort == 22 ? sshPort : sshPort + index;
        String nodeAddress = (index > 0 && sshPort == 22) ? vm.getPrivateIpAddress() : publicIpAddress;
        SshHelper.scpTo(nodeAddress, nodeSshPort, CLUSTER_NODE_VM_USER, sshKeyFile, null,
                "~/", upgradeScriptFile.getAbsolutePath(), "0755");
        String cmdStr = String.format("sudo ./%s %s %s %s %s",
                upgradeScriptFile.getName(),
                upgradeVersion.getSemanticVersion(),
                index == 0 ? "true" : "false",
                // pre-1.15 upgrades need a different code path in the script
                KubernetesVersionManagerImpl.compareSemanticVersions(upgradeVersion.getSemanticVersion(), "1.15.0") < 0 ? "true" : "false",
                Hypervisor.HypervisorType.VMware.equals(vm.getHypervisorType()));
        return SshHelper.sshExecute(publicIpAddress, nodeSshPort, CLUSTER_NODE_VM_USER, sshKeyFile, null,
                cmdStr,
                10000, 10000, 10 * 60 * 1000);
    }

    /**
     * Drains, upgrades and uncordons each cluster node in order. Any failure
     * or timeout transitions the cluster to OperationFailed, detaches the
     * binaries ISO and throws (via logTransitStateDetachIsoAndThrow).
     */
    private void upgradeKubernetesClusterNodes() {
        Pair<Boolean, String> result = null;
        for (int i = 0; i < clusterVMs.size(); ++i) {
            UserVm vm = clusterVMs.get(i);
            String hostName = vm.getHostName();
            if (!Strings.isNullOrEmpty(hostName)) {
                hostName = hostName.toLowerCase();
            }
            result = null;
            if (LOGGER.isInfoEnabled()) {
                LOGGER.info(String.format("Upgrading node on VM ID: %s in Kubernetes cluster ID: %s with Kubernetes version(%s) ID: %s",
                        vm.getUuid(), kubernetesCluster.getUuid(), upgradeVersion.getSemanticVersion(), upgradeVersion.getUuid()));
            }
            // Drain the node so workloads are rescheduled before the upgrade.
            try {
                result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, sshKeyFile, null,
                        String.format("sudo kubectl drain %s --ignore-daemonsets --delete-local-data", hostName),
                        10000, 10000, 60000);
            } catch (Exception e) {
                logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster ID: %s, unable to drain Kubernetes node on VM ID: %s", kubernetesCluster.getUuid(), vm.getUuid()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, e);
            }
            if (!result.first()) {
                logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster ID: %s, unable to drain Kubernetes node on VM ID: %s", kubernetesCluster.getUuid(), vm.getUuid()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, null);
            }
            if (System.currentTimeMillis() > upgradeTimeoutTime) {
                logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster ID: %s, upgrade action timed out", kubernetesCluster.getUuid()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, null);
            }
            // Run the version-specific upgrade script on the node.
            try {
                result = runInstallScriptOnVM(vm, i);
            } catch (Exception e) {
                logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster ID: %s, unable to upgrade Kubernetes node on VM ID: %s", kubernetesCluster.getUuid(), vm.getUuid()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, e);
            }
            if (!result.first()) {
                logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster ID: %s, unable to upgrade Kubernetes node on VM ID: %s", kubernetesCluster.getUuid(), vm.getUuid()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, null);
            }
            if (System.currentTimeMillis() > upgradeTimeoutTime) {
                logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster ID: %s, upgrade action timed out", kubernetesCluster.getUuid()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, null);
            }
            // Make the node schedulable again.
            if (!KubernetesClusterUtil.uncordonKubernetesClusterNode(kubernetesCluster, publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, getManagementServerSshPublicKeyFile(), vm, upgradeTimeoutTime, 15000)) {
                logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster ID: %s, unable to uncordon Kubernetes node on VM ID: %s", kubernetesCluster.getUuid(), vm.getUuid()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, null);
            }
            if (i == 0) { // Wait for master to get in Ready state
                if (!KubernetesClusterUtil.isKubernetesClusterNodeReady(kubernetesCluster, publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, getManagementServerSshPublicKeyFile(), hostName, upgradeTimeoutTime, 15000)) {
                    logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster ID: %s, unable to get master Kubernetes node on VM ID: %s in ready state", kubernetesCluster.getUuid(), vm.getUuid()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, null);
            }
            }
            if (LOGGER.isInfoEnabled()) {
                LOGGER.info(String.format("Successfully upgraded node on VM ID: %s in Kubernetes cluster ID: %s with Kubernetes version(%s) ID: %s",
                        vm.getUuid(), kubernetesCluster.getUuid(), upgradeVersion.getSemanticVersion(), upgradeVersion.getUuid()));
            }
        }
    }

    /**
     * Entry point: upgrades the whole cluster to {@code upgradeVersion}.
     *
     * @return true when all nodes were upgraded and the cluster record was
     *         updated with the new version id
     * @throws CloudRuntimeException on any unrecoverable upgrade failure
     */
    public boolean upgradeCluster() throws CloudRuntimeException {
        init();
        if (LOGGER.isInfoEnabled()) {
            LOGGER.info(String.format("Upgrading Kubernetes cluster ID: %s", kubernetesCluster.getUuid()));
        }
        upgradeTimeoutTime = System.currentTimeMillis() + KubernetesClusterService.KubernetesClusterUpgradeTimeout.value() * 1000;
        Pair<String, Integer> publicIpSshPort = getKubernetesClusterServerIpSshPort(null);
        publicIpAddress = publicIpSshPort.first();
        sshPort = publicIpSshPort.second();
        if (Strings.isNullOrEmpty(publicIpAddress)) {
            logAndThrow(Level.ERROR, String.format("Upgrade failed for Kubernetes cluster ID: %s, unable to retrieve associated public IP", kubernetesCluster.getUuid()));
        }
        clusterVMs = getKubernetesClusterVMs();
        if (CollectionUtils.isEmpty(clusterVMs)) {
            logAndThrow(Level.ERROR, String.format("Upgrade failed for Kubernetes cluster ID: %s, unable to retrieve VMs for cluster", kubernetesCluster.getUuid()));
        }
        retrieveUpgradeScriptFile();
        stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.UpgradeRequested);
        attachIsoKubernetesVMs(clusterVMs, upgradeVersion);
        upgradeKubernetesClusterNodes();
        detachIsoKubernetesVMs(clusterVMs);
        // Persist the new version on the cluster record; the state transition
        // reflects whether the DB update succeeded.
        KubernetesClusterVO kubernetesClusterVO = kubernetesClusterDao.findById(kubernetesCluster.getId());
        kubernetesClusterVO.setKubernetesVersionId(upgradeVersion.getId());
        boolean updated = kubernetesClusterDao.update(kubernetesCluster.getId(), kubernetesClusterVO);
        if (!updated) {
            stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed);
        } else {
            stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationSucceeded);
        }
        return updated;
    }
}
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.kubernetes.cluster.dao;

import java.util.List;

import com.cloud.kubernetes.cluster.KubernetesCluster;
import com.cloud.kubernetes.cluster.KubernetesClusterVO;
import com.cloud.utils.db.GenericDao;
import com.cloud.utils.fsm.StateDao;

/**
 * DAO for Kubernetes cluster records, including finite-state-machine
 * transitions of {@link KubernetesCluster.State} driven by
 * {@link KubernetesCluster.Event}.
 */
public interface KubernetesClusterDao extends GenericDao<KubernetesClusterVO, Long>,
        StateDao<KubernetesCluster.State, KubernetesCluster.Event, KubernetesCluster> {

    /** Lists all clusters owned by the given account. */
    List<KubernetesClusterVO> listByAccount(long accountId);

    /** Lists clusters flagged for garbage collection that are in the Destroying state. */
    List<KubernetesClusterVO> findKubernetesClustersToGarbageCollect();

    /** Lists all clusters currently in the given state. */
    List<KubernetesClusterVO> findKubernetesClustersInState(KubernetesCluster.State state);

    /** Lists all clusters deployed on the given network. */
    List<KubernetesClusterVO> listByNetworkId(long networkId);

    /** Lists all clusters running the given Kubernetes version. */
    List<KubernetesClusterVO> listAllByKubernetesVersion(long kubernetesVersionId);
}
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.kubernetes.cluster.dao;

import java.util.List;

import org.springframework.stereotype.Component;

import com.cloud.kubernetes.cluster.KubernetesCluster;
import com.cloud.kubernetes.cluster.KubernetesClusterVO;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.TransactionLegacy;

/**
 * Default DB-backed implementation of {@link KubernetesClusterDao}.
 */
@Component
public class KubernetesClusterDaoImpl extends GenericDaoBase<KubernetesClusterVO, Long> implements KubernetesClusterDao {

    private final SearchBuilder<KubernetesClusterVO> AccountIdSearch;
    private final SearchBuilder<KubernetesClusterVO> GarbageCollectedSearch;
    private final SearchBuilder<KubernetesClusterVO> StateSearch;
    private final SearchBuilder<KubernetesClusterVO> SameNetworkSearch;
    private final SearchBuilder<KubernetesClusterVO> KubernetesVersionSearch;

    public KubernetesClusterDaoImpl() {
        AccountIdSearch = createSearchBuilder();
        AccountIdSearch.and("account", AccountIdSearch.entity().getAccountId(), SearchCriteria.Op.EQ);
        AccountIdSearch.done();

        GarbageCollectedSearch = createSearchBuilder();
        GarbageCollectedSearch.and("gc", GarbageCollectedSearch.entity().isCheckForGc(), SearchCriteria.Op.EQ);
        GarbageCollectedSearch.and("state", GarbageCollectedSearch.entity().getState(), SearchCriteria.Op.EQ);
        GarbageCollectedSearch.done();

        StateSearch = createSearchBuilder();
        StateSearch.and("state", StateSearch.entity().getState(), SearchCriteria.Op.EQ);
        StateSearch.done();

        SameNetworkSearch = createSearchBuilder();
        SameNetworkSearch.and("network_id", SameNetworkSearch.entity().getNetworkId(), SearchCriteria.Op.EQ);
        SameNetworkSearch.done();

        KubernetesVersionSearch = createSearchBuilder();
        KubernetesVersionSearch.and("kubernetesVersionId", KubernetesVersionSearch.entity().getKubernetesVersionId(), SearchCriteria.Op.EQ);
        KubernetesVersionSearch.done();
    }

    @Override
    public List<KubernetesClusterVO> listByAccount(long accountId) {
        SearchCriteria<KubernetesClusterVO> sc = AccountIdSearch.create();
        sc.setParameters("account", accountId);
        return listBy(sc, null);
    }

    @Override
    public List<KubernetesClusterVO> findKubernetesClustersToGarbageCollect() {
        SearchCriteria<KubernetesClusterVO> sc = GarbageCollectedSearch.create();
        sc.setParameters("gc", true);
        sc.setParameters("state", KubernetesCluster.State.Destroying);
        return listBy(sc);
    }

    @Override
    public List<KubernetesClusterVO> findKubernetesClustersInState(KubernetesCluster.State state) {
        SearchCriteria<KubernetesClusterVO> sc = StateSearch.create();
        sc.setParameters("state", state);
        return listBy(sc);
    }

    /**
     * Persists a state transition for the cluster inside a transaction.
     * Returns the actual result of the row update instead of an unconditional
     * true (the original hard-coded {@code return true} and carried a TODO
     * noting the update was unverified).
     */
    @Override
    public boolean updateState(KubernetesCluster.State currentState, KubernetesCluster.Event event, KubernetesCluster.State nextState,
                               KubernetesCluster vo, Object data) {
        TransactionLegacy txn = TransactionLegacy.currentTxn();
        txn.start();

        KubernetesClusterVO ccVo = (KubernetesClusterVO)vo;
        ccVo.setState(nextState);
        boolean updated = super.update(ccVo.getId(), ccVo);

        txn.commit();
        return updated;
    }

    @Override
    public List<KubernetesClusterVO> listByNetworkId(long networkId) {
        SearchCriteria<KubernetesClusterVO> sc = SameNetworkSearch.create();
        sc.setParameters("network_id", networkId);
        return this.listBy(sc);
    }

    @Override
    public List<KubernetesClusterVO> listAllByKubernetesVersion(long kubernetesVersionId) {
        SearchCriteria<KubernetesClusterVO> sc = KubernetesVersionSearch.create();
        sc.setParameters("kubernetesVersionId", kubernetesVersionId);
        return this.listBy(sc);
    }
}
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.kubernetes.cluster.dao; + + +import org.apache.cloudstack.resourcedetail.ResourceDetailsDao; + +import com.cloud.kubernetes.cluster.KubernetesClusterDetailsVO; +import com.cloud.utils.db.GenericDao; + + +public interface KubernetesClusterDetailsDao extends GenericDao, ResourceDetailsDao { + +} diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/dao/KubernetesClusterDetailsDaoImpl.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/dao/KubernetesClusterDetailsDaoImpl.java new file mode 100644 index 00000000000..66ef2adbc91 --- /dev/null +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/dao/KubernetesClusterDetailsDaoImpl.java @@ -0,0 +1,32 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.kubernetes.cluster.dao; + +import org.apache.cloudstack.resourcedetail.ResourceDetailsDaoBase; +import org.springframework.stereotype.Component; + +import com.cloud.kubernetes.cluster.KubernetesClusterDetailsVO; + + +@Component +public class KubernetesClusterDetailsDaoImpl extends ResourceDetailsDaoBase implements KubernetesClusterDetailsDao { + + @Override + public void addDetail(long resourceId, String key, String value, boolean display) { + super.addDetail(new KubernetesClusterDetailsVO(resourceId, key, value, display)); + } +} diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/dao/KubernetesClusterVmMapDao.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/dao/KubernetesClusterVmMapDao.java new file mode 100644 index 00000000000..8b08dd37d55 --- /dev/null +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/dao/KubernetesClusterVmMapDao.java @@ -0,0 +1,26 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.kubernetes.cluster.dao;

import java.util.List;

import com.cloud.kubernetes.cluster.KubernetesClusterVmMapVO;
import com.cloud.utils.db.GenericDao;

/**
 * DAO for the mapping between a Kubernetes cluster and the VMs it owns.
 */
public interface KubernetesClusterVmMapDao extends GenericDao<KubernetesClusterVmMapVO, Long> {

    /** Lists all VM mappings belonging to the given cluster. */
    List<KubernetesClusterVmMapVO> listByClusterId(long clusterId);
}
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.kubernetes.cluster.dao;

import java.util.List;

import org.springframework.stereotype.Component;

import com.cloud.kubernetes.cluster.KubernetesClusterVmMapVO;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;

/**
 * Default implementation of {@link KubernetesClusterVmMapDao}.
 */
@Component
public class KubernetesClusterVmMapDaoImpl extends GenericDaoBase<KubernetesClusterVmMapVO, Long> implements KubernetesClusterVmMapDao {

    private final SearchBuilder<KubernetesClusterVmMapVO> clusterIdSearch;

    public KubernetesClusterVmMapDaoImpl() {
        clusterIdSearch = createSearchBuilder();
        clusterIdSearch.and("clusterId", clusterIdSearch.entity().getClusterId(), SearchCriteria.Op.EQ);
        clusterIdSearch.done();
    }

    @Override
    public List<KubernetesClusterVmMapVO> listByClusterId(long clusterId) {
        SearchCriteria<KubernetesClusterVmMapVO> sc = clusterIdSearch.create();
        sc.setParameters("clusterId", clusterId);
        return listBy(sc, null);
    }
}
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package com.cloud.kubernetes.cluster.utils; + +import java.io.File; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.net.Socket; +import java.net.URL; + +import org.apache.commons.io.IOUtils; +import org.apache.log4j.Logger; + +import com.cloud.kubernetes.cluster.KubernetesCluster; +import com.cloud.uservm.UserVm; +import com.cloud.utils.Pair; +import com.cloud.utils.StringUtils; +import com.cloud.utils.ssh.SshHelper; +import com.google.common.base.Strings; + +public class KubernetesClusterUtil { + + protected static final Logger LOGGER = Logger.getLogger(KubernetesClusterUtil.class); + + public static boolean isKubernetesClusterNodeReady(final KubernetesCluster kubernetesCluster, String ipAddress, int port, + String user, File sshKeyFile, String nodeName) throws Exception { + Pair result = SshHelper.sshExecute(ipAddress, port, + user, sshKeyFile, null, + String.format("sudo kubectl get nodes | awk '{if ($1 == \"%s\" && $2 == \"Ready\") print $1}'", nodeName.toLowerCase()), + 10000, 10000, 20000); + if (result.first() && nodeName.equals(result.second().trim())) { + return true; + } + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(String.format("Failed to retrieve status for node: %s in Kubernetes cluster ID: %s. 
Output: %s", nodeName, kubernetesCluster.getUuid(), result.second())); + } + return false; + } + + public static boolean isKubernetesClusterNodeReady(final KubernetesCluster kubernetesCluster, final String ipAddress, final int port, + final String user, final File sshKeyFile, final String nodeName, + final long timeoutTime, final int waitDuration) { + while (System.currentTimeMillis() < timeoutTime) { + boolean ready = false; + try { + ready = isKubernetesClusterNodeReady(kubernetesCluster, ipAddress, port, user, sshKeyFile, nodeName); + } catch (Exception e) { + LOGGER.warn(String.format("Failed to retrieve state of node: %s in Kubernetes cluster ID: %s", nodeName, kubernetesCluster.getUuid()), e); + } + if (ready) { + return true; + } + try { + Thread.sleep(waitDuration); + } catch (InterruptedException ie) { + LOGGER.error(String.format("Error while waiting for Kubernetes cluster ID: %s node: %s to become ready", kubernetesCluster.getUuid(), nodeName), ie); + } + } + return false; + } + + /** + * Mark a given node in a given Kubernetes cluster as schedulable. + * kubectl uncordon command will be called through SSH using IP address and port of the host virtual machine or load balancer. + * Multiple retries with a given delay can be used. 
+ * uncordon is required when a particular node in Kubernetes cluster is drained (usually during upgrade) + * @param kubernetesCluster + * @param ipAddress + * @param port + * @param user + * @param sshKeyFile + * @param userVm + * @param timeoutTime + * @param waitDuration + * @return + */ + public static boolean uncordonKubernetesClusterNode(final KubernetesCluster kubernetesCluster, + final String ipAddress, final int port, + final String user, final File sshKeyFile, + final UserVm userVm, final long timeoutTime, + final int waitDuration) { + String hostName = userVm.getHostName(); + if (!Strings.isNullOrEmpty(hostName)) { + hostName = hostName.toLowerCase(); + } + while (System.currentTimeMillis() < timeoutTime) { + Pair result = null; + try { + result = SshHelper.sshExecute(ipAddress, port, user, sshKeyFile, null, + String.format("sudo kubectl uncordon %s", hostName), + 10000, 10000, 30000); + if (result.first()) { + return true; + } + } catch (Exception e) { + LOGGER.warn(String.format("Failed to uncordon node: %s on VM ID: %s in Kubernetes cluster ID: %s", hostName, userVm.getUuid(), kubernetesCluster.getUuid()), e); + } + try { + Thread.sleep(waitDuration); + } catch (InterruptedException ie) { + LOGGER.warn(String.format("Error while waiting for uncordon Kubernetes cluster ID: %s node: %s on VM ID: %s", kubernetesCluster.getUuid(), hostName, userVm.getUuid()), ie); + } + } + return false; + } + + public static boolean isKubernetesClusterAddOnServiceRunning(final KubernetesCluster kubernetesCluster, final String ipAddress, + final int port, final String user, final File sshKeyFile, + final String namespace, String serviceName) { + try { + String cmd = "sudo kubectl get pods --all-namespaces"; + if (!Strings.isNullOrEmpty(namespace)) { + cmd = String.format("sudo kubectl get pods --namespace=%s", namespace); + } + Pair result = SshHelper.sshExecute(ipAddress, port, user, + sshKeyFile, null, cmd, + 10000, 10000, 10000); + if (result.first() && 
!Strings.isNullOrEmpty(result.second())) { + String[] lines = result.second().split("\n"); + for (String line : + lines) { + if (line.contains(serviceName) && line.contains("Running")) { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(String.format("Service : %s in namespace: %s for the Kubernetes cluster ID: %s is running", serviceName, namespace, kubernetesCluster.getUuid())); + } + return true; + } + } + } + } catch (Exception e) { + LOGGER.warn(String.format("Unable to retrieve service: %s running status in namespace %s for Kubernetes cluster ID: %s", serviceName, namespace, kubernetesCluster.getUuid()), e); + } + return false; + } + + public static boolean isKubernetesClusterDashboardServiceRunning(final KubernetesCluster kubernetesCluster, String ipAddress, + final int port, final String user, final File sshKeyFile, + final long timeoutTime, final long waitDuration) { + boolean running = false; + // Check if dashboard service is up running. + while (System.currentTimeMillis() < timeoutTime) { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(String.format("Checking dashboard service for the Kubernetes cluster ID: %s to come up", kubernetesCluster.getUuid())); + } + if (isKubernetesClusterAddOnServiceRunning(kubernetesCluster, ipAddress, port, user, sshKeyFile, "kubernetes-dashboard", "kubernetes-dashboard")) { + if (LOGGER.isInfoEnabled()) { + LOGGER.info(String.format("Dashboard service for the Kubernetes cluster ID: %s is in running state", kubernetesCluster.getUuid())); + } + running = true; + break; + } + try { + Thread.sleep(waitDuration); + } catch (InterruptedException ex) { + LOGGER.error(String.format("Error while waiting for Kubernetes cluster: %s API dashboard service to be available", kubernetesCluster.getUuid()), ex); + } + } + return running; + } + + public static String getKubernetesClusterConfig(final KubernetesCluster kubernetesCluster, final String ipAddress, final int port, + final String user, final File sshKeyFile, final long timeoutTime) { 
+ String kubeConfig = ""; + while (System.currentTimeMillis() < timeoutTime) { + try { + Pair result = SshHelper.sshExecute(ipAddress, port, user, + sshKeyFile, null, "sudo cat /etc/kubernetes/admin.conf", + 10000, 10000, 10000); + + if (result.first() && !Strings.isNullOrEmpty(result.second())) { + kubeConfig = result.second(); + break; + } else { + if (LOGGER.isInfoEnabled()) { + LOGGER.info(String.format("Failed to retrieve kube-config file for Kubernetes cluster ID: %s. Output: %s", kubernetesCluster.getUuid(), result.second())); + } + } + } catch (Exception e) { + LOGGER.warn(String.format("Failed to retrieve kube-config file for Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), e); + } + } + return kubeConfig; + } + + public static int getKubernetesClusterReadyNodesCount(final KubernetesCluster kubernetesCluster, final String ipAddress, + final int port, final String user, final File sshKeyFile) throws Exception { + Pair result = SshHelper.sshExecute(ipAddress, port, + user, sshKeyFile, null, + "sudo kubectl get nodes | awk '{if ($2 == \"Ready\") print $1}' | wc -l", + 10000, 10000, 20000); + if (result.first()) { + return Integer.parseInt(result.second().trim().replace("\"", "")); + } else { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(String.format("Failed to retrieve ready nodes for Kubernetes cluster ID: %s. 
Output: %s", kubernetesCluster.getUuid(), result.second())); + } + } + return 0; + } + + public static boolean isKubernetesClusterServerRunning(final KubernetesCluster kubernetesCluster, final String ipAddress, + final int port, final long timeoutTime, final long waitDuration) { + boolean k8sApiServerSetup = false; + while (System.currentTimeMillis() < timeoutTime) { + try { + String versionOutput = IOUtils.toString(new URL(String.format("https://%s:%d/version", ipAddress, port)), StringUtils.getPreferredCharset()); + if (!Strings.isNullOrEmpty(versionOutput)) { + if (LOGGER.isInfoEnabled()) { + LOGGER.info(String.format("Kubernetes cluster ID: %s API has been successfully provisioned, %s", kubernetesCluster.getUuid(), versionOutput)); + } + k8sApiServerSetup = true; + break; + } + } catch (Exception e) { + LOGGER.warn(String.format("API endpoint for Kubernetes cluster ID: %s not available", kubernetesCluster.getUuid()), e); + } + try { + Thread.sleep(waitDuration); + } catch (InterruptedException ie) { + LOGGER.error(String.format("Error while waiting for Kubernetes cluster ID: %s API endpoint to be available", kubernetesCluster.getUuid()), ie); + } + } + return k8sApiServerSetup; + } + + public static boolean isKubernetesClusterMasterVmRunning(final KubernetesCluster kubernetesCluster, final String ipAddress, + final int port, final long timeoutTime) { + boolean masterVmRunning = false; + while (!masterVmRunning && System.currentTimeMillis() < timeoutTime) { + try (Socket socket = new Socket()) { + socket.connect(new InetSocketAddress(ipAddress, port), 10000); + masterVmRunning = true; + } catch (IOException e) { + if (LOGGER.isInfoEnabled()) { + LOGGER.info(String.format("Waiting for Kubernetes cluster ID: %s master node VMs to be accessible", kubernetesCluster.getUuid())); + } + try { + Thread.sleep(10000); + } catch (InterruptedException ex) { + LOGGER.warn(String.format("Error while waiting for Kubernetes cluster ID: %s master node VMs to be accessible", 
kubernetesCluster.getUuid()), ex); + } + } + } + return masterVmRunning; + } + + public static boolean validateKubernetesClusterReadyNodesCount(final KubernetesCluster kubernetesCluster, + final String ipAddress, final int port, + final String user, final File sshKeyFile, + final long timeoutTime, final long waitDuration) { + while (System.currentTimeMillis() < timeoutTime) { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(String.format("Checking ready nodes for the Kubernetes cluster ID: %s with total %d provisioned nodes", kubernetesCluster.getUuid(), kubernetesCluster.getTotalNodeCount())); + } + try { + int nodesCount = KubernetesClusterUtil.getKubernetesClusterReadyNodesCount(kubernetesCluster, ipAddress, port, + user, sshKeyFile); + if (nodesCount == kubernetesCluster.getTotalNodeCount()) { + if (LOGGER.isInfoEnabled()) { + LOGGER.info(String.format("Kubernetes cluster ID: %s has %d ready nodes now", kubernetesCluster.getUuid(), kubernetesCluster.getTotalNodeCount())); + } + return true; + } else { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(String.format("Kubernetes cluster ID: %s has total %d provisioned nodes while %d ready now", kubernetesCluster.getUuid(), kubernetesCluster.getTotalNodeCount(), nodesCount)); + } + } + } catch (Exception e) { + LOGGER.warn(String.format("Failed to retrieve ready node count for Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), e); + } + try { + Thread.sleep(waitDuration); + } catch (InterruptedException ex) { + LOGGER.warn(String.format("Error while waiting during Kubernetes cluster ID: %s ready node check", kubernetesCluster.getUuid()), ex); + } + } + return false; + } + + public static String generateClusterToken(final KubernetesCluster kubernetesCluster) { + String token = kubernetesCluster.getUuid(); + token = token.replaceAll("-", ""); + token = token.substring(0, 22); + token = token.substring(0, 6) + "." 
+ token.substring(6); + return token; + } + + public static String generateClusterHACertificateKey(final KubernetesCluster kubernetesCluster) { + String uuid = kubernetesCluster.getUuid(); + StringBuilder token = new StringBuilder(uuid.replaceAll("-", "")); + while (token.length() < 64) { + token.append(token); + } + return token.toString().substring(0, 64); + } +} diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesSupportedVersion.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesSupportedVersion.java new file mode 100644 index 00000000000..0cb430acfe1 --- /dev/null +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesSupportedVersion.java @@ -0,0 +1,49 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+
+package com.cloud.kubernetes.version;
+
+import org.apache.cloudstack.api.Identity;
+import org.apache.cloudstack.api.InternalIdentity;
+
+/**
+ * KubernetesSupportedVersion describes the properties of a supported Kubernetes version:
+ * its name, semantic version string, the binaries ISO it is backed by, an optional zone
+ * restriction, its enablement state, and the minimum node compute requirements.
+ */
+public interface KubernetesSupportedVersion extends InternalIdentity, Identity {
+
+    // Lifecycle state of a version entry; only Enabled versions are offered for new clusters
+    public enum State {
+        Disabled, Enabled
+    }
+
+    long getId();
+    String getName();
+    // dotted numeric version string, e.g. "1.16.3"
+    String getSemanticVersion();
+    // ID of the registered binaries ISO backing this version
+    long getIsoId();
+    // zone restriction; null presumably means available in all zones — TODO confirm
+    Long getZoneId();
+    State getState();
+
+    /**
+     * @return minimum # of cpu.
+     */
+    int getMinimumCpu();
+
+    /**
+     * @return minimum ram size in megabytes
+     */
+    int getMinimumRamSize();
+}
diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesSupportedVersionVO.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesSupportedVersionVO.java
new file mode 100644
index 00000000000..3f66f943b69
--- /dev/null
+++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesSupportedVersionVO.java
@@ -0,0 +1,168 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+ +package com.cloud.kubernetes.version; + +import java.util.Date; +import java.util.UUID; + +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.EnumType; +import javax.persistence.Enumerated; +import javax.persistence.GeneratedValue; +import javax.persistence.GenerationType; +import javax.persistence.Id; +import javax.persistence.Table; + +import com.cloud.utils.db.GenericDao; + +@Entity +@Table(name = "kubernetes_supported_version") +public class KubernetesSupportedVersionVO implements KubernetesSupportedVersion { + @Id + @GeneratedValue(strategy = GenerationType.IDENTITY) + @Column(name = "id") + private long id; + + @Column(name = "uuid") + private String uuid; + + @Column(name = "name") + private String name; + + @Column(name = "semantic_version") + private String semanticVersion; + + @Column(name = "iso_id") + private long isoId; + + @Column(name = "zone_id") + private Long zoneId; + + @Column(name = "state") + @Enumerated(value = EnumType.STRING) + State state = State.Enabled; + + @Column(name = "min_cpu") + private int minimumCpu; + + @Column(name = "min_ram_size") + private int minimumRamSize; + + @Column(name = GenericDao.CREATED_COLUMN) + Date created; + + @Column(name = GenericDao.REMOVED_COLUMN) + Date removed; + + public KubernetesSupportedVersionVO() { + this.uuid = UUID.randomUUID().toString(); + } + + public KubernetesSupportedVersionVO(String name, String semanticVersion, long isoId, Long zoneId, + int minimumCpu, int minimumRamSize) { + this.uuid = UUID.randomUUID().toString(); + this.name = name; + this.semanticVersion = semanticVersion; + this.isoId = isoId; + this.zoneId = zoneId; + this.minimumCpu = minimumCpu; + this.minimumRamSize = minimumRamSize; + } + + @Override + public long getId() { + return id; + } + + @Override + public String getUuid() { + return uuid; + } + + @Override + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + 
@Override + public String getSemanticVersion() { + return semanticVersion; + } + + public void setSemanticVersion(String semanticVersion) { + this.semanticVersion = semanticVersion; + } + + @Override + public long getIsoId() { + return isoId; + } + + public void setIsoId(long isoId) { + this.isoId = isoId; + } + + @Override + public Long getZoneId() { + return zoneId; + } + + public void setZoneId(Long zoneId) { + this.zoneId = zoneId; + } + + @Override + public State getState() { + return this.state; + } + + public void setState(State state) { + this.state = state; + } + + @Override + public int getMinimumCpu() { + return minimumCpu; + } + + public void setMinimumCpu(int minimumCpu) { + this.minimumCpu = minimumCpu; + } + + @Override + public int getMinimumRamSize() { + return minimumRamSize; + } + + public void setMinimumRamSize(int minimumRamSize) { + this.minimumRamSize = minimumRamSize; + } + + public Date getCreated() { + return created; + } + + public Date getRemoved() { + return removed; + } +} diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesVersionEventTypes.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesVersionEventTypes.java new file mode 100644 index 00000000000..4c979ba8979 --- /dev/null +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesVersionEventTypes.java @@ -0,0 +1,24 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package com.cloud.kubernetes.version; + +public class KubernetesVersionEventTypes { + public static final String EVENT_KUBERNETES_VERSION_ADD = "KUBERNETES.VERSION.ADD"; + public static final String EVENT_KUBERNETES_VERSION_DELETE = "KUBERNETES.VERSION.DELETE"; + public static final String EVENT_KUBERNETES_VERSION_UPDATE = "KUBERNETES.VERSION.UPDATE"; +} diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesVersionManagerImpl.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesVersionManagerImpl.java new file mode 100644 index 00000000000..4eefc3f12ec --- /dev/null +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesVersionManagerImpl.java @@ -0,0 +1,388 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +package com.cloud.kubernetes.version; + +import java.util.ArrayList; +import java.util.List; + +import javax.inject.Inject; + +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.command.admin.kubernetes.version.AddKubernetesSupportedVersionCmd; +import org.apache.cloudstack.api.command.admin.kubernetes.version.DeleteKubernetesSupportedVersionCmd; +import org.apache.cloudstack.api.command.admin.kubernetes.version.UpdateKubernetesSupportedVersionCmd; +import org.apache.cloudstack.api.command.user.iso.DeleteIsoCmd; +import org.apache.cloudstack.api.command.user.iso.RegisterIsoCmd; +import org.apache.cloudstack.api.command.user.kubernetes.version.ListKubernetesSupportedVersionsCmd; +import org.apache.cloudstack.api.response.KubernetesSupportedVersionResponse; +import org.apache.cloudstack.api.response.ListResponse; +import org.apache.log4j.Logger; + +import com.cloud.api.query.dao.TemplateJoinDao; +import com.cloud.api.query.vo.TemplateJoinVO; +import com.cloud.dc.DataCenterVO; +import com.cloud.dc.dao.DataCenterDao; +import com.cloud.event.ActionEvent; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.kubernetes.cluster.KubernetesClusterService; +import com.cloud.kubernetes.cluster.KubernetesClusterVO; +import com.cloud.kubernetes.cluster.dao.KubernetesClusterDao; +import com.cloud.kubernetes.version.dao.KubernetesSupportedVersionDao; +import com.cloud.storage.VMTemplateVO; +import com.cloud.storage.dao.VMTemplateDao; +import com.cloud.storage.dao.VMTemplateZoneDao; +import com.cloud.template.TemplateApiService; +import com.cloud.template.VirtualMachineTemplate; +import com.cloud.user.AccountManager; +import com.cloud.utils.component.ComponentContext; +import com.cloud.utils.component.ManagerBase; +import 
com.cloud.utils.exception.CloudRuntimeException; +import com.google.common.base.Strings; + +public class KubernetesVersionManagerImpl extends ManagerBase implements KubernetesVersionService { + public static final Logger LOGGER = Logger.getLogger(KubernetesVersionManagerImpl.class.getName()); + + @Inject + private KubernetesSupportedVersionDao kubernetesSupportedVersionDao; + @Inject + private KubernetesClusterDao kubernetesClusterDao; + @Inject + private AccountManager accountManager; + @Inject + private VMTemplateDao templateDao; + @Inject + private TemplateJoinDao templateJoinDao; + @Inject + private VMTemplateZoneDao templateZoneDao; + @Inject + private DataCenterDao dataCenterDao; + @Inject + private TemplateApiService templateService; + + private KubernetesSupportedVersionResponse createKubernetesSupportedVersionResponse(final KubernetesSupportedVersion kubernetesSupportedVersion) { + KubernetesSupportedVersionResponse response = new KubernetesSupportedVersionResponse(); + response.setObjectName("kubernetessupportedversion"); + response.setId(kubernetesSupportedVersion.getUuid()); + response.setName(kubernetesSupportedVersion.getName()); + response.setSemanticVersion(kubernetesSupportedVersion.getSemanticVersion()); + if (kubernetesSupportedVersion.getState() != null) { + response.setState(kubernetesSupportedVersion.getState().toString()); + } + response.setMinimumCpu(kubernetesSupportedVersion.getMinimumCpu()); + response.setMinimumRamSize(kubernetesSupportedVersion.getMinimumRamSize()); + DataCenterVO zone = dataCenterDao.findById(kubernetesSupportedVersion.getZoneId()); + if (zone != null) { + response.setZoneId(zone.getUuid()); + response.setZoneName(zone.getName()); + } + if (compareSemanticVersions(kubernetesSupportedVersion.getSemanticVersion(), + KubernetesClusterService.MIN_KUBERNETES_VERSION_HA_SUPPORT)>=0) { + response.setSupportsHA(true); + } else { + response.setSupportsHA(false); + } + TemplateJoinVO template = 
templateJoinDao.findById(kubernetesSupportedVersion.getIsoId()); + if (template != null) { + response.setIsoId(template.getUuid()); + response.setIsoName(template.getName()); + response.setIsoState(template.getState().toString()); + } + return response; + } + + private ListResponse createKubernetesSupportedVersionListResponse(List versions) { + List responseList = new ArrayList<>(); + for (KubernetesSupportedVersionVO version : versions) { + responseList.add(createKubernetesSupportedVersionResponse(version)); + } + ListResponse response = new ListResponse<>(); + response.setResponses(responseList); + return response; + } + + private static boolean isSemanticVersion(final String version) { + if(!version.matches("[0-9]+(\\.[0-9]+)*")) { + return false; + } + String[] parts = version.split("\\."); + if (parts.length < 3) { + return false; + } + return true; + } + + private List filterKubernetesSupportedVersions(List versions, final String minimumSemanticVersion) { + if (!Strings.isNullOrEmpty(minimumSemanticVersion)) { + for (int i = versions.size() - 1; i >= 0; --i) { + KubernetesSupportedVersionVO version = versions.get(i); + try { + if (compareSemanticVersions(minimumSemanticVersion, version.getSemanticVersion()) > 0) { + versions.remove(i); + } + } catch (IllegalArgumentException e) { + LOGGER.warn(String.format("Unable to compare Kubernetes version for supported version ID: %s with %s", version.getUuid(), minimumSemanticVersion)); + versions.remove(i); + } + } + } + return versions; + } + + private VirtualMachineTemplate registerKubernetesVersionIso(final Long zoneId, final String versionName, final String isoUrl, final String isoChecksum)throws IllegalAccessException, NoSuchFieldException, + IllegalArgumentException, ResourceAllocationException { + String isoName = String.format("%s-Kubernetes-Binaries-ISO", versionName); + RegisterIsoCmd registerIsoCmd = new RegisterIsoCmd(); + registerIsoCmd = ComponentContext.inject(registerIsoCmd); + 
registerIsoCmd.setIsoName(isoName); + registerIsoCmd.setPublic(true); + if (zoneId != null) { + registerIsoCmd.setZoneId(zoneId); + } + registerIsoCmd.setDisplayText(isoName); + registerIsoCmd.setBootable(false); + registerIsoCmd.setUrl(isoUrl); + if (!Strings.isNullOrEmpty(isoChecksum)) { + registerIsoCmd.setChecksum(isoChecksum); + } + registerIsoCmd.setAccountName(accountManager.getSystemAccount().getAccountName()); + registerIsoCmd.setDomainId(accountManager.getSystemAccount().getDomainId()); + return templateService.registerIso(registerIsoCmd); + } + + private void deleteKubernetesVersionIso(long templateId) throws IllegalAccessException, NoSuchFieldException, + IllegalArgumentException { + DeleteIsoCmd deleteIsoCmd = new DeleteIsoCmd(); + deleteIsoCmd = ComponentContext.inject(deleteIsoCmd); + deleteIsoCmd.setId(templateId); + templateService.deleteIso(deleteIsoCmd); + } + + public static int compareSemanticVersions(String v1, String v2) throws IllegalArgumentException { + if (Strings.isNullOrEmpty(v1) || Strings.isNullOrEmpty(v2)) { + throw new IllegalArgumentException(String.format("Invalid version comparision with versions %s, %s", v1, v2)); + } + if(!isSemanticVersion(v1)) { + throw new IllegalArgumentException(String.format("Invalid version format, %s", v1)); + } + if(!isSemanticVersion(v2)) { + throw new IllegalArgumentException(String.format("Invalid version format, %s", v2)); + } + String[] thisParts = v1.split("\\."); + String[] thatParts = v2.split("\\."); + int length = Math.max(thisParts.length, thatParts.length); + for(int i = 0; i < length; i++) { + int thisPart = i < thisParts.length ? + Integer.parseInt(thisParts[i]) : 0; + int thatPart = i < thatParts.length ? 
+ Integer.parseInt(thatParts[i]) : 0; + if(thisPart < thatPart) + return -1; + if(thisPart > thatPart) + return 1; + } + return 0; + } + + /** + * Returns a boolean value whether Kubernetes cluster upgrade can be carried from a given currentVersion to upgradeVersion + * Kubernetes clusters can only be upgraded from one MINOR version to the next MINOR version, or between PATCH versions of the same MINOR. + * That is, MINOR versions cannot be skipped during upgrade. + * For example, you can upgrade from 1.y to 1.y+1, but not from 1.y to 1.y+2 + * @param currentVersion + * @param upgradeVersion + * @return + * @throws IllegalArgumentException + */ + public static boolean canUpgradeKubernetesVersion(final String currentVersion, final String upgradeVersion) throws IllegalArgumentException { + int versionDiff = compareSemanticVersions(upgradeVersion, currentVersion); + if (versionDiff == 0) { + throw new IllegalArgumentException(String.format("Kubernetes clusters can not be upgraded, current version: %s, upgrade version: %s", currentVersion, upgradeVersion)); + } else if (versionDiff < 0) { + throw new IllegalArgumentException(String.format("Kubernetes clusters can not be downgraded, current version: %s, upgrade version: %s", currentVersion, upgradeVersion)); + } + String[] thisParts = currentVersion.split("\\."); + String[] thatParts = upgradeVersion.split("\\."); + int majorVerDiff = Integer.parseInt(thatParts[0]) - Integer.parseInt(thisParts[0]); + int minorVerDiff = Integer.parseInt(thatParts[1]) - Integer.parseInt(thisParts[1]); + + if (majorVerDiff != 0 || minorVerDiff > 1) { + throw new IllegalArgumentException(String.format("Kubernetes clusters can be upgraded between next minor or patch version releases, current version: %s, upgrade version: %s", currentVersion, upgradeVersion)); + } + return true; + } + + @Override + public ListResponse listKubernetesSupportedVersions(final ListKubernetesSupportedVersionsCmd cmd) { + if 
(!KubernetesClusterService.KubernetesServiceEnabled.value()) { + throw new CloudRuntimeException("Kubernetes Service plugin is disabled"); + } + final Long versionId = cmd.getId(); + final Long zoneId = cmd.getZoneId(); + String minimumSemanticVersion = cmd.getMinimumSemanticVersion(); + final Long minimumKubernetesVersionId = cmd.getMinimumKubernetesVersionId(); + if (!Strings.isNullOrEmpty(minimumSemanticVersion) && minimumKubernetesVersionId != null) { + throw new CloudRuntimeException(String.format("Both parameters %s and %s can not be passed together", ApiConstants.MIN_SEMANTIC_VERSION, ApiConstants.MIN_KUBERNETES_VERSION_ID)); + } + if (minimumKubernetesVersionId != null) { + KubernetesSupportedVersionVO minVersion = kubernetesSupportedVersionDao.findById(minimumKubernetesVersionId); + if (minVersion == null) { + throw new InvalidParameterValueException(String.format("Invalid %s passed", ApiConstants.MIN_KUBERNETES_VERSION_ID)); + } + minimumSemanticVersion = minVersion.getSemanticVersion(); + } + List versions = new ArrayList<>(); + if (versionId != null) { + KubernetesSupportedVersionVO version = kubernetesSupportedVersionDao.findById(versionId); + if (version != null && (zoneId == null || version.getZoneId() == null || version.getZoneId().equals(zoneId))) { + versions.add(version); + } + } else { + if (zoneId == null) { + versions = kubernetesSupportedVersionDao.listAll(); + } else { + versions = kubernetesSupportedVersionDao.listAllInZone(zoneId); + } + } + versions = filterKubernetesSupportedVersions(versions, minimumSemanticVersion); + + return createKubernetesSupportedVersionListResponse(versions); + } + + @Override + @ActionEvent(eventType = KubernetesVersionEventTypes.EVENT_KUBERNETES_VERSION_ADD, eventDescription = "Adding Kubernetes supported version") + public KubernetesSupportedVersionResponse addKubernetesSupportedVersion(final AddKubernetesSupportedVersionCmd cmd) { + if (!KubernetesClusterService.KubernetesServiceEnabled.value()) { + throw new 
CloudRuntimeException("Kubernetes Service plugin is disabled"); + } + String name = cmd.getName(); + final String semanticVersion = cmd.getSemanticVersion(); + final Long zoneId = cmd.getZoneId(); + final String isoUrl = cmd.getUrl(); + final String isoChecksum = cmd.getChecksum(); + final Integer minimumCpu = cmd.getMinimumCpu(); + final Integer minimumRamSize = cmd.getMinimumRamSize(); + if (minimumCpu == null || minimumCpu < KubernetesClusterService.MIN_KUBERNETES_CLUSTER_NODE_CPU) { + throw new InvalidParameterValueException(String.format("Invalid value for %s parameter", ApiConstants.MIN_CPU_NUMBER)); + } + if (minimumRamSize == null || minimumRamSize < KubernetesClusterService.MIN_KUBERNETES_CLUSTER_NODE_RAM_SIZE) { + throw new InvalidParameterValueException(String.format("Invalid value for %s parameter", ApiConstants.MIN_MEMORY)); + } + if (compareSemanticVersions(semanticVersion, MIN_KUBERNETES_VERSION) < 0) { + throw new InvalidParameterValueException(String.format("New supported Kubernetes version cannot be added as %s is minimum version supported by Kubernetes Service", MIN_KUBERNETES_VERSION)); + } + if (zoneId != null && dataCenterDao.findById(zoneId) == null) { + throw new InvalidParameterValueException("Invalid zone specified"); + } + if (Strings.isNullOrEmpty(isoUrl)) { + throw new InvalidParameterValueException(String.format("Invalid URL for ISO specified, %s", isoUrl)); + } + if (Strings.isNullOrEmpty(name)) { + name = String.format("v%s", semanticVersion); + if (zoneId != null) { + name = String.format("%s-%s", name, dataCenterDao.findById(zoneId).getName()); + } + } + + VMTemplateVO template = null; + try { + VirtualMachineTemplate vmTemplate = registerKubernetesVersionIso(zoneId, name, isoUrl, isoChecksum); + template = templateDao.findById(vmTemplate.getId()); + } catch (IllegalAccessException | NoSuchFieldException | IllegalArgumentException | ResourceAllocationException ex) { + LOGGER.error(String.format("Unable to register binaries ISO for 
supported kubernetes version, %s, with url: %s", name, isoUrl), ex); + throw new CloudRuntimeException(String.format("Unable to register binaries ISO for supported kubernetes version, %s, with url: %s", name, isoUrl)); + } + + KubernetesSupportedVersionVO supportedVersionVO = new KubernetesSupportedVersionVO(name, semanticVersion, template.getId(), zoneId, minimumCpu, minimumRamSize); + supportedVersionVO = kubernetesSupportedVersionDao.persist(supportedVersionVO); + + return createKubernetesSupportedVersionResponse(supportedVersionVO); + } + + @Override + @ActionEvent(eventType = KubernetesVersionEventTypes.EVENT_KUBERNETES_VERSION_DELETE, eventDescription = "Deleting Kubernetes supported version", async = true) + public boolean deleteKubernetesSupportedVersion(final DeleteKubernetesSupportedVersionCmd cmd) { + if (!KubernetesClusterService.KubernetesServiceEnabled.value()) { + throw new CloudRuntimeException("Kubernetes Service plugin is disabled"); + } + final Long versionId = cmd.getId(); + KubernetesSupportedVersion version = kubernetesSupportedVersionDao.findById(versionId); + if (version == null) { + throw new InvalidParameterValueException("Invalid Kubernetes version id specified"); + } + List<KubernetesClusterVO> clusters = kubernetesClusterDao.listAllByKubernetesVersion(versionId); + if (clusters.size() > 0) { + throw new CloudRuntimeException(String.format("Unable to delete Kubernetes version ID: %s. 
Existing clusters currently using the version.", version.getUuid())); + } + + VMTemplateVO template = templateDao.findByIdIncludingRemoved(version.getIsoId()); + if (template == null) { + LOGGER.warn(String.format("Unable to find ISO associated with supported Kubernetes version ID: %s", version.getUuid())); + } + if (template != null && template.getRemoved() == null) { // Delete ISO + try { + deleteKubernetesVersionIso(template.getId()); + } catch (IllegalAccessException | NoSuchFieldException | IllegalArgumentException ex) { + LOGGER.error(String.format("Unable to delete binaries ISO ID: %s associated with supported kubernetes version ID: %s", template.getUuid(), version.getUuid()), ex); + throw new CloudRuntimeException(String.format("Unable to delete binaries ISO ID: %s associated with supported kubernetes version ID: %s", template.getUuid(), version.getUuid())); + } + } + return kubernetesSupportedVersionDao.remove(version.getId()); + } + + @Override + @ActionEvent(eventType = KubernetesVersionEventTypes.EVENT_KUBERNETES_VERSION_UPDATE, eventDescription = "Updating Kubernetes supported version") + public KubernetesSupportedVersionResponse updateKubernetesSupportedVersion(final UpdateKubernetesSupportedVersionCmd cmd) { + if (!KubernetesClusterService.KubernetesServiceEnabled.value()) { + throw new CloudRuntimeException("Kubernetes Service plugin is disabled"); + } + final Long versionId = cmd.getId(); + KubernetesSupportedVersion.State state = null; + KubernetesSupportedVersionVO version = kubernetesSupportedVersionDao.findById(versionId); + if (version == null) { + throw new InvalidParameterValueException("Invalid Kubernetes version id specified"); + } + try { + state = KubernetesSupportedVersion.State.valueOf(cmd.getState()); + } catch (IllegalArgumentException iae) { + throw new InvalidParameterValueException(String.format("Invalid value for %s parameter", ApiConstants.STATE)); + } + if (!state.equals(version.getState())) { + version = 
kubernetesSupportedVersionDao.createForUpdate(version.getId()); + version.setState(state); + if (!kubernetesSupportedVersionDao.update(version.getId(), version)) { + throw new CloudRuntimeException(String.format("Failed to update Kubernetes supported version ID: %s", version.getUuid())); + } + version = kubernetesSupportedVersionDao.findById(versionId); + } + return createKubernetesSupportedVersionResponse(version); + } + + @Override + public List<Class<?>> getCommands() { + List<Class<?>> cmdList = new ArrayList<Class<?>>(); + if (!KubernetesClusterService.KubernetesServiceEnabled.value()) { + return cmdList; + } + cmdList.add(AddKubernetesSupportedVersionCmd.class); + cmdList.add(ListKubernetesSupportedVersionsCmd.class); + cmdList.add(DeleteKubernetesSupportedVersionCmd.class); + cmdList.add(UpdateKubernetesSupportedVersionCmd.class); + return cmdList; + } +} diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesVersionService.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesVersionService.java new file mode 100644 index 00000000000..8e4cd032556 --- /dev/null +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesVersionService.java @@ -0,0 +1,36 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +package com.cloud.kubernetes.version; + +import org.apache.cloudstack.api.command.admin.kubernetes.version.AddKubernetesSupportedVersionCmd; +import org.apache.cloudstack.api.command.admin.kubernetes.version.DeleteKubernetesSupportedVersionCmd; +import org.apache.cloudstack.api.command.admin.kubernetes.version.UpdateKubernetesSupportedVersionCmd; +import org.apache.cloudstack.api.command.user.kubernetes.version.ListKubernetesSupportedVersionsCmd; +import org.apache.cloudstack.api.response.KubernetesSupportedVersionResponse; +import org.apache.cloudstack.api.response.ListResponse; + +import com.cloud.utils.component.PluggableService; +import com.cloud.utils.exception.CloudRuntimeException; + +public interface KubernetesVersionService extends PluggableService { + static final String MIN_KUBERNETES_VERSION = "1.11.0"; + ListResponse<KubernetesSupportedVersionResponse> listKubernetesSupportedVersions(ListKubernetesSupportedVersionsCmd cmd); + KubernetesSupportedVersionResponse addKubernetesSupportedVersion(AddKubernetesSupportedVersionCmd cmd) throws CloudRuntimeException; + boolean deleteKubernetesSupportedVersion(DeleteKubernetesSupportedVersionCmd cmd) throws CloudRuntimeException; + KubernetesSupportedVersionResponse updateKubernetesSupportedVersion(UpdateKubernetesSupportedVersionCmd cmd) throws CloudRuntimeException; +} diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/dao/KubernetesSupportedVersionDao.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/dao/KubernetesSupportedVersionDao.java new file mode 100644 index 00000000000..69de862985b --- /dev/null +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/dao/KubernetesSupportedVersionDao.java @@ -0,0 +1,27 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package com.cloud.kubernetes.version.dao; + +import java.util.List; + +import com.cloud.kubernetes.version.KubernetesSupportedVersionVO; +import com.cloud.utils.db.GenericDao; + +public interface KubernetesSupportedVersionDao extends GenericDao<KubernetesSupportedVersionVO, Long> { + List<KubernetesSupportedVersionVO> listAllInZone(long dataCenterId); +} diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/dao/KubernetesSupportedVersionDaoImpl.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/dao/KubernetesSupportedVersionDaoImpl.java new file mode 100644 index 00000000000..5dd6eff199a --- /dev/null +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/dao/KubernetesSupportedVersionDaoImpl.java @@ -0,0 +1,42 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package com.cloud.kubernetes.version.dao; + +import java.util.List; + +import org.springframework.stereotype.Component; + +import com.cloud.kubernetes.version.KubernetesSupportedVersionVO; +import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.SearchCriteria; + +@Component +public class KubernetesSupportedVersionDaoImpl extends GenericDaoBase<KubernetesSupportedVersionVO, Long> implements KubernetesSupportedVersionDao { + public KubernetesSupportedVersionDaoImpl() { + } + + @Override + public List<KubernetesSupportedVersionVO> listAllInZone(long dataCenterId) { + SearchCriteria<KubernetesSupportedVersionVO> sc = createSearchCriteria(); + SearchCriteria<KubernetesSupportedVersionVO> scc = createSearchCriteria(); + scc.addOr("zoneId", SearchCriteria.Op.EQ, dataCenterId); + scc.addOr("zoneId", SearchCriteria.Op.NULL); + sc.addAnd("zoneId", SearchCriteria.Op.SC, scc); + return listBy(sc); + } +} diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/admin/kubernetes/version/AddKubernetesSupportedVersionCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/admin/kubernetes/version/AddKubernetesSupportedVersionCmd.java new file mode 100644 index 00000000000..a85e6ee064a --- /dev/null +++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/admin/kubernetes/version/AddKubernetesSupportedVersionCmd.java @@ -0,0 +1,153 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.api.command.admin.kubernetes.version; + +import javax.inject.Inject; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ResponseObject; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.command.admin.AdminCmd; +import org.apache.cloudstack.api.response.KubernetesSupportedVersionResponse; +import org.apache.cloudstack.api.response.ZoneResponse; +import org.apache.cloudstack.context.CallContext; +import org.apache.log4j.Logger; + +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.kubernetes.version.KubernetesSupportedVersion; +import com.cloud.kubernetes.version.KubernetesVersionService; +import com.cloud.utils.exception.CloudRuntimeException; +import com.google.common.base.Strings; + +@APICommand(name = AddKubernetesSupportedVersionCmd.APINAME, + description = "Add a supported Kubernetes version", + responseObject = KubernetesSupportedVersionResponse.class, + responseView = ResponseObject.ResponseView.Full, + entityType = {KubernetesSupportedVersion.class}, + authorized = 
{RoleType.Admin}) +public class AddKubernetesSupportedVersionCmd extends BaseCmd implements AdminCmd { + public static final Logger LOGGER = Logger.getLogger(AddKubernetesSupportedVersionCmd.class.getName()); + public static final String APINAME = "addKubernetesSupportedVersion"; + + @Inject + private KubernetesVersionService kubernetesVersionService; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + @Parameter(name = ApiConstants.NAME, type = CommandType.STRING, + description = "the name of the Kubernetes supported version") + private String name; + + @Parameter(name = ApiConstants.SEMANTIC_VERSION, type = CommandType.STRING, required = true, + description = "the semantic version of the Kubernetes version") + private String semanticVersion; + + @Parameter(name = ApiConstants.ZONE_ID, type = CommandType.UUID, + entityType = ZoneResponse.class, + description = "the ID of the zone in which Kubernetes supported version will be available") + private Long zoneId; + + @Parameter(name = ApiConstants.URL, type = CommandType.STRING, + description = "the URL of the binaries ISO for Kubernetes supported version") + private String url; + + @Parameter(name = ApiConstants.CHECKSUM, type = CommandType.STRING, + description = "the checksum value of the binaries ISO. 
" + ApiConstants.CHECKSUM_PARAMETER_PREFIX_DESCRIPTION) + private String checksum; + + @Parameter(name = ApiConstants.MIN_CPU_NUMBER, type = CommandType.INTEGER, required = true, + description = "the minimum number of CPUs to be set with the Kubernetes version") + private Integer minimumCpu; + + @Parameter(name = ApiConstants.MIN_MEMORY, type = CommandType.INTEGER, required = true, + description = "the minimum RAM size in MB to be set with the Kubernetes version") + private Integer minimumRamSize; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + + public String getName() { + return name; + } + + public String getSemanticVersion() { + if(Strings.isNullOrEmpty(semanticVersion)) { + throw new InvalidParameterValueException("Version can not be null"); + } + if(!semanticVersion.matches("[0-9]+(\\.[0-9]+)*")) { + throw new IllegalArgumentException("Invalid version format. 
Semantic version needed"); + } + return semanticVersion; + } + + public Long getZoneId() { + return zoneId; + } + + public String getUrl() { + return url; + } + + public String getChecksum() { + return checksum; + } + + public Integer getMinimumCpu() { + return minimumCpu; + } + + public Integer getMinimumRamSize() { + return minimumRamSize; + } + + @Override + public String getCommandName() { + return APINAME.toLowerCase() + "response"; + } + + @Override + public long getEntityOwnerId() { + return CallContext.current().getCallingAccountId(); + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + @Override + public void execute() throws ServerApiException, ConcurrentOperationException { + try { + KubernetesSupportedVersionResponse response = kubernetesVersionService.addKubernetesSupportedVersion(this); + if (response == null) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to add Kubernetes supported version"); + } + response.setResponseName(getCommandName()); + setResponseObject(response); + } catch (CloudRuntimeException ex) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); + } + } +} diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/admin/kubernetes/version/DeleteKubernetesSupportedVersionCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/admin/kubernetes/version/DeleteKubernetesSupportedVersionCmd.java new file mode 100644 index 00000000000..02489147c65 --- /dev/null +++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/admin/kubernetes/version/DeleteKubernetesSupportedVersionCmd.java @@ -0,0 +1,104 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.api.command.admin.kubernetes.version; + +import javax.inject.Inject; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.command.admin.AdminCmd; +import org.apache.cloudstack.api.response.KubernetesSupportedVersionResponse; +import org.apache.cloudstack.api.response.SuccessResponse; +import org.apache.cloudstack.context.CallContext; +import org.apache.log4j.Logger; + +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.kubernetes.version.KubernetesSupportedVersion; +import com.cloud.kubernetes.version.KubernetesVersionEventTypes; +import com.cloud.kubernetes.version.KubernetesVersionService; +import com.cloud.utils.exception.CloudRuntimeException; + +@APICommand(name = DeleteKubernetesSupportedVersionCmd.APINAME, + description = "Deletes a Kubernetes supported version", + responseObject = SuccessResponse.class, + entityType = {KubernetesSupportedVersion.class}, + authorized = {RoleType.Admin}) +public class 
DeleteKubernetesSupportedVersionCmd extends BaseAsyncCmd implements AdminCmd { + public static final Logger LOGGER = Logger.getLogger(DeleteKubernetesSupportedVersionCmd.class.getName()); + public static final String APINAME = "deleteKubernetesSupportedVersion"; + + @Inject + private KubernetesVersionService kubernetesVersionService; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + @Parameter(name = ApiConstants.ID, type = CommandType.UUID, + entityType = KubernetesSupportedVersionResponse.class, + description = "the ID of the Kubernetes supported version", + required = true) + private Long id; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + public Long getId() { + return id; + } + + @Override + public String getCommandName() { + return APINAME.toLowerCase() + "response"; + } + + @Override + public long getEntityOwnerId() { + return CallContext.current().getCallingAccountId(); + } + + @Override + public String getEventType() { + return KubernetesVersionEventTypes.EVENT_KUBERNETES_VERSION_DELETE; + } + + @Override + public String getEventDescription() { + return "Deleting Kubernetes supported version " + getId(); + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + @Override + public void execute() throws ServerApiException, ConcurrentOperationException { + try { + if (!kubernetesVersionService.deleteKubernetesSupportedVersion(this)) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to delete Kubernetes supported version ID: %d", getId())); + } + SuccessResponse response = new SuccessResponse(getCommandName()); + setResponseObject(response); + } catch 
(CloudRuntimeException ex) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); + } + } +} diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/admin/kubernetes/version/UpdateKubernetesSupportedVersionCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/admin/kubernetes/version/UpdateKubernetesSupportedVersionCmd.java new file mode 100644 index 00000000000..bf888c54921 --- /dev/null +++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/admin/kubernetes/version/UpdateKubernetesSupportedVersionCmd.java @@ -0,0 +1,103 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.api.command.admin.kubernetes.version; + +import javax.inject.Inject; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ResponseObject; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.command.admin.AdminCmd; +import org.apache.cloudstack.api.response.KubernetesSupportedVersionResponse; +import org.apache.log4j.Logger; + +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.kubernetes.version.KubernetesSupportedVersion; +import com.cloud.kubernetes.version.KubernetesVersionService; +import com.cloud.utils.exception.CloudRuntimeException; + +@APICommand(name = UpdateKubernetesSupportedVersionCmd.APINAME, + description = "Update a supported Kubernetes version", + responseObject = KubernetesSupportedVersionResponse.class, + responseView = ResponseObject.ResponseView.Full, + entityType = {KubernetesSupportedVersion.class}, + authorized = {RoleType.Admin}) +public class UpdateKubernetesSupportedVersionCmd extends BaseCmd implements AdminCmd { + public static final Logger LOGGER = Logger.getLogger(UpdateKubernetesSupportedVersionCmd.class.getName()); + public static final String APINAME = "updateKubernetesSupportedVersion"; + + @Inject + private KubernetesVersionService kubernetesVersionService; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + @Parameter(name = ApiConstants.ID, type = BaseCmd.CommandType.UUID, + entityType = KubernetesSupportedVersionResponse.class, + description = "the ID of the Kubernetes supported version", + required = true) + private Long id; + + @Parameter(name = 
ApiConstants.STATE, type = CommandType.STRING, + description = "the enabled or disabled state of the Kubernetes supported version", + required = true) + private String state; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + public Long getId() { + return id; + } + + public String getState() { + return state; + } + + @Override + public String getCommandName() { + return APINAME.toLowerCase() + "response"; + } + + @Override + public long getEntityOwnerId() { + return 0; + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + @Override + public void execute() throws ServerApiException, ConcurrentOperationException { + try { + KubernetesSupportedVersionResponse response = kubernetesVersionService.updateKubernetesSupportedVersion(this); + if (response == null) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to update Kubernetes supported version"); + } + response.setResponseName(getCommandName()); + setResponseObject(response); + } catch (CloudRuntimeException ex) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); + } + } +} diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/CreateKubernetesClusterCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/CreateKubernetesClusterCmd.java new file mode 100644 index 00000000000..32b07c4c36a --- /dev/null +++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/CreateKubernetesClusterCmd.java @@ -0,0 +1,297 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.command.user.kubernetes.cluster; + +import javax.inject.Inject; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.acl.SecurityChecker.AccessType; +import org.apache.cloudstack.api.ACL; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiCommandJobType; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseAsyncCreateCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ResponseObject.ResponseView; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.DomainResponse; +import org.apache.cloudstack.api.response.KubernetesClusterResponse; +import org.apache.cloudstack.api.response.KubernetesSupportedVersionResponse; +import org.apache.cloudstack.api.response.NetworkResponse; +import org.apache.cloudstack.api.response.ProjectResponse; +import org.apache.cloudstack.api.response.ServiceOfferingResponse; +import org.apache.cloudstack.api.response.ZoneResponse; +import org.apache.cloudstack.context.CallContext; +import org.apache.log4j.Logger; + +import com.cloud.kubernetes.cluster.KubernetesCluster; +import 
com.cloud.kubernetes.cluster.KubernetesClusterEventTypes; +import com.cloud.kubernetes.cluster.KubernetesClusterService; +import com.cloud.utils.exception.CloudRuntimeException; + +@APICommand(name = CreateKubernetesClusterCmd.APINAME, + description = "Creates a Kubernetes cluster", + responseObject = KubernetesClusterResponse.class, + responseView = ResponseView.Restricted, + entityType = {KubernetesCluster.class}, + requestHasSensitiveInfo = false, + responseHasSensitiveInfo = true, + authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) +public class CreateKubernetesClusterCmd extends BaseAsyncCreateCmd { + public static final Logger LOGGER = Logger.getLogger(CreateKubernetesClusterCmd.class.getName()); + public static final String APINAME = "createKubernetesCluster"; + + @Inject + public KubernetesClusterService kubernetesClusterService; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name = ApiConstants.NAME, type = CommandType.STRING, required = true, description = "name for the Kubernetes cluster") + private String name; + + @Parameter(name = ApiConstants.DESCRIPTION, type = CommandType.STRING, required = true, description = "description for the Kubernetes cluster") + private String description; + + @ACL(accessType = AccessType.UseEntry) + @Parameter(name = ApiConstants.ZONE_ID, type = CommandType.UUID, entityType = ZoneResponse.class, required = true, + description = "availability zone in which Kubernetes cluster to be launched") + private Long zoneId; + + @Parameter(name = ApiConstants.KUBERNETES_VERSION_ID, type = CommandType.UUID, entityType = KubernetesSupportedVersionResponse.class, required = true, + description = "Kubernetes version with which cluster to be launched") + private Long kubernetesVersionId; + + @ACL(accessType = AccessType.UseEntry) + @Parameter(name = 
ApiConstants.SERVICE_OFFERING_ID, type = CommandType.UUID, entityType = ServiceOfferingResponse.class, + required = true, description = "the ID of the service offering for the virtual machines in the cluster.") + private Long serviceOfferingId; + + @ACL(accessType = AccessType.UseEntry) + @Parameter(name = ApiConstants.ACCOUNT, type = CommandType.STRING, description = "an optional account for the" + + " virtual machine. Must be used with domainId.") + private String accountName; + + @ACL(accessType = AccessType.UseEntry) + @Parameter(name = ApiConstants.DOMAIN_ID, type = CommandType.UUID, entityType = DomainResponse.class, + description = "an optional domainId for the virtual machine. If the account parameter is used, domainId must also be used.") + private Long domainId; + + @ACL(accessType = AccessType.UseEntry) + @Parameter(name = ApiConstants.PROJECT_ID, type = CommandType.UUID, entityType = ProjectResponse.class, + description = "Deploy cluster for the project") + private Long projectId; + + @ACL(accessType = AccessType.UseEntry) + @Parameter(name = ApiConstants.NETWORK_ID, type = CommandType.UUID, entityType = NetworkResponse.class, + description = "Network in which Kubernetes cluster is to be launched") + private Long networkId; + + @ACL(accessType = AccessType.UseEntry) + @Parameter(name = ApiConstants.SSH_KEYPAIR, type = CommandType.STRING, + description = "name of the ssh key pair used to login to the virtual machines") + private String sshKeyPairName; + + @Parameter(name=ApiConstants.MASTER_NODES, type = CommandType.LONG, + description = "number of Kubernetes cluster master nodes, default is 1") + private Long masterNodes; + + @Parameter(name=ApiConstants.EXTERNAL_LOAD_BALANCER_IP_ADDRESS, type = CommandType.STRING, + description = "external load balancer IP address while using shared network with Kubernetes HA cluster") + private String externalLoadBalancerIpAddress; + + @Parameter(name=ApiConstants.SIZE, type = CommandType.LONG, + required = true, 
description = "number of Kubernetes cluster worker nodes") + private Long clusterSize; + + @Parameter(name = ApiConstants.DOCKER_REGISTRY_USER_NAME, type = CommandType.STRING, + description = "user name for the docker image private registry") + private String dockerRegistryUserName; + + @Parameter(name = ApiConstants.DOCKER_REGISTRY_PASSWORD, type = CommandType.STRING, + description = "password for the docker image private registry") + private String dockerRegistryPassword; + + @Parameter(name = ApiConstants.DOCKER_REGISTRY_URL, type = CommandType.STRING, + description = "URL for the docker image private registry") + private String dockerRegistryUrl; + + @Parameter(name = ApiConstants.DOCKER_REGISTRY_EMAIL, type = CommandType.STRING, + description = "email of the docker image private registry user") + private String dockerRegistryEmail; + + @Parameter(name = ApiConstants.NODE_ROOT_DISK_SIZE, type = CommandType.LONG, + description = "root disk size of root disk for each node") + private Long nodeRootDiskSize; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public String getAccountName() { + if (accountName == null) { + return CallContext.current().getCallingAccount().getAccountName(); + } + return accountName; + } + + public String getDisplayName() { + return description; + } + + public Long getDomainId() { + if (domainId == null) { + return CallContext.current().getCallingAccount().getDomainId(); + } + return domainId; + } + + public Long getServiceOfferingId() { + return serviceOfferingId; + } + + public Long getZoneId() { + return zoneId; + } + + public Long getKubernetesVersionId() { + return kubernetesVersionId; + } + + public Long getNetworkId() { return networkId;} + + public String getName() { + return name; + } + + public String getSSHKeyPairName() { + return sshKeyPairName; + } + + public Long getMasterNodes() { + if (masterNodes == 
null) { + return 1L; + } + return masterNodes; + } + + public String getExternalLoadBalancerIpAddress() { + return externalLoadBalancerIpAddress; + } + + public Long getClusterSize() { + return clusterSize; + } + + public String getDockerRegistryUserName() { + return dockerRegistryUserName; + } + + public String getDockerRegistryPassword() { + return dockerRegistryPassword; + } + + public String getDockerRegistryUrl() { + return dockerRegistryUrl; + } + + public String getDockerRegistryEmail() { + return dockerRegistryEmail; + } + + public Long getNodeRootDiskSize() { + return nodeRootDiskSize; + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public String getCommandName() { + return APINAME.toLowerCase() + "response"; + } + + public static String getResultObjectName() { + return "kubernetescluster"; + } + + @Override + public long getEntityOwnerId() { + Long accountId = _accountService.finalyzeAccountId(accountName, domainId, projectId, true); + if (accountId == null) { + return CallContext.current().getCallingAccount().getId(); + } + + return accountId; + } + + @Override + public String getEventType() { + return KubernetesClusterEventTypes.EVENT_KUBERNETES_CLUSTER_CREATE; + } + + @Override + public String getCreateEventType() { + return KubernetesClusterEventTypes.EVENT_KUBERNETES_CLUSTER_CREATE; + } + + @Override + public String getCreateEventDescription() { + return "creating Kubernetes cluster"; + } + + @Override + public String getEventDescription() { + return "creating Kubernetes cluster. 
Cluster Id: " + getEntityId(); + } + + @Override + public ApiCommandJobType getInstanceType() { + return ApiCommandJobType.VirtualMachine; + } + + @Override + public void execute() { + try { + if (!kubernetesClusterService.startKubernetesCluster(getEntityId(), true)) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to start Kubernetes cluster"); + } + KubernetesClusterResponse response = kubernetesClusterService.createKubernetesClusterResponse(getEntityId()); + response.setResponseName(getCommandName()); + setResponseObject(response); + } catch (CloudRuntimeException e) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage()); + } + } + + @Override + public void create() throws CloudRuntimeException { + try { + KubernetesCluster cluster = kubernetesClusterService.createKubernetesCluster(this); + if (cluster == null) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to create Kubernetes cluster"); + } + setEntityId(cluster.getId()); + setEntityUuid(cluster.getUuid()); + } catch (CloudRuntimeException e) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage()); + } + } +} diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/DeleteKubernetesClusterCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/DeleteKubernetesClusterCmd.java new file mode 100644 index 00000000000..4f32138758e --- /dev/null +++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/DeleteKubernetesClusterCmd.java @@ -0,0 +1,109 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.command.user.kubernetes.cluster; + +import javax.inject.Inject; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.KubernetesClusterResponse; +import org.apache.cloudstack.api.response.SuccessResponse; +import org.apache.cloudstack.context.CallContext; +import org.apache.log4j.Logger; + +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.kubernetes.cluster.KubernetesCluster; +import com.cloud.kubernetes.cluster.KubernetesClusterEventTypes; +import com.cloud.kubernetes.cluster.KubernetesClusterService; +import com.cloud.utils.exception.CloudRuntimeException; + +@APICommand(name = DeleteKubernetesClusterCmd.APINAME, + description = "Deletes a Kubernetes cluster", + responseObject = SuccessResponse.class, + entityType = {KubernetesCluster.class}, + authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) +public class DeleteKubernetesClusterCmd extends BaseAsyncCmd { + public static final Logger LOGGER = Logger.getLogger(DeleteKubernetesClusterCmd.class.getName()); + public 
static final String APINAME = "deleteKubernetesCluster"; + + @Inject + public KubernetesClusterService kubernetesClusterService; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name = ApiConstants.ID, + type = CommandType.UUID, + entityType = KubernetesClusterResponse.class, + required = true, + description = "the ID of the Kubernetes cluster") + private Long id; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public Long getId() { + return id; + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public void execute() throws ServerApiException, ConcurrentOperationException { + try { + if (!kubernetesClusterService.deleteKubernetesCluster(id)) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to delete Kubernetes cluster ID: %d", getId())); + } + SuccessResponse response = new SuccessResponse(getCommandName()); + setResponseObject(response); + } catch (CloudRuntimeException e) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage()); + } + } + + @Override + public String getCommandName() { + return APINAME.toLowerCase() + "response"; + } + + @Override + public long getEntityOwnerId() { + return CallContext.current().getCallingAccount().getId(); + } + + + @Override + public String getEventType() { + return KubernetesClusterEventTypes.EVENT_KUBERNETES_CLUSTER_DELETE; + } + + @Override + public String getEventDescription() { + KubernetesCluster cluster = _entityMgr.findById(KubernetesCluster.class, getId()); + return String.format("Deleting Kubernetes cluster ID: %s", cluster.getUuid()); + } + +} diff --git 
a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/GetKubernetesClusterConfigCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/GetKubernetesClusterConfigCmd.java new file mode 100644 index 00000000000..c88f0eb17df --- /dev/null +++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/GetKubernetesClusterConfigCmd.java @@ -0,0 +1,98 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.api.command.user.kubernetes.cluster; + +import javax.inject.Inject; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ResponseObject; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.KubernetesClusterConfigResponse; +import org.apache.cloudstack.api.response.KubernetesClusterResponse; +import org.apache.cloudstack.context.CallContext; +import org.apache.log4j.Logger; + +import com.cloud.kubernetes.cluster.KubernetesClusterService; +import com.cloud.user.Account; +import com.cloud.utils.exception.CloudRuntimeException; + + +@APICommand(name = GetKubernetesClusterConfigCmd.APINAME, + description = "Get Kubernetes cluster config", + responseObject = KubernetesClusterConfigResponse.class, + responseView = ResponseObject.ResponseView.Restricted, + requestHasSensitiveInfo = false, + responseHasSensitiveInfo = true, + authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) +public class GetKubernetesClusterConfigCmd extends BaseCmd { + public static final Logger LOGGER = Logger.getLogger(GetKubernetesClusterConfigCmd.class.getName()); + public static final String APINAME = "getKubernetesClusterConfig"; + + @Inject + public KubernetesClusterService kubernetesClusterService; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + @Parameter(name = ApiConstants.ID, type = CommandType.UUID, + entityType = KubernetesClusterResponse.class, + description = "the ID of the Kubernetes cluster") + private Long id; + + ///////////////////////////////////////////////////// + /////////////////// 
Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public Long getId() { + return id; + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public long getEntityOwnerId() { + Account account = CallContext.current().getCallingAccount(); + if (account != null) { + return account.getId(); + } + + return Account.ACCOUNT_ID_SYSTEM; // no account info given, parent this command to SYSTEM so ERROR events are tracked + } + + @Override + public String getCommandName() { + return APINAME.toLowerCase() + "response"; + } + + @Override + public void execute() throws ServerApiException { + try { + KubernetesClusterConfigResponse response = kubernetesClusterService.getKubernetesClusterConfig(this); + response.setResponseName(getCommandName()); + setResponseObject(response); + } catch (CloudRuntimeException e) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage()); + } + } +} diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/ListKubernetesClustersCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/ListKubernetesClustersCmd.java new file mode 100644 index 00000000000..ef960d532ee --- /dev/null +++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/ListKubernetesClustersCmd.java @@ -0,0 +1,100 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.command.user.kubernetes.cluster; + +import javax.inject.Inject; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseListProjectAndAccountResourcesCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ResponseObject.ResponseView; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.KubernetesClusterResponse; +import org.apache.cloudstack.api.response.ListResponse; +import org.apache.log4j.Logger; + +import com.cloud.kubernetes.cluster.KubernetesClusterService; +import com.cloud.utils.exception.CloudRuntimeException; + +@APICommand(name = ListKubernetesClustersCmd.APINAME, + description = "Lists Kubernetes clusters", + responseObject = KubernetesClusterResponse.class, + responseView = ResponseView.Restricted, + requestHasSensitiveInfo = false, + responseHasSensitiveInfo = true, + authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) +public class ListKubernetesClustersCmd extends BaseListProjectAndAccountResourcesCmd { + public static final Logger LOGGER = Logger.getLogger(ListKubernetesClustersCmd.class.getName()); + public static final String APINAME = "listKubernetesClusters"; + + @Inject + public KubernetesClusterService kubernetesClusterService; + + ///////////////////////////////////////////////////// + //////////////// API 
parameters ///////////////////// + ///////////////////////////////////////////////////// + @Parameter(name = ApiConstants.ID, type = CommandType.UUID, + entityType = KubernetesClusterResponse.class, + description = "the ID of the Kubernetes cluster") + private Long id; + + @Parameter(name = ApiConstants.STATE, type = CommandType.STRING, description = "state of the Kubernetes cluster") + private String state; + + @Parameter(name = ApiConstants.NAME, type = CommandType.STRING, description = "name of the Kubernetes cluster" + + " (a substring match is made against the parameter value, data for all matching Kubernetes clusters will be returned)") + private String name; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public Long getId() { + return id; + } + + public String getState() { + return state; + } + + public String getName() { + return name; + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public String getCommandName() { + return APINAME.toLowerCase() + "response"; + } + + @Override + public void execute() throws ServerApiException { + try { + ListResponse<KubernetesClusterResponse> response = kubernetesClusterService.listKubernetesClusters(this); + response.setResponseName(getCommandName()); + setResponseObject(response); + } catch (CloudRuntimeException e) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage()); + } + } +} diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/ScaleKubernetesClusterCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/ScaleKubernetesClusterCmd.java new file mode 100644 index 00000000000..90ccfa41917 --- /dev/null +++ 
b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/ScaleKubernetesClusterCmd.java @@ -0,0 +1,128 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.command.user.kubernetes.cluster; + +import javax.inject.Inject; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.acl.SecurityChecker; +import org.apache.cloudstack.api.ACL; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ResponseObject; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.KubernetesClusterResponse; +import org.apache.cloudstack.api.response.ServiceOfferingResponse; +import org.apache.cloudstack.context.CallContext; +import org.apache.log4j.Logger; + +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.kubernetes.cluster.KubernetesCluster; +import com.cloud.kubernetes.cluster.KubernetesClusterEventTypes; +import 
com.cloud.kubernetes.cluster.KubernetesClusterService; +import com.cloud.utils.exception.CloudRuntimeException; + +@APICommand(name = ScaleKubernetesClusterCmd.APINAME, + description = "Scales a created, running or stopped Kubernetes cluster", + responseObject = KubernetesClusterResponse.class, + responseView = ResponseObject.ResponseView.Restricted, + entityType = {KubernetesCluster.class}, + requestHasSensitiveInfo = false, + responseHasSensitiveInfo = true, + authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) +public class ScaleKubernetesClusterCmd extends BaseAsyncCmd { + public static final Logger LOGGER = Logger.getLogger(ScaleKubernetesClusterCmd.class.getName()); + public static final String APINAME = "scaleKubernetesCluster"; + + @Inject + public KubernetesClusterService kubernetesClusterService; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + @Parameter(name = ApiConstants.ID, type = CommandType.UUID, + entityType = KubernetesClusterResponse.class, + description = "the ID of the Kubernetes cluster") + private Long id; + + @ACL(accessType = SecurityChecker.AccessType.UseEntry) + @Parameter(name = ApiConstants.SERVICE_OFFERING_ID, type = CommandType.UUID, entityType = ServiceOfferingResponse.class, + description = "the ID of the service offering for the virtual machines in the cluster.") + private Long serviceOfferingId; + + @Parameter(name=ApiConstants.SIZE, type = CommandType.LONG, + description = "number of Kubernetes cluster nodes") + private Long clusterSize; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public Long getId() { + return id; + } + + public Long getServiceOfferingId() { + return serviceOfferingId; + } + + public Long getClusterSize() { + return
clusterSize; + } + + @Override + public String getEventType() { + return KubernetesClusterEventTypes.EVENT_KUBERNETES_CLUSTER_SCALE; + } + + @Override + public String getEventDescription() { + KubernetesCluster cluster = _entityMgr.findById(KubernetesCluster.class, getId()); + return String.format("Scaling Kubernetes cluster ID: %s", cluster.getUuid()); + } + + @Override + public String getCommandName() { + return APINAME.toLowerCase() + "response"; + } + + @Override + public long getEntityOwnerId() { + return CallContext.current().getCallingAccount().getId(); + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public void execute() throws ServerApiException, ConcurrentOperationException { + try { + if (!kubernetesClusterService.scaleKubernetesCluster(this)) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to scale Kubernetes cluster ID: %d", getId())); + } + final KubernetesClusterResponse response = kubernetesClusterService.createKubernetesClusterResponse(getId()); + response.setResponseName(getCommandName()); + setResponseObject(response); + } catch (CloudRuntimeException ex) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); + } + } +} diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/StartKubernetesClusterCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/StartKubernetesClusterCmd.java new file mode 100644 index 00000000000..1ce2fe09c10 --- /dev/null +++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/StartKubernetesClusterCmd.java @@ -0,0 +1,120 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license 
agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.command.user.kubernetes.cluster; + +import javax.inject.Inject; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ResponseObject; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.KubernetesClusterResponse; +import org.apache.cloudstack.context.CallContext; +import org.apache.log4j.Logger; + +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.kubernetes.cluster.KubernetesCluster; +import com.cloud.kubernetes.cluster.KubernetesClusterEventTypes; +import com.cloud.kubernetes.cluster.KubernetesClusterService; +import com.cloud.utils.exception.CloudRuntimeException; + +@APICommand(name = StartKubernetesClusterCmd.APINAME, description = "Starts a stopped Kubernetes cluster", + responseObject = KubernetesClusterResponse.class, + responseView = ResponseObject.ResponseView.Restricted, + entityType = {KubernetesCluster.class}, + requestHasSensitiveInfo = false, + responseHasSensitiveInfo = true, + authorized 
= {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) +public class StartKubernetesClusterCmd extends BaseAsyncCmd { + public static final Logger LOGGER = Logger.getLogger(StartKubernetesClusterCmd.class.getName()); + public static final String APINAME = "startKubernetesCluster"; + + @Inject + public KubernetesClusterService kubernetesClusterService; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + @Parameter(name = ApiConstants.ID, type = CommandType.UUID, + entityType = KubernetesClusterResponse.class, required = true, + description = "the ID of the Kubernetes cluster") + private Long id; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public Long getId() { + return id; + } + + @Override + public String getEventType() { + return KubernetesClusterEventTypes.EVENT_KUBERNETES_CLUSTER_START; + } + + @Override + public String getEventDescription() { + KubernetesCluster cluster = _entityMgr.findById(KubernetesCluster.class, getId()); + return String.format("Starting Kubernetes cluster ID: %s", cluster.getUuid()); + } + + @Override + public String getCommandName() { + return APINAME.toLowerCase() + "response"; + } + + @Override + public long getEntityOwnerId() { + return CallContext.current().getCallingAccount().getId(); + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + public KubernetesCluster validateRequest() { + if (getId() == null || getId() < 1L) { + throw new ServerApiException(ApiErrorCode.PARAM_ERROR, "Invalid Kubernetes cluster ID provided"); + } + final KubernetesCluster kubernetesCluster = kubernetesClusterService.findById(getId()); + if (kubernetesCluster == 
null) { + throw new ServerApiException(ApiErrorCode.PARAM_ERROR, "Given Kubernetes cluster was not found"); + } + return kubernetesCluster; + } + + @Override + public void execute() throws ServerApiException, ConcurrentOperationException { + final KubernetesCluster kubernetesCluster = validateRequest(); + try { + if (!kubernetesClusterService.startKubernetesCluster(kubernetesCluster.getId(), false)) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to start Kubernetes cluster ID: %d", getId())); + } + final KubernetesClusterResponse response = kubernetesClusterService.createKubernetesClusterResponse(kubernetesCluster.getId()); + response.setResponseName(getCommandName()); + setResponseObject(response); + } catch (CloudRuntimeException ex) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); + } + } + +} diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/StopKubernetesClusterCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/StopKubernetesClusterCmd.java new file mode 100644 index 00000000000..ba2649f863e --- /dev/null +++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/StopKubernetesClusterCmd.java @@ -0,0 +1,108 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.command.user.kubernetes.cluster; + +import javax.inject.Inject; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ResponseObject; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.KubernetesClusterResponse; +import org.apache.cloudstack.api.response.SuccessResponse; +import org.apache.cloudstack.context.CallContext; +import org.apache.log4j.Logger; + +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.kubernetes.cluster.KubernetesCluster; +import com.cloud.kubernetes.cluster.KubernetesClusterEventTypes; +import com.cloud.kubernetes.cluster.KubernetesClusterService; +import com.cloud.utils.exception.CloudRuntimeException; + +@APICommand(name = StopKubernetesClusterCmd.APINAME, description = "Stops a running Kubernetes cluster", + responseObject = SuccessResponse.class, + responseView = ResponseObject.ResponseView.Restricted, + entityType = {KubernetesCluster.class}, + requestHasSensitiveInfo = false, + responseHasSensitiveInfo = true, + authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) +public class StopKubernetesClusterCmd extends BaseAsyncCmd { + public static final Logger LOGGER = Logger.getLogger(StopKubernetesClusterCmd.class.getName()); + 
public static final String APINAME = "stopKubernetesCluster"; + + @Inject + public KubernetesClusterService kubernetesClusterService; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + @Parameter(name = ApiConstants.ID, type = CommandType.UUID, + entityType = KubernetesClusterResponse.class, required = true, + description = "the ID of the Kubernetes cluster") + private Long id; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public Long getId() { + return id; + } + + @Override + public String getEventType() { + return KubernetesClusterEventTypes.EVENT_KUBERNETES_CLUSTER_STOP; + } + + @Override + public String getEventDescription() { + KubernetesCluster cluster = _entityMgr.findById(KubernetesCluster.class, getId()); + return String.format("Stopping Kubernetes cluster ID: %s", cluster.getUuid()); + } + + @Override + public String getCommandName() { + return APINAME.toLowerCase() + "response"; + } + + @Override + public long getEntityOwnerId() { + return CallContext.current().getCallingAccount().getId(); + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public void execute() throws ServerApiException, ConcurrentOperationException { + try { + if (!kubernetesClusterService.stopKubernetesCluster(getId())) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to start Kubernetes cluster ID: %d", getId())); + } + final SuccessResponse response = new SuccessResponse(getCommandName()); + setResponseObject(response); + } catch (CloudRuntimeException ex) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); + } + } + +} diff --git 
a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/UpgradeKubernetesClusterCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/UpgradeKubernetesClusterCmd.java new file mode 100644 index 00000000000..2c99b005ff4 --- /dev/null +++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/UpgradeKubernetesClusterCmd.java @@ -0,0 +1,118 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.api.command.user.kubernetes.cluster; + +import javax.inject.Inject; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ResponseObject; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.KubernetesClusterResponse; +import org.apache.cloudstack.api.response.KubernetesSupportedVersionResponse; +import org.apache.cloudstack.context.CallContext; +import org.apache.log4j.Logger; + +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.kubernetes.cluster.KubernetesCluster; +import com.cloud.kubernetes.cluster.KubernetesClusterEventTypes; +import com.cloud.kubernetes.cluster.KubernetesClusterService; +import com.cloud.utils.exception.CloudRuntimeException; + +@APICommand(name = UpgradeKubernetesClusterCmd.APINAME, description = "Upgrades a running Kubernetes cluster", + responseObject = KubernetesClusterResponse.class, + responseView = ResponseObject.ResponseView.Restricted, + entityType = {KubernetesCluster.class}, + requestHasSensitiveInfo = false, + responseHasSensitiveInfo = true, + authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) +public class UpgradeKubernetesClusterCmd extends BaseAsyncCmd { + public static final Logger LOGGER = Logger.getLogger(UpgradeKubernetesClusterCmd.class.getName()); + public static final String APINAME = "upgradeKubernetesCluster"; + + @Inject + public KubernetesClusterService kubernetesClusterService; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + @Parameter(name = ApiConstants.ID, type = CommandType.UUID, + entityType 
= KubernetesClusterResponse.class, required = true, + description = "the ID of the Kubernetes cluster") + private Long id; + + @Parameter(name = ApiConstants.KUBERNETES_VERSION_ID, type = CommandType.UUID, + entityType = KubernetesSupportedVersionResponse.class, required = true, + description = "the ID of the Kubernetes version for upgrade") + private Long kubernetesVersionId; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public Long getId() { + return id; + } + + public Long getKubernetesVersionId() { + return kubernetesVersionId; + } + + @Override + public String getEventType() { + return KubernetesClusterEventTypes.EVENT_KUBERNETES_CLUSTER_UPGRADE; + } + + @Override + public String getEventDescription() { + KubernetesCluster cluster = _entityMgr.findById(KubernetesCluster.class, getId()); + return String.format("Upgrading Kubernetes cluster ID: %s", cluster.getUuid()); + } + + @Override + public String getCommandName() { + return APINAME.toLowerCase() + "response"; + } + + @Override + public long getEntityOwnerId() { + return CallContext.current().getCallingAccount().getId(); + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public void execute() throws ServerApiException, ConcurrentOperationException { + try { + if (!kubernetesClusterService.upgradeKubernetesCluster(this)) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to upgrade Kubernetes cluster ID: %d", getId())); + } + final KubernetesClusterResponse response = kubernetesClusterService.createKubernetesClusterResponse(getId()); + response.setResponseName(getCommandName()); + setResponseObject(response); + } catch (CloudRuntimeException ex) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, 
ex.getMessage()); + } + } +} diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/version/ListKubernetesSupportedVersionsCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/version/ListKubernetesSupportedVersionsCmd.java new file mode 100644 index 00000000000..efa029a1de5 --- /dev/null +++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/version/ListKubernetesSupportedVersionsCmd.java @@ -0,0 +1,109 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.api.command.user.kubernetes.version; + +import javax.inject.Inject; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseListCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ResponseObject; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.KubernetesSupportedVersionResponse; +import org.apache.cloudstack.api.response.ListResponse; +import org.apache.cloudstack.api.response.ZoneResponse; +import org.apache.log4j.Logger; + +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.kubernetes.version.KubernetesVersionService; +import com.google.common.base.Strings; + +@APICommand(name = ListKubernetesSupportedVersionsCmd.APINAME, + description = "Lists container clusters", + responseObject = KubernetesSupportedVersionResponse.class, + responseView = ResponseObject.ResponseView.Restricted, + authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) +public class ListKubernetesSupportedVersionsCmd extends BaseListCmd { + public static final Logger LOGGER = Logger.getLogger(ListKubernetesSupportedVersionsCmd.class.getName()); + public static final String APINAME = "listKubernetesSupportedVersions"; + + @Inject + private KubernetesVersionService kubernetesVersionService; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + @Parameter(name = ApiConstants.ID, type = CommandType.UUID, + entityType = KubernetesSupportedVersionResponse.class, + description = "the ID of the Kubernetes supported version") + private Long id; + + @Parameter(name = ApiConstants.ZONE_ID, type = CommandType.UUID, + entityType = ZoneResponse.class, + description = "the ID of the zone in which 
Kubernetes supported version will be available") + private Long zoneId; + + @Parameter(name = ApiConstants.MIN_SEMANTIC_VERSION, type = CommandType.STRING, + description = "the minimum semantic version for the Kubernetes supported version to be listed") + private String minimumSemanticVersion; + + @Parameter(name = ApiConstants.MIN_KUBERNETES_VERSION_ID, type = CommandType.UUID, + entityType = KubernetesSupportedVersionResponse.class, + description = "the ID of the minimum Kubernetes supported version") + private Long minimumKubernetesVersionId; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + public Long getId() { + return id; + } + + public Long getZoneId() { + return zoneId; + } + + public String getMinimumSemanticVersion() { + if(!Strings.isNullOrEmpty(minimumSemanticVersion) && + !minimumSemanticVersion.matches("[0-9]+(\\.[0-9]+)*")) { + throw new IllegalArgumentException("Invalid version format"); + } + return minimumSemanticVersion; + } + + public Long getMinimumKubernetesVersionId() { + return minimumKubernetesVersionId; + } + + @Override + public String getCommandName() { + return APINAME.toLowerCase() + "response"; + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + @Override + public void execute() throws ServerApiException, ConcurrentOperationException { + ListResponse response = kubernetesVersionService.listKubernetesSupportedVersions(this); + response.setResponseName(getCommandName()); + setResponseObject(response); + } +} diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesClusterConfigResponse.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesClusterConfigResponse.java new file mode 100644 
index 00000000000..0308518b998 --- /dev/null +++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesClusterConfigResponse.java @@ -0,0 +1,61 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.api.response; + +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseResponse; + +import com.cloud.serializer.Param; +import com.google.gson.annotations.SerializedName; + +public class KubernetesClusterConfigResponse extends BaseResponse { + @SerializedName(ApiConstants.ID) + @Param(description = "the id of the container cluster") + private String id; + + @SerializedName(ApiConstants.NAME) + @Param(description = "Name of the container cluster") + private String name; + + @SerializedName("configdata") + @Param(description = "the config data of the cluster") + private String configData; + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getConfigData() { + return configData; + } + + public void setConfigData(String configData) { + this.configData = configData; + } +} diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesClusterResponse.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesClusterResponse.java new file mode 100644 index 00000000000..2c6fc8191e5 --- /dev/null +++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesClusterResponse.java @@ -0,0 +1,329 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api.response;

import java.util.List;

import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.BaseResponse;
import org.apache.cloudstack.api.EntityReference;

import com.cloud.kubernetes.cluster.KubernetesCluster;
import com.cloud.serializer.Param;
import com.google.gson.annotations.SerializedName;

/**
 * API response view of a Kubernetes cluster. Fields are serialized by gson via
 * {@link SerializedName}; they are populated through the setters by the service
 * layer (hence the {@code unused} suppression).
 */
@SuppressWarnings("unused")
@EntityReference(value = {KubernetesCluster.class})
public class KubernetesClusterResponse extends BaseResponse implements ControlledEntityResponse {
    @SerializedName(ApiConstants.ID)
    @Param(description = "the id of the Kubernetes cluster")
    private String id;

    @SerializedName(ApiConstants.NAME)
    @Param(description = "the name of the Kubernetes cluster")
    private String name;

    @SerializedName(ApiConstants.DESCRIPTION)
    @Param(description = "the description of the Kubernetes cluster")
    private String description;

    // FIX: description said "the name of the zone" for the zone ID field.
    @SerializedName(ApiConstants.ZONE_ID)
    @Param(description = "the ID of the zone of the Kubernetes cluster")
    private String zoneId;

    @SerializedName(ApiConstants.ZONE_NAME)
    @Param(description = "the name of the zone of the Kubernetes cluster")
    private String zoneName;

    @SerializedName(ApiConstants.SERVICE_OFFERING_ID)
    @Param(description = "the ID of the service offering of the Kubernetes cluster")
    private String serviceOfferingId;

    @SerializedName("serviceofferingname")
    @Param(description = "the name of the service offering of the Kubernetes cluster")
    private String serviceOfferingName;

    @SerializedName(ApiConstants.TEMPLATE_ID)
    @Param(description = "the ID of the template of the Kubernetes cluster")
    private String templateId;

    @SerializedName(ApiConstants.NETWORK_ID)
    @Param(description = "the ID of the network of the Kubernetes cluster")
    private String networkId;

    @SerializedName(ApiConstants.ASSOCIATED_NETWORK_NAME)
    @Param(description = "the name of the network of the Kubernetes cluster")
    private String associatedNetworkName;

    @SerializedName(ApiConstants.KUBERNETES_VERSION_ID)
    @Param(description = "the ID of the Kubernetes version for the Kubernetes cluster")
    private String kubernetesVersionId;

    @SerializedName(ApiConstants.KUBERNETES_VERSION_NAME)
    @Param(description = "the name of the Kubernetes version for the Kubernetes cluster")
    private String kubernetesVersionName;

    @SerializedName(ApiConstants.ACCOUNT)
    @Param(description = "the account associated with the Kubernetes cluster")
    private String accountName;

    @SerializedName(ApiConstants.PROJECT_ID)
    @Param(description = "the project id of the Kubernetes cluster")
    private String projectId;

    @SerializedName(ApiConstants.PROJECT)
    @Param(description = "the project name of the Kubernetes cluster")
    private String projectName;

    @SerializedName(ApiConstants.DOMAIN_ID)
    @Param(description = "the ID of the domain in which the Kubernetes cluster exists")
    private String domainId;

    @SerializedName(ApiConstants.DOMAIN)
    @Param(description = "the name of the domain in which the Kubernetes cluster exists")
    private String domainName;

    @SerializedName(ApiConstants.SSH_KEYPAIR)
    @Param(description = "keypair details")
    private String keypair;

    @SerializedName(ApiConstants.MASTER_NODES)
    @Param(description = "the master nodes count for the Kubernetes cluster")
    private Long masterNodes;

    @SerializedName(ApiConstants.SIZE)
    @Param(description = "the size (worker nodes count) of the Kubernetes cluster")
    private Long clusterSize;

    @SerializedName(ApiConstants.STATE)
    @Param(description = "the state of the Kubernetes cluster")
    private String state;

    @SerializedName(ApiConstants.CPU_NUMBER)
    @Param(description = "the cpu cores of the Kubernetes cluster")
    private String cores;

    // FIX: minor grammar — "the memory the Kubernetes cluster".
    @SerializedName(ApiConstants.MEMORY)
    @Param(description = "the memory of the Kubernetes cluster")
    private String memory;

    @SerializedName(ApiConstants.END_POINT)
    @Param(description = "URL end point for the Kubernetes cluster")
    private String endpoint;

    @SerializedName(ApiConstants.CONSOLE_END_POINT)
    @Param(description = "URL end point for the Kubernetes cluster dashboard UI")
    private String consoleEndpoint;

    // FIX: raw List in the diff — element type restored (the same extraction
    // stripped all angle-bracketed content, e.g. the Spring XML below).
    @SerializedName(ApiConstants.VIRTUAL_MACHINE_IDS)
    @Param(description = "the list of virtualmachine IDs associated with this Kubernetes cluster")
    private List<String> virtualMachineIds;

    public KubernetesClusterResponse() {
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    public String getDescription() {
        return description;
    }

    public void setDescription(String description) {
        this.description = description;
    }

    public String getZoneId() {
        return zoneId;
    }

    public void setZoneId(String zoneId) {
        this.zoneId = zoneId;
    }

    public String getZoneName() {
        return zoneName;
    }

    public void setZoneName(String zoneName) {
        this.zoneName = zoneName;
    }

    public String getServiceOfferingId() {
        return serviceOfferingId;
    }

    public void setServiceOfferingId(String serviceOfferingId) {
        this.serviceOfferingId = serviceOfferingId;
    }

    public String getTemplateId() {
        return templateId;
    }

    public void setTemplateId(String templateId) {
        this.templateId = templateId;
    }

    public String getNetworkId() {
        return networkId;
    }

    public void setNetworkId(String networkId) {
        this.networkId = networkId;
    }

    public String getAssociatedNetworkName() {
        return associatedNetworkName;
    }

    public void setAssociatedNetworkName(String associatedNetworkName) {
        this.associatedNetworkName = associatedNetworkName;
    }

    public String getKubernetesVersionId() {
        return kubernetesVersionId;
    }

    public void setKubernetesVersionId(String kubernetesVersionId) {
        this.kubernetesVersionId = kubernetesVersionId;
    }

    public String getKubernetesVersionName() {
        return kubernetesVersionName;
    }

    public void setKubernetesVersionName(String kubernetesVersionName) {
        this.kubernetesVersionName = kubernetesVersionName;
    }

    public String getProjectId() {
        return projectId;
    }

    @Override
    public void setAccountName(String accountName) {
        this.accountName = accountName;
    }

    @Override
    public void setProjectId(String projectId) {
        this.projectId = projectId;
    }

    @Override
    public void setProjectName(String projectName) {
        this.projectName = projectName;
    }

    @Override
    public void setDomainId(String domainId) {
        this.domainId = domainId;
    }

    @Override
    public void setDomainName(String domainName) {
        this.domainName = domainName;
    }

    public String getKeypair() {
        return keypair;
    }

    public void setKeypair(String keypair) {
        this.keypair = keypair;
    }

    public Long getMasterNodes() {
        return masterNodes;
    }

    public void setMasterNodes(Long masterNodes) {
        this.masterNodes = masterNodes;
    }

    public Long getClusterSize() {
        return clusterSize;
    }

    public void setClusterSize(Long clusterSize) {
        this.clusterSize = clusterSize;
    }

    public String getCores() {
        return cores;
    }

    public void setCores(String cores) {
        this.cores = cores;
    }

    public String getMemory() {
        return memory;
    }

    public void setMemory(String memory) {
        this.memory = memory;
    }

    public String getState() {
        return state;
    }

    public void setState(String state) {
        this.state = state;
    }

    public String getEndpoint() {
        return endpoint;
    }

    public void setEndpoint(String endpoint) {
        this.endpoint = endpoint;
    }

    // Added accessors for consoleEndpoint — the field previously had none,
    // so the service layer could not populate it. Backward compatible.
    public String getConsoleEndpoint() {
        return consoleEndpoint;
    }

    public void setConsoleEndpoint(String consoleEndpoint) {
        this.consoleEndpoint = consoleEndpoint;
    }

    public String getId() {
        return this.id;
    }

    public void setId(String id) {
        this.id = id;
    }

    public String getServiceOfferingName() {
        return serviceOfferingName;
    }

    public void setServiceOfferingName(String serviceOfferingName) {
        this.serviceOfferingName = serviceOfferingName;
    }

    // FIX: removed a stray ';' empty declaration that followed this setter.
    public void setVirtualMachineIds(List<String> virtualMachineIds) {
        this.virtualMachineIds = virtualMachineIds;
    }

    public List<String> getVirtualMachineIds() {
        return virtualMachineIds;
    }
}
+ +package org.apache.cloudstack.api.response; + +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseResponse; +import org.apache.cloudstack.api.EntityReference; + +import com.cloud.kubernetes.version.KubernetesSupportedVersion; +import com.cloud.serializer.Param; +import com.google.gson.annotations.SerializedName; + +@SuppressWarnings("unused") +@EntityReference(value = {KubernetesSupportedVersion.class}) +public class KubernetesSupportedVersionResponse extends BaseResponse { + @SerializedName(ApiConstants.ID) + @Param(description = "the id of the Kubernetes supported version") + private String id; + + @SerializedName(ApiConstants.NAME) + @Param(description = "Name of the Kubernetes supported version") + private String name; + + @SerializedName(ApiConstants.SEMANTIC_VERSION) + @Param(description = "Kubernetes semantic version") + private String semanticVersion; + + @SerializedName(ApiConstants.ISO_ID) + @Param(description = "the id of the binaries ISO for Kubernetes supported version") + private String isoId; + + @SerializedName(ApiConstants.ISO_NAME) + @Param(description = "the name of the binaries ISO for Kubernetes supported version") + private String isoName; + + @SerializedName(ApiConstants.ISO_STATE) + @Param(description = "the state of the binaries ISO for Kubernetes supported version") + private String isoState; + + @SerializedName(ApiConstants.ZONE_ID) + @Param(description = "the id of the zone in which Kubernetes supported version is available") + private String zoneId; + + @SerializedName(ApiConstants.ZONE_NAME) + @Param(description = "the name of the zone in which Kubernetes supported version is available") + private String zoneName; + + @SerializedName(ApiConstants.SUPPORTS_HA) + @Param(description = "whether Kubernetes supported version supports HA, multi-master") + private Boolean supportsHA; + + @SerializedName(ApiConstants.STATE) + @Param(description = "the enabled or disabled state of the Kubernetes supported 
version") + private String state; + + @SerializedName(ApiConstants.MIN_CPU_NUMBER) + @Param(description = "the minimum number of CPUs needed for the Kubernetes supported version") + private Integer minimumCpu; + + @SerializedName(ApiConstants.MIN_MEMORY) + @Param(description = "the minimum RAM size in MB needed for the Kubernetes supported version") + private Integer minimumRamSize; + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getSemanticVersion() { + return semanticVersion; + } + + public void setSemanticVersion(String semanticVersion) { + this.semanticVersion = semanticVersion; + } + + public String getIsoId() { + return isoId; + } + + public void setIsoId(String isoId) { + this.isoId = isoId; + } + + public String getIsoName() { + return isoName; + } + + public void setIsoName(String isoName) { + this.isoName = isoName; + } + + public String getIsoState() { + return isoState; + } + + public void setIsoState(String isoState) { + this.isoState = isoState; + } + + public String getZoneId() { + return zoneId; + } + + public void setZoneId(String zoneId) { + this.zoneId = zoneId; + } + + public String getZoneName() { + return zoneName; + } + + public void setZoneName(String zoneName) { + this.zoneName = zoneName; + } + + public Boolean isSupportsHA() { + return supportsHA; + } + + public void setSupportsHA(Boolean supportsHA) { + this.supportsHA = supportsHA; + } + + public String getState() { + return state; + } + + public void setState(String state) { + this.state = state; + } + + public Integer getMinimumCpu() { + return minimumCpu; + } + + public void setMinimumCpu(Integer minimumCpu) { + this.minimumCpu = minimumCpu; + } + + public Integer getMinimumRamSize() { + return minimumRamSize; + } + + public void setMinimumRamSize(Integer minimumRamSize) { + this.minimumRamSize = 
minimumRamSize; + } +} diff --git a/plugins/integrations/kubernetes-service/src/main/resources/META-INF/cloudstack/kubernetes-service/module.properties b/plugins/integrations/kubernetes-service/src/main/resources/META-INF/cloudstack/kubernetes-service/module.properties new file mode 100644 index 00000000000..e6f02da6586 --- /dev/null +++ b/plugins/integrations/kubernetes-service/src/main/resources/META-INF/cloudstack/kubernetes-service/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+name=kubernetes-service +parent=compute diff --git a/plugins/integrations/kubernetes-service/src/main/resources/META-INF/cloudstack/kubernetes-service/spring-kubernetes-service-context.xml b/plugins/integrations/kubernetes-service/src/main/resources/META-INF/cloudstack/kubernetes-service/spring-kubernetes-service-context.xml new file mode 100644 index 00000000000..12f2a46a8ac --- /dev/null +++ b/plugins/integrations/kubernetes-service/src/main/resources/META-INF/cloudstack/kubernetes-service/spring-kubernetes-service-context.xml @@ -0,0 +1,37 @@ + + + + + + + + + + + diff --git a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master-add.yml b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master-add.yml new file mode 100644 index 00000000000..787ea97491c --- /dev/null +++ b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master-add.yml @@ -0,0 +1,237 @@ +#cloud-config +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +--- +ssh_authorized_keys: + {{ k8s.ssh.pub.key }} + +write-files: + - path: /opt/bin/setup-kube-system + permissions: 0700 + owner: root:root + content: | + #!/bin/bash -e + + if [[ -f "/home/core/success" ]]; then + echo "Already provisioned!" 
+ exit 0 + fi + + ISO_MOUNT_DIR=/mnt/k8sdisk + BINARIES_DIR=${ISO_MOUNT_DIR}/ + K8S_CONFIG_SCRIPTS_COPY_DIR=/tmp/k8sconfigscripts/ + ATTEMPT_ONLINE_INSTALL=false + setup_complete=false + + OFFLINE_INSTALL_ATTEMPT_SLEEP=15 + MAX_OFFLINE_INSTALL_ATTEMPTS=100 + offline_attempts=1 + MAX_SETUP_CRUCIAL_CMD_ATTEMPTS=3 + EJECT_ISO_FROM_OS={{ k8s.eject.iso }} + crucial_cmd_attempts=1 + iso_drive_path="" + while true; do + if (( "$offline_attempts" > "$MAX_OFFLINE_INSTALL_ATTEMPTS" )); then + echo "Warning: Offline install timed out!" + break + fi + set +e + output=`blkid -o device -t TYPE=iso9660` + set -e + if [ "$output" != "" ]; then + while read -r line; do + if [ ! -d "${ISO_MOUNT_DIR}" ]; then + mkdir "${ISO_MOUNT_DIR}" + fi + retval=0 + set +e + mount -o ro "${line}" "${ISO_MOUNT_DIR}" + retval=$? + set -e + if [ $retval -eq 0 ]; then + if [ -d "$BINARIES_DIR" ]; then + iso_drive_path="${line}" + break + else + umount "${line}" && rmdir "${ISO_MOUNT_DIR}" + fi + fi + done <<< "$output" + fi + if [ -d "$BINARIES_DIR" ]; then + break + fi + echo "Waiting for Binaries directory $BINARIES_DIR to be available, sleeping for $OFFLINE_INSTALL_ATTEMPT_SLEEP seconds, attempt: $offline_attempts" + sleep $OFFLINE_INSTALL_ATTEMPT_SLEEP + offline_attempts=$[$offline_attempts + 1] + done + + if [[ "$PATH" != *:/opt/bin && "$PATH" != *:/opt/bin:* ]]; then + export PATH=$PATH:/opt/bin + fi + + if [ -d "$BINARIES_DIR" ]; then + ### Binaries available offline ### + echo "Installing binaries from ${BINARIES_DIR}" + mkdir -p /opt/cni/bin + tar -f "${BINARIES_DIR}/cni/cni-plugins-amd64.tgz" -C /opt/cni/bin -xz + + mkdir -p /opt/bin + tar -f "${BINARIES_DIR}/cri-tools/crictl-linux-amd64.tar.gz" -C /opt/bin -xz + + mkdir -p /opt/bin + cd /opt/bin + cp -a ${BINARIES_DIR}/k8s/{kubeadm,kubelet,kubectl} /opt/bin + chmod +x {kubeadm,kubelet,kubectl} + + sed "s:/usr/bin:/opt/bin:g" ${BINARIES_DIR}/kubelet.service > /etc/systemd/system/kubelet.service + mkdir -p 
/etc/systemd/system/kubelet.service.d + sed "s:/usr/bin:/opt/bin:g" ${BINARIES_DIR}/10-kubeadm.conf > /etc/systemd/system/kubelet.service.d/10-kubeadm.conf + + output=`ls ${BINARIES_DIR}/docker/` + if [ "$output" != "" ]; then + while read -r line; do + crucial_cmd_attempts=1 + while true; do + if (( "$crucial_cmd_attempts" > "$MAX_SETUP_CRUCIAL_CMD_ATTEMPTS" )); then + echo "Loading docker image ${BINARIES_DIR}/docker/$line failed!" + break; + fi + retval=0 + set +e + docker load < "${BINARIES_DIR}/docker/$line" + retval=$? + set -e + if [ $retval -eq 0 ]; then + break; + fi + crucial_cmd_attempts=$[$crucial_cmd_attempts + 1] + done + done <<< "$output" + setup_complete=true + fi + umount "${ISO_MOUNT_DIR}" && rmdir "${ISO_MOUNT_DIR}" + if [ "$EJECT_ISO_FROM_OS" = true ] && [ "$iso_drive_path" != "" ]; then + eject "${iso_drive_path}" + fi + fi + if [ "$setup_complete" = false ] && [ "$ATTEMPT_ONLINE_INSTALL" = true ]; then + ### Binaries not available offline ### + RELEASE="v1.16.3" + CNI_VERSION="v0.7.5" + CRICTL_VERSION="v1.16.0" + echo "Warning: ${BINARIES_DIR} not found. Will get binaries and docker images from Internet." 
+ mkdir -p /opt/cni/bin + curl -L "https://github.com/containernetworking/plugins/releases/download/${CNI_VERSION}/cni-plugins-amd64-${CNI_VERSION}.tgz" | tar -C /opt/cni/bin -xz + + mkdir -p /opt/bin + curl -L "https://github.com/kubernetes-incubator/cri-tools/releases/download/${CRICTL_VERSION}/crictl-${CRICTL_VERSION}-linux-amd64.tar.gz" | tar -C /opt/bin -xz + + mkdir -p /opt/bin + cd /opt/bin + curl -L --remote-name-all https://storage.googleapis.com/kubernetes-release/release/${RELEASE}/bin/linux/amd64/{kubeadm,kubelet,kubectl} + chmod +x {kubeadm,kubelet,kubectl} + + curl -sSL "https://raw.githubusercontent.com/kubernetes/kubernetes/${RELEASE}/build/debs/kubelet.service" | sed "s:/usr/bin:/opt/bin:g" > /etc/systemd/system/kubelet.service + mkdir -p /etc/systemd/system/kubelet.service.d + curl -sSL "https://raw.githubusercontent.com/kubernetes/kubernetes/${RELEASE}/build/debs/10-kubeadm.conf" | sed "s:/usr/bin:/opt/bin:g" > /etc/systemd/system/kubelet.service.d/10-kubeadm.conf + fi + + systemctl enable kubelet && systemctl start kubelet + modprobe br_netfilter && sysctl net.bridge.bridge-nf-call-iptables=1 + + if [ -d "$BINARIES_DIR" ] && [ "$ATTEMPT_ONLINE_INSTALL" = true ]; then + crucial_cmd_attempts=1 + while true; do + if (( "$crucial_cmd_attempts" > "$MAX_SETUP_CRUCIAL_CMD_ATTEMPTS" )); then + echo "Warning: kubeadm pull images failed after multiple tries!" + break; + fi + retval=0 + set +e + kubeadm config images pull + retval=$? + set -e + if [ $retval -eq 0 ]; then + break; + fi + crucial_cmd_attempts=$[$crucial_cmd_attempts + 1] + done + fi + + - path: /opt/bin/deploy-kube-system + permissions: 0700 + owner: root:root + content: | + #!/bin/bash -e + + if [[ -f "/home/core/success" ]]; then + echo "Already provisioned!" + exit 0 + fi + + if [[ $(systemctl is-active setup-kube-system) != "inactive" ]]; then + echo "setup-kube-system is running!" 
+ exit 1 + fi + modprobe ip_vs + modprobe ip_vs_wrr + modprobe ip_vs_sh + modprobe nf_conntrack_ipv4 + if [[ "$PATH" != *:/opt/bin && "$PATH" != *:/opt/bin:* ]]; then + export PATH=$PATH:/opt/bin + fi + kubeadm join {{ k8s_master.join_ip }}:6443 --token {{ k8s_master.cluster.token }} --control-plane --certificate-key {{ k8s_master.cluster.ha.certificate.key }} --discovery-token-unsafe-skip-ca-verification + + sudo touch /home/core/success + echo "true" > /home/core/success + +coreos: + units: + - name: docker.service + command: start + enable: true + + - name: setup-kube-system.service + command: start + content: | + [Unit] + Requires=docker.service + After=docker.service + + [Service] + Type=simple + StartLimitInterval=0 + ExecStart=/opt/bin/setup-kube-system + + - name: deploy-kube-system.service + command: start + content: | + [Unit] + After=setup-kube-system.service + + [Service] + Type=simple + StartLimitInterval=0 + Restart=on-failure + ExecStartPre=/usr/bin/curl -k https://{{ k8s_master.join_ip }}:6443/version + ExecStart=/opt/bin/deploy-kube-system + + update: + group: stable + reboot-strategy: off diff --git a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master.yml b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master.yml new file mode 100644 index 00000000000..14828578ed8 --- /dev/null +++ b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master.yml @@ -0,0 +1,294 @@ +#cloud-config +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +--- +ssh_authorized_keys: + {{ k8s.ssh.pub.key }} + +write-files: + - path: /etc/conf.d/nfs + permissions: '0644' + content: | + OPTS_RPC_MOUNTD="" + + - path: /etc/kubernetes/pki/cloudstack/ca.crt + permissions: '0644' + content: | + {{ k8s_master.ca.crt }} + + - path: /etc/kubernetes/pki/cloudstack/apiserver.crt + permissions: '0644' + content: | + {{ k8s_master.apiserver.crt }} + + - path: /etc/kubernetes/pki/cloudstack/apiserver.key + permissions: '0600' + content: | + {{ k8s_master.apiserver.key }} + + - path: /opt/bin/setup-kube-system + permissions: 0700 + owner: root:root + content: | + #!/bin/bash -e + + if [[ -f "/home/core/success" ]]; then + echo "Already provisioned!" + exit 0 + fi + + ISO_MOUNT_DIR=/mnt/k8sdisk + BINARIES_DIR=${ISO_MOUNT_DIR}/ + K8S_CONFIG_SCRIPTS_COPY_DIR=/tmp/k8sconfigscripts/ + ATTEMPT_ONLINE_INSTALL=false + setup_complete=false + + OFFLINE_INSTALL_ATTEMPT_SLEEP=15 + MAX_OFFLINE_INSTALL_ATTEMPTS=100 + offline_attempts=1 + MAX_SETUP_CRUCIAL_CMD_ATTEMPTS=3 + EJECT_ISO_FROM_OS={{ k8s.eject.iso }} + crucial_cmd_attempts=1 + iso_drive_path="" + while true; do + if (( "$offline_attempts" > "$MAX_OFFLINE_INSTALL_ATTEMPTS" )); then + echo "Warning: Offline install timed out!" + break + fi + set +e + output=`blkid -o device -t TYPE=iso9660` + set -e + if [ "$output" != "" ]; then + while read -r line; do + if [ ! -d "${ISO_MOUNT_DIR}" ]; then + mkdir "${ISO_MOUNT_DIR}" + fi + retval=0 + set +e + mount -o ro "${line}" "${ISO_MOUNT_DIR}" + retval=$? 
+ set -e + if [ $retval -eq 0 ]; then + if [ -d "$BINARIES_DIR" ]; then + iso_drive_path="${line}" + break + else + umount "${line}" && rmdir "${ISO_MOUNT_DIR}" + fi + fi + done <<< "$output" + fi + if [ -d "$BINARIES_DIR" ]; then + break + fi + echo "Waiting for Binaries directory $BINARIES_DIR to be available, sleeping for $OFFLINE_INSTALL_ATTEMPT_SLEEP seconds, attempt: $offline_attempts" + sleep $OFFLINE_INSTALL_ATTEMPT_SLEEP + offline_attempts=$[$offline_attempts + 1] + done + + if [[ "$PATH" != *:/opt/bin && "$PATH" != *:/opt/bin:* ]]; then + export PATH=$PATH:/opt/bin + fi + + if [ -d "$BINARIES_DIR" ]; then + ### Binaries available offline ### + echo "Installing binaries from ${BINARIES_DIR}" + mkdir -p /opt/cni/bin + tar -f "${BINARIES_DIR}/cni/cni-plugins-amd64.tgz" -C /opt/cni/bin -xz + + mkdir -p /opt/bin + tar -f "${BINARIES_DIR}/cri-tools/crictl-linux-amd64.tar.gz" -C /opt/bin -xz + + mkdir -p /opt/bin + cd /opt/bin + cp -a ${BINARIES_DIR}/k8s/{kubeadm,kubelet,kubectl} /opt/bin + chmod +x {kubeadm,kubelet,kubectl} + + sed "s:/usr/bin:/opt/bin:g" ${BINARIES_DIR}/kubelet.service > /etc/systemd/system/kubelet.service + mkdir -p /etc/systemd/system/kubelet.service.d + sed "s:/usr/bin:/opt/bin:g" ${BINARIES_DIR}/10-kubeadm.conf > /etc/systemd/system/kubelet.service.d/10-kubeadm.conf + + output=`ls ${BINARIES_DIR}/docker/` + if [ "$output" != "" ]; then + while read -r line; do + crucial_cmd_attempts=1 + while true; do + if (( "$crucial_cmd_attempts" > "$MAX_SETUP_CRUCIAL_CMD_ATTEMPTS" )); then + echo "Loading docker image ${BINARIES_DIR}/docker/$line failed!" + break; + fi + retval=0 + set +e + docker load < "${BINARIES_DIR}/docker/$line" + retval=$? 
+ set -e + if [ $retval -eq 0 ]; then + break; + fi + crucial_cmd_attempts=$[$crucial_cmd_attempts + 1] + done + done <<< "$output" + setup_complete=true + fi + mkdir -p "${K8S_CONFIG_SCRIPTS_COPY_DIR}" + cp ${BINARIES_DIR}/*.yaml "${K8S_CONFIG_SCRIPTS_COPY_DIR}" + umount "${ISO_MOUNT_DIR}" && rmdir "${ISO_MOUNT_DIR}" + if [ "$EJECT_ISO_FROM_OS" = true ] && [ "$iso_drive_path" != "" ]; then + eject "${iso_drive_path}" + fi + fi + if [ "$setup_complete" = false ] && [ "$ATTEMPT_ONLINE_INSTALL" = true ]; then + ### Binaries not available offline ### + RELEASE="v1.16.3" + CNI_VERSION="v0.7.5" + CRICTL_VERSION="v1.16.0" + echo "Warning: ${BINARIES_DIR} not found. Will get binaries and docker images from Internet." + mkdir -p /opt/cni/bin + curl -L "https://github.com/containernetworking/plugins/releases/download/${CNI_VERSION}/cni-plugins-amd64-${CNI_VERSION}.tgz" | tar -C /opt/cni/bin -xz + + mkdir -p /opt/bin + curl -L "https://github.com/kubernetes-incubator/cri-tools/releases/download/${CRICTL_VERSION}/crictl-${CRICTL_VERSION}-linux-amd64.tar.gz" | tar -C /opt/bin -xz + + mkdir -p /opt/bin + cd /opt/bin + curl -L --remote-name-all https://storage.googleapis.com/kubernetes-release/release/${RELEASE}/bin/linux/amd64/{kubeadm,kubelet,kubectl} + chmod +x {kubeadm,kubelet,kubectl} + + curl -sSL "https://raw.githubusercontent.com/kubernetes/kubernetes/${RELEASE}/build/debs/kubelet.service" | sed "s:/usr/bin:/opt/bin:g" > /etc/systemd/system/kubelet.service + mkdir -p /etc/systemd/system/kubelet.service.d + curl -sSL "https://raw.githubusercontent.com/kubernetes/kubernetes/${RELEASE}/build/debs/10-kubeadm.conf" | sed "s:/usr/bin:/opt/bin:g" > /etc/systemd/system/kubelet.service.d/10-kubeadm.conf + fi + + systemctl enable kubelet && systemctl start kubelet + modprobe br_netfilter && sysctl net.bridge.bridge-nf-call-iptables=1 + + if [ -d "$BINARIES_DIR" ] && [ "$ATTEMPT_ONLINE_INSTALL" = true ]; then + crucial_cmd_attempts=1 + while true; do + if (( "$crucial_cmd_attempts" 
> "$MAX_SETUP_CRUCIAL_CMD_ATTEMPTS" )); then + echo "Warning: kubeadm pull images failed after multiple tries!" + break; + fi + retval=0 + set +e + kubeadm config images pull + retval=$? + set -e + if [ $retval -eq 0 ]; then + break; + fi + crucial_cmd_attempts=$[$crucial_cmd_attempts + 1] + done + fi + + crucial_cmd_attempts=1 + while true; do + if (( "$crucial_cmd_attempts" > "$MAX_SETUP_CRUCIAL_CMD_ATTEMPTS" )); then + echo "Error: kubeadm init failed!" + exit 1 + fi + retval=0 + set +e + kubeadm init --token {{ k8s_master.cluster.token }} {{ k8s_master.cluster.initargs }} + retval=$? + set -e + if [ $retval -eq 0 ]; then + break; + fi + crucial_cmd_attempts=$[$crucial_cmd_attempts + 1] + done + + - path: /opt/bin/deploy-kube-system + permissions: 0700 + owner: root:root + content: | + #!/bin/bash -e + + if [[ -f "/home/core/success" ]]; then + echo "Already provisioned!" + exit 0 + fi + + K8S_CONFIG_SCRIPTS_COPY_DIR=/tmp/k8sconfigscripts/ + + if [[ $(systemctl is-active setup-kube-system) != "inactive" ]]; then + echo "setup-kube-system is running!" + exit 1 + fi + if [[ "$PATH" != *:/opt/bin && "$PATH" != *:/opt/bin:* ]]; then + export PATH=$PATH:/opt/bin + fi + export KUBECONFIG=/etc/kubernetes/admin.conf + + mkdir -p /root/.kube + cp -i /etc/kubernetes/admin.conf /root/.kube/config + chown $(id -u):$(id -g) /root/.kube/config + echo export PATH=\$PATH:/opt/bin >> /root/.bashrc + + if [ -d "$K8S_CONFIG_SCRIPTS_COPY_DIR" ]; then + ### Network, dashboard configs available offline ### + echo "Offline configs are available!" 
+ kubectl apply -f ${K8S_CONFIG_SCRIPTS_COPY_DIR}/network.yaml + kubectl apply -f ${K8S_CONFIG_SCRIPTS_COPY_DIR}/dashboard.yaml + rm -rf "${K8S_CONFIG_SCRIPTS_COPY_DIR}" + else + kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')" + kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-beta6/aio/deploy/recommended.yaml + fi + + kubectl create rolebinding admin-binding --role=admin --user=admin || true + kubectl create clusterrolebinding cluster-admin-binding --clusterrole=cluster-admin --user=admin || true + kubectl create clusterrolebinding kubernetes-dashboard-ui --clusterrole=cluster-admin --serviceaccount=kubernetes-dashboard:kubernetes-dashboard || true + + sudo touch /home/core/success + echo "true" > /home/core/success + +coreos: + units: + - name: docker.service + command: start + enable: true + + - name: setup-kube-system.service + command: start + content: | + [Unit] + Requires=docker.service + After=docker.service + + [Service] + Type=simple + StartLimitInterval=0 + ExecStart=/opt/bin/setup-kube-system + + - name: deploy-kube-system.service + command: start + content: | + [Unit] + After=setup-kube-system.service + + [Service] + Type=simple + StartLimitInterval=0 + Restart=on-failure + ExecStartPre=/usr/bin/curl -k https://127.0.0.1:6443/version + ExecStart=/opt/bin/deploy-kube-system + + update: + group: stable + reboot-strategy: off diff --git a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-node.yml b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-node.yml new file mode 100644 index 00000000000..d2f5454a669 --- /dev/null +++ b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-node.yml @@ -0,0 +1,237 @@ +#cloud-config +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +--- +ssh_authorized_keys: + {{ k8s.ssh.pub.key }} + +write-files: + - path: /opt/bin/setup-kube-system + permissions: 0700 + owner: root:root + content: | + #!/bin/bash -e + + if [[ -f "/home/core/success" ]]; then + echo "Already provisioned!" + exit 0 + fi + + ISO_MOUNT_DIR=/mnt/k8sdisk + BINARIES_DIR=${ISO_MOUNT_DIR}/ + K8S_CONFIG_SCRIPTS_COPY_DIR=/tmp/k8sconfigscripts/ + ATTEMPT_ONLINE_INSTALL=false + setup_complete=false + + OFFLINE_INSTALL_ATTEMPT_SLEEP=30 + MAX_OFFLINE_INSTALL_ATTEMPTS=40 + offline_attempts=1 + MAX_SETUP_CRUCIAL_CMD_ATTEMPTS=3 + EJECT_ISO_FROM_OS={{ k8s.eject.iso }} + crucial_cmd_attempts=1 + iso_drive_path="" + while true; do + if (( "$offline_attempts" > "$MAX_OFFLINE_INSTALL_ATTEMPTS" )); then + echo "Warning: Offline install timed out!" + break + fi + set +e + output=`blkid -o device -t TYPE=iso9660` + set -e + if [ "$output" != "" ]; then + while read -r line; do + if [ ! -d "${ISO_MOUNT_DIR}" ]; then + mkdir "${ISO_MOUNT_DIR}" + fi + retval=0 + set +e + mount -o ro "${line}" "${ISO_MOUNT_DIR}" + retval=$? 
+ set -e + if [ $retval -eq 0 ]; then + if [ -d "$BINARIES_DIR" ]; then + iso_drive_path="${line}" + break + else + umount "${line}" && rmdir "${ISO_MOUNT_DIR}" + fi + fi + done <<< "$output" + fi + if [ -d "$BINARIES_DIR" ]; then + break + fi + echo "Waiting for Binaries directory $BINARIES_DIR to be available, sleeping for $OFFLINE_INSTALL_ATTEMPT_SLEEP seconds, attempt: $offline_attempts" + sleep $OFFLINE_INSTALL_ATTEMPT_SLEEP + offline_attempts=$[$offline_attempts + 1] + done + + if [[ "$PATH" != *:/opt/bin && "$PATH" != *:/opt/bin:* ]]; then + export PATH=$PATH:/opt/bin + fi + + if [ -d "$BINARIES_DIR" ]; then + ### Binaries available offline ### + echo "Installing binaries from ${BINARIES_DIR}" + mkdir -p /opt/cni/bin + tar -f "${BINARIES_DIR}/cni/cni-plugins-amd64.tgz" -C /opt/cni/bin -xz + + mkdir -p /opt/bin + tar -f "${BINARIES_DIR}/cri-tools/crictl-linux-amd64.tar.gz" -C /opt/bin -xz + + mkdir -p /opt/bin + cd /opt/bin + cp -a ${BINARIES_DIR}/k8s/{kubeadm,kubelet,kubectl} /opt/bin + chmod +x {kubeadm,kubelet,kubectl} + + sed "s:/usr/bin:/opt/bin:g" ${BINARIES_DIR}/kubelet.service > /etc/systemd/system/kubelet.service + mkdir -p /etc/systemd/system/kubelet.service.d + sed "s:/usr/bin:/opt/bin:g" ${BINARIES_DIR}/10-kubeadm.conf > /etc/systemd/system/kubelet.service.d/10-kubeadm.conf + + output=`ls ${BINARIES_DIR}/docker/` + if [ "$output" != "" ]; then + while read -r line; do + crucial_cmd_attempts=1 + while true; do + if (( "$crucial_cmd_attempts" > "$MAX_SETUP_CRUCIAL_CMD_ATTEMPTS" )); then + echo "Loading docker image ${BINARIES_DIR}/docker/$line failed!" + break; + fi + retval=0 + set +e + docker load < "${BINARIES_DIR}/docker/$line" + retval=$? 
+ set -e + if [ $retval -eq 0 ]; then + break; + fi + crucial_cmd_attempts=$[$crucial_cmd_attempts + 1] + done + done <<< "$output" + setup_complete=true + fi + umount "${ISO_MOUNT_DIR}" && rmdir "${ISO_MOUNT_DIR}" + if [ "$EJECT_ISO_FROM_OS" = true ] && [ "$iso_drive_path" != "" ]; then + eject "${iso_drive_path}" + fi + fi + if [ "$setup_complete" = false ] && [ "$ATTEMPT_ONLINE_INSTALL" = true ]; then + ### Binaries not available offline ### + RELEASE="v1.16.3" + CNI_VERSION="v0.7.5" + CRICTL_VERSION="v1.16.0" + echo "Warning: ${BINARIES_DIR} not found. Will get binaries and docker images from Internet." + mkdir -p /opt/cni/bin + curl -L "https://github.com/containernetworking/plugins/releases/download/${CNI_VERSION}/cni-plugins-amd64-${CNI_VERSION}.tgz" | tar -C /opt/cni/bin -xz + + mkdir -p /opt/bin + curl -L "https://github.com/kubernetes-incubator/cri-tools/releases/download/${CRICTL_VERSION}/crictl-${CRICTL_VERSION}-linux-amd64.tar.gz" | tar -C /opt/bin -xz + + mkdir -p /opt/bin + cd /opt/bin + curl -L --remote-name-all https://storage.googleapis.com/kubernetes-release/release/${RELEASE}/bin/linux/amd64/{kubeadm,kubelet,kubectl} + chmod +x {kubeadm,kubelet,kubectl} + + curl -sSL "https://raw.githubusercontent.com/kubernetes/kubernetes/${RELEASE}/build/debs/kubelet.service" | sed "s:/usr/bin:/opt/bin:g" > /etc/systemd/system/kubelet.service + mkdir -p /etc/systemd/system/kubelet.service.d + curl -sSL "https://raw.githubusercontent.com/kubernetes/kubernetes/${RELEASE}/build/debs/10-kubeadm.conf" | sed "s:/usr/bin:/opt/bin:g" > /etc/systemd/system/kubelet.service.d/10-kubeadm.conf + fi + + systemctl enable kubelet && systemctl start kubelet + modprobe br_netfilter && sysctl net.bridge.bridge-nf-call-iptables=1 + + if [ -d "$BINARIES_DIR" ] && [ "$ATTEMPT_ONLINE_INSTALL" = true ]; then + crucial_cmd_attempts=1 + while true; do + if (( "$crucial_cmd_attempts" > "$MAX_SETUP_CRUCIAL_CMD_ATTEMPTS" )); then + echo "Warning: kubeadm pull images failed after multiple 
tries!" + break; + fi + retval=0 + set +e + kubeadm config images pull + retval=$? + set -e + if [ $retval -eq 0 ]; then + break; + fi + crucial_cmd_attempts=$[$crucial_cmd_attempts + 1] + done + fi + + - path: /opt/bin/deploy-kube-system + permissions: 0700 + owner: root:root + content: | + #!/bin/bash -e + + if [[ -f "/home/core/success" ]]; then + echo "Already provisioned!" + exit 0 + fi + + if [[ $(systemctl is-active setup-kube-system) != "inactive" ]]; then + echo "setup-kube-system is running!" + exit 1 + fi + modprobe ip_vs + modprobe ip_vs_wrr + modprobe ip_vs_sh + modprobe nf_conntrack_ipv4 + if [[ "$PATH" != *:/opt/bin && "$PATH" != *:/opt/bin:* ]]; then + export PATH=$PATH:/opt/bin + fi + kubeadm join {{ k8s_master.join_ip }}:6443 --token {{ k8s_master.cluster.token }} --discovery-token-unsafe-skip-ca-verification + + sudo touch /home/core/success + echo "true" > /home/core/success + +coreos: + units: + - name: docker.service + command: start + enable: true + + - name: setup-kube-system.service + command: start + content: | + [Unit] + Requires=docker.service + After=docker.service + + [Service] + Type=simple + StartLimitInterval=0 + ExecStart=/opt/bin/setup-kube-system + + - name: deploy-kube-system.service + command: start + content: | + [Unit] + After=setup-kube-system.service + + [Service] + Type=simple + StartLimitInterval=0 + Restart=on-failure + ExecStartPre=/usr/bin/curl -k https://{{ k8s_master.join_ip }}:6443/version + ExecStart=/opt/bin/deploy-kube-system + + update: + group: stable + reboot-strategy: off diff --git a/plugins/integrations/kubernetes-service/src/main/resources/script/upgrade-kubernetes.sh b/plugins/integrations/kubernetes-service/src/main/resources/script/upgrade-kubernetes.sh new file mode 100644 index 00000000000..ea36d7ee897 --- /dev/null +++ b/plugins/integrations/kubernetes-service/src/main/resources/script/upgrade-kubernetes.sh @@ -0,0 +1,133 @@ +#!/bin/bash -e +# Licensed to the Apache Software Foundation (ASF) under 
one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# Version 1.14 and below needs extra flags with kubeadm upgrade node +if [ $# -lt 4 ]; then + echo "Invalid input. Valid usage: ./upgrade-kubernetes.sh UPGRADE_VERSION IS_MASTER IS_OLD_VERSION IS_EJECT_ISO" + echo "eg: ./upgrade-kubernetes.sh 1.16.3 true false false" + exit 1 +fi +UPGRADE_VERSION="${1}" +IS_MAIN_MASTER="" +if [ $# -gt 1 ]; then + IS_MAIN_MASTER="${2}" +fi +IS_OLD_VERSION="" +if [ $# -gt 2 ]; then + IS_OLD_VERSION="${3}" +fi +EJECT_ISO_FROM_OS=false +if [ $# -gt 3 ]; then + EJECT_ISO_FROM_OS="${4}" +fi + +export PATH=$PATH:/opt/bin + +ISO_MOUNT_DIR=/mnt/k8sdisk +BINARIES_DIR=${ISO_MOUNT_DIR}/ + +OFFLINE_INSTALL_ATTEMPT_SLEEP=5 +MAX_OFFLINE_INSTALL_ATTEMPTS=10 +offline_attempts=1 +iso_drive_path="" +while true; do + if (( "$offline_attempts" > "$MAX_OFFLINE_INSTALL_ATTEMPTS" )); then + echo "Warning: Offline install timed out!" + break + fi + set +e + output=`blkid -o device -t TYPE=iso9660` + set -e + if [ "$output" != "" ]; then + while read -r line; do + if [ ! -d "${ISO_MOUNT_DIR}" ]; then + mkdir "${ISO_MOUNT_DIR}" + fi + retval=0 + set +e + mount -o ro "${line}" "${ISO_MOUNT_DIR}" + retval=$? 
+ set -e + if [ $retval -eq 0 ]; then + if [ -d "$BINARIES_DIR" ]; then + iso_drive_path="${line}" + break + else + umount "${line}" && rmdir "${ISO_MOUNT_DIR}" + fi + fi + done <<< "$output" + fi + if [ -d "$BINARIES_DIR" ]; then + break + fi + echo "Waiting for Binaries directory $BINARIES_DIR to be available, sleeping for $OFFLINE_INSTALL_ATTEMPT_SLEEP seconds, attempt: $offline_attempts" + sleep $OFFLINE_INSTALL_ATTEMPT_SLEEP + offline_attempts=$[$offline_attempts + 1] +done + +if [ -d "$BINARIES_DIR" ]; then + ### Binaries available offline ### + echo "Installing binaries from ${BINARIES_DIR}" + + cd /opt/bin + + cp ${BINARIES_DIR}/k8s/kubeadm /opt/bin + chmod +x kubeadm + + output=`ls ${BINARIES_DIR}/docker/` + if [ "$output" != "" ]; then + while read -r line; do + docker load < "${BINARIES_DIR}/docker/$line" + done <<< "$output" + fi + + tar -f "${BINARIES_DIR}/cni/cni-plugins-amd64.tgz" -C /opt/cni/bin -xz + tar -f "${BINARIES_DIR}/cri-tools/crictl-linux-amd64.tar.gz" -C /opt/bin -xz + + if [ "${IS_MAIN_MASTER}" == 'true' ]; then + set +e + kubeadm upgrade apply ${UPGRADE_VERSION} -y + retval=$? 
+ set -e + if [ $retval -ne 0 ]; then + kubeadm upgrade apply ${UPGRADE_VERSION} --ignore-preflight-errors=CoreDNSUnsupportedPlugins -y + fi + else + if [ "${IS_OLD_VERSION}" == 'true' ]; then + kubeadm upgrade node config --kubelet-version ${UPGRADE_VERSION} + else + kubeadm upgrade node + fi + fi + + systemctl stop kubelet + cp -a ${BINARIES_DIR}/k8s/{kubelet,kubectl} /opt/bin + chmod +x {kubelet,kubectl} + systemctl restart kubelet + + if [ "${IS_MAIN_MASTER}" == 'true' ]; then + kubectl apply -f ${BINARIES_DIR}/network.yaml + kubectl apply -f ${BINARIES_DIR}/dashboard.yaml + fi + + umount "${ISO_MOUNT_DIR}" && rmdir "${ISO_MOUNT_DIR}" + if [ "$EJECT_ISO_FROM_OS" = true ] && [ "$iso_drive_path" != "" ]; then + eject "${iso_drive_path}" + fi +fi diff --git a/plugins/integrations/kubernetes-service/src/test/java/com/cloud/kubernetes/version/KubernetesVersionServiceTest.java b/plugins/integrations/kubernetes-service/src/test/java/com/cloud/kubernetes/version/KubernetesVersionServiceTest.java new file mode 100644 index 00000000000..6878c4cd29b --- /dev/null +++ b/plugins/integrations/kubernetes-service/src/test/java/com/cloud/kubernetes/version/KubernetesVersionServiceTest.java @@ -0,0 +1,253 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +package com.cloud.kubernetes.version; + +import static org.mockito.Mockito.when; + +import java.lang.reflect.Field; +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; + +import org.apache.cloudstack.api.command.admin.kubernetes.version.AddKubernetesSupportedVersionCmd; +import org.apache.cloudstack.api.command.admin.kubernetes.version.DeleteKubernetesSupportedVersionCmd; +import org.apache.cloudstack.api.command.admin.kubernetes.version.UpdateKubernetesSupportedVersionCmd; +import org.apache.cloudstack.api.command.user.iso.DeleteIsoCmd; +import org.apache.cloudstack.api.command.user.iso.RegisterIsoCmd; +import org.apache.cloudstack.api.command.user.kubernetes.version.ListKubernetesSupportedVersionsCmd; +import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; +import org.apache.cloudstack.framework.config.ConfigKey; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.MockitoAnnotations; +import org.powermock.api.mockito.PowerMockito; +import org.powermock.core.classloader.annotations.PrepareForTest; +import org.powermock.modules.junit4.PowerMockRunner; + +import com.cloud.api.query.dao.TemplateJoinDao; +import com.cloud.api.query.vo.TemplateJoinVO; +import com.cloud.dc.DataCenterVO; +import com.cloud.dc.dao.DataCenterDao; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.kubernetes.cluster.KubernetesClusterService; +import com.cloud.kubernetes.cluster.KubernetesClusterVO; +import com.cloud.kubernetes.cluster.dao.KubernetesClusterDao; +import com.cloud.kubernetes.version.dao.KubernetesSupportedVersionDao; 
+import com.cloud.storage.VMTemplateVO; +import com.cloud.storage.dao.VMTemplateDao; +import com.cloud.template.TemplateApiService; +import com.cloud.template.VirtualMachineTemplate; +import com.cloud.user.Account; +import com.cloud.user.AccountManager; +import com.cloud.user.AccountVO; +import com.cloud.user.User; +import com.cloud.user.UserVO; +import com.cloud.utils.component.ComponentContext; +import com.cloud.utils.exception.CloudRuntimeException; + +@RunWith(PowerMockRunner.class) +@PrepareForTest({ComponentContext.class}) +public class KubernetesVersionServiceTest { + + @InjectMocks + private KubernetesVersionService kubernetesVersionService = new KubernetesVersionManagerImpl(); + + @Mock + private KubernetesSupportedVersionDao kubernetesSupportedVersionDao; + @Mock + private KubernetesClusterDao kubernetesClusterDao; + @Mock + private AccountManager accountManager; + @Mock + private VMTemplateDao templateDao; + @Mock + private TemplateJoinDao templateJoinDao; + @Mock + private DataCenterDao dataCenterDao; + @Mock + private TemplateApiService templateService; + + private void overrideDefaultConfigValue(final ConfigKey configKey, final String name, final Object o) throws IllegalAccessException, NoSuchFieldException { + Field f = ConfigKey.class.getDeclaredField(name); + f.setAccessible(true); + f.set(configKey, o); + } + + @Before + public void setUp() throws Exception { + MockitoAnnotations.initMocks(this); + + overrideDefaultConfigValue(KubernetesClusterService.KubernetesServiceEnabled, "_defaultValue", "true"); + + DataCenterVO zone = Mockito.mock(DataCenterVO.class); + when(zone.getId()).thenReturn(1L); + when(dataCenterDao.findById(Mockito.anyLong())).thenReturn(zone); + + TemplateJoinVO templateJoinVO = Mockito.mock(TemplateJoinVO.class); + when(templateJoinVO.getId()).thenReturn(1L); + when(templateJoinVO.getUrl()).thenReturn("https://download.cloudstack.com"); + when(templateJoinVO.getState()).thenReturn(ObjectInDataStoreStateMachine.State.Ready); + 
when(templateJoinDao.findById(Mockito.anyLong())).thenReturn(templateJoinVO); + + KubernetesSupportedVersionVO versionVO = Mockito.mock(KubernetesSupportedVersionVO.class); + when(versionVO.getSemanticVersion()).thenReturn(KubernetesVersionService.MIN_KUBERNETES_VERSION); + when(kubernetesSupportedVersionDao.persist(Mockito.any(KubernetesSupportedVersionVO.class))).thenReturn(versionVO); + } + + @After + public void tearDown() throws Exception { + } + + @Test + public void listKubernetesSupportedVersionsTest() { + ListKubernetesSupportedVersionsCmd cmd = Mockito.mock(ListKubernetesSupportedVersionsCmd.class); + List versionVOs = new ArrayList<>(); + KubernetesSupportedVersionVO versionVO = Mockito.mock(KubernetesSupportedVersionVO.class); + when(versionVO.getSemanticVersion()).thenReturn(KubernetesVersionService.MIN_KUBERNETES_VERSION); + versionVOs.add(versionVO); + when(kubernetesSupportedVersionDao.listAll()).thenReturn(versionVOs); + when(kubernetesSupportedVersionDao.listAllInZone(Mockito.anyLong())).thenReturn(versionVOs); + when(kubernetesSupportedVersionDao.findById(Mockito.anyLong())).thenReturn(versionVO); + kubernetesVersionService.listKubernetesSupportedVersions(cmd); + } + + @Test(expected = InvalidParameterValueException.class) + public void addKubernetesSupportedVersionLowerUnsupportedTest() { + AddKubernetesSupportedVersionCmd cmd = Mockito.mock(AddKubernetesSupportedVersionCmd.class); + when(cmd.getMinimumCpu()).thenReturn(KubernetesClusterService.MIN_KUBERNETES_CLUSTER_NODE_CPU); + when(cmd.getMinimumRamSize()).thenReturn(KubernetesClusterService.MIN_KUBERNETES_CLUSTER_NODE_RAM_SIZE); + AccountVO account = new AccountVO("admin", 1L, "", Account.ACCOUNT_TYPE_ADMIN, "uuid"); + UserVO user = new UserVO(1, "adminuser", "password", "firstname", "lastName", "email", "timezone", UUID.randomUUID().toString(), User.Source.UNKNOWN); + CallContext.register(user, account); + when(cmd.getSemanticVersion()).thenReturn("1.1.1"); + 
kubernetesVersionService.addKubernetesSupportedVersion(cmd); + } + + @Test(expected = InvalidParameterValueException.class) + public void addKubernetesSupportedVersionInvalidCpuTest() { + AddKubernetesSupportedVersionCmd cmd = Mockito.mock(AddKubernetesSupportedVersionCmd.class); + when(cmd.getMinimumCpu()).thenReturn(KubernetesClusterService.MIN_KUBERNETES_CLUSTER_NODE_CPU-1); + when(cmd.getMinimumRamSize()).thenReturn(KubernetesClusterService.MIN_KUBERNETES_CLUSTER_NODE_RAM_SIZE); + AccountVO account = new AccountVO("admin", 1L, "", Account.ACCOUNT_TYPE_ADMIN, "uuid"); + UserVO user = new UserVO(1, "adminuser", "password", "firstname", "lastName", "email", "timezone", UUID.randomUUID().toString(), User.Source.UNKNOWN); + when(cmd.getSemanticVersion()).thenReturn(KubernetesVersionService.MIN_KUBERNETES_VERSION); + CallContext.register(user, account); + kubernetesVersionService.addKubernetesSupportedVersion(cmd); + } + + @Test(expected = InvalidParameterValueException.class) + public void addKubernetesSupportedVersionInvalidRamSizeTest() { + AddKubernetesSupportedVersionCmd cmd = Mockito.mock(AddKubernetesSupportedVersionCmd.class); + when(cmd.getMinimumCpu()).thenReturn(KubernetesClusterService.MIN_KUBERNETES_CLUSTER_NODE_CPU); + when(cmd.getMinimumRamSize()).thenReturn(KubernetesClusterService.MIN_KUBERNETES_CLUSTER_NODE_RAM_SIZE-10); + AccountVO account = new AccountVO("admin", 1L, "", Account.ACCOUNT_TYPE_ADMIN, "uuid"); + UserVO user = new UserVO(1, "adminuser", "password", "firstname", "lastName", "email", "timezone", UUID.randomUUID().toString(), User.Source.UNKNOWN); + when(cmd.getSemanticVersion()).thenReturn(KubernetesVersionService.MIN_KUBERNETES_VERSION); + CallContext.register(user, account); + kubernetesVersionService.addKubernetesSupportedVersion(cmd); + } + + @Test(expected = InvalidParameterValueException.class) + public void addKubernetesSupportedVersionEmptyUrlTest() { + AddKubernetesSupportedVersionCmd cmd = 
Mockito.mock(AddKubernetesSupportedVersionCmd.class); + when(cmd.getMinimumCpu()).thenReturn(KubernetesClusterService.MIN_KUBERNETES_CLUSTER_NODE_CPU); + when(cmd.getMinimumRamSize()).thenReturn(KubernetesClusterService.MIN_KUBERNETES_CLUSTER_NODE_RAM_SIZE); + AccountVO account = new AccountVO("admin", 1L, "", Account.ACCOUNT_TYPE_ADMIN, "uuid"); + UserVO user = new UserVO(1, "adminuser", "password", "firstname", "lastName", "email", "timezone", UUID.randomUUID().toString(), User.Source.UNKNOWN); + when(cmd.getSemanticVersion()).thenReturn(KubernetesVersionService.MIN_KUBERNETES_VERSION); + CallContext.register(user, account); + when(cmd.getUrl()).thenReturn(""); + kubernetesVersionService.addKubernetesSupportedVersion(cmd); + } + + @Test + public void addKubernetesSupportedVersionIsoUrlTest() throws ResourceAllocationException, NoSuchFieldException { + AddKubernetesSupportedVersionCmd cmd = Mockito.mock(AddKubernetesSupportedVersionCmd.class); + AccountVO account = new AccountVO("admin", 1L, "", Account.ACCOUNT_TYPE_ADMIN, "uuid"); + UserVO user = new UserVO(1, "adminuser", "password", "firstname", "lastName", "email", "timezone", UUID.randomUUID().toString(), User.Source.UNKNOWN); + CallContext.register(user, account); + when(cmd.getSemanticVersion()).thenReturn(KubernetesVersionService.MIN_KUBERNETES_VERSION); + when(cmd.getUrl()).thenReturn("https://download.cloudstack.com"); + when(cmd.getChecksum()).thenReturn(null); + when(cmd.getMinimumCpu()).thenReturn(KubernetesClusterService.MIN_KUBERNETES_CLUSTER_NODE_CPU); + when(cmd.getMinimumRamSize()).thenReturn(KubernetesClusterService.MIN_KUBERNETES_CLUSTER_NODE_RAM_SIZE); + Account systemAccount = new AccountVO("system", 1L, "", Account.ACCOUNT_TYPE_ADMIN, "uuid"); + when(accountManager.getSystemAccount()).thenReturn(systemAccount); + PowerMockito.mockStatic(ComponentContext.class); + when(ComponentContext.inject(Mockito.any(RegisterIsoCmd.class))).thenReturn(new RegisterIsoCmd()); + 
when(templateService.registerIso(Mockito.any(RegisterIsoCmd.class))).thenReturn(Mockito.mock(VirtualMachineTemplate.class)); + VMTemplateVO templateVO = Mockito.mock(VMTemplateVO.class); + when(templateVO.getId()).thenReturn(1L); + when(templateDao.findById(Mockito.anyLong())).thenReturn(templateVO); + kubernetesVersionService.addKubernetesSupportedVersion(cmd); + } + + @Test(expected = CloudRuntimeException.class) + public void deleteKubernetesSupportedVersionExistingClustersTest() { + DeleteKubernetesSupportedVersionCmd cmd = Mockito.mock(DeleteKubernetesSupportedVersionCmd.class); + AccountVO account = new AccountVO("admin", 1L, "", Account.ACCOUNT_TYPE_ADMIN, "uuid"); + UserVO user = new UserVO(1, "adminuser", "password", "firstname", "lastName", "email", "timezone", UUID.randomUUID().toString(), User.Source.UNKNOWN); + CallContext.register(user, account); + when(kubernetesSupportedVersionDao.findById(Mockito.anyLong())).thenReturn(Mockito.mock(KubernetesSupportedVersionVO.class)); + List clusters = new ArrayList<>(); + clusters.add(Mockito.mock(KubernetesClusterVO.class)); + when(kubernetesClusterDao.listAllByKubernetesVersion(Mockito.anyLong())).thenReturn(clusters); + kubernetesVersionService.deleteKubernetesSupportedVersion(cmd); + } + + @Test + public void deleteKubernetesSupportedVersionTest() { + DeleteKubernetesSupportedVersionCmd cmd = Mockito.mock(DeleteKubernetesSupportedVersionCmd.class); + AccountVO account = new AccountVO("admin", 1L, "", Account.ACCOUNT_TYPE_ADMIN, "uuid"); + UserVO user = new UserVO(1, "adminuser", "password", "firstname", "lastName", "email", "timezone", UUID.randomUUID().toString(), User.Source.UNKNOWN); + CallContext.register(user, account); + when(kubernetesSupportedVersionDao.findById(Mockito.anyLong())).thenReturn(Mockito.mock(KubernetesSupportedVersionVO.class)); + List clusters = new ArrayList<>(); + when(kubernetesClusterDao.listAllByKubernetesVersion(Mockito.anyLong())).thenReturn(clusters); + 
when(templateDao.findById(Mockito.anyLong())).thenReturn(Mockito.mock(VMTemplateVO.class)); + PowerMockito.mockStatic(ComponentContext.class); + when(ComponentContext.inject(Mockito.any(DeleteIsoCmd.class))).thenReturn(new DeleteIsoCmd()); + when(templateService.deleteIso(Mockito.any(DeleteIsoCmd.class))).thenReturn(true); + when(kubernetesClusterDao.remove(Mockito.anyLong())).thenReturn(true); + kubernetesVersionService.deleteKubernetesSupportedVersion(cmd); + } + + @Test + public void updateKubernetesSupportedVersionTest() { + UpdateKubernetesSupportedVersionCmd cmd = Mockito.mock(UpdateKubernetesSupportedVersionCmd.class); + when(cmd.getState()).thenReturn(KubernetesSupportedVersion.State.Disabled.toString()); + AccountVO account = new AccountVO("admin", 1L, "", Account.ACCOUNT_TYPE_ADMIN, "uuid"); + UserVO user = new UserVO(1, "adminuser", "password", "firstname", "lastName", "email", "timezone", UUID.randomUUID().toString(), User.Source.UNKNOWN); + CallContext.register(user, account); + when(kubernetesSupportedVersionDao.findById(Mockito.anyLong())).thenReturn(Mockito.mock(KubernetesSupportedVersionVO.class)); + KubernetesSupportedVersionVO version = Mockito.mock(KubernetesSupportedVersionVO.class); + when(kubernetesSupportedVersionDao.createForUpdate(Mockito.anyLong())).thenReturn(version); + when(kubernetesSupportedVersionDao.update(Mockito.anyLong(), Mockito.any(KubernetesSupportedVersionVO.class))).thenReturn(true); + when(version.getState()).thenReturn(KubernetesSupportedVersion.State.Disabled); + when(version.getSemanticVersion()).thenReturn(KubernetesVersionService.MIN_KUBERNETES_VERSION); + when(kubernetesSupportedVersionDao.findById(Mockito.anyLong())).thenReturn(version); + kubernetesVersionService.updateKubernetesSupportedVersion(cmd); + } +} \ No newline at end of file diff --git a/plugins/pom.xml b/plugins/pom.xml index 2ecd6972211..524e4a13ece 100755 --- a/plugins/pom.xml +++ b/plugins/pom.xml @@ -88,6 +88,7 @@ integrations/cloudian 
integrations/prometheus + integrations/kubernetes-service metrics diff --git a/scripts/util/create-kubernetes-binaries-iso.sh b/scripts/util/create-kubernetes-binaries-iso.sh new file mode 100755 index 00000000000..bf97f0662f7 --- /dev/null +++ b/scripts/util/create-kubernetes-binaries-iso.sh @@ -0,0 +1,106 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +if [ $# -lt 6 ]; then + echo "Invalid input. Valid usage: ./create-kubernetes-binaries-iso.sh OUTPUT_PATH KUBERNETES_VERSION CNI_VERSION CRICTL_VERSION WEAVENET_NETWORK_YAML_CONFIG DASHBOARD_YAML_CONFIG" + echo "eg: ./create-kubernetes-binaries-iso.sh ./ 1.11.4 0.7.1 1.11.1 https://github.com/weaveworks/weave/releases/download/latest_release/weave-daemonset-k8s-1.11.yaml https://raw.githubusercontent.com/kubernetes/dashboard/v1.10.0/src/deploy/recommended/kubernetes-dashboard.yaml" + exit 1 +fi + +RELEASE="v${2}" +output_dir="${1}" +start_dir="$PWD" +iso_dir="/tmp/iso" +working_dir="${iso_dir}/" +mkdir -p "${working_dir}" + +CNI_VERSION="v${3}" +echo "Downloading CNI ${CNI_VERSION}..." 
+cni_dir="${working_dir}/cni/" +mkdir -p "${cni_dir}" +curl -L "https://github.com/containernetworking/plugins/releases/download/${CNI_VERSION}/cni-plugins-amd64-${CNI_VERSION}.tgz" -o "${cni_dir}/cni-plugins-amd64.tgz" + +CRICTL_VERSION="v${4}" +echo "Downloading CRI tools ${CRICTL_VERSION}..." +crictl_dir="${working_dir}/cri-tools/" +mkdir -p "${crictl_dir}" +curl -L "https://github.com/kubernetes-incubator/cri-tools/releases/download/${CRICTL_VERSION}/crictl-${CRICTL_VERSION}-linux-amd64.tar.gz" -o "${crictl_dir}/crictl-linux-amd64.tar.gz" + +echo "Downloading Kubernetes tools ${RELEASE}..." +k8s_dir="${working_dir}/k8s" +mkdir -p "${k8s_dir}" +cd "${k8s_dir}" +curl -L --remote-name-all https://storage.googleapis.com/kubernetes-release/release/${RELEASE}/bin/linux/amd64/{kubeadm,kubelet,kubectl} +kubeadm_file_permissions=`stat --format '%a' kubeadm` +chmod +x kubeadm + +echo "Downloading kubelet.service ${RELEASE}..." +cd $start_dir +kubelet_service_file="${working_dir}/kubelet.service" +touch "${kubelet_service_file}" +curl -sSL "https://raw.githubusercontent.com/kubernetes/kubernetes/${RELEASE}/build/debs/kubelet.service" | sed "s:/usr/bin:/opt/bin:g" > ${kubelet_service_file} + +echo "Downloading 10-kubeadm.conf ${RELEASE}..." +kubeadm_conf_file="${working_dir}/10-kubeadm.conf" +touch "${kubeadm_conf_file}" +curl -sSL "https://raw.githubusercontent.com/kubernetes/kubernetes/${RELEASE}/build/debs/10-kubeadm.conf" | sed "s:/usr/bin:/opt/bin:g" > ${kubeadm_conf_file} + +NETWORK_CONFIG_URL="${5}" +echo "Downloading network config ${NETWORK_CONFIG_URL}" +network_conf_file="${working_dir}/network.yaml" +curl -sSL ${NETWORK_CONFIG_URL} -o ${network_conf_file} + +DASHBOARD_CONFIG_URL="${6}" +echo "Downloading dashboard config ${DASHBOARD_CONFIG_URL}" +dashboard_conf_file="${working_dir}/dashboard.yaml" +curl -sSL ${DASHBOARD_CONFIG_URL} -o ${dashboard_conf_file} + +echo "Fetching k8s docker images..." +docker -v +if [ $? -ne 0 ]; then + echo "Installing docker..."
+ if [ -f /etc/redhat-release ]; then + sudo yum -y remove docker-common docker container-selinux docker-selinux docker-engine + sudo yum -y install lvm2 device-mapper device-mapper-persistent-data device-mapper-event device-mapper-libs device-mapper-event-libs + sudo yum install -y http://mirror.centos.org/centos/7/extras/x86_64/Packages/container-selinux-2.107-3.el7.noarch.rpm + sudo wget https://download.docker.com/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo && sudo yum -y install docker-ce + sudo systemctl enable docker && sudo systemctl start docker + elif [ -f /etc/lsb-release ]; then + sudo apt update && sudo apt install docker.io -y + sudo systemctl enable docker && sudo systemctl start docker + fi +fi +mkdir -p "${working_dir}/docker" +output=`${k8s_dir}/kubeadm config images list` +while read -r line; do + echo "Downloading docker image $line ---" + sudo docker pull "$line" + image_name=`echo "$line" | grep -oE "[^/]+$"` + sudo docker save "$line" > "${working_dir}/docker/$image_name.tar" + sudo docker image rm "$line" +done <<< "$output" + +echo "Restore kubeadm permissions..." 
+if [ -z "${kubeadm_file_permissions}" ]; then + kubeadm_file_permissions=644 +fi +chmod ${kubeadm_file_permissions} "${working_dir}/k8s/kubeadm" + +mkisofs -o "${output_dir}/setup-${RELEASE}.iso" -J -R -l "${iso_dir}" + +rm -rf "${iso_dir}" diff --git a/server/src/main/java/com/cloud/api/query/dao/NetworkOfferingJoinDao.java b/server/src/main/java/com/cloud/api/query/dao/NetworkOfferingJoinDao.java index 362cabb304f..767b9acf5d4 100644 --- a/server/src/main/java/com/cloud/api/query/dao/NetworkOfferingJoinDao.java +++ b/server/src/main/java/com/cloud/api/query/dao/NetworkOfferingJoinDao.java @@ -27,9 +27,27 @@ import com.cloud.utils.db.GenericDao; public interface NetworkOfferingJoinDao extends GenericDao { - List findByDomainId(long domainId); + /** + * Returns list of network offerings for a given domain + * NetworkOfferingJoinVO can have multiple domains set. Method will search for + * given domainId in list of domains for the offering. + * @param domainId the domain id to search for + * @param includeAllDomainOffering (if set to true offerings for which domain + * is not set will also be returned) + * @return List List of network offerings + */ + List findByDomainId(long domainId, Boolean includeAllDomainOffering); - List findByZoneId(long zoneId); + /** + * Returns list of network offerings for a given zone + * NetworkOfferingJoinVO can have multiple zones set. Method will search for + * given zoneId in list of zones for the offering.
+ * @param zoneId the zone id to search for + * @param includeAllZoneOffering (if set to true offerings for which zone + * is not set will also be returned) + * @return List List of network offerings + */ + List findByZoneId(long zoneId, Boolean includeAllZoneOffering); NetworkOfferingResponse newNetworkOfferingResponse(NetworkOffering nof); diff --git a/server/src/main/java/com/cloud/api/query/dao/NetworkOfferingJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/NetworkOfferingJoinDaoImpl.java index b53aef85d46..0c258d1966a 100644 --- a/server/src/main/java/com/cloud/api/query/dao/NetworkOfferingJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/NetworkOfferingJoinDaoImpl.java @@ -43,9 +43,12 @@ public class NetworkOfferingJoinDaoImpl extends GenericDaoBase findByDomainId(long domainId) { + public List findByDomainId(long domainId, Boolean includeAllDomainOffering) { SearchBuilder sb = createSearchBuilder(); sb.and("domainId", sb.entity().getDomainId(), SearchCriteria.Op.FIND_IN_SET); + if (Boolean.TRUE.equals(includeAllDomainOffering)) { + sb.or("dId", sb.entity().getDomainId(), SearchCriteria.Op.NULL); + } sb.done(); SearchCriteria sc = sb.create(); @@ -54,9 +57,12 @@ public class NetworkOfferingJoinDaoImpl extends GenericDaoBase findByZoneId(long zoneId) { + public List findByZoneId(long zoneId, Boolean includeAllZoneOffering) { SearchBuilder sb = createSearchBuilder(); sb.and("zoneId", sb.entity().getZoneId(), SearchCriteria.Op.FIND_IN_SET); + if (Boolean.TRUE.equals(includeAllZoneOffering)) { + sb.or("zId", sb.entity().getZoneId(), SearchCriteria.Op.NULL); + } sb.done(); SearchCriteria sc = sb.create(); diff --git a/server/src/main/java/com/cloud/network/IpAddressManagerImpl.java b/server/src/main/java/com/cloud/network/IpAddressManagerImpl.java index 409b88f53c4..ddb596a840b 100644 --- a/server/src/main/java/com/cloud/network/IpAddressManagerImpl.java +++ b/server/src/main/java/com/cloud/network/IpAddressManagerImpl.java @@ -300,6 +300,72 @@ public class IpAddressManagerImpl extends
ManagerBase implements IpAddressManage private Random rand = new Random(System.currentTimeMillis()); + @DB + private IPAddressVO assignAndAllocateIpAddressEntry(final Account owner, final VlanType vlanUse, final Long guestNetworkId, + final boolean sourceNat, final boolean allocate, final boolean isSystem, + final Long vpcId, final Boolean displayIp, final boolean fetchFromDedicatedRange, + final List addressVOS) throws CloudRuntimeException { + return Transaction.execute((TransactionCallbackWithException) status -> { + IPAddressVO finalAddress = null; + if (!fetchFromDedicatedRange && VlanType.VirtualNetwork.equals(vlanUse)) { + // Check that the maximum number of public IPs for the given accountId will not be exceeded + try { + _resourceLimitMgr.checkResourceLimit(owner, ResourceType.public_ip); + } catch (ResourceAllocationException ex) { + s_logger.warn("Failed to allocate resource of type " + ex.getResourceType() + " for account " + owner); + throw new AccountLimitException("Maximum number of public IP addresses for account: " + owner.getAccountName() + " has been exceeded."); + } + } + + for (final IPAddressVO possibleAddr : addressVOS) { + if (possibleAddr.getState() != State.Free) { + continue; + } + final IPAddressVO addressVO = possibleAddr; + addressVO.setSourceNat(sourceNat); + addressVO.setAllocatedTime(new Date()); + addressVO.setAllocatedInDomainId(owner.getDomainId()); + addressVO.setAllocatedToAccountId(owner.getId()); + addressVO.setSystem(isSystem); + + if (displayIp != null) { + addressVO.setDisplay(displayIp); + } + + if (vlanUse != VlanType.DirectAttached) { + addressVO.setAssociatedWithNetworkId(guestNetworkId); + addressVO.setVpcId(vpcId); + } + if (_ipAddressDao.lockRow(possibleAddr.getId(), true) != null) { + final IPAddressVO userIp = _ipAddressDao.findById(addressVO.getId()); + if (userIp.getState() == State.Free) { + addressVO.setState(State.Allocating); + if (_ipAddressDao.update(addressVO.getId(), addressVO)) { + finalAddress = 
addressVO; + break; + } + } + } + } + + if (finalAddress == null) { + s_logger.error("Failed to fetch any free public IP address"); + throw new CloudRuntimeException("Failed to fetch any free public IP address"); + } + + if (allocate) { + markPublicIpAsAllocated(finalAddress); + } + + final State expectedAddressState = allocate ? State.Allocated : State.Allocating; + if (finalAddress.getState() != expectedAddressState) { + s_logger.error("Failed to fetch new public IP and get in expected state=" + expectedAddressState); + throw new CloudRuntimeException("Failed to fetch new public IP with expected state " + expectedAddressState); + } + return finalAddress; + }); + } + @Override public boolean configure(String name, Map params) { // populate providers @@ -694,9 +760,23 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage return fetchNewPublicIp(dcId, podId, vlanDbIds, owner, type, networkId, false, true, requestedIp, isSystem, null, null, false); } + @Override + public PublicIp getAvailablePublicIpAddressFromVlans(long dcId, Long podId, Account owner, VlanType type, List vlanDbIds, Long networkId, String requestedIp, boolean isSystem) + throws InsufficientAddressCapacityException { + return fetchNewPublicIp(dcId, podId, vlanDbIds, owner, type, networkId, false, false, false, requestedIp, isSystem, null, null, false); + } + @DB public PublicIp fetchNewPublicIp(final long dcId, final Long podId, final List vlanDbIds, final Account owner, final VlanType vlanUse, final Long guestNetworkId, - final boolean sourceNat, final boolean assign, final String requestedIp, final boolean isSystem, final Long vpcId, final Boolean displayIp, final boolean forSystemVms) + final boolean sourceNat, final boolean allocate, final String requestedIp, final boolean isSystem, final Long vpcId, final Boolean displayIp, final boolean forSystemVms) + throws InsufficientAddressCapacityException { + return fetchNewPublicIp(dcId, podId, vlanDbIds, owner, vlanUse, 
guestNetworkId, + sourceNat, true, allocate, requestedIp, isSystem, vpcId, displayIp, forSystemVms); + } + + @DB + public PublicIp fetchNewPublicIp(final long dcId, final Long podId, final List vlanDbIds, final Account owner, final VlanType vlanUse, final Long guestNetworkId, + final boolean sourceNat, final boolean assign, final boolean allocate, final String requestedIp, final boolean isSystem, final Long vpcId, final Boolean displayIp, final boolean forSystemVms) throws InsufficientAddressCapacityException { IPAddressVO addr = Transaction.execute(new TransactionCallbackWithException() { @Override @@ -807,64 +887,13 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage } assert(addrs.size() == 1) : "Return size is incorrect: " + addrs.size(); - - if (!fetchFromDedicatedRange && VlanType.VirtualNetwork.equals(vlanUse)) { - // Check that the maximum number of public IPs for the given accountId will not be exceeded - try { - _resourceLimitMgr.checkResourceLimit(owner, ResourceType.public_ip); - } catch (ResourceAllocationException ex) { - s_logger.warn("Failed to allocate resource of type " + ex.getResourceType() + " for account " + owner); - throw new AccountLimitException("Maximum number of public IP addresses for account: " + owner.getAccountName() + " has been exceeded."); - } - } - IPAddressVO finalAddr = null; - for (final IPAddressVO possibleAddr: addrs) { - if (possibleAddr.getState() != IpAddress.State.Free) { - continue; - } - final IPAddressVO addr = possibleAddr; - addr.setSourceNat(sourceNat); - addr.setAllocatedTime(new Date()); - addr.setAllocatedInDomainId(owner.getDomainId()); - addr.setAllocatedToAccountId(owner.getId()); - addr.setSystem(isSystem); - - if (displayIp != null) { - addr.setDisplay(displayIp); - } - - if (vlanUse != VlanType.DirectAttached) { - addr.setAssociatedWithNetworkId(guestNetworkId); - addr.setVpcId(vpcId); - } - if (_ipAddressDao.lockRow(possibleAddr.getId(), true) != null) { - final IPAddressVO 
userIp = _ipAddressDao.findById(addr.getId()); - if (userIp.getState() == IpAddress.State.Free) { - addr.setState(IpAddress.State.Allocating); - if (_ipAddressDao.update(addr.getId(), addr)) { - finalAddr = addr; - break; - } - } - } - } - - if (finalAddr == null) { - s_logger.error("Failed to fetch any free public IP address"); - throw new CloudRuntimeException("Failed to fetch any free public IP address"); - } - if (assign) { - markPublicIpAsAllocated(finalAddr); + finalAddr = assignAndAllocateIpAddressEntry(owner, vlanUse, guestNetworkId, sourceNat, allocate, + isSystem,vpcId, displayIp, fetchFromDedicatedRange, addrs); + } else { + finalAddr = addrs.get(0); } - - final State expectedAddressState = assign ? State.Allocated : State.Allocating; - if (finalAddr.getState() != expectedAddressState) { - s_logger.error("Failed to fetch new public IP and get in expected state=" + expectedAddressState); - throw new CloudRuntimeException("Failed to fetch new public IP with expected state " + expectedAddressState); - } - return finalAddr; } }); diff --git a/server/src/main/java/com/cloud/server/ManagementServerImpl.java b/server/src/main/java/com/cloud/server/ManagementServerImpl.java index a4fac2461b7..e02f0167cb8 100644 --- a/server/src/main/java/com/cloud/server/ManagementServerImpl.java +++ b/server/src/main/java/com/cloud/server/ManagementServerImpl.java @@ -3516,6 +3516,9 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe final boolean allowUserViewAllDomainAccounts = (QueryService.AllowUserViewAllDomainAccounts.valueIn(caller.getDomainId())); + final boolean kubernetesServiceEnabled = Boolean.parseBoolean(_configDao.getValue("cloud.kubernetes.service.enabled")); + final boolean kubernetesClusterExperimentalFeaturesEnabled = Boolean.parseBoolean(_configDao.getValue("cloud.kubernetes.cluster.experimental.features.enabled")); + // check if region-wide secondary storage is used boolean regionSecondaryEnabled = false; final List imgStores 
= _imgStoreDao.findRegionImageStores(); @@ -3537,6 +3540,8 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe capabilities.put("allowUserExpungeRecoverVM", allowUserExpungeRecoverVM); capabilities.put("allowUserExpungeRecoverVolume", allowUserExpungeRecoverVolume); capabilities.put("allowUserViewAllDomainAccounts", allowUserViewAllDomainAccounts); + capabilities.put("kubernetesServiceEnabled", kubernetesServiceEnabled); + capabilities.put("kubernetesClusterExperimentalFeaturesEnabled", kubernetesClusterExperimentalFeaturesEnabled); if (apiLimitEnabled) { capabilities.put("apiLimitInterval", apiLimitInterval); capabilities.put("apiLimitMax", apiLimitMax); diff --git a/test/integration/smoke/test_kubernetes_clusters.py b/test/integration/smoke/test_kubernetes_clusters.py new file mode 100644 index 00000000000..021a7490ff2 --- /dev/null +++ b/test/integration/smoke/test_kubernetes_clusters.py @@ -0,0 +1,729 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+""" Tests for Kubernetes supported version """ + +#Import Local Modules +from marvin.cloudstackTestCase import cloudstackTestCase, unittest +from marvin.cloudstackAPI import (listInfrastructure, + listKubernetesSupportedVersions, + addKubernetesSupportedVersion, + deleteKubernetesSupportedVersion, + createKubernetesCluster, + stopKubernetesCluster, + deleteKubernetesCluster, + upgradeKubernetesCluster, + scaleKubernetesCluster) +from marvin.cloudstackException import CloudstackAPIException +from marvin.codes import FAILED +from marvin.lib.base import (Template, + ServiceOffering, + Configurations) +from marvin.lib.utils import (cleanup_resources, + random_gen) +from marvin.lib.common import (get_zone) +from marvin.sshClient import SshClient +from nose.plugins.attrib import attr + +import time + +_multiprocess_shared_ = True + +class TestKubernetesCluster(cloudstackTestCase): + + @classmethod + def setUpClass(cls): + cls.testClient = super(TestKubernetesCluster, cls).getClsTestClient() + cls.apiclient = cls.testClient.getApiClient() + cls.services = cls.testClient.getParsedTestDataConfig() + cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests()) + cls.hypervisor = cls.testClient.getHypervisorInfo() + cls.mgtSvrDetails = cls.config.__dict__["mgtSvr"][0].__dict__ + cls.cks_template_name_key = "cloud.kubernetes.cluster.template.name." 
+ cls.hypervisor.lower() + + cls.setup_failed = False + + cls.initial_configuration_cks_enabled = Configurations.list(cls.apiclient, + name="cloud.kubernetes.service.enabled")[0].value + if cls.initial_configuration_cks_enabled not in ["true", True]: + cls.debug("Enabling CloudStack Kubernetes Service plugin and restarting management server") + Configurations.update(cls.apiclient, + "cloud.kubernetes.service.enabled", + "true") + cls.restartServer() + + cls.cks_template = None + cls.initial_configuration_cks_template_name = None + cls.cks_service_offering = None + + cls.kubernetes_version_ids = [] + if cls.setup_failed == False: + try: + cls.kuberetes_version_1 = cls.addKubernetesSupportedVersion('1.14.9', 'http://staging.yadav.xyz/cks/binaries-iso/setup-1.14.9.iso') + cls.kubernetes_version_ids.append(cls.kuberetes_version_1.id) + except Exception as e: + cls.setup_failed = True + cls.debug("Failed to get Kubernetes version ISO in ready state, http://staging.yadav.xyz/cks/binaries-iso/setup-1.14.9.iso, %s" % e) + if cls.setup_failed == False: + try: + cls.kuberetes_version_2 = cls.addKubernetesSupportedVersion('1.15.0', 'http://staging.yadav.xyz/cks/binaries-iso/setup-1.15.0.iso') + cls.kubernetes_version_ids.append(cls.kuberetes_version_2.id) + except Exception as e: + cls.setup_failed = True + cls.debug("Failed to get Kubernetes version ISO in ready state, http://staging.yadav.xyz/cks/binaries-iso/setup-1.15.0.iso, %s" % e) + if cls.setup_failed == False: + try: + cls.kuberetes_version_3 = cls.addKubernetesSupportedVersion('1.16.0', 'http://staging.yadav.xyz/cks/binaries-iso/setup-1.16.0.iso') + cls.kubernetes_version_ids.append(cls.kuberetes_version_3.id) + except Exception as e: + cls.setup_failed = True + cls.debug("Failed to get Kubernetes version ISO in ready state, http://staging.yadav.xyz/cks/binaries-iso/setup-1.16.0.is, %s" % e) + if cls.setup_failed == False: + try: + cls.kuberetes_version_4 = cls.addKubernetesSupportedVersion('1.16.3', 
'http://staging.yadav.xyz/cks/binaries-iso/setup-1.16.3.iso') + cls.kubernetes_version_ids.append(cls.kuberetes_version_4.id) + except Exception as e: + cls.setup_failed = True + cls.debug("Failed to get Kubernetes version ISO in ready state, http://staging.yadav.xyz/cks/binaries-iso/setup-1.16.3.is, %s" % e) + + cks_template_data = { + "name": "Kubernetes-Service-Template", + "displaytext": "Kubernetes-Service-Template", + "format": "qcow2", + "hypervisor": "kvm", + "ostype": "CoreOS", + "url": "http://staging.yadav.xyz/cks/templates/coreos_production_cloudstack_image-kvm.qcow2.bz2", + "ispublic": "True", + "isextractable": "True" + } + # "http://dl.openvm.eu/cloudstack/coreos/x86_64/coreos_production_cloudstack_image-kvm.qcow2.bz2" + cks_template_data_details = [] + if cls.hypervisor.lower() == "vmware": + cks_template_data["url"] = "http://staging.yadav.xyz/cks/templates/coreos_production_cloudstack_image-vmware.ova" # "http://dl.openvm.eu/cloudstack/coreos/x86_64/coreos_production_cloudstack_image-vmware.ova" + cks_template_data["format"] = "OVA" + cks_template_data_details = [{"keyboard":"us","nicAdapter":"Vmxnet3","rootDiskController":"pvscsi"}] + elif cls.hypervisor.lower() == "xenserver": + cks_template_data["url"] = "http://staging.yadav.xyz/cks/templates/coreos_production_cloudstack_image-xen.vhd.bz2" # "http://dl.openvm.eu/cloudstack/coreos/x86_64/coreos_production_cloudstack_image-xen.vhd.bz2" + cks_template_data["format"] = "VHD" + elif cls.hypervisor.lower() == "kvm": + cks_template_data["requireshvm"] = "True" + if cls.setup_failed == False: + cls.cks_template = Template.register( + cls.apiclient, + cks_template_data, + zoneid=cls.zone.id, + hypervisor=cls.hypervisor, + details=cks_template_data_details + ) + cls.debug("Waiting for CKS template with ID %s to be ready" % cls.cks_template.id) + try: + cls.waitForTemplateReadyState(cls.cks_template.id) + except Exception as e: + cls.setup_failed = True + cls.debug("Failed to get CKS template in ready 
state, {}, {}".format(cks_template_data["url"], e)) + + cls.initial_configuration_cks_template_name = Configurations.list(cls.apiclient, + name=cls.cks_template_name_key)[0].value + Configurations.update(cls.apiclient, + cls.cks_template_name_key, + cls.cks_template.name) + + cks_offering_data = { + "name": "CKS-Instance", + "displaytext": "CKS Instance", + "cpunumber": 2, + "cpuspeed": 1000, + "memory": 2048, + } + cks_offering_data["name"] = cks_offering_data["name"] + '-' + random_gen() + if cls.setup_failed == False: + cls.cks_service_offering = ServiceOffering.create( + cls.apiclient, + cks_offering_data + ) + + cls._cleanup = [] + if cls.cks_template != None: + cls._cleanup.append(cls.cks_template) + if cls.cks_service_offering != None: + cls._cleanup.append(cls.cks_service_offering) + return + + @classmethod + def tearDownClass(cls): + version_delete_failed = False + # Delete added Kubernetes supported version + for version_id in cls.kubernetes_version_ids: + try: + cls.deleteKubernetesSupportedVersion(version_id) + except Exception as e: + version_delete_failed = True + cls.debug("Error: Exception during cleanup for added Kubernetes supported versions: %s" % e) + try: + # Restore original CKS template + if cls.initial_configuration_cks_template_name != None: + Configurations.update(cls.apiclient, + cls.cks_template_name_key, + cls.initial_configuration_cks_template_name) + # Delete created CKS template + if cls.setup_failed == False and cls.cks_template != None: + cls.cks_template.delete(cls.apiclient, + cls.zone.id) + # Restore CKS enabled + if cls.initial_configuration_cks_enabled not in ["true", True]: + cls.debug("Restoring Kubernetes Service enabled value") + Configurations.update(cls.apiclient, + "cloud.kubernetes.service.enabled", + "false") + cls.restartServer() + + cleanup_resources(cls.apiclient, cls._cleanup) + except Exception as e: + raise Exception("Warning: Exception during cleanup : %s" % e) + if version_delete_failed == True: + raise 
Exception("Warning: Exception during cleanup, unable to delete Kubernetes supported versions") + return + + @classmethod + def restartServer(cls): + """Restart management server""" + + cls.debug("Restarting management server") + sshClient = SshClient( + cls.mgtSvrDetails["mgtSvrIp"], + 22, + cls.mgtSvrDetails["user"], + cls.mgtSvrDetails["passwd"] + ) + command = "service cloudstack-management stop" + sshClient.execute(command) + + command = "service cloudstack-management start" + sshClient.execute(command) + + #Waits for management to come up in 5 mins, when it's up it will continue + timeout = time.time() + 300 + while time.time() < timeout: + if cls.isManagementUp() is True: return + time.sleep(5) + cls.setup_failed = True + cls.debug("Management server did not come up, failing") + return + + @classmethod + def isManagementUp(cls): + try: + cls.apiclient.listInfrastructure(listInfrastructure.listInfrastructureCmd()) + return True + except Exception: + return False + + @classmethod + def waitForTemplateReadyState(cls, template_id, retries=30, interval=30): + """Check if template download will finish""" + while retries > -1: + time.sleep(interval) + template_response = Template.list( + cls.apiclient, + id=template_id, + zoneid=cls.zone.id, + templatefilter='self' + ) + + if isinstance(template_response, list): + template = template_response[0] + if not hasattr(template, 'status') or not template or not template.status: + retries = retries - 1 + continue + if 'Failed' == template.status: + raise Exception("Failed to download template: status - %s" % template.status) + elif template.status == 'Download Complete' and template.isready: + return + retries = retries - 1 + raise Exception("Template download timed out") + + @classmethod + def waitForKubernetesSupportedVersionIsoReadyState(cls, version_id, retries=20, interval=30): + """Check if Kubernetes supported version ISO is in Ready state""" + + while retries > -1: + time.sleep(interval) + list_versions_response = 
cls.listKubernetesSupportedVersion(version_id) + if not hasattr(list_versions_response, 'isostate') or not list_versions_response or not list_versions_response.isostate: + retries = retries - 1 + continue + if 'Creating' == list_versions_response.isostate: + retries = retries - 1 + elif 'Ready' == list_versions_response.isostate: + return + elif 'Failed' == list_versions_response.isostate: + raise Exception("Failed to download Kubernetes supported version ISO: status - %s" % list_versions_response.isostate) + else: + raise Exception( + "Failed to download Kubernetes supported version ISO: status - %s" % + list_versions_response.isostate) + raise Exception("Kubernetes supported version Ready state timed out") + + @classmethod + def listKubernetesSupportedVersion(cls, version_id): + listKubernetesSupportedVersionsCmd = listKubernetesSupportedVersions.listKubernetesSupportedVersionsCmd() + listKubernetesSupportedVersionsCmd.id = version_id + versionResponse = cls.apiclient.listKubernetesSupportedVersions(listKubernetesSupportedVersionsCmd) + return versionResponse[0] + + @classmethod + def addKubernetesSupportedVersion(cls, semantic_version, iso_url): + addKubernetesSupportedVersionCmd = addKubernetesSupportedVersion.addKubernetesSupportedVersionCmd() + addKubernetesSupportedVersionCmd.semanticversion = semantic_version + addKubernetesSupportedVersionCmd.name = 'v' + semantic_version + '-' + random_gen() + addKubernetesSupportedVersionCmd.url = iso_url + addKubernetesSupportedVersionCmd.mincpunumber = 2 + addKubernetesSupportedVersionCmd.minmemory = 2048 + kubernetes_version = cls.apiclient.addKubernetesSupportedVersion(addKubernetesSupportedVersionCmd) + cls.debug("Waiting for Kubernetes version with ID %s to be ready" % kubernetes_version.id) + cls.waitForKubernetesSupportedVersionIsoReadyState(kubernetes_version.id) + kubernetes_version = cls.listKubernetesSupportedVersion(kubernetes_version.id) + return kubernetes_version + + @classmethod + def deleteKubernetesSupportedVersion(cls, version_id): + 
deleteKubernetesSupportedVersionCmd = deleteKubernetesSupportedVersion.deleteKubernetesSupportedVersionCmd() + deleteKubernetesSupportedVersionCmd.id = version_id + deleteKubernetesSupportedVersionCmd.deleteiso = True + cls.apiclient.deleteKubernetesSupportedVersion(deleteKubernetesSupportedVersionCmd) + + def setUp(self): + self.services = self.testClient.getParsedTestDataConfig() + self.apiclient = self.testClient.getApiClient() + self.dbclient = self.testClient.getDbConnection() + self.cleanup = [] + return + + def tearDown(self): + try: + #Clean up, terminate the created templates + cleanup_resources(self.apiclient, self.cleanup) + + except Exception as e: + raise Exception("Warning: Exception during cleanup : %s" % e) + return + + @attr(tags=["advanced", "smoke"], required_hardware="true") + def test_01_deploy_kubernetes_cluster(self): + """Test to deploy a new Kubernetes cluster + + # Validate the following: + # 1. createKubernetesCluster should return valid info for new cluster + # 2. The Cloud Database contains the valid information + # 3. 
stopKubernetesCluster should stop the cluster + """ + if self.hypervisor.lower() not in ["kvm", "vmware", "xenserver"]: + self.skipTest("CKS not supported for hypervisor: %s" % self.hypervisor.lower()) + if self.setup_failed == True: + self.skipTest("Setup incomplete") + name = 'testcluster-' + random_gen() + self.debug("Creating for Kubernetes cluster with name %s" % name) + + cluster_response = self.createKubernetesCluster(name, self.kuberetes_version_2.id) + + self.verifyKubernetesCluster(cluster_response, name, self.kuberetes_version_2.id) + + self.debug("Kubernetes cluster with ID: %s successfully deployed, now stopping it" % cluster_response.id) + + self.stopAndVerifyKubernetesCluster(cluster_response.id) + + self.debug("Kubernetes cluster with ID: %s successfully stopped, now deleting it" % cluster_response.id) + + self.deleteAndVerifyKubernetesCluster(cluster_response.id) + + self.debug("Kubernetes cluster with ID: %s successfully deleted" % cluster_response.id) + + return + + @attr(tags=["advanced", "smoke"], required_hardware="true") + def test_02_deploy_kubernetes_ha_cluster(self): + """Test to deploy a new Kubernetes cluster + + # Validate the following: + # 1. createKubernetesCluster should return valid info for new cluster + # 2. 
The Cloud Database contains the valid information + """ + if self.hypervisor.lower() not in ["kvm", "vmware", "xenserver"]: + self.skipTest("CKS not supported for hypervisor: %s" % self.hypervisor.lower()) + if self.setup_failed == True: + self.skipTest("Setup incomplete") + name = 'testcluster-' + random_gen() + self.debug("Creating for Kubernetes cluster with name %s" % name) + + cluster_response = self.createKubernetesCluster(name, self.kuberetes_version_3.id, 1, 2) + + self.verifyKubernetesCluster(cluster_response, name, self.kuberetes_version_3.id, 1, 2) + + self.debug("Kubernetes cluster with ID: %s successfully deployed, now deleting it" % cluster_response.id) + + self.deleteAndVerifyKubernetesCluster(cluster_response.id) + + self.debug("Kubernetes cluster with ID: %s successfully deleted" % cluster_response.id) + + return + + @attr(tags=["advanced", "smoke"], required_hardware="true") + def test_03_deploy_invalid_kubernetes_ha_cluster(self): + """Test to deploy a new Kubernetes cluster + + # Validate the following: + # 1. createKubernetesCluster should return valid info for new cluster + # 2. The Cloud Database contains the valid information + """ + if self.hypervisor.lower() not in ["kvm", "vmware", "xenserver"]: + self.skipTest("CKS not supported for hypervisor: %s" % self.hypervisor.lower()) + if self.setup_failed == True: + self.skipTest("Setup incomplete") + name = 'testcluster-' + random_gen() + self.debug("Creating for Kubernetes cluster with name %s" % name) + + try: + cluster_response = self.createKubernetesCluster(name, self.kuberetes_version_2.id, 1, 2) + self.debug("Invslid CKS Kubernetes HA cluster deployed with ID: %s. Deleting it and failing test." % cluster_response.id) + self.deleteKubernetesCluster(cluster_response.id) + self.fail("HA Kubernetes cluster deployed with Kubernetes supported version below version 1.16.0. 
Must be an error.") + except CloudstackAPIException as e: + self.debug("HA Kubernetes cluster with invalid Kubernetes supported version check successful, API failure: %s" % e) + + return + + @attr(tags=["advanced", "smoke"], required_hardware="true") + def test_04_deploy_and_upgrade_kubernetes_cluster(self): + """Test to deploy a new Kubernetes cluster and upgrade it to newer version + + # Validate the following: + # 1. createKubernetesCluster should return valid info for new cluster + # 2. The Cloud Database contains the valid information + # 3. upgradeKubernetesCluster should return valid info for the cluster + """ + if self.hypervisor.lower() not in ["kvm", "vmware", "xenserver"]: + self.skipTest("CKS not supported for hypervisor: %s" % self.hypervisor.lower()) + if self.setup_failed == True: + self.skipTest("Setup incomplete") + name = 'testcluster-' + random_gen() + self.debug("Creating for Kubernetes cluster with name %s" % name) + + cluster_response = self.createKubernetesCluster(name, self.kuberetes_version_2.id) + + self.verifyKubernetesCluster(cluster_response, name, self.kuberetes_version_2.id) + + self.debug("Kubernetes cluster with ID: %s successfully deployed, now upgrading it" % cluster_response.id) + + try: + cluster_response = self.upgradeKubernetesCluster(cluster_response.id, self.kuberetes_version_3.id) + except Exception as e: + self.deleteKubernetesCluster(cluster_response.id) + self.fail("Failed to upgrade Kubernetes cluster due to: %s" % e) + + self.verifyKubernetesClusterUpgrade(cluster_response, self.kuberetes_version_3.id) + + self.debug("Kubernetes cluster with ID: %s successfully upgraded, now deleting it" % cluster_response.id) + + self.deleteAndVerifyKubernetesCluster(cluster_response.id) + + self.debug("Kubernetes cluster with ID: %s successfully deleted" % cluster_response.id) + + return + + + @attr(tags=["advanced", "smoke"], required_hardware="true") + def test_05_deploy_and_upgrade_kubernetes_ha_cluster(self): + """Test to deploy 
a new HA Kubernetes cluster and upgrade it to newer version + + # Validate the following: + # 1. createKubernetesCluster should return valid info for new cluster + # 2. The Cloud Database contains the valid information + # 3. upgradeKubernetesCluster should return valid info for the cluster + """ + if self.hypervisor.lower() not in ["kvm", "vmware", "xenserver"]: + self.skipTest("CKS not supported for hypervisor: %s" % self.hypervisor.lower()) + if self.setup_failed == True: + self.skipTest("Setup incomplete") + name = 'testcluster-' + random_gen() + self.debug("Creating for Kubernetes cluster with name %s" % name) + + cluster_response = self.createKubernetesCluster(name, self.kuberetes_version_3.id, 1, 2) + + self.verifyKubernetesCluster(cluster_response, name, self.kuberetes_version_3.id, 1, 2) + + self.debug("Kubernetes cluster with ID: %s successfully deployed, now upgrading it" % cluster_response.id) + + try: + cluster_response = self.upgradeKubernetesCluster(cluster_response.id, self.kuberetes_version_4.id) + except Exception as e: + self.deleteKubernetesCluster(cluster_response.id) + self.fail("Failed to upgrade Kubernetes HA cluster due to: %s" % e) + + self.verifyKubernetesClusterUpgrade(cluster_response, self.kuberetes_version_4.id) + + self.debug("Kubernetes cluster with ID: %s successfully upgraded, now deleting it" % cluster_response.id) + + self.deleteAndVerifyKubernetesCluster(cluster_response.id) + + self.debug("Kubernetes cluster with ID: %s successfully deleted" % cluster_response.id) + + return + + @attr(tags=["advanced", "smoke"], required_hardware="true") + def test_06_deploy_and_invalid_upgrade_kubernetes_cluster(self): + """Test to deploy a new Kubernetes cluster and check for failure while tying to upgrade it to a lower version + + # Validate the following: + # 1. createKubernetesCluster should return valid info for new cluster + # 2. The Cloud Database contains the valid information + # 3. 
upgradeKubernetesCluster should fail + """ + if self.hypervisor.lower() not in ["kvm", "vmware", "xenserver"]: + self.skipTest("CKS not supported for hypervisor: %s" % self.hypervisor.lower()) + if self.setup_failed == True: + self.skipTest("Setup incomplete") + name = 'testcluster-' + random_gen() + self.debug("Creating for Kubernetes cluster with name %s" % name) + + cluster_response = self.createKubernetesCluster(name, self.kuberetes_version_2.id) + + self.verifyKubernetesCluster(cluster_response, name, self.kuberetes_version_2.id) + + self.debug("Kubernetes cluster with ID: %s successfully deployed, now scaling it" % cluster_response.id) + + try: + cluster_response = self.upgradeKubernetesCluster(cluster_response.id, self.kuberetes_version_1.id) + self.debug("Invalid CKS Kubernetes HA cluster deployed with ID: %s. Deleting it and failing test." % self.kuberetes_version_1.id) + self.deleteKubernetesCluster(cluster_response.id) + self.fail("Kubernetes cluster upgraded to a lower Kubernetes supported version. Must be an error.") + except Exception as e: + self.debug("Upgrading Kubernetes cluster with invalid Kubernetes supported version check successful, API failure: %s" % e) + + self.debug("Deleting Kubernetes cluster with ID: %s" % cluster_response.id) + + self.deleteAndVerifyKubernetesCluster(cluster_response.id) + + self.debug("Kubernetes cluster with ID: %s successfully deleted" % cluster_response.id) + + return + + @attr(tags=["advanced", "smoke"], required_hardware="true") + def test_07_deploy_and_scale_kubernetes_cluster(self): + """Test to deploy a new Kubernetes cluster and check for failure while trying to scale it + + # Validate the following: + # 1. createKubernetesCluster should return valid info for new cluster + # 2. The Cloud Database contains the valid information + # 3. scaleKubernetesCluster should return valid info for the cluster when it is scaled up + # 4. 
scaleKubernetesCluster should return valid info for the cluster when it is scaled down + """ + if self.hypervisor.lower() not in ["kvm", "vmware", "xenserver"]: + self.skipTest("CKS not supported for hypervisor: %s" % self.hypervisor.lower()) + if self.setup_failed == True: + self.skipTest("Setup incomplete") + name = 'testcluster-' + random_gen() + self.debug("Creating for Kubernetes cluster with name %s" % name) + + cluster_response = self.createKubernetesCluster(name, self.kuberetes_version_2.id) + + self.verifyKubernetesCluster(cluster_response, name, self.kuberetes_version_2.id) + + self.debug("Kubernetes cluster with ID: %s successfully deployed, now upscaling it" % cluster_response.id) + + try: + cluster_response = self.scaleKubernetesCluster(cluster_response.id, 2) + except Exception as e: + self.deleteKubernetesCluster(cluster_response.id) + self.fail("Failed to upscale Kubernetes cluster due to: %s" % e) + + self.verifyKubernetesClusterScale(cluster_response, 2) + + self.debug("Kubernetes cluster with ID: %s successfully upscaled, now downscaling it" % cluster_response.id) + + try: + cluster_response = self.scaleKubernetesCluster(cluster_response.id, 1) + except Exception as e: + self.deleteKubernetesCluster(cluster_response.id) + self.fail("Failed to downscale Kubernetes cluster due to: %s" % e) + + self.verifyKubernetesClusterScale(cluster_response) + + self.debug("Kubernetes cluster with ID: %s successfully downscaled, now deleting it" % cluster_response.id) + + self.deleteAndVerifyKubernetesCluster(cluster_response.id) + + self.debug("Kubernetes cluster with ID: %s successfully deleted" % cluster_response.id) + + return + + def listKubernetesCluster(self, cluster_id): + listKubernetesClustersCmd = listKubernetesClusters.listKubernetesClustersCmd() + listKubernetesClustersCmd.id = cluster_id + clusterResponse = self.apiclient.listKubernetesClusters(listKubernetesClustersCmd) + return clusterResponse[0] + + def createKubernetesCluster(self, name, 
version_id, size=1, master_nodes=1): + createKubernetesClusterCmd = createKubernetesCluster.createKubernetesClusterCmd() + createKubernetesClusterCmd.name = name + createKubernetesClusterCmd.description = name + "-description" + createKubernetesClusterCmd.kubernetesversionid = version_id + createKubernetesClusterCmd.size = size + createKubernetesClusterCmd.masternodes = master_nodes + createKubernetesClusterCmd.serviceofferingid = self.cks_service_offering.id + createKubernetesClusterCmd.zoneid = self.zone.id + createKubernetesClusterCmd.noderootdisksize = 10 + clusterResponse = self.apiclient.createKubernetesCluster(createKubernetesClusterCmd) + if clusterResponse: + self.cleanup.append(clusterResponse) + return clusterResponse + + def stopKubernetesCluster(self, cluster_id): + stopKubernetesClusterCmd = stopKubernetesCluster.stopKubernetesClusterCmd() + stopKubernetesClusterCmd.id = cluster_id + response = self.apiclient.stopKubernetesCluster(stopKubernetesClusterCmd) + return response + + def deleteKubernetesCluster(self, cluster_id): + deleteKubernetesClusterCmd = deleteKubernetesCluster.deleteKubernetesClusterCmd() + deleteKubernetesClusterCmd.id = cluster_id + response = self.apiclient.deleteKubernetesCluster(deleteKubernetesClusterCmd) + return response + + def upgradeKubernetesCluster(self, cluster_id, version_id): + upgradeKubernetesClusterCmd = upgradeKubernetesCluster.upgradeKubernetesClusterCmd() + upgradeKubernetesClusterCmd.id = cluster_id + upgradeKubernetesClusterCmd.kubernetesversionid = version_id + response = self.apiclient.upgradeKubernetesCluster(upgradeKubernetesClusterCmd) + return response + + def scaleKubernetesCluster(self, cluster_id, size): + scaleKubernetesClusterCmd = scaleKubernetesCluster.scaleKubernetesClusterCmd() + scaleKubernetesClusterCmd.id = cluster_id + scaleKubernetesClusterCmd.size = size + response = self.apiclient.scaleKubernetesCluster(scaleKubernetesClusterCmd) + return response + + def verifyKubernetesCluster(self, 
cluster_response, name, version_id, size=1, master_nodes=1): + """Check if Kubernetes cluster is valid""" + + self.verifyKubernetesClusterState(cluster_response, 'Running') + + self.assertEqual( + cluster_response.name, + name, + "Check KubernetesCluster name {}, {}".format(cluster_response.name, name) + ) + + self.verifyKubernetesClusterVersion(cluster_response, version_id) + + self.assertEqual( + cluster_response.zoneid, + self.zone.id, + "Check KubernetesCluster zone {}, {}".format(cluster_response.zoneid, self.zone.id) + ) + + self.verifyKubernetesClusterSize(cluster_response, size, master_nodes) + + db_cluster_name = self.dbclient.execute("select name from kubernetes_cluster where uuid = '%s';" % cluster_response.id)[0][0] + + self.assertEqual( + str(db_cluster_name), + name, + "Check KubernetesCluster name in DB {}, {}".format(db_cluster_name, name) + ) + + def verifyKubernetesClusterState(self, cluster_response, state): + """Check if Kubernetes cluster state is Running""" + + self.assertEqual( + cluster_response.state, + 'Running', + "Check KubernetesCluster state {}, {}".format(cluster_response.state, state) + ) + + def verifyKubernetesClusterVersion(self, cluster_response, version_id): + """Check if Kubernetes cluster node sizes are valid""" + + self.assertEqual( + cluster_response.kubernetesversionid, + version_id, + "Check KubernetesCluster version {}, {}".format(cluster_response.kubernetesversionid, version_id) + ) + + def verifyKubernetesClusterSize(self, cluster_response, size=1, master_nodes=1): + """Check if Kubernetes cluster node sizes are valid""" + + self.assertEqual( + cluster_response.size, + size, + "Check KubernetesCluster size {}, {}".format(cluster_response.size, size) + ) + + self.assertEqual( + cluster_response.masternodes, + master_nodes, + "Check KubernetesCluster master nodes {}, {}".format(cluster_response.masternodes, master_nodes) + ) + + def verifyKubernetesClusterUpgrade(self, cluster_response, version_id): + """Check if 
Kubernetes cluster state and version are valid after upgrade""" + + self.verifyKubernetesClusterState(cluster_response, 'Running') + self.verifyKubernetesClusterVersion(cluster_response, version_id) + + def verifyKubernetesClusterScale(self, cluster_response, size=1, master_nodes=1): + """Check if Kubernetes cluster state and node sizes are valid after upgrade""" + + self.verifyKubernetesClusterState(cluster_response, 'Running') + self.verifyKubernetesClusterSize(cluster_response, size, master_nodes) + + def stopAndVerifyKubernetesCluster(self, cluster_id): + """Stop Kubernetes cluster and check if it is really stopped""" + + stop_response = self.stopKubernetesCluster(cluster_id) + + self.assertEqual( + stop_response.success, + True, + "Check KubernetesCluster stop response {}, {}".format(stop_response.success, True) + ) + + db_cluster_state = self.dbclient.execute("select state from kubernetes_cluster where uuid = '%s';" % cluster_id)[0][0] + + self.assertEqual( + db_cluster_state, + 'Stopped', + "KubernetesCluster not stopped in DB, {}".format(db_cluster_state) + ) + + def deleteAndVerifyKubernetesCluster(self, cluster_id): + """Delete Kubernetes cluster and check if it is really deleted""" + + delete_response = self.deleteKubernetesCluster(cluster_id) + + self.assertEqual( + delete_response.success, + True, + "Check KubernetesCluster delete response {}, {}".format(delete_response.success, True) + ) + + db_cluster_removed = self.dbclient.execute("select removed from kubernetes_cluster where uuid = '%s';" % cluster_id)[0][0] + + self.assertNotEqual( + db_cluster_removed, + None, + "KubernetesCluster not removed in DB, {}".format(db_cluster_removed) + ) diff --git a/test/integration/smoke/test_kubernetes_supported_versions.py b/test/integration/smoke/test_kubernetes_supported_versions.py new file mode 100644 index 00000000000..3d699e4b676 --- /dev/null +++ b/test/integration/smoke/test_kubernetes_supported_versions.py @@ -0,0 +1,278 @@ +# Licensed to the Apache 
Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +""" Tests for Kubernetes supported version """ + +#Import Local Modules +from marvin.cloudstackTestCase import cloudstackTestCase, unittest +from marvin.cloudstackAPI import (listInfrastructure, + listKubernetesSupportedVersions, + addKubernetesSupportedVersion, + deleteKubernetesSupportedVersion) +from marvin.cloudstackException import CloudstackAPIException +from marvin.codes import FAILED +from marvin.lib.base import Configurations +from marvin.lib.utils import (cleanup_resources, + random_gen) +from marvin.lib.common import get_zone +from marvin.sshClient import SshClient +from nose.plugins.attrib import attr + +import time + +_multiprocess_shared_ = True + +class TestKubernetesSupportedVersion(cloudstackTestCase): + + @classmethod + def setUpClass(cls): + cls.testClient = super(TestKubernetesSupportedVersion, cls).getClsTestClient() + cls.apiclient = cls.testClient.getApiClient() + cls.services = cls.testClient.getParsedTestDataConfig() + cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests()) + cls.mgtSvrDetails = cls.config.__dict__["mgtSvr"][0].__dict__ + cls.kubernetes_version_iso_url = 'http://staging.yadav.xyz/cks/binaries-iso/setup-1.16.3.iso' + + cls.initial_configuration_cks_enabled 
= Configurations.list(cls.apiclient, + name="cloud.kubernetes.service.enabled")[0].value + if cls.initial_configuration_cks_enabled not in ["true", True]: + cls.debug("Enabling CloudStack Kubernetes Service plugin and restarting management server") + Configurations.update(cls.apiclient, + "cloud.kubernetes.service.enabled", + "true") + cls.restartServer() + + cls._cleanup = [] + return + + @classmethod + def tearDownClass(cls): + try: + # Restore CKS enabled + if cls.initial_configuration_cks_enabled not in ["true", True]: + cls.debug("Restoring Kubernetes Service enabled value") + Configurations.update(cls.apiclient, + "cloud.kubernetes.service.enabled", + "false") + cls.restartServer() + cleanup_resources(cls.apiclient, cls._cleanup) + except Exception as e: + raise Exception("Warning: Exception during cleanup : %s" % e) + return + + @classmethod + def restartServer(cls): + """Restart management server""" + + cls.debug("Restarting management server") + sshClient = SshClient( + cls.mgtSvrDetails["mgtSvrIp"], + 22, + cls.mgtSvrDetails["user"], + cls.mgtSvrDetails["passwd"] + ) + command = "service cloudstack-management stop" + sshClient.execute(command) + + command = "service cloudstack-management start" + sshClient.execute(command) + + #Waits for management to come up in 5 mins, when it's up it will continue + timeout = time.time() + 300 + while time.time() < timeout: + if cls.isManagementUp() is True: return + time.sleep(5) + return cls.fail("Management server did not come up, failing") + + @classmethod + def isManagementUp(cls): + try: + cls.apiclient.listInfrastructure(listInfrastructure.listInfrastructureCmd()) + return True + except Exception: + return False + + def setUp(self): + self.services = self.testClient.getParsedTestDataConfig() + self.apiclient = self.testClient.getApiClient() + self.dbclient = self.testClient.getDbConnection() + self.cleanup = [] + return + + def tearDown(self): + try: + #Clean up, terminate the created templates + 
cleanup_resources(self.apiclient, self.cleanup) + + except Exception as e: + raise Exception("Warning: Exception during cleanup : %s" % e) + return + + @attr(tags=["advanced", "smoke"], required_hardware="true") + def test_01_add_delete_kubernetes_supported_version(self): + """Test to add a new Kubernetes supported version + + # Validate the following: + # 1. addKubernetesSupportedVersion should return valid info for new version + # 2. The Cloud Database contains the valid information when listKubernetesSupportedVersions is called + """ + + version = '1.16.3' + name = 'v' + version + '-' + random_gen() + + self.debug("Adding Kubernetes supported version with name: %s" % name) + + version_response = self.addKubernetesSupportedVersion(version, name, self.zone.id, self.kubernetes_version_iso_url) + + list_versions_response = self.listKubernetesSupportedVersion(version_response.id) + + self.assertEqual( + list_versions_response.name, + name, + "Check KubernetesSupportedVersion name {}, {}".format(list_versions_response.name, name) + ) + + self.assertEqual( + list_versions_response.semanticversion, + version, + "Check KubernetesSupportedVersion version {}, {}".format(list_versions_response.semanticversion, version) + ) + self.assertEqual( + list_versions_response.zoneid, + self.zone.id, + "Check KubernetesSupportedVersion zone {}, {}".format(list_versions_response.zoneid, self.zone.id) + ) + + db_version_name = self.dbclient.execute("select name from kubernetes_supported_version where uuid = '%s';" % version_response.id)[0][0] + + self.assertEqual( + str(db_version_name), + name, + "Check KubernetesSupportedVersion name in DB {}, {}".format(db_version_name, name) + ) + + self.debug("Added Kubernetes supported version with ID: %s. 
Waiting for its ISO to be Ready" % version_response.id) + + self.waitForKubernetesSupportedVersionIsoReadyState(version_response.id) + + self.debug("Deleting Kubernetes supported version with ID: %s" % version_response.id) + + delete_response = self.deleteKubernetesSupportedVersion(version_response.id, True) + + self.assertEqual( + delete_response.success, + True, + "Check KubernetesSupportedVersion deletion in DB {}, {}".format(delete_response.success, True) + ) + + db_version_removed = self.dbclient.execute("select removed from kubernetes_supported_version where uuid = '%s';" % version_response.id)[0][0] + + self.assertNotEqual( + db_version_removed, + None, + "KubernetesSupportedVersion not removed in DB" + ) + + return + + @attr(tags=["advanced", "smoke"], required_hardware="true") + def test_02_add_unsupported_kubernetes_supported_version(self): + """Test to trying to add a new unsupported Kubernetes supported version + + # Validate the following: + # 1. API should return an error + """ + + version = '1.1.1' + name = 'v' + version + '-' + random_gen() + try: + version_response = self.addKubernetesSupportedVersion(version, name, self.zone.id, self.kubernetes_version_iso_url) + self.debug("Unsupported CKS Kubernetes supported added with ID: %s. Deleting it and failing test." % version_response.id) + self.waitForKubernetesSupportedVersionIsoReadyState(version_response.id) + self.deleteKubernetesSupportedVersion(version_response.id, True) + self.fail("Kubernetes supported version below version 1.11.0 been added. Must be an error.") + except CloudstackAPIException as e: + self.debug("Unsupported version error check successful, API failure: %s" % e) + return + + @attr(tags=["advanced", "smoke"], required_hardware="true") + def test_03_add_invalid_kubernetes_supported_version(self): + """Test to trying to add a new unsupported Kubernetes supported version + + # Validate the following: + # 1. 
API should return an error + """ + + version = 'invalid' + name = 'v' + version + '-' + random_gen() + try: + version_response = self.addKubernetesSupportedVersion(version, name, self.zone.id, self.kubernetes_version_iso_url) + self.debug("Invalid Kubernetes supported added with ID: %s. Deleting it and failing test." % version_response.id) + self.waitForKubernetesSupportedVersionIsoReadyState(version_response.id) + self.deleteKubernetesSupportedVersion(version_response.id, True) + self.fail("Invalid Kubernetes supported version has been added. Must be an error.") + except CloudstackAPIException as e: + self.debug("Unsupported version error check successful, API failure: %s" % e) + return + + def addKubernetesSupportedVersion(self, version, name, zoneId, isoUrl): + addKubernetesSupportedVersionCmd = addKubernetesSupportedVersion.addKubernetesSupportedVersionCmd() + addKubernetesSupportedVersionCmd.semanticversion = version + addKubernetesSupportedVersionCmd.name = name + addKubernetesSupportedVersionCmd.zoneid = zoneId + addKubernetesSupportedVersionCmd.url = isoUrl + addKubernetesSupportedVersionCmd.mincpunumber = 2 + addKubernetesSupportedVersionCmd.minmemory = 2048 + versionResponse = self.apiclient.addKubernetesSupportedVersion(addKubernetesSupportedVersionCmd) + if versionResponse: + self.cleanup.append(versionResponse) + return versionResponse + + def listKubernetesSupportedVersion(self, versionId): + listKubernetesSupportedVersionsCmd = listKubernetesSupportedVersions.listKubernetesSupportedVersionsCmd() + listKubernetesSupportedVersionsCmd.id = versionId + versionResponse = self.apiclient.listKubernetesSupportedVersions(listKubernetesSupportedVersionsCmd) + return versionResponse[0] + + def deleteKubernetesSupportedVersion(self, cmd): + response = self.apiclient.deleteKubernetesSupportedVersion(cmd) + return response + + def deleteKubernetesSupportedVersion(self, versionId, deleteIso): + deleteKubernetesSupportedVersionCmd = 
deleteKubernetesSupportedVersion.deleteKubernetesSupportedVersionCmd() + deleteKubernetesSupportedVersionCmd.id = versionId + deleteKubernetesSupportedVersionCmd.deleteiso = deleteIso + response = self.apiclient.deleteKubernetesSupportedVersion(deleteKubernetesSupportedVersionCmd) + return response + + def waitForKubernetesSupportedVersionIsoReadyState(self, version_id, retries=20, interval=30): + """Check if Kubernetes supported version ISO is in Ready state""" + + while retries > -1: + time.sleep(interval) + list_versions_response = self.listKubernetesSupportedVersion(version_id) + if not hasattr(list_versions_response, 'isostate') or not list_versions_response or not list_versions_response.isostate: + retries = retries - 1 + continue + if 'Creating' == list_versions_response.isostate: + retries = retries - 1 + elif 'Ready' == list_versions_response.isostate: + return + else: + raise Exception( + "Failed to download Kubernetes supported version ISO: status - %s" % + list_versions_response.isostate) + raise Exception("Kubernetes supported version Ready state timed out") diff --git a/tools/apidoc/gen_toc.py b/tools/apidoc/gen_toc.py index 39a41239c2a..068b6850021 100644 --- a/tools/apidoc/gen_toc.py +++ b/tools/apidoc/gen_toc.py @@ -191,7 +191,9 @@ known_categories = { 'Management': 'Management', 'Backup' : 'Backup and Recovery', 'Restore' : 'Backup and Recovery', - 'UnmanagedInstance': 'Virtual Machine' + 'UnmanagedInstance': 'Virtual Machine', + 'KubernetesSupportedVersion': 'Kubernetes Service', + 'KubernetesCluster': 'Kubernetes Service' } diff --git a/ui/l10n/en.js b/ui/l10n/en.js index fcb6f1eb9aa..36f6d6d42f4 100644 --- a/ui/l10n/en.js +++ b/ui/l10n/en.js @@ -92,6 +92,7 @@ var dictionary = { "label.about":"About", "label.about.app":"About CloudStack", "label.accept.project.invitation":"Accept project invitation", +"label.access":"Access", "label.account":"Account", "label.accounts":"Accounts", "label.account.and.security.group":"Account, Security group", @@ 
-359,6 +360,8 @@ var dictionary = { "label.add.isolated.guest.network":"Add Isolated Guest Network", "label.add.isolated.guest.network.with.sourcenat":"Add Isolated Guest Network with SourceNat", "label.add.isolated.network":"Add Isolated Network", +"label.add.kubernetes.cluster":"Add Kubernetes Cluster", +"label.add.kubernetes.version":"Add Kubernetes Version", "label.add.l2.guest.network":"Add L2 Guest Network", "label.add.ldap.account":"Add LDAP account", "label.add.list.name":"ACL List Name", @@ -372,6 +375,7 @@ var dictionary = { "label.add.network.device":"Add Network Device", "label.add.network.offering":"Add network offering", "label.add.new.F5":"Add new F5", +"label.add.new.iso":"Add new ISO", "label.add.new.NetScaler":"Add new NetScaler", "label.add.new.PA":"Add new Palo Alto", "label.add.new.SRX":"Add new SRX", @@ -450,6 +454,7 @@ var dictionary = { "label.allocated":"Allocated", "label.allocation.state":"Allocation State", "label.allow":"Allow", +"label.all.zones":"All zones", "label.annotated.by":"Annotator", "label.annotation":"Annotation", "label.anti.affinity":"Anti-affinity", @@ -556,6 +561,8 @@ var dictionary = { "label.cloud.managed":"Cloud.com Managed", "label.cluster":"Cluster", "label.cluster.name":"Cluster Name", +"label.cluster.size":"Cluster size", +"label.cluster.size.worker.nodes":"Cluster size (Worker nodes)", "label.cluster.type":"Cluster Type", "label.clusters":"Clusters", "label.clvm":"CLVM", @@ -614,6 +621,7 @@ var dictionary = { "label.day":"Day", "label.day.of.month":"Day of Month", "label.day.of.week":"Day of Week", +"label.dashboard.endpoint":"Dashboard endpoint", "label.dc.name":"DC Name", "label.dead.peer.detection":"Dead Peer Detection", "label.decline.invitation":"Decline invitation", @@ -650,6 +658,8 @@ var dictionary = { "label.delete.events":"Delete events", "label.delete.gateway":"Delete gateway", "label.delete.internal.lb":"Delete Internal LB", +"label.delete.iso":"Delete ISO", +"label.delete.kubernetes.version":"Delete 
Kubernetes version", "label.delete.portable.ip.range":"Delete Portable IP Range", "label.delete.profile":"Delete Profile", "label.delete.project":"Delete project", @@ -667,6 +677,7 @@ var dictionary = { "label.destination.physical.network.id":"Destination physical network ID", "label.destination.zone":"Destination Zone", "label.destroy":"Destroy", +"label.destroy.kubernetes.cluster":"Destroy Kubernetes cluster", "label.destroy.router":"Destroy router", "label.destroy.vm.graceperiod":"Destroy VM Grace Period", "label.detaching.disk":"Detaching Disk", @@ -732,6 +743,7 @@ var dictionary = { "label.domain.suffix":"DNS Domain Suffix (i.e., xyz.com)", "label.done":"Done", "label.double.quotes.are.not.allowed":"Double quotes are not allowed", +"label.download.kubernetes.cluster.config":"Download Kubernetes cluster config", "label.download.progress":"Download Progress", "label.drag.new.position":"Drag to new position", "label.duration.in.sec":"Duration (in sec)", @@ -790,6 +802,7 @@ var dictionary = { "label.expunge":"Expunge", "label.external.id":"External ID", "label.external.link":"External link", +'label.external.loadbalancer.ip.address': "External load balancer IP address", "label.extractable":"Extractable", "label.extractable.lower":"extractable", "label.f5":"F5", @@ -964,6 +977,9 @@ var dictionary = { "label.iscsi":"iSCSI", "label.iso":"ISO", "label.iso.boot":"ISO Boot", +"label.iso.id":"ISO ID", +"label.iso.name":"ISO name", +"label.iso.state":"ISO state", "label.isolated.networks":"Isolated networks", "label.isolation.method":"Isolation method", "label.isolation.mode":"Isolation Mode", @@ -975,6 +991,11 @@ var dictionary = { "label.key":"Key", "label.keyboard.language":"Keyboard language", "label.keyboard.type":"Keyboard type", +"label.kubernetes.cluster":"Kubernetes cluster", +"label.kubernetes.cluster.details":"Kubernetes cluster details", +"label.kubernetes.service":"Kubernetes Service", +"label.kubernetes.version":"Kubernetes version", 
+"label.kubernetes.version.details":"Kubernetes version details", "label.kvm.traffic.label":"KVM traffic label", "label.label":"Label", "label.lang.arabic":"Arabic", @@ -1042,6 +1063,7 @@ var dictionary = { "label.mac.address": "MAC Address", "label.management.servers":"Management Servers", "label.mac.address.changes":"MAC Address Changes", +"label.master.nodes":"Master nodes", "label.max.cpus":"Max. CPU cores", "label.max.guest.limit":"Max guest limit", "label.max.instances":"Max Instances", @@ -1246,6 +1268,7 @@ var dictionary = { "label.no.items":"No Available Items", "label.no.security.groups":"No Available Security Groups", "label.no.thanks":"No thanks", +"label.node.root.disk.size.gb":"Node root disk size (in GB)", "label.none":"None", "label.not.found":"Not Found", "label.notifications":"Notifications", @@ -1352,6 +1375,7 @@ var dictionary = { "label.private.key":"Private Key", "label.private.network":"Private network", "label.private.port":"Private Port", +"label.private.registry":"Private registry", "label.private.zone":"Private Zone", "label.privatekey":"PKCS#8 Private Key", "label.privatekey.name":"Private Key", @@ -1549,6 +1573,7 @@ var dictionary = { "label.save.and.continue":"Save and continue", "label.save.changes":"Save changes", "label.saving.processing":"Saving....", +"label.scale.kubernetes.cluster":"Scale Kubernetes cluster", "label.scale.up.policy":"SCALE UP POLICY", "label.scaledown.policy":"ScaleDown Policy", "label.scaleup.policy":"ScaleUp Policy", @@ -1589,6 +1614,7 @@ var dictionary = { "label.select.template":"Select Template", "label.select.tier":"Select Tier", "label.select.vm.for.static.nat":"Select VM for static NAT", +"label.semantic.version":"Semantic version", "label.sent":"Sent", "label.server":"Server", "label.service.capabilities":"Service Capabilities", @@ -1639,6 +1665,7 @@ var dictionary = { "label.sslcertificates":"SSL Certificates", "label.standard.us.keyboard":"Standard (US) keyboard", "label.start.IP":"Start IP", 
+"label.start.kuberentes.cluster":"Start Kubernetes cluster", "label.start.lb.vm":"Start LB VM", "label.start.port":"Start Port", "label.start.reserved.system.IP":"Start Reserved system IP", @@ -1679,6 +1706,7 @@ var dictionary = { "label.sticky.request-learn":"Request learn", "label.sticky.tablesize":"Table size", "label.stop":"Stop", +"label.stop.kuberentes.cluster":"Stop Kubernetes cluster", "label.stop.lb.vm":"Stop LB VM", "label.stopped.vms":"Stopped VMs", "label.storage":"Storage", @@ -1757,11 +1785,13 @@ var dictionary = { "label.unhealthy.threshold":"Unhealthy Threshold", "label.unlimited":"Unlimited", "label.untagged":"Untagged", +"label.update.kubernetes.version":"Update Kubernetes Version", "label.update.project.resources":"Update project resources", "label.update.ssl":" SSL Certificate", "label.update.ssl.cert":" SSL Certificate", "label.update.vmware.datacenter":"Update VMware datacenter", "label.updating":"Updating", +"label.upgrade.kubernetes.cluster":"Upgrade Kubernetes cluster", "label.upgrade.required":"Upgrade is required", "label.upgrade.router.newer.template":"Upgrade Router to Use Newer Template", "label.upload":"Upload", @@ -1790,6 +1820,7 @@ var dictionary = { "label.username.lower":"username", "label.users":"Users", "label.uuid":"UUID", +"label.versions":"Versions", "label.vSwitch.type":"vSwitch Type", "label.value":"Value", "label.vcdcname":"vCenter DC name", @@ -2096,8 +2127,10 @@ var dictionary = { "message.confirm.delete.ciscoASA1000v":"Please confirm you want to delete CiscoASA1000v", "message.confirm.delete.ciscovnmc.resource":"Please confirm you want to delete CiscoVNMC resource", "message.confirm.delete.internal.lb":"Please confirm you want to delete Internal LB", +"message.confirm.delete.kubernetes.version":"Please confirm that you want to delete this Kubernetes version.", "message.confirm.delete.secondary.staging.store":"Please confirm you want to delete Secondary Staging Store.", "message.confirm.delete.ucs.manager":"Please 
confirm that you want to delete UCS Manager", +"message.confirm.destroy.kubernetes.cluster":"Please confirm that you want to destroy this Kubernetes cluster.", "message.confirm.destroy.router":"Please confirm that you would like to destroy this router", "message.confirm.disable.host":"Please confirm that you want to disable the host", "message.confirm.disable.network.offering":"Are you sure you want to disable this network offering?", @@ -2130,7 +2163,9 @@ var dictionary = { "message.confirm.scale.up.router.vm":"Do you really want to scale up the Router VM ?", "message.confirm.scale.up.system.vm":"Do you really want to scale up the system VM ?", "message.confirm.shutdown.provider":"Please confirm that you would like to shutdown this provider", +"message.confirm.start.kubernetes.cluster":"Please confirm that you want to start this Kubernetes cluster.", "message.confirm.start.lb.vm":"Please confirm you want to start LB VM", +"message.confirm.stop.kubernetes.cluster":"Please confirm that you want to stop this Kubernetes cluster.", "message.confirm.stop.lb.vm":"Please confirm you want to stop LB VM", "message.confirm.upgrade.router.newer.template":"Please confirm that you want to upgrade router to use newer template", "message.confirm.upgrade.routers.account.newtemplate":"Please confirm that you want to upgrade all routers in this account to use newer template", diff --git a/ui/plugins/cks/cks.css b/ui/plugins/cks/cks.css new file mode 100644 index 00000000000..acdd1e64cd7 --- /dev/null +++ b/ui/plugins/cks/cks.css @@ -0,0 +1,43 @@ +/*[fmt]1C20-1C0D-E*/ +/* +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. 
You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, +* software distributed under the License is distributed on an +* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +* KIND, either express or implied. See the License for the +* specific language governing permissions and limitations +* under the License. +*/ + +.downloadKubernetesClusterKubeConfig .icon { + background-position: -35px -125px; +} + +.downloadKubernetesClusterKubeConfig:hover .icon { + background-position: -35px -707px; +} + +.scaleKubernetesCluster .icon { + background-position: -264px -2px; +} + +.scaleKubernetesCluster:hover .icon { + background-position: -263px -583px; +} + +.upgradeKubernetesCluster .icon { + background-position: -138px -65px; +} + +.upgradeKubernetesCluster:hover .icon { + background-position: -138px -647px; +} diff --git a/ui/plugins/cks/cks.js b/ui/plugins/cks/cks.js new file mode 100644 index 00000000000..c353c24d093 --- /dev/null +++ b/ui/plugins/cks/cks.js @@ -0,0 +1,1581 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+(function (cloudStack) { + + var rootCaCert = ""; + var downloadCaCert = function() { + var blob = new Blob([rootCaCert], {type: 'application/x-x509-ca-cert'}); + var filename = "cloudstack-ca.pem"; + if(window.navigator.msSaveOrOpenBlob) { + window.navigator.msSaveBlob(blob, filename); + } else{ + var elem = window.document.createElement('a'); + elem.href = window.URL.createObjectURL(blob); + elem.download = filename; + document.body.appendChild(elem) + elem.click(); + document.body.removeChild(elem); + } + }; + var clusterKubeConfig = ""; + var downloadClusterKubeConfig = function() { + var blob = new Blob([clusterKubeConfig], {type: 'text/plain'}); + var filename = "kube.conf"; + if(window.navigator.msSaveOrOpenBlob) { + window.navigator.msSaveBlob(blob, filename); + } else{ + var elem = window.document.createElement('a'); + elem.href = window.URL.createObjectURL(blob); + elem.download = filename; + document.body.appendChild(elem) + elem.click(); + document.body.removeChild(elem); + } + }; + var minCpu = 0; + var minRamSize = 0; + cloudStack.plugins.cks = function(plugin) { + plugin.ui.addSection({ + id: 'cks', + title: 'label.kubernetes.service', + preFilter: function(args) { + var pluginEnabled = false; + $.ajax({ + url: createURL('listCapabilities'), + async: false, + success: function(json) { + pluginEnabled = json.listcapabilitiesresponse.capability.kubernetesserviceenabled; + }, + error: function(XMLHttpResponse) { + pluginEnabled = false; + } + }); + return pluginEnabled; + }, + showOnNavigation: true, + sectionSelect: { + label: 'label.select-view', + preFilter: function() { + return ['kubernetesclusters', 'kubernetesversions']; + } + }, + sections: { + kubernetesclusters: { + id: 'kubernetesclusters', + type: 'select', + title: "label.clusters", + listView: { + filters: { + all: { + label: 'ui.listView.filters.all' + }, + running: { + label: 'state.Running' + }, + stopped: { + label: 'state.Stopped' + }, + destroyed: { + label: 'state.Destroyed' + } + 
}, + fields: { + name: { + label: 'label.name' + }, + zonename: { + label: 'label.zone.name' + }, + size : { + label: 'label.size' + }, + cpunumber: { + label: 'label.num.cpu.cores' + }, + memory: { + label: 'label.memory.mb' + }, + state: { + label: 'label.state', + indicator: { + 'Running': 'on', + 'Stopped': 'off', + 'Destroyed': 'off', + 'Error': 'off' + } + } + }, + advSearchFields: { + name: { + label: 'label.name' + }, + zoneid: { + label: 'label.zone', + select: function(args) { + $.ajax({ + url: createURL('listZones'), + data: { + listAll: true + }, + success: function(json) { + var zones = json.listzonesresponse.zone ? json.listzonesresponse.zone : []; + + args.response.success({ + data: $.map(zones, function(zone) { + return { + id: zone.id, + description: zone.name + }; + }) + }); + } + }); + } + }, + }, + // List view actions + actions: { + add: { + label: 'label.add.kubernetes.cluster', + createForm: { + title: 'label.add.kubernetes.cluster', + preFilter: function(args) { + args.$form.find('.form-item[rel=masternodes]').find('input[name=masternodes]').val('2'); + args.$form.find('.form-item[rel=size]').find('input[name=size]').val('1'); + var experimentalFeaturesEnabled = false; + $.ajax({ + url: createURL('listCapabilities'), + async: false, + success: function(json) { + experimentalFeaturesEnabled = json.listcapabilitiesresponse.capability.kubernetesclusterexperimentalfeaturesenabled; + } + }); + if (experimentalFeaturesEnabled == true) { + args.$form.find('.form-item[rel=supportPrivateRegistry]').css('display', 'inline-block'); + } + }, + fields: { + name: { + label: 'label.name', + //docID: 'Name of the cluster', + validation: { + required: true + } + }, + description: { + label: 'label.description', + //docID: 'helpKubernetesClusterDesc', + validation: { + required: true + } + }, + zone: { + label: 'label.zone', + //docID: 'helpKubernetesClusterZone', + validation: { + required: true + }, + select: function(args) { + $.ajax({ + url: 
createURL("listZones&available=true"), + dataType: "json", + async: true, + success: function(json) { + var items = []; + var zoneObjs = json.listzonesresponse.zone; + if (zoneObjs != null) { + for (var i = 0; i < zoneObjs.length; i++) { + items.push({ + id: zoneObjs[i].id, + description: zoneObjs[i].name + }); + } + } + args.response.success({ + data: items + }); + } + }); + } + }, + kubernetesversion: { + label: 'label.kubernetes.version', + dependsOn: ['zone'], + //docID: 'helpKubernetesClusterZone', + validation: { + required: true + }, + select: function(args) { + var versionObjs; + var filterData = { zoneid: args.zone }; + $.ajax({ + url: createURL("listKubernetesSupportedVersions"), + data: filterData, + dataType: "json", + async: true, + success: function(json) { + var items = []; + versionObjs = json.listkubernetessupportedversionsresponse.kubernetessupportedversion; + if (versionObjs != null) { + for (var i = 0; i < versionObjs.length; i++) { + if (versionObjs[i].state == 'Enabled' && versionObjs[i].isostate == 'Ready') { + items.push({ + id: versionObjs[i].id, + description: versionObjs[i].name + }); + } + } + } + args.response.success({ + data: items + }); + } + }); + + args.$select.change(function() { + var $form = $(this).closest("form"); + $form.find('.form-item[rel=multimaster]').find('input[name=multimaster]').prop('checked', false); + $form.find('.form-item[rel=multimaster]').hide(); + $form.find('.form-item[rel=masternodes]').hide(); + var currentVersionId = $(this).val(); + if (currentVersionId != null && versionObjs != null) { + for (var i = 0; i < versionObjs.length; i++) { + if (currentVersionId == versionObjs[i].id) { + if (versionObjs[i].supportsha === true) { + $form.find('.form-item[rel=multimaster]').css('display', 'inline-block'); + } + minCpu = 0; + if (versionObjs[i].mincpunumber != null && versionObjs[i].mincpunumber != undefined) { + minCpu = versionObjs[i].mincpunumber; + } + minRamSize = 0; + if (versionObjs[i].minmemory != null 
&& versionObjs[i].minmemory != undefined) { + minRamSize = versionObjs[i].minmemory; + } + break; + } + } + } + }); + } + }, + serviceoffering: { + label: 'label.menu.service.offerings', + dependsOn: ['kubernetesversion'], + //docID: 'helpKubernetesClusterServiceOffering', + validation: { + required: true + }, + select: function(args) { + $.ajax({ + url: createURL("listServiceOfferings"), + dataType: "json", + async: true, + success: function(json) { + var offeringObjs = []; + var items = json.listserviceofferingsresponse.serviceoffering; + if (items != null) { + for (var i = 0; i < items.length; i++) { + if (items[i].iscustomized == false && + items[i].cpunumber >= minCpu && items[i].memory >= minRamSize) { + offeringObjs.push({ + id: items[i].id, + description: items[i].name + }); + } + } + } + args.response.success({ + data: offeringObjs + }); + } + }); + } + }, + noderootdisksize: { + label: 'label.node.root.disk.size.gb', + //docID: 'helpKubernetesClusterNodeRootDiskSize', + validation: { + number: true + } + }, + network: { + label: 'label.network', + //docID: 'helpKubernetesClusterNetwork', + select: function(args) { + $.ajax({ + url: createURL("listNetworks"), + dataType: "json", + async: true, + success: function(json) { + var networkObjs = []; + networkObjs.push({ + id: "", + description: "" + }); + var items = json.listnetworksresponse.network; + if (items != null) { + for (var i = 0; i < items.length; i++) { + networkObjs.push({ + id: items[i].id, + description: items[i].name + }); + } + } + args.response.success({ + data: networkObjs + }); + } + }); + } + }, + multimaster: { + label: "label.ha.enabled", + dependsOn: 'kubernetesversion', + isBoolean: true, + isChecked: false, + }, + masternodes: { + label: 'label.master.nodes', + //docID: 'helpKubernetesClusterSize', + validation: { + required: true, + multiplecountnumber: true + }, + dependsOn: "multimaster", + isHidden: true, + }, + externalloadbalanceripaddress: { + label: 
'label.external.loadbalancer.ip.address', + validation: { + ipv4AndIpv6AddressValidator: true + }, + dependsOn: "multimaster", + isHidden: true, + }, + size: { + label: 'label.cluster.size.worker.nodes', + //docID: 'helpKubernetesClusterSize', + validation: { + required: true, + naturalnumber: true + }, + }, + sshkeypair: { + label: 'label.ssh.key.pair', + //docID: 'helpKubernetesClusterSSH', + select: function(args) { + $.ajax({ + url: createURL("listSSHKeyPairs"), + dataType: "json", + async: true, + success: function(json) { + var keypairObjs = []; + keypairObjs.push({ + id: "", + description: "" + }); + var items = json.listsshkeypairsresponse.sshkeypair; + if (items != null) { + for (var i = 0; i < items.length; i++) { + keypairObjs.push({ + id: items[i].name, + description: items[i].name + }); + } + } + args.response.success({ + data: keypairObjs + }); + } + }); + } + }, + supportPrivateRegistry: { + label: 'label.private.registry', + isBoolean: true, + isChecked: false, + isHidden: true + }, + username: { + label: 'label.username', + dependsOn: 'supportPrivateRegistry', + validation: { + required: true + }, + isHidden: true + }, + password: { + label: 'label.password', + dependsOn: 'supportPrivateRegistry', + validation: { + required: true + }, + isHidden: true, + isPassword: true + }, + url: { + label: 'label.url', + dependsOn: 'supportPrivateRegistry', + validation: { + required: true + }, + isHidden: true, + }, + email: { + label: 'label.email', + dependsOn: 'supportPrivateRegistry', + validation: { + required: true + }, + isHidden: true, + } + } + }, + + action: function(args) { + var data = { + name: args.data.name, + description: args.data.description, + zoneid: args.data.zone, + kubernetesversionid: args.data.kubernetesversion, + serviceofferingid: args.data.serviceoffering, + size: args.data.size, + keypair: args.data.sshkeypair + }; + + if (args.data.noderootdisksize != null && args.data.noderootdisksize != "" && args.data.noderootdisksize > 0) { + 
$.extend(data, { + noderootdisksize: args.data.noderootdisksize + }); + } + + var masterNodes = 1; + if (args.data.multimaster === 'on') { + masterNodes = args.data.masternodes; + if (args.data.externalloadbalanceripaddress != null && args.data.externalloadbalanceripaddress != "") { + $.extend(data, { + externalloadbalanceripaddress: args.data.externalloadbalanceripaddress + }); + } + } + $.extend(data, { + masternodes: masterNodes + }); + + if (args.data.supportPrivateRegistry) { + $.extend(data, { + dockerregistryusername: args.data.username, + dockerregistrypassword: args.data.password, + dockerregistryurl: args.data.url, + dockerregistryemail: args.data.email + }); + } + + if (args.data.network != null && args.data.network.length > 0) { + $.extend(data, { + networkid: args.data.network + }); + } + $.ajax({ + url: createURL('createKubernetesCluster'), + data: data, + success: function(json) { + var jid = json.createkubernetesclusterresponse.jobid; + args.response.success({ + _custom: { + jobId: jid + } + }); + }, + error: function(XMLHttpResponse) { + var errorMsg = parseXMLHttpResponse(XMLHttpResponse); + args.response.error(errorMsg); + } + }); + }, + + + messages: { + notification: function(args) { + return 'Kubernetes Cluster Add'; + } + }, + notification: { + poll: pollAsyncJobResult + } + } + }, + dataProvider: function(args) { + var data = { + page: args.page, + pagesize: pageSize + }; + listViewDataProvider(args, data); + if (args.filterBy != null) { //filter dropdown + if (args.filterBy.kind != null) { + switch (args.filterBy.kind) { + case "all": + break; + case "running": + $.extend(data, { + state: 'Running' + }); + break; + case "stopped": + $.extend(data, { + state: 'Stopped' + }); + break; + case "destroyed": + $.extend(data, { + state: 'Destroyed' + }); + break; + } + } + } + + $.ajax({ + url: createURL("listKubernetesClusters"), + data: data, + dataType: "json", + sync: true, + success: function(json) { + var items = []; + if 
(json.listkubernetesclustersresponse.kubernetescluster != null) { + items = json.listkubernetesclustersresponse.kubernetescluster; + } + args.response.success({ + actionFilter: cksActionfilter, + data: items + }); + } + }); + }, + + detailView: { + name: 'label.kubernetes.cluster.details', + isMaximized: true, + actions: { + start: { + label: 'label.start.kuberentes.cluster', + action: function(args) { + $.ajax({ + url: createURL("startKubernetesCluster"), + data: {"id": args.context.kubernetesclusters[0].id}, + dataType: "json", + async: true, + success: function(json) { + var jid = json.startkubernetesclusterresponse.jobid; + args.response.success({ + _custom: { + jobId: jid + } + }); + } + }); + }, + messages: { + confirm: function(args) { + return 'message.confirm.start.kubernetes.cluster'; + }, + notification: function(args) { + return 'Started Kubernetes cluster.'; + } + }, + notification: { + poll: pollAsyncJobResult + } + }, + stop: { + label: 'label.stop.kuberentes.cluster', + action: function(args) { + $.ajax({ + url: createURL("stopKubernetesCluster"), + data: {"id": args.context.kubernetesclusters[0].id}, + dataType: "json", + async: true, + success: function(json) { + var jid = json.stopkubernetesclusterresponse.jobid; + args.response.success({ + _custom: { + jobId: jid + } + }); + } + }); + }, + messages: { + confirm: function(args) { + return 'message.confirm.stop.kubernetes.cluster'; + }, + notification: function(args) { + return 'Stopped Kubernetes cluster.'; + } + }, + notification: { + poll: pollAsyncJobResult + } + }, + destroy: { + label: 'label.destroy.kubernetes.cluster', + compactLabel: 'label.destroy', + createForm: { + title: 'label.destroy.kubernetes.cluster', + desc: 'label.destroy.kubernetes.cluster', + isWarning: true, + fields: { + } + }, + messages: { + confirm: function(args) { + return 'message.confirm.destroy.kubernetes.cluster'; + }, + notification: function(args) { + return 'Destroyed Kubernetes cluster.'; + } + }, + action: 
function(args) { + var data = { + id: args.context.kubernetesclusters[0].id + }; + $.ajax({ + url: createURL('deleteKubernetesCluster'), + data: data, + dataType: "json", + async: true, + success: function(json) { + args.response.success({ + _custom: { + jobId: json.deletekubernetesclusterresponse.jobid, + getUpdatedItem: function(json) { + return { 'toRemove': true }; + } + } + }); + } + }); + }, + notification: { + poll: pollAsyncJobResult + } + }, + downloadKubernetesClusterKubeConfig: { + label: 'label.download.kubernetes.cluster.config', + messages: { + notification: function(args) { + return 'label.download.kubernetes.cluster.config'; + } + }, + action: function(args) { + var data = { + id: args.context.kubernetesclusters[0].id + } + $.ajax({ + url: createURL("getKubernetesClusterConfig"), + dataType: "json", + data: data, + async: false, + success: function(json) { + var jsonObj; + if (json.getkubernetesclusterconfigresponse.clusterconfig != null && + json.getkubernetesclusterconfigresponse.clusterconfig.configdata != null ) { + jsonObj = json.getkubernetesclusterconfigresponse.clusterconfig; + clusterKubeConfig = jsonObj.configdata; + downloadClusterKubeConfig(); + args.response.success({}); + } else { + args.response.error("Unable to retrieve Kubernetes cluster config"); + } + }, + error: function(XMLHttpResponse) { + var errorMsg = parseXMLHttpResponse(XMLHttpResponse); + args.response.error(errorMsg); + } + }); + }, + notification: { + poll: function(args) { + args.complete(); + } + } + }, + scaleKubernetesCluster: { + label: 'label.scale.kubernetes.cluster', + messages: { + notification: function(args) { + return 'label.scale.kubernetes.cluster'; + } + }, + createForm: { + title: 'label.scale.kubernetes.cluster', + desc: '', + preFilter: function(args) { + var options = args.$form.find('.form-item[rel=serviceoffering]').find('option'); + $.each(options, function(optionIndex, option) { + if ($(option).val() === 
args.context.kubernetesclusters[0].serviceofferingid) { + $(option).attr('selected','selected'); + } + }); + args.$form.find('.form-item[rel=size]').find('input[name=size]').val(args.context.kubernetesclusters[0].size); + }, + fields: { + serviceoffering: { + label: 'label.menu.service.offerings', + //docID: 'helpKubernetesClusterServiceOffering', + validation: { + required: true + }, + select: function(args) { + $.ajax({ + url: createURL("listKubernetesSupportedVersions"), + data: {id: args.context.kubernetesclusters[0].kubernetesversionid}, + dataType: "json", + async: false, + success: function(json) { + var versionObjs = json.listkubernetessupportedversionsresponse.kubernetessupportedversion; + if (versionObjs != null && versionObjs.length > 0) { + minCpu = 0; + if (versionObjs[0].mincpunumber != null && versionObjs[0].mincpunumber != undefined) { + minCpu = versionObjs[0].mincpunumber; + } + minRamSize = 0; + if (versionObjs[0].minmemory != null && versionObjs[0].minmemory != undefined) { + minRamSize = versionObjs[0].minmemory; + } + } + } + }); + $.ajax({ + url: createURL("listServiceOfferings"), + dataType: "json", + async: true, + success: function(json) { + var offeringObjs = []; + var items = json.listserviceofferingsresponse.serviceoffering; + if (items != null) { + for (var i = 0; i < items.length; i++) { + if (items[i].iscustomized == false && + items[i].cpunumber >= minCpu && items[i].memory >= minRamSize) { + offeringObjs.push({ + id: items[i].id, + description: items[i].name + }); + } + } + } + args.response.success({ + data: offeringObjs + }); + } + }); + } + }, + size: { + label: 'label.cluster.size', + //docID: 'helpKubernetesClusterSize', + validation: { + required: true, + number: true + }, + } + } + }, + action: function(args) { + var data = { + id: args.context.kubernetesclusters[0].id, + serviceofferingid: args.data.serviceoffering, + size: args.data.size + }; + $.ajax({ + url: createURL('scaleKubernetesCluster'), + data: data, + dataType: 
"json", + success: function (json) { + var jid = json.scalekubernetesclusterresponse.jobid; + args.response.success({ + _custom: { + jobId: jid, + getActionFilter: function() { + return cksActionfilter; + } + } + }); + } + }); //end ajax + }, + notification: { + poll: pollAsyncJobResult + } + }, + upgradeKubernetesCluster: { + label: 'label.upgrade.kubernetes.cluster', + messages: { + notification: function(args) { + return 'label.upgrade.kubernetes.cluster'; + } + }, + createForm: { + title: 'label.upgrade.kubernetes.cluster', + desc: '', + preFilter: function(args) {}, + fields: { + kubernetesversion: { + label: 'label.kubernetes.version', + //docID: 'helpKubernetesClusterZone', + validation: { + required: true + }, + select: function(args) { + var filterData = { minimumkubernetesversionid: args.context.kubernetesclusters[0].kubernetesversionid }; + $.ajax({ + url: createURL("listKubernetesSupportedVersions"), + data: filterData, + dataType: "json", + async: true, + success: function(json) { + var items = []; + var versionObjs = json.listkubernetessupportedversionsresponse.kubernetessupportedversion; + if (versionObjs != null) { + var clusterVersion = null; + for (var j = 0; j < versionObjs.length; j++) { + if (versionObjs[j].id == args.context.kubernetesclusters[0].kubernetesversionid) { + clusterVersion = versionObjs[j]; + break; + } + } + for (var i = 0; i < versionObjs.length; i++) { + if (versionObjs[i].id != args.context.kubernetesclusters[0].kubernetesversionid && + (clusterVersion == null || (clusterVersion != null && versionObjs[i].semanticversion != clusterVersion.semanticversion)) && + versionObjs[i].state == 'Enabled' && versionObjs[i].isostate == 'Ready') { + items.push({ + id: versionObjs[i].id, + description: versionObjs[i].name + }); + } + } + } + args.response.success({ + data: items + }); + } + }); + } + }, + } + }, + action: function(args) { + var data = { + id: args.context.kubernetesclusters[0].id, + kubernetesversionid: 
args.data.kubernetesversion + }; + $.ajax({ + url: createURL('upgradeKubernetesCluster'), + data: data, + dataType: "json", + success: function (json) { + var jid = json.upgradekubernetesclusterresponse.jobid; + args.response.success({ + _custom: { + jobId: jid, + getActionFilter: function() { + return cksActionfilter; + } + } + }); + } + }); //end ajax + }, + notification: { + poll: pollAsyncJobResult + } + }, + }, + tabs: { + // Details tab + details: { + title: 'label.details', + fields: [{ + id: { + label: 'label.id' + }, + name: { + label: 'label.name' + }, + zonename: { + label: 'label.zone.name' + }, + kubernetesversionname: { + label: 'label.kubernetes.version' + }, + masternodes : { + label: 'label.master.nodes' + }, + size : { + label: 'label.cluster.size' + }, + cpunumber: { + label: 'label.num.cpu.cores' + }, + memory: { + label: 'label.memory.mb' + }, + state: { + label: 'label.state', + }, + serviceofferingname: { + label: 'label.compute.offering' + }, + associatednetworkname: { + label: 'label.network' + }, + keypair: { + label: 'label.ssh.key.pair' + } + }], + + dataProvider: function(args) { + $.ajax({ + url: createURL("listKubernetesClusters&id=" + args.context.kubernetesclusters[0].id), + dataType: "json", + async: true, + success: function(json) { + var jsonObj; + if (json.listkubernetesclustersresponse.kubernetescluster != null && json.listkubernetesclustersresponse.kubernetescluster.length > 0) { + jsonObj = json.listkubernetesclustersresponse.kubernetescluster[0]; + } + args.response.success({ + actionFilter: cksActionfilter, + data: jsonObj + }); + } + }); + } + }, + clusteraccess: { + title: 'label.access', + custom : function (args) { + var showAccess = function() { + var state = args.context.kubernetesclusters[0].state; + if (state == "Created") { // Created + return jQuery('

').html("Kubernetes cluster setup is in progress, please check again in a few minutes."); + } else if (state == "Error") { // Error + return jQuery('

').html("Kubernetes cluster is in error state, it cannot be accessed."); + } else if (state == "Destroying") { // Destroying + return jQuery('

').html("Kubernetes cluster is in destroying state, it cannot be accessed."); + } else if (state == "Destroyed") { // Destroyed + return jQuery('

').html("Kubernetes cluster is already destroyed, it cannot be accessed."); + } + var data = { + id: args.context.kubernetesclusters[0].kubernetesversionid + } + var version = ''; + $.ajax({ + url: createURL("listKubernetesSupportedVersions"), + dataType: "json", + data: data, + async: false, + success: function(json) { + var jsonObj; + if (json.listkubernetessupportedversionsresponse.kubernetessupportedversion != null) { + version = json.listkubernetessupportedversionsresponse.kubernetessupportedversion[0].semanticversion; + } + } + }); + return jQuery('

').html("Access Kubernetes cluster

Download cluster's kubeconfig file using action from Details tab.
Download kubectl tool for cluster's Kubernetes version from,
Linux: https://storage.googleapis.com/kubernetes-release/release/v" + version + "/bin/linux/amd64/kubectl
MacOS: https://storage.googleapis.com/kubernetes-release/release/v" + version + "/bin/darwin/amd64/kubectl
Windows: https://storage.googleapis.com/kubernetes-release/release/v" + version + "/bin/windows/amd64/kubectl.exe

Using kubectl and kubeconfig file to access cluster
kubectl --kubeconfig /custom/path/kube.conf {COMMAND}

List pods
kubectl --kubeconfig /custom/path/kube.conf get pods --all-namespaces
List nodes
kubectl --kubeconfig /custom/path/kube.conf get nodes --all-namespaces
List services
kubectl --kubeconfig /custom/path/kube.conf get services --all-namespaces

Access dashboard web UI
Run proxy locally
kubectl --kubeconfig /custom/path/kube.conf proxy
Open URL in browser
http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/

Token for dashboard login can be retrieved using following command
kubectl --kubeconfig /custom/path/kube.conf describe secret $(kubectl --kubeconfig /custom/path/kube.conf get secrets -n kubernetes-dashboard | grep kubernetes-dashboard-token | awk '{print $1}') -n kubernetes-dashboard

More about accessing dashboard UI, https://kubernetes.io/docs/tasks/access-application-cluster/web-ui-dashboard/#accessing-the-dashboard-ui"); + }; + return showAccess(); + } + }, + clusterinstances: { + title: 'label.instances', + listView: { + section: 'clusterinstances', + preFilter: function(args) { + var hiddenFields = []; + if (!isAdmin()) { + hiddenFields.push('instancename'); + } + return hiddenFields; + }, + fields: { + name: { + label: 'label.name', + truncate: true + }, + instancename: { + label: 'label.internal.name' + }, + displayname: { + label: 'label.display.name', + truncate: true + }, + ipaddress: { + label: 'label.ip.address' + }, + zonename: { + label: 'label.zone.name' + }, + state: { + label: 'label.state', + indicator: { + 'Running': 'on', + 'Stopped': 'off', + 'Destroyed': 'off', + 'Error': 'off' + } + } + }, + dataProvider: function(args) { + var data = {}; + listViewDataProvider(args, data); + + $.ajax({ + url: createURL("listKubernetesClusters"), + data: {"id": args.context.kubernetesclusters[0].id}, + success: function(json) { + var items = json.listkubernetesclustersresponse.kubernetescluster; + + var vmlist = []; + $.each(items, function(idx, item) { + if ("virtualmachineids" in item) { + vmlist = vmlist.concat(item.virtualmachineids); + } + }); + + $.extend(data, { + ids: vmlist.join() + }); + + if (items && items.length > 0 && items[0].projectid != null && + items[0].projectid != undefined && items[0].projectid.length > 0) { + $.extend(data, { + projectid: items[0].projectid + }); + } + + if (data.ids.length == 0) { + args.response.success({ + data: [] + }); + } else { + $.ajax({ + url: createURL('listVirtualMachines'), + data: data, + success: function(json) { + var items = json.listvirtualmachinesresponse.virtualmachine; + if (items) { + $.each(items, function(idx, vm) { + if (vm.nic && vm.nic.length > 0 && vm.nic[0].ipaddress) { + items[idx].ipaddress = vm.nic[0].ipaddress; + } + }); + } + args.response.success({ + data: items + 
}); + }, + error: function(XMLHttpResponse) { + cloudStack.dialog.notice({ + message: parseXMLHttpResponse(XMLHttpResponse) + }); + args.response.error(); + } + }); + } + } + }); + }, + } + }, + firewall: { + title: 'label.firewall', + custom: function(args) { + var data = { + id: args.context.kubernetesclusters[0].networkid, + listAll: true + } + if (args.context.kubernetesclusters[0].projectid != null && + args.context.kubernetesclusters[0].projectid != undefined && + args.context.kubernetesclusters[0].projectid.length > 0) { + $.extend(data, { + projectid: args.context.kubernetesclusters[0].projectid + }); + $.extend(args.context, {"projectid": args.context.kubernetesclusters[0].projectid}); + } + $.ajax({ + url: createURL('listNetworks'), + data: data, + async: false, + dataType: "json", + success: function(json) { + var network = json.listnetworksresponse.network; + $.extend(args.context, {"networks": [network]}); + } + }); + data = { + associatedNetworkId: args.context.kubernetesclusters[0].networkid, + listAll: true, + forvirtualnetwork: true + } + if (args.context.kubernetesclusters[0].projectid != null && + args.context.kubernetesclusters[0].projectid != undefined && + args.context.kubernetesclusters[0].projectid.length > 0) { + $.extend(data, { + projectid: args.context.kubernetesclusters[0].projectid + }); + } + $.ajax({ + url: createURL('listPublicIpAddresses'), + data: data, + async: false, + dataType: "json", + success: function(json) { + var ips = json.listpublicipaddressesresponse.publicipaddress; + var fwip = ips[0]; + $.each(ips, function(idx, ip) { + if (ip.issourcenat || ip.isstaticnat) { + fwip = ip; + return false; + } + }); + $.extend(args.context, {"ipAddresses": [fwip]}); + } + }); + return cloudStack.sections.network.sections.ipAddresses.listView.detailView.tabs.ipRules.custom(args); + }, + }, + } + } + } + }, + kubernetesversions: { + id: 'kubernetesversions', + type: 'select', + title: "label.versions", + listView: { + fields: { + name: 
{ + label: 'label.name' + }, + semanticversion: { + label: 'label.kubernetes.version' + }, + zonename: { + label: 'label.zone.name' + }, + isoname: { + label: 'label.iso.name' + }, + isostate: { + label: 'label.iso.state' + }, + mincpunumber: { + label: 'label.min.cpu.cores' + }, + minmemory: { + label: 'label.memory.minimum.mb' + }, + state: { + label: 'label.state', + indicator: { + 'Enabled': 'on', + 'Disabled': 'off' + } + } + }, + advSearchFields: { + name: { + label: 'label.name' + }, + zoneid: { + label: 'label.zone', + select: function(args) { + $.ajax({ + url: createURL('listZones'), + data: { + listAll: true + }, + success: function(json) { + var zones = json.listzonesresponse.zone ? json.listzonesresponse.zone : []; + + args.response.success({ + data: $.map(zones, function(zone) { + return { + id: zone.id, + description: zone.name + }; + }) + }); + } + }); + } + }, + }, + // List view actions + actions: { + add: { + label: 'label.add.kubernetes.version', + preFilter: function(args) { return isAdmin(); }, + createForm: { + title: 'label.add.kubernetes.version', + preFilter: cloudStack.preFilter.createTemplate, + fields: { + version: { + label: 'label.semantic.version', + //docID: 'Name of the cluster', + validation: { + required: true + } + }, + name: { + label: 'label.name', + //docID: 'Name of the cluster', + }, + zone: { + label: 'label.zone', + //docID: 'helpKubernetesClusterZone', + validation: { + required: true + }, + select: function(args) { + $.ajax({ + url: createURL("listZones&available=true"), + dataType: "json", + async: true, + success: function(json) { + var items = []; + var zoneObjs = json.listzonesresponse.zone; + if (zoneObjs != null) { + for (var i = 0; i < zoneObjs.length; i++) { + items.push({ + id: zoneObjs[i].id, + description: zoneObjs[i].name + }); + } + } + items.sort(function(a, b) { + return a.description.localeCompare(b.description); + }); + items.unshift({ + id: -1, + description: 'label.all.zones' + }); + 
args.response.success({ + data: items + }); + } + }); + } + }, + isourl: { + label: 'label.url', + //docID: 'Name of the cluster', + validation: { + required: true + } + }, + isochecksum: { + label: 'label.checksum', + //docID: 'Name of the cluster', + }, + mincpunumber: { + label: 'label.min.cpu.cores', + validation: { + required: true, + number: true + }, + }, + minmemory: { + label: 'label.memory.minimum.mb', + validation: { + required: true, + number: true + } + } + } + }, + + action: function(args) { + var data = { + name: args.data.name, + semanticversion: args.data.version, + url: args.data.isourl, + checksum: args.data.isochecksum + }; + if (args.data.zone != null && args.data.zone != -1) { + $.extend(data, { + zoneid: args.data.zone + }); + } + if (args.data.mincpunumber != null && args.data.mincpunumber != "" && args.data.mincpunumber > 0) { + $.extend(data, { + mincpunumber: args.data.mincpunumber + }); + } + if (args.data.minmemory != null && args.data.minmemory != "" && args.data.minmemory > 0) { + $.extend(data, { + minmemory: args.data.minmemory + }); + } + $.ajax({ + url: createURL('addKubernetesSupportedVersion'), + data: data, + success: function(json) { + var version = json.addkubernetessupportedversionresponse.kubernetessupportedversion; + args.response.success({ + data: version + }); + }, + error: function(XMLHttpResponse) { + var errorMsg = parseXMLHttpResponse(XMLHttpResponse); + args.response.error(errorMsg); + } + }); + }, + messages: { + notification: function(args) { + return 'Kubernetes Supported Version Add'; + } + } + } + }, + dataProvider: function(args) { + var data = { + page: args.page, + pagesize: pageSize + }; + listViewDataProvider(args, data); + $.ajax({ + url: createURL("listKubernetesSupportedVersions"), + data: data, + dataType: "json", + async: true, + success: function(json) { + var items = []; + if (json.listkubernetessupportedversionsresponse.kubernetessupportedversion != null) { + items = 
json.listkubernetessupportedversionsresponse.kubernetessupportedversion; + } + args.response.success({ + data: items + }); + } + }); + }, + + detailView: { + name: 'label.kubernetes.version.details', + isMaximized: true, + actions: { + update: { + label: 'label.edit', + messages: { + notification: function(args) { + return 'label.update.kubernetes.version'; + } + }, + createForm: { + title: 'label.update.kubernetes.version', + desc: '', + preFilter: function(args) { + var formVersion = args.context.kubernetesversions[0]; + $.ajax({ + url: createURL('listKubernetesSupportedVersions'), + data: { + id: args.context.kubernetesversions[0].id + }, + dataType: "json", + async: false, + success: function (json) { + if (json.listkubernetessupportedversionsresponse.kubernetessupportedversion != null && + json.listkubernetessupportedversionsresponse.kubernetessupportedversion.length > 0) { + formVersion = json.listkubernetessupportedversionsresponse.kubernetessupportedversion[0]; + } + } + }); + if (formVersion.state != null) { + var options = args.$form.find('.form-item[rel=state]').find('option'); + $.each(options, function(optionIndex, option) { + if ($(option).val() === formVersion.state) { + $(option).attr('selected','selected'); + } + }); + } + }, + fields: { + state: { + label: 'label.state', + //docID: 'helpKubernetesClusterZone', + validation: { + required: true + }, + select: function(args) { + var items = []; + items.push({ + id: 'Enabled', + description: 'state.Enabled' + }, { + id: 'Disabled', + description: 'state.Disabled' + }); + args.response.success({ + data: items + }); + } + }, + } + }, + action: function(args) { + var data = { + id: args.context.kubernetesversions[0].id, + state: args.data.state + }; + $.ajax({ + url: createURL('updateKubernetesSupportedVersion'), + data: data, + dataType: "json", + success: function (json) { + var jsonObj; + if (json.updatekubernetessupportedversionresponse.kubernetessupportedversion != null) { + jsonObj = 
json.updatekubernetessupportedversionresponse.kubernetessupportedversion; + } + args.response.success({ + data: jsonObj + }); + }, + error: function(XMLHttpResponse) { + var errorMsg = parseXMLHttpResponse(XMLHttpResponse); + args.response.error(errorMsg); + } + }); //end ajax + } + }, + destroy: { + label: 'label.delete.kubernetes.version', + compactLabel: 'label.delete', + preFilter: function(args) { return isAdmin(); }, + createForm: { + title: 'label.delete.kubernetes.version', + desc: 'label.delete.kubernetes.version', + isWarning: true, + fields: {} + }, + messages: { + confirm: function(args) { + return 'message.confirm.delete.kubernetes.version'; + }, + notification: function(args) { + return 'Deleted Kubernetes version.'; + } + }, + action: function(args) { + var data = { + id: args.context.kubernetesversions[0].id + }; + $.ajax({ + url: createURL('deleteKubernetesSupportedVersion'), + data: data, + dataType: "json", + async: true, + success: function(json) { + args.response.success({ + _custom: { + jobId: json.deletekubernetessupportedversionresponse.jobid, + getUpdatedItem: function(json) { + return { 'toRemove': true }; + } + } + }); + } + }); + }, + notification: { + poll: pollAsyncJobResult + } + } + }, + tabs: { + // Details tab + details: { + title: 'label.details', + fields: [{ + id: { + label: 'label.id' + }, + name: { + label: 'label.name' + }, + zonename: { + label: 'label.zone.name' + }, + isoid: { + label: 'label.iso.id' + }, + isoname: { + label: 'label.iso.name' + }, + isostate: { + label: 'label.iso.state' + } + }], + + dataProvider: function(args) { + $.ajax({ + url: createURL("listKubernetesSupportedVersions&id=" + args.context.kubernetesversions[0].id), + dataType: "json", + async: true, + success: function(json) { + var jsonObj; + if (json.listkubernetessupportedversionsresponse.kubernetessupportedversion != null && json.listkubernetessupportedversionsresponse.kubernetessupportedversion.length > 0) { + jsonObj = 
json.listkubernetessupportedversionsresponse.kubernetessupportedversion[0]; + } + args.response.success({ + data: jsonObj + }); + } + }); + } + } + } + } + } + }, + } + }); + }; + + var cksActionfilter = cloudStack.actionFilter.cksActionfilter = function(args) { + var jsonObj = args.context.item; + var allowedActions = []; + if (jsonObj.state != "Destroyed" && jsonObj.state != "Destroying") { + if (jsonObj.state == "Stopped") { + allowedActions.push("start"); + } else { + allowedActions.push("downloadKubernetesClusterKubeConfig"); + allowedActions.push("stop"); + } + if (jsonObj.state == "Created" || jsonObj.state == "Running") { + allowedActions.push("scaleKubernetesCluster"); + allowedActions.push("upgradeKubernetesCluster"); + } + allowedActions.push("destroy"); + } + return allowedActions; + } + +}(cloudStack)); diff --git a/ui/plugins/cks/config.js b/ui/plugins/cks/config.js new file mode 100644 index 00000000000..a5ea16358b1 --- /dev/null +++ b/ui/plugins/cks/config.js @@ -0,0 +1,25 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+(function (cloudStack) { + cloudStack.plugins.cks.config = { + title: 'Kubernetes Service', + desc: 'Kubernetes Service', + externalLink: 'http://www.cloudstack.org/', + authorName: 'Apache CloudStack', + authorEmail: 'dev@cloudstack.apache.org' + }; +}(cloudStack)); diff --git a/ui/plugins/cks/icon.png b/ui/plugins/cks/icon.png new file mode 100644 index 00000000000..1d049675c27 Binary files /dev/null and b/ui/plugins/cks/icon.png differ diff --git a/ui/plugins/plugins.js b/ui/plugins/plugins.js index 6edfe88fe1d..30cdf4f5dac 100644 --- a/ui/plugins/plugins.js +++ b/ui/plugins/plugins.js @@ -18,6 +18,7 @@ cloudStack.plugins = [ //'testPlugin', 'cloudian', - 'quota' + 'quota', + 'cks' ]; }(jQuery, cloudStack)); diff --git a/ui/scripts/sharedFunctions.js b/ui/scripts/sharedFunctions.js index 6151a6acc6e..920dbd768f3 100644 --- a/ui/scripts/sharedFunctions.js +++ b/ui/scripts/sharedFunctions.js @@ -2800,16 +2800,37 @@ jQuery.validator.addMethod("ipv6CustomJqueryValidator", function(value, element) return jQuery.validator.methods.ipv6.call(this, value, element); }, "The specified IPv6 address is invalid."); - $.validator.addMethod("allzonesonly", function(value, element){ - if ((value.indexOf("-1") != -1) &&(value.length > 1)) + if ((value.indexOf("-1") != -1) && (value.length > 1)) return false; return true; }, "All Zones cannot be combined with any other zone"); +$.validator.addMethod("naturalnumber", function(value, element){ + if (this.optional(element) && value.length == 0) + return true; + if (isNaN(value)) + return false; + value = parseInt(value); + return (typeof value === 'number') && (value > 0) && (Math.floor(value) === value) && value !== Infinity; + +}, +"Please enter a valid number, 1 or greater"); + +$.validator.addMethod("multiplecountnumber", function(value, element){ + if (this.optional(element) && value.length == 0) + return true; + if (isNaN(value)) + return false; + value = parseInt(value); + return (typeof value === 'number') && (value > 1) && 
(Math.floor(value) === value) && value !== Infinity; + +}, +"Please enter a valid number, 2 or greater"); + cloudStack.createTemplateMethod = function (isSnapshot){ return { label: 'label.create.template',