CloudStack Kubernetes Service (#3680)

This commit is contained in:
Abhishek Kumar 2020-03-06 13:21:23 +05:30 committed by GitHub
parent 9c6b02fd8b
commit 8cc70c7d87
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
76 changed files with 12613 additions and 65 deletions

View File

@ -225,8 +225,8 @@ public class ApiConstants {
public static final String LOCK = "lock";
public static final String LUN = "lun";
public static final String LBID = "lbruleid";
public static final String MAC_ADDRESS = "macaddress";
// MAX was previously declared twice in this run (before and after MAC_ADDRESS);
// a duplicate field declaration does not compile in Java. Keep the single
// declaration in its alphabetical position after MAC_ADDRESS.
public static final String MAX = "max";
public static final String MAX_SNAPS = "maxsnaps";
public static final String MAX_CPU_NUMBER = "maxcpunumber";
public static final String MAX_MEMORY = "maxmemory";
@ -782,6 +782,23 @@ public class ApiConstants {
public static final String LAST_UPDATED = "lastupdated";
public static final String PERFORM_FRESH_CHECKS = "performfreshchecks";
public static final String CONSOLE_END_POINT = "consoleendpoint";
// API parameter/response key names introduced for the Kubernetes Service plugin.
public static final String EXTERNAL_LOAD_BALANCER_IP_ADDRESS = "externalloadbalanceripaddress";
// Docker private registry credentials (experimental Kubernetes cluster feature).
public static final String DOCKER_REGISTRY_USER_NAME = "dockerregistryusername";
public static final String DOCKER_REGISTRY_PASSWORD = "dockerregistrypassword";
public static final String DOCKER_REGISTRY_URL = "dockerregistryurl";
public static final String DOCKER_REGISTRY_EMAIL = "dockerregistryemail";
// ISO backing a Kubernetes version (name and current state).
public static final String ISO_NAME = "isoname";
public static final String ISO_STATE = "isostate";
// Kubernetes version identification: semantic version string plus id/name keys.
public static final String SEMANTIC_VERSION = "semanticversion";
public static final String KUBERNETES_VERSION_ID = "kubernetesversionid";
public static final String KUBERNETES_VERSION_NAME = "kubernetesversionname";
public static final String MASTER_NODES = "masternodes";
// Lower bounds used when filtering/validating Kubernetes versions.
public static final String MIN_SEMANTIC_VERSION = "minimumsemanticversion";
public static final String MIN_KUBERNETES_VERSION_ID = "minimumkubernetesversionid";
public static final String NODE_ROOT_DISK_SIZE = "noderootdisksize";
public static final String SUPPORTS_HA = "supportsha";
// Level of detail requested when listing hosts via the API.
public enum HostDetails {
all, capacity, events, stats, min;
}

View File

@ -61,6 +61,8 @@ public class ListCapabilitiesCmd extends BaseCmd {
response.setAllowUserExpungeRecoverVM((Boolean)capabilities.get("allowUserExpungeRecoverVM"));
response.setAllowUserExpungeRecoverVolume((Boolean)capabilities.get("allowUserExpungeRecoverVolume"));
response.setAllowUserViewAllDomainAccounts((Boolean)capabilities.get("allowUserViewAllDomainAccounts"));
response.setKubernetesServiceEnabled((Boolean)capabilities.get("kubernetesServiceEnabled"));
response.setKubernetesClusterExperimentalFeaturesEnabled((Boolean)capabilities.get("kubernetesClusterExperimentalFeaturesEnabled"));
if (capabilities.containsKey("apiLimitInterval")) {
response.setApiLimitInterval((Integer)capabilities.get("apiLimitInterval"));
}

View File

@ -61,6 +61,10 @@ public class DeleteIsoCmd extends BaseAsyncCmd {
return id;
}
// Setter added alongside the existing getter so this command object can be
// populated programmatically (outside normal API-layer parameter injection).
public void setId(Long id) {
this.id = id;
}
// Zone the ISO deletion is scoped to.
public Long getZoneId() {
return zoneId;
}

View File

@ -127,10 +127,18 @@ public class RegisterIsoCmd extends BaseCmd implements UserCmd {
return bootable;
}
// NOTE(review): these setters appear to exist so the command can be constructed
// programmatically (e.g. by a plugin) rather than only via API dispatch — confirm callers.
public void setBootable(Boolean bootable) {
this.bootable = bootable;
}
public String getDisplayText() {
return displayText;
}
public void setDisplayText(String displayText) {
this.displayText = displayText;
}
public Boolean isFeatured() {
return featured;
}
@ -139,6 +147,10 @@ public class RegisterIsoCmd extends BaseCmd implements UserCmd {
return publicIso;
}
// Marks the ISO as public (visible to all accounts) or private.
public void setPublic(Boolean publicIso) {
this.publicIso = publicIso;
}
public Boolean isExtractable() {
return extractable;
}
@ -147,6 +159,10 @@ public class RegisterIsoCmd extends BaseCmd implements UserCmd {
return isoName;
}
// Display name under which the ISO is registered.
public void setIsoName(String isoName) {
this.isoName = isoName;
}
public Long getOsTypeId() {
return osTypeId;
}
@ -155,22 +171,42 @@ public class RegisterIsoCmd extends BaseCmd implements UserCmd {
return url;
}
// Download URL the ISO is fetched from.
public void setUrl(String url) {
this.url = url;
}
public Long getZoneId() {
return zoneId;
}
// Zone the ISO is registered into.
public void setZoneId(Long zoneId) {
this.zoneId = zoneId;
}
public Long getDomainId() {
return domainId;
}
// Owner domain for the registered ISO.
public void setDomainId(Long domainId) {
this.domainId = domainId;
}
public String getAccountName() {
return accountName;
}
// Owner account for the registered ISO.
public void setAccountName(String accountName) {
this.accountName = accountName;
}
public String getChecksum() {
return checksum;
}
// Optional checksum used to verify the downloaded ISO.
public void setChecksum(String checksum) {
this.checksum = checksum;
}
public String getImageStoreUuid() {
return imageStoreUuid;
}

View File

@ -92,6 +92,14 @@ public class CapabilitiesResponse extends BaseResponse {
@Param(description = "true if users can see all accounts within the same domain, false otherwise")
private boolean allowUserViewAllDomainAccounts;
// Whether the Kubernetes Service plugin is enabled on this management server.
@SerializedName("kubernetesserviceenabled")
@Param(description = "true if Kubernetes Service plugin is enabled, false otherwise")
private boolean kubernetesServiceEnabled;
// Whether experimental Kubernetes cluster features (e.g. Docker private registry) are enabled.
@SerializedName("kubernetesclusterexperimentalfeaturesenabled")
@Param(description = "true if experimental features for Kubernetes cluster such as Docker private registry are enabled, false otherwise")
private boolean kubernetesClusterExperimentalFeaturesEnabled;
public void setSecurityGroupsEnabled(boolean securityGroupsEnabled) {
this.securityGroupsEnabled = securityGroupsEnabled;
}
@ -159,4 +167,12 @@ public class CapabilitiesResponse extends BaseResponse {
public void setAllowUserViewAllDomainAccounts(boolean allowUserViewAllDomainAccounts) {
this.allowUserViewAllDomainAccounts = allowUserViewAllDomainAccounts;
}
// Populated from the "kubernetesServiceEnabled" capability map entry.
public void setKubernetesServiceEnabled(boolean kubernetesServiceEnabled) {
this.kubernetesServiceEnabled = kubernetesServiceEnabled;
}
// Populated from the "kubernetesClusterExperimentalFeaturesEnabled" capability map entry.
public void setKubernetesClusterExperimentalFeaturesEnabled(boolean kubernetesClusterExperimentalFeaturesEnabled) {
this.kubernetesClusterExperimentalFeaturesEnabled = kubernetesClusterExperimentalFeaturesEnabled;
}
}

View File

@ -483,6 +483,11 @@
<artifactId>cloud-plugin-backup-dummy</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-plugin-integrations-kubernetes-service</artifactId>
<version>${project.version}</version>
</dependency>
</dependencies>
<build>
<plugins>

View File

@ -175,6 +175,9 @@ public interface IpAddressManager {
PublicIp assignPublicIpAddressFromVlans(long dcId, Long podId, Account owner, VlanType type, List<Long> vlanDbIds, Long networkId, String requestedIp, boolean isSystem)
throws InsufficientAddressCapacityException;
// NOTE(review): unlike assignPublicIpAddressFromVlans above, this presumably only
// locates a free public IP in the given VLANs without assigning it — confirm
// against the implementing class.
PublicIp getAvailablePublicIpAddressFromVlans(long dcId, Long podId, Account owner, VlanType type, List<Long> vlanDbIds, Long networkId, String requestedIp, boolean isSystem)
throws InsufficientAddressCapacityException;
@DB
void allocateNicValues(NicProfile nic, DataCenter dc, VirtualMachineProfile vm, Network network, String requestedIpv4, String requestedIpv6)
throws InsufficientVirtualNetworkCapacityException, InsufficientAddressCapacityException;

View File

@ -304,3 +304,74 @@ CREATE TABLE `cloud`.`router_health_check` (
UNIQUE `i_router_health_checks__router_id__check_name__check_type`(`router_id`, `check_name`, `check_type`),
INDEX `i_router_health_checks__router_id`(`router_id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- Kubernetes service
-- Catalog of Kubernetes versions offered to users. Each version is backed by a
-- binaries ISO stored as a vm_template row; zone_id NULL means the version is
-- available in all zones.
CREATE TABLE IF NOT EXISTS `cloud`.`kubernetes_supported_version` (
`id` bigint unsigned NOT NULL auto_increment,
`uuid` varchar(40) DEFAULT NULL,
`name` varchar(255) NOT NULL COMMENT 'the name of this Kubernetes version',
`semantic_version` varchar(32) NOT NULL COMMENT 'the semantic version for this Kubernetes version',
`iso_id` bigint unsigned NOT NULL COMMENT 'the ID of the binaries ISO for this Kubernetes version',
`zone_id` bigint unsigned DEFAULT NULL COMMENT 'the ID of the zone for which this Kubernetes version is made available',
`state` char(32) DEFAULT NULL COMMENT 'the enabled or disabled state for this Kubernetes version',
`min_cpu` int(10) unsigned NOT NULL COMMENT 'the minimum CPU needed by cluster nodes for using this Kubernetes version',
`min_ram_size` bigint(20) unsigned NOT NULL COMMENT 'the minimum RAM in MB needed by cluster nodes for this Kubernetes version',
`created` datetime NOT NULL COMMENT 'date created',
`removed` datetime COMMENT 'date removed or null, if still present',
PRIMARY KEY(`id`),
CONSTRAINT `fk_kubernetes_supported_version__iso_id` FOREIGN KEY `fk_kubernetes_supported_version__iso_id`(`iso_id`) REFERENCES `vm_template`(`id`) ON DELETE CASCADE,
CONSTRAINT `fk_kubernetes_supported_version__zone_id` FOREIGN KEY `fk_kubernetes_supported_version__zone_id`(`zone_id`) REFERENCES `data_center` (`id`) ON DELETE CASCADE
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- A deployed Kubernetes cluster: ownership, sizing, its backing network/offering,
-- lifecycle state, and garbage-collection flag. Soft-deleted via `removed`.
CREATE TABLE IF NOT EXISTS `cloud`.`kubernetes_cluster` (
`id` bigint unsigned NOT NULL auto_increment,
`uuid` varchar(40) DEFAULT NULL,
`name` varchar(255) NOT NULL,
`description` varchar(4096) COMMENT 'display text for this Kubernetes cluster',
`zone_id` bigint unsigned NOT NULL COMMENT 'the ID of the zone in which this Kubernetes cluster is deployed',
`kubernetes_version_id` bigint unsigned NOT NULL COMMENT 'the ID of the Kubernetes version of this Kubernetes cluster',
`service_offering_id` bigint unsigned COMMENT 'service offering id for the cluster VM',
`template_id` bigint unsigned COMMENT 'the ID of the template used by this Kubernetes cluster',
`network_id` bigint unsigned COMMENT 'the ID of the network used by this Kubernetes cluster',
`master_node_count` bigint NOT NULL default '0' COMMENT 'the number of the master nodes deployed for this Kubernetes cluster',
`node_count` bigint NOT NULL default '0' COMMENT 'the number of the worker nodes deployed for this Kubernetes cluster',
`account_id` bigint unsigned NOT NULL COMMENT 'the ID of owner account of this Kubernetes cluster',
`domain_id` bigint unsigned NOT NULL COMMENT 'the ID of the domain of this cluster',
`state` char(32) NOT NULL COMMENT 'the current state of this Kubernetes cluster',
`key_pair` varchar(40),
`cores` bigint unsigned NOT NULL COMMENT 'total number of CPU cores used by this Kubernetes cluster',
`memory` bigint unsigned NOT NULL COMMENT 'total memory used by this Kubernetes cluster',
`node_root_disk_size` bigint(20) unsigned DEFAULT 0 COMMENT 'root disk size of root disk for each node',
`endpoint` varchar(255) COMMENT 'url endpoint of the Kubernetes cluster manager api access',
`created` datetime NOT NULL COMMENT 'date created',
`removed` datetime COMMENT 'date removed or null, if still present',
`gc` tinyint unsigned NOT NULL DEFAULT 1 COMMENT 'gc this Kubernetes cluster or not',
PRIMARY KEY(`id`),
CONSTRAINT `fk_cluster__zone_id` FOREIGN KEY `fk_cluster__zone_id`(`zone_id`) REFERENCES `data_center` (`id`) ON DELETE CASCADE,
CONSTRAINT `fk_cluster__kubernetes_version_id` FOREIGN KEY `fk_cluster__kubernetes_version_id`(`kubernetes_version_id`) REFERENCES `kubernetes_supported_version` (`id`) ON DELETE CASCADE,
CONSTRAINT `fk_cluster__service_offering_id` FOREIGN KEY `fk_cluster__service_offering_id`(`service_offering_id`) REFERENCES `service_offering`(`id`) ON DELETE CASCADE,
CONSTRAINT `fk_cluster__template_id` FOREIGN KEY `fk_cluster__template_id`(`template_id`) REFERENCES `vm_template`(`id`) ON DELETE CASCADE,
CONSTRAINT `fk_cluster__network_id` FOREIGN KEY `fk_cluster__network_id`(`network_id`) REFERENCES `networks`(`id`) ON DELETE CASCADE
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- Join table mapping a cluster to the VM instances that make up its nodes.
CREATE TABLE IF NOT EXISTS `cloud`.`kubernetes_cluster_vm_map` (
`id` bigint unsigned NOT NULL auto_increment,
`cluster_id` bigint unsigned NOT NULL COMMENT 'the ID of the Kubernetes cluster',
`vm_id` bigint unsigned NOT NULL COMMENT 'the ID of the VM',
PRIMARY KEY(`id`),
CONSTRAINT `fk_kubernetes_cluster_vm_map__cluster_id` FOREIGN KEY `fk_kubernetes_cluster_vm_map__cluster_id`(`cluster_id`) REFERENCES `kubernetes_cluster`(`id`) ON DELETE CASCADE
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- Free-form name/value details attached to a cluster (e.g. registry credentials);
-- `display` controls end-user visibility.
CREATE TABLE IF NOT EXISTS `cloud`.`kubernetes_cluster_details` (
`id` bigint unsigned NOT NULL auto_increment,
`cluster_id` bigint unsigned NOT NULL COMMENT 'the ID of the Kubernetes cluster',
`name` varchar(255) NOT NULL,
`value` varchar(10240) NOT NULL,
`display` tinyint(1) NOT NULL DEFAULT '1' COMMENT 'True if the detail can be displayed to the end user else false',
PRIMARY KEY(`id`),
CONSTRAINT `fk_kubernetes_cluster_details__cluster_id` FOREIGN KEY `fk_kubernetes_cluster_details__cluster_id`(`cluster_id`) REFERENCES `kubernetes_cluster`(`id`) ON DELETE CASCADE
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

View File

@ -0,0 +1,135 @@
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
   http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <artifactId>cloud-plugin-integrations-kubernetes-service</artifactId>
    <name>Apache CloudStack Plugin - Kubernetes Service</name>
    <parent>
        <groupId>org.apache.cloudstack</groupId>
        <artifactId>cloudstack-plugins</artifactId>
        <version>4.14.0.0-SNAPSHOT</version>
        <relativePath>../../pom.xml</relativePath>
    </parent>
    <dependencies>
        <!-- CloudStack modules this plugin builds against -->
        <dependency>
            <groupId>org.apache.cloudstack</groupId>
            <artifactId>cloud-core</artifactId>
            <version>${project.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.cloudstack</groupId>
            <artifactId>cloud-framework-db</artifactId>
            <version>${project.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.cloudstack</groupId>
            <artifactId>cloud-framework-ca</artifactId>
            <version>${project.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.cloudstack</groupId>
            <artifactId>cloud-framework-security</artifactId>
            <version>${project.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.cloudstack</groupId>
            <artifactId>cloud-engine-schema</artifactId>
            <version>${project.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.cloudstack</groupId>
            <artifactId>cloud-engine-api</artifactId>
            <version>${project.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.cloudstack</groupId>
            <artifactId>cloud-engine-components-api</artifactId>
            <version>${project.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.cloudstack</groupId>
            <artifactId>cloud-framework-managed-context</artifactId>
            <version>${project.version}</version>
        </dependency>
        <!-- Third-party libraries; versions are managed by parent pom properties -->
        <dependency>
            <groupId>org.eclipse.persistence</groupId>
            <artifactId>javax.persistence</artifactId>
            <version>${cs.jpa.version}</version>
        </dependency>
        <dependency>
            <groupId>com.google.code.gson</groupId>
            <artifactId>gson</artifactId>
            <version>${cs.gson.version}</version>
        </dependency>
        <dependency>
            <groupId>com.google.guava</groupId>
            <artifactId>guava</artifactId>
            <version>${cs.guava.version}</version>
        </dependency>
        <dependency>
            <groupId>log4j</groupId>
            <artifactId>log4j</artifactId>
            <version>${cs.log4j.version}</version>
        </dependency>
        <dependency>
            <groupId>org.springframework</groupId>
            <artifactId>spring-context</artifactId>
            <version>${org.springframework.version}</version>
        </dependency>
        <dependency>
            <groupId>org.springframework</groupId>
            <artifactId>spring-aop</artifactId>
            <version>${org.springframework.version}</version>
        </dependency>
        <dependency>
            <groupId>org.springframework</groupId>
            <artifactId>spring-beans</artifactId>
            <version>${org.springframework.version}</version>
        </dependency>
        <!-- spring-test is only used by unit tests; scope it to "test" so it does
             not leak onto the compile/runtime classpath (matches hamcrest below) -->
        <dependency>
            <groupId>org.springframework</groupId>
            <artifactId>spring-test</artifactId>
            <version>${org.springframework.version}</version>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>commons-codec</groupId>
            <artifactId>commons-codec</artifactId>
            <version>${cs.codec.version}</version>
        </dependency>
        <dependency>
            <groupId>org.hamcrest</groupId>
            <artifactId>hamcrest-library</artifactId>
            <version>${cs.hamcrest.version}</version>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>org.bouncycastle</groupId>
            <artifactId>bcprov-jdk15on</artifactId>
            <version>${cs.bcprov.version}</version>
        </dependency>
        <dependency>
            <groupId>joda-time</groupId>
            <artifactId>joda-time</artifactId>
            <version>${cs.joda-time.version}</version>
        </dependency>
    </dependencies>
</project>

View File

@ -0,0 +1,134 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.kubernetes.cluster;
import java.util.Date;
import org.apache.cloudstack.acl.ControlledEntity;
import org.apache.cloudstack.api.Displayable;
import org.apache.cloudstack.api.Identity;
import org.apache.cloudstack.api.InternalIdentity;
import com.cloud.utils.fsm.StateMachine2;
/**
 * KubernetesCluster describes the properties of a Kubernetes cluster.
 * Its lifecycle is governed by the {@link StateMachine2} registered on
 * {@link State}; see the static initializer there for the valid transitions.
 */
public interface KubernetesCluster extends ControlledEntity, com.cloud.utils.fsm.StateObject<KubernetesCluster.State>, Identity, InternalIdentity, Displayable {

    /** Events that drive transitions of the cluster state machine. */
    enum Event {
        StartRequested,
        StopRequested,
        DestroyRequested,
        RecoveryRequested,
        ScaleUpRequested,
        ScaleDownRequested,
        UpgradeRequested,
        OperationSucceeded,
        OperationFailed,
        CreateFailed,
        FaultsDetected;
    }

    /** Lifecycle states of a Kubernetes cluster; each carries a human-readable description. */
    enum State {
        // Typos fixed in descriptions only: "its just" -> "it is just",
        // "control place" -> "control plane".
        Created("Initial State of Kubernetes cluster. At this state it is just a logical/DB entry with no resources consumed"),
        Starting("Resources needed for Kubernetes cluster are being provisioned"),
        Running("Necessary resources are provisioned and Kubernetes cluster is in operational ready state to launch Kubernetes"),
        Stopping("Resources for the Kubernetes cluster are being destroyed"),
        Stopped("All resources for the Kubernetes cluster are destroyed, Kubernetes cluster may still have ephemeral resource like persistent volumes provisioned"),
        Scaling("Transient state in which resources are either getting scaled up/down"),
        Upgrading("Transient state in which cluster is getting upgraded"),
        Alert("State to represent Kubernetes clusters which are not in expected desired state (operationally in active control plane, stopped cluster VM's etc)."),
        Recovering("State in which Kubernetes cluster is recovering from alert state"),
        Destroyed("End state of Kubernetes cluster in which all resources are destroyed, cluster will not be usable further"),
        Destroying("State in which resources for the Kubernetes cluster is getting cleaned up or yet to be cleaned up by garbage collector"),
        Error("State of the failed to create Kubernetes clusters");

        protected static final StateMachine2<State, KubernetesCluster.Event, KubernetesCluster> s_fsm = new StateMachine2<State, KubernetesCluster.Event, KubernetesCluster>();

        public static StateMachine2<State, KubernetesCluster.Event, KubernetesCluster> getStateMachine() { return s_fsm; }

        // Register every legal (state, event) -> state transition; any pair not
        // listed here is rejected by the state machine.
        static {
            s_fsm.addTransition(State.Created, Event.StartRequested, State.Starting);
            s_fsm.addTransition(State.Starting, Event.OperationSucceeded, State.Running);
            s_fsm.addTransition(State.Starting, Event.OperationFailed, State.Alert);
            s_fsm.addTransition(State.Starting, Event.CreateFailed, State.Error);
            s_fsm.addTransition(State.Starting, Event.StopRequested, State.Stopping);
            s_fsm.addTransition(State.Running, Event.StopRequested, State.Stopping);
            s_fsm.addTransition(State.Alert, Event.StopRequested, State.Stopping);
            s_fsm.addTransition(State.Stopping, Event.OperationSucceeded, State.Stopped);
            s_fsm.addTransition(State.Stopping, Event.OperationFailed, State.Alert);
            s_fsm.addTransition(State.Stopped, Event.StartRequested, State.Starting);
            s_fsm.addTransition(State.Running, Event.FaultsDetected, State.Alert);
            s_fsm.addTransition(State.Running, Event.ScaleUpRequested, State.Scaling);
            s_fsm.addTransition(State.Running, Event.ScaleDownRequested, State.Scaling);
            s_fsm.addTransition(State.Scaling, Event.OperationSucceeded, State.Running);
            s_fsm.addTransition(State.Scaling, Event.OperationFailed, State.Alert);
            s_fsm.addTransition(State.Running, Event.UpgradeRequested, State.Upgrading);
            s_fsm.addTransition(State.Upgrading, Event.OperationSucceeded, State.Running);
            s_fsm.addTransition(State.Upgrading, Event.OperationFailed, State.Alert);
            s_fsm.addTransition(State.Alert, Event.RecoveryRequested, State.Recovering);
            s_fsm.addTransition(State.Recovering, Event.OperationSucceeded, State.Running);
            s_fsm.addTransition(State.Recovering, Event.OperationFailed, State.Alert);
            s_fsm.addTransition(State.Running, Event.DestroyRequested, State.Destroying);
            s_fsm.addTransition(State.Stopped, Event.DestroyRequested, State.Destroying);
            s_fsm.addTransition(State.Alert, Event.DestroyRequested, State.Destroying);
            s_fsm.addTransition(State.Error, Event.DestroyRequested, State.Destroying);
            s_fsm.addTransition(State.Destroying, Event.OperationSucceeded, State.Destroyed);
        }

        String _description;

        State(String description) {
            _description = description;
        }
    }

    long getId();
    String getName();
    String getDescription();
    long getZoneId();
    long getKubernetesVersionId();
    long getServiceOfferingId();
    long getTemplateId();
    long getNetworkId();
    long getDomainId();
    long getAccountId();
    long getMasterNodeCount();
    long getNodeCount();
    // Convenience: master + worker node count combined.
    long getTotalNodeCount();
    String getKeyPair();
    long getCores();
    long getMemory();
    long getNodeRootDiskSize();
    String getEndpoint();
    // Whether the garbage collector should clean up this cluster's resources.
    boolean isCheckForGc();
    @Override
    State getState();
    Date getCreated();
}

View File

@ -0,0 +1,84 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.kubernetes.cluster;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.Id;
import javax.persistence.Table;
import javax.persistence.GeneratedValue;
import javax.persistence.GenerationType;
import org.apache.cloudstack.api.ResourceDetail;
// Persistence object for the `kubernetes_cluster_details` table: a single
// name/value detail row attached to a Kubernetes cluster.
@Entity
@Table(name = "kubernetes_cluster_details")
public class KubernetesClusterDetailsVO implements ResourceDetail {
@Id
@GeneratedValue(strategy = GenerationType.IDENTITY)
@Column(name = "id")
private long id;
// ID of the owning kubernetes_cluster row; exposed via getResourceId().
@Column(name = "cluster_id")
private long resourceId;
@Column(name = "name")
private String name;
@Column(name = "value", length = 10240)
private String value;
// True if the detail may be shown to end users.
@Column(name = "display")
private boolean display;
public KubernetesClusterDetailsVO() {
}
// NOTE(review): the first parameter is the cluster (resource) id, NOT this
// row's own id — consider renaming the parameter to resourceId for clarity.
public KubernetesClusterDetailsVO(long id, String name, String value, boolean display) {
this.resourceId = id;
this.name = name;
this.value = value;
this.display = display;
}
@Override
public long getId() {
return id;
}
@Override
public String getName() {
return name;
}
@Override
public String getValue() {
return value;
}
@Override
public long getResourceId() {
return resourceId;
}
@Override
public boolean isDisplay() {
return display;
}
}

View File

@ -0,0 +1,26 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.kubernetes.cluster;
/**
 * Event type identifiers for Kubernetes cluster lifecycle operations.
 */
public class KubernetesClusterEventTypes {
    public static final String EVENT_KUBERNETES_CLUSTER_CREATE = "KUBERNETES.CLUSTER.CREATE";
    public static final String EVENT_KUBERNETES_CLUSTER_DELETE = "KUBERNETES.CLUSTER.DELETE";
    public static final String EVENT_KUBERNETES_CLUSTER_START = "KUBERNETES.CLUSTER.START";
    public static final String EVENT_KUBERNETES_CLUSTER_STOP = "KUBERNETES.CLUSTER.STOP";
    public static final String EVENT_KUBERNETES_CLUSTER_SCALE = "KUBERNETES.CLUSTER.SCALE";
    public static final String EVENT_KUBERNETES_CLUSTER_UPGRADE = "KUBERNETES.CLUSTER.UPGRADE";

    // Constants holder — prevent instantiation (Effective Java, Item 4).
    private KubernetesClusterEventTypes() {
    }
}

View File

@ -0,0 +1,108 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.kubernetes.cluster;
import org.apache.cloudstack.api.command.user.kubernetes.cluster.CreateKubernetesClusterCmd;
import org.apache.cloudstack.api.command.user.kubernetes.cluster.GetKubernetesClusterConfigCmd;
import org.apache.cloudstack.api.command.user.kubernetes.cluster.ListKubernetesClustersCmd;
import org.apache.cloudstack.api.command.user.kubernetes.cluster.ScaleKubernetesClusterCmd;
import org.apache.cloudstack.api.command.user.kubernetes.cluster.UpgradeKubernetesClusterCmd;
import org.apache.cloudstack.api.response.KubernetesClusterConfigResponse;
import org.apache.cloudstack.api.response.KubernetesClusterResponse;
import org.apache.cloudstack.api.response.ListResponse;
import org.apache.cloudstack.framework.config.ConfigKey;
import org.apache.cloudstack.framework.config.Configurable;
import com.cloud.utils.component.PluggableService;
import com.cloud.utils.exception.CloudRuntimeException;
/**
 * API surface of the Kubernetes Service plugin: create/list/start/stop/delete,
 * scale and upgrade operations on Kubernetes clusters, plus the plugin's global
 * configuration keys.
 */
public interface KubernetesClusterService extends PluggableService, Configurable {

    // Interface fields are implicitly public static final, so the redundant
    // "static final" modifiers have been dropped (Checkstyle: RedundantModifier).

    /** Minimum Kubernetes version for which HA (multi-master) clusters are supported. */
    String MIN_KUBERNETES_VERSION_HA_SUPPORT = "1.16.0";
    /** Minimum CPU cores required of a cluster node's service offering. */
    int MIN_KUBERNETES_CLUSTER_NODE_CPU = 2;
    /** Minimum RAM (in MB) required of a cluster node's service offering. */
    int MIN_KUBERNETES_CLUSTER_NODE_RAM_SIZE = 2048;

    ConfigKey<Boolean> KubernetesServiceEnabled = new ConfigKey<Boolean>("Advanced", Boolean.class,
            "cloud.kubernetes.service.enabled",
            "false",
            "Indicates whether Kubernetes Service plugin is enabled or not. Management server restart needed on change",
            false);
    ConfigKey<String> KubernetesClusterHyperVTemplateName = new ConfigKey<String>("Advanced", String.class,
            "cloud.kubernetes.cluster.template.name.hyperv",
            "Kubernetes-Service-Template-HyperV",
            "Name of the template to be used for creating Kubernetes cluster nodes on HyperV",
            true);
    ConfigKey<String> KubernetesClusterKVMTemplateName = new ConfigKey<String>("Advanced", String.class,
            "cloud.kubernetes.cluster.template.name.kvm",
            "Kubernetes-Service-Template-KVM",
            "Name of the template to be used for creating Kubernetes cluster nodes on KVM",
            true);
    ConfigKey<String> KubernetesClusterVMwareTemplateName = new ConfigKey<String>("Advanced", String.class,
            "cloud.kubernetes.cluster.template.name.vmware",
            "Kubernetes-Service-Template-VMware",
            "Name of the template to be used for creating Kubernetes cluster nodes on VMware",
            true);
    ConfigKey<String> KubernetesClusterXenserverTemplateName = new ConfigKey<String>("Advanced", String.class,
            "cloud.kubernetes.cluster.template.name.xenserver",
            "Kubernetes-Service-Template-Xenserver",
            "Name of the template to be used for creating Kubernetes cluster nodes on Xenserver",
            true);
    ConfigKey<String> KubernetesClusterNetworkOffering = new ConfigKey<String>("Advanced", String.class,
            "cloud.kubernetes.cluster.network.offering",
            "DefaultNetworkOfferingforKubernetesService",
            "Name of the network offering that will be used to create isolated network in which Kubernetes cluster VMs will be launched",
            false);
    ConfigKey<Long> KubernetesClusterStartTimeout = new ConfigKey<Long>("Advanced", Long.class,
            "cloud.kubernetes.cluster.start.timeout",
            "3600",
            "Timeout interval (in seconds) in which start operation for a Kubernetes cluster should be completed",
            true);
    ConfigKey<Long> KubernetesClusterScaleTimeout = new ConfigKey<Long>("Advanced", Long.class,
            "cloud.kubernetes.cluster.scale.timeout",
            "3600",
            "Timeout interval (in seconds) in which scale operation for a Kubernetes cluster should be completed",
            true);
    ConfigKey<Long> KubernetesClusterUpgradeTimeout = new ConfigKey<Long>("Advanced", Long.class,
            "cloud.kubernetes.cluster.upgrade.timeout",
            "3600",
            "Timeout interval (in seconds) in which upgrade operation for a Kubernetes cluster should be completed. Not strictly obeyed while upgrade is in progress on a node",
            true);
    ConfigKey<Boolean> KubernetesClusterExperimentalFeaturesEnabled = new ConfigKey<Boolean>("Advanced", Boolean.class,
            "cloud.kubernetes.cluster.experimental.features.enabled",
            "false",
            "Indicates whether experimental feature for Kubernetes cluster such as Docker private registry are enabled or not",
            true);

    KubernetesCluster findById(final Long id);

    /** Creates the DB entry for a new Kubernetes cluster from the API command. */
    KubernetesCluster createKubernetesCluster(CreateKubernetesClusterCmd cmd) throws CloudRuntimeException;

    /** Starts the cluster; {@code onCreate} distinguishes first start after creation. */
    boolean startKubernetesCluster(long kubernetesClusterId, boolean onCreate) throws CloudRuntimeException;

    boolean stopKubernetesCluster(long kubernetesClusterId) throws CloudRuntimeException;

    boolean deleteKubernetesCluster(Long kubernetesClusterId) throws CloudRuntimeException;

    ListResponse<KubernetesClusterResponse> listKubernetesClusters(ListKubernetesClustersCmd cmd);

    /** Returns the kubeconfig-style access configuration for a cluster. */
    KubernetesClusterConfigResponse getKubernetesClusterConfig(GetKubernetesClusterConfigCmd cmd);

    KubernetesClusterResponse createKubernetesClusterResponse(long kubernetesClusterId);

    boolean scaleKubernetesCluster(ScaleKubernetesClusterCmd cmd) throws CloudRuntimeException;

    boolean upgradeKubernetesCluster(UpgradeKubernetesClusterCmd cmd) throws CloudRuntimeException;
}

View File

@ -0,0 +1,340 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.kubernetes.cluster;
import java.util.Date;
import java.util.UUID;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.Id;
import javax.persistence.Table;
import javax.persistence.GeneratedValue;
import javax.persistence.GenerationType;
import com.cloud.utils.db.GenericDao;
@Entity
@Table(name = "kubernetes_cluster")
public class KubernetesClusterVO implements KubernetesCluster {
@Id
@GeneratedValue(strategy = GenerationType.IDENTITY)
@Column(name = "id")
private long id;
@Column(name = "uuid")
private String uuid;
@Column(name = "name")
private String name;
@Column(name = "description", length = 4096)
private String description;
@Column(name = "zone_id")
private long zoneId;
@Column(name = "kubernetes_version_id")
private long kubernetesVersionId;
@Column(name = "service_offering_id")
private long serviceOfferingId;
@Column(name = "template_id")
private long templateId;
@Column(name = "network_id")
private long networkId;
@Column(name = "domain_id")
private long domainId;
@Column(name = "account_id")
private long accountId;
@Column(name = "master_node_count")
private long masterNodeCount;
@Column(name = "node_count")
private long nodeCount;
@Column(name = "cores")
private long cores;
@Column(name = "memory")
private long memory;
@Column(name = "node_root_disk_size")
private long nodeRootDiskSize;
@Column(name = "state")
private State state;
@Column(name = "key_pair")
private String keyPair;
@Column(name = "endpoint")
private String endpoint;
@Column(name = GenericDao.CREATED_COLUMN)
private Date created;
@Column(name = GenericDao.REMOVED_COLUMN)
private Date removed;
@Column(name = "gc")
private boolean checkForGc;
@Override
public long getId() {
return id;
}
public void setId(long id) {
this.id = id;
}
@Override
public String getUuid() {
return uuid;
}
public void setUuid(String uuid) {
this.uuid = uuid;
}
@Override
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
@Override
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
@Override
public long getZoneId() {
return zoneId;
}
public void setZoneId(long zoneId) {
this.zoneId = zoneId;
}
@Override
public long getKubernetesVersionId() {
return kubernetesVersionId;
}
public void setKubernetesVersionId(long kubernetesVersionId) {
this.kubernetesVersionId = kubernetesVersionId;
}
@Override
public long getServiceOfferingId() {
return serviceOfferingId;
}
public void setServiceOfferingId(long serviceOfferingId) {
this.serviceOfferingId = serviceOfferingId;
}
@Override
public long getTemplateId() {
return templateId;
}
public void setTemplateId(long templateId) {
this.templateId = templateId;
}
@Override
public long getNetworkId() {
return networkId;
}
public void setNetworkId(long networkId) {
this.networkId = networkId;
}
@Override
public long getDomainId() {
return domainId;
}
public void setDomainId(long domainId) {
this.domainId = domainId;
}
@Override
public long getAccountId() {
return accountId;
}
public void setAccountId(long accountId) {
this.accountId = accountId;
}
@Override
public long getMasterNodeCount() {
return masterNodeCount;
}
public void setMasterNodeCount(long masterNodeCount) {
this.masterNodeCount = masterNodeCount;
}
@Override
public long getNodeCount() {
return nodeCount;
}
public void setNodeCount(long nodeCount) {
this.nodeCount = nodeCount;
}
@Override
public long getTotalNodeCount() {
return this.masterNodeCount + this.nodeCount;
}
@Override
public long getCores() {
return cores;
}
public void setCores(long cores) {
this.cores = cores;
}
@Override
public long getMemory() {
return memory;
}
public void setMemory(long memory) {
this.memory = memory;
}
@Override
public long getNodeRootDiskSize() {
return nodeRootDiskSize;
}
public void setNodeRootDiskSize(long nodeRootDiskSize) {
this.nodeRootDiskSize = nodeRootDiskSize;
}
@Override
public State getState() {
return state;
}
public void setState(State state) {
this.state = state;
}
@Override
public String getEndpoint() {
return endpoint;
}
public void setEndpoint(String endpoint) {
this.endpoint = endpoint;
}
public String getKeyPair() {
return keyPair;
}
public void setKeyPair(String keyPair) {
this.keyPair = keyPair;
}
@Override
public boolean isDisplay() {
return true;
}
public Date getRemoved() {
if (removed == null)
return null;
return new Date(removed.getTime());
}
@Override
public boolean isCheckForGc() {
return checkForGc;
}
public void setCheckForGc(boolean check) {
checkForGc = check;
}
@Override
public Date getCreated() {
return created;
}
public KubernetesClusterVO() {
this.uuid = UUID.randomUUID().toString();
}
public KubernetesClusterVO(String name, String description, long zoneId, long kubernetesVersionId, long serviceOfferingId, long templateId,
long networkId, long domainId, long accountId, long masterNodeCount, long nodeCount, State state,
String keyPair, long cores, long memory, Long nodeRootDiskSize, String endpoint) {
this.uuid = UUID.randomUUID().toString();
this.name = name;
this.description = description;
this.zoneId = zoneId;
this.kubernetesVersionId = kubernetesVersionId;
this.serviceOfferingId = serviceOfferingId;
this.templateId = templateId;
this.networkId = networkId;
this.domainId = domainId;
this.accountId = accountId;
this.masterNodeCount = masterNodeCount;
this.nodeCount = nodeCount;
this.state = state;
this.keyPair = keyPair;
this.cores = cores;
this.memory = memory;
if (nodeRootDiskSize != null && nodeRootDiskSize > 0) {
this.nodeRootDiskSize = nodeRootDiskSize;
}
this.endpoint = endpoint;
this.checkForGc = false;
}
@Override
public Class<?> getEntityType() {
return KubernetesCluster.class;
}
}

View File

@ -0,0 +1,30 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.kubernetes.cluster;
/**
 * KubernetesClusterVmMap describes the mapping between the ID of a
 * KubernetesCluster and the ID of one of its VirtualMachines. A
 * KubernetesCluster can have multiple VMs deployed for it, therefore a list
 * of KubernetesClusterVmMap entries is associated with a KubernetesCluster.
 * A particular VM can be deployed only for a single KubernetesCluster.
 */
public interface KubernetesClusterVmMap {
    /** @return internal database ID of this mapping row */
    long getId();

    /** @return ID of the Kubernetes cluster the VM belongs to */
    long getClusterId();

    /** @return ID of the VM deployed for the cluster */
    long getVmId();
}

View File

@ -0,0 +1,76 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.kubernetes.cluster;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.Id;
import javax.persistence.Table;
import javax.persistence.GeneratedValue;
import javax.persistence.GenerationType;
@Entity
@Table(name = "kubernetes_cluster_vm_map")
public class KubernetesClusterVmMapVO implements KubernetesClusterVmMap {
@Override
public long getId() {
return id;
}
@Override
public long getClusterId() {
return clusterId;
}
public void setClusterId(long clusterId) {
this.clusterId = clusterId;
}
@Override
public long getVmId() {
return vmId;
}
public void setVmId(long vmId) {
this.vmId = vmId;
}
@Id
@GeneratedValue(strategy = GenerationType.IDENTITY)
@Column(name = "id")
long id;
@Column(name = "cluster_id")
long clusterId;
@Column(name = "vm_id")
long vmId;
public KubernetesClusterVmMapVO() {
}
public KubernetesClusterVmMapVO(long clusterId, long vmId) {
this.vmId = vmId;
this.clusterId = clusterId;
}
}

View File

@ -0,0 +1,380 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.kubernetes.cluster.actionworkers;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import javax.inject.Inject;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.ca.CAManager;
import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.io.IOUtils;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import com.cloud.dc.dao.DataCenterDao;
import com.cloud.dc.dao.VlanDao;
import com.cloud.kubernetes.cluster.KubernetesCluster;
import com.cloud.kubernetes.cluster.KubernetesClusterDetailsVO;
import com.cloud.kubernetes.cluster.KubernetesClusterManagerImpl;
import com.cloud.kubernetes.cluster.KubernetesClusterVO;
import com.cloud.kubernetes.cluster.KubernetesClusterVmMapVO;
import com.cloud.kubernetes.cluster.dao.KubernetesClusterDao;
import com.cloud.kubernetes.cluster.dao.KubernetesClusterDetailsDao;
import com.cloud.kubernetes.cluster.dao.KubernetesClusterVmMapDao;
import com.cloud.kubernetes.version.KubernetesSupportedVersion;
import com.cloud.kubernetes.version.dao.KubernetesSupportedVersionDao;
import com.cloud.network.IpAddress;
import com.cloud.network.IpAddressManager;
import com.cloud.network.Network;
import com.cloud.network.NetworkModel;
import com.cloud.network.dao.NetworkDao;
import com.cloud.service.dao.ServiceOfferingDao;
import com.cloud.storage.Storage;
import com.cloud.storage.VMTemplateVO;
import com.cloud.storage.dao.VMTemplateDao;
import com.cloud.template.TemplateApiService;
import com.cloud.template.VirtualMachineTemplate;
import com.cloud.user.Account;
import com.cloud.user.dao.AccountDao;
import com.cloud.user.dao.SSHKeyPairDao;
import com.cloud.uservm.UserVm;
import com.cloud.utils.Pair;
import com.cloud.utils.StringUtils;
import com.cloud.utils.db.Transaction;
import com.cloud.utils.db.TransactionCallback;
import com.cloud.utils.db.TransactionStatus;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.fsm.NoTransitionException;
import com.cloud.utils.fsm.StateMachine2;
import com.cloud.vm.UserVmService;
import com.cloud.vm.dao.UserVmDao;
import com.google.common.base.Strings;
/**
 * Base class for workers that carry out lifecycle actions on a Kubernetes
 * cluster. Holds the shared services/DAOs and helper routines for state
 * transitions, binaries-ISO attach/detach and cluster VM lookup.
 */
public class KubernetesClusterActionWorker {

    public static final String CLUSTER_NODE_VM_USER = "core";
    public static final int CLUSTER_API_PORT = 6443;
    public static final int CLUSTER_NODES_DEFAULT_START_SSH_PORT = 2222;

    protected static final Logger LOGGER = Logger.getLogger(KubernetesClusterActionWorker.class);

    // Drives KubernetesCluster.State transitions on lifecycle events.
    protected StateMachine2<KubernetesCluster.State, KubernetesCluster.Event, KubernetesCluster> _stateMachine = KubernetesCluster.State.getStateMachine();

    @Inject
    protected CAManager caManager;
    @Inject
    protected ConfigurationDao configurationDao;
    @Inject
    protected DataCenterDao dataCenterDao;
    @Inject
    protected AccountDao accountDao;
    @Inject
    protected IpAddressManager ipAddressManager;
    @Inject
    protected NetworkOrchestrationService networkMgr;
    @Inject
    protected NetworkDao networkDao;
    @Inject
    protected NetworkModel networkModel;
    @Inject
    protected ServiceOfferingDao serviceOfferingDao;
    @Inject
    protected SSHKeyPairDao sshKeyPairDao;
    @Inject
    protected VMTemplateDao templateDao;
    @Inject
    protected TemplateApiService templateService;
    @Inject
    protected UserVmDao userVmDao;
    @Inject
    protected UserVmService userVmService;
    @Inject
    protected VlanDao vlanDao;

    // These DAOs are copied from the manager because workers are constructed
    // manually and are not part of the injection context.
    protected KubernetesClusterDao kubernetesClusterDao;
    protected KubernetesClusterVmMapDao kubernetesClusterVmMapDao;
    protected KubernetesClusterDetailsDao kubernetesClusterDetailsDao;
    protected KubernetesSupportedVersionDao kubernetesSupportedVersionDao;

    protected KubernetesCluster kubernetesCluster;
    protected Account owner;
    protected File sshKeyFile;
    protected String publicIpAddress;
    protected int sshPort;

    protected KubernetesClusterActionWorker(final KubernetesCluster kubernetesCluster, final KubernetesClusterManagerImpl clusterManager) {
        this.kubernetesCluster = kubernetesCluster;
        this.kubernetesClusterDao = clusterManager.kubernetesClusterDao;
        this.kubernetesClusterDetailsDao = clusterManager.kubernetesClusterDetailsDao;
        this.kubernetesClusterVmMapDao = clusterManager.kubernetesClusterVmMapDao;
        this.kubernetesSupportedVersionDao = clusterManager.kubernetesSupportedVersionDao;
    }

    /** Resolves the owning account and the management server's SSH key file. */
    protected void init() {
        this.owner = accountDao.findById(kubernetesCluster.getAccountId());
        this.sshKeyFile = getManagementServerSshPublicKeyFile();
    }

    /** Reads a classpath resource fully into a string using the preferred charset. */
    protected String readResourceFile(String resource) throws IOException {
        return IOUtils.toString(Objects.requireNonNull(Thread.currentThread().getContextClassLoader().getResourceAsStream(resource)), StringUtils.getPreferredCharset());
    }

    /**
     * Logs the message (with optional exception) at the requested log4j level,
     * honoring the logger's isInfoEnabled/isDebugEnabled guards.
     */
    protected void logMessage(final Level logLevel, final String message, final Exception e) {
        if (logLevel == Level.INFO) {
            if (LOGGER.isInfoEnabled()) {
                if (e != null) {
                    LOGGER.info(message, e);
                } else {
                    LOGGER.info(message);
                }
            }
        } else if (logLevel == Level.DEBUG) {
            if (LOGGER.isDebugEnabled()) {
                if (e != null) {
                    LOGGER.debug(message, e);
                } else {
                    LOGGER.debug(message);
                }
            }
        } else if (logLevel == Level.WARN) {
            if (e != null) {
                LOGGER.warn(message, e);
            } else {
                LOGGER.warn(message);
            }
        } else {
            if (e != null) {
                LOGGER.error(message, e);
            } else {
                LOGGER.error(message);
            }
        }
    }

    /**
     * Logs, transitions the cluster state on the given event, detaches the
     * binaries ISO from the cluster VMs and finally throws CloudRuntimeException.
     */
    protected void logTransitStateDetachIsoAndThrow(final Level logLevel, final String message, final KubernetesCluster kubernetesCluster,
                                                    final List<UserVm> clusterVMs, final KubernetesCluster.Event event, final Exception e) throws CloudRuntimeException {
        logMessage(logLevel, message, e);
        stateTransitTo(kubernetesCluster.getId(), event);
        detachIsoKubernetesVMs(clusterVMs);
        if (e == null) {
            throw new CloudRuntimeException(message);
        }
        throw new CloudRuntimeException(message, e);
    }

    /**
     * Logs, optionally transitions the cluster state (when both ID and event
     * are given) and throws CloudRuntimeException, preserving the cause.
     */
    protected void logTransitStateAndThrow(final Level logLevel, final String message, final Long kubernetesClusterId, final KubernetesCluster.Event event, final Exception e) throws CloudRuntimeException {
        logMessage(logLevel, message, e);
        if (kubernetesClusterId != null && event != null) {
            stateTransitTo(kubernetesClusterId, event);
        }
        if (e == null) {
            throw new CloudRuntimeException(message);
        }
        throw new CloudRuntimeException(message, e);
    }

    protected void logTransitStateAndThrow(final Level logLevel, final String message, final Long kubernetesClusterId, final KubernetesCluster.Event event) throws CloudRuntimeException {
        logTransitStateAndThrow(logLevel, message, kubernetesClusterId, event, null);
    }

    protected void logAndThrow(final Level logLevel, final String message) throws CloudRuntimeException {
        logTransitStateAndThrow(logLevel, message, null, null, null);
    }

    protected void logAndThrow(final Level logLevel, final String message, final Exception ex) throws CloudRuntimeException {
        logTransitStateAndThrow(logLevel, message, null, null, ex);
    }

    /**
     * Locates the management server's SSH private key file
     * (~/.ssh/id_rsa, or ~/.ssh/id_rsa.cloud in developer mode).
     */
    protected File getManagementServerSshPublicKeyFile() {
        boolean devel = Boolean.parseBoolean(configurationDao.getValue("developer"));
        String keyFile = String.format("%s/.ssh/id_rsa", System.getProperty("user.home"));
        if (devel) {
            keyFile += ".cloud";
        }
        return new File(keyFile);
    }

    /** Persists a cluster-to-VM mapping entry within a transaction. */
    protected KubernetesClusterVmMapVO addKubernetesClusterVm(final long kubernetesClusterId, final long vmId) {
        return Transaction.execute(new TransactionCallback<KubernetesClusterVmMapVO>() {
            @Override
            public KubernetesClusterVmMapVO doInTransaction(TransactionStatus status) {
                KubernetesClusterVmMapVO newClusterVmMap = new KubernetesClusterVmMapVO(kubernetesClusterId, vmId);
                kubernetesClusterVmMapDao.persist(newClusterVmMap);
                return newClusterVmMap;
            }
        });
    }

    /**
     * Returns the given master VM if non-null; otherwise the cluster VM with
     * the lowest VM ID (treated as the master), or null when none are mapped.
     */
    private UserVm fetchMasterVmIfMissing(final UserVm masterVm) {
        if (masterVm != null) {
            return masterVm;
        }
        List<KubernetesClusterVmMapVO> clusterVMs = kubernetesClusterVmMapDao.listByClusterId(kubernetesCluster.getId());
        if (CollectionUtils.isEmpty(clusterVMs)) {
            LOGGER.warn(String.format("Unable to retrieve VMs for Kubernetes cluster ID: %s", kubernetesCluster.getUuid()));
            return null;
        }
        List<Long> vmIds = new ArrayList<>();
        for (KubernetesClusterVmMapVO vmMap : clusterVMs) {
            vmIds.add(vmMap.getVmId());
        }
        Collections.sort(vmIds);
        return userVmDao.findById(vmIds.get(0));
    }

    /** @return private IP of the cluster's master VM, or null if it cannot be resolved */
    protected String getMasterVmPrivateIp() {
        String ip = null;
        UserVm vm = fetchMasterVmIfMissing(null);
        if (vm != null) {
            ip = vm.getPrivateIpAddress();
        }
        return ip;
    }

    /**
     * Resolves the cluster server's reachable IP and SSH port: external LB IP
     * detail if set, the source NAT IP for isolated networks, or the master
     * VM's private IP (port 22) for shared networks. IP may be null on failure.
     */
    protected Pair<String, Integer> getKubernetesClusterServerIpSshPort(UserVm masterVm) {
        int port = CLUSTER_NODES_DEFAULT_START_SSH_PORT;
        KubernetesClusterDetailsVO detail = kubernetesClusterDetailsDao.findDetail(kubernetesCluster.getId(), ApiConstants.EXTERNAL_LOAD_BALANCER_IP_ADDRESS);
        if (detail != null && !Strings.isNullOrEmpty(detail.getValue())) {
            return new Pair<>(detail.getValue(), port);
        }
        Network network = networkDao.findById(kubernetesCluster.getNetworkId());
        if (network == null) {
            LOGGER.warn(String.format("Network for Kubernetes cluster ID: %s cannot be found", kubernetesCluster.getUuid()));
            return new Pair<>(null, port);
        }
        if (Network.GuestType.Isolated.equals(network.getGuestType())) {
            List<? extends IpAddress> addresses = networkModel.listPublicIpsAssignedToGuestNtwk(network.getId(), true);
            if (CollectionUtils.isEmpty(addresses)) {
                LOGGER.warn(String.format("No public IP addresses found for network ID: %s, Kubernetes cluster ID: %s", network.getUuid(), kubernetesCluster.getUuid()));
                return new Pair<>(null, port);
            }
            for (IpAddress address : addresses) {
                if (address.isSourceNat()) {
                    return new Pair<>(address.getAddress().addr(), port);
                }
            }
            LOGGER.warn(String.format("No source NAT IP addresses found for network ID: %s, Kubernetes cluster ID: %s", network.getUuid(), kubernetesCluster.getUuid()));
            return new Pair<>(null, port);
        } else if (Network.GuestType.Shared.equals(network.getGuestType())) {
            // Shared networks: reach the master VM directly on the standard SSH port.
            port = 22;
            masterVm = fetchMasterVmIfMissing(masterVm);
            if (masterVm == null) {
                LOGGER.warn(String.format("Unable to retrieve master VM for Kubernetes cluster ID: %s", kubernetesCluster.getUuid()));
                return new Pair<>(null, port);
            }
            return new Pair<>(masterVm.getPrivateIpAddress(), port);
        }
        LOGGER.warn(String.format("Unable to retrieve server IP address for Kubernetes cluster ID: %s", kubernetesCluster.getUuid()));
        return new Pair<>(null, port);
    }

    /**
     * Attaches the Kubernetes binaries ISO of the given (or the cluster's)
     * version to every cluster VM; on any failure transitions the cluster to
     * CreateFailed/OperationFailed and throws.
     */
    protected void attachIsoKubernetesVMs(List<UserVm> clusterVMs, final KubernetesSupportedVersion kubernetesSupportedVersion) throws CloudRuntimeException {
        KubernetesSupportedVersion version = kubernetesSupportedVersion;
        if (kubernetesSupportedVersion == null) {
            version = kubernetesSupportedVersionDao.findById(kubernetesCluster.getKubernetesVersionId());
        }
        // A failure during initial start maps to CreateFailed rather than OperationFailed.
        KubernetesCluster.Event failedEvent = KubernetesCluster.Event.OperationFailed;
        KubernetesCluster cluster = kubernetesClusterDao.findById(kubernetesCluster.getId());
        if (cluster != null && cluster.getState() == KubernetesCluster.State.Starting) {
            failedEvent = KubernetesCluster.Event.CreateFailed;
        }
        if (version == null) {
            logTransitStateAndThrow(Level.ERROR, String.format("Unable to find Kubernetes version for cluster ID: %s", kubernetesCluster.getUuid()), kubernetesCluster.getId(), failedEvent);
        }
        VMTemplateVO iso = templateDao.findById(version.getIsoId());
        if (iso == null) {
            logTransitStateAndThrow(Level.ERROR, String.format("Unable to attach ISO to Kubernetes cluster ID: %s. Binaries ISO not found.", kubernetesCluster.getUuid()), kubernetesCluster.getId(), failedEvent);
        }
        if (!iso.getFormat().equals(Storage.ImageFormat.ISO)) {
            logTransitStateAndThrow(Level.ERROR, String.format("Unable to attach ISO to Kubernetes cluster ID: %s. Invalid Binaries ISO.", kubernetesCluster.getUuid()), kubernetesCluster.getId(), failedEvent);
        }
        if (!iso.getState().equals(VirtualMachineTemplate.State.Active)) {
            logTransitStateAndThrow(Level.ERROR, String.format("Unable to attach ISO to Kubernetes cluster ID: %s. Binaries ISO not active.", kubernetesCluster.getUuid()), kubernetesCluster.getId(), failedEvent);
        }
        for (UserVm vm : clusterVMs) {
            try {
                templateService.attachIso(iso.getId(), vm.getId());
                if (LOGGER.isInfoEnabled()) {
                    LOGGER.info(String.format("Attached binaries ISO for VM: %s in cluster: %s", vm.getUuid(), kubernetesCluster.getName()));
                }
            } catch (CloudRuntimeException ex) {
                logTransitStateAndThrow(Level.ERROR, String.format("Failed to attach binaries ISO for VM: %s in the Kubernetes cluster name: %s", vm.getDisplayName(), kubernetesCluster.getName()), kubernetesCluster.getId(), failedEvent, ex);
            }
        }
    }

    protected void attachIsoKubernetesVMs(List<UserVm> clusterVMs) throws CloudRuntimeException {
        attachIsoKubernetesVMs(clusterVMs, null);
    }

    /**
     * Detaches the binaries ISO from every given cluster VM; failures are
     * logged as warnings and do not abort the loop.
     */
    protected void detachIsoKubernetesVMs(List<UserVm> clusterVMs) {
        for (UserVm vm : clusterVMs) {
            boolean result = false;
            try {
                result = templateService.detachIso(vm.getId());
            } catch (CloudRuntimeException ex) {
                LOGGER.warn(String.format("Failed to detach binaries ISO from VM ID: %s in the Kubernetes cluster ID: %s ", vm.getUuid(), kubernetesCluster.getUuid()), ex);
            }
            if (result) {
                if (LOGGER.isInfoEnabled()) {
                    LOGGER.info(String.format("Detached Kubernetes binaries from VM ID: %s in the Kubernetes cluster ID: %s", vm.getUuid(), kubernetesCluster.getUuid()));
                }
                continue;
            }
            LOGGER.warn(String.format("Failed to detach binaries ISO from VM ID: %s in the Kubernetes cluster ID: %s ", vm.getUuid(), kubernetesCluster.getUuid()));
        }
    }

    /** @return the cluster's VM map entries, sorted by map ID ascending */
    protected List<KubernetesClusterVmMapVO> getKubernetesClusterVMMaps() {
        List<KubernetesClusterVmMapVO> clusterVMs = kubernetesClusterVmMapDao.listByClusterId(kubernetesCluster.getId());
        if (!CollectionUtils.isEmpty(clusterVMs)) {
            // Long.compare replaces the original division-based comparator,
            // which divided by zero for equal IDs and could overflow on
            // large ID differences.
            clusterVMs.sort((t1, t2) -> Long.compare(t1.getId(), t2.getId()));
        }
        return clusterVMs;
    }

    /** @return the cluster's VMs in VM-map-ID order (entries may be null if a VM is gone) */
    protected List<UserVm> getKubernetesClusterVMs() {
        List<UserVm> vmList = new ArrayList<>();
        List<KubernetesClusterVmMapVO> clusterVMs = getKubernetesClusterVMMaps();
        if (!CollectionUtils.isEmpty(clusterVMs)) {
            for (KubernetesClusterVmMapVO vmMap : clusterVMs) {
                vmList.add(userVmDao.findById(vmMap.getVmId()));
            }
        }
        return vmList;
    }

    /**
     * Transitions the cluster's state on the given event via the state
     * machine; returns false (after logging) when the transition is invalid.
     */
    protected boolean stateTransitTo(long kubernetesClusterId, KubernetesCluster.Event e) {
        KubernetesClusterVO kubernetesCluster = kubernetesClusterDao.findById(kubernetesClusterId);
        try {
            return _stateMachine.transitTo(kubernetesCluster, e, null, kubernetesClusterDao);
        } catch (NoTransitionException nte) {
            LOGGER.warn(String.format("Failed to transition state of the Kubernetes cluster ID: %s in state %s on event %s", kubernetesCluster.getUuid(), kubernetesCluster.getState().toString(), e.toString()), nte);
            return false;
        }
    }
}

View File

@ -0,0 +1,243 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.kubernetes.cluster.actionworkers;
import java.util.ArrayList;
import java.util.List;
import javax.inject.Inject;
import org.apache.cloudstack.context.CallContext;
import org.apache.commons.collections.CollectionUtils;
import org.apache.log4j.Level;
import com.cloud.exception.ConcurrentOperationException;
import com.cloud.exception.ManagementServerException;
import com.cloud.exception.PermissionDeniedException;
import com.cloud.exception.ResourceUnavailableException;
import com.cloud.kubernetes.cluster.KubernetesCluster;
import com.cloud.kubernetes.cluster.KubernetesClusterDetailsVO;
import com.cloud.kubernetes.cluster.KubernetesClusterManagerImpl;
import com.cloud.kubernetes.cluster.KubernetesClusterVO;
import com.cloud.kubernetes.cluster.KubernetesClusterVmMap;
import com.cloud.kubernetes.cluster.KubernetesClusterVmMapVO;
import com.cloud.network.IpAddress;
import com.cloud.network.Network;
import com.cloud.network.dao.NetworkVO;
import com.cloud.network.rules.FirewallRule;
import com.cloud.user.Account;
import com.cloud.user.AccountManager;
import com.cloud.user.User;
import com.cloud.uservm.UserVm;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.vm.ReservationContext;
import com.cloud.vm.ReservationContextImpl;
import com.cloud.vm.UserVmVO;
public class KubernetesClusterDestroyWorker extends KubernetesClusterResourceModifierActionWorker {
@Inject
protected AccountManager accountManager;
private List<KubernetesClusterVmMapVO> clusterVMs;
public KubernetesClusterDestroyWorker(final KubernetesCluster kubernetesCluster, final KubernetesClusterManagerImpl clusterManager) {
super(kubernetesCluster, clusterManager);
}
/**
 * Ensures the cluster is in a state that permits deletion (Running, Stopped,
 * Alert, Error or Destroying); otherwise logs and throws
 * PermissionDeniedException.
 * NOTE(review): method name contains a typo ("Sate"); kept for compatibility
 * with existing callers.
 */
private void validateClusterSate() {
    switch (kubernetesCluster.getState()) {
        case Running:
        case Stopped:
        case Alert:
        case Error:
        case Destroying:
            return;
        default:
            String msg = String.format("Cannot perform delete operation on cluster ID: %s in state: %s",kubernetesCluster.getUuid(), kubernetesCluster.getState());
            LOGGER.warn(msg);
            throw new PermissionDeniedException(msg);
    }
}
/**
 * Destroys and expunges every VM mapped to this Kubernetes cluster and
 * removes the corresponding map entries. Expunge failures are only logged;
 * a failed destroy call aborts and returns false.
 *
 * @return true when all destroy calls succeeded (or there were no VMs),
 *         false when a destroy failed
 */
private boolean destroyClusterVMs() {
    if (CollectionUtils.isEmpty(clusterVMs)) {
        return true;
    }
    for (KubernetesClusterVmMapVO clusterVM : clusterVMs) {
        long vmID = clusterVM.getVmId();
        // delete only if VM exists and is not removed
        UserVmVO userVM = userVmDao.findById(vmID);
        if (userVM == null || userVM.isRemoved()) {
            continue;
        }
        try {
            UserVm vm = userVmService.destroyVm(vmID, true);
            if (!userVmManager.expunge(userVM, CallContext.current().getCallingUserId(), CallContext.current().getCallingAccount())) {
                LOGGER.warn(String.format("Unable to expunge VM '%s' ID: %s, destroying Kubernetes cluster will probably fail"
                    , vm.getInstanceName()
                    , vm.getUuid()));
            }
            kubernetesClusterVmMapDao.expunge(clusterVM.getId());
            if (LOGGER.isInfoEnabled()) {
                LOGGER.info(String.format("Destroyed VM ID: %s as part of Kubernetes cluster ID: %s cleanup", vm.getUuid(), kubernetesCluster.getUuid()));
            }
        } catch (ResourceUnavailableException | ConcurrentOperationException e) {
            LOGGER.warn(String.format("Failed to destroy VM ID: %s part of the Kubernetes cluster ID: %s cleanup. Moving on with destroying remaining resources provisioned for the Kubernetes cluster", userVM.getUuid(), kubernetesCluster.getUuid()), e);
            return false;
        }
    }
    // Original code tracked a 'vmDestroyed' flag that could never become
    // false; reaching this point always means success.
    return true;
}
/** Flags the cluster's DB entry so the garbage-collection thread cleans it up later. */
private boolean updateKubernetesClusterEntryForGC() {
    final KubernetesClusterVO clusterEntry = kubernetesClusterDao.findById(kubernetesCluster.getId());
    clusterEntry.setCheckForGc(true);
    return kubernetesClusterDao.update(kubernetesCluster.getId(), clusterEntry);
}
/**
 * Destroys the cluster's network (when it still exists) on behalf of the
 * network owner; throws ManagementServerException when the destroy fails.
 */
private void destroyKubernetesClusterNetwork() throws ManagementServerException {
    final NetworkVO network = networkDao.findById(kubernetesCluster.getNetworkId());
    if (network == null || network.getRemoved() != null) {
        // Nothing to clean up: network gone or already removed.
        return;
    }
    final Account networkOwner = accountManager.getAccount(network.getAccountId());
    final User callerUser = accountManager.getActiveUser(CallContext.current().getCallingUserId());
    final ReservationContext context = new ReservationContextImpl(null, null, callerUser, networkOwner);
    if (!networkMgr.destroyNetwork(kubernetesCluster.getNetworkId(), context, true)) {
        String msg = String.format("Failed to destroy network ID: %s as part of Kubernetes cluster ID: %s cleanup", network.getUuid(), kubernetesCluster.getUuid());
        LOGGER.warn(msg);
        throw new ManagementServerException(msg);
    }
    if (LOGGER.isInfoEnabled()) {
        LOGGER.info(String.format("Destroyed network: %s as part of Kubernetes cluster ID: %s cleanup", network.getUuid(), kubernetesCluster.getUuid()));
    }
}
/**
 * Removes the network rules (load balancing, firewall, port forwarding)
 * created for this cluster on its isolated network's source NAT IP.
 * No-op for missing or non-isolated networks.
 *
 * @throws ManagementServerException when the source NAT IP cannot be found
 *         or a rule removal fails
 */
private void deleteKubernetesClusterNetworkRules() throws ManagementServerException {
    NetworkVO network = networkDao.findById(kubernetesCluster.getNetworkId());
    if (network == null || !Network.GuestType.Isolated.equals(network.getGuestType())) {
        return;
    }
    List<Long> removedVmIds = new ArrayList<>();
    if (!CollectionUtils.isEmpty(clusterVMs)) {
        for (KubernetesClusterVmMapVO clusterVM : clusterVMs) {
            removedVmIds.add(clusterVM.getVmId());
        }
    }
    IpAddress publicIp = getSourceNatIp(network);
    if (publicIp == null) {
        throw new ManagementServerException(String.format("No source NAT IP addresses found for network ID: %s", network.getUuid()));
    }
    try {
        removeLoadBalancingRule(publicIp, network, owner, CLUSTER_API_PORT);
    } catch (ResourceUnavailableException e) {
        // Fixed message: original read "Failed to KubernetesCluster load balancing rule".
        throw new ManagementServerException(String.format("Failed to remove load balancing rule of the Kubernetes cluster for network ID: %s", network.getUuid()));
    }
    FirewallRule firewallRule = removeApiFirewallRule(publicIp);
    if (firewallRule == null) {
        logMessage(Level.WARN, "Firewall rule for API access can't be removed", null);
    }
    firewallRule = removeSshFirewallRule(publicIp);
    if (firewallRule == null) {
        logMessage(Level.WARN, "Firewall rule for SSH access can't be removed", null);
    }
    try {
        removePortForwardingRules(publicIp, network, owner, removedVmIds);
    } catch (ResourceUnavailableException e) {
        // Fixed message: original read "Failed to KubernetesCluster port forwarding rules".
        throw new ManagementServerException(String.format("Failed to remove port forwarding rules of the Kubernetes cluster for network ID: %s", network.getUuid()));
    }
}
/**
 * Best-effort wait for all cluster VMs to be expunged before network cleanup proceeds.
 * Polls the VM table up to three times, sleeping 10s between attempts, and returns regardless
 * of the outcome; callers treat this purely as a grace period.
 */
private void validateClusterVMsDestroyed() {
    if (clusterVMs == null || clusterVMs.isEmpty()) {
        return;
    }
    // Wait for few seconds to get all VMs really expunged
    final int maxRetries = 3;
    for (int retryCounter = 0; retryCounter < maxRetries; retryCounter++) {
        boolean allVMsRemoved = true;
        for (KubernetesClusterVmMap clusterVM : clusterVMs) {
            UserVmVO userVM = userVmDao.findById(clusterVM.getVmId());
            if (userVM != null && !userVM.isRemoved()) {
                allVMsRemoved = false;
                break;
            }
        }
        if (allVMsRemoved) {
            break;
        }
        try {
            Thread.sleep(10000);
        } catch (InterruptedException ie) {
            // Fix: the interrupt was silently swallowed; restore the interrupt status and
            // stop waiting so the caller's thread can observe the interruption.
            Thread.currentThread().interrupt();
            break;
        }
    }
}
/**
 * Destroys this Kubernetes cluster: expunges its VMs, then either destroys the cluster
 * network or (when network cleanup is disabled) removes only the cluster's network rules,
 * and finally removes the cluster DB entry. On any failure the cluster is flagged for the
 * garbage collector and a CloudRuntimeException is thrown.
 *
 * @return true when the cluster entry was removed, false when the final DB removal failed
 */
public boolean destroy() throws CloudRuntimeException {
    init();
    // NOTE(review): method name carries a typo ("Sate"); it is defined outside this view.
    validateClusterSate();
    if (LOGGER.isInfoEnabled()) {
        LOGGER.info(String.format("Destroying Kubernetes cluster ID: %s", kubernetesCluster.getUuid()));
    }
    stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.DestroyRequested);
    this.clusterVMs = kubernetesClusterVmMapDao.listByClusterId(kubernetesCluster.getId());
    boolean vmsDestroyed = destroyClusterVMs();
    // The "networkCleanup" detail records whether the network may be destroyed along with the
    // cluster; defaults to true when the detail is absent.
    boolean cleanupNetwork = true;
    final KubernetesClusterDetailsVO clusterDetails = kubernetesClusterDetailsDao.findDetail(kubernetesCluster.getId(), "networkCleanup");
    if (clusterDetails != null) {
        cleanupNetwork = Boolean.parseBoolean(clusterDetails.getValue());
    }
    // if there are VM's that were not expunged, we can not delete the network
    if (vmsDestroyed) {
        if (cleanupNetwork) {
            // Give the expunge jobs time to finish before tearing the network down.
            validateClusterVMsDestroyed();
            try {
                destroyKubernetesClusterNetwork();
            } catch (ManagementServerException e) {
                String msg = String.format("Failed to destroy network of Kubernetes cluster ID: %s cleanup", kubernetesCluster.getUuid());
                LOGGER.warn(msg, e);
                updateKubernetesClusterEntryForGC();
                throw new CloudRuntimeException(msg, e);
            }
        } else {
            // Network is kept; only the rules this cluster provisioned are removed.
            try {
                deleteKubernetesClusterNetworkRules();
            } catch (ManagementServerException e) {
                String msg = String.format("Failed to remove network rules of Kubernetes cluster ID: %s", kubernetesCluster.getUuid());
                LOGGER.warn(msg, e);
                updateKubernetesClusterEntryForGC();
                throw new CloudRuntimeException(msg, e);
            }
        }
    } else {
        String msg = String.format("Failed to destroy one or more VMs as part of Kubernetes cluster ID: %s cleanup", kubernetesCluster.getUuid());
        LOGGER.warn(msg);
        updateKubernetesClusterEntryForGC();
        throw new CloudRuntimeException(msg);
    }
    stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationSucceeded);
    boolean deleted = kubernetesClusterDao.remove(kubernetesCluster.getId());
    if (!deleted) {
        logMessage(Level.WARN, String.format("Failed to delete Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), null);
        updateKubernetesClusterEntryForGC();
        return false;
    }
    if (LOGGER.isInfoEnabled()) {
        LOGGER.info(String.format("Kubernetes cluster ID: %s is successfully deleted", kubernetesCluster.getUuid()));
    }
    return true;
}
}

View File

@ -0,0 +1,513 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.kubernetes.cluster.actionworkers;
import java.io.IOException;
import java.lang.reflect.Field;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import javax.inject.Inject;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.BaseCmd;
import org.apache.cloudstack.api.command.user.firewall.CreateFirewallRuleCmd;
import org.apache.cloudstack.api.command.user.vm.StartVMCmd;
import org.apache.commons.codec.binary.Base64;
import org.apache.commons.collections.CollectionUtils;
import org.apache.log4j.Level;
import com.cloud.capacity.CapacityManager;
import com.cloud.dc.ClusterDetailsDao;
import com.cloud.dc.ClusterDetailsVO;
import com.cloud.dc.ClusterVO;
import com.cloud.dc.DataCenter;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.deploy.DeployDestination;
import com.cloud.exception.InsufficientCapacityException;
import com.cloud.exception.InsufficientServerCapacityException;
import com.cloud.exception.ManagementServerException;
import com.cloud.exception.NetworkRuleConflictException;
import com.cloud.exception.ResourceAllocationException;
import com.cloud.exception.ResourceUnavailableException;
import com.cloud.host.Host;
import com.cloud.host.HostVO;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.kubernetes.cluster.KubernetesCluster;
import com.cloud.kubernetes.cluster.KubernetesClusterDetailsVO;
import com.cloud.kubernetes.cluster.KubernetesClusterManagerImpl;
import com.cloud.kubernetes.cluster.utils.KubernetesClusterUtil;
import com.cloud.network.IpAddress;
import com.cloud.network.Network;
import com.cloud.network.dao.FirewallRulesDao;
import com.cloud.network.dao.LoadBalancerDao;
import com.cloud.network.dao.LoadBalancerVO;
import com.cloud.network.firewall.FirewallService;
import com.cloud.network.lb.LoadBalancingRulesService;
import com.cloud.network.rules.FirewallRule;
import com.cloud.network.rules.FirewallRuleVO;
import com.cloud.network.rules.PortForwardingRuleVO;
import com.cloud.network.rules.RulesService;
import com.cloud.network.rules.dao.PortForwardingRulesDao;
import com.cloud.offering.ServiceOffering;
import com.cloud.resource.ResourceManager;
import com.cloud.template.VirtualMachineTemplate;
import com.cloud.user.Account;
import com.cloud.user.SSHKeyPairVO;
import com.cloud.uservm.UserVm;
import com.cloud.utils.Pair;
import com.cloud.utils.StringUtils;
import com.cloud.utils.component.ComponentContext;
import com.cloud.utils.db.Transaction;
import com.cloud.utils.db.TransactionCallbackWithException;
import com.cloud.utils.db.TransactionStatus;
import com.cloud.utils.exception.ExecutionException;
import com.cloud.utils.net.Ip;
import com.cloud.utils.net.NetUtils;
import com.cloud.vm.Nic;
import com.cloud.vm.UserVmManager;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.dao.VMInstanceDao;
import com.google.common.base.Strings;
public class KubernetesClusterResourceModifierActionWorker extends KubernetesClusterActionWorker {
    // Capacity and host-inventory services used by plan() for deployment planning.
    @Inject
    protected CapacityManager capacityManager;
    @Inject
    protected ClusterDao clusterDao;
    @Inject
    protected ClusterDetailsDao clusterDetailsDao;
    // Network-rule services/DAOs used to provision and remove firewall, port forwarding
    // and load balancing rules on the cluster's source NAT IP.
    @Inject
    protected FirewallRulesDao firewallRulesDao;
    @Inject
    protected FirewallService firewallService;
    @Inject
    protected LoadBalancingRulesService lbService;
    @Inject
    protected RulesService rulesService;
    @Inject
    protected PortForwardingRulesDao portForwardingRulesDao;
    @Inject
    protected ResourceManager resourceManager;
    @Inject
    protected LoadBalancerDao loadBalancerDao;
    @Inject
    protected VMInstanceDao vmInstanceDao;
    @Inject
    protected UserVmManager userVmManager;
    // Host-name prefix for node VMs, computed once in init().
    protected String kubernetesClusterNodeNamePrefix;
protected KubernetesClusterResourceModifierActionWorker(final KubernetesCluster kubernetesCluster, final KubernetesClusterManagerImpl clusterManager) {
super(kubernetesCluster, clusterManager);
}
protected void init() {
super.init();
kubernetesClusterNodeNamePrefix = getKubernetesClusterNodeNamePrefix();
}
/**
 * Builds the cloud-init user data for a node VM from the k8s-node.yml template: injects the
 * management SSH public key (plus the cluster owner's key pair if any), the master join IP,
 * the cluster token and the ISO-eject flag, and, when the cluster uses a private Docker
 * registry, a generated /.docker/config.json write-files section.
 *
 * @param joinIp IP of the master node the new node should join
 * @param ejectIso whether the bootstrap ISO should be ejected after configuration
 * @return the rendered node configuration
 * @throws IOException when the template resource cannot be read
 */
private String getKubernetesNodeConfig(final String joinIp, final boolean ejectIso) throws IOException {
    String k8sNodeConfig = readResourceFile("/conf/k8s-node.yml");
    final String sshPubKey = "{{ k8s.ssh.pub.key }}";
    final String joinIpKey = "{{ k8s_master.join_ip }}";
    final String clusterTokenKey = "{{ k8s_master.cluster.token }}";
    final String ejectIsoKey = "{{ k8s.eject.iso }}";
    String pubKey = "- \"" + configurationDao.getValue("ssh.publickey") + "\"";
    String sshKeyPair = kubernetesCluster.getKeyPair();
    if (!Strings.isNullOrEmpty(sshKeyPair)) {
        SSHKeyPairVO sshkp = sshKeyPairDao.findByName(owner.getAccountId(), owner.getDomainId(), sshKeyPair);
        if (sshkp != null) {
            // Append the owner's key below the management key as another YAML list entry.
            pubKey += "\n - \"" + sshkp.getPublicKey() + "\"";
        }
    }
    k8sNodeConfig = k8sNodeConfig.replace(sshPubKey, pubKey);
    k8sNodeConfig = k8sNodeConfig.replace(joinIpKey, joinIp);
    k8sNodeConfig = k8sNodeConfig.replace(clusterTokenKey, KubernetesClusterUtil.generateClusterToken(kubernetesCluster));
    k8sNodeConfig = k8sNodeConfig.replace(ejectIsoKey, String.valueOf(ejectIso));
    /* generate /.docker/config.json file on the nodes only if Kubernetes cluster is created to
     * use docker private registry */
    String dockerUserName = null;
    String dockerPassword = null;
    String dockerRegistryUrl = null;
    String dockerRegistryEmail = null;
    List<KubernetesClusterDetailsVO> details = kubernetesClusterDetailsDao.listDetails(kubernetesCluster.getId());
    for (KubernetesClusterDetailsVO detail : details) {
        if (detail.getName().equals(ApiConstants.DOCKER_REGISTRY_USER_NAME)) {
            dockerUserName = detail.getValue();
        }
        if (detail.getName().equals(ApiConstants.DOCKER_REGISTRY_PASSWORD)) {
            dockerPassword = detail.getValue();
        }
        if (detail.getName().equals(ApiConstants.DOCKER_REGISTRY_URL)) {
            dockerRegistryUrl = detail.getValue();
        }
        if (detail.getName().equals(ApiConstants.DOCKER_REGISTRY_EMAIL)) {
            dockerRegistryEmail = detail.getValue();
        }
    }
    if (!Strings.isNullOrEmpty(dockerUserName) && !Strings.isNullOrEmpty(dockerPassword)) {
        // do write file for /.docker/config.json through the code instead of k8s-node.yml as we can no make a section
        // optional or conditionally applied
        String dockerConfigString = "write-files:\n" +
                "  - path: /.docker/config.json\n" +
                "    owner: core:core\n" +
                "    permissions: '0644'\n" +
                "    content: |\n" +
                "      {\n" +
                "        \"auths\": {\n" +
                "          {{docker.url}}: {\n" +
                "            \"auth\": {{docker.secret}},\n" +
                "            \"email\": {{docker.email}}\n" +
                "          }\n" +
                "        }\n" +
                "      }";
        k8sNodeConfig = k8sNodeConfig.replace("write-files:", dockerConfigString);
        final String dockerUrlKey = "{{docker.url}}";
        final String dockerAuthKey = "{{docker.secret}}";
        final String dockerEmailKey = "{{docker.email}}";
        // Docker auth is "user:password" base64-encoded, as in a standard config.json.
        final String usernamePasswordKey = dockerUserName + ":" + dockerPassword;
        String base64Auth = Base64.encodeBase64String(usernamePasswordKey.getBytes(StringUtils.getPreferredCharset()));
        k8sNodeConfig = k8sNodeConfig.replace(dockerUrlKey, "\"" + dockerRegistryUrl + "\"");
        k8sNodeConfig = k8sNodeConfig.replace(dockerAuthKey, "\"" + base64Auth + "\"");
        k8sNodeConfig = k8sNodeConfig.replace(dockerEmailKey, "\"" + dockerRegistryEmail + "\"");
    }
    return k8sNodeConfig;
}
/**
 * Checks whether hosts in the given zone collectively have capacity for the requested number
 * of cluster VMs with the given offering, incrementally reserving capacity per host while
 * checking.
 *
 * @param nodesCount number of cluster node VMs to plan for
 * @param zone zone in which to plan
 * @param offering compute offering each node VM will use
 * @return a zone-scoped {@link DeployDestination} when enough capacity is found
 * @throws InsufficientServerCapacityException when capacity for all VMs cannot be found
 */
protected DeployDestination plan(final long nodesCount, final DataCenter zone, final ServiceOffering offering) throws InsufficientServerCapacityException {
    // Fix: locals renamed from snake_case to Java camelCase; log typo "for with" corrected.
    final int cpuRequested = offering.getCpu() * offering.getSpeed();
    final long ramRequested = offering.getRamSize() * 1024L * 1024L;
    List<HostVO> hosts = resourceManager.listAllHostsInOneZoneByType(Host.Type.Routing, zone.getId());
    final Map<String, Pair<HostVO, Integer>> hostsWithReservedCapacity = new ConcurrentHashMap<String, Pair<HostVO, Integer>>();
    for (HostVO h : hosts) {
        hostsWithReservedCapacity.put(h.getUuid(), new Pair<HostVO, Integer>(h, 0));
    }
    boolean suitableHostFound = false;
    // NOTE(review): plans for nodesCount + 1 VMs; presumably deliberate headroom — confirm.
    for (int i = 1; i <= nodesCount + 1; i++) {
        suitableHostFound = false;
        for (Map.Entry<String, Pair<HostVO, Integer>> hostEntry : hostsWithReservedCapacity.entrySet()) {
            Pair<HostVO, Integer> hp = hostEntry.getValue();
            HostVO h = hp.first();
            int reserved = hp.second() + 1;
            ClusterVO cluster = clusterDao.findById(h.getClusterId());
            ClusterDetailsVO clusterDetailCpu = clusterDetailsDao.findDetail(cluster.getId(), "cpuOvercommitRatio");
            ClusterDetailsVO clusterDetailRam = clusterDetailsDao.findDetail(cluster.getId(), "memoryOvercommitRatio");
            float cpuOvercommitRatio = Float.parseFloat(clusterDetailCpu.getValue());
            float memoryOvercommitRatio = Float.parseFloat(clusterDetailRam.getValue());
            if (LOGGER.isDebugEnabled()) {
                LOGGER.debug(String.format("Checking host ID: %s for capacity already reserved %d", h.getUuid(), reserved));
            }
            if (capacityManager.checkIfHostHasCapacity(h.getId(), cpuRequested * reserved, ramRequested * reserved, false, cpuOvercommitRatio, memoryOvercommitRatio, true)) {
                if (LOGGER.isDebugEnabled()) {
                    LOGGER.debug(String.format("Found host ID: %s with enough capacity, CPU=%d RAM=%d", h.getUuid(), cpuRequested * reserved, ramRequested * reserved));
                }
                // Remember the reservation so the next VM plans on top of it.
                hostEntry.setValue(new Pair<HostVO, Integer>(h, reserved));
                suitableHostFound = true;
                break;
            }
        }
        if (!suitableHostFound) {
            if (LOGGER.isInfoEnabled()) {
                LOGGER.info(String.format("Suitable hosts not found in datacenter ID: %s for node %d", zone.getUuid(), i));
            }
            break;
        }
    }
    if (suitableHostFound) {
        if (LOGGER.isInfoEnabled()) {
            LOGGER.info(String.format("Suitable hosts found in datacenter ID: %s, creating deployment destination", zone.getUuid()));
        }
        return new DeployDestination(zone, null, null, null);
    }
    String msg = String.format("Cannot find enough capacity for Kubernetes cluster(requested cpu=%1$s memory=%2$s)",
            cpuRequested * nodesCount, ramRequested * nodesCount);
    LOGGER.warn(msg);
    throw new InsufficientServerCapacityException(msg, DataCenter.class, zone.getId());
}
/**
 * Plans a deployment destination for this worker's cluster using its configured service
 * offering, zone and total node count.
 */
protected DeployDestination plan() throws InsufficientServerCapacityException {
    final DataCenter zone = dataCenterDao.findById(kubernetesCluster.getZoneId());
    final ServiceOffering offering = serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId());
    if (LOGGER.isDebugEnabled()) {
        LOGGER.debug(String.format("Checking deployment destination for Kubernetes cluster ID: %s in zone ID: %s", kubernetesCluster.getUuid(), zone.getUuid()));
    }
    return plan(kubernetesCluster.getTotalNodeCount(), zone, offering);
}
/**
 * Starts the given cluster VM through the StartVMCmd API command and verifies that it reaches
 * the Running state.
 *
 * @param vm VM to start
 * @throws ManagementServerException when the start fails or the VM is not Running afterwards
 */
protected void startKubernetesVM(final UserVm vm) throws ManagementServerException {
    try {
        StartVMCmd startVm = new StartVMCmd();
        startVm = ComponentContext.inject(startVm);
        // The command exposes no setter for its id parameter; set it reflectively.
        Field f = startVm.getClass().getDeclaredField("id");
        f.setAccessible(true);
        f.set(startVm, vm.getId());
        userVmService.startVirtualMachine(startVm);
        if (LOGGER.isInfoEnabled()) {
            LOGGER.info(String.format("Started VM ID: %s in the Kubernetes cluster ID: %s", vm.getUuid(), kubernetesCluster.getUuid()));
        }
    } catch (IllegalAccessException | NoSuchFieldException | ExecutionException |
            ResourceUnavailableException | ResourceAllocationException | InsufficientCapacityException ex) {
        throw new ManagementServerException(String.format("Failed to start VM in the Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), ex);
    }
    // Fix: guard against the VM having been removed concurrently — findById may return null,
    // which previously caused an NPE on getState().
    UserVm startVm = userVmDao.findById(vm.getId());
    if (startVm == null || !startVm.getState().equals(VirtualMachine.State.Running)) {
        throw new ManagementServerException(String.format("Failed to start VM in the Kubernetes cluster ID: %s", kubernetesCluster.getUuid()));
    }
}
/**
 * Creates and starts node VMs (1-based indices offset+1 .. nodeCount) for this cluster,
 * registering each new VM in the cluster VM map.
 *
 * @return the freshly provisioned node VMs, re-read from the DB after start
 */
protected List<UserVm> provisionKubernetesClusterNodeVms(final long nodeCount, final int offset, final String publicIpAddress) throws ManagementServerException,
        ResourceUnavailableException, InsufficientCapacityException {
    final List<UserVm> nodes = new ArrayList<>();
    for (int nodeIndex = offset + 1; nodeIndex <= nodeCount; nodeIndex++) {
        UserVm nodeVm = createKubernetesNode(publicIpAddress, nodeIndex);
        addKubernetesClusterVm(kubernetesCluster.getId(), nodeVm.getId());
        startKubernetesVM(nodeVm);
        nodeVm = userVmDao.findById(nodeVm.getId());
        if (nodeVm == null) {
            throw new ManagementServerException(String.format("Failed to provision worker VM for Kubernetes cluster ID: %s" , kubernetesCluster.getUuid()));
        }
        nodes.add(nodeVm);
        if (LOGGER.isInfoEnabled()) {
            LOGGER.info(String.format("Provisioned node VM ID: %s in to the Kubernetes cluster ID: %s", nodeVm.getUuid(), kubernetesCluster.getUuid()));
        }
    }
    return nodes;
}
/**
 * Creates and starts all node VMs (1 .. nodeCount) for this cluster; convenience overload
 * with a zero offset.
 */
protected List<UserVm> provisionKubernetesClusterNodeVms(final long nodeCount, final String publicIpAddress) throws ManagementServerException,
        ResourceUnavailableException, InsufficientCapacityException {
    return provisionKubernetesClusterNodeVms(nodeCount, 0, publicIpAddress);
}
/**
 * Creates (but does not start) one node VM of this cluster: renders the cloud-init node
 * configuration, picks an available host name with the cluster prefix, and calls the
 * advanced-VM create API.
 *
 * @param joinIp master IP the node joins
 * @param nodeInstance 1-based node index used in the host name
 * @return the created VM
 */
protected UserVm createKubernetesNode(String joinIp, int nodeInstance) throws ManagementServerException,
        ResourceUnavailableException, InsufficientCapacityException {
    UserVm nodeVm = null;
    DataCenter zone = dataCenterDao.findById(kubernetesCluster.getZoneId());
    ServiceOffering serviceOffering = serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId());
    VirtualMachineTemplate template = templateDao.findById(kubernetesCluster.getTemplateId());
    List<Long> networkIds = new ArrayList<Long>();
    networkIds.add(kubernetesCluster.getNetworkId());
    Account owner = accountDao.findById(kubernetesCluster.getAccountId());
    Network.IpAddresses addrs = new Network.IpAddresses(null, null);
    long rootDiskSize = kubernetesCluster.getNodeRootDiskSize();
    Map<String, String> customParameterMap = new HashMap<String, String>();
    if (rootDiskSize > 0) {
        customParameterMap.put("rootdisksize", String.valueOf(rootDiskSize));
    }
    String hostName = getKubernetesClusterNodeAvailableName(String.format("%s-node-%s", kubernetesClusterNodeNamePrefix, nodeInstance));
    String k8sNodeConfig = null;
    try {
        // On VMware the bootstrap ISO must be ejected after configuration.
        k8sNodeConfig = getKubernetesNodeConfig(joinIp, Hypervisor.HypervisorType.VMware.equals(template.getHypervisorType()));
    } catch (IOException e) {
        logAndThrow(Level.ERROR, "Failed to read Kubernetes node configuration file", e);
    }
    String base64UserData = Base64.encodeBase64String(k8sNodeConfig.getBytes(StringUtils.getPreferredCharset()));
    // The many nulls are optional create parameters left unset — confirm against the
    // createAdvancedVirtualMachine signature before reordering.
    nodeVm = userVmService.createAdvancedVirtualMachine(zone, serviceOffering, template, networkIds, owner,
            hostName, hostName, null, null, null,
            null, BaseCmd.HTTPMethod.POST, base64UserData, kubernetesCluster.getKeyPair(),
            null, addrs, null, null, null, customParameterMap, null, null, null, null);
    if (LOGGER.isInfoEnabled()) {
        LOGGER.info(String.format("Created node VM ID: %s, %s in the Kubernetes cluster ID: %s", nodeVm.getUuid(), hostName, kubernetesCluster.getUuid()));
    }
    return nodeVm;
}
/**
 * Returns the source NAT IP of the given guest network, or null when the network has no
 * public IPs or none of them is source NAT.
 */
protected IpAddress getSourceNatIp(Network network) {
    List<? extends IpAddress> addresses = networkModel.listPublicIpsAssignedToGuestNtwk(network.getId(), true);
    if (CollectionUtils.isEmpty(addresses)) {
        return null;
    }
    return addresses.stream().filter(IpAddress::isSourceNat).findFirst().orElse(null);
}
/** Reflectively sets a private declared field on an API command object. */
private static void setCommandField(Object cmd, String fieldName, Object value) throws NoSuchFieldException, IllegalAccessException {
    Field field = cmd.getClass().getDeclaredField(fieldName);
    field.setAccessible(true);
    field.set(cmd, value);
}

/**
 * Opens a TCP ingress firewall range [startPort, endPort] from anywhere (0.0.0.0/0) on the
 * given public IP and applies the ingress rules for the account.
 */
protected void provisionFirewallRules(final IpAddress publicIp, final Account account, int startPort, int endPort) throws NoSuchFieldException,
        IllegalAccessException, ResourceUnavailableException, NetworkRuleConflictException {
    List<String> sourceCidrList = new ArrayList<String>();
    sourceCidrList.add("0.0.0.0/0");
    CreateFirewallRuleCmd rule = ComponentContext.inject(new CreateFirewallRuleCmd());
    // The command exposes no setters for its parameters; populate them reflectively.
    setCommandField(rule, "ipAddressId", publicIp.getId());
    setCommandField(rule, "protocol", "TCP");
    setCommandField(rule, "publicStartPort", startPort);
    setCommandField(rule, "publicEndPort", endPort);
    setCommandField(rule, "cidrlist", sourceCidrList);
    firewallService.createIngressFirewallRule(rule);
    firewallService.applyIngressFwRules(publicIp.getId(), account);
}
/**
 * Provisions SSH port forwarding rules for the given cluster VMs: for the i-th VM, public
 * port (firewallRuleSourcePortStart + i) on the public IP is forwarded to port 22 of the
 * VM's private IP.
 *
 * @param publicIp public (source NAT) IP to forward from
 * @param network cluster guest network
 * @param account owning account
 * @param clusterVMIds VM ids to forward to; when empty the method is being called while
 *                     down-scaling the cluster, so no new rules are added
 * @param firewallRuleSourcePortStart first public port to use
 * @throws ResourceUnavailableException
 * @throws NetworkRuleConflictException
 */
protected void provisionSshPortForwardingRules(IpAddress publicIp, Network network, Account account,
        List<Long> clusterVMIds, int firewallRuleSourcePortStart) throws ResourceUnavailableException,
        NetworkRuleConflictException {
    if (!CollectionUtils.isEmpty(clusterVMIds)) {
        final long publicIpId = publicIp.getId();
        final long networkId = network.getId();
        final long accountId = account.getId();
        final long domainId = account.getDomainId();
        for (int i = 0; i < clusterVMIds.size(); ++i) {
            long vmId = clusterVMIds.get(i);
            Nic vmNic = networkModel.getNicInNetwork(vmId, networkId);
            final Ip vmIp = new Ip(vmNic.getIPv4Address());
            final long vmIdFinal = vmId;
            final int srcPortFinal = firewallRuleSourcePortStart + i;
            // Persist the rule inside a transaction; the rule is created in the Add state and
            // activated by applyPortForwardingRules below.
            PortForwardingRuleVO pfRule = Transaction.execute(new TransactionCallbackWithException<PortForwardingRuleVO, NetworkRuleConflictException>() {
                @Override
                public PortForwardingRuleVO doInTransaction(TransactionStatus status) throws NetworkRuleConflictException {
                    PortForwardingRuleVO newRule =
                            new PortForwardingRuleVO(null, publicIpId,
                                    srcPortFinal, srcPortFinal,
                                    vmIp,
                                    22, 22,
                                    "tcp", networkId, accountId, domainId, vmIdFinal);
                    newRule.setDisplay(true);
                    newRule.setState(FirewallRule.State.Add);
                    newRule = portForwardingRulesDao.persist(newRule);
                    return newRule;
                }
            });
            // Rules are applied per iteration rather than once at the end.
            rulesService.applyPortForwardingRules(publicIp.getId(), account);
            if (LOGGER.isInfoEnabled()) {
                LOGGER.info(String.format("Provisioned SSH port forwarding rule from port %d to 22 on %s to the VM IP : %s in Kubernetes cluster ID: %s", srcPortFinal, publicIp.getAddress().addr(), vmIp.toString(), kubernetesCluster.getUuid()));
            }
        }
    }
}
/**
 * Revokes the ingress firewall rule opened for the Kubernetes API port on the given public IP.
 *
 * @return the revoked rule, or null when no matching rule exists
 */
protected FirewallRule removeApiFirewallRule(final IpAddress publicIp) {
    for (FirewallRuleVO candidate : firewallRulesDao.listByIpAndPurposeAndNotRevoked(publicIp.getId(), FirewallRule.Purpose.Firewall)) {
        if (candidate.getSourcePortStart() == CLUSTER_API_PORT && candidate.getSourcePortEnd() == CLUSTER_API_PORT) {
            firewallService.revokeIngressFwRule(candidate.getId(), true);
            return candidate;
        }
    }
    return null;
}
/**
 * Revokes the ingress firewall rule opened for node SSH access (matched by its start port) on
 * the given public IP.
 *
 * @return the revoked rule, or null when no matching rule exists
 */
protected FirewallRule removeSshFirewallRule(final IpAddress publicIp) {
    for (FirewallRuleVO candidate : firewallRulesDao.listByIpAndPurposeAndNotRevoked(publicIp.getId(), FirewallRule.Purpose.Firewall)) {
        if (candidate.getSourcePortStart() == CLUSTER_NODES_DEFAULT_START_SSH_PORT) {
            firewallService.revokeIngressFwRule(candidate.getId(), true);
            return candidate;
        }
    }
    return null;
}
/**
 * Removes the port forwarding rule of each removed VM from the network, then re-applies the
 * remaining port forwarding rules on the public IP. No-op when removedVMIds is empty.
 */
protected void removePortForwardingRules(final IpAddress publicIp, final Network network, final Account account, final List<Long> removedVMIds) throws ResourceUnavailableException {
    if (CollectionUtils.isEmpty(removedVMIds)) {
        return;
    }
    for (Long removedVmId : removedVMIds) {
        // Re-list on every iteration so already-removed rules are not considered again.
        for (PortForwardingRuleVO rule : portForwardingRulesDao.listByNetwork(network.getId())) {
            if (rule.getVirtualMachineId() == removedVmId) {
                portForwardingRulesDao.remove(rule.getId());
                break;
            }
        }
    }
    rulesService.applyPortForwardingRules(publicIp.getId(), account);
}
/**
 * Deletes the load balancing rule on the given public IP that matches the network, account and
 * source port; at most one rule is removed.
 */
protected void removeLoadBalancingRule(final IpAddress publicIp, final Network network,
        final Account account, final int port) throws ResourceUnavailableException {
    for (LoadBalancerVO candidate : loadBalancerDao.listByIpAddress(publicIp.getId())) {
        final boolean matches = candidate.getNetworkId() == network.getId()
                && candidate.getAccountId() == account.getId()
                && candidate.getSourcePortStart() == port
                && candidate.getSourcePortEnd() == port;
        if (matches) {
            lbService.deleteLoadBalancerRule(candidate.getId(), true);
            return;
        }
    }
}
/**
 * Derives the host-name prefix for this cluster's node VMs from the cluster name, sanitized to
 * a valid DNS label (falling back to "k8s-" + UUID) and truncated to 40 characters.
 */
protected String getKubernetesClusterNodeNamePrefix() {
    String prefix = kubernetesCluster.getName();
    if (!NetUtils.verifyDomainNameLabel(prefix, true)) {
        // Strip invalid characters; fall back to the cluster UUID when nothing is left.
        prefix = prefix.replaceAll("[^a-zA-Z0-9-]", "");
        prefix = prefix.length() == 0 ? kubernetesCluster.getUuid() : prefix;
        prefix = "k8s-" + prefix;
    }
    // NOTE(review): truncation could leave a trailing '-'; presumably acceptable — confirm.
    return prefix.length() > 40 ? prefix.substring(0, 40) : prefix;
}
/**
 * Returns the given host name if no VM uses it, otherwise the first "hostName-n"
 * (n = 1, 2, ...) not taken by an existing VM.
 */
protected String getKubernetesClusterNodeAvailableName(final String hostName) {
    String candidate = hostName;
    for (int suffix = 1; vmInstanceDao.findVMByHostName(candidate) != null; suffix++) {
        candidate = String.format("%s-%d", hostName, suffix);
    }
    return candidate;
}
}

View File

@ -0,0 +1,431 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.kubernetes.cluster.actionworkers;
import java.io.File;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import javax.inject.Inject;
import org.apache.cloudstack.context.CallContext;
import org.apache.commons.collections.CollectionUtils;
import org.apache.log4j.Level;
import com.cloud.dc.DataCenter;
import com.cloud.exception.ConcurrentOperationException;
import com.cloud.exception.InsufficientCapacityException;
import com.cloud.exception.ManagementServerException;
import com.cloud.exception.NetworkRuleConflictException;
import com.cloud.exception.ResourceUnavailableException;
import com.cloud.exception.VirtualMachineMigrationException;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.kubernetes.cluster.KubernetesCluster;
import com.cloud.kubernetes.cluster.KubernetesClusterManagerImpl;
import com.cloud.kubernetes.cluster.KubernetesClusterService;
import com.cloud.kubernetes.cluster.KubernetesClusterVO;
import com.cloud.kubernetes.cluster.KubernetesClusterVmMapVO;
import com.cloud.kubernetes.cluster.utils.KubernetesClusterUtil;
import com.cloud.network.IpAddress;
import com.cloud.network.Network;
import com.cloud.network.rules.FirewallRule;
import com.cloud.offering.ServiceOffering;
import com.cloud.uservm.UserVm;
import com.cloud.utils.Pair;
import com.cloud.utils.db.Transaction;
import com.cloud.utils.db.TransactionCallback;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.ssh.SshHelper;
import com.cloud.vm.UserVmVO;
import com.cloud.vm.VMInstanceVO;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.dao.VMInstanceDao;
import com.google.common.base.Strings;
public class KubernetesClusterScaleWorker extends KubernetesClusterResourceModifierActionWorker {
    // NOTE(review): KubernetesClusterResourceModifierActionWorker already @Inject-s a protected
    // vmInstanceDao; this duplicate hides the inherited field — confirm intent and remove if redundant.
    @Inject
    protected VMInstanceDao vmInstanceDao;
    // Target compute offering; updateKubernetesClusterEntry treats null as "keep current offering".
    private ServiceOffering serviceOffering;
    // Requested node count; null means the cluster size is unchanged.
    private Long clusterSize;
    // Cluster state captured at construction time.
    private KubernetesCluster.State originalState;
    // Cluster's guest network, resolved in init().
    private Network network;
    // Presumably a deadline for the scale operation; not assigned in the visible code — confirm.
    private long scaleTimeoutTime;
/**
 * Worker that scales an existing Kubernetes cluster to a new size and/or service offering.
 *
 * @param kubernetesCluster cluster to scale
 * @param serviceOffering target compute offering (null keeps the current offering)
 * @param clusterSize target node count (null keeps the current size)
 * @param clusterManager owning manager
 */
public KubernetesClusterScaleWorker(final KubernetesCluster kubernetesCluster,
        final ServiceOffering serviceOffering,
        final Long clusterSize,
        final KubernetesClusterManagerImpl clusterManager) {
    super(kubernetesCluster, clusterManager);
    this.serviceOffering = serviceOffering;
    this.clusterSize = clusterSize;
    this.originalState = kubernetesCluster.getState();
}
/** Initializes base worker state and resolves the cluster's guest network. */
@Override // fix: init() overrides the parent's init() (see super.init()); annotate it
protected void init() {
    super.init();
    this.network = networkDao.findById(kubernetesCluster.getNetworkId());
}
/**
 * Logs the message and throws a CloudRuntimeException; when the cluster is still in the
 * Scaling state, it is first transitioned through the OperationFailed event.
 */
private void logTransitStateToFailedIfNeededAndThrow(final Level logLevel, final String message, final Exception e) throws CloudRuntimeException {
    final KubernetesCluster cluster = kubernetesClusterDao.findById(kubernetesCluster.getId());
    final boolean stillScaling = cluster != null && KubernetesCluster.State.Scaling.equals(cluster.getState());
    if (stillScaling) {
        logTransitStateAndThrow(logLevel, message, kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed, e);
        return;
    }
    logAndThrow(logLevel, message, e);
}

/** Convenience overload without a cause exception. */
private void logTransitStateToFailedIfNeededAndThrow(final Level logLevel, final String message) throws CloudRuntimeException {
    logTransitStateToFailedIfNeededAndThrow(logLevel, message, null);
}
/**
 * Scales the network rules of an existing Kubernetes cluster while scaling it:
 * re-opens the SSH firewall range from CLUSTER_NODES_DEFAULT_START_SSH_PORT for the scaled
 * node count, removes port forwarding rules of removed VMs and provisions port forwarding
 * rules forwarding public-IP traffic to the new node VMs' private IPs. No-op for non-isolated
 * networks.
 *
 * @param clusterVMIds ids of VMs that need new SSH port forwarding rules
 * @param removedVMIds ids of VMs whose rules must be removed
 * @throws ManagementServerException when any rule operation fails
 */
private void scaleKubernetesClusterNetworkRules(final List<Long> clusterVMIds, final List<Long> removedVMIds) throws ManagementServerException {
    if (!Network.GuestType.Isolated.equals(network.getGuestType())) {
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug(String.format("Network ID: %s for Kubernetes cluster ID: %s is not an isolated network, therefore, no need for network rules", network.getUuid(), kubernetesCluster.getUuid()));
        }
        return;
    }
    IpAddress publicIp = getSourceNatIp(network);
    if (publicIp == null) {
        throw new ManagementServerException(String.format("No source NAT IP addresses found for network ID: %s, Kubernetes cluster ID: %s", network.getUuid(), kubernetesCluster.getUuid()));
    }
    // Remove existing SSH firewall rules
    FirewallRule firewallRule = removeSshFirewallRule(publicIp);
    if (firewallRule == null) {
        throw new ManagementServerException("Firewall rule for node SSH access can't be provisioned");
    }
    // Remember where the old range ended; new port forwarding rules start right after it.
    int existingFirewallRuleSourcePortEnd = firewallRule.getSourcePortEnd();
    final int scaledTotalNodeCount = clusterSize == null ? (int)kubernetesCluster.getTotalNodeCount() : (int)(clusterSize + kubernetesCluster.getMasterNodeCount());
    // Provision new SSH firewall rules
    try {
        provisionFirewallRules(publicIp, owner, CLUSTER_NODES_DEFAULT_START_SSH_PORT, CLUSTER_NODES_DEFAULT_START_SSH_PORT + scaledTotalNodeCount - 1);
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug(String.format("Provisioned firewall rule to open up port %d to %d on %s in Kubernetes cluster ID: %s",
                    CLUSTER_NODES_DEFAULT_START_SSH_PORT, CLUSTER_NODES_DEFAULT_START_SSH_PORT + scaledTotalNodeCount - 1, publicIp.getAddress().addr(), kubernetesCluster.getUuid()));
        }
    } catch (NoSuchFieldException | IllegalAccessException | ResourceUnavailableException e) {
        throw new ManagementServerException(String.format("Failed to activate SSH firewall rules for the Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), e);
    }
    try {
        removePortForwardingRules(publicIp, network, owner, removedVMIds);
    } catch (ResourceUnavailableException e) {
        throw new ManagementServerException(String.format("Failed to remove SSH port forwarding rules for removed VMs for the Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), e);
    }
    try {
        provisionSshPortForwardingRules(publicIp, network, owner, clusterVMIds, existingFirewallRuleSourcePortEnd + 1);
    } catch (ResourceUnavailableException | NetworkRuleConflictException e) {
        throw new ManagementServerException(String.format("Failed to activate SSH port forwarding rules for the Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), e);
    }
}
/**
 * Persists the updated total cores/memory (and, when given, the node count and service
 * offering) of this cluster inside a transaction.
 */
private KubernetesClusterVO updateKubernetesClusterEntry(final long cores, final long memory,
        final Long size, final Long serviceOfferingId) {
    return Transaction.execute((TransactionCallback<KubernetesClusterVO>) status -> {
        final KubernetesClusterVO toUpdate = kubernetesClusterDao.createForUpdate(kubernetesCluster.getId());
        toUpdate.setCores(cores);
        toUpdate.setMemory(memory);
        if (size != null) {
            toUpdate.setNodeCount(size);
        }
        if (serviceOfferingId != null) {
            toUpdate.setServiceOfferingId(serviceOfferingId);
        }
        kubernetesClusterDao.persist(toUpdate);
        return toUpdate;
    });
}
/**
 * Recomputes the cluster's total cores and memory from the effective service
 * offering and node count, then persists the updated entry.
 *
 * @param newSize            new worker node count, or null to keep the current count
 * @param newServiceOffering new service offering, or null to keep the current one
 * @return the updated cluster record
 * @throws CloudRuntimeException when the DB update yields no record
 */
private KubernetesClusterVO updateKubernetesClusterEntry(final Long newSize, final ServiceOffering newServiceOffering) throws CloudRuntimeException {
    final boolean offeringChanged = newServiceOffering != null;
    final ServiceOffering effectiveOffering = offeringChanged ?
            newServiceOffering : serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId());
    // Only write the offering ID when the caller actually requested a change
    final Long offeringIdForUpdate = offeringChanged ? effectiveOffering.getId() : null;
    // Totals include master nodes: newSize counts workers only
    final long totalNodes = newSize != null ?
            newSize + kubernetesCluster.getMasterNodeCount() : kubernetesCluster.getTotalNodeCount();
    final long cores = effectiveOffering.getCpu() * totalNodes;
    final long memory = effectiveOffering.getRamSize() * totalNodes;
    final KubernetesClusterVO updated = updateKubernetesClusterEntry(cores, memory, newSize, offeringIdForUpdate);
    if (updated == null) {
        logTransitStateAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster ID: %s failed, unable to update Kubernetes cluster",
                kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed);
    }
    return updated;
}
/**
 * Drains and deletes a node from the Kubernetes cluster by running kubectl
 * over SSH against the cluster endpoint, retrying on transient SSH failures.
 *
 * @param ipAddress    public IP address to SSH into
 * @param port         SSH port
 * @param userVm       VM backing the node being removed
 * @param retries      maximum number of attempts
 * @param waitDuration milliseconds to wait between attempts
 * @return true when the node was drained and deleted, false otherwise
 */
private boolean removeKubernetesClusterNode(final String ipAddress, final int port, final UserVm userVm, final int retries, final int waitDuration) {
    File pkFile = getManagementServerSshPublicKeyFile();
    int retryCounter = 0;
    String hostName = userVm.getHostName();
    if (!Strings.isNullOrEmpty(hostName)) {
        // kubectl node names are lowercase
        hostName = hostName.toLowerCase();
    }
    while (retryCounter < retries) {
        // Fix: increment exactly once per iteration. The counter was previously
        // also incremented at the end of the loop, halving the effective retries.
        retryCounter++;
        try {
            Pair<Boolean, String> result = SshHelper.sshExecute(ipAddress, port, CLUSTER_NODE_VM_USER,
                    pkFile, null, String.format("sudo kubectl drain %s --ignore-daemonsets --delete-local-data", hostName),
                    10000, 10000, 60000);
            if (!result.first()) {
                LOGGER.warn(String.format("Draining node: %s on VM ID: %s in Kubernetes cluster ID: %s unsuccessful", hostName, userVm.getUuid(), kubernetesCluster.getUuid()));
            } else {
                result = SshHelper.sshExecute(ipAddress, port, CLUSTER_NODE_VM_USER,
                        pkFile, null, String.format("sudo kubectl delete node %s", hostName),
                        10000, 10000, 30000);
                if (result.first()) {
                    return true;
                } else {
                    LOGGER.warn(String.format("Deleting node: %s on VM ID: %s in Kubernetes cluster ID: %s unsuccessful", hostName, userVm.getUuid(), kubernetesCluster.getUuid()));
                }
            }
            // Commands executed but reported failure: give up; only SSH/exec
            // exceptions are retried (original behavior preserved).
            break;
        } catch (Exception e) {
            String msg = String.format("Failed to remove Kubernetes cluster ID: %s node: %s on VM ID: %s", kubernetesCluster.getUuid(), hostName, userVm.getUuid());
            LOGGER.warn(msg, e);
        }
        try {
            Thread.sleep(waitDuration);
        } catch (InterruptedException ie) {
            LOGGER.error(String.format("Error while waiting for Kubernetes cluster ID: %s node: %s on VM ID: %s removal", kubernetesCluster.getUuid(), hostName, userVm.getUuid()), ie);
            // Preserve the thread's interrupt status for callers up the stack
            Thread.currentThread().interrupt();
        }
    }
    return false;
}
/**
 * Pre-flight checks for scaling the cluster's service offering.
 * Skipped entirely for clusters still in the Created state (no VMs deployed).
 * Fails the operation when fewer VM mappings exist than the recorded node
 * count (unstable cluster), or when any VM is running on a hypervisor other
 * than XenServer, VMware or Simulator.
 *
 * @throws CloudRuntimeException when validation fails (cluster state is
 *         transited to Failed as needed before throwing)
 */
private void validateKubernetesClusterScaleOfferingParameters() throws CloudRuntimeException {
    if (KubernetesCluster.State.Created.equals(originalState)) {
        // No VMs exist yet; an offering change is a pure DB update
        return;
    }
    final long originalNodeCount = kubernetesCluster.getTotalNodeCount();
    List<KubernetesClusterVmMapVO> vmList = kubernetesClusterVmMapDao.listByClusterId(kubernetesCluster.getId());
    if (vmList == null || vmList.isEmpty() || vmList.size() < originalNodeCount) {
        logTransitStateToFailedIfNeededAndThrow(Level.WARN, String.format("Scaling Kubernetes cluster ID: %s failed, it is in unstable state as not enough existing VM instances found!", kubernetesCluster.getUuid()));
    } else {
        for (KubernetesClusterVmMapVO vmMapVO : vmList) {
            VMInstanceVO vmInstance = vmInstanceDao.findById(vmMapVO.getVmId());
            // Running VMs are rejected unless on XenServer, VMware or Simulator —
            // presumably the hypervisors supporting live offering scaling; see message below
            if (vmInstance != null && vmInstance.getState().equals(VirtualMachine.State.Running) &&
                    vmInstance.getHypervisorType() != Hypervisor.HypervisorType.XenServer &&
                    vmInstance.getHypervisorType() != Hypervisor.HypervisorType.VMware &&
                    vmInstance.getHypervisorType() != Hypervisor.HypervisorType.Simulator) {
                logTransitStateToFailedIfNeededAndThrow(Level.WARN, String.format("Scaling Kubernetes cluster ID: %s failed, scaling Kubernetes cluster with running VMs on hypervisor %s is not supported!", kubernetesCluster.getUuid(), vmInstance.getHypervisorType()));
            }
        }
    }
}
/**
 * Pre-flight checks for changing the cluster's node count: the cluster
 * network and service offering must exist, there must be capacity to deploy
 * any additional VMs, and the existing VM mappings must match the recorded
 * node count.
 *
 * @throws CloudRuntimeException when validation fails (cluster state is
 *         transited to Failed as needed before throwing)
 */
private void validateKubernetesClusterScaleSizeParameters() throws CloudRuntimeException {
    final long originalClusterSize = kubernetesCluster.getNodeCount();
    if (network == null) {
        logTransitStateToFailedIfNeededAndThrow(Level.WARN, String.format("Scaling failed for Kubernetes cluster ID: %s, cluster network not found", kubernetesCluster.getUuid()));
    }
    // Check capacity and transition state
    final long newVmRequiredCount = clusterSize - originalClusterSize;
    final ServiceOffering clusterServiceOffering = serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId());
    if (clusterServiceOffering == null) {
        logTransitStateToFailedIfNeededAndThrow(Level.WARN, String.format("Scaling failed for Kubernetes cluster ID: %s, cluster service offering not found", kubernetesCluster.getUuid()));
    }
    if (newVmRequiredCount > 0) {
        final DataCenter zone = dataCenterDao.findById(kubernetesCluster.getZoneId());
        try {
            if (originalState.equals(KubernetesCluster.State.Running)) {
                // Running cluster: only the additional VMs need capacity
                plan(newVmRequiredCount, zone, clusterServiceOffering);
            } else {
                // Non-running cluster: plan for all existing nodes plus the new ones
                plan(kubernetesCluster.getTotalNodeCount() + newVmRequiredCount, zone, clusterServiceOffering);
            }
        } catch (InsufficientCapacityException e) {
            logTransitStateToFailedIfNeededAndThrow(Level.WARN, String.format("Scaling failed for Kubernetes cluster ID: %s in zone ID: %s, insufficient capacity", kubernetesCluster.getUuid(), zone.getUuid()));
        }
    }
    List<KubernetesClusterVmMapVO> vmList = kubernetesClusterVmMapDao.listByClusterId(kubernetesCluster.getId());
    if (CollectionUtils.isEmpty(vmList) || vmList.size() < kubernetesCluster.getTotalNodeCount()) {
        logTransitStateToFailedIfNeededAndThrow(Level.ERROR, String.format("Scaling failed for Kubernetes cluster ID: %s, it is in unstable state as not enough existing VM instances found", kubernetesCluster.getUuid()));
    }
}
/**
 * Applies the requested service offering to every deployed cluster VM and
 * records the new offering on the cluster entry. For clusters still in the
 * Created state only the DB entry is updated (no VMs exist yet).
 *
 * @throws CloudRuntimeException when any VM upgrade fails or the operation times out
 */
private void scaleKubernetesClusterOffering() throws CloudRuntimeException {
    validateKubernetesClusterScaleOfferingParameters();
    if (!kubernetesCluster.getState().equals(KubernetesCluster.State.Scaling)) {
        stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.ScaleUpRequested);
    }
    if (KubernetesCluster.State.Created.equals(originalState)) {
        kubernetesCluster = updateKubernetesClusterEntry(null, serviceOffering);
        return;
    }
    final List<KubernetesClusterVmMapVO> vmMaps = kubernetesClusterVmMapDao.listByClusterId(kubernetesCluster.getId());
    // Never scale more VMs than the recorded node count
    final int vmsToScale = (int) Math.min(vmMaps.size(), kubernetesCluster.getTotalNodeCount());
    for (int index = 0; index < vmsToScale; index++) {
        final UserVmVO clusterVm = userVmDao.findById(vmMaps.get(index).getVmId());
        boolean upgraded = false;
        try {
            upgraded = userVmManager.upgradeVirtualMachine(clusterVm.getId(), serviceOffering.getId(), new HashMap<String, String>());
        } catch (ResourceUnavailableException | ManagementServerException | ConcurrentOperationException | VirtualMachineMigrationException e) {
            logTransitStateAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster ID: %s failed, unable to scale cluster VM ID: %s", kubernetesCluster.getUuid(), clusterVm.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed, e);
        }
        if (!upgraded) {
            logTransitStateAndThrow(Level.WARN, String.format("Scaling Kubernetes cluster ID: %s failed, unable to scale cluster VM ID: %s", kubernetesCluster.getUuid(), clusterVm.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed);
        }
        if (System.currentTimeMillis() > scaleTimeoutTime) {
            logTransitStateAndThrow(Level.WARN, String.format("Scaling Kubernetes cluster ID: %s failed, scaling action timed out", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed);
        }
    }
    kubernetesCluster = updateKubernetesClusterEntry(null, serviceOffering);
}
/**
 * Removes surplus worker nodes, working backwards from the end of the VM
 * list: each node beyond (master count + requested size) is drained/deleted
 * from Kubernetes, its VM destroyed and expunged, and its cluster mapping
 * removed. Finally the network rules are rebuilt without the removed VMs.
 *
 * @throws CloudRuntimeException when node removal, VM expunging or network
 *         rule update fails, or the operation times out
 */
private void scaleDownKubernetesClusterSize() throws CloudRuntimeException {
    if (!kubernetesCluster.getState().equals(KubernetesCluster.State.Scaling)) {
        stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.ScaleDownRequested);
    }
    final List<KubernetesClusterVmMapVO> originalVmList = getKubernetesClusterVMMaps();
    int i = originalVmList.size() - 1;
    List<Long> removedVmIds = new ArrayList<>();
    // Remove from the tail while more VMs exist than masters + requested size
    while (i >= kubernetesCluster.getMasterNodeCount() + clusterSize) {
        KubernetesClusterVmMapVO vmMapVO = originalVmList.get(i);
        UserVmVO userVM = userVmDao.findById(vmMapVO.getVmId());
        // Drain and delete the node from Kubernetes before touching the VM
        if (!removeKubernetesClusterNode(publicIpAddress, sshPort, userVM, 3, 30000)) {
            logTransitStateAndThrow(Level.ERROR, String.format("Scaling failed for Kubernetes cluster ID: %s, failed to remove Kubernetes node: %s running on VM ID: %s", kubernetesCluster.getUuid(), userVM.getHostName(), userVM.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed);
        }
        // For removing port-forwarding network rules
        removedVmIds.add(userVM.getId());
        try {
            UserVm vm = userVmService.destroyVm(userVM.getId(), true);
            if (!userVmManager.expunge(userVM, CallContext.current().getCallingUserId(), CallContext.current().getCallingAccount())) {
                logTransitStateAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster ID: %s failed, unable to expunge VM '%s'."
                        , kubernetesCluster.getUuid()
                        , vm.getInstanceName()),
                        kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed);
            }
        } catch (ResourceUnavailableException e) {
            logTransitStateAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster ID: %s failed, unable to remove VM ID: %s"
                    , kubernetesCluster.getUuid() , userVM.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed, e);
        }
        kubernetesClusterVmMapDao.expunge(vmMapVO.getId());
        if (System.currentTimeMillis() > scaleTimeoutTime) {
            logTransitStateAndThrow(Level.WARN, String.format("Scaling Kubernetes cluster ID: %s failed, scaling action timed out", kubernetesCluster.getUuid()),kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed);
        }
        i--;
    }
    // Scale network rules to update firewall rule
    try {
        scaleKubernetesClusterNetworkRules(null, removedVmIds);
    } catch (ManagementServerException e) {
        logTransitStateAndThrow(Level.ERROR, String.format("Scaling failed for Kubernetes cluster ID: %s, unable to update network rules", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed, e);
    }
}
/**
 * Provisions additional worker node VMs, attaches the binaries ISO, opens the
 * needed network rules for the new VMs, and waits until the cluster reports
 * the expected number of ready nodes.
 *
 * @param newVmCount number of worker VMs to add
 * @throws CloudRuntimeException when provisioning, network rule update or the
 *         ready-nodes check fails
 */
private void scaleUpKubernetesClusterSize(final long newVmCount) throws CloudRuntimeException {
    if (!kubernetesCluster.getState().equals(KubernetesCluster.State.Scaling)) {
        stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.ScaleUpRequested);
    }
    List<UserVm> clusterVMs = new ArrayList<>();
    List<Long> clusterVMIds = new ArrayList<>();
    try {
        clusterVMs = provisionKubernetesClusterNodeVms((int)(newVmCount + kubernetesCluster.getNodeCount()), (int)kubernetesCluster.getNodeCount(), publicIpAddress);
    } catch (CloudRuntimeException | ManagementServerException | ResourceUnavailableException | InsufficientCapacityException e) {
        logTransitStateToFailedIfNeededAndThrow(Level.ERROR, String.format("Scaling failed for Kubernetes cluster ID: %s, unable to provision node VM in the cluster", kubernetesCluster.getUuid()), e);
    }
    attachIsoKubernetesVMs(clusterVMs);
    for (UserVm vm : clusterVMs) {
        clusterVMIds.add(vm.getId());
    }
    // Open firewall/port-forwarding rules for the newly added VMs only
    try {
        scaleKubernetesClusterNetworkRules(clusterVMIds, null);
    } catch (ManagementServerException e) {
        logTransitStateToFailedIfNeededAndThrow(Level.ERROR, String.format("Scaling failed for Kubernetes cluster ID: %s, unable to update network rules", kubernetesCluster.getUuid()), e);
    }
    // Validate readiness against the target node count (not yet persisted on kubernetesCluster)
    KubernetesClusterVO kubernetesClusterVO = kubernetesClusterDao.findById(kubernetesCluster.getId());
    kubernetesClusterVO.setNodeCount(clusterSize);
    boolean readyNodesCountValid = KubernetesClusterUtil.validateKubernetesClusterReadyNodesCount(kubernetesClusterVO, publicIpAddress, sshPort,
            CLUSTER_NODE_VM_USER, sshKeyFile, scaleTimeoutTime, 15000);
    // Detach the ISO regardless of the readiness outcome
    detachIsoKubernetesVMs(clusterVMs);
    if (!readyNodesCountValid) { // Scaling failed
        logTransitStateToFailedIfNeededAndThrow(Level.ERROR, String.format("Scaling unsuccessful for Kubernetes cluster ID: %s as it does not have desired number of nodes in ready state", kubernetesCluster.getUuid()));
    }
}
/**
 * Changes the cluster's worker node count to the requested {@code clusterSize},
 * dispatching to the up- or down-scale path. Clusters still in Created state
 * only get their DB entry updated (no VMs deployed yet).
 *
 * @throws CloudRuntimeException when validation, scaling or the final DB
 *         update fails
 */
private void scaleKubernetesClusterSize() throws CloudRuntimeException {
    validateKubernetesClusterScaleSizeParameters();
    final long originalClusterSize = kubernetesCluster.getNodeCount();
    final long newVmRequiredCount = clusterSize - originalClusterSize;
    if (KubernetesCluster.State.Created.equals(originalState)) {
        if (!kubernetesCluster.getState().equals(KubernetesCluster.State.Scaling)) {
            stateTransitTo(kubernetesCluster.getId(), newVmRequiredCount > 0 ? KubernetesCluster.Event.ScaleUpRequested : KubernetesCluster.Event.ScaleDownRequested);
        }
        kubernetesCluster = updateKubernetesClusterEntry(null, serviceOffering);
        return;
    }
    // Resolve the cluster's public endpoint for SSH/kubectl operations
    Pair<String, Integer> publicIpSshPort = getKubernetesClusterServerIpSshPort(null);
    publicIpAddress = publicIpSshPort.first();
    sshPort = publicIpSshPort.second();
    if (Strings.isNullOrEmpty(publicIpAddress)) {
        logTransitStateToFailedIfNeededAndThrow(Level.ERROR, String.format("Scaling failed for Kubernetes cluster ID: %s, unable to retrieve associated public IP", kubernetesCluster.getUuid()));
    }
    if (newVmRequiredCount < 0) { // downscale
        scaleDownKubernetesClusterSize();
    } else { // upscale, same node count handled above
        scaleUpKubernetesClusterSize(newVmRequiredCount);
    }
    kubernetesCluster = updateKubernetesClusterEntry(clusterSize, null);
}
/**
 * Entry point for a cluster scale operation: applies a service offering
 * change, a node count change, or both, then marks the operation successful.
 *
 * @return true on success
 * @throws CloudRuntimeException when any scaling step fails
 */
public boolean scaleCluster() throws CloudRuntimeException {
    init();
    if (LOGGER.isInfoEnabled()) {
        LOGGER.info(String.format("Scaling Kubernetes cluster ID: %s", kubernetesCluster.getUuid()));
    }
    scaleTimeoutTime = System.currentTimeMillis() + KubernetesClusterService.KubernetesClusterScaleTimeout.value() * 1000;
    final long currentNodeCount = kubernetesCluster.getNodeCount();
    final ServiceOffering currentOffering = serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId());
    if (currentOffering == null) {
        logAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster ID: %s failed, service offering for the Kubernetes cluster not found!", kubernetesCluster.getUuid()));
    }
    final boolean offeringChangeRequested = serviceOffering != null && serviceOffering.getId() != currentOffering.getId();
    final boolean sizeChangeRequested = clusterSize != null && clusterSize != currentNodeCount;
    final long nodeDelta = clusterSize == null ? 0 : clusterSize - currentNodeCount;
    if (offeringChangeRequested && sizeChangeRequested) {
        // Offering is applied before growing and after shrinking — presumably so
        // the smaller set of VMs is upgraded; TODO confirm intent
        if (nodeDelta > 0) {
            scaleKubernetesClusterOffering();
            scaleKubernetesClusterSize();
        } else {
            scaleKubernetesClusterSize();
            scaleKubernetesClusterOffering();
        }
    } else if (offeringChangeRequested) {
        scaleKubernetesClusterOffering();
    } else if (sizeChangeRequested) {
        scaleKubernetesClusterSize();
    }
    stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationSucceeded);
    return true;
}
}

View File

@ -0,0 +1,640 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.kubernetes.cluster.actionworkers;
import java.io.IOException;
import java.net.InetAddress;
import java.net.MalformedURLException;
import java.net.URL;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.cloudstack.api.BaseCmd;
import org.apache.cloudstack.framework.ca.Certificate;
import org.apache.cloudstack.utils.security.CertUtils;
import org.apache.commons.codec.binary.Base64;
import org.apache.commons.collections.CollectionUtils;
import org.apache.log4j.Level;
import com.cloud.dc.DataCenter;
import com.cloud.dc.Vlan;
import com.cloud.dc.VlanVO;
import com.cloud.deploy.DeployDestination;
import com.cloud.exception.ConcurrentOperationException;
import com.cloud.exception.InsufficientAddressCapacityException;
import com.cloud.exception.InsufficientCapacityException;
import com.cloud.exception.ManagementServerException;
import com.cloud.exception.NetworkRuleConflictException;
import com.cloud.exception.ResourceUnavailableException;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.kubernetes.cluster.KubernetesCluster;
import com.cloud.kubernetes.cluster.KubernetesClusterDetailsVO;
import com.cloud.kubernetes.cluster.KubernetesClusterManagerImpl;
import com.cloud.kubernetes.cluster.KubernetesClusterService;
import com.cloud.kubernetes.cluster.KubernetesClusterVO;
import com.cloud.kubernetes.cluster.KubernetesClusterVmMapVO;
import com.cloud.kubernetes.cluster.utils.KubernetesClusterUtil;
import com.cloud.kubernetes.version.KubernetesSupportedVersion;
import com.cloud.kubernetes.version.KubernetesVersionManagerImpl;
import com.cloud.network.IpAddress;
import com.cloud.network.Network;
import com.cloud.network.addr.PublicIp;
import com.cloud.network.rules.LoadBalancer;
import com.cloud.offering.ServiceOffering;
import com.cloud.template.VirtualMachineTemplate;
import com.cloud.user.Account;
import com.cloud.user.SSHKeyPairVO;
import com.cloud.uservm.UserVm;
import com.cloud.utils.Pair;
import com.cloud.utils.StringUtils;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.net.Ip;
import com.cloud.utils.net.NetUtils;
import com.cloud.vm.Nic;
import com.cloud.vm.ReservationContext;
import com.cloud.vm.ReservationContextImpl;
import com.cloud.vm.VirtualMachine;
import com.google.common.base.Strings;
public class KubernetesClusterStartWorker extends KubernetesClusterResourceModifierActionWorker {
/**
 * Creates a start worker bound to the given cluster and manager; all state
 * initialization is delegated to the resource-modifier base class.
 *
 * @param kubernetesCluster cluster this worker operates on
 * @param clusterManager    owning manager providing services and DAOs
 */
public KubernetesClusterStartWorker(final KubernetesCluster kubernetesCluster, final KubernetesClusterManagerImpl clusterManager) {
    super(kubernetesCluster, clusterManager);
}
/**
 * Determines the IP address for the master VM. On a shared guest network a
 * public IP is allocated from the network's VLANs and also recorded in the
 * requested-IPs map; on other network types a guest IP is acquired directly.
 *
 * @param zone    zone the cluster is deployed in
 * @param network cluster network
 * @param account owning account
 * @return pair of (master IP or null when none available, requested-IPs map
 *         or null for non-shared networks)
 * @throws InsufficientAddressCapacityException when no guest IP can be acquired
 */
private Pair<String, Map<Long, Network.IpAddresses>> getKubernetesMasterIpAddresses(final DataCenter zone, final Network network, final Account account) throws InsufficientAddressCapacityException {
    String masterIp = null;
    Map<Long, Network.IpAddresses> requestedIps = null;
    if (Network.GuestType.Shared.equals(network.getGuestType())) {
        List<Long> vlanIds = new ArrayList<>();
        List<VlanVO> vlans = vlanDao.listVlansByNetworkId(network.getId());
        for (VlanVO vlan : vlans) {
            vlanIds.add(vlan.getId());
        }
        PublicIp ip = ipAddressManager.getAvailablePublicIpAddressFromVlans(zone.getId(), null, account, Vlan.VlanType.DirectAttached, vlanIds, network.getId(), null, false);
        requestedIps = new HashMap<>();
        // Fix: the address was previously dereferenced outside this null check,
        // causing an NPE when no public IP is available in the network's VLANs.
        if (ip != null) {
            Ip ipAddress = ip.getAddress();
            masterIp = ipAddress.toString();
            requestedIps.put(network.getId(), new Network.IpAddresses(ipAddress.isIp4() ? ipAddress.addr() : null, null));
        }
    } else {
        masterIp = ipAddressManager.acquireGuestIpAddress(networkDao.findById(kubernetesCluster.getNetworkId()), null);
    }
    return new Pair<>(masterIp, requestedIps);
}
/**
 * Reports whether the cluster's Kubernetes version is at least the minimum
 * version that supports HA (multi-master) deployments.
 *
 * @return true when HA is supported; false when the version is unknown,
 *         older than the minimum, or cannot be compared
 */
private boolean isKubernetesVersionSupportsHA() {
    final KubernetesSupportedVersion version = kubernetesSupportedVersionDao.findById(kubernetesCluster.getKubernetesVersionId());
    if (version == null) {
        return false;
    }
    try {
        return KubernetesVersionManagerImpl.compareSemanticVersions(version.getSemanticVersion(), KubernetesClusterService.MIN_KUBERNETES_VERSION_HA_SUPPORT) >= 0;
    } catch (IllegalArgumentException e) {
        LOGGER.error(String.format("Unable to compare Kubernetes version for cluster version ID: %s with %s", version.getUuid(), KubernetesClusterService.MIN_KUBERNETES_VERSION_HA_SUPPORT), e);
        return false;
    }
}
/**
 * Renders the cloud-init user data for the first master VM from the
 * k8s-master.yml template: issues a TLS certificate for the API server
 * (with the public server IP as an extra SAN), injects SSH public keys,
 * the cluster join token and kubeadm init arguments.
 *
 * @param masterIp    guest IP of the master VM
 * @param serverIp    public IP used to reach the cluster API
 * @param hostName    master VM host name (included in the certificate)
 * @param haSupported whether the Kubernetes version supports HA
 * @param ejectIso    whether the setup ISO should be ejected after install
 * @return the rendered master configuration
 * @throws IOException when the template resource cannot be read
 */
private String getKubernetesMasterConfig(final String masterIp, final String serverIp,
                                         final String hostName, final boolean haSupported,
                                         final boolean ejectIso) throws IOException {
    String k8sMasterConfig = readResourceFile("/conf/k8s-master.yml");
    // Placeholder tokens expected by the template
    final String apiServerCert = "{{ k8s_master.apiserver.crt }}";
    final String apiServerKey = "{{ k8s_master.apiserver.key }}";
    final String caCert = "{{ k8s_master.ca.crt }}";
    final String sshPubKey = "{{ k8s.ssh.pub.key }}";
    final String clusterToken = "{{ k8s_master.cluster.token }}";
    final String clusterInitArgsKey = "{{ k8s_master.cluster.initargs }}";
    final String ejectIsoKey = "{{ k8s.eject.iso }}";
    // Certificate SANs: master IP plus the public server IP when they differ
    final List<String> addresses = new ArrayList<>();
    addresses.add(masterIp);
    if (!serverIp.equals(masterIp)) {
        addresses.add(serverIp);
    }
    final Certificate certificate = caManager.issueCertificate(null, Arrays.asList(hostName, "kubernetes",
            "kubernetes.default", "kubernetes.default.svc", "kubernetes.default.svc.cluster", "kubernetes.default.svc.cluster.local"),
            addresses, 3650, null);
    final String tlsClientCert = CertUtils.x509CertificateToPem(certificate.getClientCertificate());
    final String tlsPrivateKey = CertUtils.privateKeyToPem(certificate.getPrivateKey());
    final String tlsCaCert = CertUtils.x509CertificatesToPem(certificate.getCaCertificates());
    // Indent PEM bodies so they remain valid YAML block content in the template
    k8sMasterConfig = k8sMasterConfig.replace(apiServerCert, tlsClientCert.replace("\n", "\n      "));
    k8sMasterConfig = k8sMasterConfig.replace(apiServerKey, tlsPrivateKey.replace("\n", "\n      "));
    k8sMasterConfig = k8sMasterConfig.replace(caCert, tlsCaCert.replace("\n", "\n      "));
    // Authorized SSH keys: the management server key plus the owner's key pair, if any
    String pubKey = "- \"" + configurationDao.getValue("ssh.publickey") + "\"";
    String sshKeyPair = kubernetesCluster.getKeyPair();
    if (!Strings.isNullOrEmpty(sshKeyPair)) {
        SSHKeyPairVO sshkp = sshKeyPairDao.findByName(owner.getAccountId(), owner.getDomainId(), sshKeyPair);
        if (sshkp != null) {
            pubKey += "\n      - \"" + sshkp.getPublicKey() + "\"";
        }
    }
    k8sMasterConfig = k8sMasterConfig.replace(sshPubKey, pubKey);
    k8sMasterConfig = k8sMasterConfig.replace(clusterToken, KubernetesClusterUtil.generateClusterToken(kubernetesCluster));
    String initArgs = "";
    if (haSupported) {
        // HA clusters need a stable control-plane endpoint and a shared certificate key
        initArgs = String.format("--control-plane-endpoint %s:%d --upload-certs --certificate-key %s ",
                serverIp,
                CLUSTER_API_PORT,
                KubernetesClusterUtil.generateClusterHACertificateKey(kubernetesCluster));
    }
    initArgs += String.format("--apiserver-cert-extra-sans=%s", serverIp);
    k8sMasterConfig = k8sMasterConfig.replace(clusterInitArgsKey, initArgs);
    k8sMasterConfig = k8sMasterConfig.replace(ejectIsoKey, String.valueOf(ejectIso));
    return k8sMasterConfig;
}
/**
 * Creates (without starting) the first master VM of the cluster with
 * generated cloud-init user data, acquiring guest/public IPs as appropriate
 * for the network type.
 *
 * @param network  cluster network the VM joins
 * @param serverIp public IP used to reach the cluster API; for shared
 *                 networks defaults to the master's own IP when empty
 * @return the created (stopped) master VM
 * @throws ManagementServerException      on provisioning failure
 * @throws ResourceUnavailableException   when required resources are unavailable
 * @throws InsufficientCapacityException  when capacity is insufficient
 */
private UserVm createKubernetesMaster(final Network network, String serverIp) throws ManagementServerException,
        ResourceUnavailableException, InsufficientCapacityException {
    UserVm masterVm = null;
    DataCenter zone = dataCenterDao.findById(kubernetesCluster.getZoneId());
    ServiceOffering serviceOffering = serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId());
    VirtualMachineTemplate template = templateDao.findById(kubernetesCluster.getTemplateId());
    List<Long> networkIds = new ArrayList<Long>();
    networkIds.add(kubernetesCluster.getNetworkId());
    Pair<String, Map<Long, Network.IpAddresses>> ipAddresses = getKubernetesMasterIpAddresses(zone, network, owner);
    String masterIp = ipAddresses.first();
    Map<Long, Network.IpAddresses> requestedIps = ipAddresses.second();
    // On shared networks with no explicit server IP, the master's IP is the endpoint
    if (Network.GuestType.Shared.equals(network.getGuestType()) && Strings.isNullOrEmpty(serverIp)) {
        serverIp = masterIp;
    }
    Network.IpAddresses addrs = new Network.IpAddresses(masterIp, null);
    long rootDiskSize = kubernetesCluster.getNodeRootDiskSize();
    Map<String, String> customParameterMap = new HashMap<String, String>();
    if (rootDiskSize > 0) {
        customParameterMap.put("rootdisksize", String.valueOf(rootDiskSize));
    }
    // Multi-master clusters number their masters starting at -1
    String hostName = kubernetesClusterNodeNamePrefix + "-master";
    if (kubernetesCluster.getMasterNodeCount() > 1) {
        hostName += "-1";
    }
    hostName = getKubernetesClusterNodeAvailableName(hostName);
    boolean haSupported = isKubernetesVersionSupportsHA();
    String k8sMasterConfig = null;
    try {
        // Eject the config ISO automatically on VMware only
        k8sMasterConfig = getKubernetesMasterConfig(masterIp, serverIp, hostName, haSupported, Hypervisor.HypervisorType.VMware.equals(template.getHypervisorType()));
    } catch (IOException e) {
        logAndThrow(Level.ERROR, "Failed to read Kubernetes master configuration file", e);
    }
    String base64UserData = Base64.encodeBase64String(k8sMasterConfig.getBytes(StringUtils.getPreferredCharset()));
    masterVm = userVmService.createAdvancedVirtualMachine(zone, serviceOffering, template, networkIds, owner,
            hostName, hostName, null, null, null,
            null, BaseCmd.HTTPMethod.POST, base64UserData, kubernetesCluster.getKeyPair(),
            requestedIps, addrs, null, null, null, customParameterMap, null, null, null, null);
    if (LOGGER.isInfoEnabled()) {
        LOGGER.info(String.format("Created master VM ID: %s, %s in the Kubernetes cluster ID: %s", masterVm.getUuid(), hostName, kubernetesCluster.getUuid()));
    }
    return masterVm;
}
/**
 * Renders the cloud-init user data for an additional (joining) master VM
 * from the k8s-master-add.yml template: injects the join IP, cluster token,
 * HA certificate key, authorized SSH keys and the ISO-eject flag.
 *
 * @param joinIp   IP of the control plane the new master joins
 * @param ejectIso whether the setup ISO should be ejected after install
 * @return the rendered configuration
 * @throws IOException when the template resource cannot be read
 */
private String getKubernetesAdditionalMasterConfig(final String joinIp, final boolean ejectIso) throws IOException {
    String config = readResourceFile("/conf/k8s-master-add.yml");
    // Authorized SSH keys: management server key plus the owner's registered key pair
    String authorizedKeys = "- \"" + configurationDao.getValue("ssh.publickey") + "\"";
    final String keyPairName = kubernetesCluster.getKeyPair();
    if (!Strings.isNullOrEmpty(keyPairName)) {
        final SSHKeyPairVO keyPair = sshKeyPairDao.findByName(owner.getAccountId(), owner.getDomainId(), keyPairName);
        if (keyPair != null) {
            authorizedKeys += "\n - \"" + keyPair.getPublicKey() + "\"";
        }
    }
    config = config.replace("{{ k8s.ssh.pub.key }}", authorizedKeys);
    config = config.replace("{{ k8s_master.join_ip }}", joinIp);
    config = config.replace("{{ k8s_master.cluster.token }}", KubernetesClusterUtil.generateClusterToken(kubernetesCluster));
    config = config.replace("{{ k8s_master.cluster.ha.certificate.key }}", KubernetesClusterUtil.generateClusterHACertificateKey(kubernetesCluster));
    config = config.replace("{{ k8s.eject.iso }}", String.valueOf(ejectIso));
    return config;
}
/**
 * Creates (without starting) one additional master VM that joins the
 * existing control plane at the given IP.
 *
 * @param joinIp                       control plane IP the new master joins
 * @param additionalMasterNodeInstance zero-based index of the additional master;
 *                                     host name suffix is this value + 1
 * @return the created (stopped) additional master VM
 * @throws ManagementServerException      on provisioning failure
 * @throws ResourceUnavailableException   when required resources are unavailable
 * @throws InsufficientCapacityException  when capacity is insufficient
 */
private UserVm createKubernetesAdditionalMaster(final String joinIp, final int additionalMasterNodeInstance) throws ManagementServerException,
        ResourceUnavailableException, InsufficientCapacityException {
    UserVm additionalMasterVm = null;
    DataCenter zone = dataCenterDao.findById(kubernetesCluster.getZoneId());
    ServiceOffering serviceOffering = serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId());
    VirtualMachineTemplate template = templateDao.findById(kubernetesCluster.getTemplateId());
    List<Long> networkIds = new ArrayList<Long>();
    networkIds.add(kubernetesCluster.getNetworkId());
    // No specific IP requested; the network assigns one
    Network.IpAddresses addrs = new Network.IpAddresses(null, null);
    long rootDiskSize = kubernetesCluster.getNodeRootDiskSize();
    Map<String, String> customParameterMap = new HashMap<String, String>();
    if (rootDiskSize > 0) {
        customParameterMap.put("rootdisksize", String.valueOf(rootDiskSize));
    }
    String hostName = getKubernetesClusterNodeAvailableName(String.format("%s-master-%d", kubernetesClusterNodeNamePrefix, additionalMasterNodeInstance + 1));
    String k8sMasterConfig = null;
    try {
        // Eject the config ISO automatically on VMware only
        k8sMasterConfig = getKubernetesAdditionalMasterConfig(joinIp, Hypervisor.HypervisorType.VMware.equals(template.getHypervisorType()));
    } catch (IOException e) {
        logAndThrow(Level.ERROR, "Failed to read Kubernetes master configuration file", e);
    }
    String base64UserData = Base64.encodeBase64String(k8sMasterConfig.getBytes(StringUtils.getPreferredCharset()));
    additionalMasterVm = userVmService.createAdvancedVirtualMachine(zone, serviceOffering, template, networkIds, owner,
            hostName, hostName, null, null, null,
            null, BaseCmd.HTTPMethod.POST, base64UserData, kubernetesCluster.getKeyPair(),
            null, addrs, null, null, null, customParameterMap, null, null, null, null);
    if (LOGGER.isInfoEnabled()) {
        LOGGER.info(String.format("Created master VM ID: %s, %s in the Kubernetes cluster ID: %s", additionalMasterVm.getUuid(), hostName, kubernetesCluster.getUuid()));
    }
    return additionalMasterVm;
}
/**
 * Creates, registers and starts the cluster's first master VM.
 *
 * @param network         cluster network
 * @param publicIpAddress public IP used to reach the cluster API
 * @return the started master VM, re-read from the database
 * @throws ManagementServerException when the VM cannot be found after start
 */
private UserVm provisionKubernetesClusterMasterVm(final Network network, final String publicIpAddress) throws
        ManagementServerException, InsufficientCapacityException, ResourceUnavailableException {
    UserVm masterVm = createKubernetesMaster(network, publicIpAddress);
    addKubernetesClusterVm(kubernetesCluster.getId(), masterVm.getId());
    startKubernetesVM(masterVm);
    // Reload to pick up state populated during start
    masterVm = userVmDao.findById(masterVm.getId());
    if (masterVm == null) {
        throw new ManagementServerException(String.format("Failed to provision master VM for Kubernetes cluster ID: %s" , kubernetesCluster.getUuid()));
    }
    if (LOGGER.isInfoEnabled()) {
        LOGGER.info(String.format("Provisioned the master VM ID: %s in to the Kubernetes cluster ID: %s", masterVm.getUuid(), kubernetesCluster.getUuid()));
    }
    return masterVm;
}
/**
 * Creates, registers and starts the additional master VMs for a multi-master
 * cluster; returns an empty list for single-master clusters.
 *
 * @param publicIpAddress control plane IP the new masters join
 * @return the started additional master VMs (possibly empty)
 * @throws ManagementServerException when a VM cannot be found after start
 */
private List<UserVm> provisionKubernetesClusterAdditionalMasterVms(final String publicIpAddress) throws
        InsufficientCapacityException, ManagementServerException, ResourceUnavailableException {
    final List<UserVm> additionalMasters = new ArrayList<>();
    // Index 0 is the primary master; provision the remaining (count - 1) masters.
    // The loop body never runs for single-master clusters.
    for (int i = 1; i < kubernetesCluster.getMasterNodeCount(); i++) {
        UserVm vm = createKubernetesAdditionalMaster(publicIpAddress, i);
        addKubernetesClusterVm(kubernetesCluster.getId(), vm.getId());
        startKubernetesVM(vm);
        // Reload to pick up state populated during start
        vm = userVmDao.findById(vm.getId());
        if (vm == null) {
            throw new ManagementServerException(String.format("Failed to provision additional master VM for Kubernetes cluster ID: %s" , kubernetesCluster.getUuid()));
        }
        additionalMasters.add(vm);
        if (LOGGER.isInfoEnabled()) {
            LOGGER.info(String.format("Provisioned additional master VM ID: %s in to the Kubernetes cluster ID: %s", vm.getUuid(), kubernetesCluster.getUuid()));
        }
    }
    return additionalMasters;
}
/**
 * Looks up and implements/starts the cluster's network before any VM
 * deployment; transitions the cluster to CreateFailed when the network is
 * missing or cannot be started.
 *
 * @param destination deployment destination used to start the network
 * @return the started network
 * @throws ManagementServerException when the network is missing or fails to start
 */
private Network startKubernetesClusterNetwork(final DeployDestination destination) throws ManagementServerException {
    final ReservationContext context = new ReservationContextImpl(null, null, null, owner);
    Network network = networkDao.findById(kubernetesCluster.getNetworkId());
    if (network == null) {
        String msg = String.format("Network for Kubernetes cluster ID: %s not found", kubernetesCluster.getUuid());
        LOGGER.warn(msg);
        stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed);
        throw new ManagementServerException(msg);
    }
    try {
        networkMgr.startNetwork(network.getId(), destination, context);
        if (LOGGER.isInfoEnabled()) {
            LOGGER.info(String.format("Network ID: %s is started for the Kubernetes cluster ID: %s", network.getUuid(), kubernetesCluster.getUuid()));
        }
    } catch (ConcurrentOperationException | ResourceUnavailableException |InsufficientCapacityException e) {
        String msg = String.format("Failed to start Kubernetes cluster ID: %s as unable to start associated network ID: %s" , kubernetesCluster.getUuid(), network.getUuid());
        LOGGER.error(msg, e);
        stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed);
        throw new ManagementServerException(msg, e);
    }
    return network;
}
/**
 * Creates a public load balancer rule on the given public IP for the given
 * port and assigns the private IP of every master node VM to it.
 *
 * @param publicIp     public IP the rule is created on
 * @param network      cluster network
 * @param account      owning account
 * @param clusterVMIds IDs of cluster VMs; the first masterNodeCount entries are masters
 * @param port         public and private port to balance
 * @throws NetworkRuleConflictException        if the rule conflicts with an existing one
 * @throws InsufficientAddressCapacityException if no address capacity is available
 */
private void provisionLoadBalancerRule(final IpAddress publicIp, final Network network,
                                       final Account account, final List<Long> clusterVMIds, final int port) throws NetworkRuleConflictException,
        InsufficientAddressCapacityException {
    final LoadBalancer lbRule = lbService.createPublicLoadBalancerRule(null, "api-lb", "LB rule for API access",
            port, port, port, port,
            publicIp.getId(), NetUtils.TCP_PROTO, "roundrobin", network.getId(),
            account.getId(), false, NetUtils.TCP_PROTO, true);
    // Map each master VM's ID to its private IPv4 address on the cluster network.
    final Map<Long, List<String>> vmIdToIpList = new HashMap<>();
    for (int masterIndex = 0; masterIndex < kubernetesCluster.getMasterNodeCount(); ++masterIndex) {
        final Long masterVmId = clusterVMIds.get(masterIndex);
        final Nic masterVmNic = networkModel.getNicInNetwork(masterVmId, kubernetesCluster.getNetworkId());
        final List<String> privateIps = new ArrayList<>();
        privateIps.add(masterVmNic.getIPv4Address());
        vmIdToIpList.put(masterVmId, privateIps);
    }
    lbService.assignToLoadBalancer(lbRule.getId(), null, vmIdToIpList);
}
/**
 * Sets up network rules for the Kubernetes cluster (isolated networks only).
 * Opens up firewall port CLUSTER_API_PORT, the secure port on which the
 * Kubernetes API server is running, and creates a load balancing rule to
 * forward public IP traffic to the master VMs' private IPs.
 * Opens up firewall ports NODES_DEFAULT_START_SSH_PORT to
 * NODES_DEFAULT_START_SSH_PORT+n for SSH access, and creates port-forwarding
 * rules to forward public IP traffic to each node VM's private IP.
 * @param network the cluster network; shared networks are skipped (no rules needed)
 * @param clusterVMs all VMs of the cluster (masters first)
 * @throws ManagementServerException if any firewall, LB or port-forwarding rule fails
 */
private void setupKubernetesClusterNetworkRules(Network network, List<UserVm> clusterVMs) throws ManagementServerException {
    if (!Network.GuestType.Isolated.equals(network.getGuestType())) {
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug(String.format("Network ID: %s for Kubernetes cluster ID: %s is not an isolated network, therefore, no need for network rules", network.getUuid(), kubernetesCluster.getUuid()));
        }
        return;
    }
    List<Long> clusterVMIds = new ArrayList<>();
    for (UserVm vm : clusterVMs) {
        clusterVMIds.add(vm.getId());
    }
    // All rules hang off the network's source NAT IP.
    IpAddress publicIp = getSourceNatIp(network);
    if (publicIp == null) {
        throw new ManagementServerException(String.format("No source NAT IP addresses found for network ID: %s, Kubernetes cluster ID: %s", network.getUuid(), kubernetesCluster.getUuid()));
    }
    // Firewall rule for API access on CLUSTER_API_PORT
    try {
        provisionFirewallRules(publicIp, owner, CLUSTER_API_PORT, CLUSTER_API_PORT);
        if (LOGGER.isInfoEnabled()) {
            LOGGER.info(String.format("Provisioned firewall rule to open up port %d on %s for Kubernetes cluster ID: %s",
                    CLUSTER_API_PORT, publicIp.getAddress().addr(), kubernetesCluster.getUuid()));
        }
    } catch (NoSuchFieldException | IllegalAccessException | ResourceUnavailableException | NetworkRuleConflictException e) {
        throw new ManagementServerException(String.format("Failed to provision firewall rules for API access for the Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), e);
    }
    // Firewall rules for SSH access, one port per cluster VM
    try {
        int endPort = CLUSTER_NODES_DEFAULT_START_SSH_PORT + clusterVMs.size() - 1;
        provisionFirewallRules(publicIp, owner, CLUSTER_NODES_DEFAULT_START_SSH_PORT, endPort);
        if (LOGGER.isInfoEnabled()) {
            LOGGER.info(String.format("Provisioned firewall rule to open up port %d to %d on %s for Kubernetes cluster ID: %s", CLUSTER_NODES_DEFAULT_START_SSH_PORT, endPort, publicIp.getAddress().addr(), kubernetesCluster.getUuid()));
        }
    } catch (NoSuchFieldException | IllegalAccessException | ResourceUnavailableException | NetworkRuleConflictException e) {
        throw new ManagementServerException(String.format("Failed to provision firewall rules for SSH access for the Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), e);
    }
    // Load balancer rule for API access for master node VMs
    try {
        provisionLoadBalancerRule(publicIp, network, owner, clusterVMIds, CLUSTER_API_PORT);
    } catch (NetworkRuleConflictException | InsufficientAddressCapacityException e) {
        throw new ManagementServerException(String.format("Failed to provision load balancer rule for API access for the Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), e);
    }
    // Port forwarding rule for SSH access on each node VM
    try {
        provisionSshPortForwardingRules(publicIp, network, owner, clusterVMIds, CLUSTER_NODES_DEFAULT_START_SSH_PORT);
    } catch (ResourceUnavailableException | NetworkRuleConflictException e) {
        throw new ManagementServerException(String.format("Failed to activate SSH port forwarding rules for the Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), e);
    }
}
/**
 * Starts every VM of the Kubernetes cluster. Individual start failures are
 * logged and tolerated so the remaining VMs still get a start attempt; a
 * second pass then verifies every VM actually reached the Running state and
 * fails the operation otherwise.
 */
private void startKubernetesClusterVMs() {
    List<UserVm> clusterVms = getKubernetesClusterVMs();
    for (final UserVm vm : clusterVms) {
        if (vm == null) {
            logTransitStateAndThrow(Level.ERROR, String.format("Failed to start all VMs in Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed);
        }
        try {
            startKubernetesVM(vm);
        } catch (ManagementServerException ex) {
            // Log the exception as a throwable (keeps the stack trace) instead
            // of appending it to the message string.
            LOGGER.warn(String.format("Failed to start VM ID: %s in Kubernetes cluster ID: %s", vm.getUuid(), kubernetesCluster.getUuid()), ex);
            // Don't bail out here; proceed so the rest of the VMs still get a start attempt.
        }
    }
    // Verification pass: every VM must be Running or the whole start fails.
    for (final UserVm userVm : clusterVms) {
        UserVm vm = userVmDao.findById(userVm.getId());
        if (vm == null || !vm.getState().equals(VirtualMachine.State.Running)) {
            logTransitStateAndThrow(Level.ERROR, String.format("Failed to start all VMs in Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed);
        }
    }
}
/**
 * Checks whether kube-config for the cluster is available, fetching it over
 * SSH from the cluster and caching it (Base64-encoded) in the cluster
 * details under "kubeConfigData" on success.
 *
 * @param timeoutTime absolute time (millis) until which the fetch may retry
 * @return true if kube-config is cached or was successfully retrieved
 */
private boolean isKubernetesClusterKubeConfigAvailable(final long timeoutTime) {
    if (Strings.isNullOrEmpty(publicIpAddress)) {
        // No public IP known: fall back to a previously cached kube-config, if any.
        KubernetesClusterDetailsVO kubeConfigDetail = kubernetesClusterDetailsDao.findDetail(kubernetesCluster.getId(), "kubeConfigData");
        if (kubeConfigDetail != null && !Strings.isNullOrEmpty(kubeConfigDetail.getValue())) {
            return true;
        }
        // NOTE(review): when nothing is cached the SSH fetch below still runs
        // with an empty publicIpAddress — confirm this is intended.
    }
    String kubeConfig = KubernetesClusterUtil.getKubernetesClusterConfig(kubernetesCluster, publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, sshKeyFile, timeoutTime);
    if (!Strings.isNullOrEmpty(kubeConfig)) {
        final String masterVMPrivateIpAddress = getMasterVmPrivateIp();
        if (!Strings.isNullOrEmpty(masterVMPrivateIpAddress)) {
            // Rewrite the API server address from the master's private IP to the
            // cluster's public IP so the config is usable from outside the network.
            kubeConfig = kubeConfig.replace(String.format("server: https://%s:%d", masterVMPrivateIpAddress, CLUSTER_API_PORT),
                    String.format("server: https://%s:%d", publicIpAddress, CLUSTER_API_PORT));
        }
        // Cache the config so later calls don't need SSH access to the cluster.
        kubernetesClusterDetailsDao.addDetail(kubernetesCluster.getId(), "kubeConfigData", Base64.encodeBase64String(kubeConfig.getBytes(StringUtils.getPreferredCharset())), false);
        return true;
    }
    return false;
}
/**
 * Checks whether the Kubernetes Dashboard service of the cluster is running,
 * caching a positive result in the cluster details under
 * "dashboardServiceRunning".
 *
 * @param onCreate    true when called during cluster creation; skips the cache lookup
 * @param timeoutTime absolute time (millis) until which the probe may retry
 * @return true if the Dashboard service is (or was previously recorded as) running
 */
private boolean isKubernetesClusterDashboardServiceRunning(final boolean onCreate, final Long timeoutTime) {
    if (!onCreate) {
        // For an existing cluster a previously recorded positive result is trusted.
        final KubernetesClusterDetailsVO cachedDetail = kubernetesClusterDetailsDao.findDetail(kubernetesCluster.getId(), "dashboardServiceRunning");
        if (cachedDetail != null && Boolean.parseBoolean(cachedDetail.getValue())) {
            return true;
        }
    }
    final boolean dashboardRunning = KubernetesClusterUtil.isKubernetesClusterDashboardServiceRunning(kubernetesCluster, publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, sshKeyFile, timeoutTime, 15000);
    if (!dashboardRunning) {
        return false;
    }
    // Remember the positive result so subsequent checks can skip the probe.
    kubernetesClusterDetailsDao.addDetail(kubernetesCluster.getId(), "dashboardServiceRunning", String.valueOf(true), false);
    return true;
}
/**
 * Persists the cluster's API endpoint (public IP and API port) on its
 * database entry.
 */
private void updateKubernetesClusterEntryEndpoint() {
    final KubernetesClusterVO clusterEntry = kubernetesClusterDao.findById(kubernetesCluster.getId());
    final String endpoint = String.format("https://%s:%d/", publicIpAddress, CLUSTER_API_PORT);
    clusterEntry.setEndpoint(endpoint);
    kubernetesClusterDao.update(kubernetesCluster.getId(), clusterEntry);
}
/**
 * Provisions and starts a newly created Kubernetes cluster: plans capacity,
 * starts the cluster network, provisions the master, additional master and
 * node VMs, sets up network rules, attaches the binaries ISO and waits until
 * the cluster API, the nodes, kube-config and the Dashboard are all usable.
 * Each failure transitions the cluster to a failed state and throws.
 *
 * @return true when the cluster reaches a usable state
 */
public boolean startKubernetesClusterOnCreate() {
    init();
    if (LOGGER.isInfoEnabled()) {
        LOGGER.info(String.format("Starting Kubernetes cluster ID: %s", kubernetesCluster.getUuid()));
    }
    final long startTimeoutTime = System.currentTimeMillis() + KubernetesClusterService.KubernetesClusterStartTimeout.value() * 1000;
    stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.StartRequested);
    // Plan a deploy destination with sufficient capacity for the cluster.
    DeployDestination dest = null;
    try {
        dest = plan();
    } catch (InsufficientCapacityException e) {
        logTransitStateAndThrow(Level.ERROR, String.format("Provisioning the cluster failed due to insufficient capacity in the Kubernetes cluster: %s", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed, e);
    }
    Network network = null;
    try {
        network = startKubernetesClusterNetwork(dest);
    } catch (ManagementServerException e) {
        logTransitStateAndThrow(Level.ERROR, String.format("Failed to start Kubernetes cluster ID: %s as its network cannot be started", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed, e);
    }
    Pair<String, Integer> publicIpSshPort = getKubernetesClusterServerIpSshPort(null);
    publicIpAddress = publicIpSshPort.first();
    if (Strings.isNullOrEmpty(publicIpAddress) &&
            (Network.GuestType.Isolated.equals(network.getGuestType()) || kubernetesCluster.getMasterNodeCount() > 1)) { // Shared network, single-master cluster won't have an IP yet
        logTransitStateAndThrow(Level.ERROR, String.format("Failed to start Kubernetes cluster ID: %s as no public IP found for the cluster" , kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed);
    }
    // Provision the first master VM, then retry the public IP lookup using it
    // (covers the shared network, single-master case above).
    List<UserVm> clusterVMs = new ArrayList<>();
    UserVm k8sMasterVM = null;
    try {
        k8sMasterVM = provisionKubernetesClusterMasterVm(network, publicIpAddress);
    } catch (CloudRuntimeException | ManagementServerException | ResourceUnavailableException | InsufficientCapacityException e) {
        logTransitStateAndThrow(Level.ERROR, String.format("Provisioning the master VM failed in the Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed, e);
    }
    clusterVMs.add(k8sMasterVM);
    if (Strings.isNullOrEmpty(publicIpAddress)) {
        publicIpSshPort = getKubernetesClusterServerIpSshPort(k8sMasterVM);
        publicIpAddress = publicIpSshPort.first();
        if (Strings.isNullOrEmpty(publicIpAddress)) {
            logTransitStateAndThrow(Level.WARN, String.format("Failed to start Kubernetes cluster ID: %s as no public IP found for the cluster", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed);
        }
    }
    // Provision the remaining master VMs (multi-master clusters) and node VMs.
    try {
        List<UserVm> additionalMasterVMs = provisionKubernetesClusterAdditionalMasterVms(publicIpAddress);
        clusterVMs.addAll(additionalMasterVMs);
    } catch (CloudRuntimeException | ManagementServerException | ResourceUnavailableException | InsufficientCapacityException e) {
        logTransitStateAndThrow(Level.ERROR, String.format("Provisioning additional master VM failed in the Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed, e);
    }
    try {
        List<UserVm> nodeVMs = provisionKubernetesClusterNodeVms(kubernetesCluster.getNodeCount(), publicIpAddress);
        clusterVMs.addAll(nodeVMs);
    } catch (CloudRuntimeException | ManagementServerException | ResourceUnavailableException | InsufficientCapacityException e) {
        logTransitStateAndThrow(Level.ERROR, String.format("Provisioning node VM failed in the Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed, e);
    }
    if (LOGGER.isInfoEnabled()) {
        LOGGER.info(String.format("Kubernetes cluster ID: %s VMs successfully provisioned", kubernetesCluster.getUuid()));
    }
    // Firewall, load balancing and port-forwarding rules for API/SSH access.
    try {
        setupKubernetesClusterNetworkRules(network, clusterVMs);
    } catch (ManagementServerException e) {
        logTransitStateAndThrow(Level.ERROR, String.format("Failed to setup Kubernetes cluster ID: %s, unable to setup network rules", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed, e);
    }
    attachIsoKubernetesVMs(clusterVMs);
    if (!KubernetesClusterUtil.isKubernetesClusterMasterVmRunning(kubernetesCluster, publicIpAddress, publicIpSshPort.second(), startTimeoutTime)) {
        String msg = String.format("Failed to setup Kubernetes cluster ID: %s in usable state as unable to access master node VMs of the cluster", kubernetesCluster.getUuid());
        if (kubernetesCluster.getMasterNodeCount() > 1 && Network.GuestType.Shared.equals(network.getGuestType())) {
            // Multi-master clusters on shared networks rely on an external
            // load balancer for SSH/API forwarding; hint at that in the error.
            msg = String.format("%s. Make sure external load-balancer has port forwarding rules for SSH access on ports %d-%d and API access on port %d",
                    msg,
                    CLUSTER_NODES_DEFAULT_START_SSH_PORT,
                    CLUSTER_NODES_DEFAULT_START_SSH_PORT + kubernetesCluster.getTotalNodeCount() - 1,
                    CLUSTER_API_PORT);
        }
        logTransitStateDetachIsoAndThrow(Level.ERROR, msg, kubernetesCluster, clusterVMs, KubernetesCluster.Event.CreateFailed, null);
    }
    // Wait for the Kubernetes API server endpoint to come up.
    boolean k8sApiServerSetup = KubernetesClusterUtil.isKubernetesClusterServerRunning(kubernetesCluster, publicIpAddress, CLUSTER_API_PORT, startTimeoutTime, 15000);
    if (!k8sApiServerSetup) {
        logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to setup Kubernetes cluster ID: %s in usable state as unable to provision API endpoint for the cluster", kubernetesCluster.getUuid()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.CreateFailed, null);
    }
    sshPort = publicIpSshPort.second();
    updateKubernetesClusterEntryEndpoint();
    // All nodes must report Ready before the ISO is detached.
    boolean readyNodesCountValid = KubernetesClusterUtil.validateKubernetesClusterReadyNodesCount(kubernetesCluster, publicIpAddress, sshPort,
            CLUSTER_NODE_VM_USER, sshKeyFile, startTimeoutTime, 15000);
    detachIsoKubernetesVMs(clusterVMs);
    if (!readyNodesCountValid) {
        logTransitStateAndThrow(Level.ERROR, String.format("Failed to setup Kubernetes cluster ID: %s as it does not have desired number of nodes in ready state", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed);
    }
    if (!isKubernetesClusterKubeConfigAvailable(startTimeoutTime)) {
        logTransitStateAndThrow(Level.ERROR, String.format("Failed to setup Kubernetes cluster ID: %s in usable state as unable to retrieve kube-config for the cluster", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed);
    }
    if (!isKubernetesClusterDashboardServiceRunning(true, startTimeoutTime)) {
        logTransitStateAndThrow(Level.ERROR, String.format("Failed to setup Kubernetes cluster ID: %s in usable state as unable to get Dashboard service running for the cluster", kubernetesCluster.getUuid()), kubernetesCluster.getId(),KubernetesCluster.Event.OperationFailed);
    }
    stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationSucceeded);
    return true;
}
/**
 * Starts a previously stopped Kubernetes cluster: starts all its VMs,
 * validates the stored API endpoint and waits until the API server,
 * kube-config and Dashboard service are usable again.
 *
 * @return true when the cluster is running and usable
 * @throws CloudRuntimeException when the cluster cannot reach a usable state
 */
public boolean startStoppedKubernetesCluster() throws CloudRuntimeException {
    init();
    if (LOGGER.isInfoEnabled()) {
        LOGGER.info(String.format("Starting Kubernetes cluster ID: %s", kubernetesCluster.getUuid()));
    }
    final long startTimeoutTime = System.currentTimeMillis() + KubernetesClusterService.KubernetesClusterStartTimeout.value() * 1000;
    stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.StartRequested);
    startKubernetesClusterVMs();
    try {
        // Validate the stored endpoint URL and make sure its host resolves;
        // the resolved address itself is not used (dropped the unused local).
        InetAddress.getByName(new URL(kubernetesCluster.getEndpoint()).getHost());
    } catch (MalformedURLException | UnknownHostException ex) {
        // Pass the cause along so the failure log keeps the original exception.
        logTransitStateAndThrow(Level.ERROR, String.format("Kubernetes cluster ID: %s has invalid API endpoint. Can not verify if cluster is in ready state", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed, ex);
    }
    Pair<String, Integer> sshIpPort = getKubernetesClusterServerIpSshPort(null);
    publicIpAddress = sshIpPort.first();
    sshPort = sshIpPort.second();
    if (Strings.isNullOrEmpty(publicIpAddress)) {
        logTransitStateAndThrow(Level.ERROR, String.format("Failed to start Kubernetes cluster ID: %s as no public IP found for the cluster" , kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed);
    }
    if (!KubernetesClusterUtil.isKubernetesClusterServerRunning(kubernetesCluster, publicIpAddress, CLUSTER_API_PORT, startTimeoutTime, 15000)) {
        logTransitStateAndThrow(Level.ERROR, String.format("Failed to start Kubernetes cluster ID: %s in usable state", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed);
    }
    if (!isKubernetesClusterKubeConfigAvailable(startTimeoutTime)) {
        logTransitStateAndThrow(Level.ERROR, String.format("Failed to start Kubernetes cluster ID: %s in usable state as unable to retrieve kube-config for the cluster", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed);
    }
    if (!isKubernetesClusterDashboardServiceRunning(false, startTimeoutTime)) {
        logTransitStateAndThrow(Level.ERROR, String.format("Failed to start Kubernetes cluster ID: %s in usable state as unable to get Dashboard service running for the cluster", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed);
    }
    stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationSucceeded);
    if (LOGGER.isInfoEnabled()) {
        LOGGER.info(String.format("Kubernetes cluster ID: %s successfully started", kubernetesCluster.getUuid()));
    }
    return true;
}
/**
 * Attempts to reconcile a Kubernetes cluster that is in Alert state by
 * verifying that all of its VMs exist, the expected number of nodes report
 * Ready, and the API server, kube-config and Dashboard service are
 * reachable. On success the cluster is transitioned back to Running.
 *
 * @return true if the cluster was verified healthy and marked running
 */
public boolean reconcileAlertCluster() {
    init();
    // Short, fixed timeout (3 minutes) for the health probes below.
    final long startTimeoutTime = System.currentTimeMillis() + 3 * 60 * 1000;
    List<KubernetesClusterVmMapVO> vmMapVOList = getKubernetesClusterVMMaps();
    if (CollectionUtils.isEmpty(vmMapVOList) || vmMapVOList.size() != kubernetesCluster.getTotalNodeCount()) {
        return false;
    }
    Pair<String, Integer> sshIpPort = getKubernetesClusterServerIpSshPort(null);
    publicIpAddress = sshIpPort.first();
    sshPort = sshIpPort.second();
    if (Strings.isNullOrEmpty(publicIpAddress)) {
        return false;
    }
    long actualNodeCount = 0;
    try {
        actualNodeCount = KubernetesClusterUtil.getKubernetesClusterReadyNodesCount(kubernetesCluster, publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, sshKeyFile);
    } catch (Exception e) {
        // Best-effort probe: any failure just means the cluster is not yet
        // healthy, but log it instead of swallowing silently.
        LOGGER.warn(String.format("Failed to retrieve ready nodes count for Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), e);
        return false;
    }
    if (kubernetesCluster.getTotalNodeCount() != actualNodeCount) {
        return false;
    }
    // Note: publicIpAddress (== sshIpPort.first()) was already null-checked
    // above, so no second emptiness check is needed here.
    if (!KubernetesClusterUtil.isKubernetesClusterServerRunning(kubernetesCluster, publicIpAddress,
            KubernetesClusterActionWorker.CLUSTER_API_PORT, startTimeoutTime, 0)) {
        return false;
    }
    updateKubernetesClusterEntryEndpoint();
    if (!isKubernetesClusterKubeConfigAvailable(startTimeoutTime)) {
        return false;
    }
    if (!isKubernetesClusterDashboardServiceRunning(false, startTimeoutTime)) {
        return false;
    }
    // mark the cluster to be running
    stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.RecoveryRequested);
    stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationSucceeded);
    return true;
}
}

View File

@ -0,0 +1,62 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.kubernetes.cluster.actionworkers;
import java.util.List;
import org.apache.log4j.Level;
import com.cloud.exception.ConcurrentOperationException;
import com.cloud.kubernetes.cluster.KubernetesCluster;
import com.cloud.kubernetes.cluster.KubernetesClusterManagerImpl;
import com.cloud.uservm.UserVm;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.vm.VirtualMachine;
public class KubernetesClusterStopWorker extends KubernetesClusterActionWorker {
public KubernetesClusterStopWorker(final KubernetesCluster kubernetesCluster, final KubernetesClusterManagerImpl clusterManager) {
super(kubernetesCluster, clusterManager);
}
public boolean stop() throws CloudRuntimeException {
init();
if (LOGGER.isInfoEnabled()) {
LOGGER.info(String.format("Stopping Kubernetes cluster ID: %s", kubernetesCluster.getUuid()));
}
stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.StopRequested);
List<UserVm> clusterVMs = getKubernetesClusterVMs();
for (UserVm vm : clusterVMs) {
if (vm == null) {
logTransitStateAndThrow(Level.ERROR, String.format("Failed to find all VMs in Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed);
}
try {
userVmService.stopVirtualMachine(vm.getId(), false);
} catch (ConcurrentOperationException ex) {
LOGGER.warn(String.format("Failed to stop VM ID: %s in Kubernetes cluster ID: %s", vm.getUuid(), kubernetesCluster.getUuid()), ex);
}
}
for (final UserVm userVm : clusterVMs) {
UserVm vm = userVmDao.findById(userVm.getId());
if (vm == null || !vm.getState().equals(VirtualMachine.State.Stopped)) {
logTransitStateAndThrow(Level.ERROR, String.format("Failed to stop all VMs in Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed);
}
}
stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationSucceeded);
return true;
}
}

View File

@ -0,0 +1,169 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.kubernetes.cluster.actionworkers;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.collections.CollectionUtils;
import org.apache.log4j.Level;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.kubernetes.cluster.KubernetesCluster;
import com.cloud.kubernetes.cluster.KubernetesClusterManagerImpl;
import com.cloud.kubernetes.cluster.KubernetesClusterService;
import com.cloud.kubernetes.cluster.KubernetesClusterVO;
import com.cloud.kubernetes.cluster.utils.KubernetesClusterUtil;
import com.cloud.kubernetes.version.KubernetesSupportedVersion;
import com.cloud.kubernetes.version.KubernetesVersionManagerImpl;
import com.cloud.uservm.UserVm;
import com.cloud.utils.Pair;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.ssh.SshHelper;
import com.google.common.base.Strings;
/**
 * Action worker that upgrades a Kubernetes cluster to a new supported
 * version by draining, upgrading and uncordoning each node in turn
 * (master first).
 */
public class KubernetesClusterUpgradeWorker extends KubernetesClusterActionWorker {

    // Cluster VMs in processing order; index 0 is the master node.
    private List<UserVm> clusterVMs = new ArrayList<>();
    // Target Kubernetes version for the upgrade.
    private KubernetesSupportedVersion upgradeVersion;
    // Temporary copy of the upgrade shell script that is scp'ed to each node.
    private File upgradeScriptFile;
    // Absolute wall-clock time (millis) after which the upgrade is timed out.
    private long upgradeTimeoutTime;

    public KubernetesClusterUpgradeWorker(final KubernetesCluster kubernetesCluster,
                                          final KubernetesSupportedVersion upgradeVersion,
                                          final KubernetesClusterManagerImpl clusterManager) {
        super(kubernetesCluster, clusterManager);
        this.upgradeVersion = upgradeVersion;
    }

    /**
     * Writes the bundled upgrade script resource to a temporary file so it
     * can be copied onto the cluster nodes.
     */
    private void retrieveUpgradeScriptFile() {
        try {
            String upgradeScriptData = readResourceFile("/script/upgrade-kubernetes.sh");
            upgradeScriptFile = File.createTempFile("upgrade-kuberntes", ".sh");
            // try-with-resources guarantees the writer (and its file handle) is
            // closed even if write() throws, fixing a resource leak.
            try (BufferedWriter upgradeScriptFileWriter = new BufferedWriter(new FileWriter(upgradeScriptFile))) {
                upgradeScriptFileWriter.write(upgradeScriptData);
            }
        } catch (IOException e) {
            logAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster ID: %s, unable to prepare upgrade script", kubernetesCluster.getUuid()), e);
        }
    }

    /**
     * Copies the upgrade script to the node VM at the given index and runs it
     * there over SSH.
     *
     * @param vm    node VM to upgrade
     * @param index position of the VM in the cluster (0 = master)
     * @return pair of success flag and command output
     * @throws Exception on SCP/SSH failures
     */
    private Pair<Boolean, String> runInstallScriptOnVM(final UserVm vm, final int index) throws Exception {
        // With the default SSH port (22) non-master nodes are reached on their
        // private IPs; otherwise per-node ports (base port + index) on the
        // public IP are used. NOTE(review): confirm this matches the
        // port-forwarding scheme set up by the cluster's network rules.
        int nodeSshPort = sshPort == 22 ? sshPort : sshPort + index;
        String nodeAddress = (index > 0 && sshPort == 22) ? vm.getPrivateIpAddress() : publicIpAddress;
        SshHelper.scpTo(nodeAddress, nodeSshPort, CLUSTER_NODE_VM_USER, sshKeyFile, null,
                "~/", upgradeScriptFile.getAbsolutePath(), "0755");
        // Script arguments: target version, whether this node is the master,
        // whether the target version is older than 1.15.0, and whether the
        // hypervisor is VMware.
        String cmdStr = String.format("sudo ./%s %s %s %s %s",
                upgradeScriptFile.getName(),
                upgradeVersion.getSemanticVersion(),
                index == 0 ? "true" : "false",
                KubernetesVersionManagerImpl.compareSemanticVersions(upgradeVersion.getSemanticVersion(), "1.15.0") < 0 ? "true" : "false",
                Hypervisor.HypervisorType.VMware.equals(vm.getHypervisorType()));
        return SshHelper.sshExecute(publicIpAddress, nodeSshPort, CLUSTER_NODE_VM_USER, sshKeyFile, null,
                cmdStr,
                10000, 10000, 10 * 60 * 1000);
    }

    /**
     * Upgrades each cluster node in order: drains it, runs the upgrade script,
     * uncordons it and, for the master (index 0), waits until it is Ready
     * again. Any failure or timeout detaches the ISO, fails the operation and
     * throws.
     */
    private void upgradeKubernetesClusterNodes() {
        Pair<Boolean, String> result = null;
        for (int i = 0; i < clusterVMs.size(); ++i) {
            UserVm vm = clusterVMs.get(i);
            String hostName = vm.getHostName();
            if (!Strings.isNullOrEmpty(hostName)) {
                hostName = hostName.toLowerCase();
            }
            result = null;
            if (LOGGER.isInfoEnabled()) {
                LOGGER.info(String.format("Upgrading node on VM ID: %s in Kubernetes cluster ID: %s with Kubernetes version(%s) ID: %s",
                        vm.getUuid(), kubernetesCluster.getUuid(), upgradeVersion.getSemanticVersion(), upgradeVersion.getUuid()));
            }
            // Drain the node so workloads are moved off before upgrading it.
            try {
                result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, sshKeyFile, null,
                        String.format("sudo kubectl drain %s --ignore-daemonsets --delete-local-data", hostName),
                        10000, 10000, 60000);
            } catch (Exception e) {
                logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster ID: %s, unable to drain Kubernetes node on VM ID: %s", kubernetesCluster.getUuid(), vm.getUuid()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, e);
            }
            if (!result.first()) {
                logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster ID: %s, unable to drain Kubernetes node on VM ID: %s", kubernetesCluster.getUuid(), vm.getUuid()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, null);
            }
            if (System.currentTimeMillis() > upgradeTimeoutTime) {
                logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster ID: %s, upgrade action timed out", kubernetesCluster.getUuid()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, null);
            }
            // Run the upgrade script on the drained node.
            try {
                result = runInstallScriptOnVM(vm, i);
            } catch (Exception e) {
                logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster ID: %s, unable to upgrade Kubernetes node on VM ID: %s", kubernetesCluster.getUuid(), vm.getUuid()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, e);
            }
            if (!result.first()) {
                logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster ID: %s, unable to upgrade Kubernetes node on VM ID: %s", kubernetesCluster.getUuid(), vm.getUuid()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, null);
            }
            if (System.currentTimeMillis() > upgradeTimeoutTime) {
                logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster ID: %s, upgrade action timed out", kubernetesCluster.getUuid()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, null);
            }
            // Put the node back into scheduling rotation.
            if (!KubernetesClusterUtil.uncordonKubernetesClusterNode(kubernetesCluster, publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, getManagementServerSshPublicKeyFile(), vm, upgradeTimeoutTime, 15000)) {
                logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster ID: %s, unable to uncordon Kubernetes node on VM ID: %s", kubernetesCluster.getUuid(), vm.getUuid()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, null);
            }
            if (i == 0) { // Wait for master to get in Ready state
                if (!KubernetesClusterUtil.isKubernetesClusterNodeReady(kubernetesCluster, publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, getManagementServerSshPublicKeyFile(), hostName, upgradeTimeoutTime, 15000)) {
                    logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster ID: %s, unable to get master Kubernetes node on VM ID: %s in ready state", kubernetesCluster.getUuid(), vm.getUuid()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, null);
                }
            }
            if (LOGGER.isInfoEnabled()) {
                LOGGER.info(String.format("Successfully upgraded node on VM ID: %s in Kubernetes cluster ID: %s with Kubernetes version(%s) ID: %s",
                        vm.getUuid(), kubernetesCluster.getUuid(), upgradeVersion.getSemanticVersion(), upgradeVersion.getUuid()));
            }
        }
    }

    /**
     * Upgrades the whole cluster to {@code upgradeVersion}: resolves the
     * cluster's public IP, prepares the upgrade script, attaches the new
     * version's ISO, upgrades every node and finally persists the new version
     * on the cluster entry.
     *
     * @return true if the upgrade completed and the cluster entry was updated
     * @throws CloudRuntimeException on any upgrade failure
     */
    public boolean upgradeCluster() throws CloudRuntimeException {
        init();
        if (LOGGER.isInfoEnabled()) {
            LOGGER.info(String.format("Upgrading Kubernetes cluster ID: %s", kubernetesCluster.getUuid()));
        }
        upgradeTimeoutTime = System.currentTimeMillis() + KubernetesClusterService.KubernetesClusterUpgradeTimeout.value() * 1000;
        Pair<String, Integer> publicIpSshPort = getKubernetesClusterServerIpSshPort(null);
        publicIpAddress = publicIpSshPort.first();
        sshPort = publicIpSshPort.second();
        if (Strings.isNullOrEmpty(publicIpAddress)) {
            logAndThrow(Level.ERROR, String.format("Upgrade failed for Kubernetes cluster ID: %s, unable to retrieve associated public IP", kubernetesCluster.getUuid()));
        }
        clusterVMs = getKubernetesClusterVMs();
        if (CollectionUtils.isEmpty(clusterVMs)) {
            logAndThrow(Level.ERROR, String.format("Upgrade failed for Kubernetes cluster ID: %s, unable to retrieve VMs for cluster", kubernetesCluster.getUuid()));
        }
        retrieveUpgradeScriptFile();
        stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.UpgradeRequested);
        // ISO of the target version supplies the binaries used by the script.
        attachIsoKubernetesVMs(clusterVMs, upgradeVersion);
        upgradeKubernetesClusterNodes();
        detachIsoKubernetesVMs(clusterVMs);
        // Persist the new version on the cluster's DB entry.
        KubernetesClusterVO kubernetesClusterVO = kubernetesClusterDao.findById(kubernetesCluster.getId());
        kubernetesClusterVO.setKubernetesVersionId(upgradeVersion.getId());
        boolean updated = kubernetesClusterDao.update(kubernetesCluster.getId(), kubernetesClusterVO);
        if (!updated) {
            stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed);
        } else {
            stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationSucceeded);
        }
        return updated;
    }
}

View File

@ -0,0 +1,34 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.kubernetes.cluster.dao;
import java.util.List;
import com.cloud.kubernetes.cluster.KubernetesCluster;
import com.cloud.kubernetes.cluster.KubernetesClusterVO;
import com.cloud.utils.db.GenericDao;
import com.cloud.utils.fsm.StateDao;
/**
 * DAO for {@link KubernetesClusterVO} entities, including state-machine
 * transitions via {@link StateDao}.
 */
public interface KubernetesClusterDao extends GenericDao<KubernetesClusterVO, Long>,
        StateDao<KubernetesCluster.State, KubernetesCluster.Event, KubernetesCluster> {
    /** Lists all Kubernetes clusters owned by the given account. */
    List<KubernetesClusterVO> listByAccount(long accountId);
    /** Lists Kubernetes clusters whose garbage-collection flag is set. */
    List<KubernetesClusterVO> findKubernetesClustersToGarbageCollect();
    /** Lists Kubernetes clusters currently in the given state. */
    List<KubernetesClusterVO> findKubernetesClustersInState(KubernetesCluster.State state);
    /** Lists Kubernetes clusters deployed on the given network. */
    List<KubernetesClusterVO> listByNetworkId(long networkId);
    /** Lists Kubernetes clusters using the given Kubernetes version. */
    List<KubernetesClusterVO> listAllByKubernetesVersion(long kubernetesVersionId);
}

View File

@ -0,0 +1,112 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.kubernetes.cluster.dao;
import java.util.List;
import org.springframework.stereotype.Component;
import com.cloud.kubernetes.cluster.KubernetesCluster;
import com.cloud.kubernetes.cluster.KubernetesClusterVO;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.TransactionLegacy;
@Component
public class KubernetesClusterDaoImpl extends GenericDaoBase<KubernetesClusterVO, Long> implements KubernetesClusterDao {

    private final SearchBuilder<KubernetesClusterVO> AccountIdSearch;
    private final SearchBuilder<KubernetesClusterVO> GarbageCollectedSearch;
    private final SearchBuilder<KubernetesClusterVO> StateSearch;
    private final SearchBuilder<KubernetesClusterVO> SameNetworkSearch;
    private final SearchBuilder<KubernetesClusterVO> KubernetesVersionSearch;

    public KubernetesClusterDaoImpl() {
        AccountIdSearch = createSearchBuilder();
        AccountIdSearch.and("account", AccountIdSearch.entity().getAccountId(), SearchCriteria.Op.EQ);
        AccountIdSearch.done();

        GarbageCollectedSearch = createSearchBuilder();
        GarbageCollectedSearch.and("gc", GarbageCollectedSearch.entity().isCheckForGc(), SearchCriteria.Op.EQ);
        GarbageCollectedSearch.and("state", GarbageCollectedSearch.entity().getState(), SearchCriteria.Op.EQ);
        GarbageCollectedSearch.done();

        StateSearch = createSearchBuilder();
        StateSearch.and("state", StateSearch.entity().getState(), SearchCriteria.Op.EQ);
        StateSearch.done();

        SameNetworkSearch = createSearchBuilder();
        SameNetworkSearch.and("network_id", SameNetworkSearch.entity().getNetworkId(), SearchCriteria.Op.EQ);
        SameNetworkSearch.done();

        KubernetesVersionSearch = createSearchBuilder();
        KubernetesVersionSearch.and("kubernetesVersionId", KubernetesVersionSearch.entity().getKubernetesVersionId(), SearchCriteria.Op.EQ);
        KubernetesVersionSearch.done();
    }

    /** Lists all Kubernetes clusters owned by the given account. */
    @Override
    public List<KubernetesClusterVO> listByAccount(long accountId) {
        SearchCriteria<KubernetesClusterVO> sc = AccountIdSearch.create();
        sc.setParameters("account", accountId);
        return listBy(sc, null);
    }

    /** Lists clusters flagged for garbage collection that are in the Destroying state. */
    @Override
    public List<KubernetesClusterVO> findKubernetesClustersToGarbageCollect() {
        SearchCriteria<KubernetesClusterVO> sc = GarbageCollectedSearch.create();
        sc.setParameters("gc", true);
        sc.setParameters("state", KubernetesCluster.State.Destroying);
        return listBy(sc);
    }

    /** Lists all clusters currently in the given state. */
    @Override
    public List<KubernetesClusterVO> findKubernetesClustersInState(KubernetesCluster.State state) {
        SearchCriteria<KubernetesClusterVO> sc = StateSearch.create();
        sc.setParameters("state", state);
        return listBy(sc);
    }

    /**
     * Persists a state transition for the given cluster inside a transaction.
     * NOTE(review): currentState/event/data are not re-validated here; the FSM caller is
     * assumed to have checked the transition before invoking this — confirm against StateMachine2 usage.
     *
     * @return true only when the underlying row update actually succeeded
     */
    @Override
    public boolean updateState(KubernetesCluster.State currentState, KubernetesCluster.Event event, KubernetesCluster.State nextState,
                               KubernetesCluster vo, Object data) {
        TransactionLegacy txn = TransactionLegacy.currentTxn();
        txn.start();
        KubernetesClusterVO cluster = (KubernetesClusterVO)vo;
        cluster.setState(nextState);
        // Propagate the real update result instead of unconditionally returning true,
        // so failed persistence is not reported as a successful state transition.
        boolean updated = super.update(cluster.getId(), cluster);
        txn.commit();
        return updated;
    }

    /** Lists all clusters deployed on the given network. */
    @Override
    public List<KubernetesClusterVO> listByNetworkId(long networkId) {
        SearchCriteria<KubernetesClusterVO> sc = SameNetworkSearch.create();
        sc.setParameters("network_id", networkId);
        return listBy(sc);
    }

    /** Lists all clusters using the given supported Kubernetes version. */
    @Override
    public List<KubernetesClusterVO> listAllByKubernetesVersion(long kubernetesVersionId) {
        SearchCriteria<KubernetesClusterVO> sc = KubernetesVersionSearch.create();
        sc.setParameters("kubernetesVersionId", kubernetesVersionId);
        return listBy(sc);
    }
}

View File

@ -0,0 +1,28 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.kubernetes.cluster.dao;
import org.apache.cloudstack.resourcedetail.ResourceDetailsDao;
import com.cloud.kubernetes.cluster.KubernetesClusterDetailsVO;
import com.cloud.utils.db.GenericDao;
/**
 * Data access object for {@link KubernetesClusterDetailsVO} key/value detail records
 * attached to Kubernetes clusters; inherits all operations from the generic detail DAOs.
 */
public interface KubernetesClusterDetailsDao extends GenericDao<KubernetesClusterDetailsVO, Long>, ResourceDetailsDao<KubernetesClusterDetailsVO> {
}

View File

@ -0,0 +1,32 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.kubernetes.cluster.dao;
import org.apache.cloudstack.resourcedetail.ResourceDetailsDaoBase;
import org.springframework.stereotype.Component;
import com.cloud.kubernetes.cluster.KubernetesClusterDetailsVO;
@Component
public class KubernetesClusterDetailsDaoImpl extends ResourceDetailsDaoBase<KubernetesClusterDetailsVO> implements KubernetesClusterDetailsDao {

    /**
     * Persists a single key/value detail for the given Kubernetes cluster resource,
     * honoring the display flag.
     */
    @Override
    public void addDetail(long resourceId, String key, String value, boolean display) {
        final KubernetesClusterDetailsVO detail = new KubernetesClusterDetailsVO(resourceId, key, value, display);
        super.addDetail(detail);
    }
}

View File

@ -0,0 +1,26 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.kubernetes.cluster.dao;
import com.cloud.kubernetes.cluster.KubernetesClusterVmMapVO;
import com.cloud.utils.db.GenericDao;
import java.util.List;
/**
 * Data access object for the mapping between Kubernetes clusters and the VMs that
 * form their nodes.
 */
public interface KubernetesClusterVmMapDao extends GenericDao<KubernetesClusterVmMapVO, Long> {

    /** Lists all VM map entries that belong to the given Kubernetes cluster. */
    // Redundant 'public' modifier removed: interface members are implicitly public.
    List<KubernetesClusterVmMapVO> listByClusterId(long clusterId);
}

View File

@ -0,0 +1,46 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.kubernetes.cluster.dao;
import java.util.List;
import org.springframework.stereotype.Component;
import com.cloud.kubernetes.cluster.KubernetesClusterVmMapVO;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
@Component
public class KubernetesClusterVmMapDaoImpl extends GenericDaoBase<KubernetesClusterVmMapVO, Long> implements KubernetesClusterVmMapDao {

    // Search for VM map entries by the Kubernetes cluster they belong to.
    private final SearchBuilder<KubernetesClusterVmMapVO> byClusterId;

    public KubernetesClusterVmMapDaoImpl() {
        byClusterId = createSearchBuilder();
        byClusterId.and("clusterId", byClusterId.entity().getClusterId(), SearchCriteria.Op.EQ);
        byClusterId.done();
    }

    /** Returns every VM mapping recorded for the given Kubernetes cluster. */
    @Override
    public List<KubernetesClusterVmMapVO> listByClusterId(long clusterId) {
        final SearchCriteria<KubernetesClusterVmMapVO> criteria = byClusterId.create();
        criteria.setParameters("clusterId", clusterId);
        return listBy(criteria, null);
    }
}

View File

@ -0,0 +1,311 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.kubernetes.cluster.utils;
import java.io.File;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.net.URL;
import org.apache.commons.io.IOUtils;
import org.apache.log4j.Logger;
import com.cloud.kubernetes.cluster.KubernetesCluster;
import com.cloud.uservm.UserVm;
import com.cloud.utils.Pair;
import com.cloud.utils.StringUtils;
import com.cloud.utils.ssh.SshHelper;
import com.google.common.base.Strings;
/**
 * Static helpers for probing and managing a deployed Kubernetes cluster over SSH / HTTP:
 * node readiness, add-on service status, kube-config retrieval, API availability and
 * token/certificate-key generation.
 */
public class KubernetesClusterUtil {

    protected static final Logger LOGGER = Logger.getLogger(KubernetesClusterUtil.class);

    /**
     * Checks once, over SSH, whether the named node reports "Ready" in 'kubectl get nodes'.
     *
     * @param kubernetesCluster cluster the node belongs to (used for logging only)
     * @param ipAddress SSH address of the host VM or load balancer
     * @param port SSH port
     * @param user SSH user
     * @param sshKeyFile SSH private key file
     * @param nodeName node to check; matched lower-cased, as the awk filter lower-cases it
     * @return true when the node is listed with status Ready
     * @throws Exception on SSH failure
     */
    public static boolean isKubernetesClusterNodeReady(final KubernetesCluster kubernetesCluster, String ipAddress, int port,
                                                       String user, File sshKeyFile, String nodeName) throws Exception {
        Pair<Boolean, String> result = SshHelper.sshExecute(ipAddress, port,
                user, sshKeyFile, null,
                String.format("sudo kubectl get nodes | awk '{if ($1 == \"%s\" && $2 == \"Ready\") print $1}'", nodeName.toLowerCase()),
                10000, 10000, 20000);
        // The awk filter above matches the lower-cased node name, so the output must be
        // compared against the lower-cased name as well; comparing the raw mixed-case
        // nodeName could never match and always reported the node as not ready.
        if (result.first() && nodeName.toLowerCase().equals(result.second().trim())) {
            return true;
        }
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug(String.format("Failed to retrieve status for node: %s in Kubernetes cluster ID: %s. Output: %s", nodeName, kubernetesCluster.getUuid(), result.second()));
        }
        return false;
    }

    /**
     * Polls {@link #isKubernetesClusterNodeReady(KubernetesCluster, String, int, String, File, String)}
     * until the node is Ready or timeoutTime (epoch millis) is reached, sleeping
     * waitDuration millis between attempts.
     */
    public static boolean isKubernetesClusterNodeReady(final KubernetesCluster kubernetesCluster, final String ipAddress, final int port,
                                                       final String user, final File sshKeyFile, final String nodeName,
                                                       final long timeoutTime, final int waitDuration) {
        while (System.currentTimeMillis() < timeoutTime) {
            boolean ready = false;
            try {
                ready = isKubernetesClusterNodeReady(kubernetesCluster, ipAddress, port, user, sshKeyFile, nodeName);
            } catch (Exception e) {
                LOGGER.warn(String.format("Failed to retrieve state of node: %s in Kubernetes cluster ID: %s", nodeName, kubernetesCluster.getUuid()), e);
            }
            if (ready) {
                return true;
            }
            try {
                Thread.sleep(waitDuration);
            } catch (InterruptedException ie) {
                LOGGER.error(String.format("Error while waiting for Kubernetes cluster ID: %s node: %s to become ready", kubernetesCluster.getUuid(), nodeName), ie);
            }
        }
        return false;
    }

    /**
     * Mark a given node in a given Kubernetes cluster as schedulable.
     * kubectl uncordon command will be called through SSH using IP address and port of the host virtual machine or load balancer.
     * Multiple retries with a given delay can be used.
     * uncordon is required when a particular node in Kubernetes cluster is drained (usually during upgrade)
     * @param kubernetesCluster cluster the node belongs to (used for logging only)
     * @param ipAddress SSH address of the host VM or load balancer
     * @param port SSH port
     * @param user SSH user
     * @param sshKeyFile SSH private key file
     * @param userVm VM whose (lower-cased) host name identifies the node to uncordon
     * @param timeoutTime absolute epoch-millis deadline for retries
     * @param waitDuration millis to sleep between attempts
     * @return true once the uncordon command succeeds, false on timeout
     */
    public static boolean uncordonKubernetesClusterNode(final KubernetesCluster kubernetesCluster,
                                                        final String ipAddress, final int port,
                                                        final String user, final File sshKeyFile,
                                                        final UserVm userVm, final long timeoutTime,
                                                        final int waitDuration) {
        String hostName = userVm.getHostName();
        if (!Strings.isNullOrEmpty(hostName)) {
            // Node names are registered lower-cased; match that when invoking kubectl.
            hostName = hostName.toLowerCase();
        }
        while (System.currentTimeMillis() < timeoutTime) {
            Pair<Boolean, String> result = null;
            try {
                result = SshHelper.sshExecute(ipAddress, port, user, sshKeyFile, null,
                        String.format("sudo kubectl uncordon %s", hostName),
                        10000, 10000, 30000);
                if (result.first()) {
                    return true;
                }
            } catch (Exception e) {
                LOGGER.warn(String.format("Failed to uncordon node: %s on VM ID: %s in Kubernetes cluster ID: %s", hostName, userVm.getUuid(), kubernetesCluster.getUuid()), e);
            }
            try {
                Thread.sleep(waitDuration);
            } catch (InterruptedException ie) {
                LOGGER.warn(String.format("Error while waiting for uncordon Kubernetes cluster ID: %s node: %s on VM ID: %s", kubernetesCluster.getUuid(), hostName, userVm.getUuid()), ie);
            }
        }
        return false;
    }

    /**
     * Checks once whether a pod whose line contains the given service name reports
     * "Running" in 'kubectl get pods' output, optionally restricted to a namespace.
     */
    public static boolean isKubernetesClusterAddOnServiceRunning(final KubernetesCluster kubernetesCluster, final String ipAddress,
                                                                 final int port, final String user, final File sshKeyFile,
                                                                 final String namespace, String serviceName) {
        try {
            String cmd = "sudo kubectl get pods --all-namespaces";
            if (!Strings.isNullOrEmpty(namespace)) {
                cmd = String.format("sudo kubectl get pods --namespace=%s", namespace);
            }
            Pair<Boolean, String> result = SshHelper.sshExecute(ipAddress, port, user,
                    sshKeyFile, null, cmd,
                    10000, 10000, 10000);
            if (result.first() && !Strings.isNullOrEmpty(result.second())) {
                String[] lines = result.second().split("\n");
                for (String line : lines) {
                    // Substring match on the pod listing line; both the service name and
                    // the Running status must appear on the same line.
                    if (line.contains(serviceName) && line.contains("Running")) {
                        if (LOGGER.isDebugEnabled()) {
                            LOGGER.debug(String.format("Service : %s in namespace: %s for the Kubernetes cluster ID: %s is running", serviceName, namespace, kubernetesCluster.getUuid()));
                        }
                        return true;
                    }
                }
            }
        } catch (Exception e) {
            LOGGER.warn(String.format("Unable to retrieve service: %s running status in namespace %s for Kubernetes cluster ID: %s", serviceName, namespace, kubernetesCluster.getUuid()), e);
        }
        return false;
    }

    /**
     * Polls until the kubernetes-dashboard pod reports Running or timeoutTime is reached,
     * sleeping waitDuration millis between attempts.
     */
    public static boolean isKubernetesClusterDashboardServiceRunning(final KubernetesCluster kubernetesCluster, String ipAddress,
                                                                     final int port, final String user, final File sshKeyFile,
                                                                     final long timeoutTime, final long waitDuration) {
        boolean running = false;
        // Check if dashboard service is up running.
        while (System.currentTimeMillis() < timeoutTime) {
            if (LOGGER.isDebugEnabled()) {
                LOGGER.debug(String.format("Checking dashboard service for the Kubernetes cluster ID: %s to come up", kubernetesCluster.getUuid()));
            }
            if (isKubernetesClusterAddOnServiceRunning(kubernetesCluster, ipAddress, port, user, sshKeyFile, "kubernetes-dashboard", "kubernetes-dashboard")) {
                if (LOGGER.isInfoEnabled()) {
                    LOGGER.info(String.format("Dashboard service for the Kubernetes cluster ID: %s is in running state", kubernetesCluster.getUuid()));
                }
                running = true;
                break;
            }
            try {
                Thread.sleep(waitDuration);
            } catch (InterruptedException ex) {
                LOGGER.error(String.format("Error while waiting for Kubernetes cluster: %s API dashboard service to be available", kubernetesCluster.getUuid()), ex);
            }
        }
        return running;
    }

    /**
     * Retrieves /etc/kubernetes/admin.conf over SSH, retrying until timeoutTime is reached.
     *
     * @return the kube-config contents, or an empty string on timeout
     */
    public static String getKubernetesClusterConfig(final KubernetesCluster kubernetesCluster, final String ipAddress, final int port,
                                                    final String user, final File sshKeyFile, final long timeoutTime) {
        String kubeConfig = "";
        while (System.currentTimeMillis() < timeoutTime) {
            try {
                Pair<Boolean, String> result = SshHelper.sshExecute(ipAddress, port, user,
                        sshKeyFile, null, "sudo cat /etc/kubernetes/admin.conf",
                        10000, 10000, 10000);
                if (result.first() && !Strings.isNullOrEmpty(result.second())) {
                    kubeConfig = result.second();
                    break;
                } else {
                    if (LOGGER.isInfoEnabled()) {
                        LOGGER.info(String.format("Failed to retrieve kube-config file for Kubernetes cluster ID: %s. Output: %s", kubernetesCluster.getUuid(), result.second()));
                    }
                }
            } catch (Exception e) {
                LOGGER.warn(String.format("Failed to retrieve kube-config file for Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), e);
            }
            // Back off between attempts; previously a fast failure busy-spun in this
            // loop without pause until the deadline.
            try {
                Thread.sleep(5000);
            } catch (InterruptedException ie) {
                LOGGER.warn(String.format("Failed to retrieve kube-config file for Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), ie);
            }
        }
        return kubeConfig;
    }

    /**
     * Counts nodes reporting Ready in 'kubectl get nodes' over SSH.
     *
     * @return the ready-node count, or 0 when the command fails
     * @throws Exception on SSH failure
     */
    public static int getKubernetesClusterReadyNodesCount(final KubernetesCluster kubernetesCluster, final String ipAddress,
                                                          final int port, final String user, final File sshKeyFile) throws Exception {
        Pair<Boolean, String> result = SshHelper.sshExecute(ipAddress, port,
                user, sshKeyFile, null,
                "sudo kubectl get nodes | awk '{if ($2 == \"Ready\") print $1}' | wc -l",
                10000, 10000, 20000);
        if (result.first()) {
            return Integer.parseInt(result.second().trim().replace("\"", ""));
        } else {
            if (LOGGER.isDebugEnabled()) {
                LOGGER.debug(String.format("Failed to retrieve ready nodes for Kubernetes cluster ID: %s. Output: %s", kubernetesCluster.getUuid(), result.second()));
            }
        }
        return 0;
    }

    /**
     * Polls https://ipAddress:port/version until the Kubernetes API server answers or
     * timeoutTime is reached, sleeping waitDuration millis between attempts.
     */
    public static boolean isKubernetesClusterServerRunning(final KubernetesCluster kubernetesCluster, final String ipAddress,
                                                           final int port, final long timeoutTime, final long waitDuration) {
        boolean k8sApiServerSetup = false;
        while (System.currentTimeMillis() < timeoutTime) {
            try {
                String versionOutput = IOUtils.toString(new URL(String.format("https://%s:%d/version", ipAddress, port)), StringUtils.getPreferredCharset());
                if (!Strings.isNullOrEmpty(versionOutput)) {
                    if (LOGGER.isInfoEnabled()) {
                        LOGGER.info(String.format("Kubernetes cluster ID: %s API has been successfully provisioned, %s", kubernetesCluster.getUuid(), versionOutput));
                    }
                    k8sApiServerSetup = true;
                    break;
                }
            } catch (Exception e) {
                LOGGER.warn(String.format("API endpoint for Kubernetes cluster ID: %s not available", kubernetesCluster.getUuid()), e);
            }
            try {
                Thread.sleep(waitDuration);
            } catch (InterruptedException ie) {
                LOGGER.error(String.format("Error while waiting for Kubernetes cluster ID: %s API endpoint to be available", kubernetesCluster.getUuid()), ie);
            }
        }
        return k8sApiServerSetup;
    }

    /**
     * Waits for the master node to accept TCP connections on the given port, retrying
     * every 10s (connect timeout 10s) until timeoutTime is reached.
     */
    public static boolean isKubernetesClusterMasterVmRunning(final KubernetesCluster kubernetesCluster, final String ipAddress,
                                                             final int port, final long timeoutTime) {
        boolean masterVmRunning = false;
        while (!masterVmRunning && System.currentTimeMillis() < timeoutTime) {
            try (Socket socket = new Socket()) {
                socket.connect(new InetSocketAddress(ipAddress, port), 10000);
                masterVmRunning = true;
            } catch (IOException e) {
                if (LOGGER.isInfoEnabled()) {
                    LOGGER.info(String.format("Waiting for Kubernetes cluster ID: %s master node VMs to be accessible", kubernetesCluster.getUuid()));
                }
                try {
                    Thread.sleep(10000);
                } catch (InterruptedException ex) {
                    LOGGER.warn(String.format("Error while waiting for Kubernetes cluster ID: %s master node VMs to be accessible", kubernetesCluster.getUuid()), ex);
                }
            }
        }
        return masterVmRunning;
    }

    /**
     * Polls until the cluster's ready-node count equals its total provisioned node count
     * or timeoutTime is reached, sleeping waitDuration millis between attempts.
     */
    public static boolean validateKubernetesClusterReadyNodesCount(final KubernetesCluster kubernetesCluster,
                                                                   final String ipAddress, final int port,
                                                                   final String user, final File sshKeyFile,
                                                                   final long timeoutTime, final long waitDuration) {
        while (System.currentTimeMillis() < timeoutTime) {
            if (LOGGER.isDebugEnabled()) {
                LOGGER.debug(String.format("Checking ready nodes for the Kubernetes cluster ID: %s with total %d provisioned nodes", kubernetesCluster.getUuid(), kubernetesCluster.getTotalNodeCount()));
            }
            try {
                int nodesCount = KubernetesClusterUtil.getKubernetesClusterReadyNodesCount(kubernetesCluster, ipAddress, port,
                        user, sshKeyFile);
                if (nodesCount == kubernetesCluster.getTotalNodeCount()) {
                    if (LOGGER.isInfoEnabled()) {
                        LOGGER.info(String.format("Kubernetes cluster ID: %s has %d ready nodes now", kubernetesCluster.getUuid(), kubernetesCluster.getTotalNodeCount()));
                    }
                    return true;
                } else {
                    if (LOGGER.isDebugEnabled()) {
                        LOGGER.debug(String.format("Kubernetes cluster ID: %s has total %d provisioned nodes while %d ready now", kubernetesCluster.getUuid(), kubernetesCluster.getTotalNodeCount(), nodesCount));
                    }
                }
            } catch (Exception e) {
                LOGGER.warn(String.format("Failed to retrieve ready node count for Kubernetes cluster ID: %s", kubernetesCluster.getUuid()), e);
            }
            try {
                Thread.sleep(waitDuration);
            } catch (InterruptedException ex) {
                LOGGER.warn(String.format("Error while waiting during Kubernetes cluster ID: %s ready node check", kubernetesCluster.getUuid()), ex);
            }
        }
        return false;
    }

    /**
     * Derives a kubeadm-style bootstrap token ("xxxxxx.yyyyyyyyyyyyyyyy") from the
     * cluster UUID: dashes stripped, truncated to 22 chars, dot inserted after 6.
     */
    public static String generateClusterToken(final KubernetesCluster kubernetesCluster) {
        String token = kubernetesCluster.getUuid();
        token = token.replaceAll("-", "");
        token = token.substring(0, 22);
        token = token.substring(0, 6) + "." + token.substring(6);
        return token;
    }

    /**
     * Derives a 64-character certificate key from the cluster UUID by stripping dashes
     * and doubling the string until it is at least 64 chars long.
     */
    public static String generateClusterHACertificateKey(final KubernetesCluster kubernetesCluster) {
        String uuid = kubernetesCluster.getUuid();
        StringBuilder token = new StringBuilder(uuid.replaceAll("-", ""));
        while (token.length() < 64) {
            // Self-append doubles the current contents on each pass.
            token.append(token);
        }
        return token.toString().substring(0, 64);
    }
}

View File

@ -0,0 +1,49 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.kubernetes.version;
import org.apache.cloudstack.api.Identity;
import org.apache.cloudstack.api.InternalIdentity;
/**
 * KubernetesSupportedVersion describes the properties of supported kubernetes version
 *
 */
public interface KubernetesSupportedVersion extends InternalIdentity, Identity {

    /** Lifecycle state of the supported version. */
    public enum State {
        Disabled, Enabled
    }

    /** @return internal database id of this version record */
    long getId();

    /** @return display name of this Kubernetes version */
    String getName();

    /** @return semantic version string (e.g. major.minor.patch) of this Kubernetes version */
    String getSemanticVersion();

    /** @return id of the ISO associated with this version */
    long getIsoId();

    /** @return id of the zone this version is associated with; may be {@code null} */
    Long getZoneId();

    /** @return current lifecycle state of this version */
    State getState();

    /**
     * @return minimum # of cpu.
     */
    int getMinimumCpu();

    /**
     * @return minimum ram size in megabytes
     */
    int getMinimumRamSize();
}

View File

@ -0,0 +1,168 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.kubernetes.version;
import java.util.Date;
import java.util.UUID;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.EnumType;
import javax.persistence.Enumerated;
import javax.persistence.GeneratedValue;
import javax.persistence.GenerationType;
import javax.persistence.Id;
import javax.persistence.Table;
import com.cloud.utils.db.GenericDao;
/**
 * Persistent entity for the kubernetes_supported_version table, implementing
 * {@link KubernetesSupportedVersion}.
 */
@Entity
@Table(name = "kubernetes_supported_version")
public class KubernetesSupportedVersionVO implements KubernetesSupportedVersion {

    @Id
    @GeneratedValue(strategy = GenerationType.IDENTITY)
    @Column(name = "id")
    private long id;

    @Column(name = "uuid")
    private String uuid;

    @Column(name = "name")
    private String name;

    @Column(name = "semantic_version")
    private String semanticVersion;

    @Column(name = "iso_id")
    private long isoId;

    @Column(name = "zone_id")
    private Long zoneId;

    // Made private for consistency with the other fields of this entity.
    @Column(name = "state")
    @Enumerated(value = EnumType.STRING)
    private State state = State.Enabled;

    @Column(name = "min_cpu")
    private int minimumCpu;

    @Column(name = "min_ram_size")
    private int minimumRamSize;

    @Column(name = GenericDao.CREATED_COLUMN)
    private Date created;

    @Column(name = GenericDao.REMOVED_COLUMN)
    private Date removed;

    public KubernetesSupportedVersionVO() {
        this.uuid = UUID.randomUUID().toString();
    }

    public KubernetesSupportedVersionVO(String name, String semanticVersion, long isoId, Long zoneId,
                                        int minimumCpu, int minimumRamSize) {
        this.uuid = UUID.randomUUID().toString();
        this.name = name;
        this.semanticVersion = semanticVersion;
        this.isoId = isoId;
        this.zoneId = zoneId;
        this.minimumCpu = minimumCpu;
        this.minimumRamSize = minimumRamSize;
    }

    @Override
    public long getId() {
        return id;
    }

    @Override
    public String getUuid() {
        return uuid;
    }

    @Override
    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    @Override
    public String getSemanticVersion() {
        return semanticVersion;
    }

    public void setSemanticVersion(String semanticVersion) {
        this.semanticVersion = semanticVersion;
    }

    @Override
    public long getIsoId() {
        return isoId;
    }

    public void setIsoId(long isoId) {
        this.isoId = isoId;
    }

    @Override
    public Long getZoneId() {
        return zoneId;
    }

    public void setZoneId(Long zoneId) {
        this.zoneId = zoneId;
    }

    @Override
    public State getState() {
        return this.state;
    }

    public void setState(State state) {
        this.state = state;
    }

    @Override
    public int getMinimumCpu() {
        return minimumCpu;
    }

    public void setMinimumCpu(int minimumCpu) {
        this.minimumCpu = minimumCpu;
    }

    @Override
    public int getMinimumRamSize() {
        return minimumRamSize;
    }

    public void setMinimumRamSize(int minimumRamSize) {
        this.minimumRamSize = minimumRamSize;
    }

    public Date getCreated() {
        return created;
    }

    public Date getRemoved() {
        return removed;
    }
}

View File

@ -0,0 +1,24 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.kubernetes.version;
/**
 * Event type constants emitted by Kubernetes supported-version management operations.
 */
public class KubernetesVersionEventTypes {

    public static final String EVENT_KUBERNETES_VERSION_ADD = "KUBERNETES.VERSION.ADD";
    public static final String EVENT_KUBERNETES_VERSION_DELETE = "KUBERNETES.VERSION.DELETE";
    public static final String EVENT_KUBERNETES_VERSION_UPDATE = "KUBERNETES.VERSION.UPDATE";

    // Constants holder; not meant to be instantiated.
    private KubernetesVersionEventTypes() {
    }
}

View File

@ -0,0 +1,388 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.kubernetes.version;
import java.util.ArrayList;
import java.util.List;
import javax.inject.Inject;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.command.admin.kubernetes.version.AddKubernetesSupportedVersionCmd;
import org.apache.cloudstack.api.command.admin.kubernetes.version.DeleteKubernetesSupportedVersionCmd;
import org.apache.cloudstack.api.command.admin.kubernetes.version.UpdateKubernetesSupportedVersionCmd;
import org.apache.cloudstack.api.command.user.iso.DeleteIsoCmd;
import org.apache.cloudstack.api.command.user.iso.RegisterIsoCmd;
import org.apache.cloudstack.api.command.user.kubernetes.version.ListKubernetesSupportedVersionsCmd;
import org.apache.cloudstack.api.response.KubernetesSupportedVersionResponse;
import org.apache.cloudstack.api.response.ListResponse;
import org.apache.log4j.Logger;
import com.cloud.api.query.dao.TemplateJoinDao;
import com.cloud.api.query.vo.TemplateJoinVO;
import com.cloud.dc.DataCenterVO;
import com.cloud.dc.dao.DataCenterDao;
import com.cloud.event.ActionEvent;
import com.cloud.exception.InvalidParameterValueException;
import com.cloud.exception.ResourceAllocationException;
import com.cloud.kubernetes.cluster.KubernetesClusterService;
import com.cloud.kubernetes.cluster.KubernetesClusterVO;
import com.cloud.kubernetes.cluster.dao.KubernetesClusterDao;
import com.cloud.kubernetes.version.dao.KubernetesSupportedVersionDao;
import com.cloud.storage.VMTemplateVO;
import com.cloud.storage.dao.VMTemplateDao;
import com.cloud.storage.dao.VMTemplateZoneDao;
import com.cloud.template.TemplateApiService;
import com.cloud.template.VirtualMachineTemplate;
import com.cloud.user.AccountManager;
import com.cloud.utils.component.ComponentContext;
import com.cloud.utils.component.ManagerBase;
import com.cloud.utils.exception.CloudRuntimeException;
import com.google.common.base.Strings;
public class KubernetesVersionManagerImpl extends ManagerBase implements KubernetesVersionService {
    public static final Logger LOGGER = Logger.getLogger(KubernetesVersionManagerImpl.class.getName());

    @Inject
    private KubernetesSupportedVersionDao kubernetesSupportedVersionDao;
    @Inject
    private KubernetesClusterDao kubernetesClusterDao;
    @Inject
    private AccountManager accountManager;
    @Inject
    private VMTemplateDao templateDao;
    @Inject
    private TemplateJoinDao templateJoinDao;
    @Inject
    private VMTemplateZoneDao templateZoneDao;
    @Inject
    private DataCenterDao dataCenterDao;
    @Inject
    private TemplateApiService templateService;

    /**
     * Builds the API response for a single supported Kubernetes version,
     * resolving its zone and binaries-ISO details when available.
     */
    private KubernetesSupportedVersionResponse createKubernetesSupportedVersionResponse(final KubernetesSupportedVersion kubernetesSupportedVersion) {
        KubernetesSupportedVersionResponse response = new KubernetesSupportedVersionResponse();
        response.setObjectName("kubernetessupportedversion");
        response.setId(kubernetesSupportedVersion.getUuid());
        response.setName(kubernetesSupportedVersion.getName());
        response.setSemanticVersion(kubernetesSupportedVersion.getSemanticVersion());
        if (kubernetesSupportedVersion.getState() != null) {
            response.setState(kubernetesSupportedVersion.getState().toString());
        }
        response.setMinimumCpu(kubernetesSupportedVersion.getMinimumCpu());
        response.setMinimumRamSize(kubernetesSupportedVersion.getMinimumRamSize());
        DataCenterVO zone = dataCenterDao.findById(kubernetesSupportedVersion.getZoneId());
        if (zone != null) {
            response.setZoneId(zone.getUuid());
            response.setZoneName(zone.getName());
        }
        // HA is supported only from MIN_KUBERNETES_VERSION_HA_SUPPORT onwards
        response.setSupportsHA(compareSemanticVersions(kubernetesSupportedVersion.getSemanticVersion(),
                KubernetesClusterService.MIN_KUBERNETES_VERSION_HA_SUPPORT) >= 0);
        TemplateJoinVO template = templateJoinDao.findById(kubernetesSupportedVersion.getIsoId());
        if (template != null) {
            response.setIsoId(template.getUuid());
            response.setIsoName(template.getName());
            response.setIsoState(template.getState().toString());
        }
        return response;
    }

    /**
     * Wraps the given versions in a {@link ListResponse} of API response objects.
     */
    private ListResponse<KubernetesSupportedVersionResponse> createKubernetesSupportedVersionListResponse(List<KubernetesSupportedVersionVO> versions) {
        List<KubernetesSupportedVersionResponse> responseList = new ArrayList<>();
        for (KubernetesSupportedVersionVO version : versions) {
            responseList.add(createKubernetesSupportedVersionResponse(version));
        }
        ListResponse<KubernetesSupportedVersionResponse> response = new ListResponse<>();
        response.setResponses(responseList);
        return response;
    }

    /**
     * Returns true when the given string is a dot-separated numeric version
     * with at least three components (MAJOR.MINOR.PATCH).
     */
    private static boolean isSemanticVersion(final String version) {
        if (!version.matches("[0-9]+(\\.[0-9]+)*")) {
            return false;
        }
        // require at least MAJOR.MINOR.PATCH
        return version.split("\\.").length >= 3;
    }

    /**
     * Removes from the given (mutable) list every version older than
     * minimumSemanticVersion; versions whose semantic version cannot be
     * compared are dropped as well. No-op when the minimum is empty.
     */
    private List<KubernetesSupportedVersionVO> filterKubernetesSupportedVersions(List<KubernetesSupportedVersionVO> versions, final String minimumSemanticVersion) {
        if (!Strings.isNullOrEmpty(minimumSemanticVersion)) {
            // iterate backwards so removal by index stays valid
            for (int i = versions.size() - 1; i >= 0; --i) {
                KubernetesSupportedVersionVO version = versions.get(i);
                try {
                    if (compareSemanticVersions(minimumSemanticVersion, version.getSemanticVersion()) > 0) {
                        versions.remove(i);
                    }
                } catch (IllegalArgumentException e) {
                    LOGGER.warn(String.format("Unable to compare Kubernetes version for supported version ID: %s with %s", version.getUuid(), minimumSemanticVersion));
                    versions.remove(i);
                }
            }
        }
        return versions;
    }

    /**
     * Registers a public, non-bootable binaries ISO for the named Kubernetes
     * version under the system account, optionally restricted to a zone.
     */
    private VirtualMachineTemplate registerKubernetesVersionIso(final Long zoneId, final String versionName, final String isoUrl, final String isoChecksum)throws IllegalAccessException, NoSuchFieldException,
            IllegalArgumentException, ResourceAllocationException {
        String isoName = String.format("%s-Kubernetes-Binaries-ISO", versionName);
        RegisterIsoCmd registerIsoCmd = new RegisterIsoCmd();
        registerIsoCmd = ComponentContext.inject(registerIsoCmd);
        registerIsoCmd.setIsoName(isoName);
        registerIsoCmd.setPublic(true);
        if (zoneId != null) {
            registerIsoCmd.setZoneId(zoneId);
        }
        registerIsoCmd.setDisplayText(isoName);
        registerIsoCmd.setBootable(false);
        registerIsoCmd.setUrl(isoUrl);
        if (!Strings.isNullOrEmpty(isoChecksum)) {
            registerIsoCmd.setChecksum(isoChecksum);
        }
        registerIsoCmd.setAccountName(accountManager.getSystemAccount().getAccountName());
        registerIsoCmd.setDomainId(accountManager.getSystemAccount().getDomainId());
        return templateService.registerIso(registerIsoCmd);
    }

    /**
     * Deletes the binaries ISO with the given template ID via the template service.
     */
    private void deleteKubernetesVersionIso(long templateId) throws IllegalAccessException, NoSuchFieldException,
            IllegalArgumentException {
        DeleteIsoCmd deleteIsoCmd = new DeleteIsoCmd();
        deleteIsoCmd = ComponentContext.inject(deleteIsoCmd);
        deleteIsoCmd.setId(templateId);
        templateService.deleteIso(deleteIsoCmd);
    }

    /**
     * Compares two semantic version strings component-wise; missing trailing
     * components are treated as zero.
     *
     * @return a negative value when v1 &lt; v2, zero when equal, positive when v1 &gt; v2
     * @throws IllegalArgumentException when either string is empty or not a semantic version
     */
    public static int compareSemanticVersions(String v1, String v2) throws IllegalArgumentException {
        if (Strings.isNullOrEmpty(v1) || Strings.isNullOrEmpty(v2)) {
            throw new IllegalArgumentException(String.format("Invalid version comparison with versions %s, %s", v1, v2));
        }
        if(!isSemanticVersion(v1)) {
            throw new IllegalArgumentException(String.format("Invalid version format, %s", v1));
        }
        if(!isSemanticVersion(v2)) {
            throw new IllegalArgumentException(String.format("Invalid version format, %s", v2));
        }
        String[] thisParts = v1.split("\\.");
        String[] thatParts = v2.split("\\.");
        int length = Math.max(thisParts.length, thatParts.length);
        for (int i = 0; i < length; i++) {
            int thisPart = i < thisParts.length ?
                    Integer.parseInt(thisParts[i]) : 0;
            int thatPart = i < thatParts.length ?
                    Integer.parseInt(thatParts[i]) : 0;
            if (thisPart < thatPart) {
                return -1;
            }
            if (thisPart > thatPart) {
                return 1;
            }
        }
        return 0;
    }

    /**
     * Returns a boolean value whether Kubernetes cluster upgrade can be carried from a given currentVersion to upgradeVersion
     * Kubernetes clusters can only be upgraded from one MINOR version to the next MINOR version, or between PATCH versions of the same MINOR.
     * That is, MINOR versions cannot be skipped during upgrade.
     * For example, you can upgrade from 1.y to 1.y+1, but not from 1.y to 1.y+2
     * @param currentVersion
     * @param upgradeVersion
     * @return true when the upgrade path is allowed
     * @throws IllegalArgumentException when versions are equal, a downgrade, or skip a minor release
     */
    public static boolean canUpgradeKubernetesVersion(final String currentVersion, final String upgradeVersion) throws IllegalArgumentException {
        int versionDiff = compareSemanticVersions(upgradeVersion, currentVersion);
        if (versionDiff == 0) {
            throw new IllegalArgumentException(String.format("Kubernetes clusters can not be upgraded, current version: %s, upgrade version: %s", currentVersion, upgradeVersion));
        } else if (versionDiff < 0) {
            throw new IllegalArgumentException(String.format("Kubernetes clusters can not be downgraded, current version: %s, upgrade version: %s", currentVersion, upgradeVersion));
        }
        String[] thisParts = currentVersion.split("\\.");
        String[] thatParts = upgradeVersion.split("\\.");
        int majorVerDiff = Integer.parseInt(thatParts[0]) - Integer.parseInt(thisParts[0]);
        int minorVerDiff = Integer.parseInt(thatParts[1]) - Integer.parseInt(thisParts[1]);
        // same major release, and at most one minor release ahead
        if (majorVerDiff != 0 || minorVerDiff > 1) {
            throw new IllegalArgumentException(String.format("Kubernetes clusters can be upgraded between next minor or patch version releases, current version: %s, upgrade version: %s", currentVersion, upgradeVersion));
        }
        return true;
    }

    /**
     * Lists supported Kubernetes versions, optionally filtered by ID, zone and
     * a minimum version (given either as a semantic version string or as the ID
     * of an existing supported version — the two are mutually exclusive).
     */
    @Override
    public ListResponse<KubernetesSupportedVersionResponse> listKubernetesSupportedVersions(final ListKubernetesSupportedVersionsCmd cmd) {
        if (!KubernetesClusterService.KubernetesServiceEnabled.value()) {
            throw new CloudRuntimeException("Kubernetes Service plugin is disabled");
        }
        final Long versionId = cmd.getId();
        final Long zoneId = cmd.getZoneId();
        String minimumSemanticVersion = cmd.getMinimumSemanticVersion();
        final Long minimumKubernetesVersionId = cmd.getMinimumKubernetesVersionId();
        if (!Strings.isNullOrEmpty(minimumSemanticVersion) && minimumKubernetesVersionId != null) {
            throw new CloudRuntimeException(String.format("Both parameters %s and %s can not be passed together", ApiConstants.MIN_SEMANTIC_VERSION, ApiConstants.MIN_KUBERNETES_VERSION_ID));
        }
        if (minimumKubernetesVersionId != null) {
            KubernetesSupportedVersionVO minVersion = kubernetesSupportedVersionDao.findById(minimumKubernetesVersionId);
            if (minVersion == null) {
                throw new InvalidParameterValueException(String.format("Invalid %s passed", ApiConstants.MIN_KUBERNETES_VERSION_ID));
            }
            minimumSemanticVersion = minVersion.getSemanticVersion();
        }
        List<KubernetesSupportedVersionVO> versions = new ArrayList<>();
        if (versionId != null) {
            KubernetesSupportedVersionVO version = kubernetesSupportedVersionDao.findById(versionId);
            // a version matches when it is cross-zone (null zone) or bound to the requested zone
            if (version != null && (zoneId == null || version.getZoneId() == null || version.getZoneId().equals(zoneId))) {
                versions.add(version);
            }
        } else {
            if (zoneId == null) {
                versions = kubernetesSupportedVersionDao.listAll();
            } else {
                versions = kubernetesSupportedVersionDao.listAllInZone(zoneId);
            }
        }
        versions = filterKubernetesSupportedVersions(versions, minimumSemanticVersion);
        return createKubernetesSupportedVersionListResponse(versions);
    }

    /**
     * Adds a new supported Kubernetes version: validates resource minimums and
     * the semantic version, registers the binaries ISO and persists the version record.
     */
    @Override
    @ActionEvent(eventType = KubernetesVersionEventTypes.EVENT_KUBERNETES_VERSION_ADD, eventDescription = "Adding Kubernetes supported version")
    public KubernetesSupportedVersionResponse addKubernetesSupportedVersion(final AddKubernetesSupportedVersionCmd cmd) {
        if (!KubernetesClusterService.KubernetesServiceEnabled.value()) {
            throw new CloudRuntimeException("Kubernetes Service plugin is disabled");
        }
        String name = cmd.getName();
        final String semanticVersion = cmd.getSemanticVersion();
        final Long zoneId = cmd.getZoneId();
        final String isoUrl = cmd.getUrl();
        final String isoChecksum = cmd.getChecksum();
        final Integer minimumCpu = cmd.getMinimumCpu();
        final Integer minimumRamSize = cmd.getMinimumRamSize();
        if (minimumCpu == null || minimumCpu < KubernetesClusterService.MIN_KUBERNETES_CLUSTER_NODE_CPU) {
            throw new InvalidParameterValueException(String.format("Invalid value for %s parameter", ApiConstants.MIN_CPU_NUMBER));
        }
        if (minimumRamSize == null || minimumRamSize < KubernetesClusterService.MIN_KUBERNETES_CLUSTER_NODE_RAM_SIZE) {
            throw new InvalidParameterValueException(String.format("Invalid value for %s parameter", ApiConstants.MIN_MEMORY));
        }
        if (compareSemanticVersions(semanticVersion, MIN_KUBERNETES_VERSION) < 0) {
            throw new InvalidParameterValueException(String.format("New supported Kubernetes version cannot be added as %s is minimum version supported by Kubernetes Service", MIN_KUBERNETES_VERSION));
        }
        if (zoneId != null && dataCenterDao.findById(zoneId) == null) {
            throw new InvalidParameterValueException("Invalid zone specified");
        }
        if (Strings.isNullOrEmpty(isoUrl)) {
            throw new InvalidParameterValueException(String.format("Invalid URL for ISO specified, %s", isoUrl));
        }
        if (Strings.isNullOrEmpty(name)) {
            // derive a default name from the version (and zone, when zone-scoped)
            name = String.format("v%s", semanticVersion);
            if (zoneId != null) {
                name = String.format("%s-%s", name, dataCenterDao.findById(zoneId).getName());
            }
        }
        VMTemplateVO template = null;
        try {
            VirtualMachineTemplate vmTemplate = registerKubernetesVersionIso(zoneId, name, isoUrl, isoChecksum);
            template = templateDao.findById(vmTemplate.getId());
        } catch (IllegalAccessException | NoSuchFieldException | IllegalArgumentException | ResourceAllocationException ex) {
            LOGGER.error(String.format("Unable to register binaries ISO for supported kubernetes version, %s, with url: %s", name, isoUrl), ex);
            throw new CloudRuntimeException(String.format("Unable to register binaries ISO for supported kubernetes version, %s, with url: %s", name, isoUrl));
        }
        // guard against a registration that did not yield a persisted template
        if (template == null) {
            throw new CloudRuntimeException(String.format("Unable to register binaries ISO for supported kubernetes version, %s, with url: %s", name, isoUrl));
        }
        KubernetesSupportedVersionVO supportedVersionVO = new KubernetesSupportedVersionVO(name, semanticVersion, template.getId(), zoneId, minimumCpu, minimumRamSize);
        supportedVersionVO = kubernetesSupportedVersionDao.persist(supportedVersionVO);
        return createKubernetesSupportedVersionResponse(supportedVersionVO);
    }

    /**
     * Deletes a supported Kubernetes version along with its binaries ISO.
     * Fails when any existing cluster still uses the version.
     */
    @Override
    @ActionEvent(eventType = KubernetesVersionEventTypes.EVENT_KUBERNETES_VERSION_DELETE, eventDescription = "Deleting Kubernetes supported version", async = true)
    public boolean deleteKubernetesSupportedVersion(final DeleteKubernetesSupportedVersionCmd cmd) {
        if (!KubernetesClusterService.KubernetesServiceEnabled.value()) {
            throw new CloudRuntimeException("Kubernetes Service plugin is disabled");
        }
        final Long versionId = cmd.getId();
        KubernetesSupportedVersion version = kubernetesSupportedVersionDao.findById(versionId);
        if (version == null) {
            throw new InvalidParameterValueException("Invalid Kubernetes version id specified");
        }
        List<KubernetesClusterVO> clusters = kubernetesClusterDao.listAllByKubernetesVersion(versionId);
        if (!clusters.isEmpty()) {
            throw new CloudRuntimeException(String.format("Unable to delete Kubernetes version ID: %s. Existing clusters currently using the version.", version.getUuid()));
        }
        VMTemplateVO template = templateDao.findByIdIncludingRemoved(version.getIsoId());
        if (template == null) {
            LOGGER.warn(String.format("Unable to find ISO associated with supported Kubernetes version ID: %s", version.getUuid()));
        }
        if (template != null && template.getRemoved() == null) { // Delete ISO
            try {
                deleteKubernetesVersionIso(template.getId());
            } catch (IllegalAccessException | NoSuchFieldException | IllegalArgumentException ex) {
                LOGGER.error(String.format("Unable to delete binaries ISO ID: %s associated with supported kubernetes version ID: %s", template.getUuid(), version.getUuid()), ex);
                throw new CloudRuntimeException(String.format("Unable to delete binaries ISO ID: %s associated with supported kubernetes version ID: %s", template.getUuid(), version.getUuid()));
            }
        }
        return kubernetesSupportedVersionDao.remove(version.getId());
    }

    /**
     * Updates the state (Enabled/Disabled) of a supported Kubernetes version.
     */
    @Override
    @ActionEvent(eventType = KubernetesVersionEventTypes.EVENT_KUBERNETES_VERSION_UPDATE, eventDescription = "Updating Kubernetes supported version")
    public KubernetesSupportedVersionResponse updateKubernetesSupportedVersion(final UpdateKubernetesSupportedVersionCmd cmd) {
        if (!KubernetesClusterService.KubernetesServiceEnabled.value()) {
            throw new CloudRuntimeException("Kubernetes Service plugin is disabled");
        }
        final Long versionId = cmd.getId();
        KubernetesSupportedVersion.State state = null;
        KubernetesSupportedVersionVO version = kubernetesSupportedVersionDao.findById(versionId);
        if (version == null) {
            throw new InvalidParameterValueException("Invalid Kubernetes version id specified");
        }
        try {
            state = KubernetesSupportedVersion.State.valueOf(cmd.getState());
        } catch (IllegalArgumentException iae) {
            throw new InvalidParameterValueException(String.format("Invalid value for %s parameter", ApiConstants.STATE));
        }
        if (!state.equals(version.getState())) {
            version = kubernetesSupportedVersionDao.createForUpdate(version.getId());
            version.setState(state);
            if (!kubernetesSupportedVersionDao.update(version.getId(), version)) {
                throw new CloudRuntimeException(String.format("Failed to update Kubernetes supported version ID: %s", version.getUuid()));
            }
            // re-read so the response reflects the persisted row
            version = kubernetesSupportedVersionDao.findById(versionId);
        }
        return createKubernetesSupportedVersionResponse(version);
    }

    /**
     * Exposes the version-management API commands; none when the plugin is disabled.
     */
    @Override
    public List<Class<?>> getCommands() {
        List<Class<?>> cmdList = new ArrayList<>();
        if (!KubernetesClusterService.KubernetesServiceEnabled.value()) {
            return cmdList;
        }
        cmdList.add(AddKubernetesSupportedVersionCmd.class);
        cmdList.add(ListKubernetesSupportedVersionsCmd.class);
        cmdList.add(DeleteKubernetesSupportedVersionCmd.class);
        cmdList.add(UpdateKubernetesSupportedVersionCmd.class);
        return cmdList;
    }
}

View File

@ -0,0 +1,36 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.kubernetes.version;
import org.apache.cloudstack.api.command.admin.kubernetes.version.AddKubernetesSupportedVersionCmd;
import org.apache.cloudstack.api.command.admin.kubernetes.version.DeleteKubernetesSupportedVersionCmd;
import org.apache.cloudstack.api.command.admin.kubernetes.version.UpdateKubernetesSupportedVersionCmd;
import org.apache.cloudstack.api.command.user.kubernetes.version.ListKubernetesSupportedVersionsCmd;
import org.apache.cloudstack.api.response.KubernetesSupportedVersionResponse;
import org.apache.cloudstack.api.response.ListResponse;
import com.cloud.utils.component.PluggableService;
import com.cloud.utils.exception.CloudRuntimeException;
/**
 * Service contract for managing supported Kubernetes versions
 * (list/add/delete/update) exposed by the Kubernetes Service plugin.
 */
public interface KubernetesVersionService extends PluggableService {
    // Interface fields are implicitly public static final; redundant modifiers removed.
    // Minimum Kubernetes version the service accepts when adding a supported version.
    String MIN_KUBERNETES_VERSION = "1.11.0";

    ListResponse<KubernetesSupportedVersionResponse> listKubernetesSupportedVersions(ListKubernetesSupportedVersionsCmd cmd);

    KubernetesSupportedVersionResponse addKubernetesSupportedVersion(AddKubernetesSupportedVersionCmd cmd) throws CloudRuntimeException;

    boolean deleteKubernetesSupportedVersion(DeleteKubernetesSupportedVersionCmd cmd) throws CloudRuntimeException;

    KubernetesSupportedVersionResponse updateKubernetesSupportedVersion(UpdateKubernetesSupportedVersionCmd cmd) throws CloudRuntimeException;
}

View File

@ -0,0 +1,27 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.kubernetes.version.dao;
import java.util.List;
import com.cloud.kubernetes.version.KubernetesSupportedVersionVO;
import com.cloud.utils.db.GenericDao;
/**
 * Data-access contract for supported Kubernetes version records.
 */
public interface KubernetesSupportedVersionDao extends GenericDao<KubernetesSupportedVersionVO, Long> {
/**
 * Lists versions available in the given zone, including zone-agnostic
 * versions (those whose zone ID is null).
 *
 * @param dataCenterId ID of the zone to filter by
 * @return versions usable in the zone
 */
List<KubernetesSupportedVersionVO> listAllInZone(long dataCenterId);
}

View File

@ -0,0 +1,42 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.kubernetes.version.dao;
import java.util.List;
import org.springframework.stereotype.Component;
import com.cloud.kubernetes.version.KubernetesSupportedVersionVO;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.SearchCriteria;
@Component
public class KubernetesSupportedVersionDaoImpl extends GenericDaoBase<KubernetesSupportedVersionVO, Long> implements KubernetesSupportedVersionDao {

    public KubernetesSupportedVersionDaoImpl() {
    }

    /**
     * Lists every supported version that is either bound to the given zone
     * or zone-agnostic (zone ID null).
     */
    @Override
    public List<KubernetesSupportedVersionVO> listAllInZone(long dataCenterId) {
        // inner OR group: zoneId == dataCenterId OR zoneId IS NULL
        final SearchCriteria<KubernetesSupportedVersionVO> zoneCriteria = createSearchCriteria();
        zoneCriteria.addOr("zoneId", SearchCriteria.Op.EQ, dataCenterId);
        zoneCriteria.addOr("zoneId", SearchCriteria.Op.NULL);
        // wrap the OR group inside the outer criteria
        final SearchCriteria<KubernetesSupportedVersionVO> criteria = createSearchCriteria();
        criteria.addAnd("zoneId", SearchCriteria.Op.SC, zoneCriteria);
        return listBy(criteria);
    }
}

View File

@ -0,0 +1,153 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api.command.admin.kubernetes.version;
import javax.inject.Inject;
import org.apache.cloudstack.acl.RoleType;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.ApiErrorCode;
import org.apache.cloudstack.api.BaseCmd;
import org.apache.cloudstack.api.Parameter;
import org.apache.cloudstack.api.ResponseObject;
import org.apache.cloudstack.api.ServerApiException;
import org.apache.cloudstack.api.command.admin.AdminCmd;
import org.apache.cloudstack.api.response.KubernetesSupportedVersionResponse;
import org.apache.cloudstack.api.response.ZoneResponse;
import org.apache.cloudstack.context.CallContext;
import org.apache.log4j.Logger;
import com.cloud.exception.ConcurrentOperationException;
import com.cloud.exception.InvalidParameterValueException;
import com.cloud.kubernetes.version.KubernetesSupportedVersion;
import com.cloud.kubernetes.version.KubernetesVersionService;
import com.cloud.utils.exception.CloudRuntimeException;
import com.google.common.base.Strings;
@APICommand(name = AddKubernetesSupportedVersionCmd.APINAME,
        description = "Add a supported Kubernetes version",
        responseObject = KubernetesSupportedVersionResponse.class,
        responseView = ResponseObject.ResponseView.Full,
        entityType = {KubernetesSupportedVersion.class},
        authorized = {RoleType.Admin})
public class AddKubernetesSupportedVersionCmd extends BaseCmd implements AdminCmd {
    public static final Logger LOGGER = Logger.getLogger(AddKubernetesSupportedVersionCmd.class.getName());
    public static final String APINAME = "addKubernetesSupportedVersion";

    @Inject
    private KubernetesVersionService kubernetesVersionService;

    /////////////////////////////////////////////////////
    //////////////// API parameters /////////////////////
    /////////////////////////////////////////////////////
    @Parameter(name = ApiConstants.NAME, type = CommandType.STRING,
            description = "the name of the Kubernetes supported version")
    private String name;

    @Parameter(name = ApiConstants.SEMANTIC_VERSION, type = CommandType.STRING, required = true,
            description = "the semantic version of the Kubernetes version")
    private String semanticVersion;

    @Parameter(name = ApiConstants.ZONE_ID, type = CommandType.UUID,
            entityType = ZoneResponse.class,
            description = "the ID of the zone in which Kubernetes supported version will be available")
    private Long zoneId;

    @Parameter(name = ApiConstants.URL, type = CommandType.STRING,
            description = "the URL of the binaries ISO for Kubernetes supported version")
    private String url;

    @Parameter(name = ApiConstants.CHECKSUM, type = CommandType.STRING,
            description = "the checksum value of the binaries ISO. " + ApiConstants.CHECKSUM_PARAMETER_PREFIX_DESCRIPTION)
    private String checksum;

    @Parameter(name = ApiConstants.MIN_CPU_NUMBER, type = CommandType.INTEGER, required = true,
            description = "the minimum number of CPUs to be set with the Kubernetes version")
    private Integer minimumCpu;

    @Parameter(name = ApiConstants.MIN_MEMORY, type = CommandType.INTEGER, required = true,
            description = "the minimum RAM size in MB to be set with the Kubernetes version")
    private Integer minimumRamSize;

    /////////////////////////////////////////////////////
    /////////////////// Accessors ///////////////////////
    /////////////////////////////////////////////////////
    public String getName() {
        return name;
    }

    /**
     * Returns the validated semantic version parameter.
     *
     * @throws InvalidParameterValueException when the version is missing or
     *         not in dot-separated numeric (semantic) format
     */
    public String getSemanticVersion() {
        if (Strings.isNullOrEmpty(semanticVersion)) {
            throw new InvalidParameterValueException("Version can not be null");
        }
        if (!semanticVersion.matches("[0-9]+(\\.[0-9]+)*")) {
            // Throw the API-layer validation exception here, consistent with the
            // null check above, rather than a bare IllegalArgumentException.
            throw new InvalidParameterValueException("Invalid version format. Semantic version needed");
        }
        return semanticVersion;
    }

    public Long getZoneId() {
        return zoneId;
    }

    public String getUrl() {
        return url;
    }

    public String getChecksum() {
        return checksum;
    }

    public Integer getMinimumCpu() {
        return minimumCpu;
    }

    public Integer getMinimumRamSize() {
        return minimumRamSize;
    }

    @Override
    public String getCommandName() {
        return APINAME.toLowerCase() + "response";
    }

    @Override
    public long getEntityOwnerId() {
        return CallContext.current().getCallingAccountId();
    }

    /////////////////////////////////////////////////////
    /////////////// API Implementation///////////////////
    /////////////////////////////////////////////////////
    /**
     * Delegates to the version service and wraps failures as a ServerApiException.
     */
    @Override
    public void execute() throws ServerApiException, ConcurrentOperationException {
        try {
            KubernetesSupportedVersionResponse response = kubernetesVersionService.addKubernetesSupportedVersion(this);
            if (response == null) {
                throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to add Kubernetes supported version");
            }
            response.setResponseName(getCommandName());
            setResponseObject(response);
        } catch (CloudRuntimeException ex) {
            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage());
        }
    }
}

View File

@ -0,0 +1,104 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api.command.admin.kubernetes.version;
import javax.inject.Inject;
import org.apache.cloudstack.acl.RoleType;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.ApiErrorCode;
import org.apache.cloudstack.api.BaseAsyncCmd;
import org.apache.cloudstack.api.Parameter;
import org.apache.cloudstack.api.ServerApiException;
import org.apache.cloudstack.api.command.admin.AdminCmd;
import org.apache.cloudstack.api.response.KubernetesSupportedVersionResponse;
import org.apache.cloudstack.api.response.SuccessResponse;
import org.apache.cloudstack.context.CallContext;
import org.apache.log4j.Logger;
import com.cloud.exception.ConcurrentOperationException;
import com.cloud.kubernetes.version.KubernetesSupportedVersion;
import com.cloud.kubernetes.version.KubernetesVersionEventTypes;
import com.cloud.kubernetes.version.KubernetesVersionService;
import com.cloud.utils.exception.CloudRuntimeException;
@APICommand(name = DeleteKubernetesSupportedVersionCmd.APINAME,
        // fixed copy-paste error: this command deletes a supported version, not a cluster
        description = "Deletes a Kubernetes supported version",
        responseObject = SuccessResponse.class,
        entityType = {KubernetesSupportedVersion.class},
        authorized = {RoleType.Admin})
public class DeleteKubernetesSupportedVersionCmd extends BaseAsyncCmd implements AdminCmd {
    public static final Logger LOGGER = Logger.getLogger(DeleteKubernetesSupportedVersionCmd.class.getName());
    public static final String APINAME = "deleteKubernetesSupportedVersion";

    @Inject
    private KubernetesVersionService kubernetesVersionService;

    /////////////////////////////////////////////////////
    //////////////// API parameters /////////////////////
    /////////////////////////////////////////////////////
    @Parameter(name = ApiConstants.ID, type = CommandType.UUID,
            entityType = KubernetesSupportedVersionResponse.class,
            description = "the ID of the Kubernetes supported version",
            required = true)
    private Long id;

    /////////////////////////////////////////////////////
    /////////////////// Accessors ///////////////////////
    /////////////////////////////////////////////////////
    public Long getId() {
        return id;
    }

    @Override
    public String getCommandName() {
        return APINAME.toLowerCase() + "response";
    }

    @Override
    public long getEntityOwnerId() {
        return CallContext.current().getCallingAccountId();
    }

    @Override
    public String getEventType() {
        return KubernetesVersionEventTypes.EVENT_KUBERNETES_VERSION_DELETE;
    }

    @Override
    public String getEventDescription() {
        return "Deleting Kubernetes supported version " + getId();
    }

    /////////////////////////////////////////////////////
    /////////////// API Implementation///////////////////
    /////////////////////////////////////////////////////
    /**
     * Delegates deletion to the version service; a false return or a
     * CloudRuntimeException is surfaced as an internal-error ServerApiException.
     */
    @Override
    public void execute() throws ServerApiException, ConcurrentOperationException {
        try {
            if (!kubernetesVersionService.deleteKubernetesSupportedVersion(this)) {
                throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to delete Kubernetes supported version ID: %d", getId()));
            }
            SuccessResponse response = new SuccessResponse(getCommandName());
            setResponseObject(response);
        } catch (CloudRuntimeException ex) {
            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage());
        }
    }
}

View File

@ -0,0 +1,103 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api.command.admin.kubernetes.version;
import javax.inject.Inject;
import org.apache.cloudstack.acl.RoleType;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.ApiErrorCode;
import org.apache.cloudstack.api.BaseCmd;
import org.apache.cloudstack.api.Parameter;
import org.apache.cloudstack.api.ResponseObject;
import org.apache.cloudstack.api.ServerApiException;
import org.apache.cloudstack.api.command.admin.AdminCmd;
import org.apache.cloudstack.api.response.KubernetesSupportedVersionResponse;
import org.apache.log4j.Logger;
import com.cloud.exception.ConcurrentOperationException;
import com.cloud.kubernetes.version.KubernetesSupportedVersion;
import com.cloud.kubernetes.version.KubernetesVersionService;
import com.cloud.utils.exception.CloudRuntimeException;
@APICommand(name = UpdateKubernetesSupportedVersionCmd.APINAME,
        description = "Update a supported Kubernetes version",
        responseObject = KubernetesSupportedVersionResponse.class,
        responseView = ResponseObject.ResponseView.Full,
        entityType = {KubernetesSupportedVersion.class},
        authorized = {RoleType.Admin})
public class UpdateKubernetesSupportedVersionCmd extends BaseCmd implements AdminCmd {
    public static final Logger LOGGER = Logger.getLogger(UpdateKubernetesSupportedVersionCmd.class.getName());
    public static final String APINAME = "updateKubernetesSupportedVersion";

    @Inject
    private KubernetesVersionService kubernetesVersionService;

    /////////////////////////////////////////////////////
    //////////////// API parameters /////////////////////
    /////////////////////////////////////////////////////
    // use the inherited CommandType directly, consistent with the STATE parameter below
    @Parameter(name = ApiConstants.ID, type = CommandType.UUID,
            entityType = KubernetesSupportedVersionResponse.class,
            description = "the ID of the Kubernetes supported version",
            required = true)
    private Long id;

    @Parameter(name = ApiConstants.STATE, type = CommandType.STRING,
            description = "the enabled or disabled state of the Kubernetes supported version",
            required = true)
    private String state;

    /////////////////////////////////////////////////////
    /////////////////// Accessors ///////////////////////
    /////////////////////////////////////////////////////
    public Long getId() {
        return id;
    }

    public String getState() {
        return state;
    }

    @Override
    public String getCommandName() {
        return APINAME.toLowerCase() + "response";
    }

    @Override
    public long getEntityOwnerId() {
        // NOTE(review): sibling commands in this package return
        // CallContext.current().getCallingAccountId() here; returning 0 looks
        // inconsistent — confirm intended owner attribution before changing.
        return 0;
    }

    /////////////////////////////////////////////////////
    /////////////// API Implementation///////////////////
    /////////////////////////////////////////////////////
    /**
     * Delegates the state update to the version service and wraps failures
     * as a ServerApiException.
     */
    @Override
    public void execute() throws ServerApiException, ConcurrentOperationException {
        try {
            KubernetesSupportedVersionResponse response = kubernetesVersionService.updateKubernetesSupportedVersion(this);
            if (response == null) {
                throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to update Kubernetes supported version");
            }
            response.setResponseName(getCommandName());
            setResponseObject(response);
        } catch (CloudRuntimeException ex) {
            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage());
        }
    }
}

View File

@ -0,0 +1,297 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api.command.user.kubernetes.cluster;
import javax.inject.Inject;
import org.apache.cloudstack.acl.RoleType;
import org.apache.cloudstack.acl.SecurityChecker.AccessType;
import org.apache.cloudstack.api.ACL;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.ApiCommandJobType;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.ApiErrorCode;
import org.apache.cloudstack.api.BaseAsyncCreateCmd;
import org.apache.cloudstack.api.Parameter;
import org.apache.cloudstack.api.ResponseObject.ResponseView;
import org.apache.cloudstack.api.ServerApiException;
import org.apache.cloudstack.api.response.DomainResponse;
import org.apache.cloudstack.api.response.KubernetesClusterResponse;
import org.apache.cloudstack.api.response.KubernetesSupportedVersionResponse;
import org.apache.cloudstack.api.response.NetworkResponse;
import org.apache.cloudstack.api.response.ProjectResponse;
import org.apache.cloudstack.api.response.ServiceOfferingResponse;
import org.apache.cloudstack.api.response.ZoneResponse;
import org.apache.cloudstack.context.CallContext;
import org.apache.log4j.Logger;
import com.cloud.kubernetes.cluster.KubernetesCluster;
import com.cloud.kubernetes.cluster.KubernetesClusterEventTypes;
import com.cloud.kubernetes.cluster.KubernetesClusterService;
import com.cloud.utils.exception.CloudRuntimeException;
/**
 * API command for provisioning a new Kubernetes cluster.
 * <p>
 * {@link #create()} persists the cluster entity through
 * {@link KubernetesClusterService}; the async {@link #execute()} phase then
 * deploys/starts the cluster and builds the API response.
 */
@APICommand(name = CreateKubernetesClusterCmd.APINAME,
        responseObject = KubernetesClusterResponse.class,
        responseView = ResponseView.Restricted,
        description = "Creates a Kubernetes cluster",
        entityType = {KubernetesCluster.class},
        requestHasSensitiveInfo = false,
        responseHasSensitiveInfo = true,
        authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User})
public class CreateKubernetesClusterCmd extends BaseAsyncCreateCmd {
    public static final Logger LOGGER = Logger.getLogger(CreateKubernetesClusterCmd.class.getName());
    public static final String APINAME = "createKubernetesCluster";

    @Inject
    public KubernetesClusterService kubernetesClusterService;

    /////////////////////////////////////////////////////
    //////////////// API parameters /////////////////////
    /////////////////////////////////////////////////////

    @Parameter(name = ApiConstants.NAME, type = CommandType.STRING, required = true,
            description = "name for the Kubernetes cluster")
    private String name;

    @Parameter(name = ApiConstants.DESCRIPTION, type = CommandType.STRING, required = true,
            description = "description for the Kubernetes cluster")
    private String description;

    @ACL(accessType = AccessType.UseEntry)
    @Parameter(name = ApiConstants.ZONE_ID, type = CommandType.UUID, entityType = ZoneResponse.class, required = true,
            description = "availability zone in which Kubernetes cluster to be launched")
    private Long zoneId;

    @Parameter(name = ApiConstants.KUBERNETES_VERSION_ID, type = CommandType.UUID, entityType = KubernetesSupportedVersionResponse.class, required = true,
            description = "Kubernetes version with which cluster to be launched")
    private Long kubernetesVersionId;

    @ACL(accessType = AccessType.UseEntry)
    @Parameter(name = ApiConstants.SERVICE_OFFERING_ID, type = CommandType.UUID, entityType = ServiceOfferingResponse.class, required = true,
            description = "the ID of the service offering for the virtual machines in the cluster.")
    private Long serviceOfferingId;

    @ACL(accessType = AccessType.UseEntry)
    @Parameter(name = ApiConstants.ACCOUNT, type = CommandType.STRING,
            description = "an optional account for the virtual machine. Must be used with domainId.")
    private String accountName;

    @ACL(accessType = AccessType.UseEntry)
    @Parameter(name = ApiConstants.DOMAIN_ID, type = CommandType.UUID, entityType = DomainResponse.class,
            description = "an optional domainId for the virtual machine. If the account parameter is used, domainId must also be used.")
    private Long domainId;

    @ACL(accessType = AccessType.UseEntry)
    @Parameter(name = ApiConstants.PROJECT_ID, type = CommandType.UUID, entityType = ProjectResponse.class,
            description = "Deploy cluster for the project")
    private Long projectId;

    @ACL(accessType = AccessType.UseEntry)
    @Parameter(name = ApiConstants.NETWORK_ID, type = CommandType.UUID, entityType = NetworkResponse.class,
            description = "Network in which Kubernetes cluster is to be launched")
    private Long networkId;

    @ACL(accessType = AccessType.UseEntry)
    @Parameter(name = ApiConstants.SSH_KEYPAIR, type = CommandType.STRING,
            description = "name of the ssh key pair used to login to the virtual machines")
    private String sshKeyPairName;

    @Parameter(name = ApiConstants.MASTER_NODES, type = CommandType.LONG,
            description = "number of Kubernetes cluster master nodes, default is 1")
    private Long masterNodes;

    @Parameter(name = ApiConstants.EXTERNAL_LOAD_BALANCER_IP_ADDRESS, type = CommandType.STRING,
            description = "external load balancer IP address while using shared network with Kubernetes HA cluster")
    private String externalLoadBalancerIpAddress;

    @Parameter(name = ApiConstants.SIZE, type = CommandType.LONG, required = true,
            description = "number of Kubernetes cluster worker nodes")
    private Long clusterSize;

    @Parameter(name = ApiConstants.DOCKER_REGISTRY_USER_NAME, type = CommandType.STRING,
            description = "user name for the docker image private registry")
    private String dockerRegistryUserName;

    @Parameter(name = ApiConstants.DOCKER_REGISTRY_PASSWORD, type = CommandType.STRING,
            description = "password for the docker image private registry")
    private String dockerRegistryPassword;

    @Parameter(name = ApiConstants.DOCKER_REGISTRY_URL, type = CommandType.STRING,
            description = "URL for the docker image private registry")
    private String dockerRegistryUrl;

    @Parameter(name = ApiConstants.DOCKER_REGISTRY_EMAIL, type = CommandType.STRING,
            description = "email of the docker image private registry user")
    private String dockerRegistryEmail;

    @Parameter(name = ApiConstants.NODE_ROOT_DISK_SIZE, type = CommandType.LONG,
            description = "root disk size of root disk for each node")
    private Long nodeRootDiskSize;

    /////////////////////////////////////////////////////
    /////////////////// Accessors ///////////////////////
    /////////////////////////////////////////////////////

    /** @return the supplied account name, or the caller's own account when none was given */
    public String getAccountName() {
        return accountName != null ? accountName : CallContext.current().getCallingAccount().getAccountName();
    }

    /** The cluster description doubles as its display name. */
    public String getDisplayName() {
        return description;
    }

    /** @return the supplied domain ID, or the caller's own domain when none was given */
    public Long getDomainId() {
        return domainId != null ? domainId : CallContext.current().getCallingAccount().getDomainId();
    }

    public Long getServiceOfferingId() {
        return serviceOfferingId;
    }

    public Long getZoneId() {
        return zoneId;
    }

    public Long getKubernetesVersionId() {
        return kubernetesVersionId;
    }

    public Long getNetworkId() {
        return networkId;
    }

    public String getName() {
        return name;
    }

    public String getSSHKeyPairName() {
        return sshKeyPairName;
    }

    /** @return requested master-node count, defaulting to 1 when omitted */
    public Long getMasterNodes() {
        return masterNodes == null ? 1L : masterNodes;
    }

    public String getExternalLoadBalancerIpAddress() {
        return externalLoadBalancerIpAddress;
    }

    public Long getClusterSize() {
        return clusterSize;
    }

    public String getDockerRegistryUserName() {
        return dockerRegistryUserName;
    }

    public String getDockerRegistryPassword() {
        return dockerRegistryPassword;
    }

    public String getDockerRegistryUrl() {
        return dockerRegistryUrl;
    }

    public String getDockerRegistryEmail() {
        return dockerRegistryEmail;
    }

    public Long getNodeRootDiskSize() {
        return nodeRootDiskSize;
    }

    /////////////////////////////////////////////////////
    /////////////// API Implementation///////////////////
    /////////////////////////////////////////////////////

    @Override
    public String getCommandName() {
        // "response" is already lower-case, so lowering the concatenation is equivalent.
        return (APINAME + "response").toLowerCase();
    }

    public static String getResultObjectName() {
        return "kubernetescluster";
    }

    @Override
    public long getEntityOwnerId() {
        final Long accountId = _accountService.finalyzeAccountId(accountName, domainId, projectId, true);
        // When the account could not be resolved, attribute the entity to the caller.
        return accountId != null ? accountId : CallContext.current().getCallingAccount().getId();
    }

    @Override
    public String getEventType() {
        return KubernetesClusterEventTypes.EVENT_KUBERNETES_CLUSTER_CREATE;
    }

    @Override
    public String getCreateEventType() {
        return KubernetesClusterEventTypes.EVENT_KUBERNETES_CLUSTER_CREATE;
    }

    @Override
    public String getCreateEventDescription() {
        return "creating Kubernetes cluster";
    }

    @Override
    public String getEventDescription() {
        return "creating Kubernetes cluster. Cluster Id: " + getEntityId();
    }

    @Override
    public ApiCommandJobType getInstanceType() {
        return ApiCommandJobType.VirtualMachine;
    }

    /**
     * Async phase: starts the freshly created cluster (onCreate=true) and
     * attaches the cluster response.
     *
     * @throws ServerApiException when the start fails or the service raises a
     *         {@link CloudRuntimeException}
     */
    @Override
    public void execute() {
        try {
            if (!kubernetesClusterService.startKubernetesCluster(getEntityId(), true)) {
                throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to start Kubernetes cluster");
            }
            final KubernetesClusterResponse clusterResponse =
                    kubernetesClusterService.createKubernetesClusterResponse(getEntityId());
            clusterResponse.setResponseName(getCommandName());
            setResponseObject(clusterResponse);
        } catch (CloudRuntimeException e) {
            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage());
        }
    }

    /**
     * Sync phase: persists the cluster entity and records its id/uuid for the
     * async start that follows.
     *
     * @throws ServerApiException when the service returns no entity or raises a
     *         {@link CloudRuntimeException}
     */
    @Override
    public void create() throws CloudRuntimeException {
        try {
            final KubernetesCluster kubernetesCluster = kubernetesClusterService.createKubernetesCluster(this);
            if (kubernetesCluster == null) {
                throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to create Kubernetes cluster");
            }
            setEntityId(kubernetesCluster.getId());
            setEntityUuid(kubernetesCluster.getUuid());
        } catch (CloudRuntimeException e) {
            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage());
        }
    }
}

View File

@ -0,0 +1,109 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api.command.user.kubernetes.cluster;
import javax.inject.Inject;
import org.apache.cloudstack.acl.RoleType;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.ApiErrorCode;
import org.apache.cloudstack.api.BaseAsyncCmd;
import org.apache.cloudstack.api.Parameter;
import org.apache.cloudstack.api.ServerApiException;
import org.apache.cloudstack.api.response.KubernetesClusterResponse;
import org.apache.cloudstack.api.response.SuccessResponse;
import org.apache.cloudstack.context.CallContext;
import org.apache.log4j.Logger;
import com.cloud.exception.ConcurrentOperationException;
import com.cloud.kubernetes.cluster.KubernetesCluster;
import com.cloud.kubernetes.cluster.KubernetesClusterEventTypes;
import com.cloud.kubernetes.cluster.KubernetesClusterService;
import com.cloud.utils.exception.CloudRuntimeException;
/**
 * Async API command that deletes a Kubernetes cluster.
 * The deletion itself is delegated to {@link KubernetesClusterService}.
 */
@APICommand(name = DeleteKubernetesClusterCmd.APINAME,
        description = "Deletes a Kubernetes cluster",
        responseObject = SuccessResponse.class,
        entityType = {KubernetesCluster.class},
        authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User})
public class DeleteKubernetesClusterCmd extends BaseAsyncCmd {
    public static final Logger LOGGER = Logger.getLogger(DeleteKubernetesClusterCmd.class.getName());
    public static final String APINAME = "deleteKubernetesCluster";

    @Inject
    public KubernetesClusterService kubernetesClusterService;

    /////////////////////////////////////////////////////
    //////////////// API parameters /////////////////////
    /////////////////////////////////////////////////////

    @Parameter(name = ApiConstants.ID,
            type = CommandType.UUID,
            entityType = KubernetesClusterResponse.class,
            required = true,
            description = "the ID of the Kubernetes cluster")
    private Long id;

    /////////////////////////////////////////////////////
    /////////////////// Accessors ///////////////////////
    /////////////////////////////////////////////////////

    /** @return internal ID of the cluster to delete */
    public Long getId() {
        return id;
    }

    /////////////////////////////////////////////////////
    /////////////// API Implementation///////////////////
    /////////////////////////////////////////////////////

    /**
     * Deletes the cluster via the service layer and attaches a success response.
     *
     * @throws ServerApiException when deletion fails or the service raises a
     *         {@link CloudRuntimeException}
     */
    @Override
    public void execute() throws ServerApiException, ConcurrentOperationException {
        try {
            if (!kubernetesClusterService.deleteKubernetesCluster(id)) {
                throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to delete Kubernetes cluster ID: %d", getId()));
            }
            SuccessResponse response = new SuccessResponse(getCommandName());
            setResponseObject(response);
        } catch (CloudRuntimeException e) {
            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage());
        }
    }

    @Override
    public String getCommandName() {
        return APINAME.toLowerCase() + "response";
    }

    @Override
    public long getEntityOwnerId() {
        return CallContext.current().getCallingAccount().getId();
    }

    @Override
    public String getEventType() {
        return KubernetesClusterEventTypes.EVENT_KUBERNETES_CLUSTER_DELETE;
    }

    @Override
    public String getEventDescription() {
        KubernetesCluster cluster = _entityMgr.findById(KubernetesCluster.class, getId());
        // The cluster row may already be gone when the event is rendered; guard
        // against an NPE and fall back to the numeric ID in that case.
        return String.format("Deleting Kubernetes cluster ID: %s",
                cluster != null ? cluster.getUuid() : getId());
    }
}

View File

@ -0,0 +1,98 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api.command.user.kubernetes.cluster;
import javax.inject.Inject;
import org.apache.cloudstack.acl.RoleType;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.ApiErrorCode;
import org.apache.cloudstack.api.BaseCmd;
import org.apache.cloudstack.api.Parameter;
import org.apache.cloudstack.api.ResponseObject;
import org.apache.cloudstack.api.ServerApiException;
import org.apache.cloudstack.api.response.KubernetesClusterConfigResponse;
import org.apache.cloudstack.api.response.KubernetesClusterResponse;
import org.apache.cloudstack.context.CallContext;
import org.apache.log4j.Logger;
import com.cloud.kubernetes.cluster.KubernetesClusterService;
import com.cloud.user.Account;
import com.cloud.utils.exception.CloudRuntimeException;
/**
 * API command that fetches the access configuration (kubeconfig) of a
 * Kubernetes cluster. Resolution of the config is delegated to
 * {@link KubernetesClusterService}.
 */
@APICommand(name = GetKubernetesClusterConfigCmd.APINAME,
        responseObject = KubernetesClusterConfigResponse.class,
        responseView = ResponseObject.ResponseView.Restricted,
        description = "Get Kubernetes cluster config",
        requestHasSensitiveInfo = false,
        responseHasSensitiveInfo = true,
        authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User})
public class GetKubernetesClusterConfigCmd extends BaseCmd {
    public static final Logger LOGGER = Logger.getLogger(GetKubernetesClusterConfigCmd.class.getName());
    public static final String APINAME = "getKubernetesClusterConfig";

    @Inject
    public KubernetesClusterService kubernetesClusterService;

    /////////////////////////////////////////////////////
    //////////////// API parameters /////////////////////
    /////////////////////////////////////////////////////

    @Parameter(name = ApiConstants.ID,
            type = CommandType.UUID,
            entityType = KubernetesClusterResponse.class,
            description = "the ID of the Kubernetes cluster")
    private Long id;

    /////////////////////////////////////////////////////
    /////////////////// Accessors ///////////////////////
    /////////////////////////////////////////////////////

    /** @return internal ID of the cluster whose config is requested */
    public Long getId() {
        return id;
    }

    /////////////////////////////////////////////////////
    /////////////// API Implementation///////////////////
    /////////////////////////////////////////////////////

    @Override
    public long getEntityOwnerId() {
        final Account account = CallContext.current().getCallingAccount();
        if (account == null) {
            return Account.ACCOUNT_ID_SYSTEM; // no account info given, parent this command to SYSTEM so ERROR events are tracked
        }
        return account.getId();
    }

    @Override
    public String getCommandName() {
        // "response" is already lower-case, so lowering the concatenation is equivalent.
        return (APINAME + "response").toLowerCase();
    }

    /**
     * Retrieves the cluster config from the service layer and attaches it as
     * the response.
     *
     * @throws ServerApiException when the service raises a {@link CloudRuntimeException}
     */
    @Override
    public void execute() throws ServerApiException {
        try {
            final KubernetesClusterConfigResponse configResponse =
                    kubernetesClusterService.getKubernetesClusterConfig(this);
            configResponse.setResponseName(getCommandName());
            setResponseObject(configResponse);
        } catch (CloudRuntimeException e) {
            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage());
        }
    }
}

View File

@ -0,0 +1,100 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api.command.user.kubernetes.cluster;
import javax.inject.Inject;
import org.apache.cloudstack.acl.RoleType;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.ApiErrorCode;
import org.apache.cloudstack.api.BaseListProjectAndAccountResourcesCmd;
import org.apache.cloudstack.api.Parameter;
import org.apache.cloudstack.api.ResponseObject.ResponseView;
import org.apache.cloudstack.api.ServerApiException;
import org.apache.cloudstack.api.response.KubernetesClusterResponse;
import org.apache.cloudstack.api.response.ListResponse;
import org.apache.log4j.Logger;
import com.cloud.kubernetes.cluster.KubernetesClusterService;
import com.cloud.utils.exception.CloudRuntimeException;
/**
 * API command that lists Kubernetes clusters, optionally filtered by ID,
 * state, or (substring-matched) name. The query is delegated to
 * {@link KubernetesClusterService}.
 */
@APICommand(name = ListKubernetesClustersCmd.APINAME,
        responseObject = KubernetesClusterResponse.class,
        responseView = ResponseView.Restricted,
        description = "Lists Kubernetes clusters",
        requestHasSensitiveInfo = false,
        responseHasSensitiveInfo = true,
        authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User})
public class ListKubernetesClustersCmd extends BaseListProjectAndAccountResourcesCmd {
    public static final Logger LOGGER = Logger.getLogger(ListKubernetesClustersCmd.class.getName());
    public static final String APINAME = "listKubernetesClusters";

    @Inject
    public KubernetesClusterService kubernetesClusterService;

    /////////////////////////////////////////////////////
    //////////////// API parameters /////////////////////
    /////////////////////////////////////////////////////

    @Parameter(name = ApiConstants.ID,
            type = CommandType.UUID,
            entityType = KubernetesClusterResponse.class,
            description = "the ID of the Kubernetes cluster")
    private Long id;

    @Parameter(name = ApiConstants.STATE, type = CommandType.STRING,
            description = "state of the Kubernetes cluster")
    private String state;

    @Parameter(name = ApiConstants.NAME, type = CommandType.STRING,
            description = "name of the Kubernetes cluster (a substring match is made against the parameter value, data for all matching Kubernetes clusters will be returned)")
    private String name;

    /////////////////////////////////////////////////////
    /////////////////// Accessors ///////////////////////
    /////////////////////////////////////////////////////

    /** @return optional cluster ID filter */
    public Long getId() {
        return id;
    }

    /** @return optional cluster state filter */
    public String getState() {
        return state;
    }

    /** @return optional cluster name (substring) filter */
    public String getName() {
        return name;
    }

    /////////////////////////////////////////////////////
    /////////////// API Implementation///////////////////
    /////////////////////////////////////////////////////

    @Override
    public String getCommandName() {
        // "response" is already lower-case, so lowering the concatenation is equivalent.
        return (APINAME + "response").toLowerCase();
    }

    /**
     * Runs the list query via the service layer and attaches the result.
     *
     * @throws ServerApiException when the service raises a {@link CloudRuntimeException}
     */
    @Override
    public void execute() throws ServerApiException {
        try {
            final ListResponse<KubernetesClusterResponse> listResponse =
                    kubernetesClusterService.listKubernetesClusters(this);
            listResponse.setResponseName(getCommandName());
            setResponseObject(listResponse);
        } catch (CloudRuntimeException e) {
            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage());
        }
    }
}

View File

@ -0,0 +1,128 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api.command.user.kubernetes.cluster;
import javax.inject.Inject;
import org.apache.cloudstack.acl.RoleType;
import org.apache.cloudstack.acl.SecurityChecker;
import org.apache.cloudstack.api.ACL;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.ApiErrorCode;
import org.apache.cloudstack.api.BaseAsyncCmd;
import org.apache.cloudstack.api.Parameter;
import org.apache.cloudstack.api.ResponseObject;
import org.apache.cloudstack.api.ServerApiException;
import org.apache.cloudstack.api.response.KubernetesClusterResponse;
import org.apache.cloudstack.api.response.ServiceOfferingResponse;
import org.apache.cloudstack.context.CallContext;
import org.apache.log4j.Logger;
import com.cloud.exception.ConcurrentOperationException;
import com.cloud.kubernetes.cluster.KubernetesCluster;
import com.cloud.kubernetes.cluster.KubernetesClusterEventTypes;
import com.cloud.kubernetes.cluster.KubernetesClusterService;
import com.cloud.utils.exception.CloudRuntimeException;
/**
 * Async API command that scales a Kubernetes cluster, either by changing the
 * node service offering, the worker-node count, or both. The scaling logic is
 * delegated to {@link KubernetesClusterService}.
 */
@APICommand(name = ScaleKubernetesClusterCmd.APINAME,
        description = "Scales a created, running or stopped Kubernetes cluster",
        responseObject = KubernetesClusterResponse.class,
        responseView = ResponseObject.ResponseView.Restricted,
        entityType = {KubernetesCluster.class},
        requestHasSensitiveInfo = false,
        responseHasSensitiveInfo = true,
        authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User})
public class ScaleKubernetesClusterCmd extends BaseAsyncCmd {
    // Fix: was Logger.getLogger(StartKubernetesClusterCmd.class...) — a copy-paste
    // error that attributed this command's log output to the wrong class.
    public static final Logger LOGGER = Logger.getLogger(ScaleKubernetesClusterCmd.class.getName());
    public static final String APINAME = "scaleKubernetesCluster";

    @Inject
    public KubernetesClusterService kubernetesClusterService;

    /////////////////////////////////////////////////////
    //////////////// API parameters /////////////////////
    /////////////////////////////////////////////////////

    @Parameter(name = ApiConstants.ID, type = CommandType.UUID,
            entityType = KubernetesClusterResponse.class,
            description = "the ID of the Kubernetes cluster")
    private Long id;

    @ACL(accessType = SecurityChecker.AccessType.UseEntry)
    @Parameter(name = ApiConstants.SERVICE_OFFERING_ID, type = CommandType.UUID, entityType = ServiceOfferingResponse.class,
            description = "the ID of the service offering for the virtual machines in the cluster.")
    private Long serviceOfferingId;

    @Parameter(name = ApiConstants.SIZE, type = CommandType.LONG,
            description = "number of Kubernetes cluster nodes")
    private Long clusterSize;

    /////////////////////////////////////////////////////
    /////////////////// Accessors ///////////////////////
    /////////////////////////////////////////////////////

    /** @return internal ID of the cluster to scale */
    public Long getId() {
        return id;
    }

    /** @return new service offering for the cluster nodes, or null to keep the current one */
    public Long getServiceOfferingId() {
        return serviceOfferingId;
    }

    /** @return new node count, or null to keep the current size */
    public Long getClusterSize() {
        return clusterSize;
    }

    @Override
    public String getEventType() {
        return KubernetesClusterEventTypes.EVENT_KUBERNETES_CLUSTER_SCALE;
    }

    @Override
    public String getEventDescription() {
        KubernetesCluster cluster = _entityMgr.findById(KubernetesCluster.class, getId());
        // The cluster may not be resolvable when the event is rendered; guard
        // against an NPE and fall back to the numeric ID in that case.
        return String.format("Scaling Kubernetes cluster ID: %s",
                cluster != null ? cluster.getUuid() : getId());
    }

    @Override
    public String getCommandName() {
        return APINAME.toLowerCase() + "response";
    }

    @Override
    public long getEntityOwnerId() {
        return CallContext.current().getCallingAccount().getId();
    }

    /////////////////////////////////////////////////////
    /////////////// API Implementation///////////////////
    /////////////////////////////////////////////////////

    /**
     * Scales the cluster via the service layer and attaches the updated
     * cluster response.
     *
     * @throws ServerApiException when scaling fails or the service raises a
     *         {@link CloudRuntimeException}
     */
    @Override
    public void execute() throws ServerApiException, ConcurrentOperationException {
        try {
            if (!kubernetesClusterService.scaleKubernetesCluster(this)) {
                throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to scale Kubernetes cluster ID: %d", getId()));
            }
            final KubernetesClusterResponse response = kubernetesClusterService.createKubernetesClusterResponse(getId());
            response.setResponseName(getCommandName());
            setResponseObject(response);
        } catch (CloudRuntimeException ex) {
            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage());
        }
    }
}

View File

@ -0,0 +1,120 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api.command.user.kubernetes.cluster;
import javax.inject.Inject;
import org.apache.cloudstack.acl.RoleType;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.ApiErrorCode;
import org.apache.cloudstack.api.BaseAsyncCmd;
import org.apache.cloudstack.api.Parameter;
import org.apache.cloudstack.api.ResponseObject;
import org.apache.cloudstack.api.ServerApiException;
import org.apache.cloudstack.api.response.KubernetesClusterResponse;
import org.apache.cloudstack.context.CallContext;
import org.apache.log4j.Logger;
import com.cloud.exception.ConcurrentOperationException;
import com.cloud.kubernetes.cluster.KubernetesCluster;
import com.cloud.kubernetes.cluster.KubernetesClusterEventTypes;
import com.cloud.kubernetes.cluster.KubernetesClusterService;
import com.cloud.utils.exception.CloudRuntimeException;
/**
 * Async API command that starts a stopped Kubernetes cluster. The start is
 * delegated to {@link KubernetesClusterService}.
 */
@APICommand(name = StartKubernetesClusterCmd.APINAME, description = "Starts a stopped Kubernetes cluster",
        responseObject = KubernetesClusterResponse.class,
        responseView = ResponseObject.ResponseView.Restricted,
        entityType = {KubernetesCluster.class},
        requestHasSensitiveInfo = false,
        responseHasSensitiveInfo = true,
        authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User})
public class StartKubernetesClusterCmd extends BaseAsyncCmd {
    public static final Logger LOGGER = Logger.getLogger(StartKubernetesClusterCmd.class.getName());
    public static final String APINAME = "startKubernetesCluster";

    @Inject
    public KubernetesClusterService kubernetesClusterService;

    /////////////////////////////////////////////////////
    //////////////// API parameters /////////////////////
    /////////////////////////////////////////////////////

    @Parameter(name = ApiConstants.ID, type = CommandType.UUID,
            entityType = KubernetesClusterResponse.class, required = true,
            description = "the ID of the Kubernetes cluster")
    private Long id;

    /////////////////////////////////////////////////////
    /////////////////// Accessors ///////////////////////
    /////////////////////////////////////////////////////

    /** @return internal ID of the cluster to start */
    public Long getId() {
        return id;
    }

    @Override
    public String getEventType() {
        return KubernetesClusterEventTypes.EVENT_KUBERNETES_CLUSTER_START;
    }

    @Override
    public String getEventDescription() {
        KubernetesCluster cluster = _entityMgr.findById(KubernetesCluster.class, getId());
        // The cluster may not be resolvable when the event is rendered; guard
        // against an NPE and fall back to the numeric ID in that case.
        return String.format("Starting Kubernetes cluster ID: %s",
                cluster != null ? cluster.getUuid() : getId());
    }

    @Override
    public String getCommandName() {
        return APINAME.toLowerCase() + "response";
    }

    @Override
    public long getEntityOwnerId() {
        return CallContext.current().getCallingAccount().getId();
    }

    /////////////////////////////////////////////////////
    /////////////// API Implementation///////////////////
    /////////////////////////////////////////////////////

    /**
     * Validates the supplied cluster ID and resolves the cluster entity.
     *
     * @return the resolved cluster
     * @throws ServerApiException when the ID is missing/invalid or no cluster exists for it
     */
    public KubernetesCluster validateRequest() {
        if (getId() == null || getId() < 1L) {
            throw new ServerApiException(ApiErrorCode.PARAM_ERROR, "Invalid Kubernetes cluster ID provided");
        }
        final KubernetesCluster kubernetesCluster = kubernetesClusterService.findById(getId());
        if (kubernetesCluster == null) {
            throw new ServerApiException(ApiErrorCode.PARAM_ERROR, "Given Kubernetes cluster was not found");
        }
        return kubernetesCluster;
    }

    /**
     * Starts the cluster (onCreate=false) via the service layer and attaches
     * the updated cluster response.
     *
     * @throws ServerApiException when the start fails or the service raises a
     *         {@link CloudRuntimeException}
     */
    @Override
    public void execute() throws ServerApiException, ConcurrentOperationException {
        final KubernetesCluster kubernetesCluster = validateRequest();
        try {
            if (!kubernetesClusterService.startKubernetesCluster(kubernetesCluster.getId(), false)) {
                throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to start Kubernetes cluster ID: %d", getId()));
            }
            final KubernetesClusterResponse response = kubernetesClusterService.createKubernetesClusterResponse(kubernetesCluster.getId());
            response.setResponseName(getCommandName());
            setResponseObject(response);
        } catch (CloudRuntimeException ex) {
            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage());
        }
    }
}

View File

@ -0,0 +1,108 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api.command.user.kubernetes.cluster;
import javax.inject.Inject;
import org.apache.cloudstack.acl.RoleType;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.ApiErrorCode;
import org.apache.cloudstack.api.BaseAsyncCmd;
import org.apache.cloudstack.api.Parameter;
import org.apache.cloudstack.api.ResponseObject;
import org.apache.cloudstack.api.ServerApiException;
import org.apache.cloudstack.api.response.KubernetesClusterResponse;
import org.apache.cloudstack.api.response.SuccessResponse;
import org.apache.cloudstack.context.CallContext;
import org.apache.log4j.Logger;
import com.cloud.exception.ConcurrentOperationException;
import com.cloud.kubernetes.cluster.KubernetesCluster;
import com.cloud.kubernetes.cluster.KubernetesClusterEventTypes;
import com.cloud.kubernetes.cluster.KubernetesClusterService;
import com.cloud.utils.exception.CloudRuntimeException;
@APICommand(name = StopKubernetesClusterCmd.APINAME, description = "Stops a running Kubernetes cluster",
        responseObject = SuccessResponse.class,
        responseView = ResponseObject.ResponseView.Restricted,
        entityType = {KubernetesCluster.class},
        requestHasSensitiveInfo = false,
        responseHasSensitiveInfo = true,
        authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User})
public class StopKubernetesClusterCmd extends BaseAsyncCmd {
    public static final Logger LOGGER = Logger.getLogger(StopKubernetesClusterCmd.class.getName());
    public static final String APINAME = "stopKubernetesCluster";

    @Inject
    public KubernetesClusterService kubernetesClusterService;

    /////////////////////////////////////////////////////
    //////////////// API parameters /////////////////////
    /////////////////////////////////////////////////////

    @Parameter(name = ApiConstants.ID, type = CommandType.UUID,
            entityType = KubernetesClusterResponse.class, required = true,
            description = "the ID of the Kubernetes cluster")
    private Long id;

    /////////////////////////////////////////////////////
    /////////////////// Accessors ///////////////////////
    /////////////////////////////////////////////////////

    public Long getId() {
        return id;
    }

    @Override
    public String getEventType() {
        return KubernetesClusterEventTypes.EVENT_KUBERNETES_CLUSTER_STOP;
    }

    @Override
    public String getEventDescription() {
        // Resolve the cluster only to render its UUID in the async-event description.
        KubernetesCluster cluster = _entityMgr.findById(KubernetesCluster.class, getId());
        return String.format("Stopping Kubernetes cluster ID: %s", cluster.getUuid());
    }

    @Override
    public String getCommandName() {
        return APINAME.toLowerCase() + "response";
    }

    @Override
    public long getEntityOwnerId() {
        return CallContext.current().getCallingAccount().getId();
    }

    /////////////////////////////////////////////////////
    /////////////// API Implementation///////////////////
    /////////////////////////////////////////////////////

    /**
     * Stops the Kubernetes cluster identified by {@link #getId()} and returns a
     * {@link SuccessResponse}; service failures are rethrown as internal errors.
     */
    @Override
    public void execute() throws ServerApiException, ConcurrentOperationException {
        try {
            if (!kubernetesClusterService.stopKubernetesCluster(getId())) {
                // Fixed copy-paste from StartKubernetesClusterCmd: message previously said "start".
                throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to stop Kubernetes cluster ID: %d", getId()));
            }
            final SuccessResponse response = new SuccessResponse(getCommandName());
            setResponseObject(response);
        } catch (CloudRuntimeException ex) {
            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage());
        }
    }
}

View File

@ -0,0 +1,118 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api.command.user.kubernetes.cluster;
import javax.inject.Inject;
import org.apache.cloudstack.acl.RoleType;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.ApiErrorCode;
import org.apache.cloudstack.api.BaseAsyncCmd;
import org.apache.cloudstack.api.Parameter;
import org.apache.cloudstack.api.ResponseObject;
import org.apache.cloudstack.api.ServerApiException;
import org.apache.cloudstack.api.response.KubernetesClusterResponse;
import org.apache.cloudstack.api.response.KubernetesSupportedVersionResponse;
import org.apache.cloudstack.context.CallContext;
import org.apache.log4j.Logger;
import com.cloud.exception.ConcurrentOperationException;
import com.cloud.kubernetes.cluster.KubernetesCluster;
import com.cloud.kubernetes.cluster.KubernetesClusterEventTypes;
import com.cloud.kubernetes.cluster.KubernetesClusterService;
import com.cloud.utils.exception.CloudRuntimeException;
@APICommand(name = UpgradeKubernetesClusterCmd.APINAME, description = "Upgrades a running Kubernetes cluster",
        responseObject = KubernetesClusterResponse.class,
        responseView = ResponseObject.ResponseView.Restricted,
        entityType = {KubernetesCluster.class},
        requestHasSensitiveInfo = false,
        responseHasSensitiveInfo = true,
        authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User})
public class UpgradeKubernetesClusterCmd extends BaseAsyncCmd {
    public static final Logger LOGGER = Logger.getLogger(UpgradeKubernetesClusterCmd.class.getName());
    public static final String APINAME = "upgradeKubernetesCluster";

    @Inject
    public KubernetesClusterService kubernetesClusterService;

    /////////////////////////////////////////////////////
    //////////////// API parameters /////////////////////
    /////////////////////////////////////////////////////

    @Parameter(name = ApiConstants.ID, type = CommandType.UUID,
            entityType = KubernetesClusterResponse.class, required = true,
            description = "the ID of the Kubernetes cluster")
    private Long id;

    @Parameter(name = ApiConstants.KUBERNETES_VERSION_ID, type = CommandType.UUID,
            entityType = KubernetesSupportedVersionResponse.class, required = true,
            description = "the ID of the Kubernetes version for upgrade")
    private Long kubernetesVersionId;

    /////////////////////////////////////////////////////
    /////////////////// Accessors ///////////////////////
    /////////////////////////////////////////////////////

    public Long getId() {
        return id;
    }

    public Long getKubernetesVersionId() {
        return kubernetesVersionId;
    }

    @Override
    public String getEventType() {
        return KubernetesClusterEventTypes.EVENT_KUBERNETES_CLUSTER_UPGRADE;
    }

    @Override
    public String getEventDescription() {
        // Resolve the cluster only to render its UUID in the async-event description.
        final KubernetesCluster targetCluster = _entityMgr.findById(KubernetesCluster.class, getId());
        return String.format("Upgrading Kubernetes cluster ID: %s", targetCluster.getUuid());
    }

    @Override
    public String getCommandName() {
        return APINAME.toLowerCase() + "response";
    }

    @Override
    public long getEntityOwnerId() {
        return CallContext.current().getCallingAccount().getId();
    }

    /////////////////////////////////////////////////////
    /////////////// API Implementation///////////////////
    /////////////////////////////////////////////////////

    /**
     * Delegates the upgrade to the cluster service (which reads the ID and target
     * version from this command) and returns the refreshed cluster response.
     */
    @Override
    public void execute() throws ServerApiException, ConcurrentOperationException {
        try {
            final boolean upgraded = kubernetesClusterService.upgradeKubernetesCluster(this);
            if (!upgraded) {
                throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to upgrade Kubernetes cluster ID: %d", getId()));
            }
            final KubernetesClusterResponse clusterResponse = kubernetesClusterService.createKubernetesClusterResponse(getId());
            clusterResponse.setResponseName(getCommandName());
            setResponseObject(clusterResponse);
        } catch (CloudRuntimeException ex) {
            // Surface service-layer failures as API-level internal errors.
            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage());
        }
    }
}

View File

@ -0,0 +1,109 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api.command.user.kubernetes.version;
import javax.inject.Inject;
import org.apache.cloudstack.acl.RoleType;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.BaseListCmd;
import org.apache.cloudstack.api.Parameter;
import org.apache.cloudstack.api.ResponseObject;
import org.apache.cloudstack.api.ServerApiException;
import org.apache.cloudstack.api.response.KubernetesSupportedVersionResponse;
import org.apache.cloudstack.api.response.ListResponse;
import org.apache.cloudstack.api.response.ZoneResponse;
import org.apache.log4j.Logger;
import com.cloud.exception.ConcurrentOperationException;
import com.cloud.kubernetes.version.KubernetesVersionService;
import com.google.common.base.Strings;
@APICommand(name = ListKubernetesSupportedVersionsCmd.APINAME,
        description = "Lists supported Kubernetes versions",
        responseObject = KubernetesSupportedVersionResponse.class,
        responseView = ResponseObject.ResponseView.Restricted,
        authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User})
public class ListKubernetesSupportedVersionsCmd extends BaseListCmd {
    public static final Logger LOGGER = Logger.getLogger(ListKubernetesSupportedVersionsCmd.class.getName());
    public static final String APINAME = "listKubernetesSupportedVersions";

    @Inject
    private KubernetesVersionService kubernetesVersionService;

    /////////////////////////////////////////////////////
    //////////////// API parameters /////////////////////
    /////////////////////////////////////////////////////

    @Parameter(name = ApiConstants.ID, type = CommandType.UUID,
            entityType = KubernetesSupportedVersionResponse.class,
            description = "the ID of the Kubernetes supported version")
    private Long id;

    @Parameter(name = ApiConstants.ZONE_ID, type = CommandType.UUID,
            entityType = ZoneResponse.class,
            description = "the ID of the zone in which Kubernetes supported version will be available")
    private Long zoneId;

    @Parameter(name = ApiConstants.MIN_SEMANTIC_VERSION, type = CommandType.STRING,
            description = "the minimum semantic version for the Kubernetes supported version to be listed")
    private String minimumSemanticVersion;

    @Parameter(name = ApiConstants.MIN_KUBERNETES_VERSION_ID, type = CommandType.UUID,
            entityType = KubernetesSupportedVersionResponse.class,
            description = "the ID of the minimum Kubernetes supported version")
    private Long minimumKubernetesVersionId;

    /////////////////////////////////////////////////////
    /////////////////// Accessors ///////////////////////
    /////////////////////////////////////////////////////

    public Long getId() {
        return id;
    }

    public Long getZoneId() {
        return zoneId;
    }

    /**
     * Returns the minimum semantic version filter, validating that it is a
     * dotted numeric string (e.g. "1.16" or "1.16.3") when present.
     *
     * @throws IllegalArgumentException when the supplied value is not of the
     *         form digits separated by dots
     */
    public String getMinimumSemanticVersion() {
        if(!Strings.isNullOrEmpty(minimumSemanticVersion) &&
                !minimumSemanticVersion.matches("[0-9]+(\\.[0-9]+)*")) {
            throw new IllegalArgumentException("Invalid version format");
        }
        return minimumSemanticVersion;
    }

    public Long getMinimumKubernetesVersionId() {
        return minimumKubernetesVersionId;
    }

    @Override
    public String getCommandName() {
        return APINAME.toLowerCase() + "response";
    }

    /////////////////////////////////////////////////////
    /////////////// API Implementation///////////////////
    /////////////////////////////////////////////////////

    /**
     * Lists the Kubernetes supported versions matching this command's filters.
     * Note: the @APICommand description previously read "Lists container clusters",
     * a copy-paste from the cluster-listing command; corrected here.
     */
    @Override
    public void execute() throws ServerApiException, ConcurrentOperationException {
        ListResponse<KubernetesSupportedVersionResponse> response = kubernetesVersionService.listKubernetesSupportedVersions(this);
        response.setResponseName(getCommandName());
        setResponseObject(response);
    }
}

View File

@ -0,0 +1,61 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api.response;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.BaseResponse;
import com.cloud.serializer.Param;
import com.google.gson.annotations.SerializedName;
/**
 * API response carrying the access configuration (kubeconfig data) for a
 * Kubernetes cluster: the cluster's id, name, and its raw config payload.
 */
public class KubernetesClusterConfigResponse extends BaseResponse {
@SerializedName(ApiConstants.ID)
@Param(description = "the id of the container cluster")
private String id;
@SerializedName(ApiConstants.NAME)
@Param(description = "Name of the container cluster")
private String name;
// Serialized under the literal key "configdata"; no ApiConstants entry exists for it.
@SerializedName("configdata")
@Param(description = "the config data of the cluster")
private String configData;
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getConfigData() {
return configData;
}
public void setConfigData(String configData) {
this.configData = configData;
}
}

View File

@ -0,0 +1,329 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api.response;
import java.util.List;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.BaseResponse;
import org.apache.cloudstack.api.EntityReference;
import com.cloud.kubernetes.cluster.KubernetesCluster;
import com.cloud.serializer.Param;
import com.google.gson.annotations.SerializedName;
@SuppressWarnings("unused")
@EntityReference(value = {KubernetesCluster.class})
/**
 * API response describing a Kubernetes cluster: identity, placement (zone,
 * network), offering/template, Kubernetes version, ownership (account, project,
 * domain), sizing (master nodes, worker count, cpu, memory), state, access
 * endpoints, and the member VM IDs. Field serialization names come from the
 * {@code @SerializedName} annotations; {@code @Param} provides API docs.
 */
public class KubernetesClusterResponse extends BaseResponse implements ControlledEntityResponse {
    @SerializedName(ApiConstants.ID)
    @Param(description = "the id of the Kubernetes cluster")
    private String id;

    @SerializedName(ApiConstants.NAME)
    @Param(description = "the name of the Kubernetes cluster")
    private String name;

    @SerializedName(ApiConstants.DESCRIPTION)
    @Param(description = "the description of the Kubernetes cluster")
    private String description;

    @SerializedName(ApiConstants.ZONE_ID)
    @Param(description = "the name of the zone of the Kubernetes cluster")
    private String zoneId;

    @SerializedName(ApiConstants.ZONE_NAME)
    @Param(description = "the name of the zone of the Kubernetes cluster")
    private String zoneName;

    @SerializedName(ApiConstants.SERVICE_OFFERING_ID)
    @Param(description = "the ID of the service offering of the Kubernetes cluster")
    private String serviceOfferingId;

    @SerializedName("serviceofferingname")
    @Param(description = "the name of the service offering of the Kubernetes cluster")
    private String serviceOfferingName;

    @SerializedName(ApiConstants.TEMPLATE_ID)
    @Param(description = "the ID of the template of the Kubernetes cluster")
    private String templateId;

    @SerializedName(ApiConstants.NETWORK_ID)
    @Param(description = "the ID of the network of the Kubernetes cluster")
    private String networkId;

    @SerializedName(ApiConstants.ASSOCIATED_NETWORK_NAME)
    @Param(description = "the name of the network of the Kubernetes cluster")
    private String associatedNetworkName;

    @SerializedName(ApiConstants.KUBERNETES_VERSION_ID)
    @Param(description = "the ID of the Kubernetes version for the Kubernetes cluster")
    private String kubernetesVersionId;

    @SerializedName(ApiConstants.KUBERNETES_VERSION_NAME)
    @Param(description = "the name of the Kubernetes version for the Kubernetes cluster")
    private String kubernetesVersionName;

    @SerializedName(ApiConstants.ACCOUNT)
    @Param(description = "the account associated with the Kubernetes cluster")
    private String accountName;

    @SerializedName(ApiConstants.PROJECT_ID)
    @Param(description = "the project id of the Kubernetes cluster")
    private String projectId;

    @SerializedName(ApiConstants.PROJECT)
    @Param(description = "the project name of the Kubernetes cluster")
    private String projectName;

    @SerializedName(ApiConstants.DOMAIN_ID)
    @Param(description = "the ID of the domain in which the Kubernetes cluster exists")
    private String domainId;

    @SerializedName(ApiConstants.DOMAIN)
    @Param(description = "the name of the domain in which the Kubernetes cluster exists")
    private String domainName;

    @SerializedName(ApiConstants.SSH_KEYPAIR)
    @Param(description = "keypair details")
    private String keypair;

    @SerializedName(ApiConstants.MASTER_NODES)
    @Param(description = "the master nodes count for the Kubernetes cluster")
    private Long masterNodes;

    @SerializedName(ApiConstants.SIZE)
    @Param(description = "the size (worker nodes count) of the Kubernetes cluster")
    private Long clusterSize;

    @SerializedName(ApiConstants.STATE)
    @Param(description = "the state of the Kubernetes cluster")
    private String state;

    @SerializedName(ApiConstants.CPU_NUMBER)
    @Param(description = "the cpu cores of the Kubernetes cluster")
    private String cores;

    @SerializedName(ApiConstants.MEMORY)
    @Param(description = "the memory the Kubernetes cluster")
    private String memory;

    @SerializedName(ApiConstants.END_POINT)
    @Param(description = "URL end point for the Kubernetes cluster")
    private String endpoint;

    // NOTE(review): no getter/setter exists for consoleEndpoint; it is only
    // reachable via serialization — confirm this is intentional.
    @SerializedName(ApiConstants.CONSOLE_END_POINT)
    @Param(description = "URL end point for the Kubernetes cluster dashboard UI")
    private String consoleEndpoint;

    @SerializedName(ApiConstants.VIRTUAL_MACHINE_IDS)
    @Param(description = "the list of virtualmachine IDs associated with this Kubernetes cluster")
    private List<String> virtualMachineIds;

    public KubernetesClusterResponse() {
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    public String getDescription() {
        return description;
    }

    public void setDescription(String description) {
        this.description = description;
    }

    public String getZoneId() {
        return zoneId;
    }

    public void setZoneId(String zoneId) {
        this.zoneId = zoneId;
    }

    public String getZoneName() {
        return zoneName;
    }

    public void setZoneName(String zoneName) {
        this.zoneName = zoneName;
    }

    public String getServiceOfferingId() {
        return serviceOfferingId;
    }

    public void setServiceOfferingId(String serviceOfferingId) {
        this.serviceOfferingId = serviceOfferingId;
    }

    public String getTemplateId() {
        return templateId;
    }

    public void setTemplateId(String templateId) {
        this.templateId = templateId;
    }

    public String getNetworkId() {
        return networkId;
    }

    public void setNetworkId(String networkId) {
        this.networkId = networkId;
    }

    public String getAssociatedNetworkName() {
        return associatedNetworkName;
    }

    public void setAssociatedNetworkName(String associatedNetworkName) {
        this.associatedNetworkName = associatedNetworkName;
    }

    public String getKubernetesVersionId() {
        return kubernetesVersionId;
    }

    public void setKubernetesVersionId(String kubernetesVersionId) {
        this.kubernetesVersionId = kubernetesVersionId;
    }

    public String getKubernetesVersionName() {
        return kubernetesVersionName;
    }

    public void setKubernetesVersionName(String kubernetesVersionName) {
        this.kubernetesVersionName = kubernetesVersionName;
    }

    public String getProjectId() {
        return projectId;
    }

    @Override
    public void setAccountName(String accountName) {
        this.accountName = accountName;
    }

    @Override
    public void setProjectId(String projectId) {
        this.projectId = projectId;
    }

    @Override
    public void setProjectName(String projectName) {
        this.projectName = projectName;
    }

    @Override
    public void setDomainId(String domainId) {
        this.domainId = domainId;
    }

    @Override
    public void setDomainName(String domainName) {
        this.domainName = domainName;
    }

    public String getKeypair() {
        return keypair;
    }

    public void setKeypair(String keypair) {
        this.keypair = keypair;
    }

    public Long getMasterNodes() {
        return masterNodes;
    }

    public void setMasterNodes(Long masterNodes) {
        this.masterNodes = masterNodes;
    }

    public Long getClusterSize() {
        return clusterSize;
    }

    public void setClusterSize(Long clusterSize) {
        this.clusterSize = clusterSize;
    }

    public String getCores() {
        return cores;
    }

    public void setCores(String cores) {
        this.cores = cores;
    }

    public String getMemory() {
        return memory;
    }

    public void setMemory(String memory) {
        this.memory = memory;
    }

    public String getState() {
        return state;
    }

    public void setState(String state) {
        this.state = state;
    }

    public String getEndpoint() {
        return endpoint;
    }

    public void setEndpoint(String endpoint) {
        this.endpoint = endpoint;
    }

    public String getId() {
        return this.id;
    }

    public void setId(String id) {
        this.id = id;
    }

    public String getServiceOfferingName() {
        return serviceOfferingName;
    }

    public void setServiceOfferingName(String serviceOfferingName) {
        this.serviceOfferingName = serviceOfferingName;
    }

    // Removed a stray empty statement (";") that followed this method.
    public void setVirtualMachineIds(List<String> virtualMachineIds) {
        this.virtualMachineIds = virtualMachineIds;
    }

    public List<String> getVirtualMachineIds() {
        return virtualMachineIds;
    }
}

View File

@ -0,0 +1,174 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api.response;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.BaseResponse;
import org.apache.cloudstack.api.EntityReference;
import com.cloud.kubernetes.version.KubernetesSupportedVersion;
import com.cloud.serializer.Param;
import com.google.gson.annotations.SerializedName;
@SuppressWarnings("unused")
@EntityReference(value = {KubernetesSupportedVersion.class})
/**
 * API response describing a supported Kubernetes version: its semantic
 * version string, the binaries ISO that backs it (id/name/state), zone
 * availability, HA (multi-master) capability, enable/disable state, and the
 * minimum CPU/RAM a node offering must provide to run it.
 */
public class KubernetesSupportedVersionResponse extends BaseResponse {
@SerializedName(ApiConstants.ID)
@Param(description = "the id of the Kubernetes supported version")
private String id;
@SerializedName(ApiConstants.NAME)
@Param(description = "Name of the Kubernetes supported version")
private String name;
@SerializedName(ApiConstants.SEMANTIC_VERSION)
@Param(description = "Kubernetes semantic version")
private String semanticVersion;
@SerializedName(ApiConstants.ISO_ID)
@Param(description = "the id of the binaries ISO for Kubernetes supported version")
private String isoId;
@SerializedName(ApiConstants.ISO_NAME)
@Param(description = "the name of the binaries ISO for Kubernetes supported version")
private String isoName;
@SerializedName(ApiConstants.ISO_STATE)
@Param(description = "the state of the binaries ISO for Kubernetes supported version")
private String isoState;
@SerializedName(ApiConstants.ZONE_ID)
@Param(description = "the id of the zone in which Kubernetes supported version is available")
private String zoneId;
@SerializedName(ApiConstants.ZONE_NAME)
@Param(description = "the name of the zone in which Kubernetes supported version is available")
private String zoneName;
@SerializedName(ApiConstants.SUPPORTS_HA)
@Param(description = "whether Kubernetes supported version supports HA, multi-master")
private Boolean supportsHA;
@SerializedName(ApiConstants.STATE)
@Param(description = "the enabled or disabled state of the Kubernetes supported version")
private String state;
@SerializedName(ApiConstants.MIN_CPU_NUMBER)
@Param(description = "the minimum number of CPUs needed for the Kubernetes supported version")
private Integer minimumCpu;
@SerializedName(ApiConstants.MIN_MEMORY)
@Param(description = "the minimum RAM size in MB needed for the Kubernetes supported version")
private Integer minimumRamSize;
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getSemanticVersion() {
return semanticVersion;
}
public void setSemanticVersion(String semanticVersion) {
this.semanticVersion = semanticVersion;
}
public String getIsoId() {
return isoId;
}
public void setIsoId(String isoId) {
this.isoId = isoId;
}
public String getIsoName() {
return isoName;
}
public void setIsoName(String isoName) {
this.isoName = isoName;
}
public String getIsoState() {
return isoState;
}
public void setIsoState(String isoState) {
this.isoState = isoState;
}
public String getZoneId() {
return zoneId;
}
public void setZoneId(String zoneId) {
this.zoneId = zoneId;
}
public String getZoneName() {
return zoneName;
}
public void setZoneName(String zoneName) {
this.zoneName = zoneName;
}
// Accessor named "is…" rather than "get…" despite returning a boxed Boolean (may be null).
public Boolean isSupportsHA() {
return supportsHA;
}
public void setSupportsHA(Boolean supportsHA) {
this.supportsHA = supportsHA;
}
public String getState() {
return state;
}
public void setState(String state) {
this.state = state;
}
public Integer getMinimumCpu() {
return minimumCpu;
}
public void setMinimumCpu(Integer minimumCpu) {
this.minimumCpu = minimumCpu;
}
public Integer getMinimumRamSize() {
return minimumRamSize;
}
public void setMinimumRamSize(Integer minimumRamSize) {
this.minimumRamSize = minimumRamSize;
}
}

View File

@ -0,0 +1,18 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
name=kubernetes-service
parent=compute

View File

@ -0,0 +1,37 @@
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<beans xmlns="http://www.springframework.org/schema/beans"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:context="http://www.springframework.org/schema/context"
xmlns:aop="http://www.springframework.org/schema/aop"
xsi:schemaLocation="http://www.springframework.org/schema/beans
http://www.springframework.org/schema/beans/spring-beans-3.0.xsd
http://www.springframework.org/schema/aop http://www.springframework.org/schema/aop/spring-aop-3.0.xsd
http://www.springframework.org/schema/context
http://www.springframework.org/schema/context/spring-context-3.0.xsd"
>
<bean id="kubernetesSupportedVersionDaoImpl" class="com.cloud.kubernetes.version.dao.KubernetesSupportedVersionDaoImpl" />
<bean id="kubernetesVersionManagerImpl" class="com.cloud.kubernetes.version.KubernetesVersionManagerImpl" />
<bean id="kubernetesClusterDaoImpl" class="com.cloud.kubernetes.cluster.dao.KubernetesClusterDaoImpl" />
<bean id="kubernetesClusterDetailsDaoImpl" class="com.cloud.kubernetes.cluster.dao.KubernetesClusterDetailsDaoImpl" />
<bean id="kubernetesClusterVmMapDaoImpl" class="com.cloud.kubernetes.cluster.dao.KubernetesClusterVmMapDaoImpl" />
<bean id="kubernetesClusterManagerImpl" class="com.cloud.kubernetes.cluster.KubernetesClusterManagerImpl" />
</beans>

View File

@ -0,0 +1,237 @@
#cloud-config
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Cloud-init user data for an ADDITIONAL control-plane node of an HA
# CloudStack Kubernetes Service (CKS) cluster. It installs the Kubernetes
# binaries from the CloudStack-attached binaries ISO (online install is
# disabled by default) and joins the node to the existing control plane via
# `kubeadm join --control-plane`. The {{ ... }} placeholders are substituted
# by the CloudStack management server before the user data is handed to the VM.
---
ssh_authorized_keys:
{{ k8s.ssh.pub.key }}
write-files:
- path: /opt/bin/setup-kube-system
permissions: 0700
owner: root:root
content: |
#!/bin/bash -e
# Bootstrap: install K8s binaries and preload docker images from the ISO.
# Idempotency guard — a successful earlier run leaves /home/core/success.
if [[ -f "/home/core/success" ]]; then
echo "Already provisioned!"
exit 0
fi
ISO_MOUNT_DIR=/mnt/k8sdisk
BINARIES_DIR=${ISO_MOUNT_DIR}/
K8S_CONFIG_SCRIPTS_COPY_DIR=/tmp/k8sconfigscripts/
ATTEMPT_ONLINE_INSTALL=false
setup_complete=false
OFFLINE_INSTALL_ATTEMPT_SLEEP=15
MAX_OFFLINE_INSTALL_ATTEMPTS=100
offline_attempts=1
MAX_SETUP_CRUCIAL_CMD_ATTEMPTS=3
EJECT_ISO_FROM_OS={{ k8s.eject.iso }}
crucial_cmd_attempts=1
iso_drive_path=""
# Poll for the binaries ISO (iso9660) attached by CloudStack and mount it
# read-only; retries because the ISO may attach after the VM starts booting.
while true; do
if (( "$offline_attempts" > "$MAX_OFFLINE_INSTALL_ATTEMPTS" )); then
echo "Warning: Offline install timed out!"
break
fi
set +e
output=`blkid -o device -t TYPE=iso9660`
set -e
if [ "$output" != "" ]; then
while read -r line; do
if [ ! -d "${ISO_MOUNT_DIR}" ]; then
mkdir "${ISO_MOUNT_DIR}"
fi
retval=0
set +e
mount -o ro "${line}" "${ISO_MOUNT_DIR}"
retval=$?
set -e
if [ $retval -eq 0 ]; then
if [ -d "$BINARIES_DIR" ]; then
iso_drive_path="${line}"
break
else
umount "${line}" && rmdir "${ISO_MOUNT_DIR}"
fi
fi
done <<< "$output"
fi
if [ -d "$BINARIES_DIR" ]; then
break
fi
echo "Waiting for Binaries directory $BINARIES_DIR to be available, sleeping for $OFFLINE_INSTALL_ATTEMPT_SLEEP seconds, attempt: $offline_attempts"
sleep $OFFLINE_INSTALL_ATTEMPT_SLEEP
offline_attempts=$[$offline_attempts + 1]
done
if [[ "$PATH" != *:/opt/bin && "$PATH" != *:/opt/bin:* ]]; then
export PATH=$PATH:/opt/bin
fi
if [ -d "$BINARIES_DIR" ]; then
### Binaries available offline ###
# Install CNI plugins, crictl, kubeadm/kubelet/kubectl and the kubelet
# systemd unit (rewritten for /opt/bin), then preload docker images.
echo "Installing binaries from ${BINARIES_DIR}"
mkdir -p /opt/cni/bin
tar -f "${BINARIES_DIR}/cni/cni-plugins-amd64.tgz" -C /opt/cni/bin -xz
mkdir -p /opt/bin
tar -f "${BINARIES_DIR}/cri-tools/crictl-linux-amd64.tar.gz" -C /opt/bin -xz
mkdir -p /opt/bin
cd /opt/bin
cp -a ${BINARIES_DIR}/k8s/{kubeadm,kubelet,kubectl} /opt/bin
chmod +x {kubeadm,kubelet,kubectl}
sed "s:/usr/bin:/opt/bin:g" ${BINARIES_DIR}/kubelet.service > /etc/systemd/system/kubelet.service
mkdir -p /etc/systemd/system/kubelet.service.d
sed "s:/usr/bin:/opt/bin:g" ${BINARIES_DIR}/10-kubeadm.conf > /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
output=`ls ${BINARIES_DIR}/docker/`
if [ "$output" != "" ]; then
# Load each saved docker image tarball, retrying a few times per image.
while read -r line; do
crucial_cmd_attempts=1
while true; do
if (( "$crucial_cmd_attempts" > "$MAX_SETUP_CRUCIAL_CMD_ATTEMPTS" )); then
echo "Loading docker image ${BINARIES_DIR}/docker/$line failed!"
break;
fi
retval=0
set +e
docker load < "${BINARIES_DIR}/docker/$line"
retval=$?
set -e
if [ $retval -eq 0 ]; then
break;
fi
crucial_cmd_attempts=$[$crucial_cmd_attempts + 1]
done
done <<< "$output"
setup_complete=true
fi
umount "${ISO_MOUNT_DIR}" && rmdir "${ISO_MOUNT_DIR}"
if [ "$EJECT_ISO_FROM_OS" = true ] && [ "$iso_drive_path" != "" ]; then
eject "${iso_drive_path}"
fi
fi
# Online fallback — disabled by default (ATTEMPT_ONLINE_INSTALL=false above).
if [ "$setup_complete" = false ] && [ "$ATTEMPT_ONLINE_INSTALL" = true ]; then
### Binaries not available offline ###
RELEASE="v1.16.3"
CNI_VERSION="v0.7.5"
CRICTL_VERSION="v1.16.0"
echo "Warning: ${BINARIES_DIR} not found. Will get binaries and docker images from Internet."
mkdir -p /opt/cni/bin
curl -L "https://github.com/containernetworking/plugins/releases/download/${CNI_VERSION}/cni-plugins-amd64-${CNI_VERSION}.tgz" | tar -C /opt/cni/bin -xz
mkdir -p /opt/bin
curl -L "https://github.com/kubernetes-incubator/cri-tools/releases/download/${CRICTL_VERSION}/crictl-${CRICTL_VERSION}-linux-amd64.tar.gz" | tar -C /opt/bin -xz
mkdir -p /opt/bin
cd /opt/bin
curl -L --remote-name-all https://storage.googleapis.com/kubernetes-release/release/${RELEASE}/bin/linux/amd64/{kubeadm,kubelet,kubectl}
chmod +x {kubeadm,kubelet,kubectl}
curl -sSL "https://raw.githubusercontent.com/kubernetes/kubernetes/${RELEASE}/build/debs/kubelet.service" | sed "s:/usr/bin:/opt/bin:g" > /etc/systemd/system/kubelet.service
mkdir -p /etc/systemd/system/kubelet.service.d
curl -sSL "https://raw.githubusercontent.com/kubernetes/kubernetes/${RELEASE}/build/debs/10-kubeadm.conf" | sed "s:/usr/bin:/opt/bin:g" > /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
fi
systemctl enable kubelet && systemctl start kubelet
modprobe br_netfilter && sysctl net.bridge.bridge-nf-call-iptables=1
# Pre-pull control-plane images; only relevant for the online install path.
if [ -d "$BINARIES_DIR" ] && [ "$ATTEMPT_ONLINE_INSTALL" = true ]; then
crucial_cmd_attempts=1
while true; do
if (( "$crucial_cmd_attempts" > "$MAX_SETUP_CRUCIAL_CMD_ATTEMPTS" )); then
echo "Warning: kubeadm pull images failed after multiple tries!"
break;
fi
retval=0
set +e
kubeadm config images pull
retval=$?
set -e
if [ $retval -eq 0 ]; then
break;
fi
crucial_cmd_attempts=$[$crucial_cmd_attempts + 1]
done
fi
- path: /opt/bin/deploy-kube-system
permissions: 0700
owner: root:root
content: |
#!/bin/bash -e
# Joins this VM to the existing control plane; runs after setup-kube-system.
if [[ -f "/home/core/success" ]]; then
echo "Already provisioned!"
exit 0
fi
if [[ $(systemctl is-active setup-kube-system) != "inactive" ]]; then
echo "setup-kube-system is running!"
exit 1
fi
# Kernel modules required by kube-proxy IPVS mode.
modprobe ip_vs
modprobe ip_vs_wrr
modprobe ip_vs_sh
modprobe nf_conntrack_ipv4
if [[ "$PATH" != *:/opt/bin && "$PATH" != *:/opt/bin:* ]]; then
export PATH=$PATH:/opt/bin
fi
# --control-plane makes this node an additional master; CA verification is
# skipped because the token/certificate key come from the management server.
kubeadm join {{ k8s_master.join_ip }}:6443 --token {{ k8s_master.cluster.token }} --control-plane --certificate-key {{ k8s_master.cluster.ha.certificate.key }} --discovery-token-unsafe-skip-ca-verification
sudo touch /home/core/success
echo "true" > /home/core/success
# CoreOS units: setup runs once docker is up; deploy retries until the
# existing API server at join_ip answers /version, then performs the join.
coreos:
units:
- name: docker.service
command: start
enable: true
- name: setup-kube-system.service
command: start
content: |
[Unit]
Requires=docker.service
After=docker.service
[Service]
Type=simple
StartLimitInterval=0
ExecStart=/opt/bin/setup-kube-system
- name: deploy-kube-system.service
command: start
content: |
[Unit]
After=setup-kube-system.service
[Service]
Type=simple
StartLimitInterval=0
Restart=on-failure
ExecStartPre=/usr/bin/curl -k https://{{ k8s_master.join_ip }}:6443/version
ExecStart=/opt/bin/deploy-kube-system
update:
group: stable
reboot-strategy: off

View File

@ -0,0 +1,294 @@
#cloud-config
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Cloud-init user data for the PRIMARY control-plane (master) node of a
# CloudStack Kubernetes Service (CKS) cluster. It writes the CloudStack-issued
# API server certificates, installs the Kubernetes binaries from the attached
# binaries ISO, runs `kubeadm init`, and deploys the pod network and dashboard
# manifests. The {{ ... }} placeholders are substituted by the CloudStack
# management server before the user data is handed to the VM.
---
ssh_authorized_keys:
{{ k8s.ssh.pub.key }}
write-files:
- path: /etc/conf.d/nfs
permissions: '0644'
content: |
OPTS_RPC_MOUNTD=""
- path: /etc/kubernetes/pki/cloudstack/ca.crt
permissions: '0644'
content: |
{{ k8s_master.ca.crt }}
- path: /etc/kubernetes/pki/cloudstack/apiserver.crt
permissions: '0644'
content: |
{{ k8s_master.apiserver.crt }}
- path: /etc/kubernetes/pki/cloudstack/apiserver.key
permissions: '0600'
content: |
{{ k8s_master.apiserver.key }}
- path: /opt/bin/setup-kube-system
permissions: 0700
owner: root:root
content: |
#!/bin/bash -e
# Bootstrap: install K8s binaries, preload docker images, then kubeadm init.
# Idempotency guard — a successful earlier run leaves /home/core/success.
if [[ -f "/home/core/success" ]]; then
echo "Already provisioned!"
exit 0
fi
ISO_MOUNT_DIR=/mnt/k8sdisk
BINARIES_DIR=${ISO_MOUNT_DIR}/
K8S_CONFIG_SCRIPTS_COPY_DIR=/tmp/k8sconfigscripts/
ATTEMPT_ONLINE_INSTALL=false
setup_complete=false
OFFLINE_INSTALL_ATTEMPT_SLEEP=15
MAX_OFFLINE_INSTALL_ATTEMPTS=100
offline_attempts=1
MAX_SETUP_CRUCIAL_CMD_ATTEMPTS=3
EJECT_ISO_FROM_OS={{ k8s.eject.iso }}
crucial_cmd_attempts=1
iso_drive_path=""
# Poll for the binaries ISO (iso9660) attached by CloudStack and mount it
# read-only; retries because the ISO may attach after the VM starts booting.
while true; do
if (( "$offline_attempts" > "$MAX_OFFLINE_INSTALL_ATTEMPTS" )); then
echo "Warning: Offline install timed out!"
break
fi
set +e
output=`blkid -o device -t TYPE=iso9660`
set -e
if [ "$output" != "" ]; then
while read -r line; do
if [ ! -d "${ISO_MOUNT_DIR}" ]; then
mkdir "${ISO_MOUNT_DIR}"
fi
retval=0
set +e
mount -o ro "${line}" "${ISO_MOUNT_DIR}"
retval=$?
set -e
if [ $retval -eq 0 ]; then
if [ -d "$BINARIES_DIR" ]; then
iso_drive_path="${line}"
break
else
umount "${line}" && rmdir "${ISO_MOUNT_DIR}"
fi
fi
done <<< "$output"
fi
if [ -d "$BINARIES_DIR" ]; then
break
fi
echo "Waiting for Binaries directory $BINARIES_DIR to be available, sleeping for $OFFLINE_INSTALL_ATTEMPT_SLEEP seconds, attempt: $offline_attempts"
sleep $OFFLINE_INSTALL_ATTEMPT_SLEEP
offline_attempts=$[$offline_attempts + 1]
done
if [[ "$PATH" != *:/opt/bin && "$PATH" != *:/opt/bin:* ]]; then
export PATH=$PATH:/opt/bin
fi
if [ -d "$BINARIES_DIR" ]; then
### Binaries available offline ###
# Install CNI plugins, crictl, kubeadm/kubelet/kubectl and the kubelet
# systemd unit (rewritten for /opt/bin), then preload docker images.
echo "Installing binaries from ${BINARIES_DIR}"
mkdir -p /opt/cni/bin
tar -f "${BINARIES_DIR}/cni/cni-plugins-amd64.tgz" -C /opt/cni/bin -xz
mkdir -p /opt/bin
tar -f "${BINARIES_DIR}/cri-tools/crictl-linux-amd64.tar.gz" -C /opt/bin -xz
mkdir -p /opt/bin
cd /opt/bin
cp -a ${BINARIES_DIR}/k8s/{kubeadm,kubelet,kubectl} /opt/bin
chmod +x {kubeadm,kubelet,kubectl}
sed "s:/usr/bin:/opt/bin:g" ${BINARIES_DIR}/kubelet.service > /etc/systemd/system/kubelet.service
mkdir -p /etc/systemd/system/kubelet.service.d
sed "s:/usr/bin:/opt/bin:g" ${BINARIES_DIR}/10-kubeadm.conf > /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
output=`ls ${BINARIES_DIR}/docker/`
if [ "$output" != "" ]; then
# Load each saved docker image tarball, retrying a few times per image.
while read -r line; do
crucial_cmd_attempts=1
while true; do
if (( "$crucial_cmd_attempts" > "$MAX_SETUP_CRUCIAL_CMD_ATTEMPTS" )); then
echo "Loading docker image ${BINARIES_DIR}/docker/$line failed!"
break;
fi
retval=0
set +e
docker load < "${BINARIES_DIR}/docker/$line"
retval=$?
set -e
if [ $retval -eq 0 ]; then
break;
fi
crucial_cmd_attempts=$[$crucial_cmd_attempts + 1]
done
done <<< "$output"
setup_complete=true
fi
# Keep the network/dashboard manifests for deploy-kube-system to apply
# after the ISO has been unmounted.
mkdir -p "${K8S_CONFIG_SCRIPTS_COPY_DIR}"
cp ${BINARIES_DIR}/*.yaml "${K8S_CONFIG_SCRIPTS_COPY_DIR}"
umount "${ISO_MOUNT_DIR}" && rmdir "${ISO_MOUNT_DIR}"
if [ "$EJECT_ISO_FROM_OS" = true ] && [ "$iso_drive_path" != "" ]; then
eject "${iso_drive_path}"
fi
fi
# Online fallback — disabled by default (ATTEMPT_ONLINE_INSTALL=false above).
if [ "$setup_complete" = false ] && [ "$ATTEMPT_ONLINE_INSTALL" = true ]; then
### Binaries not available offline ###
RELEASE="v1.16.3"
CNI_VERSION="v0.7.5"
CRICTL_VERSION="v1.16.0"
echo "Warning: ${BINARIES_DIR} not found. Will get binaries and docker images from Internet."
mkdir -p /opt/cni/bin
curl -L "https://github.com/containernetworking/plugins/releases/download/${CNI_VERSION}/cni-plugins-amd64-${CNI_VERSION}.tgz" | tar -C /opt/cni/bin -xz
mkdir -p /opt/bin
curl -L "https://github.com/kubernetes-incubator/cri-tools/releases/download/${CRICTL_VERSION}/crictl-${CRICTL_VERSION}-linux-amd64.tar.gz" | tar -C /opt/bin -xz
mkdir -p /opt/bin
cd /opt/bin
curl -L --remote-name-all https://storage.googleapis.com/kubernetes-release/release/${RELEASE}/bin/linux/amd64/{kubeadm,kubelet,kubectl}
chmod +x {kubeadm,kubelet,kubectl}
curl -sSL "https://raw.githubusercontent.com/kubernetes/kubernetes/${RELEASE}/build/debs/kubelet.service" | sed "s:/usr/bin:/opt/bin:g" > /etc/systemd/system/kubelet.service
mkdir -p /etc/systemd/system/kubelet.service.d
curl -sSL "https://raw.githubusercontent.com/kubernetes/kubernetes/${RELEASE}/build/debs/10-kubeadm.conf" | sed "s:/usr/bin:/opt/bin:g" > /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
fi
systemctl enable kubelet && systemctl start kubelet
modprobe br_netfilter && sysctl net.bridge.bridge-nf-call-iptables=1
# Pre-pull control-plane images; only relevant for the online install path.
if [ -d "$BINARIES_DIR" ] && [ "$ATTEMPT_ONLINE_INSTALL" = true ]; then
crucial_cmd_attempts=1
while true; do
if (( "$crucial_cmd_attempts" > "$MAX_SETUP_CRUCIAL_CMD_ATTEMPTS" )); then
echo "Warning: kubeadm pull images failed after multiple tries!"
break;
fi
retval=0
set +e
kubeadm config images pull
retval=$?
set -e
if [ $retval -eq 0 ]; then
break;
fi
crucial_cmd_attempts=$[$crucial_cmd_attempts + 1]
done
fi
# Initialize the control plane; retried because init can fail transiently.
crucial_cmd_attempts=1
while true; do
if (( "$crucial_cmd_attempts" > "$MAX_SETUP_CRUCIAL_CMD_ATTEMPTS" )); then
echo "Error: kubeadm init failed!"
exit 1
fi
retval=0
set +e
kubeadm init --token {{ k8s_master.cluster.token }} {{ k8s_master.cluster.initargs }}
retval=$?
set -e
if [ $retval -eq 0 ]; then
break;
fi
crucial_cmd_attempts=$[$crucial_cmd_attempts + 1]
done
- path: /opt/bin/deploy-kube-system
permissions: 0700
owner: root:root
content: |
#!/bin/bash -e
# Post-init deployment: kubeconfig setup, pod network, dashboard, RBAC.
if [[ -f "/home/core/success" ]]; then
echo "Already provisioned!"
exit 0
fi
K8S_CONFIG_SCRIPTS_COPY_DIR=/tmp/k8sconfigscripts/
if [[ $(systemctl is-active setup-kube-system) != "inactive" ]]; then
echo "setup-kube-system is running!"
exit 1
fi
if [[ "$PATH" != *:/opt/bin && "$PATH" != *:/opt/bin:* ]]; then
export PATH=$PATH:/opt/bin
fi
export KUBECONFIG=/etc/kubernetes/admin.conf
mkdir -p /root/.kube
cp -i /etc/kubernetes/admin.conf /root/.kube/config
chown $(id -u):$(id -g) /root/.kube/config
echo export PATH=\$PATH:/opt/bin >> /root/.bashrc
if [ -d "$K8S_CONFIG_SCRIPTS_COPY_DIR" ]; then
### Network, dashboard configs available offline ###
# Manifests copied off the ISO by setup-kube-system.
echo "Offline configs are available!"
kubectl apply -f ${K8S_CONFIG_SCRIPTS_COPY_DIR}/network.yaml
kubectl apply -f ${K8S_CONFIG_SCRIPTS_COPY_DIR}/dashboard.yaml
rm -rf "${K8S_CONFIG_SCRIPTS_COPY_DIR}"
else
kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')"
kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-beta6/aio/deploy/recommended.yaml
fi
# `|| true` keeps reruns idempotent if the bindings already exist.
kubectl create rolebinding admin-binding --role=admin --user=admin || true
kubectl create clusterrolebinding cluster-admin-binding --clusterrole=cluster-admin --user=admin || true
kubectl create clusterrolebinding kubernetes-dashboard-ui --clusterrole=cluster-admin --serviceaccount=kubernetes-dashboard:kubernetes-dashboard || true
sudo touch /home/core/success
echo "true" > /home/core/success
# CoreOS units: setup runs once docker is up; deploy retries until the local
# API server answers /version, then applies the add-on manifests.
coreos:
units:
- name: docker.service
command: start
enable: true
- name: setup-kube-system.service
command: start
content: |
[Unit]
Requires=docker.service
After=docker.service
[Service]
Type=simple
StartLimitInterval=0
ExecStart=/opt/bin/setup-kube-system
- name: deploy-kube-system.service
command: start
content: |
[Unit]
After=setup-kube-system.service
[Service]
Type=simple
StartLimitInterval=0
Restart=on-failure
ExecStartPre=/usr/bin/curl -k https://127.0.0.1:6443/version
ExecStart=/opt/bin/deploy-kube-system
update:
group: stable
reboot-strategy: off

View File

@ -0,0 +1,237 @@
#cloud-config
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Cloud-init user data for a WORKER node of a CloudStack Kubernetes Service
# (CKS) cluster. It installs the Kubernetes binaries from the attached
# binaries ISO (online install is disabled by default) and joins the node to
# the cluster with a plain `kubeadm join`. The {{ ... }} placeholders are
# substituted by the CloudStack management server. Worker nodes poll less
# often but longer than masters (sleep 30s, 40 attempts) since the ISO is
# attached to nodes after the master is provisioned.
---
ssh_authorized_keys:
{{ k8s.ssh.pub.key }}
write-files:
- path: /opt/bin/setup-kube-system
permissions: 0700
owner: root:root
content: |
#!/bin/bash -e
# Bootstrap: install K8s binaries and preload docker images from the ISO.
# Idempotency guard — a successful earlier run leaves /home/core/success.
if [[ -f "/home/core/success" ]]; then
echo "Already provisioned!"
exit 0
fi
ISO_MOUNT_DIR=/mnt/k8sdisk
BINARIES_DIR=${ISO_MOUNT_DIR}/
K8S_CONFIG_SCRIPTS_COPY_DIR=/tmp/k8sconfigscripts/
ATTEMPT_ONLINE_INSTALL=false
setup_complete=false
OFFLINE_INSTALL_ATTEMPT_SLEEP=30
MAX_OFFLINE_INSTALL_ATTEMPTS=40
offline_attempts=1
MAX_SETUP_CRUCIAL_CMD_ATTEMPTS=3
EJECT_ISO_FROM_OS={{ k8s.eject.iso }}
crucial_cmd_attempts=1
iso_drive_path=""
# Poll for the binaries ISO (iso9660) attached by CloudStack and mount it
# read-only; retries because the ISO may attach after the VM starts booting.
while true; do
if (( "$offline_attempts" > "$MAX_OFFLINE_INSTALL_ATTEMPTS" )); then
echo "Warning: Offline install timed out!"
break
fi
set +e
output=`blkid -o device -t TYPE=iso9660`
set -e
if [ "$output" != "" ]; then
while read -r line; do
if [ ! -d "${ISO_MOUNT_DIR}" ]; then
mkdir "${ISO_MOUNT_DIR}"
fi
retval=0
set +e
mount -o ro "${line}" "${ISO_MOUNT_DIR}"
retval=$?
set -e
if [ $retval -eq 0 ]; then
if [ -d "$BINARIES_DIR" ]; then
iso_drive_path="${line}"
break
else
umount "${line}" && rmdir "${ISO_MOUNT_DIR}"
fi
fi
done <<< "$output"
fi
if [ -d "$BINARIES_DIR" ]; then
break
fi
echo "Waiting for Binaries directory $BINARIES_DIR to be available, sleeping for $OFFLINE_INSTALL_ATTEMPT_SLEEP seconds, attempt: $offline_attempts"
sleep $OFFLINE_INSTALL_ATTEMPT_SLEEP
offline_attempts=$[$offline_attempts + 1]
done
if [[ "$PATH" != *:/opt/bin && "$PATH" != *:/opt/bin:* ]]; then
export PATH=$PATH:/opt/bin
fi
if [ -d "$BINARIES_DIR" ]; then
### Binaries available offline ###
# Install CNI plugins, crictl, kubeadm/kubelet/kubectl and the kubelet
# systemd unit (rewritten for /opt/bin), then preload docker images.
echo "Installing binaries from ${BINARIES_DIR}"
mkdir -p /opt/cni/bin
tar -f "${BINARIES_DIR}/cni/cni-plugins-amd64.tgz" -C /opt/cni/bin -xz
mkdir -p /opt/bin
tar -f "${BINARIES_DIR}/cri-tools/crictl-linux-amd64.tar.gz" -C /opt/bin -xz
mkdir -p /opt/bin
cd /opt/bin
cp -a ${BINARIES_DIR}/k8s/{kubeadm,kubelet,kubectl} /opt/bin
chmod +x {kubeadm,kubelet,kubectl}
sed "s:/usr/bin:/opt/bin:g" ${BINARIES_DIR}/kubelet.service > /etc/systemd/system/kubelet.service
mkdir -p /etc/systemd/system/kubelet.service.d
sed "s:/usr/bin:/opt/bin:g" ${BINARIES_DIR}/10-kubeadm.conf > /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
output=`ls ${BINARIES_DIR}/docker/`
if [ "$output" != "" ]; then
# Load each saved docker image tarball, retrying a few times per image.
while read -r line; do
crucial_cmd_attempts=1
while true; do
if (( "$crucial_cmd_attempts" > "$MAX_SETUP_CRUCIAL_CMD_ATTEMPTS" )); then
echo "Loading docker image ${BINARIES_DIR}/docker/$line failed!"
break;
fi
retval=0
set +e
docker load < "${BINARIES_DIR}/docker/$line"
retval=$?
set -e
if [ $retval -eq 0 ]; then
break;
fi
crucial_cmd_attempts=$[$crucial_cmd_attempts + 1]
done
done <<< "$output"
setup_complete=true
fi
umount "${ISO_MOUNT_DIR}" && rmdir "${ISO_MOUNT_DIR}"
if [ "$EJECT_ISO_FROM_OS" = true ] && [ "$iso_drive_path" != "" ]; then
eject "${iso_drive_path}"
fi
fi
# Online fallback — disabled by default (ATTEMPT_ONLINE_INSTALL=false above).
if [ "$setup_complete" = false ] && [ "$ATTEMPT_ONLINE_INSTALL" = true ]; then
### Binaries not available offline ###
RELEASE="v1.16.3"
CNI_VERSION="v0.7.5"
CRICTL_VERSION="v1.16.0"
echo "Warning: ${BINARIES_DIR} not found. Will get binaries and docker images from Internet."
mkdir -p /opt/cni/bin
curl -L "https://github.com/containernetworking/plugins/releases/download/${CNI_VERSION}/cni-plugins-amd64-${CNI_VERSION}.tgz" | tar -C /opt/cni/bin -xz
mkdir -p /opt/bin
curl -L "https://github.com/kubernetes-incubator/cri-tools/releases/download/${CRICTL_VERSION}/crictl-${CRICTL_VERSION}-linux-amd64.tar.gz" | tar -C /opt/bin -xz
mkdir -p /opt/bin
cd /opt/bin
curl -L --remote-name-all https://storage.googleapis.com/kubernetes-release/release/${RELEASE}/bin/linux/amd64/{kubeadm,kubelet,kubectl}
chmod +x {kubeadm,kubelet,kubectl}
curl -sSL "https://raw.githubusercontent.com/kubernetes/kubernetes/${RELEASE}/build/debs/kubelet.service" | sed "s:/usr/bin:/opt/bin:g" > /etc/systemd/system/kubelet.service
mkdir -p /etc/systemd/system/kubelet.service.d
curl -sSL "https://raw.githubusercontent.com/kubernetes/kubernetes/${RELEASE}/build/debs/10-kubeadm.conf" | sed "s:/usr/bin:/opt/bin:g" > /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
fi
systemctl enable kubelet && systemctl start kubelet
modprobe br_netfilter && sysctl net.bridge.bridge-nf-call-iptables=1
# Pre-pull images; only relevant for the online install path.
if [ -d "$BINARIES_DIR" ] && [ "$ATTEMPT_ONLINE_INSTALL" = true ]; then
crucial_cmd_attempts=1
while true; do
if (( "$crucial_cmd_attempts" > "$MAX_SETUP_CRUCIAL_CMD_ATTEMPTS" )); then
echo "Warning: kubeadm pull images failed after multiple tries!"
break;
fi
retval=0
set +e
kubeadm config images pull
retval=$?
set -e
if [ $retval -eq 0 ]; then
break;
fi
crucial_cmd_attempts=$[$crucial_cmd_attempts + 1]
done
fi
- path: /opt/bin/deploy-kube-system
permissions: 0700
owner: root:root
content: |
#!/bin/bash -e
# Joins this VM to the cluster as a worker; runs after setup-kube-system.
if [[ -f "/home/core/success" ]]; then
echo "Already provisioned!"
exit 0
fi
if [[ $(systemctl is-active setup-kube-system) != "inactive" ]]; then
echo "setup-kube-system is running!"
exit 1
fi
# Kernel modules required by kube-proxy IPVS mode.
modprobe ip_vs
modprobe ip_vs_wrr
modprobe ip_vs_sh
modprobe nf_conntrack_ipv4
if [[ "$PATH" != *:/opt/bin && "$PATH" != *:/opt/bin:* ]]; then
export PATH=$PATH:/opt/bin
fi
# Plain worker join (no --control-plane); CA verification is skipped because
# the bootstrap token comes from the CloudStack management server.
kubeadm join {{ k8s_master.join_ip }}:6443 --token {{ k8s_master.cluster.token }} --discovery-token-unsafe-skip-ca-verification
sudo touch /home/core/success
echo "true" > /home/core/success
# CoreOS units: setup runs once docker is up; deploy retries until the master
# API server at join_ip answers /version, then performs the join.
coreos:
units:
- name: docker.service
command: start
enable: true
- name: setup-kube-system.service
command: start
content: |
[Unit]
Requires=docker.service
After=docker.service
[Service]
Type=simple
StartLimitInterval=0
ExecStart=/opt/bin/setup-kube-system
- name: deploy-kube-system.service
command: start
content: |
[Unit]
After=setup-kube-system.service
[Service]
Type=simple
StartLimitInterval=0
Restart=on-failure
ExecStartPre=/usr/bin/curl -k https://{{ k8s_master.join_ip }}:6443/version
ExecStart=/opt/bin/deploy-kube-system
update:
group: stable
reboot-strategy: off

View File

@ -0,0 +1,133 @@
#!/bin/bash -e
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Upgrades the Kubernetes installation on a cluster node using binaries and
# docker images from the CloudStack-attached upgrade ISO. Run by the CKS
# management server on each node in turn.
#
# Arguments:
#   $1 UPGRADE_VERSION  target semantic version (e.g. 1.16.3)
#   $2 IS_MASTER        'true' on the (main) control-plane node
#   $3 IS_OLD_VERSION   'true' when upgrading FROM <= 1.14 (see note below)
#   $4 IS_EJECT_ISO     'true' to eject the ISO from the guest OS afterwards
# Version 1.14 and below needs extra flags with kubeadm upgrade node
if [ $# -lt 4 ]; then
echo "Invalid input. Valid usage: ./upgrade-kubernetes.sh UPGRADE_VERSION IS_MASTER IS_OLD_VERSION IS_EJECT_ISO"
echo "eg: ./upgrade-kubernetes.sh 1.16.3 true false false"
exit 1
fi
UPGRADE_VERSION="${1}"
IS_MAIN_MASTER=""
# NOTE(review): the $# -lt 4 guard above makes the $# -gt 1/2/3 checks below
# always true; they only matter if the arity check is ever relaxed.
if [ $# -gt 1 ]; then
IS_MAIN_MASTER="${2}"
fi
IS_OLD_VERSION=""
if [ $# -gt 2 ]; then
IS_OLD_VERSION="${3}"
fi
EJECT_ISO_FROM_OS=false
if [ $# -gt 3 ]; then
EJECT_ISO_FROM_OS="${4}"
fi
export PATH=$PATH:/opt/bin
ISO_MOUNT_DIR=/mnt/k8sdisk
BINARIES_DIR=${ISO_MOUNT_DIR}/
OFFLINE_INSTALL_ATTEMPT_SLEEP=5
MAX_OFFLINE_INSTALL_ATTEMPTS=10
offline_attempts=1
iso_drive_path=""
# Poll for the upgrade ISO (iso9660) attached by CloudStack and mount it
# read-only; the ISO may attach shortly after the script starts.
while true; do
if (( "$offline_attempts" > "$MAX_OFFLINE_INSTALL_ATTEMPTS" )); then
echo "Warning: Offline install timed out!"
break
fi
set +e
output=`blkid -o device -t TYPE=iso9660`
set -e
if [ "$output" != "" ]; then
while read -r line; do
if [ ! -d "${ISO_MOUNT_DIR}" ]; then
mkdir "${ISO_MOUNT_DIR}"
fi
retval=0
set +e
mount -o ro "${line}" "${ISO_MOUNT_DIR}"
retval=$?
set -e
if [ $retval -eq 0 ]; then
if [ -d "$BINARIES_DIR" ]; then
iso_drive_path="${line}"
break
else
umount "${line}" && rmdir "${ISO_MOUNT_DIR}"
fi
fi
done <<< "$output"
fi
if [ -d "$BINARIES_DIR" ]; then
break
fi
echo "Waiting for Binaries directory $BINARIES_DIR to be available, sleeping for $OFFLINE_INSTALL_ATTEMPT_SLEEP seconds, attempt: $offline_attempts"
sleep $OFFLINE_INSTALL_ATTEMPT_SLEEP
offline_attempts=$[$offline_attempts + 1]
done
if [ -d "$BINARIES_DIR" ]; then
### Binaries available offline ###
echo "Installing binaries from ${BINARIES_DIR}"
# New kubeadm first: the control-plane upgrade must run with the target
# version's kubeadm before kubelet/kubectl are swapped.
cd /opt/bin
cp ${BINARIES_DIR}/k8s/kubeadm /opt/bin
chmod +x kubeadm
output=`ls ${BINARIES_DIR}/docker/`
if [ "$output" != "" ]; then
# Preload the target version's images so the upgrade works offline.
while read -r line; do
docker load < "${BINARIES_DIR}/docker/$line"
done <<< "$output"
fi
tar -f "${BINARIES_DIR}/cni/cni-plugins-amd64.tgz" -C /opt/cni/bin -xz
tar -f "${BINARIES_DIR}/cri-tools/crictl-linux-amd64.tar.gz" -C /opt/bin -xz
if [ "${IS_MAIN_MASTER}" == 'true' ]; then
# First attempt without overrides; retry ignoring the CoreDNS plugin
# preflight check, which can fail on otherwise-valid older clusters.
set +e
kubeadm upgrade apply ${UPGRADE_VERSION} -y
retval=$?
set -e
if [ $retval -ne 0 ]; then
kubeadm upgrade apply ${UPGRADE_VERSION} --ignore-preflight-errors=CoreDNSUnsupportedPlugins -y
fi
else
# Non-master nodes: kubeadm <=1.14 requires the explicit node-config form.
if [ "${IS_OLD_VERSION}" == 'true' ]; then
kubeadm upgrade node config --kubelet-version ${UPGRADE_VERSION}
else
kubeadm upgrade node
fi
fi
# Swap kubelet/kubectl binaries while kubelet is stopped, then restart it.
systemctl stop kubelet
cp -a ${BINARIES_DIR}/k8s/{kubelet,kubectl} /opt/bin
chmod +x {kubelet,kubectl}
systemctl restart kubelet
if [ "${IS_MAIN_MASTER}" == 'true' ]; then
# Re-apply the pod network and dashboard manifests shipped on the ISO.
kubectl apply -f ${BINARIES_DIR}/network.yaml
kubectl apply -f ${BINARIES_DIR}/dashboard.yaml
fi
umount "${ISO_MOUNT_DIR}" && rmdir "${ISO_MOUNT_DIR}"
if [ "$EJECT_ISO_FROM_OS" = true ] && [ "$iso_drive_path" != "" ]; then
eject "${iso_drive_path}"
fi
fi

View File

@ -0,0 +1,253 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.kubernetes.version;
import static org.mockito.Mockito.when;
import java.lang.reflect.Field;
import java.util.ArrayList;
import java.util.List;
import java.util.UUID;
import org.apache.cloudstack.api.command.admin.kubernetes.version.AddKubernetesSupportedVersionCmd;
import org.apache.cloudstack.api.command.admin.kubernetes.version.DeleteKubernetesSupportedVersionCmd;
import org.apache.cloudstack.api.command.admin.kubernetes.version.UpdateKubernetesSupportedVersionCmd;
import org.apache.cloudstack.api.command.user.iso.DeleteIsoCmd;
import org.apache.cloudstack.api.command.user.iso.RegisterIsoCmd;
import org.apache.cloudstack.api.command.user.kubernetes.version.ListKubernetesSupportedVersionsCmd;
import org.apache.cloudstack.context.CallContext;
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine;
import org.apache.cloudstack.framework.config.ConfigKey;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.MockitoAnnotations;
import org.powermock.api.mockito.PowerMockito;
import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.modules.junit4.PowerMockRunner;
import com.cloud.api.query.dao.TemplateJoinDao;
import com.cloud.api.query.vo.TemplateJoinVO;
import com.cloud.dc.DataCenterVO;
import com.cloud.dc.dao.DataCenterDao;
import com.cloud.exception.InvalidParameterValueException;
import com.cloud.exception.ResourceAllocationException;
import com.cloud.kubernetes.cluster.KubernetesClusterService;
import com.cloud.kubernetes.cluster.KubernetesClusterVO;
import com.cloud.kubernetes.cluster.dao.KubernetesClusterDao;
import com.cloud.kubernetes.version.dao.KubernetesSupportedVersionDao;
import com.cloud.storage.VMTemplateVO;
import com.cloud.storage.dao.VMTemplateDao;
import com.cloud.template.TemplateApiService;
import com.cloud.template.VirtualMachineTemplate;
import com.cloud.user.Account;
import com.cloud.user.AccountManager;
import com.cloud.user.AccountVO;
import com.cloud.user.User;
import com.cloud.user.UserVO;
import com.cloud.utils.component.ComponentContext;
import com.cloud.utils.exception.CloudRuntimeException;
@RunWith(PowerMockRunner.class)
@PrepareForTest({ComponentContext.class})
public class KubernetesVersionServiceTest {
@InjectMocks
private KubernetesVersionService kubernetesVersionService = new KubernetesVersionManagerImpl();
@Mock
private KubernetesSupportedVersionDao kubernetesSupportedVersionDao;
@Mock
private KubernetesClusterDao kubernetesClusterDao;
@Mock
private AccountManager accountManager;
@Mock
private VMTemplateDao templateDao;
@Mock
private TemplateJoinDao templateJoinDao;
@Mock
private DataCenterDao dataCenterDao;
@Mock
private TemplateApiService templateService;
private void overrideDefaultConfigValue(final ConfigKey configKey, final String name, final Object o) throws IllegalAccessException, NoSuchFieldException {
Field f = ConfigKey.class.getDeclaredField(name);
f.setAccessible(true);
f.set(configKey, o);
}
@Before
public void setUp() throws Exception {
MockitoAnnotations.initMocks(this);
overrideDefaultConfigValue(KubernetesClusterService.KubernetesServiceEnabled, "_defaultValue", "true");
DataCenterVO zone = Mockito.mock(DataCenterVO.class);
when(zone.getId()).thenReturn(1L);
when(dataCenterDao.findById(Mockito.anyLong())).thenReturn(zone);
TemplateJoinVO templateJoinVO = Mockito.mock(TemplateJoinVO.class);
when(templateJoinVO.getId()).thenReturn(1L);
when(templateJoinVO.getUrl()).thenReturn("https://download.cloudstack.com");
when(templateJoinVO.getState()).thenReturn(ObjectInDataStoreStateMachine.State.Ready);
when(templateJoinDao.findById(Mockito.anyLong())).thenReturn(templateJoinVO);
KubernetesSupportedVersionVO versionVO = Mockito.mock(KubernetesSupportedVersionVO.class);
when(versionVO.getSemanticVersion()).thenReturn(KubernetesVersionService.MIN_KUBERNETES_VERSION);
when(kubernetesSupportedVersionDao.persist(Mockito.any(KubernetesSupportedVersionVO.class))).thenReturn(versionVO);
}
@After
public void tearDown() throws Exception {
}
@Test
public void listKubernetesSupportedVersionsTest() {
ListKubernetesSupportedVersionsCmd cmd = Mockito.mock(ListKubernetesSupportedVersionsCmd.class);
List<KubernetesSupportedVersionVO> versionVOs = new ArrayList<>();
KubernetesSupportedVersionVO versionVO = Mockito.mock(KubernetesSupportedVersionVO.class);
when(versionVO.getSemanticVersion()).thenReturn(KubernetesVersionService.MIN_KUBERNETES_VERSION);
versionVOs.add(versionVO);
when(kubernetesSupportedVersionDao.listAll()).thenReturn(versionVOs);
when(kubernetesSupportedVersionDao.listAllInZone(Mockito.anyLong())).thenReturn(versionVOs);
when(kubernetesSupportedVersionDao.findById(Mockito.anyLong())).thenReturn(versionVO);
kubernetesVersionService.listKubernetesSupportedVersions(cmd);
}
@Test(expected = InvalidParameterValueException.class)
public void addKubernetesSupportedVersionLowerUnsupportedTest() {
AddKubernetesSupportedVersionCmd cmd = Mockito.mock(AddKubernetesSupportedVersionCmd.class);
when(cmd.getMinimumCpu()).thenReturn(KubernetesClusterService.MIN_KUBERNETES_CLUSTER_NODE_CPU);
when(cmd.getMinimumRamSize()).thenReturn(KubernetesClusterService.MIN_KUBERNETES_CLUSTER_NODE_RAM_SIZE);
AccountVO account = new AccountVO("admin", 1L, "", Account.ACCOUNT_TYPE_ADMIN, "uuid");
UserVO user = new UserVO(1, "adminuser", "password", "firstname", "lastName", "email", "timezone", UUID.randomUUID().toString(), User.Source.UNKNOWN);
CallContext.register(user, account);
when(cmd.getSemanticVersion()).thenReturn("1.1.1");
kubernetesVersionService.addKubernetesSupportedVersion(cmd);
}
@Test(expected = InvalidParameterValueException.class)
public void addKubernetesSupportedVersionInvalidCpuTest() {
    // A minimum CPU count below the cluster-node floor must be rejected.
    final AddKubernetesSupportedVersionCmd versionCmd = Mockito.mock(AddKubernetesSupportedVersionCmd.class);
    when(versionCmd.getSemanticVersion()).thenReturn(KubernetesVersionService.MIN_KUBERNETES_VERSION);
    when(versionCmd.getMinimumCpu()).thenReturn(KubernetesClusterService.MIN_KUBERNETES_CLUSTER_NODE_CPU - 1);
    when(versionCmd.getMinimumRamSize()).thenReturn(KubernetesClusterService.MIN_KUBERNETES_CLUSTER_NODE_RAM_SIZE);
    // Register an admin caller context; the service checks the caller's privileges.
    final AccountVO adminAccount = new AccountVO("admin", 1L, "", Account.ACCOUNT_TYPE_ADMIN, "uuid");
    final UserVO adminUser = new UserVO(1, "adminuser", "password", "firstname", "lastName", "email", "timezone", UUID.randomUUID().toString(), User.Source.UNKNOWN);
    CallContext.register(adminUser, adminAccount);
    kubernetesVersionService.addKubernetesSupportedVersion(versionCmd);
}
@Test(expected = InvalidParameterValueException.class)
public void addKubernetesSupportedVersionInvalidRamSizeTest() {
    // A minimum RAM size below the cluster-node floor must be rejected.
    final AddKubernetesSupportedVersionCmd versionCmd = Mockito.mock(AddKubernetesSupportedVersionCmd.class);
    when(versionCmd.getSemanticVersion()).thenReturn(KubernetesVersionService.MIN_KUBERNETES_VERSION);
    when(versionCmd.getMinimumCpu()).thenReturn(KubernetesClusterService.MIN_KUBERNETES_CLUSTER_NODE_CPU);
    when(versionCmd.getMinimumRamSize()).thenReturn(KubernetesClusterService.MIN_KUBERNETES_CLUSTER_NODE_RAM_SIZE - 10);
    // Register an admin caller context; the service checks the caller's privileges.
    final AccountVO adminAccount = new AccountVO("admin", 1L, "", Account.ACCOUNT_TYPE_ADMIN, "uuid");
    final UserVO adminUser = new UserVO(1, "adminuser", "password", "firstname", "lastName", "email", "timezone", UUID.randomUUID().toString(), User.Source.UNKNOWN);
    CallContext.register(adminUser, adminAccount);
    kubernetesVersionService.addKubernetesSupportedVersion(versionCmd);
}
@Test(expected = InvalidParameterValueException.class)
public void addKubernetesSupportedVersionEmptyUrlTest() {
    // A blank ISO URL must be rejected even when all other parameters are valid.
    final AddKubernetesSupportedVersionCmd versionCmd = Mockito.mock(AddKubernetesSupportedVersionCmd.class);
    when(versionCmd.getSemanticVersion()).thenReturn(KubernetesVersionService.MIN_KUBERNETES_VERSION);
    when(versionCmd.getUrl()).thenReturn("");
    when(versionCmd.getMinimumCpu()).thenReturn(KubernetesClusterService.MIN_KUBERNETES_CLUSTER_NODE_CPU);
    when(versionCmd.getMinimumRamSize()).thenReturn(KubernetesClusterService.MIN_KUBERNETES_CLUSTER_NODE_RAM_SIZE);
    // Register an admin caller context; the service checks the caller's privileges.
    final AccountVO adminAccount = new AccountVO("admin", 1L, "", Account.ACCOUNT_TYPE_ADMIN, "uuid");
    final UserVO adminUser = new UserVO(1, "adminuser", "password", "firstname", "lastName", "email", "timezone", UUID.randomUUID().toString(), User.Source.UNKNOWN);
    CallContext.register(adminUser, adminAccount);
    kubernetesVersionService.addKubernetesSupportedVersion(versionCmd);
}
@Test
public void addKubernetesSupportedVersionIsoUrlTest() throws ResourceAllocationException, NoSuchFieldException {
// Happy path: a valid semantic version plus a non-empty ISO URL should register
// the ISO and persist the new supported version without throwing.
AddKubernetesSupportedVersionCmd cmd = Mockito.mock(AddKubernetesSupportedVersionCmd.class);
AccountVO account = new AccountVO("admin", 1L, "", Account.ACCOUNT_TYPE_ADMIN, "uuid");
UserVO user = new UserVO(1, "adminuser", "password", "firstname", "lastName", "email", "timezone", UUID.randomUUID().toString(), User.Source.UNKNOWN);
CallContext.register(user, account);
when(cmd.getSemanticVersion()).thenReturn(KubernetesVersionService.MIN_KUBERNETES_VERSION);
when(cmd.getUrl()).thenReturn("https://download.cloudstack.com");
// Checksum is optional; null exercises the no-checksum path.
when(cmd.getChecksum()).thenReturn(null);
when(cmd.getMinimumCpu()).thenReturn(KubernetesClusterService.MIN_KUBERNETES_CLUSTER_NODE_CPU);
when(cmd.getMinimumRamSize()).thenReturn(KubernetesClusterService.MIN_KUBERNETES_CLUSTER_NODE_RAM_SIZE);
// The ISO is registered on behalf of the system account.
Account systemAccount = new AccountVO("system", 1L, "", Account.ACCOUNT_TYPE_ADMIN, "uuid");
when(accountManager.getSystemAccount()).thenReturn(systemAccount);
// The service injects a fresh RegisterIsoCmd via the static ComponentContext.inject(...);
// stub it with PowerMock so no Spring context is needed.
PowerMockito.mockStatic(ComponentContext.class);
when(ComponentContext.inject(Mockito.any(RegisterIsoCmd.class))).thenReturn(new RegisterIsoCmd());
when(templateService.registerIso(Mockito.any(RegisterIsoCmd.class))).thenReturn(Mockito.mock(VirtualMachineTemplate.class));
VMTemplateVO templateVO = Mockito.mock(VMTemplateVO.class);
when(templateVO.getId()).thenReturn(1L);
when(templateDao.findById(Mockito.anyLong())).thenReturn(templateVO);
kubernetesVersionService.addKubernetesSupportedVersion(cmd);
}
@Test(expected = CloudRuntimeException.class)
public void deleteKubernetesSupportedVersionExistingClustersTest() {
    final DeleteKubernetesSupportedVersionCmd deleteCmd = Mockito.mock(DeleteKubernetesSupportedVersionCmd.class);
    // Register an admin caller context; the service checks the caller's privileges.
    final AccountVO adminAccount = new AccountVO("admin", 1L, "", Account.ACCOUNT_TYPE_ADMIN, "uuid");
    final UserVO adminUser = new UserVO(1, "adminuser", "password", "firstname", "lastName", "email", "timezone", UUID.randomUUID().toString(), User.Source.UNKNOWN);
    CallContext.register(adminUser, adminAccount);
    // The version exists...
    when(kubernetesSupportedVersionDao.findById(Mockito.anyLong())).thenReturn(Mockito.mock(KubernetesSupportedVersionVO.class));
    // ...but at least one cluster still uses it, so deletion must fail.
    final List<KubernetesClusterVO> clustersUsingVersion = new ArrayList<>();
    clustersUsingVersion.add(Mockito.mock(KubernetesClusterVO.class));
    when(kubernetesClusterDao.listAllByKubernetesVersion(Mockito.anyLong())).thenReturn(clustersUsingVersion);
    kubernetesVersionService.deleteKubernetesSupportedVersion(deleteCmd);
}
@Test
public void deleteKubernetesSupportedVersionTest() {
// Happy path: the version exists, no cluster references it, and the backing ISO
// can be deleted, so the version itself is removed without throwing.
DeleteKubernetesSupportedVersionCmd cmd = Mockito.mock(DeleteKubernetesSupportedVersionCmd.class);
AccountVO account = new AccountVO("admin", 1L, "", Account.ACCOUNT_TYPE_ADMIN, "uuid");
UserVO user = new UserVO(1, "adminuser", "password", "firstname", "lastName", "email", "timezone", UUID.randomUUID().toString(), User.Source.UNKNOWN);
CallContext.register(user, account);
when(kubernetesSupportedVersionDao.findById(Mockito.anyLong())).thenReturn(Mockito.mock(KubernetesSupportedVersionVO.class));
// Empty list: no Kubernetes cluster is still using this version.
List<KubernetesClusterVO> clusters = new ArrayList<>();
when(kubernetesClusterDao.listAllByKubernetesVersion(Mockito.anyLong())).thenReturn(clusters);
when(templateDao.findById(Mockito.anyLong())).thenReturn(Mockito.mock(VMTemplateVO.class));
// The service injects a fresh DeleteIsoCmd via the static ComponentContext.inject(...);
// stub it with PowerMock so no Spring context is needed.
PowerMockito.mockStatic(ComponentContext.class);
when(ComponentContext.inject(Mockito.any(DeleteIsoCmd.class))).thenReturn(new DeleteIsoCmd());
when(templateService.deleteIso(Mockito.any(DeleteIsoCmd.class))).thenReturn(true);
when(kubernetesClusterDao.remove(Mockito.anyLong())).thenReturn(true);
kubernetesVersionService.deleteKubernetesSupportedVersion(cmd);
}
@Test
public void updateKubernetesSupportedVersionTest() {
// Happy path: updating an existing version's state to Disabled succeeds.
UpdateKubernetesSupportedVersionCmd cmd = Mockito.mock(UpdateKubernetesSupportedVersionCmd.class);
when(cmd.getState()).thenReturn(KubernetesSupportedVersion.State.Disabled.toString());
AccountVO account = new AccountVO("admin", 1L, "", Account.ACCOUNT_TYPE_ADMIN, "uuid");
UserVO user = new UserVO(1, "adminuser", "password", "firstname", "lastName", "email", "timezone", UUID.randomUUID().toString(), User.Source.UNKNOWN);
CallContext.register(user, account);
when(kubernetesSupportedVersionDao.findById(Mockito.anyLong())).thenReturn(Mockito.mock(KubernetesSupportedVersionVO.class));
KubernetesSupportedVersionVO version = Mockito.mock(KubernetesSupportedVersionVO.class);
when(kubernetesSupportedVersionDao.createForUpdate(Mockito.anyLong())).thenReturn(version);
when(kubernetesSupportedVersionDao.update(Mockito.anyLong(), Mockito.any(KubernetesSupportedVersionVO.class))).thenReturn(true);
when(version.getState()).thenReturn(KubernetesSupportedVersion.State.Disabled);
when(version.getSemanticVersion()).thenReturn(KubernetesVersionService.MIN_KUBERNETES_VERSION);
// Intentionally re-stubs findById (last stub wins in Mockito): after the update
// the DAO must return the updated `version` mock, not the anonymous one above.
when(kubernetesSupportedVersionDao.findById(Mockito.anyLong())).thenReturn(version);
kubernetesVersionService.updateKubernetesSupportedVersion(cmd);
}
}

View File

@ -88,6 +88,7 @@
<module>integrations/cloudian</module>
<module>integrations/prometheus</module>
<module>integrations/kubernetes-service</module>
<module>metrics</module>

View File

@ -0,0 +1,106 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

# Builds an ISO bundling the Kubernetes binaries, systemd units, network and
# dashboard manifests, and the docker images kubeadm requires, for use by the
# CloudStack Kubernetes Service.
if [ $# -lt 6 ]; then
    echo "Invalid input. Valid usage: ./create-kubernetes-binaries-iso.sh OUTPUT_PATH KUBERNETES_VERSION CNI_VERSION CRICTL_VERSION WEAVENET_NETWORK_YAML_CONFIG DASHBOARD_YAML_CONFIG"
    echo "eg: ./create-kubernetes-binaries-iso.sh ./ 1.11.4 0.7.1 1.11.1 https://github.com/weaveworks/weave/releases/download/latest_release/weave-daemonset-k8s-1.11.yaml https://raw.githubusercontent.com/kubernetes/dashboard/v1.10.0/src/deploy/recommended/kubernetes-dashboard.yaml"
    exit 1
fi

RELEASE="v${2}"
output_dir="${1}"
start_dir="$PWD"
iso_dir="/tmp/iso"
working_dir="${iso_dir}/"
mkdir -p "${working_dir}"

CNI_VERSION="v${3}"
echo "Downloading CNI ${CNI_VERSION}..."
cni_dir="${working_dir}/cni/"
mkdir -p "${cni_dir}"
curl -L "https://github.com/containernetworking/plugins/releases/download/${CNI_VERSION}/cni-plugins-amd64-${CNI_VERSION}.tgz" -o "${cni_dir}/cni-plugins-amd64.tgz"

CRICTL_VERSION="v${4}"
echo "Downloading CRI tools ${CRICTL_VERSION}..."
crictl_dir="${working_dir}/cri-tools/"
mkdir -p "${crictl_dir}"
curl -L "https://github.com/kubernetes-incubator/cri-tools/releases/download/${CRICTL_VERSION}/crictl-${CRICTL_VERSION}-linux-amd64.tar.gz" -o "${crictl_dir}/crictl-linux-amd64.tar.gz"

echo "Downloading Kubernetes tools ${RELEASE}..."
k8s_dir="${working_dir}/k8s"
mkdir -p "${k8s_dir}"
cd "${k8s_dir}"
# Brace expansion must stay outside the quotes so all three binaries are fetched.
curl -L --remote-name-all "https://storage.googleapis.com/kubernetes-release/release/${RELEASE}/bin/linux/amd64/"{kubeadm,kubelet,kubectl}
# Remember the original mode of kubeadm so it can be restored after we run it below.
kubeadm_file_permissions=$(stat --format '%a' kubeadm)
chmod +x kubeadm

echo "Downloading kubelet.service ${RELEASE}..."
cd "$start_dir"
kubelet_service_file="${working_dir}/kubelet.service"
touch "${kubelet_service_file}"
# Rewrite binary paths: on the node template the binaries live under /opt/bin.
curl -sSL "https://raw.githubusercontent.com/kubernetes/kubernetes/${RELEASE}/build/debs/kubelet.service" | sed "s:/usr/bin:/opt/bin:g" > "${kubelet_service_file}"

echo "Downloading 10-kubeadm.conf ${RELEASE}..."
kubeadm_conf_file="${working_dir}/10-kubeadm.conf"
touch "${kubeadm_conf_file}"
curl -sSL "https://raw.githubusercontent.com/kubernetes/kubernetes/${RELEASE}/build/debs/10-kubeadm.conf" | sed "s:/usr/bin:/opt/bin:g" > "${kubeadm_conf_file}"

NETWORK_CONFIG_URL="${5}"
echo "Downloading network config ${NETWORK_CONFIG_URL}"
network_conf_file="${working_dir}/network.yaml"
curl -sSL "${NETWORK_CONFIG_URL}" -o "${network_conf_file}"

# Fix: variable was previously misspelled DASHBORAD_CONFIG_URL (internal only).
DASHBOARD_CONFIG_URL="${6}"
echo "Downloading dashboard config ${DASHBOARD_CONFIG_URL}"
dashboard_conf_file="${working_dir}/dashboard.yaml"
curl -sSL "${DASHBOARD_CONFIG_URL}" -o "${dashboard_conf_file}"

echo "Fetching k8s docker images..."
# Install docker if it is not already present; it is needed to pull/save images.
if ! docker -v; then
    echo "Installing docker..."
    if [ -f /etc/redhat-release ]; then
        sudo yum -y remove docker-common docker container-selinux docker-selinux docker-engine
        sudo yum -y install lvm2 device-mapper device-mapper-persistent-data device-mapper-event device-mapper-libs device-mapper-event-libs
        sudo yum install -y http://mirror.centos.org/centos/7/extras/x86_64/Packages/container-selinux-2.107-3.el7.noarch.rpm
        sudo wget https://download.docker.com/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo && sudo yum -y install docker-ce
        sudo systemctl enable docker && sudo systemctl start docker
    elif [ -f /etc/lsb-release ]; then
        sudo apt update && sudo apt install docker.io -y
        sudo systemctl enable docker && sudo systemctl start docker
    fi
fi

mkdir -p "${working_dir}/docker"
# kubeadm prints one image reference per line; save each as a tarball on the ISO.
output=$("${k8s_dir}/kubeadm" config images list)
while read -r line; do
    echo "Downloading docker image $line ---"
    sudo docker pull "$line"
    image_name=$(echo "$line" | grep -oE "[^/]+$")
    sudo docker save "$line" > "${working_dir}/docker/$image_name.tar"
    sudo docker image rm "$line"
done <<< "$output"

echo "Restore kubeadm permissions..."
# BUG FIX: the original used `[ "$perms" -eq "" ]` — an integer comparison against
# an empty string, which always errors in test(1) and evaluates false, so the 644
# fallback could never fire. Use -z to test for an empty/unset value.
if [ -z "${kubeadm_file_permissions}" ]; then
    kubeadm_file_permissions=644
fi
chmod "${kubeadm_file_permissions}" "${working_dir}/k8s/kubeadm"

mkisofs -o "${output_dir}/setup-${RELEASE}.iso" -J -R -l "${iso_dir}"
rm -rf "${iso_dir}"

View File

@ -27,9 +27,27 @@ import com.cloud.utils.db.GenericDao;
public interface NetworkOfferingJoinDao extends GenericDao<NetworkOfferingJoinVO, Long> {
List<NetworkOfferingJoinVO> findByDomainId(long domainId);
/**
* Returns list of network offerings for a given domain
* NetworkOfferingJoinVO can have multiple domains set. Method will search for
* given domainId in list of domains for the offering.
* @param long domainId
* @param Boolean includeAllDomainOffering (if set to true offerings for which domain
* is not set will also be returned)
* @return List<NetworkOfferingJoinVO> List of network offerings
*/
List<NetworkOfferingJoinVO> findByDomainId(long domainId, Boolean includeAllDomainOffering);
List<NetworkOfferingJoinVO> findByZoneId(long zoneId);
/**
* Returns list of network offerings for a given zone
* NetworkOfferingJoinVO can have multiple zones set. Method will search for
* given zoneId in list of zones for the offering.
* @param long zoneId
* @param Boolean includeAllZoneOffering (if set to true offerings for which zone
* is not set will also be returned)
* @return List<NetworkOfferingJoinVO> List of network offerings
*/
List<NetworkOfferingJoinVO> findByZoneId(long zoneId, Boolean includeAllZoneOffering);
NetworkOfferingResponse newNetworkOfferingResponse(NetworkOffering nof);

View File

@ -43,9 +43,12 @@ public class NetworkOfferingJoinDaoImpl extends GenericDaoBase<NetworkOfferingJo
}
@Override
public List<NetworkOfferingJoinVO> findByDomainId(long domainId) {
public List<NetworkOfferingJoinVO> findByDomainId(long domainId, Boolean includeAllDomainOffering) {
SearchBuilder<NetworkOfferingJoinVO> sb = createSearchBuilder();
sb.and("domainId", sb.entity().getDomainId(), SearchCriteria.Op.FIND_IN_SET);
if (includeAllDomainOffering) {
sb.or("dId", sb.entity().getDomainId(), SearchCriteria.Op.NULL);
}
sb.done();
SearchCriteria<NetworkOfferingJoinVO> sc = sb.create();
@ -54,9 +57,12 @@ public class NetworkOfferingJoinDaoImpl extends GenericDaoBase<NetworkOfferingJo
}
@Override
public List<NetworkOfferingJoinVO> findByZoneId(long zoneId) {
public List<NetworkOfferingJoinVO> findByZoneId(long zoneId, Boolean includeAllZoneOffering) {
SearchBuilder<NetworkOfferingJoinVO> sb = createSearchBuilder();
sb.and("zoneId", sb.entity().getZoneId(), SearchCriteria.Op.FIND_IN_SET);
if (includeAllZoneOffering) {
sb.or("zId", sb.entity().getZoneId(), SearchCriteria.Op.NULL);
}
sb.done();
SearchCriteria<NetworkOfferingJoinVO> sc = sb.create();

View File

@ -300,6 +300,72 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage
private Random rand = new Random(System.currentTimeMillis());
/**
 * Picks the first Free IP from the supplied candidate list and assigns it to the
 * given owner inside a single DB transaction, moving it to Allocating (and, when
 * {@code allocate} is true, on to Allocated via markPublicIpAsAllocated).
 *
 * @param addressVOS candidate public IP rows, tried in order; non-Free entries are skipped
 * @throws CloudRuntimeException if no candidate could be claimed, or the claimed
 *         address did not end up in the expected state
 */
@DB
private IPAddressVO assignAndAllocateIpAddressEntry(final Account owner, final VlanType vlanUse, final Long guestNetworkId,
final boolean sourceNat, final boolean allocate, final boolean isSystem,
final Long vpcId, final Boolean displayIp, final boolean fetchFromDedicatedRange,
final List<IPAddressVO> addressVOS) throws CloudRuntimeException {
return Transaction.execute((TransactionCallbackWithException<IPAddressVO, CloudRuntimeException>) status -> {
IPAddressVO finalAddress = null;
// Resource-limit check applies only to shared (non-dedicated) VirtualNetwork IPs.
if (!fetchFromDedicatedRange && VlanType.VirtualNetwork.equals(vlanUse)) {
// Check that the maximum number of public IPs for the given accountId will not be exceeded
try {
_resourceLimitMgr.checkResourceLimit(owner, ResourceType.public_ip);
} catch (ResourceAllocationException ex) {
s_logger.warn("Failed to allocate resource of type " + ex.getResourceType() + " for account " + owner);
throw new AccountLimitException("Maximum number of public IP addresses for account: " + owner.getAccountName() + " has been exceeded.");
}
}
for (final IPAddressVO possibleAddr : addressVOS) {
if (possibleAddr.getState() != State.Free) {
continue;
}
final IPAddressVO addressVO = possibleAddr;
addressVO.setSourceNat(sourceNat);
addressVO.setAllocatedTime(new Date());
addressVO.setAllocatedInDomainId(owner.getDomainId());
addressVO.setAllocatedToAccountId(owner.getId());
addressVO.setSystem(isSystem);
if (displayIp != null) {
addressVO.setDisplay(displayIp);
}
// Direct-attached IPs are not tied to a guest network or VPC.
if (vlanUse != VlanType.DirectAttached) {
addressVO.setAssociatedWithNetworkId(guestNetworkId);
addressVO.setVpcId(vpcId);
}
// Row lock + re-read guards against a concurrent thread claiming the same IP;
// if the row is no longer Free after locking, fall through to the next candidate.
if (_ipAddressDao.lockRow(possibleAddr.getId(), true) != null) {
final IPAddressVO userIp = _ipAddressDao.findById(addressVO.getId());
if (userIp.getState() == State.Free) {
addressVO.setState(State.Allocating);
if (_ipAddressDao.update(addressVO.getId(), addressVO)) {
finalAddress = addressVO;
break;
}
}
}
}
if (finalAddress == null) {
s_logger.error("Failed to fetch any free public IP address");
throw new CloudRuntimeException("Failed to fetch any free public IP address");
}
if (allocate) {
markPublicIpAsAllocated(finalAddress);
}
// Sanity check: the address must have reached the state implied by `allocate`.
final State expectedAddressState = allocate ? State.Allocated : State.Allocating;
if (finalAddress.getState() != expectedAddressState) {
s_logger.error("Failed to fetch new public IP and get in expected state=" + expectedAddressState);
throw new CloudRuntimeException("Failed to fetch new public IP with expected state " + expectedAddressState);
}
return finalAddress;
});
}
@Override
public boolean configure(String name, Map<String, Object> params) {
// populate providers
@ -694,9 +760,23 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage
return fetchNewPublicIp(dcId, podId, vlanDbIds, owner, type, networkId, false, true, requestedIp, isSystem, null, null, false);
}
@Override
// Delegates with sourceNat=false, assign=false, allocate=false: the IP is claimed
// (moved to Allocating) but not marked Allocated; the caller completes allocation later.
public PublicIp getAvailablePublicIpAddressFromVlans(long dcId, Long podId, Account owner, VlanType type, List<Long> vlanDbIds, Long networkId, String requestedIp, boolean isSystem)
throws InsufficientAddressCapacityException {
return fetchNewPublicIp(dcId, podId, vlanDbIds, owner, type, networkId, false, false, false, requestedIp, isSystem, null, null, false);
}
@DB
public PublicIp fetchNewPublicIp(final long dcId, final Long podId, final List<Long> vlanDbIds, final Account owner, final VlanType vlanUse, final Long guestNetworkId,
final boolean sourceNat, final boolean assign, final String requestedIp, final boolean isSystem, final Long vpcId, final Boolean displayIp, final boolean forSystemVms)
final boolean sourceNat, final boolean allocate, final String requestedIp, final boolean isSystem, final Long vpcId, final Boolean displayIp, final boolean forSystemVms)
throws InsufficientAddressCapacityException {
return fetchNewPublicIp(dcId, podId, vlanDbIds, owner, vlanUse, guestNetworkId,
sourceNat, true, allocate, requestedIp, isSystem, vpcId, displayIp, forSystemVms);
}
@DB
public PublicIp fetchNewPublicIp(final long dcId, final Long podId, final List<Long> vlanDbIds, final Account owner, final VlanType vlanUse, final Long guestNetworkId,
final boolean sourceNat, final boolean assign, final boolean allocate, final String requestedIp, final boolean isSystem, final Long vpcId, final Boolean displayIp, final boolean forSystemVms)
throws InsufficientAddressCapacityException {
IPAddressVO addr = Transaction.execute(new TransactionCallbackWithException<IPAddressVO, InsufficientAddressCapacityException>() {
@Override
@ -807,64 +887,13 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage
}
assert(addrs.size() == 1) : "Return size is incorrect: " + addrs.size();
if (!fetchFromDedicatedRange && VlanType.VirtualNetwork.equals(vlanUse)) {
// Check that the maximum number of public IPs for the given accountId will not be exceeded
try {
_resourceLimitMgr.checkResourceLimit(owner, ResourceType.public_ip);
} catch (ResourceAllocationException ex) {
s_logger.warn("Failed to allocate resource of type " + ex.getResourceType() + " for account " + owner);
throw new AccountLimitException("Maximum number of public IP addresses for account: " + owner.getAccountName() + " has been exceeded.");
}
}
IPAddressVO finalAddr = null;
for (final IPAddressVO possibleAddr: addrs) {
if (possibleAddr.getState() != IpAddress.State.Free) {
continue;
}
final IPAddressVO addr = possibleAddr;
addr.setSourceNat(sourceNat);
addr.setAllocatedTime(new Date());
addr.setAllocatedInDomainId(owner.getDomainId());
addr.setAllocatedToAccountId(owner.getId());
addr.setSystem(isSystem);
if (displayIp != null) {
addr.setDisplay(displayIp);
}
if (vlanUse != VlanType.DirectAttached) {
addr.setAssociatedWithNetworkId(guestNetworkId);
addr.setVpcId(vpcId);
}
if (_ipAddressDao.lockRow(possibleAddr.getId(), true) != null) {
final IPAddressVO userIp = _ipAddressDao.findById(addr.getId());
if (userIp.getState() == IpAddress.State.Free) {
addr.setState(IpAddress.State.Allocating);
if (_ipAddressDao.update(addr.getId(), addr)) {
finalAddr = addr;
break;
}
}
}
}
if (finalAddr == null) {
s_logger.error("Failed to fetch any free public IP address");
throw new CloudRuntimeException("Failed to fetch any free public IP address");
}
if (assign) {
markPublicIpAsAllocated(finalAddr);
finalAddr = assignAndAllocateIpAddressEntry(owner, vlanUse, guestNetworkId, sourceNat, allocate,
isSystem,vpcId, displayIp, fetchFromDedicatedRange, addrs);
} else {
finalAddr = addrs.get(0);
}
final State expectedAddressState = assign ? State.Allocated : State.Allocating;
if (finalAddr.getState() != expectedAddressState) {
s_logger.error("Failed to fetch new public IP and get in expected state=" + expectedAddressState);
throw new CloudRuntimeException("Failed to fetch new public IP with expected state " + expectedAddressState);
}
return finalAddr;
}
});

View File

@ -3516,6 +3516,9 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
final boolean allowUserViewAllDomainAccounts = (QueryService.AllowUserViewAllDomainAccounts.valueIn(caller.getDomainId()));
final boolean kubernetesServiceEnabled = Boolean.parseBoolean(_configDao.getValue("cloud.kubernetes.service.enabled"));
final boolean kubernetesClusterExperimentalFeaturesEnabled = Boolean.parseBoolean(_configDao.getValue("cloud.kubernetes.cluster.experimental.features.enabled"));
// check if region-wide secondary storage is used
boolean regionSecondaryEnabled = false;
final List<ImageStoreVO> imgStores = _imgStoreDao.findRegionImageStores();
@ -3537,6 +3540,8 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
capabilities.put("allowUserExpungeRecoverVM", allowUserExpungeRecoverVM);
capabilities.put("allowUserExpungeRecoverVolume", allowUserExpungeRecoverVolume);
capabilities.put("allowUserViewAllDomainAccounts", allowUserViewAllDomainAccounts);
capabilities.put("kubernetesServiceEnabled", kubernetesServiceEnabled);
capabilities.put("kubernetesClusterExperimentalFeaturesEnabled", kubernetesClusterExperimentalFeaturesEnabled);
if (apiLimitEnabled) {
capabilities.put("apiLimitInterval", apiLimitInterval);
capabilities.put("apiLimitMax", apiLimitMax);

View File

@ -0,0 +1,729 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Tests for Kubernetes supported version """
#Import Local Modules
from marvin.cloudstackTestCase import cloudstackTestCase, unittest
from marvin.cloudstackAPI import (listInfrastructure,
listKubernetesSupportedVersions,
addKubernetesSupportedVersion,
deleteKubernetesSupportedVersion,
createKubernetesCluster,
stopKubernetesCluster,
deleteKubernetesCluster,
upgradeKubernetesCluster,
scaleKubernetesCluster)
from marvin.cloudstackException import CloudstackAPIException
from marvin.codes import FAILED
from marvin.lib.base import (Template,
ServiceOffering,
Configurations)
from marvin.lib.utils import (cleanup_resources,
random_gen)
from marvin.lib.common import (get_zone)
from marvin.sshClient import SshClient
from nose.plugins.attrib import attr
import time
_multiprocess_shared_ = True
class TestKubernetesCluster(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(TestKubernetesCluster, cls).getClsTestClient()
cls.apiclient = cls.testClient.getApiClient()
cls.services = cls.testClient.getParsedTestDataConfig()
cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
cls.hypervisor = cls.testClient.getHypervisorInfo()
cls.mgtSvrDetails = cls.config.__dict__["mgtSvr"][0].__dict__
cls.cks_template_name_key = "cloud.kubernetes.cluster.template.name." + cls.hypervisor.lower()
cls.setup_failed = False
cls.initial_configuration_cks_enabled = Configurations.list(cls.apiclient,
name="cloud.kubernetes.service.enabled")[0].value
if cls.initial_configuration_cks_enabled not in ["true", True]:
cls.debug("Enabling CloudStack Kubernetes Service plugin and restarting management server")
Configurations.update(cls.apiclient,
"cloud.kubernetes.service.enabled",
"true")
cls.restartServer()
cls.cks_template = None
cls.initial_configuration_cks_template_name = None
cls.cks_service_offering = None
cls.kubernetes_version_ids = []
if cls.setup_failed == False:
try:
cls.kuberetes_version_1 = cls.addKubernetesSupportedVersion('1.14.9', 'http://staging.yadav.xyz/cks/binaries-iso/setup-1.14.9.iso')
cls.kubernetes_version_ids.append(cls.kuberetes_version_1.id)
except Exception as e:
cls.setup_failed = True
cls.debug("Failed to get Kubernetes version ISO in ready state, http://staging.yadav.xyz/cks/binaries-iso/setup-1.14.9.iso, %s" % e)
if cls.setup_failed == False:
try:
cls.kuberetes_version_2 = cls.addKubernetesSupportedVersion('1.15.0', 'http://staging.yadav.xyz/cks/binaries-iso/setup-1.15.0.iso')
cls.kubernetes_version_ids.append(cls.kuberetes_version_2.id)
except Exception as e:
cls.setup_failed = True
cls.debug("Failed to get Kubernetes version ISO in ready state, http://staging.yadav.xyz/cks/binaries-iso/setup-1.15.0.iso, %s" % e)
if cls.setup_failed == False:
try:
cls.kuberetes_version_3 = cls.addKubernetesSupportedVersion('1.16.0', 'http://staging.yadav.xyz/cks/binaries-iso/setup-1.16.0.iso')
cls.kubernetes_version_ids.append(cls.kuberetes_version_3.id)
except Exception as e:
cls.setup_failed = True
cls.debug("Failed to get Kubernetes version ISO in ready state, http://staging.yadav.xyz/cks/binaries-iso/setup-1.16.0.is, %s" % e)
if cls.setup_failed == False:
try:
cls.kuberetes_version_4 = cls.addKubernetesSupportedVersion('1.16.3', 'http://staging.yadav.xyz/cks/binaries-iso/setup-1.16.3.iso')
cls.kubernetes_version_ids.append(cls.kuberetes_version_4.id)
except Exception as e:
cls.setup_failed = True
cls.debug("Failed to get Kubernetes version ISO in ready state, http://staging.yadav.xyz/cks/binaries-iso/setup-1.16.3.is, %s" % e)
cks_template_data = {
"name": "Kubernetes-Service-Template",
"displaytext": "Kubernetes-Service-Template",
"format": "qcow2",
"hypervisor": "kvm",
"ostype": "CoreOS",
"url": "http://staging.yadav.xyz/cks/templates/coreos_production_cloudstack_image-kvm.qcow2.bz2",
"ispublic": "True",
"isextractable": "True"
}
# "http://dl.openvm.eu/cloudstack/coreos/x86_64/coreos_production_cloudstack_image-kvm.qcow2.bz2"
cks_template_data_details = []
if cls.hypervisor.lower() == "vmware":
cks_template_data["url"] = "http://staging.yadav.xyz/cks/templates/coreos_production_cloudstack_image-vmware.ova" # "http://dl.openvm.eu/cloudstack/coreos/x86_64/coreos_production_cloudstack_image-vmware.ova"
cks_template_data["format"] = "OVA"
cks_template_data_details = [{"keyboard":"us","nicAdapter":"Vmxnet3","rootDiskController":"pvscsi"}]
elif cls.hypervisor.lower() == "xenserver":
cks_template_data["url"] = "http://staging.yadav.xyz/cks/templates/coreos_production_cloudstack_image-xen.vhd.bz2" # "http://dl.openvm.eu/cloudstack/coreos/x86_64/coreos_production_cloudstack_image-xen.vhd.bz2"
cks_template_data["format"] = "VHD"
elif cls.hypervisor.lower() == "kvm":
cks_template_data["requireshvm"] = "True"
if cls.setup_failed == False:
cls.cks_template = Template.register(
cls.apiclient,
cks_template_data,
zoneid=cls.zone.id,
hypervisor=cls.hypervisor,
details=cks_template_data_details
)
cls.debug("Waiting for CKS template with ID %s to be ready" % cls.cks_template.id)
try:
cls.waitForTemplateReadyState(cls.cks_template.id)
except Exception as e:
cls.setup_failed = True
cls.debug("Failed to get CKS template in ready state, {}, {}".format(cks_template_data["url"], e))
cls.initial_configuration_cks_template_name = Configurations.list(cls.apiclient,
name=cls.cks_template_name_key)[0].value
Configurations.update(cls.apiclient,
cls.cks_template_name_key,
cls.cks_template.name)
cks_offering_data = {
"name": "CKS-Instance",
"displaytext": "CKS Instance",
"cpunumber": 2,
"cpuspeed": 1000,
"memory": 2048,
}
cks_offering_data["name"] = cks_offering_data["name"] + '-' + random_gen()
if cls.setup_failed == False:
cls.cks_service_offering = ServiceOffering.create(
cls.apiclient,
cks_offering_data
)
cls._cleanup = []
if cls.cks_template != None:
cls._cleanup.append(cls.cks_template)
if cls.cks_service_offering != None:
cls._cleanup.append(cls.cks_service_offering)
return
@classmethod
def tearDownClass(cls):
version_delete_failed = False
# Delete added Kubernetes supported version
for version_id in cls.kubernetes_version_ids:
try:
cls.deleteKubernetesSupportedVersion(version_id)
except Exception as e:
version_delete_failed = True
cls.debug("Error: Exception during cleanup for added Kubernetes supported versions: %s" % e)
try:
# Restore original CKS template
if cls.initial_configuration_cks_template_name != None:
Configurations.update(cls.apiclient,
cls.cks_template_name_key,
cls.initial_configuration_cks_template_name)
# Delete created CKS template
if cls.setup_failed == False and cls.cks_template != None:
cls.cks_template.delete(cls.apiclient,
cls.zone.id)
# Restore CKS enabled
if cls.initial_configuration_cks_enabled not in ["true", True]:
cls.debug("Restoring Kubernetes Service enabled value")
Configurations.update(cls.apiclient,
"cloud.kubernetes.service.enabled",
"false")
cls.restartServer()
cleanup_resources(cls.apiclient, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
if version_delete_failed == True:
raise Exception("Warning: Exception during cleanup, unable to delete Kubernetes supported versions")
return
@classmethod
def restartServer(cls):
"""Restart management server"""
cls.debug("Restarting management server")
sshClient = SshClient(
cls.mgtSvrDetails["mgtSvrIp"],
22,
cls.mgtSvrDetails["user"],
cls.mgtSvrDetails["passwd"]
)
command = "service cloudstack-management stop"
sshClient.execute(command)
command = "service cloudstack-management start"
sshClient.execute(command)
#Waits for management to come up in 5 mins, when it's up it will continue
timeout = time.time() + 300
while time.time() < timeout:
if cls.isManagementUp() is True: return
time.sleep(5)
cls.setup_failed = True
cls.debug("Management server did not come up, failing")
return
@classmethod
def isManagementUp(cls):
try:
cls.apiclient.listInfrastructure(listInfrastructure.listInfrastructureCmd())
return True
except Exception:
return False
@classmethod
def waitForTemplateReadyState(cls, template_id, retries=30, interval=30):
    """Poll until the template download finishes.

    :param template_id: UUID of the template to poll
    :param retries: number of polling attempts before giving up
    :param interval: seconds to sleep between attempts
    :raises Exception: when the download fails or times out
    """
    while retries > -1:
        time.sleep(interval)
        template_response = Template.list(
            cls.apiclient,
            id=template_id,
            zoneid=cls.zone.id,
            templatefilter='self'
        )
        # Bug fix: the original only assigned `template` inside an isinstance
        # check but then dereferenced it unconditionally, raising NameError on
        # a non-list response. Treat that case as "not ready yet" and retry.
        if not isinstance(template_response, list) or not template_response:
            retries = retries - 1
            continue
        template = template_response[0]
        if not template or not getattr(template, 'status', None):
            retries = retries - 1
            continue
        if 'Failed' == template.status:
            raise Exception("Failed to download template: status - %s" % template.status)
        elif template.status == 'Download Complete' and template.isready:
            return
        retries = retries - 1
    raise Exception("Template download timed out")
@classmethod
def waitForKubernetesSupportedVersionIsoReadyState(cls, version_id, retries=20, interval=30):
    """Poll until the Kubernetes supported version ISO is in the Ready state.

    :param version_id: UUID of the supported version to poll
    :param retries: number of polling attempts before giving up
    :param interval: seconds to sleep between attempts
    :raises Exception: on a Failed/unknown ISO state or on timeout
    """
    while retries > -1:
        time.sleep(interval)
        version = cls.listKubernetesSupportedVersion(version_id)
        # Response not populated yet: burn a retry and poll again
        if not version or not getattr(version, 'isostate', None):
            retries = retries - 1
            continue
        if 'Creating' == version.isostate:
            retries = retries - 1
        elif 'Ready' == version.isostate:
            return
        elif 'Failed' == version.isostate:
            # Bug fix: the original raised using an undefined variable
            # (`template.status`), masking the real failure with a NameError.
            raise Exception(
                "Failed to download Kubernetes supported version ISO: status - %s" %
                version.isostate)
        else:
            raise Exception(
                "Failed to download Kubernetes supported version ISO: status - %s" %
                version.isostate)
    raise Exception("Kubernetes supported version Ready state timed out")
@classmethod
def listKubernetesSupportedVersion(cls, version_id):
    """Fetch a single Kubernetes supported version by its ID."""
    cmd = listKubernetesSupportedVersions.listKubernetesSupportedVersionsCmd()
    cmd.id = version_id
    return cls.apiclient.listKubernetesSupportedVersions(cmd)[0]
@classmethod
def addKubernetesSupportedVersion(cls, semantic_version, iso_url):
    """Register a Kubernetes supported version and wait for its ISO to be Ready.

    Returns the freshly listed version response once the ISO is Ready.
    """
    cmd = addKubernetesSupportedVersion.addKubernetesSupportedVersionCmd()
    cmd.semanticversion = semantic_version
    cmd.name = 'v' + semantic_version + '-' + random_gen()
    cmd.url = iso_url
    # Minimum node offering requirements for this version
    cmd.mincpunumber = 2
    cmd.minmemory = 2048
    version = cls.apiclient.addKubernetesSupportedVersion(cmd)
    cls.debug("Waiting for Kubernetes version with ID %s to be ready" % version.id)
    cls.waitForKubernetesSupportedVersionIsoReadyState(version.id)
    return cls.listKubernetesSupportedVersion(version.id)
@classmethod
def deleteKubernetesSupportedVersion(cls, version_id):
    """Delete a Kubernetes supported version along with its ISO."""
    cmd = deleteKubernetesSupportedVersion.deleteKubernetesSupportedVersionCmd()
    cmd.id = version_id
    cmd.deleteiso = True
    cls.apiclient.deleteKubernetesSupportedVersion(cmd)
def setUp(self):
    """Prepare per-test clients and an empty per-test cleanup list."""
    self.apiclient = self.testClient.getApiClient()
    self.dbclient = self.testClient.getDbConnection()
    self.services = self.testClient.getParsedTestDataConfig()
    self.cleanup = []
    return
def tearDown(self):
    """Remove any resources the test registered for cleanup."""
    try:
        cleanup_resources(self.apiclient, self.cleanup)
    except Exception as e:
        raise Exception("Warning: Exception during cleanup : %s" % e)
    return
@attr(tags=["advanced", "smoke"], required_hardware="true")
def test_01_deploy_kubernetes_cluster(self):
    """Test to deploy a new Kubernetes cluster

    # Validate the following:
    # 1. createKubernetesCluster should return valid info for new cluster
    # 2. The Cloud Database contains the valid information
    # 3. stopKubernetesCluster should stop the cluster
    """
    # CKS clusters are only deployable on these hypervisors
    if self.hypervisor.lower() not in ["kvm", "vmware", "xenserver"]:
        self.skipTest("CKS not supported for hypervisor: %s" % self.hypervisor.lower())
    if self.setup_failed == True:
        self.skipTest("Setup incomplete")
    name = 'testcluster-' + random_gen()
    self.debug("Creating for Kubernetes cluster with name %s" % name)
    # Defaults: 1 worker node, 1 master node
    cluster_response = self.createKubernetesCluster(name, self.kuberetes_version_2.id)
    self.verifyKubernetesCluster(cluster_response, name, self.kuberetes_version_2.id)
    self.debug("Kubernetes cluster with ID: %s successfully deployed, now stopping it" % cluster_response.id)
    self.stopAndVerifyKubernetesCluster(cluster_response.id)
    self.debug("Kubernetes cluster with ID: %s successfully stopped, now deleting it" % cluster_response.id)
    self.deleteAndVerifyKubernetesCluster(cluster_response.id)
    self.debug("Kubernetes cluster with ID: %s successfully deleted" % cluster_response.id)
    return
@attr(tags=["advanced", "smoke"], required_hardware="true")
def test_02_deploy_kubernetes_ha_cluster(self):
    """Test to deploy a new HA Kubernetes cluster

    # Validate the following:
    # 1. createKubernetesCluster should return valid info for new cluster
    # 2. The Cloud Database contains the valid information
    """
    if self.hypervisor.lower() not in ["kvm", "vmware", "xenserver"]:
        self.skipTest("CKS not supported for hypervisor: %s" % self.hypervisor.lower())
    if self.setup_failed == True:
        self.skipTest("Setup incomplete")
    name = 'testcluster-' + random_gen()
    self.debug("Creating for Kubernetes cluster with name %s" % name)
    # HA layout: 1 worker node, 2 master nodes (requires version >= 1.16.0)
    cluster_response = self.createKubernetesCluster(name, self.kuberetes_version_3.id, 1, 2)
    self.verifyKubernetesCluster(cluster_response, name, self.kuberetes_version_3.id, 1, 2)
    self.debug("Kubernetes cluster with ID: %s successfully deployed, now deleting it" % cluster_response.id)
    self.deleteAndVerifyKubernetesCluster(cluster_response.id)
    self.debug("Kubernetes cluster with ID: %s successfully deleted" % cluster_response.id)
    return
@attr(tags=["advanced", "smoke"], required_hardware="true")
def test_03_deploy_invalid_kubernetes_ha_cluster(self):
    """Test that deploying an HA Kubernetes cluster with an unsupported version fails

    # Validate the following:
    # 1. createKubernetesCluster with multiple master nodes and a Kubernetes
    #    version below 1.16.0 should return an API error
    """
    if self.hypervisor.lower() not in ["kvm", "vmware", "xenserver"]:
        self.skipTest("CKS not supported for hypervisor: %s" % self.hypervisor.lower())
    if self.setup_failed == True:
        self.skipTest("Setup incomplete")
    name = 'testcluster-' + random_gen()
    self.debug("Creating for Kubernetes cluster with name %s" % name)
    try:
        # 2 master nodes with a pre-1.16.0 version must be rejected
        cluster_response = self.createKubernetesCluster(name, self.kuberetes_version_2.id, 1, 2)
        # Typo fix: message previously read "Invslid"
        self.debug("Invalid CKS Kubernetes HA cluster deployed with ID: %s. Deleting it and failing test." % cluster_response.id)
        self.deleteKubernetesCluster(cluster_response.id)
        self.fail("HA Kubernetes cluster deployed with Kubernetes supported version below version 1.16.0. Must be an error.")
    except CloudstackAPIException as e:
        self.debug("HA Kubernetes cluster with invalid Kubernetes supported version check successful, API failure: %s" % e)
    return
@attr(tags=["advanced", "smoke"], required_hardware="true")
def test_04_deploy_and_upgrade_kubernetes_cluster(self):
    """Test to deploy a new Kubernetes cluster and upgrade it to newer version

    # Validate the following:
    # 1. createKubernetesCluster should return valid info for new cluster
    # 2. The Cloud Database contains the valid information
    # 3. upgradeKubernetesCluster should return valid info for the cluster
    """
    if self.hypervisor.lower() not in ["kvm", "vmware", "xenserver"]:
        self.skipTest("CKS not supported for hypervisor: %s" % self.hypervisor.lower())
    if self.setup_failed == True:
        self.skipTest("Setup incomplete")
    name = 'testcluster-' + random_gen()
    self.debug("Creating for Kubernetes cluster with name %s" % name)
    cluster_response = self.createKubernetesCluster(name, self.kuberetes_version_2.id)
    self.verifyKubernetesCluster(cluster_response, name, self.kuberetes_version_2.id)
    self.debug("Kubernetes cluster with ID: %s successfully deployed, now upgrading it" % cluster_response.id)
    try:
        # Upgrade from version 2 to the newer version 3
        cluster_response = self.upgradeKubernetesCluster(cluster_response.id, self.kuberetes_version_3.id)
    except Exception as e:
        # Clean up the deployed cluster before failing so it does not leak
        self.deleteKubernetesCluster(cluster_response.id)
        self.fail("Failed to upgrade Kubernetes cluster due to: %s" % e)
    self.verifyKubernetesClusterUpgrade(cluster_response, self.kuberetes_version_3.id)
    self.debug("Kubernetes cluster with ID: %s successfully upgraded, now deleting it" % cluster_response.id)
    self.deleteAndVerifyKubernetesCluster(cluster_response.id)
    self.debug("Kubernetes cluster with ID: %s successfully deleted" % cluster_response.id)
    return
@attr(tags=["advanced", "smoke"], required_hardware="true")
def test_05_deploy_and_upgrade_kubernetes_ha_cluster(self):
    """Test to deploy a new HA Kubernetes cluster and upgrade it to newer version

    # Validate the following:
    # 1. createKubernetesCluster should return valid info for new cluster
    # 2. The Cloud Database contains the valid information
    # 3. upgradeKubernetesCluster should return valid info for the cluster
    """
    if self.hypervisor.lower() not in ["kvm", "vmware", "xenserver"]:
        self.skipTest("CKS not supported for hypervisor: %s" % self.hypervisor.lower())
    if self.setup_failed == True:
        self.skipTest("Setup incomplete")
    name = 'testcluster-' + random_gen()
    self.debug("Creating for Kubernetes cluster with name %s" % name)
    # HA layout: 1 worker node, 2 master nodes
    cluster_response = self.createKubernetesCluster(name, self.kuberetes_version_3.id, 1, 2)
    self.verifyKubernetesCluster(cluster_response, name, self.kuberetes_version_3.id, 1, 2)
    self.debug("Kubernetes cluster with ID: %s successfully deployed, now upgrading it" % cluster_response.id)
    try:
        cluster_response = self.upgradeKubernetesCluster(cluster_response.id, self.kuberetes_version_4.id)
    except Exception as e:
        # Clean up the deployed cluster before failing so it does not leak
        self.deleteKubernetesCluster(cluster_response.id)
        self.fail("Failed to upgrade Kubernetes HA cluster due to: %s" % e)
    self.verifyKubernetesClusterUpgrade(cluster_response, self.kuberetes_version_4.id)
    self.debug("Kubernetes cluster with ID: %s successfully upgraded, now deleting it" % cluster_response.id)
    self.deleteAndVerifyKubernetesCluster(cluster_response.id)
    self.debug("Kubernetes cluster with ID: %s successfully deleted" % cluster_response.id)
    return
@attr(tags=["advanced", "smoke"], required_hardware="true")
def test_06_deploy_and_invalid_upgrade_kubernetes_cluster(self):
    """Test to deploy a new Kubernetes cluster and check for failure while trying to upgrade it to a lower version

    # Validate the following:
    # 1. createKubernetesCluster should return valid info for new cluster
    # 2. The Cloud Database contains the valid information
    # 3. upgradeKubernetesCluster should fail
    """
    if self.hypervisor.lower() not in ["kvm", "vmware", "xenserver"]:
        self.skipTest("CKS not supported for hypervisor: %s" % self.hypervisor.lower())
    if self.setup_failed == True:
        self.skipTest("Setup incomplete")
    name = 'testcluster-' + random_gen()
    self.debug("Creating for Kubernetes cluster with name %s" % name)
    cluster_response = self.createKubernetesCluster(name, self.kuberetes_version_2.id)
    self.verifyKubernetesCluster(cluster_response, name, self.kuberetes_version_2.id)
    self.debug("Kubernetes cluster with ID: %s successfully deployed, now downgrading it" % cluster_response.id)
    try:
        cluster_response = self.upgradeKubernetesCluster(cluster_response.id, self.kuberetes_version_1.id)
        # Bug fix: the original referenced the bare name `kuberetes_version_1`
        # (NameError) instead of the class attribute.
        self.debug("Kubernetes cluster with ID: %s downgraded to version %s. Deleting it and failing test." % (cluster_response.id, self.kuberetes_version_1.id))
        self.deleteKubernetesCluster(cluster_response.id)
        self.fail("Kubernetes cluster upgraded to a lower Kubernetes supported version. Must be an error.")
    except CloudstackAPIException as e:
        # Bug fix: catching the broad Exception here also swallowed the
        # AssertionError raised by self.fail() above, so this negative test
        # could never actually fail. Catch only the expected API error.
        self.debug("Upgrading Kubernetes cluster with invalid Kubernetes supported version check successful, API failure: %s" % e)
    self.debug("Deleting Kubernetes cluster with ID: %s" % cluster_response.id)
    self.deleteAndVerifyKubernetesCluster(cluster_response.id)
    self.debug("Kubernetes cluster with ID: %s successfully deleted" % cluster_response.id)
    return
@attr(tags=["advanced", "smoke"], required_hardware="true")
def test_07_deploy_and_scale_kubernetes_cluster(self):
    """Test to deploy a new Kubernetes cluster and scale it up and down

    # Validate the following:
    # 1. createKubernetesCluster should return valid info for new cluster
    # 2. The Cloud Database contains the valid information
    # 3. scaleKubernetesCluster should return valid info for the cluster when it is scaled up
    # 4. scaleKubernetesCluster should return valid info for the cluster when it is scaled down
    """
    if self.hypervisor.lower() not in ["kvm", "vmware", "xenserver"]:
        self.skipTest("CKS not supported for hypervisor: %s" % self.hypervisor.lower())
    if self.setup_failed == True:
        self.skipTest("Setup incomplete")
    name = 'testcluster-' + random_gen()
    self.debug("Creating for Kubernetes cluster with name %s" % name)
    cluster_response = self.createKubernetesCluster(name, self.kuberetes_version_2.id)
    self.verifyKubernetesCluster(cluster_response, name, self.kuberetes_version_2.id)
    self.debug("Kubernetes cluster with ID: %s successfully deployed, now upscaling it" % cluster_response.id)
    try:
        # Scale from 1 worker node up to 2
        cluster_response = self.scaleKubernetesCluster(cluster_response.id, 2)
    except Exception as e:
        # Clean up the deployed cluster before failing so it does not leak
        self.deleteKubernetesCluster(cluster_response.id)
        self.fail("Failed to upscale Kubernetes cluster due to: %s" % e)
    self.verifyKubernetesClusterScale(cluster_response, 2)
    self.debug("Kubernetes cluster with ID: %s successfully upscaled, now downscaling it" % cluster_response.id)
    try:
        # Scale back down to 1 worker node
        cluster_response = self.scaleKubernetesCluster(cluster_response.id, 1)
    except Exception as e:
        self.deleteKubernetesCluster(cluster_response.id)
        self.fail("Failed to downscale Kubernetes cluster due to: %s" % e)
    self.verifyKubernetesClusterScale(cluster_response)
    self.debug("Kubernetes cluster with ID: %s successfully downscaled, now deleting it" % cluster_response.id)
    self.deleteAndVerifyKubernetesCluster(cluster_response.id)
    self.debug("Kubernetes cluster with ID: %s successfully deleted" % cluster_response.id)
    return
def listKubernetesCluster(self, cluster_id):
    """Fetch a single Kubernetes cluster by its ID."""
    cmd = listKubernetesClusters.listKubernetesClustersCmd()
    cmd.id = cluster_id
    return self.apiclient.listKubernetesClusters(cmd)[0]
def createKubernetesCluster(self, name, version_id, size=1, master_nodes=1):
    # Deploys a Kubernetes cluster with a fixed 10 GB node root disk and the
    # shared CKS service offering, and returns the raw API response.
    createKubernetesClusterCmd = createKubernetesCluster.createKubernetesClusterCmd()
    createKubernetesClusterCmd.name = name
    createKubernetesClusterCmd.description = name + "-description"
    createKubernetesClusterCmd.kubernetesversionid = version_id
    createKubernetesClusterCmd.size = size
    createKubernetesClusterCmd.masternodes = master_nodes
    createKubernetesClusterCmd.serviceofferingid = self.cks_service_offering.id
    createKubernetesClusterCmd.zoneid = self.zone.id
    createKubernetesClusterCmd.noderootdisksize = 10
    clusterResponse = self.apiclient.createKubernetesCluster(createKubernetesClusterCmd)
    # NOTE(review): this condition looks inverted — it only registers the
    # response for cleanup when the response is falsy (i.e. appends None).
    # Confirm whether `if clusterResponse:` was intended.
    if not clusterResponse:
        self.cleanup.append(clusterResponse)
    return clusterResponse
def stopKubernetesCluster(self, cluster_id):
    """Stop the Kubernetes cluster with the given ID and return the API response."""
    cmd = stopKubernetesCluster.stopKubernetesClusterCmd()
    cmd.id = cluster_id
    return self.apiclient.stopKubernetesCluster(cmd)
def deleteKubernetesCluster(self, cluster_id):
    """Delete the Kubernetes cluster with the given ID and return the API response."""
    cmd = deleteKubernetesCluster.deleteKubernetesClusterCmd()
    cmd.id = cluster_id
    return self.apiclient.deleteKubernetesCluster(cmd)
def upgradeKubernetesCluster(self, cluster_id, version_id):
    """Upgrade the cluster to the given Kubernetes version and return the API response."""
    cmd = upgradeKubernetesCluster.upgradeKubernetesClusterCmd()
    cmd.id = cluster_id
    cmd.kubernetesversionid = version_id
    return self.apiclient.upgradeKubernetesCluster(cmd)
def scaleKubernetesCluster(self, cluster_id, size):
    """Scale the cluster to the given worker-node count and return the API response."""
    cmd = scaleKubernetesCluster.scaleKubernetesClusterCmd()
    cmd.id = cluster_id
    cmd.size = size
    return self.apiclient.scaleKubernetesCluster(cmd)
def verifyKubernetesCluster(self, cluster_response, name, version_id, size=1, master_nodes=1):
    """Check if Kubernetes cluster is valid

    Verifies state, name, version, zone and node counts from the API
    response, and cross-checks the cluster name directly in the cloud DB.
    """
    self.verifyKubernetesClusterState(cluster_response, 'Running')
    self.assertEqual(
        cluster_response.name,
        name,
        "Check KubernetesCluster name {}, {}".format(cluster_response.name, name)
    )
    self.verifyKubernetesClusterVersion(cluster_response, version_id)
    self.assertEqual(
        cluster_response.zoneid,
        self.zone.id,
        "Check KubernetesCluster zone {}, {}".format(cluster_response.zoneid, self.zone.id)
    )
    self.verifyKubernetesClusterSize(cluster_response, size, master_nodes)
    # Cross-check the stored name directly in the cloud database
    db_cluster_name = self.dbclient.execute("select name from kubernetes_cluster where uuid = '%s';" % cluster_response.id)[0][0]
    self.assertEqual(
        str(db_cluster_name),
        name,
        "Check KubernetesCluster name in DB {}, {}".format(db_cluster_name, name)
    )
def verifyKubernetesClusterState(self, cluster_response, state):
    """Assert the Kubernetes cluster is in the given state.

    Bug fix: the original compared against the hard-coded literal 'Running'
    instead of the `state` argument, so any caller passing a different state
    would assert the wrong thing (while the failure message showed `state`).
    All current callers pass 'Running', so behavior for them is unchanged.
    """
    self.assertEqual(
        cluster_response.state,
        state,
        "Check KubernetesCluster state {}, {}".format(cluster_response.state, state)
    )
def verifyKubernetesClusterVersion(self, cluster_response, version_id):
    """Check if the Kubernetes cluster runs the expected Kubernetes version"""
    self.assertEqual(
        cluster_response.kubernetesversionid,
        version_id,
        "Check KubernetesCluster version {}, {}".format(cluster_response.kubernetesversionid, version_id)
    )
def verifyKubernetesClusterSize(self, cluster_response, size=1, master_nodes=1):
    """Check that the cluster's worker and master node counts match expectations."""
    actual_size = cluster_response.size
    self.assertEqual(
        actual_size,
        size,
        "Check KubernetesCluster size {}, {}".format(actual_size, size)
    )
    actual_masters = cluster_response.masternodes
    self.assertEqual(
        actual_masters,
        master_nodes,
        "Check KubernetesCluster master nodes {}, {}".format(actual_masters, master_nodes)
    )
def verifyKubernetesClusterUpgrade(self, cluster_response, version_id):
    """Check if Kubernetes cluster state and version are valid after upgrade"""
    # An upgrade must leave the cluster Running and on the requested version
    self.verifyKubernetesClusterState(cluster_response, 'Running')
    self.verifyKubernetesClusterVersion(cluster_response, version_id)
def verifyKubernetesClusterScale(self, cluster_response, size=1, master_nodes=1):
    """Check if Kubernetes cluster state and node sizes are valid after scaling"""
    # A scale operation must leave the cluster Running with the new node counts
    self.verifyKubernetesClusterState(cluster_response, 'Running')
    self.verifyKubernetesClusterSize(cluster_response, size, master_nodes)
def stopAndVerifyKubernetesCluster(self, cluster_id):
    """Stop Kubernetes cluster and check if it is really stopped"""
    stop_response = self.stopKubernetesCluster(cluster_id)
    self.assertEqual(
        stop_response.success,
        True,
        "Check KubernetesCluster stop response {}, {}".format(stop_response.success, True)
    )
    # Verify the Stopped state directly against the cloud database
    db_cluster_state = self.dbclient.execute("select state from kubernetes_cluster where uuid = '%s';" % cluster_id)[0][0]
    self.assertEqual(
        db_cluster_state,
        'Stopped',
        "KubernetesCluster not stopped in DB, {}".format(db_cluster_state)
    )
def deleteAndVerifyKubernetesCluster(self, cluster_id):
    """Delete Kubernetes cluster and check if it is really deleted"""
    delete_response = self.deleteKubernetesCluster(cluster_id)
    self.assertEqual(
        delete_response.success,
        True,
        "Check KubernetesCluster delete response {}, {}".format(delete_response.success, True)
    )
    # The DB row is soft-deleted: `removed` must be non-NULL after deletion
    db_cluster_removed = self.dbclient.execute("select removed from kubernetes_cluster where uuid = '%s';" % cluster_id)[0][0]
    self.assertNotEqual(
        db_cluster_removed,
        None,
        "KubernetesCluster not removed in DB, {}".format(db_cluster_removed)
    )

View File

@ -0,0 +1,278 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Tests for Kubernetes supported version """
#Import Local Modules
from marvin.cloudstackTestCase import cloudstackTestCase, unittest
from marvin.cloudstackAPI import (listInfrastructure,
listKubernetesSupportedVersions,
addKubernetesSupportedVersion,
deleteKubernetesSupportedVersion)
from marvin.cloudstackException import CloudstackAPIException
from marvin.codes import FAILED
from marvin.lib.base import Configurations
from marvin.lib.utils import (cleanup_resources,
random_gen)
from marvin.lib.common import get_zone
from marvin.sshClient import SshClient
from nose.plugins.attrib import attr
import time
_multiprocess_shared_ = True
class TestKubernetesSupportedVersion(cloudstackTestCase):
@classmethod
def setUpClass(cls):
    # One-time setup: resolve the zone and management-server details, and make
    # sure the CKS plugin is enabled (enabling it requires a server restart).
    cls.testClient = super(TestKubernetesSupportedVersion, cls).getClsTestClient()
    cls.apiclient = cls.testClient.getApiClient()
    cls.services = cls.testClient.getParsedTestDataConfig()
    cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
    cls.mgtSvrDetails = cls.config.__dict__["mgtSvr"][0].__dict__
    # ISO used by the add-version tests; must be reachable from the zone
    cls.kubernetes_version_iso_url = 'http://staging.yadav.xyz/cks/binaries-iso/setup-1.16.3.iso'
    # Remember the original flag so tearDownClass can restore it
    cls.initial_configuration_cks_enabled = Configurations.list(cls.apiclient,
                                                                name="cloud.kubernetes.service.enabled")[0].value
    if cls.initial_configuration_cks_enabled not in ["true", True]:
        cls.debug("Enabling CloudStack Kubernetes Service plugin and restarting management server")
        Configurations.update(cls.apiclient,
                              "cloud.kubernetes.service.enabled",
                              "true")
        cls.restartServer()
    cls._cleanup = []
    return
@classmethod
def tearDownClass(cls):
    try:
        # Restore the CKS enabled flag to its pre-test value (needs a restart)
        if cls.initial_configuration_cks_enabled not in ["true", True]:
            cls.debug("Restoring Kubernetes Service enabled value")
            Configurations.update(cls.apiclient,
                                  "cloud.kubernetes.service.enabled",
                                  "false")
            cls.restartServer()
        cleanup_resources(cls.apiclient, cls._cleanup)
    except Exception as e:
        raise Exception("Warning: Exception during cleanup : %s" % e)
    return
@classmethod
def restartServer(cls):
    """Restart the CloudStack management server over SSH and wait for it to come back.

    :raises Exception: when the server is not reachable again within 5 minutes
    """
    cls.debug("Restarting management server")
    sshClient = SshClient(
        cls.mgtSvrDetails["mgtSvrIp"],
        22,
        cls.mgtSvrDetails["user"],
        cls.mgtSvrDetails["passwd"]
    )
    command = "service cloudstack-management stop"
    sshClient.execute(command)
    command = "service cloudstack-management start"
    sshClient.execute(command)
    # Waits for management to come up in 5 mins, when it's up it will continue
    timeout = time.time() + 300
    while time.time() < timeout:
        if cls.isManagementUp() is True:
            return
        time.sleep(5)
    # Bug fix: previously `return cls.fail(...)` — TestCase.fail is an instance
    # method, so calling it on the class passes the message string as `self`
    # and dies with an AttributeError instead of reporting the real problem.
    raise Exception("Management server did not come up, failing")
@classmethod
def isManagementUp(cls):
    """Return True when the management server answers a listInfrastructure call."""
    cmd = listInfrastructure.listInfrastructureCmd()
    try:
        cls.apiclient.listInfrastructure(cmd)
    except Exception:
        return False
    return True
def setUp(self):
    """Prepare per-test clients and an empty per-test cleanup list."""
    self.apiclient = self.testClient.getApiClient()
    self.dbclient = self.testClient.getDbConnection()
    self.services = self.testClient.getParsedTestDataConfig()
    self.cleanup = []
    return
def tearDown(self):
    """Remove any resources the test registered for cleanup."""
    try:
        cleanup_resources(self.apiclient, self.cleanup)
    except Exception as e:
        raise Exception("Warning: Exception during cleanup : %s" % e)
    return
@attr(tags=["advanced", "smoke"], required_hardware="true")
def test_01_add_delete_kubernetes_supported_version(self):
    """Test to add a new Kubernetes supported version

    # Validate the following:
    # 1. addKubernetesSupportedVersion should return valid info for new version
    # 2. The Cloud Database contains the valid information when listKubernetesSupportedVersions is called
    """
    version = '1.16.3'
    name = 'v' + version + '-' + random_gen()
    self.debug("Adding Kubernetes supported version with name: %s" % name)
    version_response = self.addKubernetesSupportedVersion(version, name, self.zone.id, self.kubernetes_version_iso_url)
    list_versions_response = self.listKubernetesSupportedVersion(version_response.id)
    # API response must echo back name, semantic version and zone
    self.assertEqual(
        list_versions_response.name,
        name,
        "Check KubernetesSupportedVersion name {}, {}".format(list_versions_response.name, name)
    )
    self.assertEqual(
        list_versions_response.semanticversion,
        version,
        "Check KubernetesSupportedVersion version {}, {}".format(list_versions_response.semanticversion, version)
    )
    self.assertEqual(
        list_versions_response.zoneid,
        self.zone.id,
        "Check KubernetesSupportedVersion zone {}, {}".format(list_versions_response.zoneid, self.zone.id)
    )
    # Cross-check the stored name directly in the cloud database
    db_version_name = self.dbclient.execute("select name from kubernetes_supported_version where uuid = '%s';" % version_response.id)[0][0]
    self.assertEqual(
        str(db_version_name),
        name,
        "Check KubernetesSupportedVersion name in DB {}, {}".format(db_version_name, name)
    )
    self.debug("Added Kubernetes supported version with ID: %s. Waiting for its ISO to be Ready" % version_response.id)
    self.waitForKubernetesSupportedVersionIsoReadyState(version_response.id)
    self.debug("Deleting Kubernetes supported version with ID: %s" % version_response.id)
    delete_response = self.deleteKubernetesSupportedVersion(version_response.id, True)
    self.assertEqual(
        delete_response.success,
        True,
        "Check KubernetesSupportedVersion deletion in DB {}, {}".format(delete_response.success, True)
    )
    # The DB row is soft-deleted: `removed` must be non-NULL after deletion
    db_version_removed = self.dbclient.execute("select removed from kubernetes_supported_version where uuid = '%s';" % version_response.id)[0][0]
    self.assertNotEqual(
        db_version_removed,
        None,
        "KubernetesSupportedVersion not removed in DB"
    )
    return
@attr(tags=["advanced", "smoke"], required_hardware="true")
def test_02_add_unsupported_kubernetes_supported_version(self):
    """Test to trying to add a new unsupported Kubernetes supported version

    # Validate the following:
    # 1. API should return an error
    """
    # 1.1.1 is below the minimum supported version (1.11.0)
    version = '1.1.1'
    name = 'v' + version + '-' + random_gen()
    try:
        version_response = self.addKubernetesSupportedVersion(version, name, self.zone.id, self.kubernetes_version_iso_url)
        # If the API accepted it, clean up the stray version before failing
        self.debug("Unsupported CKS Kubernetes supported added with ID: %s. Deleting it and failing test." % version_response.id)
        self.waitForKubernetesSupportedVersionIsoReadyState(version_response.id)
        self.deleteKubernetesSupportedVersion(version_response.id, True)
        self.fail("Kubernetes supported version below version 1.11.0 been added. Must be an error.")
    except CloudstackAPIException as e:
        self.debug("Unsupported version error check successful, API failure: %s" % e)
    return
@attr(tags=["advanced", "smoke"], required_hardware="true")
def test_03_add_invalid_kubernetes_supported_version(self):
    """Test trying to add a Kubernetes supported version with a malformed semantic version

    # Validate the following:
    # 1. API should return an error
    """
    version = 'invalid'
    name = 'v' + version + '-' + random_gen()
    try:
        version_response = self.addKubernetesSupportedVersion(version, name, self.zone.id, self.kubernetes_version_iso_url)
        # If the API accepted it, clean up the stray version before failing.
        # Message fix: previously read "Invalid Kubernetes supported added".
        self.debug("Invalid Kubernetes supported version added with ID: %s. Deleting it and failing test." % version_response.id)
        self.waitForKubernetesSupportedVersionIsoReadyState(version_response.id)
        self.deleteKubernetesSupportedVersion(version_response.id, True)
        self.fail("Invalid Kubernetes supported version has been added. Must be an error.")
    except CloudstackAPIException as e:
        # Message fix: previously said "Unsupported version" — a copy-paste
        # from test_02; this test checks the *invalid* version path.
        self.debug("Invalid version error check successful, API failure: %s" % e)
    return
def addKubernetesSupportedVersion(self, version, name, zoneId, isoUrl):
    # Registers a Kubernetes supported version (minimum node offering of
    # 2 CPUs / 2048 MB) and returns the raw API response.
    addKubernetesSupportedVersionCmd = addKubernetesSupportedVersion.addKubernetesSupportedVersionCmd()
    addKubernetesSupportedVersionCmd.semanticversion = version
    addKubernetesSupportedVersionCmd.name = name
    addKubernetesSupportedVersionCmd.zoneid = zoneId
    addKubernetesSupportedVersionCmd.url = isoUrl
    addKubernetesSupportedVersionCmd.mincpunumber = 2
    addKubernetesSupportedVersionCmd.minmemory = 2048
    versionResponse = self.apiclient.addKubernetesSupportedVersion(addKubernetesSupportedVersionCmd)
    # NOTE(review): this condition looks inverted — it only registers the
    # response for cleanup when the response is falsy (i.e. appends None).
    # Confirm whether `if versionResponse:` was intended.
    if not versionResponse:
        self.cleanup.append(versionResponse)
    return versionResponse
def listKubernetesSupportedVersion(self, versionId):
    """Fetch a single Kubernetes supported version by its ID."""
    cmd = listKubernetesSupportedVersions.listKubernetesSupportedVersionsCmd()
    cmd.id = versionId
    return self.apiclient.listKubernetesSupportedVersions(cmd)[0]
def deleteKubernetesSupportedVersion(self, versionId, deleteIso):
    """Delete a Kubernetes supported version, optionally deleting its ISO.

    Bug fix: a one-argument `deleteKubernetesSupportedVersion(self, cmd)`
    definition previously preceded this one. Python has no method
    overloading, so that earlier definition was dead code silently shadowed
    by this one; it has been removed. All callers use this two-argument form.
    """
    deleteKubernetesSupportedVersionCmd = deleteKubernetesSupportedVersion.deleteKubernetesSupportedVersionCmd()
    deleteKubernetesSupportedVersionCmd.id = versionId
    deleteKubernetesSupportedVersionCmd.deleteiso = deleteIso
    response = self.apiclient.deleteKubernetesSupportedVersion(deleteKubernetesSupportedVersionCmd)
    return response
def waitForKubernetesSupportedVersionIsoReadyState(self, version_id, retries=20, interval=30):
    """Check if Kubernetes supported version ISO is in Ready state

    Polls the version every `interval` seconds, up to `retries` + 1 attempts.
    Raises on any terminal non-Ready ISO state or on timeout.
    """
    while retries > -1:
        time.sleep(interval)
        list_versions_response = self.listKubernetesSupportedVersion(version_id)
        # Response not populated yet: burn a retry and poll again
        if not hasattr(list_versions_response, 'isostate') or not list_versions_response or not list_versions_response.isostate:
            retries = retries - 1
            continue
        if 'Creating' == list_versions_response.isostate:
            retries = retries - 1
        elif 'Ready' == list_versions_response.isostate:
            return
        else:
            # Any other state (e.g. Failed) is terminal
            raise Exception(
                "Failed to download Kubernetes supported version ISO: status - %s" %
                list_versions_response.isostate)
    raise Exception("Kubernetes supported version Ready state timed out")

View File

@ -191,7 +191,9 @@ known_categories = {
'Management': 'Management',
'Backup' : 'Backup and Recovery',
'Restore' : 'Backup and Recovery',
'UnmanagedInstance': 'Virtual Machine'
'UnmanagedInstance': 'Virtual Machine',
'KubernetesSupportedVersion': 'Kubernetes Service',
'KubernetesCluster': 'Kubernetes Service'
}

View File

@ -92,6 +92,7 @@ var dictionary = {
"label.about":"About",
"label.about.app":"About CloudStack",
"label.accept.project.invitation":"Accept project invitation",
"label.access":"Access",
"label.account":"Account",
"label.accounts":"Accounts",
"label.account.and.security.group":"Account, Security group",
@ -359,6 +360,8 @@ var dictionary = {
"label.add.isolated.guest.network":"Add Isolated Guest Network",
"label.add.isolated.guest.network.with.sourcenat":"Add Isolated Guest Network with SourceNat",
"label.add.isolated.network":"Add Isolated Network",
"label.add.kubernetes.cluster":"Add Kubernetes Cluster",
"label.add.kubernetes.version":"Add Kubernetes Version",
"label.add.l2.guest.network":"Add L2 Guest Network",
"label.add.ldap.account":"Add LDAP account",
"label.add.list.name":"ACL List Name",
@ -372,6 +375,7 @@ var dictionary = {
"label.add.network.device":"Add Network Device",
"label.add.network.offering":"Add network offering",
"label.add.new.F5":"Add new F5",
"label.add.new.iso":"Add new ISO",
"label.add.new.NetScaler":"Add new NetScaler",
"label.add.new.PA":"Add new Palo Alto",
"label.add.new.SRX":"Add new SRX",
@ -450,6 +454,7 @@ var dictionary = {
"label.allocated":"Allocated",
"label.allocation.state":"Allocation State",
"label.allow":"Allow",
"label.all.zones":"All zones",
"label.annotated.by":"Annotator",
"label.annotation":"Annotation",
"label.anti.affinity":"Anti-affinity",
@ -556,6 +561,8 @@ var dictionary = {
"label.cloud.managed":"Cloud.com Managed",
"label.cluster":"Cluster",
"label.cluster.name":"Cluster Name",
"label.cluster.size":"Cluster size",
"label.cluster.size.worker.nodes":"Cluster size (Worker nodes)",
"label.cluster.type":"Cluster Type",
"label.clusters":"Clusters",
"label.clvm":"CLVM",
@ -614,6 +621,7 @@ var dictionary = {
"label.day":"Day",
"label.day.of.month":"Day of Month",
"label.day.of.week":"Day of Week",
"label.dashboard.endpoint":"Dashboard endpoint",
"label.dc.name":"DC Name",
"label.dead.peer.detection":"Dead Peer Detection",
"label.decline.invitation":"Decline invitation",
@ -650,6 +658,8 @@ var dictionary = {
"label.delete.events":"Delete events",
"label.delete.gateway":"Delete gateway",
"label.delete.internal.lb":"Delete Internal LB",
"label.delete.iso":"Delete ISO",
"label.delete.kubernetes.version":"Delete Kubernetes version",
"label.delete.portable.ip.range":"Delete Portable IP Range",
"label.delete.profile":"Delete Profile",
"label.delete.project":"Delete project",
@ -667,6 +677,7 @@ var dictionary = {
"label.destination.physical.network.id":"Destination physical network ID",
"label.destination.zone":"Destination Zone",
"label.destroy":"Destroy",
"label.destroy.kubernetes.cluster":"Destroy Kubernetes cluster",
"label.destroy.router":"Destroy router",
"label.destroy.vm.graceperiod":"Destroy VM Grace Period",
"label.detaching.disk":"Detaching Disk",
@ -732,6 +743,7 @@ var dictionary = {
"label.domain.suffix":"DNS Domain Suffix (i.e., xyz.com)",
"label.done":"Done",
"label.double.quotes.are.not.allowed":"Double quotes are not allowed",
"label.download.kubernetes.cluster.config":"Download Kubernetes cluster config",
"label.download.progress":"Download Progress",
"label.drag.new.position":"Drag to new position",
"label.duration.in.sec":"Duration (in sec)",
@ -790,6 +802,7 @@ var dictionary = {
"label.expunge":"Expunge",
"label.external.id":"External ID",
"label.external.link":"External link",
"label.external.loadbalancer.ip.address":"External load balancer IP address",
"label.extractable":"Extractable",
"label.extractable.lower":"extractable",
"label.f5":"F5",
@ -964,6 +977,9 @@ var dictionary = {
"label.iscsi":"iSCSI",
"label.iso":"ISO",
"label.iso.boot":"ISO Boot",
"label.iso.id":"ISO ID",
"label.iso.name":"ISO name",
"label.iso.state":"ISO state",
"label.isolated.networks":"Isolated networks",
"label.isolation.method":"Isolation method",
"label.isolation.mode":"Isolation Mode",
@ -975,6 +991,11 @@ var dictionary = {
"label.key":"Key",
"label.keyboard.language":"Keyboard language",
"label.keyboard.type":"Keyboard type",
"label.kubernetes.cluster":"Kubernetes cluster",
"label.kubernetes.cluster.details":"Kubernetes cluster details",
"label.kubernetes.service":"Kubernetes Service",
"label.kubernetes.version":"Kubernetes version",
"label.kubernetes.version.details":"Kubernetes version details",
"label.kvm.traffic.label":"KVM traffic label",
"label.label":"Label",
"label.lang.arabic":"Arabic",
@ -1042,6 +1063,7 @@ var dictionary = {
"label.mac.address": "MAC Address",
"label.management.servers":"Management Servers",
"label.mac.address.changes":"MAC Address Changes",
"label.master.nodes":"Master nodes",
"label.max.cpus":"Max. CPU cores",
"label.max.guest.limit":"Max guest limit",
"label.max.instances":"Max Instances",
@ -1246,6 +1268,7 @@ var dictionary = {
"label.no.items":"No Available Items",
"label.no.security.groups":"No Available Security Groups",
"label.no.thanks":"No thanks",
"label.node.root.disk.size.gb":"Node root disk size (in GB)",
"label.none":"None",
"label.not.found":"Not Found",
"label.notifications":"Notifications",
@ -1352,6 +1375,7 @@ var dictionary = {
"label.private.key":"Private Key",
"label.private.network":"Private network",
"label.private.port":"Private Port",
"label.private.registry":"Private registry",
"label.private.zone":"Private Zone",
"label.privatekey":"PKCS#8 Private Key",
"label.privatekey.name":"Private Key",
@ -1549,6 +1573,7 @@ var dictionary = {
"label.save.and.continue":"Save and continue",
"label.save.changes":"Save changes",
"label.saving.processing":"Saving....",
"label.scale.kubernetes.cluster":"Scale Kubernetes cluster",
"label.scale.up.policy":"SCALE UP POLICY",
"label.scaledown.policy":"ScaleDown Policy",
"label.scaleup.policy":"ScaleUp Policy",
@ -1589,6 +1614,7 @@ var dictionary = {
"label.select.template":"Select Template",
"label.select.tier":"Select Tier",
"label.select.vm.for.static.nat":"Select VM for static NAT",
"label.semantic.version":"Semantic version",
"label.sent":"Sent",
"label.server":"Server",
"label.service.capabilities":"Service Capabilities",
@ -1639,6 +1665,7 @@ var dictionary = {
"label.sslcertificates":"SSL Certificates",
"label.standard.us.keyboard":"Standard (US) keyboard",
"label.start.IP":"Start IP",
"label.start.kuberentes.cluster":"Start Kubernetes cluster",
"label.start.lb.vm":"Start LB VM",
"label.start.port":"Start Port",
"label.start.reserved.system.IP":"Start Reserved system IP",
@ -1679,6 +1706,7 @@ var dictionary = {
"label.sticky.request-learn":"Request learn",
"label.sticky.tablesize":"Table size",
"label.stop":"Stop",
"label.stop.kuberentes.cluster":"Stop Kubernetes cluster",
"label.stop.lb.vm":"Stop LB VM",
"label.stopped.vms":"Stopped VMs",
"label.storage":"Storage",
@ -1757,11 +1785,13 @@ var dictionary = {
"label.unhealthy.threshold":"Unhealthy Threshold",
"label.unlimited":"Unlimited",
"label.untagged":"Untagged",
"label.update.kubernetes.version":"Update Kubernetes Version",
"label.update.project.resources":"Update project resources",
"label.update.ssl":" SSL Certificate",
"label.update.ssl.cert":" SSL Certificate",
"label.update.vmware.datacenter":"Update VMware datacenter",
"label.updating":"Updating",
"label.upgrade.kubernetes.cluster":"Upgrade Kubernetes cluster",
"label.upgrade.required":"Upgrade is required",
"label.upgrade.router.newer.template":"Upgrade Router to Use Newer Template",
"label.upload":"Upload",
@ -1790,6 +1820,7 @@ var dictionary = {
"label.username.lower":"username",
"label.users":"Users",
"label.uuid":"UUID",
"label.versions":"Versions",
"label.vSwitch.type":"vSwitch Type",
"label.value":"Value",
"label.vcdcname":"vCenter DC name",
@ -2096,8 +2127,10 @@ var dictionary = {
"message.confirm.delete.ciscoASA1000v":"Please confirm you want to delete CiscoASA1000v",
"message.confirm.delete.ciscovnmc.resource":"Please confirm you want to delete CiscoVNMC resource",
"message.confirm.delete.internal.lb":"Please confirm you want to delete Internal LB",
"message.confirm.delete.kubernetes.version":"Please confirm that you want to delete this Kubernetes version.",
"message.confirm.delete.secondary.staging.store":"Please confirm you want to delete Secondary Staging Store.",
"message.confirm.delete.ucs.manager":"Please confirm that you want to delete UCS Manager",
"message.confirm.destroy.kubernetes.cluster":"Please confirm that you want to destroy this Kubernetes cluster.",
"message.confirm.destroy.router":"Please confirm that you would like to destroy this router",
"message.confirm.disable.host":"Please confirm that you want to disable the host",
"message.confirm.disable.network.offering":"Are you sure you want to disable this network offering?",
@ -2130,7 +2163,9 @@ var dictionary = {
"message.confirm.scale.up.router.vm":"Do you really want to scale up the Router VM ?",
"message.confirm.scale.up.system.vm":"Do you really want to scale up the system VM ?",
"message.confirm.shutdown.provider":"Please confirm that you would like to shutdown this provider",
"message.confirm.start.kubernetes.cluster":"Please confirm that you want to start this Kubernetes cluster.",
"message.confirm.start.lb.vm":"Please confirm you want to start LB VM",
"message.confirm.stop.kubernetes.cluster":"Please confirm that you want to stop this Kubernetes cluster.",
"message.confirm.stop.lb.vm":"Please confirm you want to stop LB VM",
"message.confirm.upgrade.router.newer.template":"Please confirm that you want to upgrade router to use newer template",
"message.confirm.upgrade.routers.account.newtemplate":"Please confirm that you want to upgrade all routers in this account to use newer template",

43
ui/plugins/cks/cks.css Normal file
View File

@ -0,0 +1,43 @@
/*[fmt]1C20-1C0D-E*/
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/* Icon styles for the Kubernetes-cluster action buttons added by the CKS UI
 * plugin. The background-position offsets select a glyph from the shared
 * CloudStack UI toolbar sprite image; the :hover variant points at the
 * highlighted copy of the same glyph further down the sheet.
 * NOTE(review): exact sprite coordinates assumed correct — verify against
 * the UI sprite sheet if icons render wrong. */

/* "Download cluster kubeconfig" action icon. */
.downloadKubernetesClusterKubeConfig .icon {
background-position: -35px -125px;
}
.downloadKubernetesClusterKubeConfig:hover .icon {
background-position: -35px -707px;
}
/* "Scale cluster" action icon. */
.scaleKubernetesCluster .icon {
background-position: -264px -2px;
}
.scaleKubernetesCluster:hover .icon {
background-position: -263px -583px;
}
/* "Upgrade cluster" action icon. */
.upgradeKubernetesCluster .icon {
background-position: -138px -65px;
}
.upgradeKubernetesCluster:hover .icon {
background-position: -138px -647px;
}

1581
ui/plugins/cks/cks.js Normal file

File diff suppressed because it is too large Load Diff

25
ui/plugins/cks/config.js Normal file
View File

@ -0,0 +1,25 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
(function (cloudStack) {
    // Metadata shown for the CKS plugin in the CloudStack UI plugin listing.
    var pluginConfig = {
        title: 'Kubernetes Service',
        desc: 'Kubernetes Service',
        externalLink: 'http://www.cloudstack.org/',
        authorName: 'Apache CloudStack',
        authorEmail: 'dev@cloudstack.apache.org'
    };

    // Register the config on the plugin namespace created by the UI framework.
    cloudStack.plugins.cks.config = pluginConfig;
}(cloudStack));

BIN
ui/plugins/cks/icon.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.2 KiB

View File

@ -18,6 +18,7 @@
// Enabled UI plugins, loaded in order. 'cks' registers the Kubernetes
// Service section. (The duplicated 'quota' line — a leftover old/new pair
// from a diff — was a syntax error and has been collapsed to one entry.)
cloudStack.plugins = [
    //'testPlugin',
    'cloudian',
    'quota',
    'cks'
];
}(jQuery, cloudStack));

View File

@ -2800,16 +2800,37 @@ jQuery.validator.addMethod("ipv6CustomJqueryValidator", function(value, element)
return jQuery.validator.methods.ipv6.call(this, value, element);
}, "The specified IPv6 address is invalid.");
// Validator: when "All Zones" (id "-1") is among the selected zones, it must
// be the only selection. (The condition appeared twice — stale old/new diff
// residue — and is collapsed to the single, correctly formatted line.)
$.validator.addMethod("allzonesonly", function(value, element){
    // value is the selected zone-id list; reject "-1" combined with others.
    if ((value.indexOf("-1") != -1) && (value.length > 1))
        return false;
    return true;
},
"All Zones cannot be combined with any other zone");
// Validator: accepts a whole number >= 1. An optional field left blank passes.
$.validator.addMethod("naturalnumber", function(value, element){
    // Nothing entered in an optional field: no validation needed.
    if (this.optional(element) && value.length == 0) {
        return true;
    }
    // Non-numeric text fails immediately.
    if (isNaN(value)) {
        return false;
    }
    var parsed = parseInt(value);
    // parseInt yields an integer (or NaN, rejected by parsed > 0); the
    // remaining guards are defensive and mirror the original's checks.
    return typeof parsed === 'number' && parsed > 0 && Math.floor(parsed) === parsed && parsed !== Infinity;
},
"Please enter a valid number, 1 or greater");
// Validator: accepts a whole number >= 2. An optional field left blank passes.
$.validator.addMethod("multiplecountnumber", function(value, element){
    // Nothing entered in an optional field: no validation needed.
    if (this.optional(element) && value.length == 0) {
        return true;
    }
    // Non-numeric text fails immediately.
    if (isNaN(value)) {
        return false;
    }
    var parsed = parseInt(value);
    // Same defensive guards as "naturalnumber", but requiring at least 2.
    return typeof parsed === 'number' && parsed > 1 && Math.floor(parsed) === parsed && parsed !== Infinity;
},
"Please enter a valid number, 2 or greater");
cloudStack.createTemplateMethod = function (isSnapshot){
return {
label: 'label.create.template',