Mirror of https://github.com/apache/cloudstack.git, synced 2025-10-26 08:42:29 +01:00
CKS Enhancements and SystemVM template upgrade improvements (#5863)
* This PR/commit comprises the following:
  - Support to fall back on the older systemVM template in case of no change in template across ACS versions
  - Update core user to cloud in CKS
  - Display details of accessing CKS nodes in the UI (K8s Access tab)
  - Update systemvm template from debian 11 to debian 11.2
  - Update letsencrypt cert
  - Remove docker dependency, as from ACS 4.16 onward k8s has deprecated support for docker; use containerd as container runtime
* support for private registry - containerd
* Enable updating template type (only) for system owned templates via UI
* edit indents
* Address comments and move cmd from patch file to cloud-init runcmd
* temporary change
* update k8s test to use k8s version 1.21.5 (instead of 1.21.3, due to https://github.com/kubernetes/kubernetes/pull/104530)
* support for private registry - containerd
* Enable updating template type (only) for system owned templates via UI
* smooth upgrade of cks clusters
* update pom file with temp download.cloudstack.org testing links
* fix pom
* add cgroup config for containerd
* add systemd config for kubelet
* add additional info during image registry config
* update to official links
Parent: 8db598bc0f
Commit: e0a5df50ce
.gitignore (vendored): 1 line changed
@@ -48,6 +48,7 @@ tools/cli/cloudmonkey/precache.py
 tools/marvin/marvin/cloudstackAPI/
 tools/marvin/build/
 tools/cli/build/
+tools/appliance/systemvmtemplate/packer_cache/
 *.jar
 *.war
 *.mar
@@ -16,6 +16,26 @@
 // under the License.
 package com.cloud.agent;

+import com.cloud.agent.Agent.ExitStatus;
+import com.cloud.agent.dao.StorageComponent;
+import com.cloud.agent.dao.impl.PropertiesStorage;
+import com.cloud.resource.ServerResource;
+import com.cloud.utils.LogUtils;
+import com.cloud.utils.NumbersUtil;
+import com.cloud.utils.ProcessUtil;
+import com.cloud.utils.PropertiesUtil;
+import com.cloud.utils.backoff.BackoffAlgorithm;
+import com.cloud.utils.backoff.impl.ConstantTimeBackoff;
+import com.cloud.utils.exception.CloudRuntimeException;
+import com.google.common.base.Strings;
+import org.apache.commons.daemon.Daemon;
+import org.apache.commons.daemon.DaemonContext;
+import org.apache.commons.daemon.DaemonInitException;
+import org.apache.commons.lang.math.NumberUtils;
+import org.apache.log4j.Logger;
+import org.apache.log4j.xml.DOMConfigurator;
+
+import javax.naming.ConfigurationException;
 import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -30,28 +50,6 @@ import java.util.Map;
 import java.util.Properties;
 import java.util.UUID;

-import javax.naming.ConfigurationException;
-
-import org.apache.commons.daemon.Daemon;
-import org.apache.commons.daemon.DaemonContext;
-import org.apache.commons.daemon.DaemonInitException;
-import org.apache.commons.lang.math.NumberUtils;
-import org.apache.log4j.Logger;
-import org.apache.log4j.xml.DOMConfigurator;
-
-import com.cloud.agent.Agent.ExitStatus;
-import com.cloud.agent.dao.StorageComponent;
-import com.cloud.agent.dao.impl.PropertiesStorage;
-import com.cloud.resource.ServerResource;
-import com.cloud.utils.LogUtils;
-import com.cloud.utils.NumbersUtil;
-import com.cloud.utils.ProcessUtil;
-import com.cloud.utils.PropertiesUtil;
-import com.cloud.utils.backoff.BackoffAlgorithm;
-import com.cloud.utils.backoff.impl.ConstantTimeBackoff;
-import com.cloud.utils.exception.CloudRuntimeException;
-import com.google.common.base.Strings;
-
 public class AgentShell implements IAgentShell, Daemon {
 private static final Logger s_logger = Logger.getLogger(AgentShell.class.getName());

@@ -423,13 +421,13 @@ public class AgentShell implements IAgentShell, Daemon {
 } catch (final ClassNotFoundException e) {
 throw new ConfigurationException("Resource class not found: " + name + " due to: " + e.toString());
 } catch (final SecurityException e) {
-throw new ConfigurationException("Security excetion when loading resource: " + name + " due to: " + e.toString());
+throw new ConfigurationException("Security exception when loading resource: " + name + " due to: " + e.toString());
 } catch (final NoSuchMethodException e) {
-throw new ConfigurationException("Method not found excetion when loading resource: " + name + " due to: " + e.toString());
+throw new ConfigurationException("Method not found exception when loading resource: " + name + " due to: " + e.toString());
 } catch (final IllegalArgumentException e) {
-throw new ConfigurationException("Illegal argument excetion when loading resource: " + name + " due to: " + e.toString());
+throw new ConfigurationException("Illegal argument exception when loading resource: " + name + " due to: " + e.toString());
 } catch (final InstantiationException e) {
-throw new ConfigurationException("Instantiation excetion when loading resource: " + name + " due to: " + e.toString());
+throw new ConfigurationException("Instantiation exception when loading resource: " + name + " due to: " + e.toString());
 } catch (final IllegalAccessException e) {
 throw new ConfigurationException("Illegal access exception when loading resource: " + name + " due to: " + e.toString());
 } catch (final InvocationTargetException e) {
@@ -73,4 +73,5 @@ public interface VmDetailConstants {
 String DISK_OFFERING = "diskOffering";

 String DEPLOY_AS_IS_CONFIGURATION = "configurationId";
+String CKS_CONTROL_NODE_LOGIN_USER = "controlNodeLoginUser";
 }
@@ -73,10 +73,10 @@
 </goals>
 <configuration>
 <source>
-def projectVersion = project.version
+def projectVersion = project.properties['project.systemvm.template.version']
 String[] versionParts = projectVersion.tokenize('.')
-pom.properties['cs.version'] = "4.16"
-pom.properties['patch.version'] = "0"
+pom.properties['cs.version'] = versionParts[0] + "." + versionParts[1]
+pom.properties['patch.version'] = versionParts[2]
 </source>
 </configuration>
 </execution>
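As an aside, the Groovy above now derives the Maven properties from the dedicated systemvm template version property instead of hard-coded values. A minimal, illustrative Java sketch of the same split (the sample value "4.16.1.0" and the class name are hypothetical, not taken from the build):

    // Illustrative sketch only: mirrors the Groovy tokenize('.') logic in the pom above.
    public class TemplateVersionSplit {
        public static void main(String[] args) {
            String templateVersion = "4.16.1.0"; // hypothetical project.systemvm.template.version
            String[] parts = templateVersion.split("\\.");
            String csVersion = parts[0] + "." + parts[1]; // becomes cs.version, e.g. "4.16"
            String patchVersion = parts[2];               // becomes patch.version, e.g. "1"
            System.out.println("cs.version=" + csVersion + ", patch.version=" + patchVersion);
        }
    }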
@@ -146,7 +146,7 @@
 <executable>bash</executable>
 <arguments>
 <argument>templateConfig.sh</argument>
-<armument>${project.version}</armument>
+<argument>${project.systemvm.template.version}</argument>
 </arguments>
 </configuration>
 </execution>
@@ -367,10 +367,11 @@ public class DatabaseUpgradeChecker implements SystemIntegrityChecker {
 return;
 }

-SystemVmTemplateRegistration.parseMetadataFile();
+String csVersion = SystemVmTemplateRegistration.parseMetadataFile();
+final CloudStackVersion sysVmVersion = CloudStackVersion.parse(csVersion);
 final CloudStackVersion currentVersion = CloudStackVersion.parse(currentVersionValue);
-SystemVmTemplateRegistration.CS_MAJOR_VERSION = String.valueOf(currentVersion.getMajorRelease()) + "." + String.valueOf(currentVersion.getMinorRelease());
-SystemVmTemplateRegistration.CS_TINY_VERSION = String.valueOf(currentVersion.getPatchRelease());
+SystemVmTemplateRegistration.CS_MAJOR_VERSION = String.valueOf(sysVmVersion.getMajorRelease()) + "." + String.valueOf(sysVmVersion.getMinorRelease());
+SystemVmTemplateRegistration.CS_TINY_VERSION = String.valueOf(sysVmVersion.getPatchRelease());

 s_logger.info("DB version = " + dbVersion + " Code Version = " + currentVersion);

@@ -54,7 +54,7 @@ import org.apache.cloudstack.storage.datastore.db.ImageStoreDaoImpl;
 import org.apache.cloudstack.storage.datastore.db.ImageStoreVO;
 import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao;
 import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO;
-import org.apache.commons.codec.digest.DigestUtils;
+import org.apache.cloudstack.utils.security.DigestHelper;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.log4j.Logger;
 import org.ini4j.Ini;
@@ -64,7 +64,6 @@ import java.io.BufferedReader;
 import java.io.File;
 import java.io.FileReader;
 import java.io.IOException;
-import java.io.InputStream;
 import java.net.URI;
 import java.nio.file.Files;
 import java.nio.file.Path;
@@ -351,16 +350,6 @@ public class SystemVmTemplateRegistration {
 }
 }

-private String calculateChecksum(File file) {
-try (InputStream is = Files.newInputStream(Paths.get(file.getPath()))) {
-return DigestUtils.md5Hex(is);
-} catch (IOException e) {
-String errMsg = "Failed to calculate template checksum";
-LOGGER.error(errMsg, e);
-throw new CloudRuntimeException(errMsg, e);
-}
-}
-
 public Long getRegisteredTemplateId(Pair<Hypervisor.HypervisorType, String> hypervisorAndTemplateName) {
 VMTemplateVO vmTemplate = vmTemplateDao.findLatestTemplateByName(hypervisorAndTemplateName.second());
 Long templateId = null;
@@ -481,7 +470,7 @@ public class SystemVmTemplateRegistration {
 template.setCrossZones(true);
 template.setHypervisorType(details.getHypervisorType());
 template.setState(VirtualMachineTemplate.State.Inactive);
-template.setDeployAsIs(Hypervisor.HypervisorType.VMware.equals(details.getHypervisorType()));
+template.setDeployAsIs(false);
 template = vmTemplateDao.persist(template);
 return template;
 }
@@ -690,7 +679,13 @@ public class SystemVmTemplateRegistration {
 }
 }

-public static void parseMetadataFile() {
+/**
+ * This method parses the metadata file consisting of the systemVM templates information
+ * @return the version of the systemvm template that is to be used. This is done in order
+ * to fall back on the latest available version of the systemVM template when there does not
+ * exist a template corresponding to the current code version.
+ */
+public static String parseMetadataFile() {
 try {
 Ini ini = new Ini();
 ini.load(new FileReader(METADATA_FILE));
@@ -702,6 +697,8 @@ public class SystemVmTemplateRegistration {
 NewTemplateChecksum.put(hypervisorType, section.get("checksum"));
 NewTemplateUrl.put(hypervisorType, section.get("downloadurl"));
 }
+Ini.Section section = ini.get("default");
+return section.get("version");
 } catch (Exception e) {
 String errMsg = String.format("Failed to parse systemVM template metadata file: %s", METADATA_FILE);
 LOGGER.error(errMsg, e);
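For readers unfamiliar with ini4j, a minimal, self-contained sketch of reading such a metadata file follows. The file path and sample values are hypothetical; only the [default] / per-hypervisor section layout is taken from the code above:

    import org.ini4j.Ini;
    import java.io.FileReader;

    public class MetadataFileSketch {
        public static void main(String[] args) throws Exception {
            // Assumed layout: a [default] section carrying "version", plus one section per
            // hypervisor with templatename, checksum, downloadurl and filename keys.
            Ini ini = new Ini();
            ini.load(new FileReader("/tmp/metadata.ini")); // hypothetical path
            String version = ini.get("default").get("version");
            Ini.Section kvm = ini.get("kvm");
            System.out.println(version + " -> " + kvm.get("downloadurl"));
        }
    }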
@@ -735,7 +732,7 @@ public class SystemVmTemplateRegistration {
 }

 File tempFile = new File(TEMPLATES_PATH + matchedTemplate);
-String templateChecksum = calculateChecksum(tempFile);
+String templateChecksum = DigestHelper.calculateChecksum(tempFile);
 if (!templateChecksum.equals(NewTemplateChecksum.get(getHypervisorType(hypervisor)))) {
 LOGGER.error(String.format("Checksum mismatch: %s != %s ", templateChecksum, NewTemplateChecksum.get(getHypervisorType(hypervisor))));
 templatesFound = false;
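For context, the calculateChecksum helper deleted a few hunks above produced an MD5 hex digest of the template file; the call is now delegated to CloudStack's DigestHelper, whose signature is assumed from the call above. A standalone sketch of the old behaviour using only commons-codec (the file path is hypothetical):

    import org.apache.commons.codec.digest.DigestUtils;
    import java.io.InputStream;
    import java.nio.file.Files;
    import java.nio.file.Paths;

    public class TemplateChecksumSketch {
        // Mirrors the removed calculateChecksum(File): MD5 hex of the file contents.
        static String md5HexOf(String path) throws Exception {
            try (InputStream is = Files.newInputStream(Paths.get(path))) {
                return DigestUtils.md5Hex(is);
            }
        }

        public static void main(String[] args) throws Exception {
            System.out.println(md5HexOf("/tmp/systemvmtemplate-kvm.qcow2.bz2")); // hypothetical file
        }
    }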
@@ -812,9 +809,6 @@ public class SystemVmTemplateRegistration {
 private void updateRegisteredTemplateDetails(Long templateId, Map.Entry<Hypervisor.HypervisorType, String> hypervisorAndTemplateName) {
 VMTemplateVO templateVO = vmTemplateDao.findById(templateId);
 templateVO.setTemplateType(Storage.TemplateType.SYSTEM);
-if (Hypervisor.HypervisorType.VMware == templateVO.getHypervisorType()) {
-templateVO.setDeployAsIs(true);
-}
 boolean updated = vmTemplateDao.update(templateVO.getId(), templateVO);
 if (!updated) {
 String errMsg = String.format("updateSystemVmTemplates:Exception while updating template with id %s to be marked as 'system'", templateId);
@@ -834,9 +828,6 @@ public class SystemVmTemplateRegistration {
 private void updateTemplateUrlAndChecksum(VMTemplateVO templateVO, Map.Entry<Hypervisor.HypervisorType, String> hypervisorAndTemplateName) {
 templateVO.setUrl(NewTemplateUrl.get(hypervisorAndTemplateName.getKey()));
 templateVO.setChecksum(NewTemplateChecksum.get(hypervisorAndTemplateName.getKey()));
-if (Hypervisor.HypervisorType.VMware == templateVO.getHypervisorType()) {
-templateVO.setDeployAsIs(true);
-}
 boolean updated = vmTemplateDao.update(templateVO.getId(), templateVO);
 if (!updated) {
 String errMsg = String.format("updateSystemVmTemplates:Exception while updating 'url' and 'checksum' for hypervisor type %s", hypervisorAndTemplateName.getKey().name());
@@ -128,6 +128,29 @@ CALL `cloud_usage`.`IDEMPOTENT_ADD_UNIQUE_INDEX`('cloud_usage.usage_volume', 'id
 -- Add PK to cloud_usage.usage_vpn_user
 CALL `cloud_usage`.`IDEMPOTENT_ADD_COLUMN`('cloud_usage.usage_vpn_user', 'id', 'BIGINT(20) NOT NULL AUTO_INCREMENT FIRST, ADD PRIMARY KEY (`id`)');

+UPDATE `cloud`.`vm_template` SET deploy_as_is = 0 WHERE id = 8;
+
+CREATE PROCEDURE `cloud`.`UPDATE_KUBERNETES_NODE_DETAILS`()
+BEGIN
+DECLARE vmid BIGINT
+; DECLARE done TINYINT DEFAULT FALSE
+; DECLARE vmidcursor CURSOR FOR SELECT DISTINCT(vm_id) FROM `cloud`.`kubernetes_cluster_vm_map`
+; DECLARE CONTINUE HANDLER FOR NOT FOUND SET done = TRUE
+; OPEN vmidcursor
+; vmid_loop:LOOP
+FETCH NEXT FROM vmidcursor INTO vmid
+; IF done THEN
+LEAVE vmid_loop
+; ELSE
+INSERT `cloud`.`user_vm_details` (vm_id, name, value, display) VALUES (vmid, 'controlNodeLoginUser', 'core', 1)
+; END IF
+; END LOOP
+; CLOSE vmidcursor
+; END;
+
+CALL `cloud`.`UPDATE_KUBERNETES_NODE_DETAILS`();
+DROP PROCEDURE IF EXISTS `cloud`.`UPDATE_KUBERNETES_NODE_DETAILS`;
+
 -- Add support for VMware 7.0.2.0
 INSERT IGNORE INTO `cloud`.`hypervisor_capabilities` (uuid, hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled, max_data_volumes_limit, max_hosts_per_cluster, storage_motion_supported, vm_snapshot_enabled) values (UUID(), 'VMware', '7.0.2.0', 1024, 0, 59, 64, 1, 1);
 -- Copy VMware 7.0.1.0 hypervisor guest OS mappings to VMware 7.0.2.0
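Functionally, the cursor-based procedure above seeds a controlNodeLoginUser detail of 'core' for every VM already mapped to a CKS cluster. A set-based statement with the same effect, shown purely as an illustration and not part of the upgrade script:

    -- Illustration only: equivalent to the cursor loop in UPDATE_KUBERNETES_NODE_DETAILS above.
    INSERT INTO `cloud`.`user_vm_details` (vm_id, name, value, display)
    SELECT DISTINCT vm_id, 'controlNodeLoginUser', 'core', 1
    FROM `cloud`.`kubernetes_cluster_vm_map`;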
@@ -23,8 +23,10 @@ function getTemplateVersion() {
 subversion1="$(cut -d'.' -f1 <<<"$version")"
 subversion2="$(cut -d'.' -f2 <<<"$version")"
 minorversion="$(cut -d'.' -f3 <<<"$version")"
+securityversion="$(cut -d'.' -f4 <<<"$version")"
 export CS_VERSION="${subversion1}"."${subversion2}"
 export CS_MINOR_VERSION="${minorversion}"
+export VERSION="${CS_VERSION}.${CS_MINOR_VERSION}"
 }

 function getGenericName() {
@@ -52,12 +54,14 @@ function getChecksum() {

 function createMetadataFile() {
 local fileData=$(cat $SOURCEFILE)
+echo -e "["default"]\nversion = $VERSION.${securityversion}\n" >> $METADATAFILE
 for i in "${!templates[@]}"
 do
 section="$i"
 hvName=$(getGenericName $i)
-templatename="systemvm-${i}-${CS_VERSION}.${CS_MINOR_VERSION}"
-checksum=$(getChecksum "$fileData" $hvName)
+templatename="systemvm-${i}-${VERSION}"
+checksum=$(getChecksum "$fileData" "$VERSION-$hvName")
 downloadurl="${templates[$i]}"
 filename=$(echo ${downloadurl##*'/'})
 echo -e "["$section"]\ntemplatename = $templatename\nchecksum = $checksum\ndownloadurl = $downloadurl\nfilename = $filename\n" >> $METADATAFILE
@@ -66,12 +70,12 @@ function createMetadataFile() {

 declare -A templates
 getTemplateVersion $1
-templates=( ["kvm"]="https://download.cloudstack.org/systemvm/${CS_VERSION}/systemvmtemplate-${CS_VERSION}.${CS_MINOR_VERSION}-kvm.qcow2.bz2"
-["vmware"]="https://download.cloudstack.org/systemvm/${CS_VERSION}/systemvmtemplate-${CS_VERSION}.${CS_MINOR_VERSION}-vmware.ova"
-["xenserver"]="https://download.cloudstack.org/systemvm/$CS_VERSION/systemvmtemplate-$CS_VERSION.$CS_MINOR_VERSION-xen.vhd.bz2"
-["hyperv"]="https://download.cloudstack.org/systemvm/$CS_VERSION/systemvmtemplate-$CS_VERSION.$CS_MINOR_VERSION-hyperv.vhd.zip"
-["lxc"]="https://download.cloudstack.org/systemvm/$CS_VERSION/systemvmtemplate-$CS_VERSION.$CS_MINOR_VERSION-kvm.qcow2.bz2"
-["ovm3"]="https://download.cloudstack.org/systemvm/$CS_VERSION/systemvmtemplate-$CS_VERSION.$CS_MINOR_VERSION-ovm.raw.bz2" )
+templates=( ["kvm"]="https://download.cloudstack.org/systemvm/${CS_VERSION}/systemvmtemplate-$VERSION-kvm.qcow2.bz2"
+["vmware"]="https://download.cloudstack.org/systemvm/${CS_VERSION}/systemvmtemplate-$VERSION-vmware.ova"
+["xenserver"]="https://download.cloudstack.org/systemvm/$CS_VERSION/systemvmtemplate-$VERSION-xen.vhd.bz2"
+["hyperv"]="https://download.cloudstack.org/systemvm/$CS_VERSION/systemvmtemplate-$VERSION-hyperv.vhd.zip"
+["lxc"]="https://download.cloudstack.org/systemvm/$CS_VERSION/systemvmtemplate-$VERSION-kvm.qcow2.bz2"
+["ovm3"]="https://download.cloudstack.org/systemvm/$CS_VERSION/systemvmtemplate-$VERSION-ovm.raw.bz2" )


 PARENTPATH="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )/dist/systemvm-templates/"
@@ -16,57 +16,6 @@
 // under the License.
 package com.cloud.kubernetes.cluster;

-import java.math.BigInteger;
-import java.net.MalformedURLException;
-import java.net.URL;
-import java.security.SecureRandom;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Date;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.UUID;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import javax.inject.Inject;
-import javax.naming.ConfigurationException;
-
-import org.apache.cloudstack.acl.ControlledEntity;
-import org.apache.cloudstack.acl.SecurityChecker;
-import org.apache.cloudstack.annotation.AnnotationService;
-import org.apache.cloudstack.annotation.dao.AnnotationDao;
-import org.apache.cloudstack.api.ApiConstants;
-import org.apache.cloudstack.api.ApiConstants.VMDetails;
-import org.apache.cloudstack.api.ResponseObject.ResponseView;
-import org.apache.cloudstack.api.command.user.kubernetes.cluster.CreateKubernetesClusterCmd;
-import org.apache.cloudstack.api.command.user.kubernetes.cluster.DeleteKubernetesClusterCmd;
-import org.apache.cloudstack.api.command.user.kubernetes.cluster.GetKubernetesClusterConfigCmd;
-import org.apache.cloudstack.api.command.user.kubernetes.cluster.ListKubernetesClustersCmd;
-import org.apache.cloudstack.api.command.user.kubernetes.cluster.ScaleKubernetesClusterCmd;
-import org.apache.cloudstack.api.command.user.kubernetes.cluster.StartKubernetesClusterCmd;
-import org.apache.cloudstack.api.command.user.kubernetes.cluster.StopKubernetesClusterCmd;
-import org.apache.cloudstack.api.command.user.kubernetes.cluster.UpgradeKubernetesClusterCmd;
-import org.apache.cloudstack.api.response.KubernetesClusterConfigResponse;
-import org.apache.cloudstack.api.response.KubernetesClusterResponse;
-import org.apache.cloudstack.api.response.ListResponse;
-import org.apache.cloudstack.api.response.UserVmResponse;
-import org.apache.cloudstack.config.ApiServiceConfiguration;
-import org.apache.cloudstack.context.CallContext;
-import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService;
-import org.apache.cloudstack.framework.config.ConfigKey;
-import org.apache.cloudstack.managed.context.ManagedContextRunnable;
-import org.apache.commons.codec.binary.Base64;
-import org.apache.commons.collections.CollectionUtils;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
-
 import com.cloud.api.ApiDBUtils;
 import com.cloud.api.query.dao.NetworkOfferingJoinDao;
 import com.cloud.api.query.dao.TemplateJoinDao;
@@ -165,6 +114,53 @@ import com.cloud.vm.VMInstanceVO;
 import com.cloud.vm.VirtualMachine;
 import com.cloud.vm.dao.VMInstanceDao;
 import com.google.common.base.Strings;
+import org.apache.cloudstack.acl.ControlledEntity;
+import org.apache.cloudstack.acl.SecurityChecker;
+import org.apache.cloudstack.annotation.AnnotationService;
+import org.apache.cloudstack.annotation.dao.AnnotationDao;
+import org.apache.cloudstack.api.ApiConstants;
+import org.apache.cloudstack.api.ApiConstants.VMDetails;
+import org.apache.cloudstack.api.ResponseObject.ResponseView;
+import org.apache.cloudstack.api.command.user.kubernetes.cluster.CreateKubernetesClusterCmd;
+import org.apache.cloudstack.api.command.user.kubernetes.cluster.DeleteKubernetesClusterCmd;
+import org.apache.cloudstack.api.command.user.kubernetes.cluster.GetKubernetesClusterConfigCmd;
+import org.apache.cloudstack.api.command.user.kubernetes.cluster.ListKubernetesClustersCmd;
+import org.apache.cloudstack.api.command.user.kubernetes.cluster.ScaleKubernetesClusterCmd;
+import org.apache.cloudstack.api.command.user.kubernetes.cluster.StartKubernetesClusterCmd;
+import org.apache.cloudstack.api.command.user.kubernetes.cluster.StopKubernetesClusterCmd;
+import org.apache.cloudstack.api.command.user.kubernetes.cluster.UpgradeKubernetesClusterCmd;
+import org.apache.cloudstack.api.response.KubernetesClusterConfigResponse;
+import org.apache.cloudstack.api.response.KubernetesClusterResponse;
+import org.apache.cloudstack.api.response.ListResponse;
+import org.apache.cloudstack.api.response.UserVmResponse;
+import org.apache.cloudstack.config.ApiServiceConfiguration;
+import org.apache.cloudstack.context.CallContext;
+import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService;
+import org.apache.cloudstack.framework.config.ConfigKey;
+import org.apache.cloudstack.managed.context.ManagedContextRunnable;
+import org.apache.commons.codec.binary.Base64;
+import org.apache.commons.collections.CollectionUtils;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+
+import javax.inject.Inject;
+import javax.naming.ConfigurationException;
+import java.math.BigInteger;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.security.SecureRandom;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Date;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+
 import static com.cloud.utils.NumbersUtil.toHumanReadableSize;

@@ -428,21 +424,18 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne

 private void validateDockerRegistryParams(final String dockerRegistryUserName,
 final String dockerRegistryPassword,
-final String dockerRegistryUrl,
-final String dockerRegistryEmail) {
+final String dockerRegistryUrl) {
 // if no params related to docker registry specified then nothing to validate so return true
 if ((dockerRegistryUserName == null || dockerRegistryUserName.isEmpty()) &&
 (dockerRegistryPassword == null || dockerRegistryPassword.isEmpty()) &&
-(dockerRegistryUrl == null || dockerRegistryUrl.isEmpty()) &&
-(dockerRegistryEmail == null || dockerRegistryEmail.isEmpty())) {
+(dockerRegistryUrl == null || dockerRegistryUrl.isEmpty())) {
 return;
 }

 // all params related to docker registry must be specified or nothing
 if (!((dockerRegistryUserName != null && !dockerRegistryUserName.isEmpty()) &&
 (dockerRegistryPassword != null && !dockerRegistryPassword.isEmpty()) &&
-(dockerRegistryUrl != null && !dockerRegistryUrl.isEmpty()) &&
-(dockerRegistryEmail != null && !dockerRegistryEmail.isEmpty()))) {
+(dockerRegistryUrl != null && !dockerRegistryUrl.isEmpty()))) {
 throw new InvalidParameterValueException("All the docker private registry parameters (username, password, url, email) required are specified");
 }

@@ -451,12 +444,6 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
 } catch (MalformedURLException e) {
 throw new InvalidParameterValueException("Invalid docker registry url specified");
 }
-
-Pattern VALID_EMAIL_ADDRESS_REGEX = Pattern.compile("^[A-Z0-9._%+-]+@[A-Z0-9.-]+\\.[A-Z]{2,6}$", Pattern.CASE_INSENSITIVE);
-Matcher matcher = VALID_EMAIL_ADDRESS_REGEX.matcher(dockerRegistryEmail);
-if (!matcher.find()) {
-throw new InvalidParameterValueException("Invalid docker registry email specified");
-}
 }

 private DeployDestination plan(final long nodesCount, final DataCenter zone, final ServiceOffering offering) throws InsufficientServerCapacityException {
@@ -619,7 +606,6 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
 final String dockerRegistryUserName = cmd.getDockerRegistryUserName();
 final String dockerRegistryPassword = cmd.getDockerRegistryPassword();
 final String dockerRegistryUrl = cmd.getDockerRegistryUrl();
-final String dockerRegistryEmail = cmd.getDockerRegistryEmail();
 final Long nodeRootDiskSize = cmd.getNodeRootDiskSize();
 final String externalLoadBalancerIpAddress = cmd.getExternalLoadBalancerIpAddress();

@@ -706,7 +692,7 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
 throw new InvalidParameterValueException("Given service offering ID: %s is not suitable for Kubernetes cluster");
 }

-validateDockerRegistryParams(dockerRegistryUserName, dockerRegistryPassword, dockerRegistryUrl, dockerRegistryEmail);
+validateDockerRegistryParams(dockerRegistryUserName, dockerRegistryPassword, dockerRegistryUrl);

 Network network = null;
 if (networkId != null) {
@@ -729,7 +715,7 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
 }

 if (!KubernetesClusterExperimentalFeaturesEnabled.value() && (!Strings.isNullOrEmpty(dockerRegistryUrl) ||
-!Strings.isNullOrEmpty(dockerRegistryUserName) || !Strings.isNullOrEmpty(dockerRegistryEmail) || !Strings.isNullOrEmpty(dockerRegistryPassword))) {
+!Strings.isNullOrEmpty(dockerRegistryUserName) || !Strings.isNullOrEmpty(dockerRegistryPassword))) {
 throw new CloudRuntimeException(String.format("Private registry for the Kubernetes cluster is an experimental feature. Use %s configuration for enabling experimental features", KubernetesClusterExperimentalFeaturesEnabled.key()));
 }
 }
@@ -779,7 +765,6 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
 final String dockerRegistryUserName = cmd.getDockerRegistryUserName();
 final String dockerRegistryPassword = cmd.getDockerRegistryPassword();
 final String dockerRegistryUrl = cmd.getDockerRegistryUrl();
-final String dockerRegistryEmail = cmd.getDockerRegistryEmail();
 final boolean networkCleanup = cmd.getNetworkId() == null;
 Transaction.execute(new TransactionCallbackNoReturn() {
 @Override
@@ -797,9 +782,7 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
 if (!Strings.isNullOrEmpty(dockerRegistryUrl)) {
 details.add(new KubernetesClusterDetailsVO(kubernetesCluster.getId(), ApiConstants.DOCKER_REGISTRY_URL, dockerRegistryUrl, true));
 }
-if (!Strings.isNullOrEmpty(dockerRegistryEmail)) {
-details.add(new KubernetesClusterDetailsVO(kubernetesCluster.getId(), ApiConstants.DOCKER_REGISTRY_EMAIL, dockerRegistryEmail, true));
-}
 details.add(new KubernetesClusterDetailsVO(kubernetesCluster.getId(), ApiConstants.USERNAME, "admin", true));
 SecureRandom random = new SecureRandom();
 String randomPassword = new BigInteger(130, random).toString(32);
@@ -17,27 +17,6 @@

 package com.cloud.kubernetes.cluster.actionworkers;

-import java.io.BufferedWriter;
-import java.io.File;
-import java.io.FileWriter;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Objects;
-
-import javax.inject.Inject;
-
-import org.apache.cloudstack.api.ApiConstants;
-import org.apache.cloudstack.ca.CAManager;
-import org.apache.cloudstack.config.ApiServiceConfiguration;
-import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService;
-import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
-import org.apache.commons.collections.CollectionUtils;
-import org.apache.commons.io.IOUtils;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
-
 import com.cloud.dc.DataCenterVO;
 import com.cloud.dc.dao.DataCenterDao;
 import com.cloud.dc.dao.VlanDao;
@@ -78,14 +57,40 @@ import com.cloud.utils.exception.CloudRuntimeException;
 import com.cloud.utils.fsm.NoTransitionException;
 import com.cloud.utils.fsm.StateMachine2;
 import com.cloud.utils.ssh.SshHelper;
+import com.cloud.vm.UserVmDetailVO;
 import com.cloud.vm.UserVmService;
+import com.cloud.vm.UserVmVO;
 import com.cloud.vm.VirtualMachineManager;
+import com.cloud.vm.VmDetailConstants;
 import com.cloud.vm.dao.UserVmDao;
+import com.cloud.vm.dao.UserVmDetailsDao;
 import com.google.common.base.Strings;
+import org.apache.cloudstack.api.ApiConstants;
+import org.apache.cloudstack.ca.CAManager;
+import org.apache.cloudstack.config.ApiServiceConfiguration;
+import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService;
+import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
+import org.apache.commons.collections.CollectionUtils;
+import org.apache.commons.io.IOUtils;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+
+import javax.inject.Inject;
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Objects;
+import java.util.Set;
+import java.util.stream.Collectors;
+
 public class KubernetesClusterActionWorker {

-public static final String CLUSTER_NODE_VM_USER = "core";
+public static final String CLUSTER_NODE_VM_USER = "cloud";
 public static final int CLUSTER_API_PORT = 6443;
 public static final int CLUSTER_NODES_DEFAULT_START_SSH_PORT = 2222;

@@ -120,6 +125,8 @@ public class KubernetesClusterActionWorker {
 @Inject
 protected UserVmDao userVmDao;
 @Inject
+protected UserVmDetailsDao userVmDetailsDao;
+@Inject
 protected UserVmService userVmService;
 @Inject
 protected VlanDao vlanDao;
@@ -175,6 +182,27 @@ public class KubernetesClusterActionWorker {
 return IOUtils.toString(Objects.requireNonNull(Thread.currentThread().getContextClassLoader().getResourceAsStream(resource)), StringUtils.getPreferredCharset());
 }

+protected String getControlNodeLoginUser() {
+List<KubernetesClusterVmMapVO> vmMapVOList = getKubernetesClusterVMMaps();
+if (vmMapVOList.size() > 0) {
+long vmId = vmMapVOList.get(0).getVmId();
+UserVmVO userVM = userVmDao.findById(vmId);
+if (userVM == null) {
+throw new CloudRuntimeException("Failed to find login user, Unable to log in to node to fetch details");
+}
+Set<String> vm = new HashSet<>();
+vm.add(userVM.getName());
+UserVmDetailVO vmDetail = userVmDetailsDao.findDetail(vmId, VmDetailConstants.CKS_CONTROL_NODE_LOGIN_USER);
+if (vmDetail != null && !org.apache.commons.lang3.StringUtils.isEmpty(vmDetail.getValue())) {
+return vmDetail.getValue();
+} else {
+return CLUSTER_NODE_VM_USER;
+}
+} else {
+return CLUSTER_NODE_VM_USER;
+}
+}
+
 protected void logMessage(final Level logLevel, final String message, final Exception e) {
 if (logLevel == Level.INFO) {
 if (LOGGER.isInfoEnabled()) {
@@ -410,6 +438,20 @@ public class KubernetesClusterActionWorker {
 return vmList;
 }

+protected void updateLoginUserDetails(List<Long> clusterVMs) {
+if (clusterVMs == null) {
+clusterVMs = getKubernetesClusterVMMaps().stream().map(KubernetesClusterVmMapVO::getVmId).collect(Collectors.toList());
+}
+if (!CollectionUtils.isEmpty(clusterVMs)) {
+for (Long vmId : clusterVMs) {
+UserVm controlNode = userVmDao.findById(vmId);
+if (controlNode != null) {
+userVmDetailsDao.addDetail(vmId, VmDetailConstants.CKS_CONTROL_NODE_LOGIN_USER, CLUSTER_NODE_VM_USER, true);
+}
+}
+}
+}
+
 protected boolean stateTransitTo(long kubernetesClusterId, KubernetesCluster.Event e) {
 KubernetesClusterVO kubernetesCluster = kubernetesClusterDao.findById(kubernetesClusterId);
 try {
@@ -424,13 +466,14 @@ public class KubernetesClusterActionWorker {
 protected boolean createCloudStackSecret(String[] keys) {
 File pkFile = getManagementServerSshPublicKeyFile();
 Pair<String, Integer> publicIpSshPort = getKubernetesClusterServerIpSshPort(null);
+List<KubernetesClusterVmMapVO> vmMapVOList = getKubernetesClusterVMMaps();
 publicIpAddress = publicIpSshPort.first();
 sshPort = publicIpSshPort.second();

 try {
 final String command = String.format("sudo %s/%s -u '%s' -k '%s' -s '%s'",
 scriptPath, deploySecretsScriptFilename, ApiServiceConfiguration.ApiServletPath.value(), keys[0], keys[1]);
-Pair<Boolean, String> result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER,
+Pair<Boolean, String> result = SshHelper.sshExecute(publicIpAddress, sshPort, getControlNodeLoginUser(),
 pkFile, null, command, 10000, 10000, 60000);
 return result.first();
 } catch (Exception e) {
@@ -469,10 +512,10 @@ public class KubernetesClusterActionWorker {

 protected void copyScriptFile(String nodeAddress, final int sshPort, File file, String desitnation) {
 try {
-SshHelper.scpTo(nodeAddress, sshPort, CLUSTER_NODE_VM_USER, sshKeyFile, null,
+SshHelper.scpTo(nodeAddress, sshPort, getControlNodeLoginUser(), sshKeyFile, null,
 "~/", file.getAbsolutePath(), "0755");
 String cmdStr = String.format("sudo mv ~/%s %s/%s", file.getName(), scriptPath, desitnation);
-SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, sshKeyFile, null,
+SshHelper.sshExecute(publicIpAddress, sshPort, getControlNodeLoginUser(), sshKeyFile, null,
 cmdStr, 10000, 10000, 10 * 60 * 1000);
 } catch (Exception e) {
 throw new CloudRuntimeException(e);
@@ -496,7 +539,7 @@ public class KubernetesClusterActionWorker {
 publicIpAddress = publicIpSshPort.first();
 sshPort = publicIpSshPort.second();

-Pair<Boolean, String> result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER,
+Pair<Boolean, String> result = SshHelper.sshExecute(publicIpAddress, sshPort, getControlNodeLoginUser(),
 pkFile, null, commands.toString(), 10000, 10000, 60000);
 return result.first();
 } catch (Exception e) {
@@ -521,7 +564,7 @@ public class KubernetesClusterActionWorker {

 try {
 String command = String.format("sudo %s/%s", scriptPath, deployProviderScriptFilename);
-Pair<Boolean, String> result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER,
+Pair<Boolean, String> result = SshHelper.sshExecute(publicIpAddress, sshPort, getControlNodeLoginUser(),
 pkFile, null, command, 10000, 10000, 60000);

 // Maybe the file isn't present. Try and copy it
@@ -536,7 +579,7 @@ public class KubernetesClusterActionWorker {
 }

 // If at first you don't succeed ...
-result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER,
+result = SshHelper.sshExecute(publicIpAddress, sshPort, getControlNodeLoginUser(),
 pkFile, null, command, 10000, 10000, 60000);
 if (!result.first()) {
 throw new CloudRuntimeException(result.second());
@@ -17,26 +17,6 @@

 package com.cloud.kubernetes.cluster.actionworkers;

-import java.io.File;
-import java.io.IOException;
-import java.lang.reflect.Field;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-
-import javax.inject.Inject;
-
-import org.apache.cloudstack.api.ApiConstants;
-import org.apache.cloudstack.api.BaseCmd;
-import org.apache.cloudstack.api.command.user.firewall.CreateFirewallRuleCmd;
-import org.apache.cloudstack.api.command.user.vm.StartVMCmd;
-import org.apache.cloudstack.api.command.user.volume.ResizeVolumeCmd;
-import org.apache.commons.codec.binary.Base64;
-import org.apache.commons.collections.CollectionUtils;
-import org.apache.log4j.Level;
-
 import com.cloud.capacity.CapacityManager;
 import com.cloud.dc.ClusterDetailsDao;
 import com.cloud.dc.ClusterDetailsVO;
@@ -74,9 +54,9 @@ import com.cloud.network.rules.dao.PortForwardingRulesDao;
 import com.cloud.offering.ServiceOffering;
 import com.cloud.resource.ResourceManager;
 import com.cloud.storage.Volume;
+import com.cloud.storage.VolumeApiService;
 import com.cloud.storage.VolumeVO;
 import com.cloud.storage.dao.LaunchPermissionDao;
-import com.cloud.storage.VolumeApiService;
 import com.cloud.storage.dao.VolumeDao;
 import com.cloud.user.Account;
 import com.cloud.user.SSHKeyPairVO;
@@ -95,8 +75,27 @@ import com.cloud.utils.ssh.SshHelper;
 import com.cloud.vm.Nic;
 import com.cloud.vm.UserVmManager;
 import com.cloud.vm.VirtualMachine;
+import com.cloud.vm.VmDetailConstants;
 import com.cloud.vm.dao.VMInstanceDao;
 import com.google.common.base.Strings;
+import org.apache.cloudstack.api.ApiConstants;
+import org.apache.cloudstack.api.BaseCmd;
+import org.apache.cloudstack.api.command.user.firewall.CreateFirewallRuleCmd;
+import org.apache.cloudstack.api.command.user.vm.StartVMCmd;
+import org.apache.cloudstack.api.command.user.volume.ResizeVolumeCmd;
+import org.apache.commons.codec.binary.Base64;
+import org.apache.commons.collections.CollectionUtils;
+import org.apache.log4j.Level;
+
+import javax.inject.Inject;
+import java.io.File;
+import java.io.IOException;
+import java.lang.reflect.Field;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
 import static com.cloud.utils.NumbersUtil.toHumanReadableSize;

@@ -164,56 +163,53 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu
 k8sNodeConfig = k8sNodeConfig.replace(joinIpKey, joinIp);
 k8sNodeConfig = k8sNodeConfig.replace(clusterTokenKey, KubernetesClusterUtil.generateClusterToken(kubernetesCluster));
 k8sNodeConfig = k8sNodeConfig.replace(ejectIsoKey, String.valueOf(ejectIso));
-/* genarate /.docker/config.json file on the nodes only if Kubernetes cluster is created to
- * use docker private registry */
-String dockerUserName = null;
-String dockerPassword = null;
-String dockerRegistryUrl = null;
-String dockerRegistryEmail = null;
-List<KubernetesClusterDetailsVO> details = kubernetesClusterDetailsDao.listDetails(kubernetesCluster.getId());
-for (KubernetesClusterDetailsVO detail : details) {
-if (detail.getName().equals(ApiConstants.DOCKER_REGISTRY_USER_NAME)) {
-dockerUserName = detail.getValue();
-}
-if (detail.getName().equals(ApiConstants.DOCKER_REGISTRY_PASSWORD)) {
-dockerPassword = detail.getValue();
-}
-if (detail.getName().equals(ApiConstants.DOCKER_REGISTRY_URL)) {
-dockerRegistryUrl = detail.getValue();
-}
-if (detail.getName().equals(ApiConstants.DOCKER_REGISTRY_EMAIL)) {
-dockerRegistryEmail = detail.getValue();
-}
-}
-if (!Strings.isNullOrEmpty(dockerUserName) && !Strings.isNullOrEmpty(dockerPassword)) {
-// do write file for /.docker/config.json through the code instead of k8s-node.yml as we can no make a section
-// optional or conditionally applied
-String dockerConfigString = "write_files:\n" +
-" - path: /.docker/config.json\n" +
-" owner: core:core\n" +
-" permissions: '0644'\n" +
-" content: |\n" +
-" {\n" +
-" \"auths\": {\n" +
-" {{docker.url}}: {\n" +
-" \"auth\": {{docker.secret}},\n" +
-" \"email\": {{docker.email}}\n" +
-" }\n" +
-" }\n" +
-" }";
-k8sNodeConfig = k8sNodeConfig.replace("write_files:", dockerConfigString);
-final String dockerUrlKey = "{{docker.url}}";
-final String dockerAuthKey = "{{docker.secret}}";
-final String dockerEmailKey = "{{docker.email}}";
-final String usernamePasswordKey = dockerUserName + ":" + dockerPassword;
-String base64Auth = Base64.encodeBase64String(usernamePasswordKey.getBytes(StringUtils.getPreferredCharset()));
-k8sNodeConfig = k8sNodeConfig.replace(dockerUrlKey, "\"" + dockerRegistryUrl + "\"");
-k8sNodeConfig = k8sNodeConfig.replace(dockerAuthKey, "\"" + base64Auth + "\"");
-k8sNodeConfig = k8sNodeConfig.replace(dockerEmailKey, "\"" + dockerRegistryEmail + "\"");
-}
+k8sNodeConfig = updateKubeConfigWithRegistryDetails(k8sNodeConfig);
+
 return k8sNodeConfig;
 }

+protected String updateKubeConfigWithRegistryDetails(String k8sConfig) {
+/* genarate /etc/containerd/config.toml file on the nodes only if Kubernetes cluster is created to
+ * use docker private registry */
+String registryUsername = null;
+String registryPassword = null;
+String registryUrl = null;
+
+List<KubernetesClusterDetailsVO> details = kubernetesClusterDetailsDao.listDetails(kubernetesCluster.getId());
+for (KubernetesClusterDetailsVO detail : details) {
+if (detail.getName().equals(ApiConstants.DOCKER_REGISTRY_USER_NAME)) {
+registryUsername = detail.getValue();
+}
+if (detail.getName().equals(ApiConstants.DOCKER_REGISTRY_PASSWORD)) {
+registryPassword = detail.getValue();
+}
+if (detail.getName().equals(ApiConstants.DOCKER_REGISTRY_URL)) {
+registryUrl = detail.getValue();
+}
+}
+if (!Strings.isNullOrEmpty(registryUsername) && !Strings.isNullOrEmpty(registryPassword) && !Strings.isNullOrEmpty(registryUrl)) {
+// Update runcmd in the cloud-init configuration to run a script that updates the containerd config with provided registry details
+String runCmd = "- bash -x /opt/bin/setup-containerd";
+
+String registryEp = registryUrl.split("://")[1];
+k8sConfig = k8sConfig.replace("- containerd config default > /etc/containerd/config.toml", runCmd);
+final String registryUrlKey = "{{registry.url}}";
+final String registryUrlEpKey = "{{registry.url.endpoint}}";
+final String registryAuthKey = "{{registry.token}}";
+final String registryUname = "{{registry.username}}";
+final String registryPsswd = "{{registry.password}}";
+
+final String usernamePasswordKey = registryUsername + ":" + registryPassword;
+String base64Auth = Base64.encodeBase64String(usernamePasswordKey.getBytes(StringUtils.getPreferredCharset()));
+k8sConfig = k8sConfig.replace(registryUrlKey, registryUrl);
+k8sConfig = k8sConfig.replace(registryUrlEpKey, registryEp);
+k8sConfig = k8sConfig.replace(registryUname, registryUsername);
+k8sConfig = k8sConfig.replace(registryPsswd, registryPassword);
+k8sConfig = k8sConfig.replace(registryAuthKey, base64Auth);
+}
+return k8sConfig;
+}
+
 protected DeployDestination plan(final long nodesCount, final DataCenter zone, final ServiceOffering offering) throws InsufficientServerCapacityException {
 final int cpu_requested = offering.getCpu() * offering.getSpeed();
 final long ram_requested = offering.getRamSize() * 1024L * 1024L;
@@ -369,6 +365,9 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu
 if (rootDiskSize > 0) {
 customParameterMap.put("rootdisksize", String.valueOf(rootDiskSize));
 }
+if (Hypervisor.HypervisorType.VMware.equals(clusterTemplate.getHypervisorType())) {
+customParameterMap.put(VmDetailConstants.ROOT_DISK_CONTROLLER, "scsi");
+}
 String suffix = Long.toHexString(System.currentTimeMillis());
 String hostName = String.format("%s-node-%s", kubernetesClusterNodeNamePrefix, suffix);
 String k8sNodeConfig = null;
@@ -616,7 +615,7 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu
 if (enable) {
 String command = String.format("sudo /opt/bin/autoscale-kube-cluster -i %s -e -M %d -m %d",
 kubernetesCluster.getUuid(), maxSize, minSize);
-Pair<Boolean, String> result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER,
+Pair<Boolean, String> result = SshHelper.sshExecute(publicIpAddress, sshPort, getControlNodeLoginUser(),
 pkFile, null, command, 10000, 10000, 60000);

 // Maybe the file isn't present. Try and copy it
@@ -631,7 +630,7 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu
 }

 // If at first you don't succeed ...
-result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER,
+result = SshHelper.sshExecute(publicIpAddress, sshPort, getControlNodeLoginUser(),
 pkFile, null, command, 10000, 10000, 60000);
 if (!result.first()) {
 throw new CloudRuntimeException(result.second());
@@ -639,7 +638,7 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu
 }
 updateKubernetesClusterEntry(true, minSize, maxSize);
 } else {
-Pair<Boolean, String> result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER,
+Pair<Boolean, String> result = SshHelper.sshExecute(publicIpAddress, sshPort, getControlNodeLoginUser(),
 pkFile, null, String.format("sudo /opt/bin/autoscale-kube-cluster -d"),
 10000, 10000, 60000);
 if (!result.first()) {
@@ -654,6 +653,7 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu
 return false;
 } finally {
 // Deploying the autoscaler might fail but it can be deployed manually too, so no need to go to an alert state
+updateLoginUserDetails(null);
 stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationSucceeded);
 }
 }
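Since the login user is now resolved through getControlNodeLoginUser() (cloud on the new template, core on older ones), the same autoscaler command can also be run by hand against a control node if the automated deployment fails. A hedged sketch, with the key path, SSH port, address and cluster UUID all placeholders:

    # assumptions: the cluster ssh key was downloaded as cluster.key and the control node is reachable on port 2222
    ssh -i cluster.key -p 2222 cloud@203.0.113.10 \
        "sudo /opt/bin/autoscale-kube-cluster -i <cluster-uuid> -e -M 5 -m 2"
    # and it can be disabled again with: sudo /opt/bin/autoscale-kube-cluster -d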
@@ -26,6 +26,7 @@ import java.util.stream.Collectors;

 import javax.inject.Inject;

+import org.apache.cloudstack.api.InternalIdentity;
 import org.apache.cloudstack.context.CallContext;
 import org.apache.commons.collections.CollectionUtils;
 import org.apache.log4j.Level;
@@ -192,13 +193,13 @@ public class KubernetesClusterScaleWorker extends KubernetesClusterResourceModif
 while (retryCounter < retries) {
 retryCounter++;
 try {
-Pair<Boolean, String> result = SshHelper.sshExecute(ipAddress, port, CLUSTER_NODE_VM_USER,
+Pair<Boolean, String> result = SshHelper.sshExecute(ipAddress, port, getControlNodeLoginUser(),
 pkFile, null, String.format("sudo /opt/bin/kubectl drain %s --ignore-daemonsets --delete-local-data", hostName),
 10000, 10000, 60000);
 if (!result.first()) {
 LOGGER.warn(String.format("Draining node: %s on VM : %s in Kubernetes cluster : %s unsuccessful", hostName, userVm.getDisplayName(), kubernetesCluster.getName()));
 } else {
-result = SshHelper.sshExecute(ipAddress, port, CLUSTER_NODE_VM_USER,
+result = SshHelper.sshExecute(ipAddress, port, getControlNodeLoginUser(),
 pkFile, null, String.format("sudo /opt/bin/kubectl delete node %s", hostName),
 10000, 10000, 30000);
 if (result.first()) {
@@ -359,6 +360,7 @@ public class KubernetesClusterScaleWorker extends KubernetesClusterResourceModif
 launchPermissionDao.persist(launchPermission);
 try {
 clusterVMs = provisionKubernetesClusterNodeVms((int)(newVmCount + kubernetesCluster.getNodeCount()), (int)kubernetesCluster.getNodeCount(), publicIpAddress);
+updateLoginUserDetails(clusterVMs.stream().map(InternalIdentity::getId).collect(Collectors.toList()));
 } catch (CloudRuntimeException | ManagementServerException | ResourceUnavailableException | InsufficientCapacityException e) {
 logTransitStateToFailedIfNeededAndThrow(Level.ERROR, String.format("Scaling failed for Kubernetes cluster : %s, unable to provision node VM in the cluster", kubernetesCluster.getName()), e);
 }
@@ -372,7 +374,7 @@ public class KubernetesClusterScaleWorker extends KubernetesClusterResourceModif
 KubernetesClusterVO kubernetesClusterVO = kubernetesClusterDao.findById(kubernetesCluster.getId());
 kubernetesClusterVO.setNodeCount(clusterSize);
 boolean readyNodesCountValid = KubernetesClusterUtil.validateKubernetesClusterReadyNodesCount(kubernetesClusterVO, publicIpAddress, sshPort,
-CLUSTER_NODE_VM_USER, sshKeyFile, scaleTimeoutTime, 15000);
+getControlNodeLoginUser(), sshKeyFile, scaleTimeoutTime, 15000);
 detachIsoKubernetesVMs(clusterVMs);
 deleteTemplateLaunchPermission();
 if (!readyNodesCountValid) { // Scaling failed
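validateKubernetesClusterReadyNodesCount() polls the control node over SSH until the number of Ready nodes matches the new cluster size. A rough shell equivalent of that check, run on the control node (the expected count is whatever the scale operation targets):

    # count nodes whose STATUS column reports Ready; the worker compares this against the expected cluster size
    sudo /opt/bin/kubectl get nodes --no-headers | awk '$2 == "Ready"' | wc -l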
@@ -17,24 +17,6 @@

 package com.cloud.kubernetes.cluster.actionworkers;

-import java.io.IOException;
-import java.net.InetAddress;
-import java.net.MalformedURLException;
-import java.net.URL;
-import java.net.UnknownHostException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.cloudstack.api.BaseCmd;
-import org.apache.cloudstack.framework.ca.Certificate;
-import org.apache.cloudstack.utils.security.CertUtils;
-import org.apache.commons.codec.binary.Base64;
-import org.apache.commons.collections.CollectionUtils;
-import org.apache.log4j.Level;
-
 import com.cloud.dc.DataCenter;
 import com.cloud.dc.Vlan;
 import com.cloud.dc.VlanVO;
@@ -74,7 +56,27 @@ import com.cloud.vm.ReservationContext;
 import com.cloud.vm.ReservationContextImpl;
 import com.cloud.vm.UserVmManager;
 import com.cloud.vm.VirtualMachine;
+import com.cloud.vm.VmDetailConstants;
 import com.google.common.base.Strings;
+import org.apache.cloudstack.api.BaseCmd;
+import org.apache.cloudstack.api.InternalIdentity;
+import org.apache.cloudstack.framework.ca.Certificate;
+import org.apache.cloudstack.utils.security.CertUtils;
+import org.apache.commons.codec.binary.Base64;
+import org.apache.commons.collections.CollectionUtils;
+import org.apache.log4j.Level;
+
+import java.io.IOException;
+import java.net.InetAddress;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.net.UnknownHostException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;

 public class KubernetesClusterStartWorker extends KubernetesClusterResourceModifierActionWorker {

@@ -175,6 +177,8 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif
 initArgs += String.format(" --kubernetes-version=%s", getKubernetesClusterVersion().getSemanticVersion());
 k8sControlNodeConfig = k8sControlNodeConfig.replace(clusterInitArgsKey, initArgs);
 k8sControlNodeConfig = k8sControlNodeConfig.replace(ejectIsoKey, String.valueOf(ejectIso));
+k8sControlNodeConfig = updateKubeConfigWithRegistryDetails(k8sControlNodeConfig);
+
 return k8sControlNodeConfig;
 }

@@ -197,6 +201,9 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif
 if (rootDiskSize > 0) {
 customParameterMap.put("rootdisksize", String.valueOf(rootDiskSize));
 }
+if (Hypervisor.HypervisorType.VMware.equals(clusterTemplate.getHypervisorType())) {
+customParameterMap.put(VmDetailConstants.ROOT_DISK_CONTROLLER, "scsi");
+}
 String suffix = Long.toHexString(System.currentTimeMillis());
 String hostName = String.format("%s-control-%s", kubernetesClusterNodeNamePrefix, suffix);
 boolean haSupported = isKubernetesVersionSupportsHA();
@@ -237,6 +244,8 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif
 k8sControlNodeConfig = k8sControlNodeConfig.replace(clusterTokenKey, KubernetesClusterUtil.generateClusterToken(kubernetesCluster));
 k8sControlNodeConfig = k8sControlNodeConfig.replace(clusterHACertificateKey, KubernetesClusterUtil.generateClusterHACertificateKey(kubernetesCluster));
 k8sControlNodeConfig = k8sControlNodeConfig.replace(ejectIsoKey, String.valueOf(ejectIso));
+k8sControlNodeConfig = updateKubeConfigWithRegistryDetails(k8sControlNodeConfig);
+
 return k8sControlNodeConfig;
 }

@@ -253,6 +262,9 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif
 if (rootDiskSize > 0) {
 customParameterMap.put("rootdisksize", String.valueOf(rootDiskSize));
 }
+if (Hypervisor.HypervisorType.VMware.equals(clusterTemplate.getHypervisorType())) {
+customParameterMap.put(VmDetailConstants.ROOT_DISK_CONTROLLER, "scsi");
+}
 String suffix = Long.toHexString(System.currentTimeMillis());
 String hostName = String.format("%s-control-%s", kubernetesClusterNodeNamePrefix, suffix);
 String k8sControlNodeConfig = null;
@@ -455,7 +467,7 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif
 return true;
 }
 }
-String kubeConfig = KubernetesClusterUtil.getKubernetesClusterConfig(kubernetesCluster, publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, sshKeyFile, timeoutTime);
+String kubeConfig = KubernetesClusterUtil.getKubernetesClusterConfig(kubernetesCluster, publicIpAddress, sshPort, getControlNodeLoginUser(), sshKeyFile, timeoutTime);
 if (!Strings.isNullOrEmpty(kubeConfig)) {
 final String controlVMPrivateIpAddress = getControlVmPrivateIp();
 if (!Strings.isNullOrEmpty(controlVMPrivateIpAddress)) {
@@ -475,7 +487,7 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif
 return true;
 }
 }
-if (KubernetesClusterUtil.isKubernetesClusterDashboardServiceRunning(kubernetesCluster, publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, sshKeyFile, timeoutTime, 15000)) {
+if (KubernetesClusterUtil.isKubernetesClusterDashboardServiceRunning(kubernetesCluster, publicIpAddress, sshPort, getControlNodeLoginUser(), sshKeyFile, timeoutTime, 15000)) {
 kubernetesClusterDetailsDao.addDetail(kubernetesCluster.getId(), "dashboardServiceRunning", String.valueOf(true), false);
 return true;
 }
@@ -571,7 +583,7 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif
 sshPort = publicIpSshPort.second();
 updateKubernetesClusterEntryEndpoint();
 boolean readyNodesCountValid = KubernetesClusterUtil.validateKubernetesClusterReadyNodesCount(kubernetesCluster, publicIpAddress, sshPort,
-CLUSTER_NODE_VM_USER, sshKeyFile, startTimeoutTime, 15000);
+getControlNodeLoginUser(), sshKeyFile, startTimeoutTime, 15000);
 detachIsoKubernetesVMs(clusterVMs);
 if (!readyNodesCountValid) {
 logTransitStateAndThrow(Level.ERROR, String.format("Failed to setup Kubernetes cluster : %s as it does not have desired number of nodes in ready state", kubernetesCluster.getName()), kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed);
@@ -584,6 +596,7 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif
 }
 taintControlNodes();
 deployProvider();
+updateLoginUserDetails(clusterVMs.stream().map(InternalIdentity::getId).collect(Collectors.toList()));
 stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationSucceeded);
 return true;
 }
@@ -638,7 +651,7 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif
 }
 long actualNodeCount = 0;
 try {
-actualNodeCount = KubernetesClusterUtil.getKubernetesClusterReadyNodesCount(kubernetesCluster, publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, sshKeyFile);
+actualNodeCount = KubernetesClusterUtil.getKubernetesClusterReadyNodesCount(kubernetesCluster, publicIpAddress, sshPort, getControlNodeLoginUser(), sshKeyFile);
 } catch (Exception e) {
 return false;
 }
@@ -63,7 +63,7 @@ public class KubernetesClusterUpgradeWorker extends KubernetesClusterActionWorke
 private Pair<Boolean, String> runInstallScriptOnVM(final UserVm vm, final int index) throws Exception {
 int nodeSshPort = sshPort == 22 ? sshPort : sshPort + index;
 String nodeAddress = (index > 0 && sshPort == 22) ? vm.getPrivateIpAddress() : publicIpAddress;
-SshHelper.scpTo(nodeAddress, nodeSshPort, CLUSTER_NODE_VM_USER, sshKeyFile, null,
+SshHelper.scpTo(nodeAddress, nodeSshPort, getControlNodeLoginUser(), sshKeyFile, null,
 "~/", upgradeScriptFile.getAbsolutePath(), "0755");
 String cmdStr = String.format("sudo ./%s %s %s %s %s",
 upgradeScriptFile.getName(),
@@ -71,7 +71,7 @@ public class KubernetesClusterUpgradeWorker extends KubernetesClusterActionWorke
 index == 0 ? "true" : "false",
 KubernetesVersionManagerImpl.compareSemanticVersions(upgradeVersion.getSemanticVersion(), "1.15.0") < 0 ? "true" : "false",
 Hypervisor.HypervisorType.VMware.equals(vm.getHypervisorType()));
-return SshHelper.sshExecute(nodeAddress, nodeSshPort, CLUSTER_NODE_VM_USER, sshKeyFile, null,
+return SshHelper.sshExecute(nodeAddress, nodeSshPort, getControlNodeLoginUser(), sshKeyFile, null,
 cmdStr,
 10000, 10000, 10 * 60 * 1000);
 }
@@ -90,7 +90,7 @@ public class KubernetesClusterUpgradeWorker extends KubernetesClusterActionWorke
 vm.getDisplayName(), kubernetesCluster.getName(), upgradeVersion.getSemanticVersion(), upgradeVersion.getUuid()));
 }
 try {
-result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, sshKeyFile, null,
+result = SshHelper.sshExecute(publicIpAddress, sshPort, getControlNodeLoginUser(), sshKeyFile, null,
 String.format("sudo /opt/bin/kubectl drain %s --ignore-daemonsets --delete-local-data", hostName),
 10000, 10000, 60000);
 } catch (Exception e) {
@@ -114,11 +114,11 @@ public class KubernetesClusterUpgradeWorker extends KubernetesClusterActionWorke
 if (System.currentTimeMillis() > upgradeTimeoutTime) {
 logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster : %s, upgrade action timed out", kubernetesCluster.getName()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, null);
 }
-if (!KubernetesClusterUtil.uncordonKubernetesClusterNode(kubernetesCluster, publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, getManagementServerSshPublicKeyFile(), vm, upgradeTimeoutTime, 15000)) {
+if (!KubernetesClusterUtil.uncordonKubernetesClusterNode(kubernetesCluster, publicIpAddress, sshPort, getControlNodeLoginUser(), getManagementServerSshPublicKeyFile(), vm, upgradeTimeoutTime, 15000)) {
 logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster : %s, unable to uncordon Kubernetes node on VM : %s", kubernetesCluster.getName(), vm.getDisplayName()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, null);
 }
 if (i == 0) { // Wait for control node to get in Ready state
-if (!KubernetesClusterUtil.isKubernetesClusterNodeReady(kubernetesCluster, publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, getManagementServerSshPublicKeyFile(), hostName, upgradeTimeoutTime, 15000)) {
+if (!KubernetesClusterUtil.isKubernetesClusterNodeReady(kubernetesCluster, publicIpAddress, sshPort, getControlNodeLoginUser(), getManagementServerSshPublicKeyFile(), hostName, upgradeTimeoutTime, 15000)) {
 logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster : %s, unable to get control Kubernetes node on VM : %s in ready state", kubernetesCluster.getName(), vm.getDisplayName()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, null);
 }
 }
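The upgrade worker drains each node, runs the upgrade script, then uncordons the node and waits for it to report Ready before moving to the next one. Done by hand the tail of that sequence looks roughly like this (the node name is a placeholder):

    sudo /opt/bin/kubectl uncordon k8s-demo-node-18abc
    # wait until the node is schedulable and Ready again before upgrading the next one
    sudo /opt/bin/kubectl wait --for=condition=Ready node/k8s-demo-node-18abc --timeout=10m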
@@ -16,10 +16,10 @@
 // under the License.
 package org.apache.cloudstack.api.command.user.kubernetes.cluster;

-import java.security.InvalidParameterException;
-
-import javax.inject.Inject;
-
+import com.cloud.kubernetes.cluster.KubernetesCluster;
+import com.cloud.kubernetes.cluster.KubernetesClusterEventTypes;
+import com.cloud.kubernetes.cluster.KubernetesClusterService;
+import com.cloud.utils.exception.CloudRuntimeException;
 import org.apache.cloudstack.acl.RoleType;
 import org.apache.cloudstack.acl.SecurityChecker.AccessType;
 import org.apache.cloudstack.api.ACL;
@@ -41,10 +41,8 @@ import org.apache.cloudstack.api.response.ZoneResponse;
 import org.apache.cloudstack.context.CallContext;
 import org.apache.log4j.Logger;

-import com.cloud.kubernetes.cluster.KubernetesCluster;
-import com.cloud.kubernetes.cluster.KubernetesClusterEventTypes;
-import com.cloud.kubernetes.cluster.KubernetesClusterService;
-import com.cloud.utils.exception.CloudRuntimeException;
+import javax.inject.Inject;
+import java.security.InvalidParameterException;

 @APICommand(name = CreateKubernetesClusterCmd.APINAME,
 description = "Creates a Kubernetes cluster",
@@ -140,10 +138,6 @@ public class CreateKubernetesClusterCmd extends BaseAsyncCreateCmd {
 description = "URL for the docker image private registry")
 private String dockerRegistryUrl;

-@Parameter(name = ApiConstants.DOCKER_REGISTRY_EMAIL, type = CommandType.STRING,
-description = "email of the docker image private registry user")
-private String dockerRegistryEmail;
-
 @Parameter(name = ApiConstants.NODE_ROOT_DISK_SIZE, type = CommandType.LONG,
 description = "root disk size in GB for each node")
 private Long nodeRootDiskSize;
@@ -226,10 +220,6 @@ public class CreateKubernetesClusterCmd extends BaseAsyncCreateCmd {
 return dockerRegistryUrl;
 }

-public String getDockerRegistryEmail() {
-return dockerRegistryEmail;
-}
-
 public Long getNodeRootDiskSize() {
 if (nodeRootDiskSize != null) {
 if (nodeRootDiskSize < DEFAULT_NODE_ROOT_DISK_SIZE) {
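With the registry email parameter dropped, a private-registry cluster only needs the username, password and URL details at creation time. A hedged CloudMonkey sketch of the call (the IDs are placeholders and the exact parameter spellings are assumed from ApiConstants; verify them against the installed API before relying on this):

    cmk create kubernetescluster name=demo zoneid=<zone-uuid> kubernetesversionid=<version-uuid> \
        serviceofferingid=<offering-uuid> size=2 \
        dockerregistryusername=myuser dockerregistrypassword=secret \
        dockerregistryurl=https://registry.example.com:5000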
@@ -18,7 +18,7 @@

 ---
 users:
-- name: core
+- name: cloud
 sudo: ALL=(ALL) NOPASSWD:ALL
 shell: /bin/bash
 ssh_authorized_keys:
@@ -31,7 +31,7 @@ write_files:
 content: |
 #!/bin/bash -e

-if [[ -f "/home/core/success" ]]; then
+if [[ -f "/home/cloud/success" ]]; then
 echo "Already provisioned!"
 exit 0
 fi
@@ -107,6 +107,8 @@ write_files:
 mkdir -p /etc/systemd/system/kubelet.service.d
 sed "s:/usr/bin:/opt/bin:g" ${BINARIES_DIR}/10-kubeadm.conf > /etc/systemd/system/kubelet.service.d/10-kubeadm.conf

+echo "KUBELET_EXTRA_ARGS=--cgroup-driver=systemd" > /etc/default/kubelet
+
 output=`ls ${BINARIES_DIR}/docker/`
 if [ "$output" != "" ]; then
 while read -r line; do
@@ -118,7 +120,7 @@ write_files:
 fi
 retval=0
 set +e
-docker load < "${BINARIES_DIR}/docker/$line"
+ctr image import "${BINARIES_DIR}/docker/$line"
 retval=$?
 set -e
 if [ $retval -eq 0 ]; then
@@ -165,7 +167,7 @@ write_files:
 fi

 systemctl enable kubelet && systemctl start kubelet
-modprobe br_netfilter && sysctl net.bridge.bridge-nf-call-iptables=1
+modprobe overlay && modprobe br_netfilter && sysctl net.bridge.bridge-nf-call-iptables=1

 if [ -d "$BINARIES_DIR" ] && [ "$ATTEMPT_ONLINE_INSTALL" = true ]; then
 crucial_cmd_attempts=1
@@ -176,7 +178,7 @@ write_files:
 fi
 retval=0
 set +e
-kubeadm config images pull
+kubeadm config images pull --cri-socket /run/containerd/containerd.sock
 retval=$?
 set -e
 if [ $retval -eq 0 ]; then
@@ -192,7 +194,7 @@ write_files:
 content: |
 #!/bin/bash -e

-if [[ -f "/home/core/success" ]]; then
+if [[ -f "/home/cloud/success" ]]; then
 echo "Already provisioned!"
 exit 0
 fi
@@ -210,16 +212,33 @@ write_files:
 fi
 kubeadm join {{ k8s_control_node.join_ip }}:6443 --token {{ k8s_control_node.cluster.token }} --control-plane --certificate-key {{ k8s_control_node.cluster.ha.certificate.key }} --discovery-token-unsafe-skip-ca-verification

-sudo touch /home/core/success
-echo "true" > /home/core/success
+sudo touch /home/cloud/success
+echo "true" > /home/cloud/success

+- path: /opt/bin/setup-containerd
+permissions: '0755'
+owner: root:root
+content: |
+#!/bin/bash -e
+
+export registryConfig="\\ [plugins.\"io.containerd.grpc.v1.cri\".registry.mirrors.\"{{registry.url.endpoint}}\"]\n \\ endpoint = [\"{{registry.url}}\"]"
+export registryCredentials="\\ [plugins.\"io.containerd.grpc.v1.cri\".registry.configs.\"{{registry.url.endpoint}}\".auth]\n\tusername = \"{{registry.username}}\" \n\tpassword = \"{{registry.password}}\" \n\tidentitytoken = \"{{registry.token}}\""
+
+echo "creating config file for containerd"
+containerd config default > /etc/containerd/config.toml
+sed -i '/\[plugins."io.containerd.grpc.v1.cri".registry\]/a '"${registryCredentials}"'' /etc/containerd/config.toml
+sed -i '/\[plugins."io.containerd.grpc.v1.cri".registry.mirrors\]/a '"${registryConfig}"'' /etc/containerd/config.toml
+
+echo "Restarting containerd service"
+systemctl restart containerd
+
 - path: /etc/systemd/system/setup-kube-system.service
 permissions: '0755'
 owner: root:root
 content: |
 [Unit]
-Requires=docker.service
-After=docker.service
+Requires=containerd.service
+After=containerd.service

 [Service]
 Type=simple
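The two sed commands in /opt/bin/setup-containerd splice the mirror and auth entries into the stock containerd configuration. On a provisioned node the registry section of /etc/containerd/config.toml should end up roughly like the shape below; the registry host and credentials are placeholders, and the check command is just one way to inspect the result:

    # inspect what actually landed in the effective containerd config
    sudo containerd config dump | grep -A 4 'registry'
    #
    # expected shape, approximately:
    #   [plugins."io.containerd.grpc.v1.cri".registry.mirrors."registry.example.com:5000"]
    #     endpoint = ["https://registry.example.com:5000"]
    #   [plugins."io.containerd.grpc.v1.cri".registry.configs."registry.example.com:5000".auth]
    #     username = "myuser"
    #     password = "secret"
    #     identitytoken = "<base64 of user:pass>"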
@@ -241,6 +260,10 @@ write_files:
 ExecStart=/opt/bin/deploy-kube-system

 runcmd:
+- chown -R cloud:cloud /home/cloud/.ssh
+- containerd config default > /etc/containerd/config.toml
+- sed -i '/\[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options\]/a '"\\ SystemdCgroup=true"'' /etc/containerd/config.toml
+- systemctl restart containerd
 - [ systemctl, start, setup-kube-system ]
 - [ systemctl, start, deploy-kube-system ]

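The runcmd additions keep the kubelet and containerd on the same cgroup driver: SystemdCgroup = true on the containerd side and --cgroup-driver=systemd for the kubelet (written to /etc/default/kubelet earlier in the node script). A quick way to confirm both ends agree on a provisioned node:

    grep -n 'SystemdCgroup' /etc/containerd/config.toml     # expect: SystemdCgroup = true
    cat /etc/default/kubelet                                # expect: KUBELET_EXTRA_ARGS=--cgroup-driver=systemd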
@@ -18,7 +18,7 @@

 ---
 users:
-- name: core
+- name: cloud
 sudo: ALL=(ALL) NOPASSWD:ALL
 shell: /bin/bash
 ssh_authorized_keys:
@@ -51,7 +51,7 @@ write_files:
 content: |
 #!/bin/bash -e

-if [[ -f "/home/core/success" ]]; then
+if [[ -f "/home/cloud/success" ]]; then
 echo "Already provisioned!"
 exit 0
 fi
@@ -127,6 +127,8 @@ write_files:
 mkdir -p /etc/systemd/system/kubelet.service.d
 sed "s:/usr/bin:/opt/bin:g" ${BINARIES_DIR}/10-kubeadm.conf > /etc/systemd/system/kubelet.service.d/10-kubeadm.conf

+echo "KUBELET_EXTRA_ARGS=--cgroup-driver=systemd" > /etc/default/kubelet
+
 output=`ls ${BINARIES_DIR}/docker/`
 if [ "$output" != "" ]; then
 while read -r line; do
@@ -138,7 +140,7 @@ write_files:
 fi
 retval=0
 set +e
-docker load < "${BINARIES_DIR}/docker/$line"
+ctr image import "${BINARIES_DIR}/docker/$line"
 retval=$?
 set -e
 if [ $retval -eq 0 ]; then
@@ -187,7 +189,7 @@ write_files:
 fi

 systemctl enable kubelet && systemctl start kubelet
-modprobe br_netfilter && sysctl net.bridge.bridge-nf-call-iptables=1
+modprobe overlay && modprobe br_netfilter && sysctl net.bridge.bridge-nf-call-iptables=1

 if [ -d "$BINARIES_DIR" ] && [ "$ATTEMPT_ONLINE_INSTALL" = true ]; then
 crucial_cmd_attempts=1
@@ -198,7 +200,7 @@ write_files:
 fi
 retval=0
 set +e
-kubeadm config images pull
+kubeadm config images pull --cri-socket /run/containerd/containerd.sock
 retval=$?
 set -e
 if [ $retval -eq 0 ]; then
@@ -216,7 +218,7 @@ write_files:
 fi
 retval=0
 set +e
-kubeadm init --token {{ k8s_control_node.cluster.token }} --token-ttl 0 {{ k8s_control_node.cluster.initargs }}
+kubeadm init --token {{ k8s_control_node.cluster.token }} --token-ttl 0 {{ k8s_control_node.cluster.initargs }} --cri-socket /run/containerd/containerd.sock
 retval=$?
 set -e
 if [ $retval -eq 0 ]; then
@@ -231,7 +233,7 @@ write_files:
 content: |
 #!/bin/bash -e

-if [[ -f "/home/core/success" ]]; then
+if [[ -f "/home/cloud/success" ]]; then
 echo "Already provisioned!"
 exit 0
 fi
@@ -267,16 +269,34 @@ write_files:
 /opt/bin/kubectl create clusterrolebinding cluster-admin-binding --clusterrole=cluster-admin --user=admin || true
 /opt/bin/kubectl create clusterrolebinding kubernetes-dashboard-ui --clusterrole=cluster-admin --serviceaccount=kubernetes-dashboard:kubernetes-dashboard || true

-sudo touch /home/core/success
-echo "true" > /home/core/success
+sudo touch /home/cloud/success
+echo "true" > /home/cloud/success

+- path: /opt/bin/setup-containerd
+permissions: '0755'
+owner: root:root
+content: |
+#!/bin/bash -e
+
+export registryConfig="\\ [plugins.\"io.containerd.grpc.v1.cri\".registry.mirrors.\"{{registry.url.endpoint}}\"]\n \\ endpoint = [\"{{registry.url}}\"]"
+export registryCredentials="\\ [plugins.\"io.containerd.grpc.v1.cri\".registry.configs.\"{{registry.url.endpoint}}\".auth]\n\tusername = \"{{registry.username}}\" \n\tpassword = \"{{registry.password}}\" \n\tidentitytoken = \"{{registry.token}}\""
+
+echo "creating config file for containerd"
+containerd config default > /etc/containerd/config.toml
+sed -i '/\[plugins."io.containerd.grpc.v1.cri".registry\]/a '"${registryCredentials}"'' /etc/containerd/config.toml
+sed -i '/\[plugins."io.containerd.grpc.v1.cri".registry.mirrors\]/a '"${registryConfig}"'' /etc/containerd/config.toml
+
+echo "Restarting containerd service"
+systemctl restart containerd
+
+
 - path: /etc/systemd/system/setup-kube-system.service
 permissions: '0755'
 owner: root:root
 content: |
 [Unit]
-Requires=docker.service
-After=docker.service
+Requires=containerd.service
+After=containerd.service

 [Service]
 Type=simple
@@ -298,6 +318,10 @@ write_files:
 ExecStart=/opt/bin/deploy-kube-system

 runcmd:
+- chown -R cloud:cloud /home/cloud/.ssh
+- containerd config default > /etc/containerd/config.toml
+- sed -i '/\[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options\]/a '"\\ SystemdCgroup=true"'' /etc/containerd/config.toml
+- systemctl restart containerd
 - [ systemctl, start, setup-kube-system ]
 - [ systemctl, start, deploy-kube-system ]

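Passing --cri-socket /run/containerd/containerd.sock to kubeadm pins the node explicitly to containerd now that the Docker runtime is no longer installed. Once the control plane is up, the runtime the node actually registered with can be confirmed from the control node:

    sudo /opt/bin/kubectl get nodes -o wide   # the CONTAINER-RUNTIME column should read containerd://<version>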
@@ -18,7 +18,7 @@

 ---
 users:
-- name: core
+- name: cloud
 sudo: ALL=(ALL) NOPASSWD:ALL
 shell: /bin/bash
 ssh_authorized_keys:
@@ -31,7 +31,7 @@ write_files:
 content: |
 #!/bin/bash -e

-if [[ -f "/home/core/success" ]]; then
+if [[ -f "/home/cloud/success" ]]; then
 echo "Already provisioned!"
 exit 0
 fi
@@ -107,6 +107,8 @@ write_files:
 mkdir -p /etc/systemd/system/kubelet.service.d
 sed "s:/usr/bin:/opt/bin:g" ${BINARIES_DIR}/10-kubeadm.conf > /etc/systemd/system/kubelet.service.d/10-kubeadm.conf

+echo "KUBELET_EXTRA_ARGS=--cgroup-driver=systemd" > /etc/default/kubelet
+
 output=`ls ${BINARIES_DIR}/docker/`
 if [ "$output" != "" ]; then
 while read -r line; do
@@ -118,7 +120,7 @@ write_files:
 fi
 retval=0
 set +e
-docker load < "${BINARIES_DIR}/docker/$line"
+ctr image import "${BINARIES_DIR}/docker/$line"
 retval=$?
 set -e
 if [ $retval -eq 0 ]; then
@@ -165,7 +167,7 @@ write_files:
 fi

 systemctl enable kubelet && systemctl start kubelet
-modprobe br_netfilter && sysctl net.bridge.bridge-nf-call-iptables=1
+modprobe overlay && modprobe br_netfilter && sysctl net.bridge.bridge-nf-call-iptables=1

 if [ -d "$BINARIES_DIR" ] && [ "$ATTEMPT_ONLINE_INSTALL" = true ]; then
 crucial_cmd_attempts=1
@@ -176,7 +178,7 @@ write_files:
 fi
 retval=0
 set +e
-kubeadm config images pull
+kubeadm config images pull --cri-socket /run/containerd/containerd.sock
 retval=$?
 set -e
 if [ $retval -eq 0 ]; then
@@ -192,7 +194,7 @@ write_files:
 content: |
 #!/bin/bash -e

-if [[ -f "/home/core/success" ]]; then
+if [[ -f "/home/cloud/success" ]]; then
 echo "Already provisioned!"
 exit 0
 fi
@@ -210,16 +212,33 @@ write_files:
 fi
 kubeadm join {{ k8s_control_node.join_ip }}:6443 --token {{ k8s_control_node.cluster.token }} --discovery-token-unsafe-skip-ca-verification

-sudo touch /home/core/success
-echo "true" > /home/core/success
+sudo touch /home/cloud/success
+echo "true" > /home/cloud/success

+- path: /opt/bin/setup-containerd
+permissions: '0755'
+owner: root:root
+content: |
+#!/bin/bash -e
+
+export registryConfig="\\ [plugins.\"io.containerd.grpc.v1.cri\".registry.mirrors.\"{{registry.url.endpoint}}\"]\n \\ endpoint = [\"{{registry.url}}\"]"
+export registryCredentials="\\ [plugins.\"io.containerd.grpc.v1.cri\".registry.configs.\"{{registry.url.endpoint}}\".auth]\n\tusername = \"{{registry.username}}\" \n\tpassword = \"{{registry.password}}\" \n\tidentitytoken = \"{{registry.token}}\""
+
+echo "creating config file for containerd"
+containerd config default > /etc/containerd/config.toml
+sed -i '/\[plugins."io.containerd.grpc.v1.cri".registry\]/a '"${registryCredentials}"'' /etc/containerd/config.toml
+sed -i '/\[plugins."io.containerd.grpc.v1.cri".registry.mirrors\]/a '"${registryConfig}"'' /etc/containerd/config.toml
+
+echo "Restarting containerd service"
+systemctl restart containerd
+
 - path: /etc/systemd/system/setup-kube-system.service
 permissions: '0755'
 owner: root:root
 content: |
 [Unit]
-Requires=docker.service
-After=docker.service
+Requires=containerd.service
+After=containerd.service

 [Service]
 Type=simple
@@ -241,5 +260,9 @@ write_files:
 ExecStart=/opt/bin/deploy-kube-system

 runcmd:
+- chown -R cloud:cloud /home/cloud/.ssh
+- containerd config default > /etc/containerd/config.toml
+- sed -i '/\[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options\]/a '"\\ SystemdCgroup=true"'' /etc/containerd/config.toml
+- systemctl restart containerd
 - [ systemctl, start, setup-kube-system ]
 - [ systemctl, start, deploy-kube-system ]
@@ -93,7 +93,7 @@ if [ -d "$BINARIES_DIR" ]; then
 output=`ls ${BINARIES_DIR}/docker/`
 if [ "$output" != "" ]; then
 while read -r line; do
-docker load < "${BINARIES_DIR}/docker/$line"
+ctr image import "${BINARIES_DIR}/docker/$line"
 done <<< "$output"
 fi
 if [ -e "${BINARIES_DIR}/provider.yaml" ]; then

pom.xml
@@ -49,6 +49,7 @@
 <!-- keep in alphabetic order -->
 <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
 <project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
+<project.systemvm.template.version>4.16.1.0</project.systemvm.template.version>

 <!-- Build properties -->
 <cs.jdk.version>11</cs.jdk.version>
@@ -98,19 +98,18 @@ provider_conf_file="${working_dir}/provider.yaml"
 curl -sSL ${PROVIDER_URL} -o ${provider_conf_file}

 echo "Fetching k8s docker images..."
-docker -v
+ctr -v
 if [ $? -ne 0 ]; then
-echo "Installing docker..."
+echo "Installing containerd..."
 if [ -f /etc/redhat-release ]; then
 sudo yum -y remove docker-common docker container-selinux docker-selinux docker-engine
 sudo yum -y install lvm2 device-mapper device-mapper-persistent-data device-mapper-event device-mapper-libs device-mapper-event-libs
 sudo yum install -y http://mirror.centos.org/centos/7/extras/x86_64/Packages/container-selinux-2.107-3.el7.noarch.rpm
-sudo wget https://download.docker.com/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo && sudo yum -y install docker-ce
-sudo systemctl enable docker && sudo systemctl start docker
+sudo yum install -y containerd.io
 elif [ -f /etc/lsb-release ]; then
-sudo apt update && sudo apt install docker.io -y
-sudo systemctl enable docker && sudo systemctl start docker
+sudo apt update && sudo apt install containerd.io -y
 fi
+sudo systemctl enable containerd && sudo systemctl start containerd
 fi
 mkdir -p "${working_dir}/docker"
 output=`${k8s_dir}/kubeadm config images list --kubernetes-version=${RELEASE}`
@@ -130,11 +129,14 @@ provider_image=`grep "image:" ${provider_conf_file} | cut -d ':' -f2- | tr -d '
 output=`printf "%s\n" ${output} ${provider_image}`

 while read -r line; do
-echo "Downloading docker image $line ---"
-sudo docker pull "$line"
+echo "Downloading image $line ---"
+if [[ $line == kubernetesui* ]] || [[ $line == apache* ]]; then
+line="docker.io/${line}"
+fi
+sudo ctr image pull "$line"
 image_name=`echo "$line" | grep -oE "[^/]+$"`
-sudo docker save "$line" > "${working_dir}/docker/$image_name.tar"
-sudo docker image rm "$line"
+sudo ctr image export "${working_dir}/docker/$image_name.tar" "$line"
+sudo ctr image rm "$line"
 done <<< "$output"

 echo "Restore kubeadm permissions..."
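Unlike docker, ctr only accepts fully qualified image references, which is why the script prefixes the dashboard and provider images with docker.io/. The export/import round trip used for the offline binaries ISO looks roughly like this (the image tag is a placeholder):

    sudo ctr image pull docker.io/kubernetesui/dashboard:v2.3.1
    sudo ctr image export dashboard.tar docker.io/kubernetesui/dashboard:v2.3.1
    # on the node, the cloud-init script later loads the tarball back with:
    ctr image import dashboard.tar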
|
|||||||
@ -61,9 +61,7 @@ setup_k8s_node() {
|
|||||||
|
|
||||||
log_it "Starting cloud-init services"
|
log_it "Starting cloud-init services"
|
||||||
systemctl enable --now --no-block containerd
|
systemctl enable --now --no-block containerd
|
||||||
systemctl enable --now --no-block docker.socket
|
if [ -f /home/cloud/success ]; then
|
||||||
systemctl enable --now --no-block docker.service
|
|
||||||
if [ -f /home/core/success ]; then
|
|
||||||
systemctl stop cloud-init cloud-config cloud-final
|
systemctl stop cloud-init cloud-config cloud-final
|
||||||
systemctl disable cloud-init cloud-config cloud-final
|
systemctl disable cloud-init cloud-config cloud-final
|
||||||
else
|
else
|
||||||
|
|||||||
@@ -101,12 +101,12 @@ class TestKubernetesCluster(cloudstackTestCase):
 (cls.services["cks_kubernetes_versions"]["1.20.9"]["semanticversion"], cls.services["cks_kubernetes_versions"]["1.20.9"]["url"], e))
 if cls.setup_failed == False:
 try:
-cls.kubernetes_version_1_21_3 = cls.addKubernetesSupportedVersion(cls.services["cks_kubernetes_versions"]["1.21.3"])
-cls.kubernetes_version_ids.append(cls.kubernetes_version_1_21_3.id)
+cls.kubernetes_version_1_21_5 = cls.addKubernetesSupportedVersion(cls.services["cks_kubernetes_versions"]["1.21.5"])
+cls.kubernetes_version_ids.append(cls.kubernetes_version_1_21_5.id)
 except Exception as e:
 cls.setup_failed = True
 cls.debug("Failed to get Kubernetes version ISO in ready state, version=%s, url=%s, %s" %
-(cls.services["cks_kubernetes_versions"]["1.21.3"]["semanticversion"], cls.services["cks_kubernetes_versions"]["1.21.3"]["url"], e))
+(cls.services["cks_kubernetes_versions"]["1.21.5"]["semanticversion"], cls.services["cks_kubernetes_versions"]["1.21.5"]["url"], e))
 
 if cls.setup_failed == False:
 cks_offering_data = cls.services["cks_service_offering"]
@@ -349,7 +349,7 @@ class TestKubernetesCluster(cloudstackTestCase):
 if self.setup_failed == True:
 self.fail("Setup incomplete")
 global k8s_cluster
-k8s_cluster = self.getValidKubernetesCluster(version=self.kubernetes_version_1_21_3)
+k8s_cluster = self.getValidKubernetesCluster(version=self.kubernetes_version_1_21_5)
 
 self.debug("Downgrading Kubernetes cluster with ID: %s to a lower version. This should fail!" % k8s_cluster.id)
 
@@ -362,7 +362,7 @@ class TestKubernetesCluster(cloudstackTestCase):
 self.debug("Upgrading Kubernetes cluster with invalid Kubernetes supported version check successful, API failure: %s" % e)
 self.deleteKubernetesClusterAndVerify(k8s_cluster.id, False, True)
 
-self.verifyKubernetesClusterUpgrade(k8s_cluster, self.kubernetes_version_1_21_3.id)
+self.verifyKubernetesClusterUpgrade(k8s_cluster, self.kubernetes_version_1_21_5.id)
 return
 
 @attr(tags=["advanced", "smoke"], required_hardware="true")
@@ -381,12 +381,12 @@ class TestKubernetesCluster(cloudstackTestCase):
 time.sleep(self.services["sleep"])
 self.debug("Upgrading Kubernetes cluster with ID: %s" % k8s_cluster.id)
 try:
-k8s_cluster = self.upgradeKubernetesCluster(k8s_cluster.id, self.kubernetes_version_1_21_3.id)
+k8s_cluster = self.upgradeKubernetesCluster(k8s_cluster.id, self.kubernetes_version_1_21_5.id)
 except Exception as e:
 self.deleteKubernetesClusterAndVerify(k8s_cluster.id, False, True)
 self.fail("Failed to upgrade Kubernetes cluster due to: %s" % e)
 
-self.verifyKubernetesClusterUpgrade(k8s_cluster, self.kubernetes_version_1_21_3.id)
+self.verifyKubernetesClusterUpgrade(k8s_cluster, self.kubernetes_version_1_21_5.id)
 return
 
 @attr(tags=["advanced", "smoke"], required_hardware="true")
@@ -434,7 +434,7 @@ class TestKubernetesCluster(cloudstackTestCase):
 if self.setup_failed == True:
 self.fail("Setup incomplete")
 global k8s_cluster
-k8s_cluster = self.getValidKubernetesCluster(version=self.kubernetes_version_1_21_3)
+k8s_cluster = self.getValidKubernetesCluster(version=self.kubernetes_version_1_21_5)
 
 self.debug("Autoscaling Kubernetes cluster with ID: %s" % k8s_cluster.id)
 try:
@@ -535,12 +535,12 @@ class TestKubernetesCluster(cloudstackTestCase):
 
 self.debug("Upgrading HA Kubernetes cluster with ID: %s" % k8s_cluster.id)
 try:
-k8s_cluster = self.upgradeKubernetesCluster(k8s_cluster.id, self.kubernetes_version_1_21_3.id)
+k8s_cluster = self.upgradeKubernetesCluster(k8s_cluster.id, self.kubernetes_version_1_21_5.id)
 except Exception as e:
 self.deleteKubernetesClusterAndVerify(k8s_cluster.id, False, True)
 self.fail("Failed to upgrade Kubernetes HA cluster due to: %s" % e)
 
-self.verifyKubernetesClusterUpgrade(k8s_cluster, self.kubernetes_version_1_21_3.id)
+self.verifyKubernetesClusterUpgrade(k8s_cluster, self.kubernetes_version_1_21_5.id)
 self.debug("Kubernetes cluster with ID: %s successfully upgraded" % k8s_cluster.id)
 return
 
@@ -19,7 +19,7 @@
 set -e
 set -x
 
-CLOUDSTACK_RELEASE=4.16.0
+CLOUDSTACK_RELEASE=4.16.1
 
 function configure_apache2() {
 # Enable ssl, rewrite and auth
@@ -50,9 +50,12 @@ function configure_cacerts() {
 CDIR=$(pwd)
 cd /tmp
 # Add LetsEncrypt ca-cert
-wget https://letsencrypt.org/certs/lets-encrypt-x3-cross-signed.der
-keytool -trustcacerts -keystore /etc/ssl/certs/java/cacerts -storepass changeit -noprompt -importcert -alias letsencryptauthorityx3cross -file lets-encrypt-x3-cross-signed.der
-rm -f lets-encrypt-x3-cross-signed.der
+wget https://letsencrypt.org/certs/lets-encrypt-r3.der
+wget https://letsencrypt.org/certs/isrgrootx1.der
+keytool -trustcacerts -keystore /etc/ssl/certs/java/cacerts -storepass changeit -noprompt -importcert -alias letsencryptauthorityr3 -file lets-encrypt-r3.der
+keytool -trustcacerts -keystore /etc/ssl/certs/java/cacerts -storepass changeit -noprompt -importcert -alias letsencryptauthorityx1 -file isrgrootx1.der
+rm -f lets-encrypt-r3.der isrgrootx1.der
 cd $CDIR
 }
 
@@ -126,10 +129,6 @@ function configure_services() {
 
 # Disable container services
 systemctl disable containerd
-systemctl disable docker.service
-systemctl stop docker.service
-systemctl disable docker.socket
-systemctl stop docker.socket
 
 # Disable cloud init by default
 cat <<EOF > /etc/cloud/cloud.cfg.d/cloudstack.cfg
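The configure_cacerts change replaces the retired X3 cross-signed certificate with Let's Encrypt R3 and ISRG Root X1. A quick way to confirm that the two aliases actually landed in the system-wide Java truststore, assuming the default changeit store password used above:

# List the newly imported aliases; keystore path, aliases and password are taken from the hunk above.
keytool -list -keystore /etc/ssl/certs/java/cacerts -storepass changeit -alias letsencryptauthorityr3
keytool -list -keystore /etc/ssl/certs/java/cacerts -storepass changeit -alias letsencryptauthorityx1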
@@ -98,7 +98,7 @@ function install_packages() {
 apt-key fingerprint 0EBFCD88
 add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/debian $(lsb_release -cs) stable"
 apt-get update
-${apt_get} install docker-ce docker-ce-cli containerd.io
+${apt_get} install containerd.io
 
 apt_clean
 
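install_packages now pulls in only containerd.io from the Docker repository. Since this change set also adds a cgroup configuration for containerd, a common way to generate such a config and switch it to the systemd cgroup driver is sketched below; this illustrates the general technique and is not necessarily the exact file shipped in the template:

# Generate a default containerd config and enable the systemd cgroup driver.
containerd config default | sudo tee /etc/containerd/config.toml > /dev/null
sudo sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml
sudo systemctl restart containerd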
@@ -27,8 +27,8 @@
 "format": "qcow2",
 "headless": true,
 "http_directory": "http",
-"iso_checksum": "sha512:5f6aed67b159d7ccc1a90df33cc8a314aa278728a6f50707ebf10c02e46664e383ca5fa19163b0a1c6a4cb77a39587881584b00b45f512b4a470f1138eaa1801",
-"iso_url": "https://cdimage.debian.org/debian-cd/11.0.0/amd64/iso-cd/debian-11.0.0-amd64-netinst.iso",
+"iso_checksum": "sha512:c685b85cf9f248633ba3cd2b9f9e781fa03225587e0c332aef2063f6877a1f0622f56d44cf0690087b0ca36883147ecb5593e3da6f965968402cdbdf12f6dd74",
+"iso_url": "https://cdimage.debian.org/debian-cd/11.2.0/amd64/iso-cd/debian-11.2.0-amd64-netinst.iso",
 "net_device": "virtio-net",
 "output_directory": "../dist",
 "qemuargs": [
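Before a packer build it is worth confirming the downloaded installer ISO against the checksum declared in the template. The URL and digest below are taken verbatim from the hunk above (once 11.2.0 is superseded, the image may move to the Debian archive):

# Download the netinst ISO and verify its SHA-512 digest.
wget https://cdimage.debian.org/debian-cd/11.2.0/amd64/iso-cd/debian-11.2.0-amd64-netinst.iso
echo "c685b85cf9f248633ba3cd2b9f9e781fa03225587e0c332aef2063f6877a1f0622f56d44cf0690087b0ca36883147ecb5593e3da6f965968402cdbdf12f6dd74  debian-11.2.0-amd64-netinst.iso" | sha512sum -c -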
@@ -47,6 +47,7 @@
 "label.accept.project.invitation": "Accept project invitation",
 "label.access": "Access",
 "label.accesskey": "Access Key",
+"label.acess.kubernetes.nodes": "Access Kubernetes Nodes",
 "label.account": "Account",
 "label.account.and.security.group": "Account - Security group",
 "label.account.details": "Account details",
@@ -1233,6 +1234,7 @@
 "label.keypair": "SSH Key Pair",
 "label.kubeconfig.cluster": "Kubernetes Cluster Config",
 "label.kubernetes": "Kubernetes",
+"label.kubernetes.access.details": "The kubernetes nodes can be accessed via ssh using: <br> <code><b> ssh -i [ssh_key] -p [port_number] cloud@[public_ip_address] </b></code> <br><br> where, <br> <code><b>ssh_key:</b></code> points to the ssh private key file corresponding to the key that was associated while creating the Kubernetes cluster. If no ssh key was provided during Kubernetes cluster creation, use the ssh private key of the management server. <br> <code><b>port_number:</b></code> can be obtained from the Port Forwarding Tab (Public Port column)",
 "label.kubernetes.cluster": "Kubernetes cluster",
 "label.kubernetes.cluster.create": "Create Kubernetes Cluster",
 "label.kubernetes.cluster.delete": "Delete Kubernetes Cluster",
@@ -100,7 +100,6 @@ export default {
 return (['Admin'].includes(store.userInfo.roletype) || // If admin or owner or belongs to current project
 (record.domainid === store.userInfo.domainid && record.account === store.userInfo.account) ||
 (record.domainid === store.userInfo.domainid && record.projectid && store.project && store.project.id && record.projectid === store.project.id)) &&
-record.templatetype !== 'SYSTEM' &&
 record.isready
 },
 popup: true,
@@ -226,14 +226,6 @@
 }]"
 :placeholder="apiParams.dockerregistryurl.description"/>
 </a-form-item>
-<a-form-item>
-<tooltip-label slot="label" :title="$t('label.email')" :tooltip="apiParams.dockerregistryemail.description"/>
-<a-input
-v-decorator="['dockerregistryemail', {
-rules: [{ required: true, message: $t('label.required') }]
-}]"
-:placeholder="apiParams.dockerregistryemail.description"/>
-</a-form-item>
 </div>
 </div>
 <div :span="24" class="action-button">
@@ -88,6 +88,9 @@
 </a-timeline>
 <p>{{ $t('label.more.access.dashboard.ui') }}, <a href="https://kubernetes.io/docs/tasks/access-application-cluster/web-ui-dashboard/#accessing-the-dashboard-ui">https://kubernetes.io/docs/tasks/access-application-cluster/web-ui-dashboard/#accessing-the-dashboard-ui</a></p>
 </a-card>
+<a-card :title="$t('label.acess.kubernetes.nodes')">
+<p v-html="$t('label.kubernetes.access.details')"></p>
+</a-card>
 </a-tab-pane>
 <a-tab-pane :tab="$t('label.instances')" key="instances">
 <a-table
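The new Access tab card renders label.kubernetes.access.details, which documents SSH access to the CKS nodes as the cloud user. A concrete invocation might look like the line below, where the key path, port 2222 and address 10.1.1.10 are placeholders; the real port comes from the cluster's Port Forwarding tab:

# SSH to a Kubernetes node through the cluster's public IP and forwarded port (all values illustrative).
ssh -i ~/.ssh/cks-cluster.key -p 2222 cloud@10.1.1.10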
@@ -18,19 +18,17 @@
 */
 package com.cloud.utils;
 
-import java.io.UnsupportedEncodingException;
-import java.security.InvalidKeyException;
-import java.security.NoSuchAlgorithmException;
-
-import javax.crypto.Mac;
-import javax.crypto.spec.SecretKeySpec;
+import com.cloud.utils.exception.CloudRuntimeException;
 
 import org.apache.commons.codec.binary.Base64;
 import org.apache.log4j.Logger;
 import org.jasypt.encryption.pbe.PBEStringEncryptor;
 import org.jasypt.encryption.pbe.StandardPBEStringEncryptor;
 
-import com.cloud.utils.exception.CloudRuntimeException;
+import javax.crypto.Mac;
+import javax.crypto.spec.SecretKeySpec;
+import java.io.UnsupportedEncodingException;
+import java.security.InvalidKeyException;
+import java.security.NoSuchAlgorithmException;
 
 public class EncryptionUtil {
 public static final Logger s_logger = Logger.getLogger(EncryptionUtil.class.getName());
@@ -19,6 +19,14 @@
 
 package com.cloud.utils.script;
 
+import com.cloud.utils.PropertiesUtil;
+import com.cloud.utils.concurrency.NamedThreadFactory;
+import com.cloud.utils.script.OutputInterpreter.TimedOutLogger;
+import org.apache.cloudstack.utils.security.KeyStoreUtils;
+import org.apache.commons.io.IOUtils;
+import org.apache.log4j.Logger;
+import org.joda.time.Duration;
+
 import java.io.BufferedReader;
 import java.io.File;
 import java.io.IOException;
@@ -37,15 +45,6 @@ import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.ScheduledFuture;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.cloudstack.utils.security.KeyStoreUtils;
-import org.apache.commons.io.IOUtils;
-import org.apache.log4j.Logger;
-import org.joda.time.Duration;
-
-import com.cloud.utils.PropertiesUtil;
-import com.cloud.utils.concurrency.NamedThreadFactory;
-import com.cloud.utils.script.OutputInterpreter.TimedOutLogger;
 
 public class Script implements Callable<String> {
 private static final Logger s_logger = Logger.getLogger(Script.class);
 
@@ -320,7 +319,7 @@ public class Script implements Callable<String> {
 try {
 _logger.trace("Checking exit value of process");
 _process.exitValue();
-_logger.trace("Script ran within the alloted time");
+_logger.trace("Script ran within the allotted time");
 } catch (IllegalThreadStateException e) {
 _logger.warn("Interrupting script.");
 _isTimeOut = true;
@@ -19,16 +19,15 @@
 
 package com.cloud.utils.ssh;
 
-import java.io.IOException;
-import java.io.InputStream;
+import com.google.common.base.Strings;
+import com.trilead.ssh2.ChannelCondition;
+import com.trilead.ssh2.Session;
 import org.apache.cloudstack.utils.security.KeyStoreUtils;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.log4j.Logger;
 
-import com.google.common.base.Strings;
-import com.trilead.ssh2.ChannelCondition;
-import com.trilead.ssh2.Session;
+import java.io.IOException;
+import java.io.InputStream;
 
 public class SSHCmdHelper {
 private static final Logger s_logger = Logger.getLogger(SSHCmdHelper.class);
@@ -242,8 +241,8 @@ public class SSHCmdHelper {
 }
 return result;
 } catch (Exception e) {
-s_logger.debug("Ssh executed failed", e);
-throw new SshException("Ssh executed failed " + e.getMessage());
+s_logger.debug("SSH execution failed", e);
+throw new SshException("SSH execution failed " + e.getMessage());
 } finally {
 if (sshSession != null)
 sshSession.close();
@@ -16,18 +16,23 @@
 // under the License.
 package org.apache.cloudstack.utils.security;
 
+import com.cloud.utils.exception.CloudRuntimeException;
 import org.apache.commons.lang.StringUtils;
+import org.apache.log4j.Logger;
 
+import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
 import java.math.BigInteger;
+import java.nio.file.Files;
+import java.nio.file.Paths;
 import java.security.MessageDigest;
 import java.security.NoSuchAlgorithmException;
 import java.util.HashMap;
 import java.util.Map;
 
 public class DigestHelper {
+public static final Logger s_logger = Logger.getLogger(DigestHelper.class.getName());
 public static ChecksumValue digest(String algorithm, InputStream is) throws NoSuchAlgorithmException, IOException {
 MessageDigest digest = MessageDigest.getInstance(algorithm);
 ChecksumValue checksum = null;
@@ -131,4 +136,14 @@ public class DigestHelper {
 public static String getHashValueFromChecksumValue(String checksum) {
 return isAlgorithmPresent(checksum) ? new ChecksumValue(checksum).getChecksum() : checksum;
 }
+
+public static String calculateChecksum(File file) {
+try (InputStream is = Files.newInputStream(Paths.get(file.getPath()))) {
+return org.apache.commons.codec.digest.DigestUtils.md5Hex(is);
+} catch (IOException e) {
+String errMsg = "Failed to calculate template checksum";
+s_logger.error(errMsg, e);
+throw new CloudRuntimeException(errMsg, e);
+}
+}
 }
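The new DigestHelper.calculateChecksum() returns the MD5 hex digest of a template file via commons-codec. For a quick spot check of the same value from a shell, against an illustrative template file name:

# Print only the MD5 digest, matching what calculateChecksum() returns (file name is illustrative).
md5sum systemvmtemplate-4.16.1-kvm.qcow2 | awk '{print $1}'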