Mirror of https://github.com/apache/cloudstack.git

commit c599011ef5
Merge remote-tracking branch 'apache/4.18'
@@ -14,6 +14,8 @@
  */
 package com.cloud.agent.properties;
 
+import org.apache.cloudstack.utils.security.KeyStoreUtils;
+
 /**
  * Class of constant agent's properties available to configure on
  * "agent.properties".
@@ -779,6 +781,13 @@ public class AgentProperties{
      */
     public static final Property<Long> KVM_HEARTBEAT_CHECKER_TIMEOUT = new Property<>("kvm.heartbeat.checker.timeout", 360000L);
 
+    /**
+     * Keystore passphrase
+     * Data type: String.<br>
+     * Default value: <code>null</code>
+     */
+    public static final Property<String> KEYSTORE_PASSPHRASE = new Property<>(KeyStoreUtils.KS_PASSPHRASE_PROPERTY, null, String.class);
+
     public static class Property <T>{
         private String name;
         private T defaultValue;
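
For orientation, a minimal sketch of how the new constant would be consumed, matching the AgentPropertiesFileHandler.getPropertyValue(...) call that appears in the LibvirtComputingResource hunk further down. The wrapper class and the import path of AgentPropertiesFileHandler are assumptions (it is presumed to sit in the same com.cloud.agent.properties package as AgentProperties):

    import com.cloud.agent.properties.AgentProperties;
    import com.cloud.agent.properties.AgentPropertiesFileHandler;

    public class KeystorePassphraseRead {
        public static void main(String[] args) {
            // Looks up KeyStoreUtils.KS_PASSPHRASE_PROPERTY in agent.properties;
            // because the declared default is null, an absent key yields null.
            String passphrase = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.KEYSTORE_PASSPHRASE);
            System.out.println(passphrase == null ? "keystore passphrase not set" : "keystore passphrase present");
        }
    }
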
@@ -1478,8 +1478,8 @@ public class VolumeServiceImpl implements VolumeService {
                 createManagedVolumeCloneTemplateAsync(volumeInfo, templateOnPrimary, destPrimaryDataStore, future);
             } else {
                 // We have a template on PowerFlex primary storage. Create new volume and copy to it.
-                s_logger.debug("Copying the template to the volume on primary storage");
-                createManagedVolumeCopyManagedTemplateAsync(volumeInfo, destPrimaryDataStore, templateOnPrimary, destHost, future);
+                createManagedVolumeCopyManagedTemplateAsyncWithLock(volumeInfo, destPrimaryDataStore, templateOnPrimary,
+                        destHost, future, destDataStoreId, srcTemplateInfo.getId());
             }
         } else {
             s_logger.debug("Primary storage does not support cloning or no support for UUID resigning on the host side; copying the template normally");
@@ -1490,6 +1490,32 @@ public class VolumeServiceImpl implements VolumeService {
         return future;
     }
 
+    private void createManagedVolumeCopyManagedTemplateAsyncWithLock(VolumeInfo volumeInfo, PrimaryDataStore destPrimaryDataStore, TemplateInfo templateOnPrimary,
+            Host destHost, AsyncCallFuture<VolumeApiResult> future, long destDataStoreId, long srcTemplateId) {
+        GlobalLock lock = null;
+        try {
+            String tmplIdManagedPoolIdDestinationHostLockString = "tmplId:" + srcTemplateId + "managedPoolId:" + destDataStoreId + "destinationHostId:" + destHost.getId();
+            lock = GlobalLock.getInternLock(tmplIdManagedPoolIdDestinationHostLockString);
+            if (lock == null) {
+                throw new CloudRuntimeException("Unable to create volume from template, couldn't get global lock on " + tmplIdManagedPoolIdDestinationHostLockString);
+            }
+
+            int storagePoolMaxWaitSeconds = NumbersUtil.parseInt(configDao.getValue(Config.StoragePoolMaxWaitSeconds.key()), 3600);
+            if (!lock.lock(storagePoolMaxWaitSeconds)) {
+                s_logger.debug("Unable to create volume from template, couldn't lock on " + tmplIdManagedPoolIdDestinationHostLockString);
+                throw new CloudRuntimeException("Unable to create volume from template, couldn't lock on " + tmplIdManagedPoolIdDestinationHostLockString);
+            }
+
+            s_logger.debug("Copying the template to the volume on primary storage");
+            createManagedVolumeCopyManagedTemplateAsync(volumeInfo, destPrimaryDataStore, templateOnPrimary, destHost, future);
+        } finally {
+            if (lock != null) {
+                lock.unlock();
+                lock.releaseRef();
+            }
+        }
+    }
+
     private boolean computeSupportsVolumeClone(long zoneId, HypervisorType hypervisorType) {
         if (HypervisorType.VMware.equals(hypervisorType) || HypervisorType.KVM.equals(hypervisorType)) {
             return true;
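
The locking discipline in the new method (interned named lock, bounded wait, unconditional cleanup) is the part worth internalizing. Below is a minimal sketch of that same pattern in isolation, assuming CloudStack's com.cloud.utils.db.GlobalLock; the helper name, lock key, timeout, and Runnable body are illustrative, not part of the diff:

    import com.cloud.utils.db.GlobalLock;

    public class GlobalLockSketch {
        // Runs 'action' while holding the named global lock; mirrors the
        // acquire/throw/finally structure of the method added above.
        static void runWithGlobalLock(String lockKey, int timeoutSeconds, Runnable action) {
            GlobalLock lock = null;
            try {
                lock = GlobalLock.getInternLock(lockKey);
                if (lock == null) {
                    throw new IllegalStateException("couldn't get global lock on " + lockKey);
                }
                if (!lock.lock(timeoutSeconds)) {
                    throw new IllegalStateException("couldn't lock on " + lockKey);
                }
                action.run();
            } finally {
                if (lock != null) {
                    lock.unlock();   // tolerated even when lock() failed, as in the diff
                    lock.releaseRef();
                }
            }
        }
    }

Because the key combines template id, managed pool id, and destination host id, concurrent copies of the same template to the same pool and host are serialized while unrelated copies proceed in parallel.
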
@@ -1044,7 +1044,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
             }
         }
 
-        enableSSLForKvmAgent(params);
+        enableSSLForKvmAgent();
         configureLocalStorage();
 
         /* Directory to use for Qemu sockets like for the Qemu Guest Agent */
@@ -1353,13 +1353,13 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
         }
     }
 
-    private void enableSSLForKvmAgent(final Map<String, Object> params) {
+    private void enableSSLForKvmAgent() {
         final File keyStoreFile = PropertiesUtil.findConfigFile(KeyStoreUtils.KS_FILENAME);
         if (keyStoreFile == null) {
             s_logger.info("Failed to find keystore file: " + KeyStoreUtils.KS_FILENAME);
             return;
         }
-        String keystorePass = (String)params.get(KeyStoreUtils.KS_PASSPHRASE_PROPERTY);
+        String keystorePass = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.KEYSTORE_PASSPHRASE);
         if (StringUtils.isBlank(keystorePass)) {
             s_logger.info("Failed to find passphrase for keystore: " + KeyStoreUtils.KS_FILENAME);
             return;
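
The net effect is that the keystore passphrase is no longer taken from the params map handed over at agent startup but read from the agent's local agent.properties file. A hypothetical entry is sketched below; it assumes KeyStoreUtils.KS_PASSPHRASE_PROPERTY resolves to the key shown, and the value is a placeholder:

    # agent.properties on the KVM host (illustrative)
    keystore.passphrase=<passphrase>
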
@@ -493,39 +493,8 @@ public class LinstorStorageAdaptor implements StorageAdaptor {
     }
 
     public long getCapacity(LinstorStoragePool pool) {
-        DevelopersApi linstorApi = getLinstorAPI(pool);
         final String rscGroupName = pool.getResourceGroup();
-        try {
-            List<ResourceGroup> rscGrps = linstorApi.resourceGroupList(
-                Collections.singletonList(rscGroupName),
-                null,
-                null,
-                null);
-
-            if (rscGrps.isEmpty()) {
-                final String errMsg = String.format("Linstor: Resource group '%s' not found", rscGroupName);
-                s_logger.error(errMsg);
-                throw new CloudRuntimeException(errMsg);
-            }
-
-            List<StoragePool> storagePools = linstorApi.viewStoragePools(
-                Collections.emptyList(),
-                rscGrps.get(0).getSelectFilter().getStoragePoolList(),
-                null,
-                null,
-                null
-            );
-
-            final long capacity = storagePools.stream()
-                .filter(sp -> sp.getProviderKind() != ProviderKind.DISKLESS)
-                .mapToLong(sp -> sp.getTotalCapacity() != null ? sp.getTotalCapacity() : 0)
-                .sum() * 1024; // linstor uses kiB
-            s_logger.debug("Linstor: GetCapacity() -> " + capacity);
-            return capacity;
-        } catch (ApiException apiEx) {
-            s_logger.error(apiEx.getMessage());
-            throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx);
-        }
+        return LinstorUtil.getCapacityBytes(pool.getSourceHost(), rscGroupName);
     }
 
     public long getAvailable(LinstorStoragePool pool) {
@@ -554,7 +523,7 @@ public class LinstorStorageAdaptor implements StorageAdaptor {
 
         final long free = storagePools.stream()
             .filter(sp -> sp.getProviderKind() != ProviderKind.DISKLESS)
-            .mapToLong(StoragePool::getFreeCapacity).sum() * 1024; // linstor uses KiB
+            .mapToLong(sp -> sp.getFreeCapacity() != null ? sp.getFreeCapacity() : 0L).sum() * 1024; // linstor uses KiB
 
         s_logger.debug("Linstor: getAvailable() -> " + free);
         return free;
@@ -590,7 +559,9 @@ public class LinstorStorageAdaptor implements StorageAdaptor {
 
         final long used = storagePools.stream()
             .filter(sp -> sp.getProviderKind() != ProviderKind.DISKLESS)
-            .mapToLong(sp -> sp.getTotalCapacity() - sp.getFreeCapacity()).sum() * 1024; // linstor uses Kib
+            .mapToLong(sp -> sp.getTotalCapacity() != null && sp.getFreeCapacity() != null ?
+                    sp.getTotalCapacity() - sp.getFreeCapacity() : 0L)
+            .sum() * 1024; // linstor uses Kib
         s_logger.debug("Linstor: getUsed() -> " + used);
         return used;
     } catch (ApiException apiEx) {
@@ -161,7 +161,8 @@ public class LinstorUtil {
 
         return storagePools.stream()
             .filter(sp -> sp.getProviderKind() != ProviderKind.DISKLESS)
-            .mapToLong(StoragePool::getTotalCapacity).sum() * 1024; // linstor uses kiB
+            .mapToLong(sp -> sp.getTotalCapacity() != null ? sp.getTotalCapacity() : 0L)
+            .sum() * 1024; // linstor uses kiB
     } catch (ApiException apiEx) {
         s_logger.error(apiEx.getMessage());
         throw new CloudRuntimeException(apiEx);
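
All three LINSTOR hunks above apply the same fix: a method reference like StoragePool::getTotalCapacity auto-unboxes a Long that the API may return as null, which throws a NullPointerException inside mapToLong; the guarded lambdas substitute 0L instead. A self-contained sketch of the failure mode and the fix, using a hypothetical stub in place of the LINSTOR StoragePool bean:

    import java.util.Arrays;
    import java.util.List;

    public class NullSafeCapacitySum {
        // Hypothetical stand-in for the LINSTOR API's StoragePool bean; only the
        // nullable Long getter used by the hunks above is modeled.
        static class StoragePool {
            private final Long totalCapacity;
            StoragePool(Long totalCapacity) { this.totalCapacity = totalCapacity; }
            Long getTotalCapacity() { return totalCapacity; }
        }

        public static void main(String[] args) {
            List<StoragePool> pools = Arrays.asList(new StoragePool(4096L), new StoragePool(null));
            // .mapToLong(StoragePool::getTotalCapacity) would NPE on the null entry
            // while unboxing; the guarded lambda substitutes 0L, as the diff does.
            long capacityBytes = pools.stream()
                    .mapToLong(sp -> sp.getTotalCapacity() != null ? sp.getTotalCapacity() : 0L)
                    .sum() * 1024; // LINSTOR reports sizes in KiB
            System.out.println(capacityBytes); // prints 4194304 (4096 KiB in bytes)
        }
    }
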
@@ -145,6 +145,9 @@ if [ -z "${kubeadm_file_permissions}" ]; then
 fi
 chmod ${kubeadm_file_permissions} "${working_dir}/k8s/kubeadm"
 
+echo "Updating imagePullPolicy to IfNotPresent in yaml files..."
+sed -i "s/imagePullPolicy:.*/imagePullPolicy: IfNotPresent/g" ${working_dir}/*.yaml
+
 mkisofs -o "${output_dir}/${build_name}" -J -R -l "${iso_dir}"
 
 rm -rf "${iso_dir}"
@@ -2260,11 +2260,23 @@ test_data = {
             "url": "http://download.cloudstack.org/cks/setup-1.26.0.iso",
             "mincpunumber": 2,
             "minmemory": 2048
         },
+        "1.27.8": {
+            "semanticversion": "1.27.8",
+            "url": "http://download.cloudstack.org/cks/setup-1.27.8.iso",
+            "mincpunumber": 2,
+            "minmemory": 2048
+        },
+        "1.28.4": {
+            "semanticversion": "1.28.4",
+            "url": "http://download.cloudstack.org/cks/setup-1.28.4.iso",
+            "mincpunumber": 2,
+            "minmemory": 2048
+        }
     },
-    "cks_kubernetes_version": "1.26.0",
-    "cks_kubernetes_version_upgrade_from": "1.25.0",
-    "cks_kubernetes_version_upgrade_to": "1.26.0",
+    "cks_kubernetes_version": "1.28.4",
+    "cks_kubernetes_version_upgrade_from": "1.27.8",
+    "cks_kubernetes_version_upgrade_to": "1.28.4",
     "cks_service_offering": {
         "name": "CKS-Instance",
         "displaytext": "CKS Instance",