From 4e7c6682fd4959787b6422e9e5ae5560f571d2d5 Mon Sep 17 00:00:00 2001 From: Suresh Kumar Anaparti Date: Wed, 12 Jun 2024 11:19:03 +0530 Subject: [PATCH 01/13] While starting VM with considerlasthost enabled, don't load host tags/details for the last host when it doesn't exist (#9037) --- .../deploy/DeploymentPlanningManagerImpl.java | 168 +++++++++--------- 1 file changed, 85 insertions(+), 83 deletions(-) diff --git a/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java b/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java index 0ef462bb96c..b70530ef6db 100644 --- a/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java +++ b/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java @@ -415,105 +415,107 @@ StateListener, Configurable { s_logger.debug("This VM has last host_id specified, trying to choose the same host: " + vm.getLastHostId()); HostVO host = _hostDao.findById(vm.getLastHostId()); - _hostDao.loadHostTags(host); - _hostDao.loadDetails(host); ServiceOfferingDetailsVO offeringDetails = null; if (host == null) { s_logger.debug("The last host of this VM cannot be found"); - } else if (avoids.shouldAvoid(host)) { - s_logger.debug("The last host of this VM is in avoid set"); - } else if (plan.getClusterId() != null && host.getClusterId() != null - && !plan.getClusterId().equals(host.getClusterId())) { - s_logger.debug("The last host of this VM cannot be picked as the plan specifies different clusterId: " - + plan.getClusterId()); - } else if (_capacityMgr.checkIfHostReachMaxGuestLimit(host)) { - s_logger.debug("The last Host, hostId: " + host.getId() + - " already has max Running VMs(count includes system VMs), skipping this and trying other available hosts"); - } else if ((offeringDetails = _serviceOfferingDetailsDao.findDetail(offering.getId(), GPU.Keys.vgpuType.toString())) != null) { - ServiceOfferingDetailsVO groupName = _serviceOfferingDetailsDao.findDetail(offering.getId(), GPU.Keys.pciDevice.toString()); - if(!_resourceMgr.isGPUDeviceAvailable(host.getId(), groupName.getValue(), offeringDetails.getValue())){ - s_logger.debug("The last host of this VM does not have required GPU devices available"); - } - } else if (volumesRequireEncryption && !Boolean.parseBoolean(host.getDetail(Host.HOST_VOLUME_ENCRYPTION))) { - s_logger.warn(String.format("The last host of this VM %s does not support volume encryption, which is required by this VM.", host)); } else { - if (host.getStatus() == Status.Up) { - if (checkVmProfileAndHost(vmProfile, host)) { - long cluster_id = host.getClusterId(); - ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id, - "cpuOvercommitRatio"); - ClusterDetailsVO cluster_detail_ram = _clusterDetailsDao.findDetail(cluster_id, - "memoryOvercommitRatio"); - Float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue()); - Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue()); + _hostDao.loadHostTags(host); + _hostDao.loadDetails(host); + if (avoids.shouldAvoid(host)) { + s_logger.debug("The last host of this VM is in avoid set"); + } else if (plan.getClusterId() != null && host.getClusterId() != null + && !plan.getClusterId().equals(host.getClusterId())) { + s_logger.debug("The last host of this VM cannot be picked as the plan specifies different clusterId: " + + plan.getClusterId()); + } else if (_capacityMgr.checkIfHostReachMaxGuestLimit(host)) { + s_logger.debug("The last Host, hostId: " + host.getId() + + " already has max 
Running VMs(count includes system VMs), skipping this and trying other available hosts"); + } else if ((offeringDetails = _serviceOfferingDetailsDao.findDetail(offering.getId(), GPU.Keys.vgpuType.toString())) != null) { + ServiceOfferingDetailsVO groupName = _serviceOfferingDetailsDao.findDetail(offering.getId(), GPU.Keys.pciDevice.toString()); + if(!_resourceMgr.isGPUDeviceAvailable(host.getId(), groupName.getValue(), offeringDetails.getValue())){ + s_logger.debug("The last host of this VM does not have required GPU devices available"); + } + } else if (volumesRequireEncryption && !Boolean.parseBoolean(host.getDetail(Host.HOST_VOLUME_ENCRYPTION))) { + s_logger.warn(String.format("The last host of this VM %s does not support volume encryption, which is required by this VM.", host)); + } else { + if (host.getStatus() == Status.Up) { + if (checkVmProfileAndHost(vmProfile, host)) { + long cluster_id = host.getClusterId(); + ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id, + "cpuOvercommitRatio"); + ClusterDetailsVO cluster_detail_ram = _clusterDetailsDao.findDetail(cluster_id, + "memoryOvercommitRatio"); + Float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue()); + Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue()); - boolean hostHasCpuCapability, hostHasCapacity = false; - hostHasCpuCapability = _capacityMgr.checkIfHostHasCpuCapability(host.getId(), offering.getCpu(), offering.getSpeed()); + boolean hostHasCpuCapability, hostHasCapacity = false; + hostHasCpuCapability = _capacityMgr.checkIfHostHasCpuCapability(host.getId(), offering.getCpu(), offering.getSpeed()); - if (hostHasCpuCapability) { - // first check from reserved capacity - hostHasCapacity = _capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, true, cpuOvercommitRatio, memoryOvercommitRatio, true); + if (hostHasCpuCapability) { + // first check from reserved capacity + hostHasCapacity = _capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, true, cpuOvercommitRatio, memoryOvercommitRatio, true); - // if not reserved, check the free capacity - if (!hostHasCapacity) - hostHasCapacity = _capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, false, cpuOvercommitRatio, memoryOvercommitRatio, true); - } - - boolean displayStorage = getDisplayStorageFromVmProfile(vmProfile); - if (hostHasCapacity - && hostHasCpuCapability) { - s_logger.debug("The last host of this VM is UP and has enough capacity"); - s_logger.debug("Now checking for suitable pools under zone: " + host.getDataCenterId() - + ", pod: " + host.getPodId() + ", cluster: " + host.getClusterId()); - - Pod pod = _podDao.findById(host.getPodId()); - Cluster cluster = _clusterDao.findById(host.getClusterId()); - if (vm.getHypervisorType() == HypervisorType.BareMetal) { - DeployDestination dest = new DeployDestination(dc, pod, cluster, host, new HashMap(), displayStorage); - s_logger.debug("Returning Deployment Destination: " + dest); - return dest; + // if not reserved, check the free capacity + if (!hostHasCapacity) + hostHasCapacity = _capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, false, cpuOvercommitRatio, memoryOvercommitRatio, true); } - // search for storage under the zone, pod, cluster - // of - // the last host. 
- DataCenterDeployment lastPlan = new DataCenterDeployment(host.getDataCenterId(), - host.getPodId(), host.getClusterId(), host.getId(), plan.getPoolId(), null); - Pair>, List> result = findSuitablePoolsForVolumes( - vmProfile, lastPlan, avoids, HostAllocator.RETURN_UPTO_ALL); - Map> suitableVolumeStoragePools = result.first(); - List readyAndReusedVolumes = result.second(); + boolean displayStorage = getDisplayStorageFromVmProfile(vmProfile); + if (hostHasCapacity + && hostHasCpuCapability) { + s_logger.debug("The last host of this VM is UP and has enough capacity"); + s_logger.debug("Now checking for suitable pools under zone: " + host.getDataCenterId() + + ", pod: " + host.getPodId() + ", cluster: " + host.getClusterId()); - // choose the potential pool for this VM for this - // host - if (!suitableVolumeStoragePools.isEmpty()) { - List suitableHosts = new ArrayList(); - suitableHosts.add(host); - Pair> potentialResources = findPotentialDeploymentResources( - suitableHosts, suitableVolumeStoragePools, avoids, - getPlannerUsage(planner, vmProfile, plan, avoids), readyAndReusedVolumes, plan.getPreferredHosts(), vm); - if (potentialResources != null) { - Map storageVolMap = potentialResources.second(); - // remove the reused vol<->pool from - // destination, since we don't have to - // prepare - // this volume. - for (Volume vol : readyAndReusedVolumes) { - storageVolMap.remove(vol); - } - DeployDestination dest = new DeployDestination(dc, pod, cluster, host, - storageVolMap, displayStorage); + Pod pod = _podDao.findById(host.getPodId()); + Cluster cluster = _clusterDao.findById(host.getClusterId()); + if (vm.getHypervisorType() == HypervisorType.BareMetal) { + DeployDestination dest = new DeployDestination(dc, pod, cluster, host, new HashMap(), displayStorage); s_logger.debug("Returning Deployment Destination: " + dest); return dest; } + + // search for storage under the zone, pod, cluster + // of + // the last host. + DataCenterDeployment lastPlan = new DataCenterDeployment(host.getDataCenterId(), + host.getPodId(), host.getClusterId(), host.getId(), plan.getPoolId(), null); + Pair>, List> result = findSuitablePoolsForVolumes( + vmProfile, lastPlan, avoids, HostAllocator.RETURN_UPTO_ALL); + Map> suitableVolumeStoragePools = result.first(); + List readyAndReusedVolumes = result.second(); + + // choose the potential pool for this VM for this + // host + if (!suitableVolumeStoragePools.isEmpty()) { + List suitableHosts = new ArrayList(); + suitableHosts.add(host); + Pair> potentialResources = findPotentialDeploymentResources( + suitableHosts, suitableVolumeStoragePools, avoids, + getPlannerUsage(planner, vmProfile, plan, avoids), readyAndReusedVolumes, plan.getPreferredHosts(), vm); + if (potentialResources != null) { + Map storageVolMap = potentialResources.second(); + // remove the reused vol<->pool from + // destination, since we don't have to + // prepare + // this volume. 
+ for (Volume vol : readyAndReusedVolumes) { + storageVolMap.remove(vol); + } + DeployDestination dest = new DeployDestination(dc, pod, cluster, host, + storageVolMap, displayStorage); + s_logger.debug("Returning Deployment Destination: " + dest); + return dest; + } + } + } else { + s_logger.debug("The last host of this VM does not have enough capacity"); } - } else { - s_logger.debug("The last host of this VM does not have enough capacity"); } + } else { + s_logger.debug("The last host of this VM is not UP or is not enabled, host status is: " + host.getStatus().name() + ", host resource state is: " + + host.getResourceState()); } - } else { - s_logger.debug("The last host of this VM is not UP or is not enabled, host status is: " + host.getStatus().name() + ", host resource state is: " + - host.getResourceState()); } } s_logger.debug("Cannot choose the last host to deploy this VM "); From 56b69b157ea646d7408a6daa996d9c53b572d85d Mon Sep 17 00:00:00 2001 From: dahn Date: Fri, 14 Jun 2024 09:01:15 +0200 Subject: [PATCH 02/13] prevent duplicate ip table rules in SSVM (#8530) Co-authored-by: Wei Zhou --- .../SecondaryStorageManagerImpl.java | 4 +- .../storage/resource/IpTablesHelper.java | 66 +++++++++++++++++++ .../resource/NfsSecondaryStorageResource.java | 20 ++---- .../storage/template/DownloadManagerImpl.java | 40 +++++------ systemvm/debian/opt/cloud/bin/cs/CsHelper.py | 2 +- .../java/com/cloud/utils/script/Script.java | 11 +++- 6 files changed, 105 insertions(+), 38 deletions(-) create mode 100644 services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/IpTablesHelper.java diff --git a/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java b/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java index 59ac4f44938..9df8906070f 100644 --- a/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java +++ b/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java @@ -531,8 +531,8 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar /** * Get the default network for the secondary storage VM, based on the zone it is in. Delegates to - * either {@link #getDefaultNetworkForZone(DataCenter)} or {@link #getDefaultNetworkForAdvancedSGZone(DataCenter)}, - * depending on the zone network type and whether or not security groups are enabled in the zone. + * either {@link #getDefaultNetworkForAdvancedZone(DataCenter)} or {@link #getDefaultNetworkForBasicZone(DataCenter)}, + * depending on the zone network type and whether security groups are enabled in the zone. * @param dc - The zone (DataCenter) of the secondary storage VM. * @return The default network for use with the secondary storage VM. */ diff --git a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/IpTablesHelper.java b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/IpTablesHelper.java new file mode 100644 index 00000000000..b9f0d2fdc77 --- /dev/null +++ b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/IpTablesHelper.java @@ -0,0 +1,66 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// + +package org.apache.cloudstack.storage.resource; + +import com.cloud.utils.script.Script; +import org.apache.log4j.Logger; + +public class IpTablesHelper { + public static final Logger LOGGER = Logger.getLogger(IpTablesHelper.class); + + public static final String OUTPUT_CHAIN = "OUTPUT"; + public static final String INPUT_CHAIN = "INPUT"; + public static final String INSERT = " -I "; + public static final String APPEND = " -A "; + + public static boolean needsAdding(String chain, String rule) { + Script command = new Script("/bin/bash", LOGGER); + command.add("-c"); + command.add("iptables -C " + chain + " " + rule); + + String commandOutput = command.execute(); + boolean needsAdding = (commandOutput != null && commandOutput.contains("iptables: Bad rule (does a matching rule exist in that chain?).")); + LOGGER.debug(String.format("Rule [%s], %s need adding to [%s] : %s", + rule, + needsAdding ? "does indeed" : "doesn't", + chain, + commandOutput + )); + return needsAdding; + } + + public static String addConditionally(String chain, boolean insert, String rule, String errMsg) { + LOGGER.info(String.format("Adding rule [%s] to [%s] if required.", rule, chain)); + if (needsAdding(chain, rule)) { + Script command = new Script("/bin/bash", LOGGER); + command.add("-c"); + command.add("iptables" + (insert ? 
INSERT : APPEND) + chain + " " + rule); + String result = command.execute(); + LOGGER.debug(String.format("Executed [%s] with result [%s]", command, result)); + if (result != null) { + LOGGER.warn(String.format("%s , err = %s", errMsg, result)); + return errMsg + result; + } + } else { + LOGGER.warn("Rule already defined in SVM: " + rule); + } + return null; + } +} diff --git a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java index d8f7395c885..8d2ebcd0458 100644 --- a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java +++ b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java @@ -2287,15 +2287,14 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S if (!_inSystemVM) { return null; } - Script command = new Script("/bin/bash", s_logger); String intf = "eth1"; - command.add("-c"); - command.add("iptables -I OUTPUT -o " + intf + " -d " + destCidr + " -p tcp -m state --state NEW -m tcp -j ACCEPT"); + String rule = String.format("-o %s -d %s -p tcp -m state --state NEW -m tcp -j ACCEPT", intf, destCidr); + String errMsg = String.format("Error in allowing outgoing to %s", destCidr); - String result = command.execute(); + s_logger.info(String.format("Adding rule if required: " + rule)); + String result = IpTablesHelper.addConditionally(IpTablesHelper.OUTPUT_CHAIN, true, rule, errMsg); if (result != null) { - s_logger.warn("Error in allowing outgoing to " + destCidr + ", err=" + result); - return "Error in allowing outgoing to " + destCidr + ", err=" + result; + return result; } addRouteToInternalIpOrCidr(_localgw, _eth1ip, _eth1mask, destCidr); @@ -2832,13 +2831,8 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S if (result != null) { s_logger.warn("Error in starting sshd service err=" + result); } - command = new Script("/bin/bash", s_logger); - command.add("-c"); - command.add("iptables -I INPUT -i eth1 -p tcp -m state --state NEW -m tcp --dport 3922 -j ACCEPT"); - result = command.execute(); - if (result != null) { - s_logger.warn("Error in opening up ssh port err=" + result); - } + String rule = "-i eth1 -p tcp -m state --state NEW -m tcp --dport 3922 -j ACCEPT"; + IpTablesHelper.addConditionally(IpTablesHelper.INPUT_CHAIN, true, rule, "Error in opening up ssh port"); } private void addRouteToInternalIpOrCidr(String localgw, String eth1ip, String eth1mask, String destIpOrCidr) { diff --git a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/template/DownloadManagerImpl.java b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/template/DownloadManagerImpl.java index e14977682b6..4d68aff76a2 100644 --- a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/template/DownloadManagerImpl.java +++ b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/template/DownloadManagerImpl.java @@ -60,6 +60,7 @@ import org.apache.cloudstack.storage.command.DownloadCommand.ResourceType; import org.apache.cloudstack.storage.command.DownloadProgressCommand; import org.apache.cloudstack.storage.command.DownloadProgressCommand.RequestType; import org.apache.cloudstack.storage.NfsMountManagerImpl.PathParser; +import 
org.apache.cloudstack.storage.resource.IpTablesHelper; import org.apache.cloudstack.storage.resource.NfsSecondaryStorageResource; import org.apache.cloudstack.storage.resource.SecondaryStorageResource; import org.apache.log4j.Logger; @@ -1092,17 +1093,14 @@ public class DownloadManagerImpl extends ManagerBase implements DownloadManager } private void blockOutgoingOnPrivate() { - Script command = new Script("/bin/bash", LOGGER); - String intf = "eth1"; - command.add("-c"); - command.add("iptables -A OUTPUT -o " + intf + " -p tcp -m state --state NEW -m tcp --dport " + "80" + " -j REJECT;" + "iptables -A OUTPUT -o " + intf + - " -p tcp -m state --state NEW -m tcp --dport " + "443" + " -j REJECT;"); - - String result = command.execute(); - if (result != null) { - LOGGER.warn("Error in blocking outgoing to port 80/443 err=" + result); - return; - } + IpTablesHelper.addConditionally(IpTablesHelper.OUTPUT_CHAIN + , false + , "-o " + TemplateConstants.TMPLT_COPY_INTF_PRIVATE + " -p tcp -m state --state NEW -m tcp --dport 80 -j REJECT;" + , "Error in blocking outgoing to port 80"); + IpTablesHelper.addConditionally(IpTablesHelper.OUTPUT_CHAIN + , false + , "-o " + TemplateConstants.TMPLT_COPY_INTF_PRIVATE + " -p tcp -m state --state NEW -m tcp --dport 443 -j REJECT;" + , "Error in blocking outgoing to port 443"); } @Override @@ -1128,17 +1126,19 @@ public class DownloadManagerImpl extends ManagerBase implements DownloadManager if (result != null) { LOGGER.warn("Error in stopping httpd service err=" + result); } - String port = Integer.toString(TemplateConstants.DEFAULT_TMPLT_COPY_PORT); - String intf = TemplateConstants.DEFAULT_TMPLT_COPY_INTF; - command = new Script("/bin/bash", LOGGER); - command.add("-c"); - command.add("iptables -I INPUT -i " + intf + " -p tcp -m state --state NEW -m tcp --dport " + port + " -j ACCEPT;" + "iptables -I INPUT -i " + intf + - " -p tcp -m state --state NEW -m tcp --dport " + "443" + " -j ACCEPT;"); - - result = command.execute(); + result = IpTablesHelper.addConditionally(IpTablesHelper.INPUT_CHAIN + , true + , "-i " + TemplateConstants.DEFAULT_TMPLT_COPY_INTF + " -p tcp -m state --state NEW -m tcp --dport " + TemplateConstants.DEFAULT_TMPLT_COPY_PORT + " -j ACCEPT" + , "Error in opening up apache2 port " + TemplateConstants.TMPLT_COPY_INTF_PRIVATE); + if (result != null) { + return; + } + result = IpTablesHelper.addConditionally(IpTablesHelper.INPUT_CHAIN + , true + , "-i " + TemplateConstants.DEFAULT_TMPLT_COPY_INTF + " -p tcp -m state --state NEW -m tcp --dport 443 -j ACCEPT;" + , "Error in opening up apache2 port 443"); if (result != null) { - LOGGER.warn("Error in opening up apache2 port err=" + result); return; } diff --git a/systemvm/debian/opt/cloud/bin/cs/CsHelper.py b/systemvm/debian/opt/cloud/bin/cs/CsHelper.py index 2a83c215403..45f1953dc99 100755 --- a/systemvm/debian/opt/cloud/bin/cs/CsHelper.py +++ b/systemvm/debian/opt/cloud/bin/cs/CsHelper.py @@ -221,7 +221,7 @@ def save_iptables(command, iptables_file): def execute2(command, wait=True): """ Execute command """ - logging.info("Executing: %s" % command) + logging.info("Executing2: %s" % command) p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) if wait: p.wait() diff --git a/utils/src/main/java/com/cloud/utils/script/Script.java b/utils/src/main/java/com/cloud/utils/script/Script.java index fb57f256690..31585534cbd 100644 --- a/utils/src/main/java/com/cloud/utils/script/Script.java +++ b/utils/src/main/java/com/cloud/utils/script/Script.java @@ -243,12 
+243,19 @@ public class Script implements Callable { //process completed successfully if (_process.exitValue() == 0) { _logger.debug("Execution is successful."); + String result; + String method; if (interpreter != null) { - return interpreter.drain() ? task.getResult() : interpreter.interpret(ir); + _logger.debug("interpreting the result..."); + method = "result interpretation of execution: "; + result= interpreter.drain() ? task.getResult() : interpreter.interpret(ir); } else { // null return exitValue apparently - return String.valueOf(_process.exitValue()); + method = "return code of execution: "; + result = String.valueOf(_process.exitValue()); } + _logger.debug(method + result); + return result; } else { //process failed break; } From ed86dc973b15fde5687502ffc29b1ddaa8536e0e Mon Sep 17 00:00:00 2001 From: dahn Date: Fri, 14 Jun 2024 11:55:46 +0200 Subject: [PATCH 03/13] protect against missing service offering (#9235) * protect agains missing service offering * search removed before assuming none * import * javadoc --- .../cloud/service/dao/ServiceOfferingDao.java | 2 +- .../service/dao/ServiceOfferingDaoImpl.java | 4 ++-- .../main/java/com/cloud/api/ApiDBUtils.java | 4 ++-- .../api/query/dao/VolumeJoinDaoImpl.java | 24 +++++++++++++++++-- 4 files changed, 27 insertions(+), 7 deletions(-) diff --git a/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDao.java b/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDao.java index e2fc5b49ae8..3b6fa8fa103 100644 --- a/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDao.java +++ b/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDao.java @@ -54,5 +54,5 @@ public interface ServiceOfferingDao extends GenericDao List listPublicByCpuAndMemory(Integer cpus, Integer memory); - ServiceOfferingVO findServiceOfferingByComputeOnlyDiskOffering(long diskOfferingId); + ServiceOfferingVO findServiceOfferingByComputeOnlyDiskOffering(long diskOfferingId, boolean includingRemoved); } diff --git a/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDaoImpl.java b/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDaoImpl.java index 5c8e4993829..ef6d4e71989 100644 --- a/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDaoImpl.java @@ -284,10 +284,10 @@ public class ServiceOfferingDaoImpl extends GenericDaoBase sc = SearchComputeOfferingByComputeOnlyDiskOffering.create(); sc.setParameters("disk_offering_id", diskOfferingId); - List vos = listBy(sc); + List vos = includingRemoved ? 
listIncludingRemovedBy(sc) : listBy(sc); if (vos.size() == 0) { return null; } diff --git a/server/src/main/java/com/cloud/api/ApiDBUtils.java b/server/src/main/java/com/cloud/api/ApiDBUtils.java index 97ecd982f53..c45e60f8dd5 100644 --- a/server/src/main/java/com/cloud/api/ApiDBUtils.java +++ b/server/src/main/java/com/cloud/api/ApiDBUtils.java @@ -1109,8 +1109,8 @@ public class ApiDBUtils { return null; } - public static ServiceOfferingVO findServiceOfferingByComputeOnlyDiskOffering(Long diskOfferingId) { - ServiceOfferingVO off = s_serviceOfferingDao.findServiceOfferingByComputeOnlyDiskOffering(diskOfferingId); + public static ServiceOfferingVO findServiceOfferingByComputeOnlyDiskOffering(Long diskOfferingId, boolean includingRemoved) { + ServiceOfferingVO off = s_serviceOfferingDao.findServiceOfferingByComputeOnlyDiskOffering(diskOfferingId, includingRemoved); return off; } public static DomainVO findDomainById(Long domainId) { diff --git a/server/src/main/java/com/cloud/api/query/dao/VolumeJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/VolumeJoinDaoImpl.java index e309a7534a3..1060fd840b5 100644 --- a/server/src/main/java/com/cloud/api/query/dao/VolumeJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/VolumeJoinDaoImpl.java @@ -188,8 +188,8 @@ public class VolumeJoinDaoImpl extends GenericDaoBaseWithTagInformation 0) { DiskOffering computeOnlyDiskOffering = ApiDBUtils.findComputeOnlyDiskOfferingById(volume.getDiskOfferingId()); - if (computeOnlyDiskOffering != null) { - ServiceOffering serviceOffering = ApiDBUtils.findServiceOfferingByComputeOnlyDiskOffering(volume.getDiskOfferingId()); + ServiceOffering serviceOffering = getServiceOfferingForDiskOffering(volume, computeOnlyDiskOffering); + if (serviceOffering != null) { volResponse.setServiceOfferingId(String.valueOf(serviceOffering.getId())); volResponse.setServiceOfferingName(serviceOffering.getName()); volResponse.setServiceOfferingDisplayText(serviceOffering.getDisplayText()); @@ -283,6 +283,26 @@ public class VolumeJoinDaoImpl extends GenericDaoBaseWithTagInformation Date: Fri, 14 Jun 2024 07:54:01 -0300 Subject: [PATCH 04/13] Fix allocation of VMs with multiple clusters (#8611) * Fix allocation of VMs with multiple clusters * Readd debug guard --- .../allocator/impl/RandomAllocator.java | 26 +++++++++---------- .../allocator/impl/FirstFitAllocator.java | 9 ++++--- .../allocator/impl/RecreateHostAllocator.java | 3 ++- .../deploy/DeploymentPlanningManagerImpl.java | 13 +++++----- .../com/cloud/deploy/FirstFitPlanner.java | 2 +- .../cloud/server/ManagementServerImpl.java | 13 ++++------ 6 files changed, 32 insertions(+), 34 deletions(-) diff --git a/plugins/host-allocators/random/src/main/java/com/cloud/agent/manager/allocator/impl/RandomAllocator.java b/plugins/host-allocators/random/src/main/java/com/cloud/agent/manager/allocator/impl/RandomAllocator.java index 70920df5eb5..eb9ab7148b2 100644 --- a/plugins/host-allocators/random/src/main/java/com/cloud/agent/manager/allocator/impl/RandomAllocator.java +++ b/plugins/host-allocators/random/src/main/java/com/cloud/agent/manager/allocator/impl/RandomAllocator.java @@ -22,7 +22,6 @@ import java.util.List; import javax.inject.Inject; -import com.cloud.utils.exception.CloudRuntimeException; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.collections.ListUtils; import org.apache.log4j.Logger; @@ -36,7 +35,6 @@ import com.cloud.deploy.DeploymentPlan; import com.cloud.deploy.DeploymentPlanner.ExcludeList; import 
com.cloud.host.Host; import com.cloud.host.Host.Type; -import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; import com.cloud.offering.ServiceOffering; import com.cloud.resource.ResourceManager; @@ -66,18 +64,20 @@ public class RandomAllocator extends AdapterBase implements HostAllocator { Long podId = plan.getPodId(); Long clusterId = plan.getClusterId(); ServiceOffering offering = vmProfile.getServiceOffering(); - List hostsCopy = null; - List suitableHosts = new ArrayList(); + List suitableHosts = new ArrayList<>(); if (type == Host.Type.Storage) { return suitableHosts; } + String hostTag = offering.getHostTag(); - if (hostTag != null) { - s_logger.debug(String.format("Looking for hosts in dc [%s], pod [%s], cluster [%s] and complying with host tag [%s].", dcId, podId, clusterId, hostTag)); - } else { - s_logger.debug("Looking for hosts in dc: " + dcId + " pod:" + podId + " cluster:" + clusterId); - } + String hostTagToLog = hostTag != null ? String.format("and complying with host tags [%s]", hostTag) : ""; + String paramAsStringToLog = String.format("zone [%s], pod [%s], cluster [%s] %s", dcId, podId, clusterId, hostTagToLog); + + s_logger.debug(String.format("Looking for hosts in %s.", paramAsStringToLog)); + + List hostsCopy; + if (hosts != null) { // retain all computing hosts, regardless of whether they support routing...it's random after all hostsCopy = new ArrayList(hosts); @@ -88,7 +88,6 @@ public class RandomAllocator extends AdapterBase implements HostAllocator { } } else { // list all computing hosts, regardless of whether they support routing...it's random after all - hostsCopy = new ArrayList(); if (hostTag != null) { hostsCopy = _hostDao.listByHostTag(type, clusterId, podId, dcId, hostTag); } else { @@ -98,14 +97,15 @@ public class RandomAllocator extends AdapterBase implements HostAllocator { hostsCopy = ListUtils.union(hostsCopy, _hostDao.findHostsWithTagRuleThatMatchComputeOferringTags(hostTag)); if (hostsCopy.isEmpty()) { - s_logger.error(String.format("No suitable host found for vm [%s] with tags [%s].", vmProfile, hostTag)); - throw new CloudRuntimeException(String.format("No suitable host found for vm [%s].", vmProfile)); + s_logger.info(String.format("No suitable host found for VM [%s] in %s.", vmProfile, paramAsStringToLog)); + return null; } s_logger.debug("Random Allocator found " + hostsCopy.size() + " hosts"); - if (hostsCopy.size() == 0) { + if (hostsCopy.isEmpty()) { return suitableHosts; } + Collections.shuffle(hostsCopy); for (Host host : hostsCopy) { if (suitableHosts.size() == returnUpTo) { diff --git a/server/src/main/java/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java b/server/src/main/java/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java index 862e8acc9e0..18d5d22a8e2 100644 --- a/server/src/main/java/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java +++ b/server/src/main/java/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java @@ -25,7 +25,6 @@ import java.util.Map; import javax.inject.Inject; import javax.naming.ConfigurationException; -import com.cloud.utils.exception.CloudRuntimeException; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -132,8 +131,10 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { return new ArrayList(); } + String paramAsStringToLog = String.format("zone [%s], pod [%s], cluster [%s]", dcId, podId, clusterId); + if 
(s_logger.isDebugEnabled()) { - s_logger.debug("Looking for hosts in dc: " + dcId + " pod:" + podId + " cluster:" + clusterId); + s_logger.debug(String.format("Looking for hosts in %s.", paramAsStringToLog)); } String hostTagOnOffering = offering.getHostTag(); @@ -206,8 +207,8 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { if (clusterHosts.isEmpty()) { - s_logger.error(String.format("No suitable host found for vm [%s] with tags [%s].", vmProfile, hostTagOnOffering)); - throw new CloudRuntimeException(String.format("No suitable host found for vm [%s].", vmProfile)); + s_logger.info(String.format("No suitable host found for VM [%s] with tags [%s] in %s.", vmProfile, hostTagOnOffering, paramAsStringToLog)); + return null; } // add all hosts that we are not considering to the avoid list List allhostsInCluster = _hostDao.listAllUpAndEnabledNonHAHosts(type, clusterId, podId, dcId, null); diff --git a/server/src/main/java/com/cloud/agent/manager/allocator/impl/RecreateHostAllocator.java b/server/src/main/java/com/cloud/agent/manager/allocator/impl/RecreateHostAllocator.java index be6f4012f8f..77174780405 100644 --- a/server/src/main/java/com/cloud/agent/manager/allocator/impl/RecreateHostAllocator.java +++ b/server/src/main/java/com/cloud/agent/manager/allocator/impl/RecreateHostAllocator.java @@ -26,6 +26,7 @@ import java.util.Set; import javax.inject.Inject; import javax.naming.ConfigurationException; +import org.apache.commons.collections.CollectionUtils; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -75,7 +76,7 @@ public class RecreateHostAllocator extends FirstFitRoutingAllocator { public List allocateTo(VirtualMachineProfile vm, DeploymentPlan plan, Type type, ExcludeList avoid, int returnUpTo) { List hosts = super.allocateTo(vm, plan, type, avoid, returnUpTo); - if (hosts != null && !hosts.isEmpty()) { + if (CollectionUtils.isNotEmpty(hosts)) { return hosts; } diff --git a/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java b/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java index cb22e81f366..74ea6a3d8a3 100644 --- a/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java +++ b/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java @@ -1273,7 +1273,7 @@ StateListener, Configurable { List suitableHosts = findSuitableHosts(vmProfile, potentialPlan, avoid, HostAllocator.RETURN_UPTO_ALL); // if found suitable hosts in this cluster, find suitable storage // pools for each volume of the VM - if (suitableHosts != null && !suitableHosts.isEmpty()) { + if (CollectionUtils.isNotEmpty(suitableHosts)) { if (vmProfile.getHypervisorType() == HypervisorType.BareMetal) { DeployDestination dest = new DeployDestination(dc, pod, clusterVO, suitableHosts.get(0)); return dest; @@ -1621,18 +1621,17 @@ StateListener, Configurable { List suitableHosts = new ArrayList(); for (HostAllocator allocator : _hostAllocators) { suitableHosts = allocator.allocateTo(vmProfile, plan, Host.Type.Routing, avoid, returnUpTo); - if (suitableHosts != null && !suitableHosts.isEmpty()) { + if (CollectionUtils.isNotEmpty(suitableHosts)) { break; } } - if (suitableHosts.isEmpty()) { - s_logger.debug("No suitable hosts found"); + if (CollectionUtils.isEmpty(suitableHosts)) { + s_logger.debug("No suitable hosts found."); + } else { + reorderHostsByPriority(plan.getHostPriorities(), suitableHosts); } - // re-order hosts by priority - reorderHostsByPriority(plan.getHostPriorities(), 
suitableHosts); - return suitableHosts; } diff --git a/server/src/main/java/com/cloud/deploy/FirstFitPlanner.java b/server/src/main/java/com/cloud/deploy/FirstFitPlanner.java index c2969ecce50..b812b5dbf66 100644 --- a/server/src/main/java/com/cloud/deploy/FirstFitPlanner.java +++ b/server/src/main/java/com/cloud/deploy/FirstFitPlanner.java @@ -525,7 +525,7 @@ public class FirstFitPlanner extends AdapterBase implements DeploymentClusterPla matchingClusters.addAll(hostDao.findClustersThatMatchHostTagRule(hostTagOnOffering)); if (matchingClusters.isEmpty()) { - s_logger.error(String.format("No suitable host found for the following compute offering tags [%s].", hostTagOnOffering)); + s_logger.error(String.format("No suitable host found in any cluster for the following compute offering tags [%s].", hostTagOnOffering)); throw new CloudRuntimeException("No suitable host found."); } diff --git a/server/src/main/java/com/cloud/server/ManagementServerImpl.java b/server/src/main/java/com/cloud/server/ManagementServerImpl.java index 9b635cea5f3..28140d843ad 100644 --- a/server/src/main/java/com/cloud/server/ManagementServerImpl.java +++ b/server/src/main/java/com/cloud/server/ManagementServerImpl.java @@ -1590,20 +1590,17 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe suitableHosts = allocator.allocateTo(vmProfile, plan, Host.Type.Routing, excludes, HostAllocator.RETURN_UPTO_ALL, false); } - if (suitableHosts != null && !suitableHosts.isEmpty()) { + if (CollectionUtils.isNotEmpty(suitableHosts)) { break; } } - // re-order hosts by priority _dpMgr.reorderHostsByPriority(plan.getHostPriorities(), suitableHosts); - if (s_logger.isDebugEnabled()) { - if (suitableHosts.isEmpty()) { - s_logger.debug("No suitable hosts found"); - } else { - s_logger.debug("Hosts having capacity and suitable for migration: " + suitableHosts); - } + if (suitableHosts.isEmpty()) { + s_logger.warn("No suitable hosts found."); + } else { + s_logger.debug("Hosts having capacity and suitable for migration: " + suitableHosts); } return new Ternary<>(otherHosts, suitableHosts, requiresStorageMotion); From 96288ecf1fab1aab4a87001040b6804f05fdc710 Mon Sep 17 00:00:00 2001 From: Klaus de Freitas Dornsbach Date: Mon, 17 Jun 2024 05:34:46 -0300 Subject: [PATCH 05/13] fix: domain limits tab ui is now able to present 0 to the user (#9166) Co-authored-by: klaus.freitas.scclouds Co-authored-by: Bryan Lima <42067040+BryanMLima@users.noreply.github.com> --- ui/src/components/view/ResourceLimitTab.vue | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ui/src/components/view/ResourceLimitTab.vue b/ui/src/components/view/ResourceLimitTab.vue index 0c09a14fcd5..875d2cc3b40 100644 --- a/ui/src/components/view/ResourceLimitTab.vue +++ b/ui/src/components/view/ResourceLimitTab.vue @@ -112,7 +112,7 @@ export default { this.formLoading = true this.dataResource = await this.listResourceLimits(params) this.dataResource.forEach(item => { - form[item.resourcetype] = item.max || -1 + form[item.resourcetype] = item.max == null ? 
-1 : item.max }) this.form = form this.formRef.value.resetFields() From bb0c1f93afea55a7e86240e5e5233205fb74abe9 Mon Sep 17 00:00:00 2001 From: Harikrishna Date: Mon, 17 Jun 2024 14:06:47 +0530 Subject: [PATCH 06/13] Add volume encryption checks during the disk offering change (#9209) --- .../subsystem/api/storage/VolumeService.java | 2 + .../storage/volume/VolumeServiceImpl.java | 15 +++++++ .../storage/volume/VolumeServiceTest.java | 43 +++++++++++++++++++ .../cloud/storage/VolumeApiServiceImpl.java | 4 +- .../java/com/cloud/vm/UserVmManagerImpl.java | 7 +-- .../com/cloud/vm/UserVmManagerImplTest.java | 42 ------------------ 6 files changed, 64 insertions(+), 49 deletions(-) diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/VolumeService.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/VolumeService.java index 7c4d56e12b9..682473ec94f 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/VolumeService.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/VolumeService.java @@ -121,4 +121,6 @@ public interface VolumeService { Pair checkAndRepairVolume(VolumeInfo volume); void checkAndRepairVolumeBasedOnConfig(DataObject dataObject, Host host); + + void validateChangeDiskOfferingEncryptionType(long existingDiskOfferingId, long newDiskOfferingId); } diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java index 59d027e3c42..a47cb41a323 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java @@ -32,7 +32,10 @@ import java.util.concurrent.ExecutionException; import javax.inject.Inject; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.storage.DiskOfferingVO; import com.cloud.storage.VolumeApiServiceImpl; +import com.cloud.storage.dao.DiskOfferingDao; import org.apache.cloudstack.annotation.AnnotationService; import org.apache.cloudstack.annotation.dao.AnnotationDao; import org.apache.cloudstack.api.command.user.volume.CheckAndRepairVolumeCmd; @@ -212,6 +215,8 @@ public class VolumeServiceImpl implements VolumeService { private SnapshotApiService snapshotApiService; @Inject private PassphraseDao passphraseDao; + @Inject + protected DiskOfferingDao diskOfferingDao; public VolumeServiceImpl() { } @@ -2794,6 +2799,16 @@ public class VolumeServiceImpl implements VolumeService { } } + @Override + public void validateChangeDiskOfferingEncryptionType(long existingDiskOfferingId, long newDiskOfferingId) { + DiskOfferingVO existingDiskOffering = diskOfferingDao.findByIdIncludingRemoved(existingDiskOfferingId); + DiskOfferingVO newDiskOffering = diskOfferingDao.findById(newDiskOfferingId); + + if (existingDiskOffering.getEncrypt() != newDiskOffering.getEncrypt()) { + throw new InvalidParameterValueException("Cannot change the encryption type of a volume, please check the selected offering"); + } + } + @Override public Pair checkAndRepairVolume(VolumeInfo volume) { Long poolId = volume.getPoolId(); diff --git a/engine/storage/volume/src/test/java/org/apache/cloudstack/storage/volume/VolumeServiceTest.java b/engine/storage/volume/src/test/java/org/apache/cloudstack/storage/volume/VolumeServiceTest.java index 55ff2f659af..97ccc37be13 100644 
--- a/engine/storage/volume/src/test/java/org/apache/cloudstack/storage/volume/VolumeServiceTest.java +++ b/engine/storage/volume/src/test/java/org/apache/cloudstack/storage/volume/VolumeServiceTest.java @@ -22,13 +22,16 @@ package org.apache.cloudstack.storage.volume; import com.cloud.agent.api.storage.CheckAndRepairVolumeAnswer; import com.cloud.agent.api.storage.CheckAndRepairVolumeCommand; import com.cloud.agent.api.to.StorageFilerTO; +import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.StorageUnavailableException; import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; import com.cloud.storage.CheckAndRepairVolumePayload; +import com.cloud.storage.DiskOfferingVO; import com.cloud.storage.StorageManager; import com.cloud.storage.StoragePool; import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.DiskOfferingDao; import com.cloud.storage.dao.VolumeDao; import com.cloud.storage.snapshot.SnapshotManager; import java.util.ArrayList; @@ -88,6 +91,9 @@ public class VolumeServiceTest extends TestCase{ @Mock HostDao hostDaoMock; + @Mock + DiskOfferingDao diskOfferingDaoMock; + @Before public void setup(){ volumeServiceImplSpy = Mockito.spy(new VolumeServiceImpl()); @@ -96,6 +102,7 @@ public class VolumeServiceTest extends TestCase{ volumeServiceImplSpy.snapshotMgr = snapshotManagerMock; volumeServiceImplSpy._storageMgr = storageManagerMock; volumeServiceImplSpy._hostDao = hostDaoMock; + volumeServiceImplSpy.diskOfferingDao = diskOfferingDaoMock; } @Test(expected = InterruptedException.class) @@ -303,4 +310,40 @@ public class VolumeServiceTest extends TestCase{ Assert.assertEquals(null, result); } + + @Test + public void validateDiskOfferingCheckForEncryption1Test() { + prepareOfferingsForEncryptionValidation(1L, true); + prepareOfferingsForEncryptionValidation(2L, true); + volumeServiceImplSpy.validateChangeDiskOfferingEncryptionType(1L, 2L); + } + + @Test + public void validateDiskOfferingCheckForEncryption2Test() { + prepareOfferingsForEncryptionValidation(1L, false); + prepareOfferingsForEncryptionValidation(2L, false); + volumeServiceImplSpy.validateChangeDiskOfferingEncryptionType(1L, 2L); + } + + @Test (expected = InvalidParameterValueException.class) + public void validateDiskOfferingCheckForEncryptionFail1Test() { + prepareOfferingsForEncryptionValidation(1L, false); + prepareOfferingsForEncryptionValidation(2L, true); + volumeServiceImplSpy.validateChangeDiskOfferingEncryptionType(1L, 2L); + } + + @Test (expected = InvalidParameterValueException.class) + public void validateDiskOfferingCheckForEncryptionFail2Test() { + prepareOfferingsForEncryptionValidation(1L, true); + prepareOfferingsForEncryptionValidation(2L, false); + volumeServiceImplSpy.validateChangeDiskOfferingEncryptionType(1L, 2L); + } + + private void prepareOfferingsForEncryptionValidation(long diskOfferingId, boolean encryption) { + DiskOfferingVO diskOffering = Mockito.mock(DiskOfferingVO.class); + + Mockito.when(diskOffering.getEncrypt()).thenReturn(encryption); + Mockito.when(diskOfferingDaoMock.findByIdIncludingRemoved(diskOfferingId)).thenReturn(diskOffering); + Mockito.when(diskOfferingDaoMock.findById(diskOfferingId)).thenReturn(diskOffering); + } } diff --git a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java index 1cf069feae8..31778b33b24 100644 --- a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java +++ 
b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java @@ -2027,7 +2027,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic } private Volume changeDiskOfferingForVolumeInternal(VolumeVO volume, Long newDiskOfferingId, Long newSize, Long newMinIops, Long newMaxIops, boolean autoMigrateVolume, boolean shrinkOk) throws ResourceAllocationException { - DiskOfferingVO existingDiskOffering = _diskOfferingDao.findById(volume.getDiskOfferingId()); + long existingDiskOfferingId = volume.getDiskOfferingId(); + DiskOfferingVO existingDiskOffering = _diskOfferingDao.findByIdIncludingRemoved(existingDiskOfferingId); DiskOfferingVO newDiskOffering = _diskOfferingDao.findById(newDiskOfferingId); Integer newHypervisorSnapshotReserve = null; @@ -2039,6 +2040,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic Long[] updateNewMinIops = {newMinIops}; Long[] updateNewMaxIops = {newMaxIops}; Integer[] updateNewHypervisorSnapshotReserve = {newHypervisorSnapshotReserve}; + volService.validateChangeDiskOfferingEncryptionType(existingDiskOfferingId, newDiskOfferingId); validateVolumeResizeWithNewDiskOfferingAndLoad(volume, existingDiskOffering, newDiskOffering, updateNewSize, updateNewMinIops, updateNewMaxIops, updateNewHypervisorSnapshotReserve); newSize = updateNewSize[0]; newMinIops = updateNewMinIops[0]; diff --git a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java index 0c8ab1cd793..7fd074e13e8 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java @@ -2116,12 +2116,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir throw new InvalidParameterValueException("Unable to Scale VM, since disk offering id associated with the old service offering is not same for new service offering"); } - DiskOfferingVO currentRootDiskOffering = _diskOfferingDao.findByIdIncludingRemoved(currentServiceOffering.getDiskOfferingId()); - DiskOfferingVO newRootDiskOffering = _diskOfferingDao.findById(newServiceOffering.getDiskOfferingId()); - - if (currentRootDiskOffering.getEncrypt() != newRootDiskOffering.getEncrypt()) { - throw new InvalidParameterValueException("Cannot change volume encryption type via service offering change"); - } + _volService.validateChangeDiskOfferingEncryptionType(currentServiceOffering.getDiskOfferingId(), newServiceOffering.getDiskOfferingId()); } private void changeDiskOfferingForRootVolume(Long vmId, DiskOfferingVO newDiskOffering, Map customParameters, Long zoneId) throws ResourceAllocationException { diff --git a/server/src/test/java/com/cloud/vm/UserVmManagerImplTest.java b/server/src/test/java/com/cloud/vm/UserVmManagerImplTest.java index 303a9b08b1c..ba9e5ba6922 100644 --- a/server/src/test/java/com/cloud/vm/UserVmManagerImplTest.java +++ b/server/src/test/java/com/cloud/vm/UserVmManagerImplTest.java @@ -677,34 +677,6 @@ public class UserVmManagerImplTest { prepareAndRunResizeVolumeTest(2L, 10L, 20L, largerDisdkOffering, smallerDisdkOffering); } - @Test - public void validateDiskOfferingCheckForEncryption1Test() { - ServiceOfferingVO currentOffering = prepareOfferingsForEncryptionValidation(1L, true); - ServiceOfferingVO newOffering = prepareOfferingsForEncryptionValidation(2L, true); - userVmManagerImpl.validateDiskOfferingChecks(currentOffering, newOffering); - } - - @Test - public void validateDiskOfferingCheckForEncryption2Test() { - 
ServiceOfferingVO currentOffering = prepareOfferingsForEncryptionValidation(1L, false); - ServiceOfferingVO newOffering = prepareOfferingsForEncryptionValidation(2L, false); - userVmManagerImpl.validateDiskOfferingChecks(currentOffering, newOffering); - } - - @Test (expected = InvalidParameterValueException.class) - public void validateDiskOfferingCheckForEncryptionFail1Test() { - ServiceOfferingVO currentOffering = prepareOfferingsForEncryptionValidation(1L, false); - ServiceOfferingVO newOffering = prepareOfferingsForEncryptionValidation(2L, true); - userVmManagerImpl.validateDiskOfferingChecks(currentOffering, newOffering); - } - - @Test (expected = InvalidParameterValueException.class) - public void validateDiskOfferingCheckForEncryptionFail2Test() { - ServiceOfferingVO currentOffering = prepareOfferingsForEncryptionValidation(1L, true); - ServiceOfferingVO newOffering = prepareOfferingsForEncryptionValidation(2L, false); - userVmManagerImpl.validateDiskOfferingChecks(currentOffering, newOffering); - } - private void prepareAndRunResizeVolumeTest(Long expectedOfferingId, long expectedMinIops, long expectedMaxIops, DiskOfferingVO currentRootDiskOffering, DiskOfferingVO newRootDiskOffering) { long rootVolumeId = 1l; VolumeVO rootVolumeOfVm = Mockito.mock(VolumeVO.class); @@ -728,20 +700,6 @@ public class UserVmManagerImplTest { return newRootDiskOffering; } - private ServiceOfferingVO prepareOfferingsForEncryptionValidation(long diskOfferingId, boolean encryption) { - ServiceOfferingVO svcOffering = Mockito.mock(ServiceOfferingVO.class); - DiskOfferingVO diskOffering = Mockito.mock(DiskOfferingVO.class); - - Mockito.when(svcOffering.getDiskOfferingId()).thenReturn(diskOfferingId); - Mockito.when(diskOffering.getEncrypt()).thenReturn(encryption); - - // Be aware - Multiple calls with the same disk offering ID could conflict - Mockito.when(diskOfferingDao.findByIdIncludingRemoved(diskOfferingId)).thenReturn(diskOffering); - Mockito.when(diskOfferingDao.findById(diskOfferingId)).thenReturn(diskOffering); - - return svcOffering; - } - @Test (expected = CloudRuntimeException.class) public void testUserDataDenyOverride() { Long userDataId = 1L; From 7e71e50578815c71c29239189ab53a44d838931c Mon Sep 17 00:00:00 2001 From: Bryan Lima <42067040+BryanMLima@users.noreply.github.com> Date: Tue, 18 Jun 2024 02:22:27 -0300 Subject: [PATCH 07/13] [Quota] Improve Quota balance calculation flow (#8581) --- .../cloudstack/quota/QuotaManagerImpl.java | 107 +++++----- .../plugins/quota/test_quota_balance.py | 191 ++++++++++++++++++ .../com/cloud/usage/UsageManagerImpl.java | 7 +- 3 files changed, 253 insertions(+), 52 deletions(-) create mode 100644 test/integration/plugins/quota/test_quota_balance.py diff --git a/framework/quota/src/main/java/org/apache/cloudstack/quota/QuotaManagerImpl.java b/framework/quota/src/main/java/org/apache/cloudstack/quota/QuotaManagerImpl.java index 56a6edf5db4..f222f6596cf 100644 --- a/framework/quota/src/main/java/org/apache/cloudstack/quota/QuotaManagerImpl.java +++ b/framework/quota/src/main/java/org/apache/cloudstack/quota/QuotaManagerImpl.java @@ -22,6 +22,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Date; import java.util.HashMap; +import java.util.LinkedHashSet; import java.util.List; import java.util.Map; import java.util.TimeZone; @@ -54,6 +55,7 @@ import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.BooleanUtils; import org.apache.commons.lang3.StringUtils; import 
org.apache.commons.lang3.math.NumberUtils; +import org.apache.commons.lang3.time.DateUtils; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -156,78 +158,81 @@ public class QuotaManagerImpl extends ManagerBase implements QuotaManager { return; } - QuotaUsageVO firstQuotaUsage = accountQuotaUsages.get(0); - Date startDate = firstQuotaUsage.getStartDate(); - Date endDate = firstQuotaUsage.getStartDate(); + Date startDate = accountQuotaUsages.get(0).getStartDate(); + Date endDate = accountQuotaUsages.get(0).getEndDate(); + Date lastQuotaUsageEndDate = accountQuotaUsages.get(accountQuotaUsages.size() - 1).getEndDate(); - s_logger.info(String.format("Processing quota balance for account [%s] between [%s] and [%s].", accountToString, startDate, - accountQuotaUsages.get(accountQuotaUsages.size() - 1).getEndDate())); + LinkedHashSet> periods = accountQuotaUsages.stream() + .map(quotaUsageVO -> new Pair<>(quotaUsageVO.getStartDate(), quotaUsageVO.getEndDate())) + .collect(Collectors.toCollection(LinkedHashSet::new)); + + s_logger.info(String.format("Processing quota balance for account[%s] between [%s] and [%s].", accountToString, startDate, lastQuotaUsageEndDate)); - BigDecimal aggregatedUsage = BigDecimal.ZERO; long accountId = accountVo.getAccountId(); long domainId = accountVo.getDomainId(); + BigDecimal accountBalance = retrieveBalanceForUsageCalculation(accountId, domainId, startDate, accountToString); - aggregatedUsage = getUsageValueAccordingToLastQuotaUsageEntryAndLastQuotaBalance(accountId, domainId, startDate, endDate, aggregatedUsage, accountToString); + for (Pair period : periods) { + startDate = period.first(); + endDate = period.second(); - for (QuotaUsageVO quotaUsage : accountQuotaUsages) { - Date quotaUsageStartDate = quotaUsage.getStartDate(); - Date quotaUsageEndDate = quotaUsage.getEndDate(); - BigDecimal quotaUsed = quotaUsage.getQuotaUsed(); - - if (quotaUsed.equals(BigDecimal.ZERO)) { - aggregatedUsage = aggregatedUsage.add(aggregateCreditBetweenDates(accountId, domainId, quotaUsageStartDate, quotaUsageEndDate, accountToString)); - continue; - } - - if (startDate.compareTo(quotaUsageStartDate) == 0) { - aggregatedUsage = aggregatedUsage.subtract(quotaUsed); - continue; - } - - _quotaBalanceDao.saveQuotaBalance(new QuotaBalanceVO(accountId, domainId, aggregatedUsage, endDate)); - - aggregatedUsage = BigDecimal.ZERO; - startDate = quotaUsageStartDate; - endDate = quotaUsageEndDate; - - QuotaBalanceVO lastRealBalanceEntry = _quotaBalanceDao.findLastBalanceEntry(accountId, domainId, endDate); - Date lastBalanceDate = new Date(0); - - if (lastRealBalanceEntry != null) { - lastBalanceDate = lastRealBalanceEntry.getUpdatedOn(); - aggregatedUsage = aggregatedUsage.add(lastRealBalanceEntry.getCreditBalance()); - } - - aggregatedUsage = aggregatedUsage.add(aggregateCreditBetweenDates(accountId, domainId, lastBalanceDate, endDate, accountToString)); - aggregatedUsage = aggregatedUsage.subtract(quotaUsed); + accountBalance = calculateBalanceConsideringCreditsAddedAndQuotaUsed(accountBalance, accountQuotaUsages, accountId, domainId, startDate, endDate, accountToString); + _quotaBalanceDao.saveQuotaBalance(new QuotaBalanceVO(accountId, domainId, accountBalance, endDate)); } - - _quotaBalanceDao.saveQuotaBalance(new QuotaBalanceVO(accountId, domainId, aggregatedUsage, endDate)); - saveQuotaAccount(accountId, aggregatedUsage, endDate); + saveQuotaAccount(accountId, accountBalance, endDate); } - protected BigDecimal 
getUsageValueAccordingToLastQuotaUsageEntryAndLastQuotaBalance(long accountId, long domainId, Date startDate, Date endDate, BigDecimal aggregatedUsage, - String accountToString) { + /** + * Calculates the balance for the given account considering the specified period. The balance is calculated as follows: + *
    + *
+     *   <li>The credits added in this period are added to the balance.</li>
+     *   <li>All quota consumed in this period are subtracted from the account balance.</li>
+ */ + protected BigDecimal calculateBalanceConsideringCreditsAddedAndQuotaUsed(BigDecimal accountBalance, List accountQuotaUsages, long accountId, long domainId, + Date startDate, Date endDate, String accountToString) { + accountBalance = accountBalance.add(aggregateCreditBetweenDates(accountId, domainId, startDate, endDate, accountToString)); + + for (QuotaUsageVO quotaUsageVO : accountQuotaUsages) { + if (DateUtils.isSameInstant(quotaUsageVO.getStartDate(), startDate)) { + accountBalance = accountBalance.subtract(quotaUsageVO.getQuotaUsed()); + } + } + return accountBalance; + } + + /** + * Retrieves the initial balance prior to the period of the quota processing. + *
<ul>
+     *     <li>If it is the first time of processing for the account, the credits prior to the quota processing are added, and the first balance is persisted in the DB.</li>
+     *     <li>Otherwise, the last real balance of the account is retrieved.</li>
+     * </ul>
+ */ + protected BigDecimal retrieveBalanceForUsageCalculation(long accountId, long domainId, Date startDate, String accountToString) { + BigDecimal accountBalance = BigDecimal.ZERO; QuotaUsageVO lastQuotaUsage = _quotaUsageDao.findLastQuotaUsageEntry(accountId, domainId, startDate); if (lastQuotaUsage == null) { - aggregatedUsage = aggregatedUsage.add(aggregateCreditBetweenDates(accountId, domainId, new Date(0), startDate, accountToString)); - QuotaBalanceVO firstBalance = new QuotaBalanceVO(accountId, domainId, aggregatedUsage, startDate); + accountBalance = accountBalance.add(aggregateCreditBetweenDates(accountId, domainId, new Date(0), startDate, accountToString)); + QuotaBalanceVO firstBalance = new QuotaBalanceVO(accountId, domainId, accountBalance, startDate); s_logger.debug(String.format("Persisting the first quota balance [%s] for account [%s].", firstBalance, accountToString)); _quotaBalanceDao.saveQuotaBalance(firstBalance); } else { - QuotaBalanceVO lastRealBalance = _quotaBalanceDao.findLastBalanceEntry(accountId, domainId, endDate); + QuotaBalanceVO lastRealBalance = _quotaBalanceDao.findLastBalanceEntry(accountId, domainId, startDate); - if (lastRealBalance != null) { - aggregatedUsage = aggregatedUsage.add(lastRealBalance.getCreditBalance()); - aggregatedUsage = aggregatedUsage.add(aggregateCreditBetweenDates(accountId, domainId, lastRealBalance.getUpdatedOn(), endDate, accountToString)); - } else { + if (lastRealBalance == null) { s_logger.warn(String.format("Account [%s] has quota usage entries, however it does not have a quota balance.", accountToString)); + } else { + accountBalance = accountBalance.add(lastRealBalance.getCreditBalance()); } } - return aggregatedUsage; + return accountBalance; } protected void saveQuotaAccount(long accountId, BigDecimal aggregatedUsage, Date endDate) { diff --git a/test/integration/plugins/quota/test_quota_balance.py b/test/integration/plugins/quota/test_quota_balance.py new file mode 100644 index 00000000000..f5c1c75d7b2 --- /dev/null +++ b/test/integration/plugins/quota/test_quota_balance.py @@ -0,0 +1,191 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
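+# Expected result: 100 credits are added and the single simulated VM_DISK_IO_READ record
+# (raw_usage 1) is charged at the tariff value of 10, so 10 is consumed and the balance
+# asserted at the end of test_quota_balance should be 100 - 10 = 90.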
+""" +Test cases for validating the Quota balance of accounts +""" + +from marvin.cloudstackTestCase import * +from marvin.lib.utils import * +from marvin.lib.base import * +from marvin.lib.common import * +from nose.plugins.attrib import attr + + +class TestQuotaBalance(cloudstackTestCase): + + @classmethod + def setUpClass(cls): + testClient = super(TestQuotaBalance, cls).getClsTestClient() + cls.apiclient = testClient.getApiClient() + cls.services = testClient.getParsedTestDataConfig() + cls.mgtSvrDetails = cls.config.__dict__["mgtSvr"][0].__dict__ + + # Get Zone, Domain and templates + cls.domain = get_domain(cls.apiclient) + cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests()) + cls.zone + + # Create Account + cls.account = Account.create( + cls.apiclient, + cls.services["account"], + domainid=cls.domain.id + ) + cls._cleanup = [ + cls.account, + ] + + cls.services["account"] = cls.account.name + + if not is_config_suitable(apiclient=cls.apiclient, name='quota.enable.service', value='true'): + cls.debug("Quota service is not enabled, therefore the configuration `quota.enable.service` will be set to `true` and the management server will be restarted.") + Configurations.update(cls.apiclient, "quota.enable.service", "true") + cls.restartServer() + + return + + @classmethod + def restartServer(cls): + """Restart management server""" + + cls.debug("Restarting management server") + sshClient = SshClient( + cls.mgtSvrDetails["mgtSvrIp"], + 22, + cls.mgtSvrDetails["user"], + cls.mgtSvrDetails["passwd"] + ) + + command = "service cloudstack-management restart" + sshClient.execute(command) + + # Waits for management to come up in 5 mins, when it's up it will continue + timeout = time.time() + 300 + while time.time() < timeout: + if cls.isManagementUp() is True: + time.sleep(30) + return + time.sleep(5) + return cls.fail("Management server did not come up, failing") + + @classmethod + def isManagementUp(cls): + try: + cls.apiclient.listInfrastructure(listInfrastructure.listInfrastructureCmd()) + return True + except Exception: + return False + + @classmethod + def tearDownClass(cls): + try: + # Cleanup resources used + cleanup_resources(cls.apiclient, cls._cleanup) + except Exception as e: + raise Exception("Warning: Exception during cleanup : %s" % e) + return + + def setUp(self): + self.apiclient = self.testClient.getApiClient() + self.dbclient = self.testClient.getDbConnection() + self.cleanup = [] + self.tariffs = [] + return + + def tearDown(self): + try: + cleanup_resources(self.apiclient, self.cleanup) + self.delete_tariffs() + except Exception as e: + raise Exception("Warning: Exception during cleanup : %s" % e) + return + + def delete_tariffs(self): + for tariff in self.tariffs: + cmd = quotaTariffDelete.quotaTariffDeleteCmd() + cmd.id = tariff.uuid + self.apiclient.quotaTariffDelete(cmd) + + @attr(tags=["advanced", "smoke", "quota"], required_hardware="false") + def test_quota_balance(self): + """ + Test Quota balance + + Validate the following + 1. Add credits to an account + 2. Create Quota tariff for the usage type 21 (VM_DISK_IO_READ) + 3. Simulate quota usage by inserting a row in the `cloud_usage` table + 4. Update the balance of the account by calling the API quotaUpdate + 5. 
Verify the balance of the account according to the tariff created + """ + + # Create quota tariff for the usage type 21 (VM_DISK_IO_READ) + cmd = quotaTariffCreate.quotaTariffCreateCmd() + cmd.name = 'Tariff' + cmd.value = '10' + cmd.usagetype = '21' + self.tariffs.append(self.apiclient.quotaTariffCreate(cmd)) + + # Add credits to the account + cmd = quotaCredits.quotaCreditsCmd() + cmd.account = self.account.name + cmd.domainid = self.domain.id + cmd.value = 100 + self.apiclient.quotaCredits(cmd) + + # Fetch account ID from account_uuid + account_id_select = f"SELECT id FROM account WHERE uuid = '{self.account.id}';" + self.debug(account_id_select) + qresultset = self.dbclient.execute(account_id_select) + account_id = qresultset[0][0] + + # Fetch domain ID from domain_uuid + domain_id_select = f"SELECT id FROM `domain` d WHERE uuid = '{self.domain.id}';" + self.debug(domain_id_select) + qresultset = self.dbclient.execute(domain_id_select) + domain_id = qresultset[0][0] + + # Fetch zone ID from zone_uuid + zone_id_select = f"SELECT id from data_center dc where dc.uuid = '{self.zone.id}';" + self.debug(zone_id_select) + qresultset = self.dbclient.execute(zone_id_select) + zone_id = qresultset[0][0] + + start_date = datetime.datetime.now() + datetime.timedelta(seconds=1) + end_date = datetime.datetime.now() + datetime.timedelta(hours=1) + + # Manually insert a usage regarding the usage type 21 (VM_DISK_IO_READ) + sql_query = (f"INSERT INTO cloud_usage.cloud_usage (zone_id,account_id,domain_id,description,usage_display,usage_type,raw_usage,vm_instance_id,vm_name,offering_id,template_id," + f"usage_id,`type`,`size`,network_id,start_date,end_date,virtual_size,cpu_speed,cpu_cores,memory,quota_calculated,is_hidden,state)" + f" VALUES ('{zone_id}','{account_id}','{domain_id}','Test','1 Hrs',21,1,NULL,NULL,NULL,NULL,NULL,'VirtualMachine',NULL,NULL,'{start_date}','{end_date}',NULL,NULL,NULL,NULL,0,0,NULL);") + self.debug(sql_query) + self.dbclient.execute(sql_query) + + # Update quota to calculate the balance of the account + cmd = quotaUpdate.quotaUpdateCmd() + self.apiclient.quotaUpdate(cmd) + + # Retrieve the quota balance of the account + cmd = quotaBalance.quotaBalanceCmd() + cmd.domainid = self.account.domainid + cmd.account = self.account.name + response = self.apiclient.quotaBalance(cmd) + + self.debug(f"The quota balance for the account {self.account.name} is {response.balance}.") + self.assertEqual(response.balance.startquota, 90, f"The `startQuota` response field is supposed to be 90 but was {response.balance.startquota}.") + + return diff --git a/usage/src/main/java/com/cloud/usage/UsageManagerImpl.java b/usage/src/main/java/com/cloud/usage/UsageManagerImpl.java index a5231209574..6125f6f604d 100644 --- a/usage/src/main/java/com/cloud/usage/UsageManagerImpl.java +++ b/usage/src/main/java/com/cloud/usage/UsageManagerImpl.java @@ -425,9 +425,14 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna cal.add(Calendar.MILLISECOND, -1); endDate = cal.getTime().getTime(); } else { - endDate = cal.getTime().getTime(); // current time cal.add(Calendar.MINUTE, -1 * _aggregationDuration); + cal.set(Calendar.SECOND, 0); + cal.set(Calendar.MILLISECOND, 0); startDate = cal.getTime().getTime(); + + cal.add(Calendar.MINUTE, _aggregationDuration); + cal.add(Calendar.MILLISECOND, -1); + endDate = cal.getTime().getTime(); } parse(job, startDate, endDate); From f360f7048da13e9e3784c9b621342be4fe70e4cc Mon Sep 17 00:00:00 2001 From: Wei Zhou Date: Tue, 18 Jun 2024 08:28:20 +0200 
Subject: [PATCH 08/13] vmware: do not tear down vm disks if deploy-as-is vm has vm snapshots (#9243) --- .../com/cloud/hypervisor/vmware/resource/VmwareResource.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java index 1b7a76e26cf..830f4e7a25b 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java @@ -2203,7 +2203,7 @@ public class VmwareResource extends ServerResourceBase implements StoragePoolRes throw new Exception("Failed to find the newly create or relocated VM. vmName: " + vmInternalCSName); } } - if (deployAsIs) { + if (deployAsIs && !vmMo.hasSnapshot()) { s_logger.info("Mapping VM disks to spec disks and tearing down datadisks (if any)"); mapSpecDisksToClonedDisksAndTearDownDatadisks(vmMo, vmInternalCSName, specDisks); } From 591cc4f0027eca2945cd811c17fbb1ef0eba6ad8 Mon Sep 17 00:00:00 2001 From: Abhisar Sinha <63767682+abh1sar@users.noreply.github.com> Date: Tue, 18 Jun 2024 12:02:13 +0530 Subject: [PATCH 09/13] Add action button to enable/disable Oauth provider (#9242) --- .../oauth2/OAuth2AuthManagerImpl.java | 12 +++++------ ui/src/config/section/config.js | 20 ++++++++++++++++++- 2 files changed, 25 insertions(+), 7 deletions(-) diff --git a/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/OAuth2AuthManagerImpl.java b/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/OAuth2AuthManagerImpl.java index 85730651248..b0596f0c386 100644 --- a/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/OAuth2AuthManagerImpl.java +++ b/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/OAuth2AuthManagerImpl.java @@ -138,9 +138,9 @@ public class OAuth2AuthManagerImpl extends ManagerBase implements OAuth2AuthMana public OauthProviderVO registerOauthProvider(RegisterOAuthProviderCmd cmd) { String description = cmd.getDescription(); String provider = cmd.getProvider(); - String clientId = cmd.getClientId(); - String redirectUri = cmd.getRedirectUri(); - String secretKey = cmd.getSecretKey(); + String clientId = StringUtils.trim(cmd.getClientId()); + String redirectUri = StringUtils.trim(cmd.getRedirectUri()); + String secretKey = StringUtils.trim(cmd.getSecretKey()); if (!isOAuthPluginEnabled()) { throw new CloudRuntimeException("OAuth is not enabled, please enable to register"); @@ -170,9 +170,9 @@ public class OAuth2AuthManagerImpl extends ManagerBase implements OAuth2AuthMana public OauthProviderVO updateOauthProvider(UpdateOAuthProviderCmd cmd) { Long id = cmd.getId(); String description = cmd.getDescription(); - String clientId = cmd.getClientId(); - String redirectUri = cmd.getRedirectUri(); - String secretKey = cmd.getSecretKey(); + String clientId = StringUtils.trim(cmd.getClientId()); + String redirectUri = StringUtils.trim(cmd.getRedirectUri()); + String secretKey = StringUtils.trim(cmd.getSecretKey()); Boolean enabled = cmd.getEnabled(); OauthProviderVO providerVO = _oauthProviderDao.findById(id); diff --git a/ui/src/config/section/config.js b/ui/src/config/section/config.js index aa108b5b1fa..8f792e51ac9 100644 --- a/ui/src/config/section/config.js +++ b/ui/src/config/section/config.js @@ -101,7 +101,25 @@ 
export default { label: 'label.edit', dataView: true, popup: true, - args: ['description', 'clientid', 'redirecturi', 'secretkey', 'enabled'] + args: ['description', 'clientid', 'redirecturi', 'secretkey'] + }, + { + api: 'updateOauthProvider', + icon: 'play-circle-outlined', + label: 'label.enable.provider', + message: 'message.confirm.enable.provider', + dataView: true, + defaultArgs: { enabled: true }, + show: (record) => { return record.enabled === false } + }, + { + api: 'updateOauthProvider', + icon: 'pause-circle-outlined', + label: 'label.disable.provider', + message: 'message.confirm.disable.provider', + dataView: true, + defaultArgs: { enabled: false }, + show: (record) => { return record.enabled === true } }, { api: 'deleteOauthProvider', From 7c5b7ca0777ad0c1f2253521cc4cbd05279e3d80 Mon Sep 17 00:00:00 2001 From: dahn Date: Tue, 18 Jun 2024 09:01:17 +0200 Subject: [PATCH 10/13] Extra parameter for UpdateImageStore (#8941) * Extra parameter for UpdateImageStore * add name parameter * ui * cleanup * update DB from storage stats results --- .../com/cloud/storage/StorageService.java | 3 +++ .../admin/storage/UpdateImageStoreCmd.java | 21 +++++++++++++--- .../api/response/ImageStoreResponse.java | 14 +++++------ .../com/cloud/storage/StorageManager.java | 2 ++ .../com/cloud/api/query/QueryManagerImpl.java | 2 +- .../java/com/cloud/server/StatsCollector.java | 17 +++++++++++-- .../com/cloud/storage/StorageManagerImpl.java | 25 ++++++++++++++++--- .../config/section/infra/secondaryStorages.js | 17 +++---------- ui/src/views/AutogenView.vue | 1 - 9 files changed, 71 insertions(+), 31 deletions(-) diff --git a/api/src/main/java/com/cloud/storage/StorageService.java b/api/src/main/java/com/cloud/storage/StorageService.java index bb086ad05cb..d11246b8eaf 100644 --- a/api/src/main/java/com/cloud/storage/StorageService.java +++ b/api/src/main/java/com/cloud/storage/StorageService.java @@ -27,6 +27,7 @@ import org.apache.cloudstack.api.command.admin.storage.DeleteImageStoreCmd; import org.apache.cloudstack.api.command.admin.storage.DeletePoolCmd; import org.apache.cloudstack.api.command.admin.storage.DeleteSecondaryStagingStoreCmd; import org.apache.cloudstack.api.command.admin.storage.SyncStoragePoolCmd; +import org.apache.cloudstack.api.command.admin.storage.UpdateImageStoreCmd; import org.apache.cloudstack.api.command.admin.storage.UpdateStoragePoolCmd; import com.cloud.exception.DiscoveryException; @@ -103,6 +104,8 @@ public interface StorageService { */ ImageStore migrateToObjectStore(String name, String url, String providerName, Map details) throws DiscoveryException; + ImageStore updateImageStore(UpdateImageStoreCmd cmd); + ImageStore updateImageStoreStatus(Long id, Boolean readonly); void updateStorageCapabilities(Long poolId, boolean failOnChecks); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateImageStoreCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateImageStoreCmd.java index d7dca93b485..87d056cabf7 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateImageStoreCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateImageStoreCmd.java @@ -41,10 +41,17 @@ public class UpdateImageStoreCmd extends BaseCmd { @Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = ImageStoreResponse.class, required = true, description = "Image Store UUID") private Long id; - @Parameter(name = ApiConstants.READ_ONLY, type = CommandType.BOOLEAN, required 
= true, description = "If set to true, it designates the corresponding image store to read-only, " + - "hence not considering them during storage migration") + @Parameter(name = ApiConstants.NAME, type = CommandType.STRING, required = false, description = "The new name for the Image Store.") + private String name; + + @Parameter(name = ApiConstants.READ_ONLY, type = CommandType.BOOLEAN, required = false, + description = "If set to true, it designates the corresponding image store to read-only, hence not considering them during storage migration") private Boolean readonly; + @Parameter(name = ApiConstants.CAPACITY_BYTES, type = CommandType.LONG, required = false, + description = "The number of bytes CloudStack can use on this image storage.\n\tNOTE: this will be overwritten by the StatsCollector as soon as there is a SSVM to query the storage.") + private Long capacityBytes; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -53,17 +60,25 @@ public class UpdateImageStoreCmd extends BaseCmd { return id; } + public String getName() { + return name; + } + public Boolean getReadonly() { return readonly; } + public Long getCapacityBytes() { + return capacityBytes; + } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// @Override public void execute() { - ImageStore result = _storageService.updateImageStoreStatus(getId(), getReadonly()); + ImageStore result = _storageService.updateImageStore(this); ImageStoreResponse storeResponse = null; if (result != null) { storeResponse = _responseGenerator.createImageStoreResponse(result); diff --git a/api/src/main/java/org/apache/cloudstack/api/response/ImageStoreResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/ImageStoreResponse.java index 532963dbddc..ee44b6bc474 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/ImageStoreResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/ImageStoreResponse.java @@ -27,11 +27,11 @@ import com.google.gson.annotations.SerializedName; @EntityReference(value = ImageStore.class) public class ImageStoreResponse extends BaseResponseWithAnnotations { - @SerializedName("id") + @SerializedName(ApiConstants.ID) @Param(description = "the ID of the image store") private String id; - @SerializedName("zoneid") + @SerializedName(ApiConstants.ZONE_ID) @Param(description = "the Zone ID of the image store") private String zoneId; @@ -39,15 +39,15 @@ public class ImageStoreResponse extends BaseResponseWithAnnotations { @Param(description = "the Zone name of the image store") private String zoneName; - @SerializedName("name") + @SerializedName(ApiConstants.NAME) @Param(description = "the name of the image store") private String name; - @SerializedName("url") + @SerializedName(ApiConstants.URL) @Param(description = "the url of the image store") private String url; - @SerializedName("protocol") + @SerializedName(ApiConstants.PROTOCOL) @Param(description = "the protocol of the image store") private String protocol; @@ -55,11 +55,11 @@ public class ImageStoreResponse extends BaseResponseWithAnnotations { @Param(description = "the provider name of the image store") private String providerName; - @SerializedName("scope") + @SerializedName(ApiConstants.SCOPE) @Param(description = "the scope of the image store") private ScopeType scope; - @SerializedName("readonly") + 
@SerializedName(ApiConstants.READ_ONLY) @Param(description = "defines if store is read-only") private Boolean readonly; diff --git a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java index 7fdec905242..411a6caa83b 100644 --- a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java +++ b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java @@ -350,6 +350,8 @@ public interface StorageManager extends StorageService { Long getDiskIopsWriteRate(ServiceOffering offering, DiskOffering diskOffering); + ImageStore updateImageStoreStatus(Long id, String name, Boolean readonly, Long capacityBytes); + void cleanupDownloadUrls(); void setDiskProfileThrottling(DiskProfile dskCh, ServiceOffering offering, DiskOffering diskOffering); diff --git a/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java b/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java index 51aed5af66c..2dea6f85d9e 100644 --- a/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java +++ b/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java @@ -2739,7 +2739,7 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q @Override public ListResponse searchForImageStores(ListImageStoresCmd cmd) { Pair, Integer> result = searchForImageStoresInternal(cmd); - ListResponse response = new ListResponse(); + ListResponse response = new ListResponse<>(); List poolResponses = ViewResponseHelper.createImageStoreResponse(result.first().toArray(new ImageStoreJoinVO[result.first().size()])); response.setResponses(poolResponses, result.second()); diff --git a/server/src/main/java/com/cloud/server/StatsCollector.java b/server/src/main/java/com/cloud/server/StatsCollector.java index 19820093f3a..e44c094ab8e 100644 --- a/server/src/main/java/com/cloud/server/StatsCollector.java +++ b/server/src/main/java/com/cloud/server/StatsCollector.java @@ -1671,7 +1671,7 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc } List stores = _dataStoreMgr.listImageStores(); - ConcurrentHashMap storageStats = new ConcurrentHashMap(); + ConcurrentHashMap storageStats = new ConcurrentHashMap<>(); for (DataStore store : stores) { if (store.getUri() == null) { continue; @@ -1691,7 +1691,7 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc LOGGER.trace("HostId: " + storeId + " Used: " + toHumanReadableSize(((StorageStats)answer).getByteUsed()) + " Total Available: " + toHumanReadableSize(((StorageStats)answer).getCapacityBytes())); } } - _storageStats = storageStats; + updateStorageStats(storageStats); ConcurrentHashMap storagePoolStats = new ConcurrentHashMap(); List storagePools = _storagePoolDao.listAll(); @@ -1737,6 +1737,19 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc LOGGER.error("Error trying to retrieve storage stats", t); } } + + private void updateStorageStats(ConcurrentHashMap storageStats) { + for (Long storeId : storageStats.keySet()) { + if (_storageStats.containsKey(storeId) + && (_storageStats.get(storeId).getCapacityBytes() == 0l + || _storageStats.get(storeId).getCapacityBytes() != storageStats.get(storeId).getCapacityBytes())) { + // get add to DB rigorously + _storageManager.updateImageStoreStatus(storeId, null, null, storageStats.get(storeId).getCapacityBytes()); + } + } + // if in _storageStats and not in storageStats it gets discarded + _storageStats = 
storageStats; + } } class AutoScaleMonitor extends ManagedContextRunnable { diff --git a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java index c6379a7e4c3..290c6922e0d 100644 --- a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java @@ -57,6 +57,7 @@ import org.apache.cloudstack.api.command.admin.storage.DeleteImageStoreCmd; import org.apache.cloudstack.api.command.admin.storage.DeletePoolCmd; import org.apache.cloudstack.api.command.admin.storage.DeleteSecondaryStagingStoreCmd; import org.apache.cloudstack.api.command.admin.storage.SyncStoragePoolCmd; +import org.apache.cloudstack.api.command.admin.storage.UpdateImageStoreCmd; import org.apache.cloudstack.api.command.admin.storage.UpdateStoragePoolCmd; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; @@ -116,7 +117,6 @@ import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO; import org.apache.cloudstack.storage.image.datastore.ImageStoreEntity; import org.apache.cloudstack.storage.to.VolumeObjectTO; import org.apache.commons.collections.CollectionUtils; -import org.apache.commons.lang3.StringUtils; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -212,6 +212,7 @@ import com.cloud.utils.DateUtil; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; import com.cloud.utils.UriUtils; +import com.cloud.utils.StringUtils; import com.cloud.utils.component.ComponentContext; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.concurrency.NamedThreadFactory; @@ -2998,17 +2999,35 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } @Override - public ImageStore updateImageStoreStatus(Long id, Boolean readonly) { + public ImageStore updateImageStore(UpdateImageStoreCmd cmd) { + return updateImageStoreStatus(cmd.getId(), cmd.getName(), cmd.getReadonly(), cmd.getCapacityBytes()); + } + + @Override + public ImageStore updateImageStoreStatus(Long id, String name, Boolean readonly, Long capacityBytes) { // Input validation ImageStoreVO imageStoreVO = _imageStoreDao.findById(id); if (imageStoreVO == null) { throw new IllegalArgumentException("Unable to find image store with ID: " + id); } - imageStoreVO.setReadonly(readonly); + if (com.cloud.utils.StringUtils.isNotBlank(name)) { + imageStoreVO.setName(name); + } + if (capacityBytes != null) { + imageStoreVO.setTotalSize(capacityBytes); + } + if (readonly != null) { + imageStoreVO.setReadonly(readonly); + } _imageStoreDao.update(id, imageStoreVO); return imageStoreVO; } + @Override + public ImageStore updateImageStoreStatus(Long id, Boolean readonly) { + return updateImageStoreStatus(id, null, readonly, null); + } + /** * @param poolId - Storage pool id for pool to update. * @param failOnChecks - If true, throw an error if pool type and state checks fail. 
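For anyone exercising the new UpdateImageStore parameters added above, a minimal CloudMonkey sketch might look like the following; the image store UUID and the capacity value are illustrative placeholders, not values taken from this patch:

    $ cmk update imagestore id=<image-store-uuid> name=sec-storage-01 capacitybytes=1099511627776
    $ cmk update imagestore id=<image-store-uuid> readonly=true

As the parameter description notes, a capacitybytes value set this way is overwritten again once the StatsCollector pulls fresh storage stats through an SSVM.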
diff --git a/ui/src/config/section/infra/secondaryStorages.js b/ui/src/config/section/infra/secondaryStorages.js index 93c54dc1d79..793aa02e966 100644 --- a/ui/src/config/section/infra/secondaryStorages.js +++ b/ui/src/config/section/infra/secondaryStorages.js @@ -77,21 +77,10 @@ export default { }, { api: 'updateImageStore', - icon: 'stop-outlined', - label: 'label.action.image.store.read.only', - message: 'message.action.secondary.storage.read.only', + icon: 'edit-outlined', + label: 'label.edit', dataView: true, - defaultArgs: { readonly: true }, - show: (record) => { return record.readonly === false } - }, - { - api: 'updateImageStore', - icon: 'check-circle-outlined', - label: 'label.action.image.store.read.write', - message: 'message.action.secondary.storage.read.write', - dataView: true, - defaultArgs: { readonly: false }, - show: (record) => { return record.readonly === true } + args: ['name', 'readonly', 'capacitybytes'] }, { api: 'deleteImageStore', diff --git a/ui/src/views/AutogenView.vue b/ui/src/views/AutogenView.vue index acb6fd6e89c..6cf054359fb 100644 --- a/ui/src/views/AutogenView.vue +++ b/ui/src/views/AutogenView.vue @@ -679,7 +679,6 @@ export default { }, getOkProps () { if (this.selectedRowKeys.length > 0 && this.currentAction?.groupAction) { - return { props: { type: 'default' } } } else { return { props: { type: 'primary' } } } From a10eee25b3432a268597e54e05024933bed90c0a Mon Sep 17 00:00:00 2001 From: Bryan Lima <42067040+BryanMLima@users.noreply.github.com> Date: Tue, 18 Jun 2024 05:31:35 -0300 Subject: [PATCH 11/13] Add method for decrypting values accordingly (#9088) --- .../com/cloud/domain/dao/DomainDetailsDao.java | 2 ++ .../cloud/domain/dao/DomainDetailsDaoImpl.java | 15 +++++++++++++-- .../java/com/cloud/user/AccountDetailsDao.java | 2 ++ .../com/cloud/user/AccountDetailsDaoImpl.java | 15 +++++++++++++-- 4 files changed, 30 insertions(+), 4 deletions(-) diff --git a/engine/schema/src/main/java/com/cloud/domain/dao/DomainDetailsDao.java b/engine/schema/src/main/java/com/cloud/domain/dao/DomainDetailsDao.java index 51362cf885e..6b53e49764e 100644 --- a/engine/schema/src/main/java/com/cloud/domain/dao/DomainDetailsDao.java +++ b/engine/schema/src/main/java/com/cloud/domain/dao/DomainDetailsDao.java @@ -31,4 +31,6 @@ public interface DomainDetailsDao extends GenericDao { void deleteDetails(long domainId); void update(long domainId, Map details); + + String getActualValue(DomainDetailVO domainDetailVO); } diff --git a/engine/schema/src/main/java/com/cloud/domain/dao/DomainDetailsDaoImpl.java b/engine/schema/src/main/java/com/cloud/domain/dao/DomainDetailsDaoImpl.java index dad3fe9ad1e..50097d154f5 100644 --- a/engine/schema/src/main/java/com/cloud/domain/dao/DomainDetailsDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/domain/dao/DomainDetailsDaoImpl.java @@ -24,6 +24,7 @@ import javax.inject.Inject; import com.cloud.domain.DomainDetailVO; import com.cloud.domain.DomainVO; +import com.cloud.utils.crypt.DBEncryptionUtil; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.QueryBuilder; import com.cloud.utils.db.SearchBuilder; @@ -34,6 +35,7 @@ import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.ConfigKey.Scope; import org.apache.cloudstack.framework.config.ScopedConfigStorage; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.framework.config.impl.ConfigurationVO; public class DomainDetailsDaoImpl extends GenericDaoBase implements 
DomainDetailsDao, ScopedConfigStorage { protected final SearchBuilder domainSearch; @@ -111,7 +113,7 @@ public class DomainDetailsDaoImpl extends GenericDaoBase i String enableDomainSettingsForChildDomain = _configDao.getValue("enable.domain.settings.for.child.domain"); if (!Boolean.parseBoolean(enableDomainSettingsForChildDomain)) { vo = findDetail(id, key.key()); - return vo == null ? null : vo.getValue(); + return vo == null ? null : getActualValue(vo); } DomainVO domain = _domainDao.findById(id); // if value is not configured in domain then check its parent domain till ROOT @@ -125,6 +127,15 @@ public class DomainDetailsDaoImpl extends GenericDaoBase i break; } } - return vo == null ? null : vo.getValue(); + return vo == null ? null : getActualValue(vo); + } + + @Override + public String getActualValue(DomainDetailVO domainDetailVO) { + ConfigurationVO configurationVO = _configDao.findByName(domainDetailVO.getName()); + if (configurationVO != null && configurationVO.isEncrypted()) { + return DBEncryptionUtil.decrypt(domainDetailVO.getValue()); + } + return domainDetailVO.getValue(); } } diff --git a/engine/schema/src/main/java/com/cloud/user/AccountDetailsDao.java b/engine/schema/src/main/java/com/cloud/user/AccountDetailsDao.java index f4534ee41ee..514433e8068 100644 --- a/engine/schema/src/main/java/com/cloud/user/AccountDetailsDao.java +++ b/engine/schema/src/main/java/com/cloud/user/AccountDetailsDao.java @@ -34,4 +34,6 @@ public interface AccountDetailsDao extends GenericDao { * they will get created */ void update(long accountId, Map details); + + String getActualValue(AccountDetailVO accountDetailVO); } diff --git a/engine/schema/src/main/java/com/cloud/user/AccountDetailsDaoImpl.java b/engine/schema/src/main/java/com/cloud/user/AccountDetailsDaoImpl.java index 5451192fc6d..de562e27f9e 100644 --- a/engine/schema/src/main/java/com/cloud/user/AccountDetailsDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/user/AccountDetailsDaoImpl.java @@ -23,6 +23,7 @@ import java.util.Optional; import javax.inject.Inject; +import com.cloud.utils.crypt.DBEncryptionUtil; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.ConfigKey.Scope; import org.apache.cloudstack.framework.config.ScopedConfigStorage; @@ -40,6 +41,7 @@ import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Op; import com.cloud.utils.db.TransactionLegacy; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.framework.config.impl.ConfigurationVO; public class AccountDetailsDaoImpl extends GenericDaoBase implements AccountDetailsDao, ScopedConfigStorage { protected final SearchBuilder accountSearch; @@ -119,7 +121,7 @@ public class AccountDetailsDaoImpl extends GenericDaoBase public String getConfigValue(long id, ConfigKey key) { // check if account level setting is configured AccountDetailVO vo = findDetail(id, key.key()); - String value = vo == null ? null : vo.getValue(); + String value = vo == null ? 
null : getActualValue(vo); if (value != null) { return value; } @@ -140,7 +142,7 @@ public class AccountDetailsDaoImpl extends GenericDaoBase while (domain != null) { DomainDetailVO domainVO = _domainDetailsDao.findDetail(domain.getId(), key.key()); if (domainVO != null) { - value = domainVO.getValue(); + value = _domainDetailsDao.getActualValue(domainVO); break; } else if (domain.getParent() != null) { domain = _domainDao.findById(domain.getParent()); @@ -152,4 +154,13 @@ public class AccountDetailsDaoImpl extends GenericDaoBase } return value; } + + @Override + public String getActualValue(AccountDetailVO accountDetailVO) { + ConfigurationVO configurationVO = _configDao.findByName(accountDetailVO.getName()); + if (configurationVO != null && configurationVO.isEncrypted()) { + return DBEncryptionUtil.decrypt(accountDetailVO.getValue()); + } + return accountDetailVO.getValue(); + } } From 7534196361434b8dafbaaa6f2ad0a22efddedab7 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Tue, 18 Jun 2024 06:11:08 -0400 Subject: [PATCH 12/13] UI: Fix Userdata registration from UI (#8791) * UI: Fix Userdata registration from UI * add isbase64 checkbox --- ui/public/locales/en.json | 1 + ui/src/utils/plugins.js | 12 ++++++++++-- ui/src/views/compute/RegisterUserData.vue | 9 +++++++-- 3 files changed, 18 insertions(+), 4 deletions(-) diff --git a/ui/public/locales/en.json b/ui/public/locales/en.json index e798a798c70..c49fef5e4fe 100644 --- a/ui/public/locales/en.json +++ b/ui/public/locales/en.json @@ -1110,6 +1110,7 @@ "label.ipv6.subnets": "IPv6 Subnets", "label.ip.addresses": "IP Addresses", "label.iqn": "Target IQN", +"label.is.base64.encoded": "Base64 encoded", "label.is.in.progress": "is in progress", "label.is.shared": "Is shared", "label.is2faenabled": "Is 2FA enabled", diff --git a/ui/src/utils/plugins.js b/ui/src/utils/plugins.js index acaebf45892..e358988fac9 100644 --- a/ui/src/utils/plugins.js +++ b/ui/src/utils/plugins.js @@ -486,6 +486,15 @@ export const fileSizeUtilPlugin = { } } +function isBase64 (str) { + try { + const decoded = new TextDecoder().decode(Uint8Array.from(atob(str), c => c.charCodeAt(0))) + return btoa(decoded) === str + } catch (err) { + return false + } +} + export const genericUtilPlugin = { install (app) { app.config.globalProperties.$isValidUuid = function (uuid) { @@ -494,8 +503,7 @@ export const genericUtilPlugin = { } app.config.globalProperties.$toBase64AndURIEncoded = function (text) { - const base64regex = /^([0-9a-zA-Z+/]{4})*(([0-9a-zA-Z+/]{2}==)|([0-9a-zA-Z+/]{3}=))?$/ - if (base64regex.test(text)) { + if (isBase64(text)) { return text } return encodeURIComponent(btoa(unescape(encodeURIComponent(text)))) diff --git a/ui/src/views/compute/RegisterUserData.vue b/ui/src/views/compute/RegisterUserData.vue index 990e59ff277..8e6311fdf9a 100644 --- a/ui/src/views/compute/RegisterUserData.vue +++ b/ui/src/views/compute/RegisterUserData.vue @@ -43,6 +43,9 @@ v-model:value="form.userdata" :placeholder="apiParams.userdata.description"/> + + +