From 4e7c6682fd4959787b6422e9e5ae5560f571d2d5 Mon Sep 17 00:00:00 2001 From: Suresh Kumar Anaparti Date: Wed, 12 Jun 2024 11:19:03 +0530 Subject: [PATCH 1/3] While starting VM with considerlasthost enabled, don't load host tags/details for the last host when it doesn't exist (#9037) --- .../deploy/DeploymentPlanningManagerImpl.java | 168 +++++++++--------- 1 file changed, 85 insertions(+), 83 deletions(-) diff --git a/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java b/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java index 0ef462bb96c..b70530ef6db 100644 --- a/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java +++ b/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java @@ -415,105 +415,107 @@ StateListener, Configurable { s_logger.debug("This VM has last host_id specified, trying to choose the same host: " + vm.getLastHostId()); HostVO host = _hostDao.findById(vm.getLastHostId()); - _hostDao.loadHostTags(host); - _hostDao.loadDetails(host); ServiceOfferingDetailsVO offeringDetails = null; if (host == null) { s_logger.debug("The last host of this VM cannot be found"); - } else if (avoids.shouldAvoid(host)) { - s_logger.debug("The last host of this VM is in avoid set"); - } else if (plan.getClusterId() != null && host.getClusterId() != null - && !plan.getClusterId().equals(host.getClusterId())) { - s_logger.debug("The last host of this VM cannot be picked as the plan specifies different clusterId: " - + plan.getClusterId()); - } else if (_capacityMgr.checkIfHostReachMaxGuestLimit(host)) { - s_logger.debug("The last Host, hostId: " + host.getId() + - " already has max Running VMs(count includes system VMs), skipping this and trying other available hosts"); - } else if ((offeringDetails = _serviceOfferingDetailsDao.findDetail(offering.getId(), GPU.Keys.vgpuType.toString())) != null) { - ServiceOfferingDetailsVO groupName = _serviceOfferingDetailsDao.findDetail(offering.getId(), GPU.Keys.pciDevice.toString()); - if(!_resourceMgr.isGPUDeviceAvailable(host.getId(), groupName.getValue(), offeringDetails.getValue())){ - s_logger.debug("The last host of this VM does not have required GPU devices available"); - } - } else if (volumesRequireEncryption && !Boolean.parseBoolean(host.getDetail(Host.HOST_VOLUME_ENCRYPTION))) { - s_logger.warn(String.format("The last host of this VM %s does not support volume encryption, which is required by this VM.", host)); } else { - if (host.getStatus() == Status.Up) { - if (checkVmProfileAndHost(vmProfile, host)) { - long cluster_id = host.getClusterId(); - ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id, - "cpuOvercommitRatio"); - ClusterDetailsVO cluster_detail_ram = _clusterDetailsDao.findDetail(cluster_id, - "memoryOvercommitRatio"); - Float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue()); - Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue()); + _hostDao.loadHostTags(host); + _hostDao.loadDetails(host); + if (avoids.shouldAvoid(host)) { + s_logger.debug("The last host of this VM is in avoid set"); + } else if (plan.getClusterId() != null && host.getClusterId() != null + && !plan.getClusterId().equals(host.getClusterId())) { + s_logger.debug("The last host of this VM cannot be picked as the plan specifies different clusterId: " + + plan.getClusterId()); + } else if (_capacityMgr.checkIfHostReachMaxGuestLimit(host)) { + s_logger.debug("The last Host, hostId: " + host.getId() + + " already has max 
Running VMs(count includes system VMs), skipping this and trying other available hosts"); + } else if ((offeringDetails = _serviceOfferingDetailsDao.findDetail(offering.getId(), GPU.Keys.vgpuType.toString())) != null) { + ServiceOfferingDetailsVO groupName = _serviceOfferingDetailsDao.findDetail(offering.getId(), GPU.Keys.pciDevice.toString()); + if(!_resourceMgr.isGPUDeviceAvailable(host.getId(), groupName.getValue(), offeringDetails.getValue())){ + s_logger.debug("The last host of this VM does not have required GPU devices available"); + } + } else if (volumesRequireEncryption && !Boolean.parseBoolean(host.getDetail(Host.HOST_VOLUME_ENCRYPTION))) { + s_logger.warn(String.format("The last host of this VM %s does not support volume encryption, which is required by this VM.", host)); + } else { + if (host.getStatus() == Status.Up) { + if (checkVmProfileAndHost(vmProfile, host)) { + long cluster_id = host.getClusterId(); + ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id, + "cpuOvercommitRatio"); + ClusterDetailsVO cluster_detail_ram = _clusterDetailsDao.findDetail(cluster_id, + "memoryOvercommitRatio"); + Float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue()); + Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue()); - boolean hostHasCpuCapability, hostHasCapacity = false; - hostHasCpuCapability = _capacityMgr.checkIfHostHasCpuCapability(host.getId(), offering.getCpu(), offering.getSpeed()); + boolean hostHasCpuCapability, hostHasCapacity = false; + hostHasCpuCapability = _capacityMgr.checkIfHostHasCpuCapability(host.getId(), offering.getCpu(), offering.getSpeed()); - if (hostHasCpuCapability) { - // first check from reserved capacity - hostHasCapacity = _capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, true, cpuOvercommitRatio, memoryOvercommitRatio, true); + if (hostHasCpuCapability) { + // first check from reserved capacity + hostHasCapacity = _capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, true, cpuOvercommitRatio, memoryOvercommitRatio, true); - // if not reserved, check the free capacity - if (!hostHasCapacity) - hostHasCapacity = _capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, false, cpuOvercommitRatio, memoryOvercommitRatio, true); - } - - boolean displayStorage = getDisplayStorageFromVmProfile(vmProfile); - if (hostHasCapacity - && hostHasCpuCapability) { - s_logger.debug("The last host of this VM is UP and has enough capacity"); - s_logger.debug("Now checking for suitable pools under zone: " + host.getDataCenterId() - + ", pod: " + host.getPodId() + ", cluster: " + host.getClusterId()); - - Pod pod = _podDao.findById(host.getPodId()); - Cluster cluster = _clusterDao.findById(host.getClusterId()); - if (vm.getHypervisorType() == HypervisorType.BareMetal) { - DeployDestination dest = new DeployDestination(dc, pod, cluster, host, new HashMap(), displayStorage); - s_logger.debug("Returning Deployment Destination: " + dest); - return dest; + // if not reserved, check the free capacity + if (!hostHasCapacity) + hostHasCapacity = _capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, false, cpuOvercommitRatio, memoryOvercommitRatio, true); } - // search for storage under the zone, pod, cluster - // of - // the last host. 
- DataCenterDeployment lastPlan = new DataCenterDeployment(host.getDataCenterId(), - host.getPodId(), host.getClusterId(), host.getId(), plan.getPoolId(), null); - Pair>, List> result = findSuitablePoolsForVolumes( - vmProfile, lastPlan, avoids, HostAllocator.RETURN_UPTO_ALL); - Map> suitableVolumeStoragePools = result.first(); - List readyAndReusedVolumes = result.second(); + boolean displayStorage = getDisplayStorageFromVmProfile(vmProfile); + if (hostHasCapacity + && hostHasCpuCapability) { + s_logger.debug("The last host of this VM is UP and has enough capacity"); + s_logger.debug("Now checking for suitable pools under zone: " + host.getDataCenterId() + + ", pod: " + host.getPodId() + ", cluster: " + host.getClusterId()); - // choose the potential pool for this VM for this - // host - if (!suitableVolumeStoragePools.isEmpty()) { - List suitableHosts = new ArrayList(); - suitableHosts.add(host); - Pair> potentialResources = findPotentialDeploymentResources( - suitableHosts, suitableVolumeStoragePools, avoids, - getPlannerUsage(planner, vmProfile, plan, avoids), readyAndReusedVolumes, plan.getPreferredHosts(), vm); - if (potentialResources != null) { - Map storageVolMap = potentialResources.second(); - // remove the reused vol<->pool from - // destination, since we don't have to - // prepare - // this volume. - for (Volume vol : readyAndReusedVolumes) { - storageVolMap.remove(vol); - } - DeployDestination dest = new DeployDestination(dc, pod, cluster, host, - storageVolMap, displayStorage); + Pod pod = _podDao.findById(host.getPodId()); + Cluster cluster = _clusterDao.findById(host.getClusterId()); + if (vm.getHypervisorType() == HypervisorType.BareMetal) { + DeployDestination dest = new DeployDestination(dc, pod, cluster, host, new HashMap(), displayStorage); s_logger.debug("Returning Deployment Destination: " + dest); return dest; } + + // search for storage under the zone, pod, cluster + // of + // the last host. + DataCenterDeployment lastPlan = new DataCenterDeployment(host.getDataCenterId(), + host.getPodId(), host.getClusterId(), host.getId(), plan.getPoolId(), null); + Pair>, List> result = findSuitablePoolsForVolumes( + vmProfile, lastPlan, avoids, HostAllocator.RETURN_UPTO_ALL); + Map> suitableVolumeStoragePools = result.first(); + List readyAndReusedVolumes = result.second(); + + // choose the potential pool for this VM for this + // host + if (!suitableVolumeStoragePools.isEmpty()) { + List suitableHosts = new ArrayList(); + suitableHosts.add(host); + Pair> potentialResources = findPotentialDeploymentResources( + suitableHosts, suitableVolumeStoragePools, avoids, + getPlannerUsage(planner, vmProfile, plan, avoids), readyAndReusedVolumes, plan.getPreferredHosts(), vm); + if (potentialResources != null) { + Map storageVolMap = potentialResources.second(); + // remove the reused vol<->pool from + // destination, since we don't have to + // prepare + // this volume. 
+ for (Volume vol : readyAndReusedVolumes) { + storageVolMap.remove(vol); + } + DeployDestination dest = new DeployDestination(dc, pod, cluster, host, + storageVolMap, displayStorage); + s_logger.debug("Returning Deployment Destination: " + dest); + return dest; + } + } + } else { + s_logger.debug("The last host of this VM does not have enough capacity"); } - } else { - s_logger.debug("The last host of this VM does not have enough capacity"); } + } else { + s_logger.debug("The last host of this VM is not UP or is not enabled, host status is: " + host.getStatus().name() + ", host resource state is: " + + host.getResourceState()); } - } else { - s_logger.debug("The last host of this VM is not UP or is not enabled, host status is: " + host.getStatus().name() + ", host resource state is: " + - host.getResourceState()); } } s_logger.debug("Cannot choose the last host to deploy this VM "); From 56b69b157ea646d7408a6daa996d9c53b572d85d Mon Sep 17 00:00:00 2001 From: dahn Date: Fri, 14 Jun 2024 09:01:15 +0200 Subject: [PATCH 2/3] prevent duplicate ip table rules in SSVM (#8530) Co-authored-by: Wei Zhou --- .../SecondaryStorageManagerImpl.java | 4 +- .../storage/resource/IpTablesHelper.java | 66 +++++++++++++++++++ .../resource/NfsSecondaryStorageResource.java | 20 ++---- .../storage/template/DownloadManagerImpl.java | 40 +++++------ systemvm/debian/opt/cloud/bin/cs/CsHelper.py | 2 +- .../java/com/cloud/utils/script/Script.java | 11 +++- 6 files changed, 105 insertions(+), 38 deletions(-) create mode 100644 services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/IpTablesHelper.java diff --git a/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java b/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java index 59ac4f44938..9df8906070f 100644 --- a/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java +++ b/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java @@ -531,8 +531,8 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar /** * Get the default network for the secondary storage VM, based on the zone it is in. Delegates to - * either {@link #getDefaultNetworkForZone(DataCenter)} or {@link #getDefaultNetworkForAdvancedSGZone(DataCenter)}, - * depending on the zone network type and whether or not security groups are enabled in the zone. + * either {@link #getDefaultNetworkForAdvancedZone(DataCenter)} or {@link #getDefaultNetworkForBasicZone(DataCenter)}, + * depending on the zone network type and whether security groups are enabled in the zone. * @param dc - The zone (DataCenter) of the secondary storage VM. * @return The default network for use with the secondary storage VM. */ diff --git a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/IpTablesHelper.java b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/IpTablesHelper.java new file mode 100644 index 00000000000..b9f0d2fdc77 --- /dev/null +++ b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/IpTablesHelper.java @@ -0,0 +1,66 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+
+package org.apache.cloudstack.storage.resource;
+
+import com.cloud.utils.script.Script;
+import org.apache.log4j.Logger;
+
+public class IpTablesHelper {
+    public static final Logger LOGGER = Logger.getLogger(IpTablesHelper.class);
+
+    public static final String OUTPUT_CHAIN = "OUTPUT";
+    public static final String INPUT_CHAIN = "INPUT";
+    public static final String INSERT = " -I ";
+    public static final String APPEND = " -A ";
+
+    public static boolean needsAdding(String chain, String rule) {
+        Script command = new Script("/bin/bash", LOGGER);
+        command.add("-c");
+        command.add("iptables -C " + chain + " " + rule);
+
+        String commandOutput = command.execute();
+        boolean needsAdding = (commandOutput != null && commandOutput.contains("iptables: Bad rule (does a matching rule exist in that chain?)."));
+        LOGGER.debug(String.format("Rule [%s], %s need adding to [%s] : %s",
+                rule,
+                needsAdding ? "does indeed" : "doesn't",
+                chain,
+                commandOutput
+        ));
+        return needsAdding;
+    }
+
+    public static String addConditionally(String chain, boolean insert, String rule, String errMsg) {
+        LOGGER.info(String.format("Adding rule [%s] to [%s] if required.", rule, chain));
+        if (needsAdding(chain, rule)) {
+            Script command = new Script("/bin/bash", LOGGER);
+            command.add("-c");
+            command.add("iptables" + (insert ? INSERT : APPEND) + chain + " " + rule);
+            String result = command.execute();
+            LOGGER.debug(String.format("Executed [%s] with result [%s]", command, result));
+            if (result != null) {
+                LOGGER.warn(String.format("%s , err = %s", errMsg, result));
+                return errMsg + result;
+            }
+        } else {
+            LOGGER.warn("Rule already defined in SVM: " + rule);
+        }
+        return null;
+    }
+}
diff --git a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java
index d8f7395c885..8d2ebcd0458 100644
--- a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java
+++ b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java
@@ -2287,15 +2287,14 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
         if (!_inSystemVM) {
             return null;
         }
-        Script command = new Script("/bin/bash", s_logger);
         String intf = "eth1";
-        command.add("-c");
-        command.add("iptables -I OUTPUT -o " + intf + " -d " + destCidr + " -p tcp -m state --state NEW -m tcp -j ACCEPT");
+        String rule = String.format("-o %s -d %s -p tcp -m state --state NEW -m tcp -j ACCEPT", intf, destCidr);
+        String errMsg = String.format("Error in allowing outgoing to %s", destCidr);

-        String result = command.execute();
+        s_logger.info(String.format("Adding rule if required: " + rule));
+        String result = IpTablesHelper.addConditionally(IpTablesHelper.OUTPUT_CHAIN, true, rule, errMsg);
         if (result != null) {
-            s_logger.warn("Error in allowing outgoing to " + destCidr + ", err=" + result);
-            return "Error in allowing outgoing to " + destCidr + ", err=" + result;
+            return result;
         }

         addRouteToInternalIpOrCidr(_localgw, _eth1ip, _eth1mask, destCidr);
@@ -2832,13 +2831,8 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
         if (result != null) {
             s_logger.warn("Error in starting sshd service err=" + result);
         }
-        command = new Script("/bin/bash", s_logger);
-        command.add("-c");
-        command.add("iptables -I INPUT -i eth1 -p tcp -m state --state NEW -m tcp --dport 3922 -j ACCEPT");
-        result = command.execute();
-        if (result != null) {
-            s_logger.warn("Error in opening up ssh port err=" + result);
-        }
+        String rule = "-i eth1 -p tcp -m state --state NEW -m tcp --dport 3922 -j ACCEPT";
+        IpTablesHelper.addConditionally(IpTablesHelper.INPUT_CHAIN, true, rule, "Error in opening up ssh port");
     }

     private void addRouteToInternalIpOrCidr(String localgw, String eth1ip, String eth1mask, String destIpOrCidr) {
diff --git a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/template/DownloadManagerImpl.java b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/template/DownloadManagerImpl.java
index e14977682b6..4d68aff76a2 100644
--- a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/template/DownloadManagerImpl.java
+++ b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/template/DownloadManagerImpl.java
@@ -60,6 +60,7 @@ import org.apache.cloudstack.storage.command.DownloadCommand.ResourceType;
 import org.apache.cloudstack.storage.command.DownloadProgressCommand;
 import org.apache.cloudstack.storage.command.DownloadProgressCommand.RequestType;
 import org.apache.cloudstack.storage.NfsMountManagerImpl.PathParser;
+import org.apache.cloudstack.storage.resource.IpTablesHelper;
 import org.apache.cloudstack.storage.resource.NfsSecondaryStorageResource;
 import org.apache.cloudstack.storage.resource.SecondaryStorageResource;
 import org.apache.log4j.Logger;
@@ -1092,17 +1093,14 @@ public class DownloadManagerImpl extends ManagerBase implements DownloadManager
     }

     private void blockOutgoingOnPrivate() {
-        Script command = new Script("/bin/bash", LOGGER);
-        String intf = "eth1";
-        command.add("-c");
-        command.add("iptables -A OUTPUT -o " + intf + " -p tcp -m state --state NEW -m tcp --dport " + "80" + " -j REJECT;" + "iptables -A OUTPUT -o " + intf +
-            " -p tcp -m state --state NEW -m tcp --dport " + "443" + " -j REJECT;");
-
-        String result = command.execute();
-        if (result != null) {
-            LOGGER.warn("Error in blocking outgoing to port 80/443 err=" + result);
-            return;
-        }
+        IpTablesHelper.addConditionally(IpTablesHelper.OUTPUT_CHAIN
+                , false
+                , "-o " + TemplateConstants.TMPLT_COPY_INTF_PRIVATE + " -p tcp -m state --state NEW -m tcp --dport 80 -j REJECT;"
+                , "Error in blocking outgoing to port 80");
+        IpTablesHelper.addConditionally(IpTablesHelper.OUTPUT_CHAIN
+                , false
+                , "-o " + TemplateConstants.TMPLT_COPY_INTF_PRIVATE + " -p tcp -m state --state NEW -m tcp --dport 443 -j REJECT;"
+                , "Error in blocking outgoing to port 443");
     }

     @Override
@@ -1128,17 +1126,19 @@ public class DownloadManagerImpl extends ManagerBase implements DownloadManager
         if (result != null) {
             LOGGER.warn("Error in stopping httpd service err=" + result);
         }
-        String port = Integer.toString(TemplateConstants.DEFAULT_TMPLT_COPY_PORT);
-        String intf = TemplateConstants.DEFAULT_TMPLT_COPY_INTF;
-        command = new Script("/bin/bash", LOGGER);
-        command.add("-c");
-        command.add("iptables -I INPUT -i " + intf + " -p tcp -m state --state NEW -m tcp --dport " + port + " -j ACCEPT;" + "iptables -I INPUT -i " + intf +
-            " -p tcp -m state --state NEW -m tcp --dport " + "443" + " -j ACCEPT;");
-
-        result = command.execute();
+        result = IpTablesHelper.addConditionally(IpTablesHelper.INPUT_CHAIN
+                , true
+                , "-i " + TemplateConstants.DEFAULT_TMPLT_COPY_INTF + " -p tcp -m state --state NEW -m tcp --dport " + TemplateConstants.DEFAULT_TMPLT_COPY_PORT + " -j ACCEPT"
+                , "Error in opening up apache2 port " + TemplateConstants.TMPLT_COPY_INTF_PRIVATE);
+        if (result != null) {
+            return;
+        }
+        result = IpTablesHelper.addConditionally(IpTablesHelper.INPUT_CHAIN
+                , true
+                , "-i " + TemplateConstants.DEFAULT_TMPLT_COPY_INTF + " -p tcp -m state --state NEW -m tcp --dport 443 -j ACCEPT;"
+                , "Error in opening up apache2 port 443");
         if (result != null) {
-            LOGGER.warn("Error in opening up apache2 port err=" + result);
             return;
         }

diff --git a/systemvm/debian/opt/cloud/bin/cs/CsHelper.py b/systemvm/debian/opt/cloud/bin/cs/CsHelper.py
index 2a83c215403..45f1953dc99 100755
--- a/systemvm/debian/opt/cloud/bin/cs/CsHelper.py
+++ b/systemvm/debian/opt/cloud/bin/cs/CsHelper.py
@@ -221,7 +221,7 @@ def save_iptables(command, iptables_file):

 def execute2(command, wait=True):
     """ Execute command """
-    logging.info("Executing: %s" % command)
+    logging.info("Executing2: %s" % command)
     p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
     if wait:
         p.wait()
diff --git a/utils/src/main/java/com/cloud/utils/script/Script.java b/utils/src/main/java/com/cloud/utils/script/Script.java
index fb57f256690..31585534cbd 100644
--- a/utils/src/main/java/com/cloud/utils/script/Script.java
+++ b/utils/src/main/java/com/cloud/utils/script/Script.java
@@ -243,12 +243,19 @@ public class Script implements Callable<String> {
                 //process completed successfully
                 if (_process.exitValue() == 0) {
                     _logger.debug("Execution is successful.");
+                    String result;
+                    String method;
                     if (interpreter != null) {
-                        return interpreter.drain() ? task.getResult() : interpreter.interpret(ir);
+                        _logger.debug("interpreting the result...");
+                        method = "result interpretation of execution: ";
+                        result= interpreter.drain() ? task.getResult() : interpreter.interpret(ir);
                     } else {
                         // null return exitValue apparently
-                        return String.valueOf(_process.exitValue());
+                        method = "return code of execution: ";
+                        result = String.valueOf(_process.exitValue());
                     }
+                    _logger.debug(method + result);
+                    return result;
                 } else {
                     //process failed
                     break;
                 }

From 7c5b7ca0777ad0c1f2253521cc4cbd05279e3d80 Mon Sep 17 00:00:00 2001
From: dahn
Date: Tue, 18 Jun 2024 09:01:17 +0200
Subject: [PATCH 3/3] Extra parameter for UpdateImageStore (#8941)

* Extra parameter for UpdateImageStore

* add name parameter

* ui

* cleanup

* update DB from storage stats results
---
 .../com/cloud/storage/StorageService.java     |  3 +++
 .../admin/storage/UpdateImageStoreCmd.java    | 21 +++++++++++++---
 .../api/response/ImageStoreResponse.java      | 14 +++++------
 .../com/cloud/storage/StorageManager.java     |  2 ++
 .../com/cloud/api/query/QueryManagerImpl.java |  2 +-
 .../java/com/cloud/server/StatsCollector.java | 17 +++++++++++--
 .../com/cloud/storage/StorageManagerImpl.java | 25 ++++++++++++++++---
 .../config/section/infra/secondaryStorages.js | 17 +++----------
 ui/src/views/AutogenView.vue                  |  1 -
 9 files changed, 71 insertions(+), 31 deletions(-)

diff --git a/api/src/main/java/com/cloud/storage/StorageService.java b/api/src/main/java/com/cloud/storage/StorageService.java
index bb086ad05cb..d11246b8eaf 100644
--- a/api/src/main/java/com/cloud/storage/StorageService.java
+++ b/api/src/main/java/com/cloud/storage/StorageService.java
@@ -27,6 +27,7 @@ import org.apache.cloudstack.api.command.admin.storage.DeleteImageStoreCmd;
 import org.apache.cloudstack.api.command.admin.storage.DeletePoolCmd;
 import org.apache.cloudstack.api.command.admin.storage.DeleteSecondaryStagingStoreCmd;
 import org.apache.cloudstack.api.command.admin.storage.SyncStoragePoolCmd;
+import org.apache.cloudstack.api.command.admin.storage.UpdateImageStoreCmd;
 import org.apache.cloudstack.api.command.admin.storage.UpdateStoragePoolCmd;

 import com.cloud.exception.DiscoveryException;
@@ -103,6 +104,8 @@ public interface StorageService {
      */
     ImageStore migrateToObjectStore(String name, String url, String providerName, Map<String, String> details) throws DiscoveryException;

+    ImageStore updateImageStore(UpdateImageStoreCmd cmd);
+
     ImageStore updateImageStoreStatus(Long id, Boolean readonly);

     void updateStorageCapabilities(Long poolId, boolean failOnChecks);
diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateImageStoreCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateImageStoreCmd.java
index d7dca93b485..87d056cabf7 100644
--- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateImageStoreCmd.java
+++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateImageStoreCmd.java
@@ -41,10 +41,17 @@ public class UpdateImageStoreCmd extends BaseCmd {
     @Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = ImageStoreResponse.class, required = true, description = "Image Store UUID")
     private Long id;

-    @Parameter(name = ApiConstants.READ_ONLY, type = CommandType.BOOLEAN, required = true,
description = "If set to true, it designates the corresponding image store to read-only, " + - "hence not considering them during storage migration") + @Parameter(name = ApiConstants.NAME, type = CommandType.STRING, required = false, description = "The new name for the Image Store.") + private String name; + + @Parameter(name = ApiConstants.READ_ONLY, type = CommandType.BOOLEAN, required = false, + description = "If set to true, it designates the corresponding image store to read-only, hence not considering them during storage migration") private Boolean readonly; + @Parameter(name = ApiConstants.CAPACITY_BYTES, type = CommandType.LONG, required = false, + description = "The number of bytes CloudStack can use on this image storage.\n\tNOTE: this will be overwritten by the StatsCollector as soon as there is a SSVM to query the storage.") + private Long capacityBytes; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -53,17 +60,25 @@ public class UpdateImageStoreCmd extends BaseCmd { return id; } + public String getName() { + return name; + } + public Boolean getReadonly() { return readonly; } + public Long getCapacityBytes() { + return capacityBytes; + } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// @Override public void execute() { - ImageStore result = _storageService.updateImageStoreStatus(getId(), getReadonly()); + ImageStore result = _storageService.updateImageStore(this); ImageStoreResponse storeResponse = null; if (result != null) { storeResponse = _responseGenerator.createImageStoreResponse(result); diff --git a/api/src/main/java/org/apache/cloudstack/api/response/ImageStoreResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/ImageStoreResponse.java index 532963dbddc..ee44b6bc474 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/ImageStoreResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/ImageStoreResponse.java @@ -27,11 +27,11 @@ import com.google.gson.annotations.SerializedName; @EntityReference(value = ImageStore.class) public class ImageStoreResponse extends BaseResponseWithAnnotations { - @SerializedName("id") + @SerializedName(ApiConstants.ID) @Param(description = "the ID of the image store") private String id; - @SerializedName("zoneid") + @SerializedName(ApiConstants.ZONE_ID) @Param(description = "the Zone ID of the image store") private String zoneId; @@ -39,15 +39,15 @@ public class ImageStoreResponse extends BaseResponseWithAnnotations { @Param(description = "the Zone name of the image store") private String zoneName; - @SerializedName("name") + @SerializedName(ApiConstants.NAME) @Param(description = "the name of the image store") private String name; - @SerializedName("url") + @SerializedName(ApiConstants.URL) @Param(description = "the url of the image store") private String url; - @SerializedName("protocol") + @SerializedName(ApiConstants.PROTOCOL) @Param(description = "the protocol of the image store") private String protocol; @@ -55,11 +55,11 @@ public class ImageStoreResponse extends BaseResponseWithAnnotations { @Param(description = "the provider name of the image store") private String providerName; - @SerializedName("scope") + @SerializedName(ApiConstants.SCOPE) @Param(description = "the scope of the image store") private ScopeType scope; - @SerializedName("readonly") + 
@SerializedName(ApiConstants.READ_ONLY) @Param(description = "defines if store is read-only") private Boolean readonly; diff --git a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java index 7fdec905242..411a6caa83b 100644 --- a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java +++ b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java @@ -350,6 +350,8 @@ public interface StorageManager extends StorageService { Long getDiskIopsWriteRate(ServiceOffering offering, DiskOffering diskOffering); + ImageStore updateImageStoreStatus(Long id, String name, Boolean readonly, Long capacityBytes); + void cleanupDownloadUrls(); void setDiskProfileThrottling(DiskProfile dskCh, ServiceOffering offering, DiskOffering diskOffering); diff --git a/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java b/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java index 51aed5af66c..2dea6f85d9e 100644 --- a/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java +++ b/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java @@ -2739,7 +2739,7 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q @Override public ListResponse searchForImageStores(ListImageStoresCmd cmd) { Pair, Integer> result = searchForImageStoresInternal(cmd); - ListResponse response = new ListResponse(); + ListResponse response = new ListResponse<>(); List poolResponses = ViewResponseHelper.createImageStoreResponse(result.first().toArray(new ImageStoreJoinVO[result.first().size()])); response.setResponses(poolResponses, result.second()); diff --git a/server/src/main/java/com/cloud/server/StatsCollector.java b/server/src/main/java/com/cloud/server/StatsCollector.java index 19820093f3a..e44c094ab8e 100644 --- a/server/src/main/java/com/cloud/server/StatsCollector.java +++ b/server/src/main/java/com/cloud/server/StatsCollector.java @@ -1671,7 +1671,7 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc } List stores = _dataStoreMgr.listImageStores(); - ConcurrentHashMap storageStats = new ConcurrentHashMap(); + ConcurrentHashMap storageStats = new ConcurrentHashMap<>(); for (DataStore store : stores) { if (store.getUri() == null) { continue; @@ -1691,7 +1691,7 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc LOGGER.trace("HostId: " + storeId + " Used: " + toHumanReadableSize(((StorageStats)answer).getByteUsed()) + " Total Available: " + toHumanReadableSize(((StorageStats)answer).getCapacityBytes())); } } - _storageStats = storageStats; + updateStorageStats(storageStats); ConcurrentHashMap storagePoolStats = new ConcurrentHashMap(); List storagePools = _storagePoolDao.listAll(); @@ -1737,6 +1737,19 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc LOGGER.error("Error trying to retrieve storage stats", t); } } + + private void updateStorageStats(ConcurrentHashMap storageStats) { + for (Long storeId : storageStats.keySet()) { + if (_storageStats.containsKey(storeId) + && (_storageStats.get(storeId).getCapacityBytes() == 0l + || _storageStats.get(storeId).getCapacityBytes() != storageStats.get(storeId).getCapacityBytes())) { + // get add to DB rigorously + _storageManager.updateImageStoreStatus(storeId, null, null, storageStats.get(storeId).getCapacityBytes()); + } + } + // if in _storageStats and not in storageStats it gets discarded + _storageStats = 
storageStats; + } } class AutoScaleMonitor extends ManagedContextRunnable { diff --git a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java index c6379a7e4c3..290c6922e0d 100644 --- a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java @@ -57,6 +57,7 @@ import org.apache.cloudstack.api.command.admin.storage.DeleteImageStoreCmd; import org.apache.cloudstack.api.command.admin.storage.DeletePoolCmd; import org.apache.cloudstack.api.command.admin.storage.DeleteSecondaryStagingStoreCmd; import org.apache.cloudstack.api.command.admin.storage.SyncStoragePoolCmd; +import org.apache.cloudstack.api.command.admin.storage.UpdateImageStoreCmd; import org.apache.cloudstack.api.command.admin.storage.UpdateStoragePoolCmd; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; @@ -116,7 +117,6 @@ import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO; import org.apache.cloudstack.storage.image.datastore.ImageStoreEntity; import org.apache.cloudstack.storage.to.VolumeObjectTO; import org.apache.commons.collections.CollectionUtils; -import org.apache.commons.lang3.StringUtils; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -212,6 +212,7 @@ import com.cloud.utils.DateUtil; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; import com.cloud.utils.UriUtils; +import com.cloud.utils.StringUtils; import com.cloud.utils.component.ComponentContext; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.concurrency.NamedThreadFactory; @@ -2998,17 +2999,35 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } @Override - public ImageStore updateImageStoreStatus(Long id, Boolean readonly) { + public ImageStore updateImageStore(UpdateImageStoreCmd cmd) { + return updateImageStoreStatus(cmd.getId(), cmd.getName(), cmd.getReadonly(), cmd.getCapacityBytes()); + } + + @Override + public ImageStore updateImageStoreStatus(Long id, String name, Boolean readonly, Long capacityBytes) { // Input validation ImageStoreVO imageStoreVO = _imageStoreDao.findById(id); if (imageStoreVO == null) { throw new IllegalArgumentException("Unable to find image store with ID: " + id); } - imageStoreVO.setReadonly(readonly); + if (com.cloud.utils.StringUtils.isNotBlank(name)) { + imageStoreVO.setName(name); + } + if (capacityBytes != null) { + imageStoreVO.setTotalSize(capacityBytes); + } + if (readonly != null) { + imageStoreVO.setReadonly(readonly); + } _imageStoreDao.update(id, imageStoreVO); return imageStoreVO; } + @Override + public ImageStore updateImageStoreStatus(Long id, Boolean readonly) { + return updateImageStoreStatus(id, null, readonly, null); + } + /** * @param poolId - Storage pool id for pool to update. * @param failOnChecks - If true, throw an error if pool type and state checks fail. 
diff --git a/ui/src/config/section/infra/secondaryStorages.js b/ui/src/config/section/infra/secondaryStorages.js
index 93c54dc1d79..793aa02e966 100644
--- a/ui/src/config/section/infra/secondaryStorages.js
+++ b/ui/src/config/section/infra/secondaryStorages.js
@@ -77,21 +77,10 @@ export default {
     },
     {
       api: 'updateImageStore',
-      icon: 'stop-outlined',
-      label: 'label.action.image.store.read.only',
-      message: 'message.action.secondary.storage.read.only',
+      icon: 'edit-outlined',
+      label: 'label.edit',
       dataView: true,
-      defaultArgs: { readonly: true },
-      show: (record) => { return record.readonly === false }
-    },
-    {
-      api: 'updateImageStore',
-      icon: 'check-circle-outlined',
-      label: 'label.action.image.store.read.write',
-      message: 'message.action.secondary.storage.read.write',
-      dataView: true,
-      defaultArgs: { readonly: false },
-      show: (record) => { return record.readonly === true }
+      args: ['name', 'readonly', 'capacitybytes']
     },
     {
       api: 'deleteImageStore',
diff --git a/ui/src/views/AutogenView.vue b/ui/src/views/AutogenView.vue
index acb6fd6e89c..6cf054359fb 100644
--- a/ui/src/views/AutogenView.vue
+++ b/ui/src/views/AutogenView.vue
@@ -679,7 +679,6 @@ export default {
     },
     getOkProps () {
       if (this.selectedRowKeys.length > 0 && this.currentAction?.groupAction) {
-
         return { props: { type: 'default' } }
       } else {
         return { props: { type: 'primary' } }
       }
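For reference, a minimal usage sketch of the IpTablesHelper introduced in PATCH 2/3; this snippet is not part of the patches above. The class name, the INPUT_CHAIN constant, the rule string, and the addConditionally(chain, insert, rule, errMsg) signature come from the new file, while the wrapping IpTablesHelperUsageSketch class and main() method are illustrative only and assume the code runs inside the SSVM, where iptables and the com.cloud.utils.script.Script wrapper are available:

    import org.apache.cloudstack.storage.resource.IpTablesHelper;

    public class IpTablesHelperUsageSketch {
        public static void main(String[] args) {
            // Rule taken verbatim from PATCH 2/3: open the SSVM ssh port (3922) on eth1.
            String rule = "-i eth1 -p tcp -m state --state NEW -m tcp --dport 3922 -j ACCEPT";
            // addConditionally() first probes with "iptables -C <chain> <rule>"; only when that probe
            // reports a missing rule does it run "iptables -I <chain> <rule>" (insert == true) or
            // "iptables -A <chain> <rule>" (insert == false), which is what prevents duplicate rules.
            String err = IpTablesHelper.addConditionally(IpTablesHelper.INPUT_CHAIN, true, rule,
                    "Error in opening up ssh port");
            if (err != null) {
                // A non-null return carries the supplied error message plus the iptables output.
                System.err.println(err);
            }
        }
    }

The duplicate prevention comes from that check-then-add pattern: re-running SSVM setup code no longer appends the same INPUT or OUTPUT rule a second time.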