Merge remote-tracking branch 'apache/4.19'

This commit is contained in:
Wei Zhou 2024-02-13 10:38:21 +01:00
commit 93406f5582
36 changed files with 760 additions and 392 deletions

View File

@ -161,7 +161,7 @@ public class Storage {
public static final StoragePoolType LVM = new StoragePoolType("LVM", false, false, false);
public static final StoragePoolType CLVM = new StoragePoolType("CLVM", true, false, false);
public static final StoragePoolType RBD = new StoragePoolType("RBD", true, true, false);
public static final StoragePoolType SharedMountPoint = new StoragePoolType("SharedMountPoint", true, false, true);
public static final StoragePoolType SharedMountPoint = new StoragePoolType("SharedMountPoint", true, true, true);
public static final StoragePoolType VMFS = new StoragePoolType("VMFS", true, true, false);
public static final StoragePoolType PreSetup = new StoragePoolType("PreSetup", true, true, false);
public static final StoragePoolType EXT = new StoragePoolType("EXT", false, true, false);
@ -178,7 +178,7 @@ public class Storage {
private final String name;
private final boolean shared;
private final boolean overprovisioning;
private final boolean overProvisioning;
private final boolean encryption;
/**
@ -188,7 +188,7 @@ public class Storage {
public StoragePoolType(String name) {
this.name = name;
this.shared = false;
this.overprovisioning = false;
this.overProvisioning = false;
this.encryption = false;
}
@ -196,13 +196,13 @@ public class Storage {
* Define a new StoragePoolType, and register it into the map of pool types known to the management server.
* @param name Simple unique name of the StoragePoolType.
* @param shared Storage pool is shared/accessible to multiple hypervisors
* @param overprovisioning Storage pool supports overprovisioning
* @param overProvisioning Storage pool supports overProvisioning
* @param encryption Storage pool supports encrypted volumes
*/
public StoragePoolType(String name, boolean shared, boolean overprovisioning, boolean encryption) {
public StoragePoolType(String name, boolean shared, boolean overProvisioning, boolean encryption) {
this.name = name;
this.shared = shared;
this.overprovisioning = overprovisioning;
this.overProvisioning = overProvisioning;
this.encryption = encryption;
addStoragePoolType(this);
}
@ -212,10 +212,12 @@ public class Storage {
}
public boolean supportsOverProvisioning() {
return overprovisioning;
return overProvisioning;
}
public boolean supportsEncryption() { return encryption; }
public boolean supportsEncryption() {
return encryption;
}
private static void addStoragePoolType(StoragePoolType storagePoolType) {
map.putIfAbsent(storagePoolType.name, storagePoolType);
@ -261,7 +263,7 @@ public class Storage {
}
public static List<StoragePoolType> getNonSharedStoragePoolTypes() {
List<StoragePoolType> nonSharedStoragePoolTypes = new ArrayList<StoragePoolType>();
List<StoragePoolType> nonSharedStoragePoolTypes = new ArrayList<>();
for (StoragePoolType storagePoolType : StoragePoolType.values()) {
if (!storagePoolType.isShared()) {
nonSharedStoragePoolTypes.add(storagePoolType);

View File

@ -33,6 +33,10 @@ import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import static org.apache.cloudstack.cluster.ClusterDrsService.ClusterDrsMetric;
import static org.apache.cloudstack.cluster.ClusterDrsService.ClusterDrsMetricType;
import static org.apache.cloudstack.cluster.ClusterDrsService.ClusterDrsMetricUseRatio;
public interface ClusterDrsAlgorithm extends Adapter {
/**
@ -42,16 +46,17 @@ public interface ClusterDrsAlgorithm extends Adapter {
* @param clusterId
* the ID of the cluster to check
* @param cpuList
* a list of CPU allocated values for each host in the cluster
* a list of Ternary of used, reserved & total CPU for each host in the cluster
* @param memoryList
* a list of memory allocated values for each host in the cluster
* a list of Ternary of used, reserved & total memory values for each host in the cluster
*
* @return true if a DRS operation is needed, false otherwise
*
* @throws ConfigurationException
* if there is an error in the configuration
*/
boolean needsDrs(long clusterId, List<Long> cpuList, List<Long> memoryList) throws ConfigurationException;
boolean needsDrs(long clusterId, List<Ternary<Long, Long, Long>> cpuList,
List<Ternary<Long, Long, Long>> memoryList) throws ConfigurationException;
/**
@ -65,18 +70,19 @@ public interface ClusterDrsAlgorithm extends Adapter {
* the service offering for the virtual machine
* @param destHost
* the destination host for the virtual machine
* @param hostCpuFreeMap
* a map of host IDs to the amount of CPU free on each host
* @param hostMemoryFreeMap
* a map of host IDs to the amount of memory free on each host
* @param hostCpuMap
* a map of host IDs to the Ternary of used, reserved and total CPU on each host
* @param hostMemoryMap
* a map of host IDs to the Ternary of used, reserved and total memory on each host
* @param requiresStorageMotion
* whether storage motion is required for the virtual machine
*
* @return a ternary containing improvement, cost, benefit
*/
Ternary<Double, Double, Double> getMetrics(long clusterId, VirtualMachine vm, ServiceOffering serviceOffering,
Host destHost, Map<Long, Long> hostCpuFreeMap,
Map<Long, Long> hostMemoryFreeMap, Boolean requiresStorageMotion);
Host destHost, Map<Long, Ternary<Long, Long, Long>> hostCpuMap,
Map<Long, Ternary<Long, Long, Long>> hostMemoryMap,
Boolean requiresStorageMotion) throws ConfigurationException;
/**
* Calculates the imbalance of the cluster after a virtual machine migration.
@ -87,54 +93,93 @@ public interface ClusterDrsAlgorithm extends Adapter {
* the virtual machine being migrated
* @param destHost
* the destination host for the virtual machine
* @param hostCpuFreeMap
* a map of host IDs to the amount of CPU free on each host
* @param hostMemoryFreeMap
* a map of host IDs to the amount of memory free on each host
* @param hostCpuMap
* a map of host IDs to the Ternary of used, reserved and total CPU on each host
* @param hostMemoryMap
* a map of host IDs to the Ternary of used, reserved and total memory on each host
*
* @return a pair containing the CPU and memory imbalance of the cluster after the migration
*/
default Pair<Double, Double> getImbalancePostMigration(ServiceOffering serviceOffering, VirtualMachine vm,
Host destHost, Map<Long, Long> hostCpuFreeMap,
Map<Long, Long> hostMemoryFreeMap) {
List<Long> postCpuList = new ArrayList<>();
List<Long> postMemoryList = new ArrayList<>();
final int vmCpu = serviceOffering.getCpu() * serviceOffering.getSpeed();
final long vmRam = serviceOffering.getRamSize() * 1024L * 1024L;
default Double getImbalancePostMigration(ServiceOffering serviceOffering, VirtualMachine vm,
Host destHost, Map<Long, Ternary<Long, Long, Long>> hostCpuMap,
Map<Long, Ternary<Long, Long, Long>> hostMemoryMap) throws ConfigurationException {
Pair<Long, Map<Long, Ternary<Long, Long, Long>>> pair = getHostMetricsMapAndType(destHost.getClusterId(), serviceOffering, hostCpuMap, hostMemoryMap);
long vmMetric = pair.first();
Map<Long, Ternary<Long, Long, Long>> hostMetricsMap = pair.second();
for (Long hostId : hostCpuFreeMap.keySet()) {
long cpu = hostCpuFreeMap.get(hostId);
long memory = hostMemoryFreeMap.get(hostId);
if (hostId == destHost.getId()) {
postCpuList.add(cpu - vmCpu);
postMemoryList.add(memory - vmRam);
} else if (hostId.equals(vm.getHostId())) {
postCpuList.add(cpu + vmCpu);
postMemoryList.add(memory + vmRam);
} else {
postCpuList.add(cpu);
postMemoryList.add(memory);
List<Double> list = new ArrayList<>();
for (Long hostId : hostMetricsMap.keySet()) {
list.add(getMetricValuePostMigration(destHost.getClusterId(), hostMetricsMap.get(hostId), vmMetric, hostId, destHost.getId(), vm.getHostId()));
}
}
return new Pair<>(getClusterImbalance(postCpuList), getClusterImbalance(postMemoryList));
return getImbalance(list);
}
/**
* The cluster imbalance is defined as the percentage deviation from the mean
* for a configured metric of the cluster. The standard deviation is used as a
* mathematical tool to normalize the metric data for all the resources and the
* percentage deviation provides an easy tool to compare a cluster's current
* state against the defined imbalance threshold. Because this is essentially a
* percentage, the value is a number between 0.0 and 1.0.
* Cluster Imbalance, Ic = σc / mavg , where σc is the standard deviation and
* mavg is the mean metric value for the cluster.
*/
default Double getClusterImbalance(List<Long> metricList) {
private Pair<Long, Map<Long, Ternary<Long, Long, Long>>> getHostMetricsMapAndType(Long clusterId,
ServiceOffering serviceOffering, Map<Long, Ternary<Long, Long, Long>> hostCpuMap,
Map<Long, Ternary<Long, Long, Long>> hostMemoryMap) throws ConfigurationException {
String metric = getClusterDrsMetric(clusterId);
Pair<Long, Map<Long, Ternary<Long, Long, Long>>> pair;
switch (metric) {
case "cpu":
pair = new Pair<>((long) serviceOffering.getCpu() * serviceOffering.getSpeed(), hostCpuMap);
break;
case "memory":
pair = new Pair<>(serviceOffering.getRamSize() * 1024L * 1024L, hostMemoryMap);
break;
default:
throw new ConfigurationException(
String.format("Invalid metric: %s for cluster: %d", metric, clusterId));
}
return pair;
}
private Double getMetricValuePostMigration(Long clusterId, Ternary<Long, Long, Long> metrics, long vmMetric,
long hostId, long destHostId, long vmHostId) {
long used = metrics.first();
long actualTotal = metrics.third() - metrics.second();
long free = actualTotal - metrics.first();
if (hostId == destHostId) {
used += vmMetric;
free -= vmMetric;
} else if (hostId == vmHostId) {
used -= vmMetric;
free += vmMetric;
}
return getMetricValue(clusterId, used, free, actualTotal, null);
}
private static Double getImbalance(List<Double> metricList) {
Double clusterMeanMetric = getClusterMeanMetric(metricList);
Double clusterStandardDeviation = getClusterStandardDeviation(metricList, clusterMeanMetric);
return clusterStandardDeviation / clusterMeanMetric;
}
static String getClusterDrsMetric(long clusterId) {
return ClusterDrsMetric.valueIn(clusterId);
}
static Double getMetricValue(long clusterId, long used, long free, long total, Float skipThreshold) {
boolean useRatio = getDrsMetricUseRatio(clusterId);
switch (getDrsMetricType(clusterId)) {
case "free":
if (skipThreshold != null && free < skipThreshold * total) return null;
if (useRatio) {
return (double) free / total;
} else {
return (double) free;
}
case "used":
if (skipThreshold != null && used > skipThreshold * total) return null;
if (useRatio) {
return (double) used / total;
} else {
return (double) used;
}
}
return null;
}
/**
* Mean is the average of a collection or set of metrics. In context of a DRS
* cluster, the cluster metrics defined as the average metrics value for some
@ -142,7 +187,7 @@ public interface ClusterDrsAlgorithm extends Adapter {
* Cluster Mean Metric, mavg = (mi) / N, where mi is a measurable metric for a
* resource i in a cluster with total N number of resources.
*/
default Double getClusterMeanMetric(List<Long> metricList) {
static Double getClusterMeanMetric(List<Double> metricList) {
return new Mean().evaluate(metricList.stream().mapToDouble(i -> i).toArray());
}
@ -157,11 +202,62 @@ public interface ClusterDrsAlgorithm extends Adapter {
* mean metric value and mi is a measurable metric for some resource i in the
* cluster with total N number of resources.
*/
default Double getClusterStandardDeviation(List<Long> metricList, Double mean) {
static Double getClusterStandardDeviation(List<Double> metricList, Double mean) {
if (mean != null) {
return new StandardDeviation(false).evaluate(metricList.stream().mapToDouble(i -> i).toArray(), mean);
} else {
return new StandardDeviation(false).evaluate(metricList.stream().mapToDouble(i -> i).toArray());
}
}
static boolean getDrsMetricUseRatio(long clusterId) {
return ClusterDrsMetricUseRatio.valueIn(clusterId);
}
static String getDrsMetricType(long clusterId) {
return ClusterDrsMetricType.valueIn(clusterId);
}
/**
* The cluster imbalance is defined as the percentage deviation from the mean
* for a configured metric of the cluster. The standard deviation is used as a
* mathematical tool to normalize the metric data for all the resources and the
* percentage deviation provides an easy tool to compare a cluster's current
* state against the defined imbalance threshold. Because this is essentially a
* percentage, the value is a number between 0.0 and 1.0.
* Cluster Imbalance, Ic = σc / mavg , where σc is the standard deviation and
* mavg is the mean metric value for the cluster.
*/
static Double getClusterImbalance(Long clusterId, List<Ternary<Long, Long, Long>> cpuList,
List<Ternary<Long, Long, Long>> memoryList, Float skipThreshold) throws ConfigurationException {
String metric = getClusterDrsMetric(clusterId);
List<Double> list;
switch (metric) {
case "cpu":
list = getMetricList(clusterId, cpuList, skipThreshold);
break;
case "memory":
list = getMetricList(clusterId, memoryList, skipThreshold);
break;
default:
throw new ConfigurationException(
String.format("Invalid metric: %s for cluster: %d", metric, clusterId));
}
return getImbalance(list);
}
static List<Double> getMetricList(Long clusterId, List<Ternary<Long, Long, Long>> hostMetricsList,
Float skipThreshold) {
List<Double> list = new ArrayList<>();
for (Ternary<Long, Long, Long> ternary : hostMetricsList) {
long used = ternary.first();
long actualTotal = ternary.third() - ternary.second();
long free = actualTotal - ternary.first();
Double metricValue = getMetricValue(clusterId, used, free, actualTotal, skipThreshold);
if (metricValue != null) {
list.add(metricValue);
}
}
return list;
}
}

View File

@ -66,6 +66,29 @@ public interface ClusterDrsService extends Manager, Configurable, Scheduler {
true, ConfigKey.Scope.Cluster, null, "DRS metric", null, null, null, ConfigKey.Kind.Select,
"memory,cpu");
ConfigKey<String> ClusterDrsMetricType = new ConfigKey<>(String.class, "drs.metric.type", ConfigKey.CATEGORY_ADVANCED,
"used",
"The metric type used to measure imbalance in a cluster. This can completely change the imbalance value. Possible values are free, used.",
true, ConfigKey.Scope.Cluster, null, "DRS metric type", null, null, null, ConfigKey.Kind.Select,
"free,used");
ConfigKey<Boolean> ClusterDrsMetricUseRatio = new ConfigKey<>(Boolean.class, "drs.metric.use.ratio", ConfigKey.CATEGORY_ADVANCED,
"true",
"Whether to use ratio of selected metric & total. Useful when the cluster has hosts with different capacities",
true, ConfigKey.Scope.Cluster, null, "DRS metric use ratio", null, null, null, ConfigKey.Kind.Select,
"true,false");
ConfigKey<Float> ClusterDrsImbalanceSkipThreshold = new ConfigKey<>(Float.class,
"drs.imbalance.condensed.skip.threshold", ConfigKey.CATEGORY_ADVANCED, "0.95",
"Threshold to ignore the metric for a host while calculating the imbalance to decide " +
"whether DRS is required for a cluster.This is to avoid cases when the calculated imbalance" +
" gets skewed due to a single host having a very high/low metric value resulting in imbalance" +
" being higher than 1. If " + ClusterDrsMetricType.key() + " is 'free', set a lower value and if it is 'used' " +
"set a higher value. The value should be between 0.0 and 1.0",
true, ConfigKey.Scope.Cluster, null, "DRS imbalance skip threshold for Condensed algorithm",
null, null, null);
/**
* Generate a DRS plan for a cluster and save it as per the parameters
*

View File

@ -52,7 +52,7 @@ public class StorageTest {
}
@Test
public void supportsOverprovisioningStoragePool() {
public void supportsOverProvisioningTestAllStoragePoolTypes() {
Assert.assertTrue(StoragePoolType.Filesystem.supportsOverProvisioning());
Assert.assertTrue(StoragePoolType.NetworkFilesystem.supportsOverProvisioning());
Assert.assertFalse(StoragePoolType.IscsiLUN.supportsOverProvisioning());
@ -63,7 +63,7 @@ public class StorageTest {
Assert.assertFalse(StoragePoolType.CLVM.supportsOverProvisioning());
Assert.assertTrue(StoragePoolType.RBD.supportsOverProvisioning());
Assert.assertTrue(StoragePoolType.PowerFlex.supportsOverProvisioning());
Assert.assertFalse(StoragePoolType.SharedMountPoint.supportsOverProvisioning());
Assert.assertTrue(StoragePoolType.SharedMountPoint.supportsOverProvisioning());
Assert.assertTrue(StoragePoolType.VMFS.supportsOverProvisioning());
Assert.assertTrue(StoragePoolType.PreSetup.supportsOverProvisioning());
Assert.assertTrue(StoragePoolType.EXT.supportsOverProvisioning());

View File

@ -0,0 +1,97 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cloudstack.cluster;
import com.cloud.utils.Ternary;
import junit.framework.TestCase;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.MockedStatic;
import org.mockito.Mockito;
import org.mockito.junit.MockitoJUnitRunner;
import java.util.List;
import static org.apache.cloudstack.cluster.ClusterDrsAlgorithm.getMetricValue;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyFloat;
import static org.mockito.ArgumentMatchers.anyLong;
import static org.mockito.Mockito.when;
@RunWith(MockitoJUnitRunner.class)
public class ClusterDrsAlgorithmTest extends TestCase {
@Test
public void testGetMetricValue() {
List<Ternary<Boolean, String, Double>> testData = List.of(
new Ternary<>(true, "free", 0.4),
new Ternary<>(false, "free", 40.0),
new Ternary<>(true, "used", 0.3),
new Ternary<>(false, "used", 30.0)
);
long used = 30;
long free = 40;
long total = 100;
for (Ternary<Boolean, String, Double> data : testData) {
boolean useRatio = data.first();
String metricType = data.second();
double expectedValue = data.third();
try (MockedStatic<ClusterDrsAlgorithm> ignored = Mockito.mockStatic(ClusterDrsAlgorithm.class)) {
when(ClusterDrsAlgorithm.getDrsMetricUseRatio(1L)).thenReturn(useRatio);
when(ClusterDrsAlgorithm.getDrsMetricType(1L)).thenReturn(metricType);
when(ClusterDrsAlgorithm.getMetricValue(anyLong(), anyLong(), anyLong(), anyLong(), any())).thenCallRealMethod();
assertEquals(expectedValue, getMetricValue(1, used, free, total, null));
}
}
}
@Test
public void testGetMetricValueWithSkipThreshold() {
List<Ternary<Boolean, String, Double>> testData = List.of(
new Ternary<>(true, "free", 0.15),
new Ternary<>(false, "free", 15.0),
new Ternary<>(true, "used", null),
new Ternary<>(false, "used", null)
);
long used = 80;
long free = 15;
long total = 100;
for (Ternary<Boolean, String, Double> data : testData) {
boolean useRatio = data.first();
String metricType = data.second();
Double expectedValue = data.third();
float skipThreshold = metricType.equals("free") ? 0.1f : 0.7f;
try (MockedStatic<ClusterDrsAlgorithm> ignored = Mockito.mockStatic(ClusterDrsAlgorithm.class)) {
when(ClusterDrsAlgorithm.getDrsMetricUseRatio(1L)).thenReturn(useRatio);
when(ClusterDrsAlgorithm.getDrsMetricType(1L)).thenReturn(metricType);
when(ClusterDrsAlgorithm.getMetricValue(anyLong(), anyLong(), anyLong(), anyLong(), anyFloat())).thenCallRealMethod();
assertEquals(expectedValue, ClusterDrsAlgorithm.getMetricValue(1L, used, free, total, skipThreshold));
}
}
}
}

View File

@ -35,4 +35,5 @@
/usr/bin/cloudstack-migrate-databases
/usr/bin/cloudstack-setup-encryption
/usr/bin/cloudstack-sysvmadm
/usr/bin/cmk
/usr/share/cloudstack-management/*

4
debian/rules vendored
View File

@ -84,6 +84,10 @@ override_dh_auto_install:
cp -r engine/schema/dist/systemvm-templates/* $(DESTDIR)/usr/share/$(PACKAGE)-management/templates/systemvm/
rm -rf $(DESTDIR)/usr/share/$(PACKAGE)-management/templates/systemvm/md5sum.txt
# Bundle cmk in cloudstack-management
wget https://github.com/apache/cloudstack-cloudmonkey/releases/download/6.3.0/cmk.linux.x86-64 -O $(DESTDIR)/usr/bin/cmk
chmod +x $(DESTDIR)/usr/bin/cmk
# nasty hack for a couple of configuration files
mv $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/server/cloudstack-limits.conf $(DESTDIR)/$(SYSCONFDIR)/security/limits.d/
mv $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/server/cloudstack-sudoers $(DESTDIR)/$(SYSCONFDIR)/sudoers.d/$(PACKAGE)

View File

@ -277,6 +277,9 @@ install -D client/target/utilities/bin/cloud-setup-management ${RPM_BUILD_ROOT}%
install -D client/target/utilities/bin/cloud-setup-baremetal ${RPM_BUILD_ROOT}%{_bindir}/%{name}-setup-baremetal
install -D client/target/utilities/bin/cloud-sysvmadm ${RPM_BUILD_ROOT}%{_bindir}/%{name}-sysvmadm
install -D client/target/utilities/bin/cloud-update-xenserver-licenses ${RPM_BUILD_ROOT}%{_bindir}/%{name}-update-xenserver-licenses
# Bundle cmk in cloudstack-management
wget https://github.com/apache/cloudstack-cloudmonkey/releases/download/6.3.0/cmk.linux.x86-64 -O ${RPM_BUILD_ROOT}%{_bindir}/cmk
chmod +x ${RPM_BUILD_ROOT}%{_bindir}/cmk
cp -r client/target/utilities/scripts/db/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/setup
@ -609,6 +612,7 @@ pip3 install --upgrade urllib3
%attr(0755,root,root) %{_bindir}/%{name}-set-guest-sshkey
%attr(0755,root,root) %{_bindir}/%{name}-sysvmadm
%attr(0755,root,root) %{_bindir}/%{name}-setup-encryption
%attr(0755,root,root) %{_bindir}/cmk
%{_datadir}/%{name}-management/setup/*.sql
%{_datadir}/%{name}-management/setup/*.sh
%{_datadir}/%{name}-management/setup/server-setup.xml

View File

@ -259,6 +259,9 @@ install -D client/target/utilities/bin/cloud-setup-management ${RPM_BUILD_ROOT}%
install -D client/target/utilities/bin/cloud-setup-baremetal ${RPM_BUILD_ROOT}%{_bindir}/%{name}-setup-baremetal
install -D client/target/utilities/bin/cloud-sysvmadm ${RPM_BUILD_ROOT}%{_bindir}/%{name}-sysvmadm
install -D client/target/utilities/bin/cloud-update-xenserver-licenses ${RPM_BUILD_ROOT}%{_bindir}/%{name}-update-xenserver-licenses
# Bundle cmk in cloudstack-management
wget https://github.com/apache/cloudstack-cloudmonkey/releases/download/6.3.0/cmk.linux.x86-64 -O ${RPM_BUILD_ROOT}%{_bindir}/cmk
chmod +x ${RPM_BUILD_ROOT}%{_bindir}/cmk
cp -r client/target/utilities/scripts/db/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/setup
@ -588,6 +591,7 @@ pip install --upgrade /usr/share/cloudstack-marvin/Marvin-*.tar.gz
%attr(0755,root,root) %{_bindir}/%{name}-set-guest-sshkey
%attr(0755,root,root) %{_bindir}/%{name}-sysvmadm
%attr(0755,root,root) %{_bindir}/%{name}-setup-encryption
%attr(0755,root,root) %{_bindir}/cmk
%{_datadir}/%{name}-management/setup/*.sql
%{_datadir}/%{name}-management/setup/*.sh
%{_datadir}/%{name}-management/setup/server-setup.xml

View File

@ -21,10 +21,10 @@ package org.apache.cloudstack.cluster;
import com.cloud.host.Host;
import com.cloud.offering.ServiceOffering;
import com.cloud.utils.Pair;
import com.cloud.utils.Ternary;
import com.cloud.utils.component.AdapterBase;
import com.cloud.vm.VirtualMachine;
import org.apache.log4j.Logger;
import javax.naming.ConfigurationException;
import java.util.ArrayList;
@ -32,68 +32,56 @@ import java.util.List;
import java.util.Map;
import static org.apache.cloudstack.cluster.ClusterDrsService.ClusterDrsImbalanceThreshold;
import static org.apache.cloudstack.cluster.ClusterDrsService.ClusterDrsMetric;
public class Balanced extends AdapterBase implements ClusterDrsAlgorithm {
private static final Logger logger = Logger.getLogger(Balanced.class);
@Override
public boolean needsDrs(long clusterId, List<Ternary<Long, Long, Long>> cpuList,
List<Ternary<Long, Long, Long>> memoryList) throws ConfigurationException {
double threshold = getThreshold(clusterId);
Double imbalance = ClusterDrsAlgorithm.getClusterImbalance(clusterId, cpuList, memoryList, null);
String drsMetric = ClusterDrsAlgorithm.getClusterDrsMetric(clusterId);
String metricType = ClusterDrsAlgorithm.getDrsMetricType(clusterId);
Boolean useRatio = ClusterDrsAlgorithm.getDrsMetricUseRatio(clusterId);
if (imbalance > threshold) {
logger.debug(String.format("Cluster %d needs DRS. Imbalance: %s Threshold: %s Algorithm: %s DRS metric: %s Metric Type: %s Use ratio: %s",
clusterId, imbalance, threshold, getName(), drsMetric, metricType, useRatio));
return true;
} else {
logger.debug(String.format("Cluster %d does not need DRS. Imbalance: %s Threshold: %s Algorithm: %s DRS metric: %s Metric Type: %s Use ratio: %s",
clusterId, imbalance, threshold, getName(), drsMetric, metricType, useRatio));
return false;
}
}
private double getThreshold(long clusterId) {
return 1.0 - ClusterDrsImbalanceThreshold.valueIn(clusterId);
}
@Override
public String getName() {
return "balanced";
}
@Override
public boolean needsDrs(long clusterId, List<Long> cpuList, List<Long> memoryList) throws ConfigurationException {
Double cpuImbalance = getClusterImbalance(cpuList);
Double memoryImbalance = getClusterImbalance(memoryList);
double threshold = getThreshold(clusterId);
String metric = ClusterDrsMetric.valueIn(clusterId);
switch (metric) {
case "cpu":
return cpuImbalance > threshold;
case "memory":
return memoryImbalance > threshold;
default:
throw new ConfigurationException(
String.format("Invalid metric: %s for cluster: %d", metric, clusterId));
}
}
private double getThreshold(long clusterId) throws ConfigurationException {
return 1.0 - ClusterDrsImbalanceThreshold.valueIn(clusterId);
}
@Override
public Ternary<Double, Double, Double> getMetrics(long clusterId, VirtualMachine vm,
ServiceOffering serviceOffering, Host destHost,
Map<Long, Long> hostCpuUsedMap, Map<Long, Long> hostMemoryUsedMap,
Boolean requiresStorageMotion) {
Double preCpuImbalance = getClusterImbalance(new ArrayList<>(hostCpuUsedMap.values()));
Double preMemoryImbalance = getClusterImbalance(new ArrayList<>(hostMemoryUsedMap.values()));
Map<Long, Ternary<Long, Long, Long>> hostCpuMap, Map<Long, Ternary<Long, Long, Long>> hostMemoryMap,
Boolean requiresStorageMotion) throws ConfigurationException {
Double preImbalance = ClusterDrsAlgorithm.getClusterImbalance(clusterId, new ArrayList<>(hostCpuMap.values()), new ArrayList<>(hostMemoryMap.values()), null);
Double postImbalance = getImbalancePostMigration(serviceOffering, vm, destHost, hostCpuMap, hostMemoryMap);
Pair<Double, Double> imbalancePair = getImbalancePostMigration(serviceOffering, vm, destHost, hostCpuUsedMap,
hostMemoryUsedMap);
Double postCpuImbalance = imbalancePair.first();
Double postMemoryImbalance = imbalancePair.second();
logger.debug(String.format("Cluster %d pre-imbalance: %s post-imbalance: %s Algorithm: %s VM: %s srcHost: %d destHost: %s",
clusterId, preImbalance, postImbalance, getName(), vm.getUuid(), vm.getHostId(), destHost.getUuid()));
// This needs more research to determine the cost and benefit of a migration
// TODO: Cost should be a factor of the VM size and the host capacity
// TODO: Benefit should be a factor of the VM size and the host capacity and the number of VMs on the host
double cost = 0.0;
double benefit = 1.0;
String metric = ClusterDrsMetric.valueIn(clusterId);
final double improvement;
switch (metric) {
case "cpu":
improvement = preCpuImbalance - postCpuImbalance;
break;
case "memory":
improvement = preMemoryImbalance - postMemoryImbalance;
break;
default:
improvement = preCpuImbalance + preMemoryImbalance - postCpuImbalance - postMemoryImbalance;
}
final double improvement = preImbalance - postImbalance;
final double cost = 0.0;
final double benefit = 1.0;
return new Ternary<>(improvement, cost, benefit);
}
}

View File

@ -21,7 +21,6 @@ package org.apache.cloudstack.cluster;
import com.cloud.host.Host;
import com.cloud.service.ServiceOfferingVO;
import com.cloud.service.dao.ServiceOfferingDao;
import com.cloud.utils.Ternary;
import com.cloud.vm.VirtualMachine;
import org.apache.cloudstack.framework.config.ConfigKey;
@ -30,13 +29,13 @@ import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.MockitoAnnotations;
import org.mockito.junit.MockitoJUnitRunner;
import javax.naming.ConfigurationException;
import java.lang.reflect.Field;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
@ -66,14 +65,7 @@ public class BalancedTest {
Map<Long, List<VirtualMachine>> hostVmMap;
List<Long> cpuList, memoryList;
Map<Long, Long> hostCpuFreeMap, hostMemoryFreeMap;
@Mock
private ServiceOfferingDao serviceOfferingDao;
Map<Long, Ternary<Long, Long, Long>> hostCpuFreeMap, hostMemoryFreeMap;
private AutoCloseable closeable;
@ -98,20 +90,17 @@ public class BalancedTest {
Mockito.when(serviceOffering.getCpu()).thenReturn(1);
Mockito.when(serviceOffering.getSpeed()).thenReturn(1000);
Mockito.when(serviceOffering.getRamSize()).thenReturn(512);
Mockito.when(serviceOffering.getRamSize()).thenReturn(1024);
overrideDefaultConfigValue(ClusterDrsImbalanceThreshold, "_defaultValue", "0.5");
cpuList = Arrays.asList(1L, 2L);
memoryList = Arrays.asList(512L, 2048L);
hostCpuFreeMap = new HashMap<>();
hostCpuFreeMap.put(1L, 2000L);
hostCpuFreeMap.put(2L, 1000L);
hostCpuFreeMap.put(1L, new Ternary<>(1000L, 0L, 10000L));
hostCpuFreeMap.put(2L, new Ternary<>(2000L, 0L, 10000L));
hostMemoryFreeMap = new HashMap<>();
hostMemoryFreeMap.put(1L, 2048L * 1024L * 1024L);
hostMemoryFreeMap.put(2L, 512L * 1024L * 1024L);
hostMemoryFreeMap.put(1L, new Ternary<>(512L * 1024L * 1024L, 0L, 8192L * 1024L * 1024L));
hostMemoryFreeMap.put(2L, new Ternary<>(2048L * 1024L * 1024L, 0L, 8192L * 1024L * 1024L));
}
private void overrideDefaultConfigValue(final ConfigKey configKey, final String name,
@ -144,7 +133,7 @@ public class BalancedTest {
@Test
public void needsDrsWithCpu() throws ConfigurationException, NoSuchFieldException, IllegalAccessException {
overrideDefaultConfigValue(ClusterDrsMetric, "_defaultValue", "cpu");
assertFalse(balanced.needsDrs(clusterId, cpuList, memoryList));
assertFalse(balanced.needsDrs(clusterId, new ArrayList<>(hostCpuFreeMap.values()), new ArrayList<>(hostMemoryFreeMap.values())));
}
/*
@ -154,14 +143,14 @@ public class BalancedTest {
@Test
public void needsDrsWithMemory() throws ConfigurationException, NoSuchFieldException, IllegalAccessException {
overrideDefaultConfigValue(ClusterDrsMetric, "_defaultValue", "memory");
assertTrue(balanced.needsDrs(clusterId, cpuList, memoryList));
assertTrue(balanced.needsDrs(clusterId, new ArrayList<>(hostCpuFreeMap.values()), new ArrayList<>(hostMemoryFreeMap.values())));
}
/* 3. cluster with "unknown" metric */
@Test
public void needsDrsWithUnknown() throws ConfigurationException, NoSuchFieldException, IllegalAccessException {
public void needsDrsWithUnknown() throws NoSuchFieldException, IllegalAccessException {
overrideDefaultConfigValue(ClusterDrsMetric, "_defaultValue", "unknown");
assertThrows(ConfigurationException.class, () -> balanced.needsDrs(clusterId, cpuList, memoryList));
assertThrows(ConfigurationException.class, () -> balanced.needsDrs(clusterId, new ArrayList<>(hostCpuFreeMap.values()), new ArrayList<>(hostMemoryFreeMap.values())));
}
/**
@ -188,7 +177,7 @@ public class BalancedTest {
improvement = 0.3333 - 0.3333 = 0.0
*/
@Test
public void getMetricsWithCpu() throws NoSuchFieldException, IllegalAccessException {
public void getMetricsWithCpu() throws NoSuchFieldException, IllegalAccessException, ConfigurationException {
overrideDefaultConfigValue(ClusterDrsMetric, "_defaultValue", "cpu");
Ternary<Double, Double, Double> result = balanced.getMetrics(clusterId, vm3, serviceOffering, destHost,
hostCpuFreeMap, hostMemoryFreeMap, false);
@ -202,7 +191,7 @@ public class BalancedTest {
improvement = 0.6 - 0.2 = 0.4
*/
@Test
public void getMetricsWithMemory() throws NoSuchFieldException, IllegalAccessException {
public void getMetricsWithMemory() throws NoSuchFieldException, IllegalAccessException, ConfigurationException {
overrideDefaultConfigValue(ClusterDrsMetric, "_defaultValue", "memory");
Ternary<Double, Double, Double> result = balanced.getMetrics(clusterId, vm3, serviceOffering, destHost,
hostCpuFreeMap, hostMemoryFreeMap, false);
@ -210,18 +199,4 @@ public class BalancedTest {
assertEquals(0, result.second(), 0.0);
assertEquals(1, result.third(), 0.0);
}
/*
3. cluster with default metric
improvement = 0.3333 + 0.6 - 0.3333 - 0.2 = 0.4
*/
@Test
public void getMetricsWithDefault() throws NoSuchFieldException, IllegalAccessException {
overrideDefaultConfigValue(ClusterDrsMetric, "_defaultValue", "both");
Ternary<Double, Double, Double> result = balanced.getMetrics(clusterId, vm3, serviceOffering, destHost,
hostCpuFreeMap, hostMemoryFreeMap, false);
assertEquals(0.4, result.first(), 0.01);
assertEquals(0, result.second(), 0.0);
assertEquals(1, result.third(), 0.0);
}
}

View File

@ -21,78 +21,71 @@ package org.apache.cloudstack.cluster;
import com.cloud.host.Host;
import com.cloud.offering.ServiceOffering;
import com.cloud.utils.Pair;
import com.cloud.utils.Ternary;
import com.cloud.utils.component.AdapterBase;
import com.cloud.vm.VirtualMachine;
import org.apache.log4j.Logger;
import javax.naming.ConfigurationException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import static org.apache.cloudstack.cluster.ClusterDrsService.ClusterDrsImbalanceSkipThreshold;
import static org.apache.cloudstack.cluster.ClusterDrsService.ClusterDrsImbalanceThreshold;
import static org.apache.cloudstack.cluster.ClusterDrsService.ClusterDrsMetric;
public class Condensed extends AdapterBase implements ClusterDrsAlgorithm {
private static final Logger logger = Logger.getLogger(Condensed.class);
@Override
public boolean needsDrs(long clusterId, List<Ternary<Long, Long, Long>> cpuList,
List<Ternary<Long, Long, Long>> memoryList) throws ConfigurationException {
double threshold = getThreshold(clusterId);
Float skipThreshold = ClusterDrsImbalanceSkipThreshold.valueIn(clusterId);
Double imbalance = ClusterDrsAlgorithm.getClusterImbalance(clusterId, cpuList, memoryList, skipThreshold);
String drsMetric = ClusterDrsAlgorithm.getClusterDrsMetric(clusterId);
String metricType = ClusterDrsAlgorithm.getDrsMetricType(clusterId);
Boolean useRatio = ClusterDrsAlgorithm.getDrsMetricUseRatio(clusterId);
if (imbalance < threshold) {
logger.debug(String.format("Cluster %d needs DRS. Imbalance: %s Threshold: %s Algorithm: %s DRS metric: %s Metric Type: %s Use ratio: %s SkipThreshold: %s",
clusterId, imbalance, threshold, getName(), drsMetric, metricType, useRatio, skipThreshold));
return true;
} else {
logger.debug(String.format("Cluster %d does not need DRS. Imbalance: %s Threshold: %s Algorithm: %s DRS metric: %s Metric Type: %s Use ratio: %s SkipThreshold: %s",
clusterId, imbalance, threshold, getName(), drsMetric, metricType, useRatio, skipThreshold));
return false;
}
}
private double getThreshold(long clusterId) {
return ClusterDrsImbalanceThreshold.valueIn(clusterId);
}
@Override
public String getName() {
return "condensed";
}
@Override
public boolean needsDrs(long clusterId, List<Long> cpuList, List<Long> memoryList) throws ConfigurationException {
Double cpuImbalance = getClusterImbalance(cpuList);
Double memoryImbalance = getClusterImbalance(memoryList);
double threshold = getThreshold(clusterId);
String metric = ClusterDrsMetric.valueIn(clusterId);
switch (metric) {
case "cpu":
return cpuImbalance < threshold;
case "memory":
return memoryImbalance < threshold;
default:
throw new ConfigurationException(
String.format("Invalid metric: %s for cluster: %d", metric, clusterId));
}
}
private double getThreshold(long clusterId) throws ConfigurationException {
return ClusterDrsImbalanceThreshold.valueIn(clusterId);
}
@Override
public Ternary<Double, Double, Double> getMetrics(long clusterId, VirtualMachine vm,
ServiceOffering serviceOffering, Host destHost,
Map<Long, Long> hostCpuUsedMap, Map<Long, Long> hostMemoryUsedMap,
Boolean requiresStorageMotion) {
Double preCpuImbalance = getClusterImbalance(new ArrayList<>(hostCpuUsedMap.values()));
Double preMemoryImbalance = getClusterImbalance(new ArrayList<>(hostMemoryUsedMap.values()));
Map<Long, Ternary<Long, Long, Long>> hostCpuMap, Map<Long, Ternary<Long, Long, Long>> hostMemoryMap,
Boolean requiresStorageMotion) throws ConfigurationException {
Double preImbalance = ClusterDrsAlgorithm.getClusterImbalance(clusterId, new ArrayList<>(hostCpuMap.values()),
new ArrayList<>(hostMemoryMap.values()), null);
Double postImbalance = getImbalancePostMigration(serviceOffering, vm, destHost, hostCpuMap, hostMemoryMap);
Pair<Double, Double> imbalancePair = getImbalancePostMigration(serviceOffering, vm, destHost, hostCpuUsedMap,
hostMemoryUsedMap);
Double postCpuImbalance = imbalancePair.first();
Double postMemoryImbalance = imbalancePair.second();
logger.debug(String.format("Cluster %d pre-imbalance: %s post-imbalance: %s Algorithm: %s VM: %s srcHost: %d destHost: %s",
clusterId, preImbalance, postImbalance, getName(), vm.getUuid(), vm.getHostId(), destHost.getUuid()));
// This needs more research to determine the cost and benefit of a migration
// TODO: Cost should be a factor of the VM size and the host capacity
// TODO: Benefit should be a factor of the VM size and the host capacity and the number of VMs on the host
double cost = 0;
double benefit = 1;
String metric = ClusterDrsMetric.valueIn(clusterId);
double improvement;
switch (metric) {
case "cpu":
improvement = postCpuImbalance - preCpuImbalance;
break;
case "memory":
improvement = postMemoryImbalance - preMemoryImbalance;
break;
default:
improvement = postCpuImbalance + postMemoryImbalance - preCpuImbalance - preMemoryImbalance;
}
final double improvement = postImbalance - preImbalance;
final double cost = 0;
final double benefit = 1;
return new Ternary<>(improvement, cost, benefit);
}
}

View File

@ -35,6 +35,7 @@ import org.mockito.junit.MockitoJUnitRunner;
import javax.naming.ConfigurationException;
import java.lang.reflect.Field;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
@ -64,9 +65,7 @@ public class CondensedTest {
Map<Long, List<VirtualMachine>> hostVmMap;
List<Long> cpuList, memoryList;
Map<Long, Long> hostCpuFreeMap, hostMemoryFreeMap;
Map<Long, Ternary<Long, Long, Long>> hostCpuFreeMap, hostMemoryFreeMap;
private AutoCloseable closeable;
@ -95,16 +94,13 @@ public class CondensedTest {
overrideDefaultConfigValue(ClusterDrsImbalanceThreshold, "_defaultValue", "0.5");
cpuList = Arrays.asList(1L, 2L);
memoryList = Arrays.asList(512L, 2048L);
hostCpuFreeMap = new HashMap<>();
hostCpuFreeMap.put(1L, 2000L);
hostCpuFreeMap.put(2L, 1000L);
hostCpuFreeMap.put(1L, new Ternary<>(1000L, 0L, 10000L));
hostCpuFreeMap.put(2L, new Ternary<>(2000L, 0L, 10000L));
hostMemoryFreeMap = new HashMap<>();
hostMemoryFreeMap.put(1L, 2048L * 1024L * 1024L);
hostMemoryFreeMap.put(2L, 512L * 1024L * 1024L);
hostMemoryFreeMap.put(1L, new Ternary<>(512L * 1024L * 1024L, 0L, 8192L * 1024L * 1024L));
hostMemoryFreeMap.put(2L, new Ternary<>(2048L * 1024L * 1024L, 0L, 8192L * 1024L * 1024L));
}
private void overrideDefaultConfigValue(final ConfigKey configKey,
@ -138,7 +134,7 @@ public class CondensedTest {
@Test
public void needsDrsWithCpu() throws ConfigurationException, NoSuchFieldException, IllegalAccessException {
overrideDefaultConfigValue(ClusterDrsMetric, "_defaultValue", "cpu");
assertTrue(condensed.needsDrs(clusterId, cpuList, memoryList));
assertTrue(condensed.needsDrs(clusterId, new ArrayList<>(hostCpuFreeMap.values()), new ArrayList<>(hostMemoryFreeMap.values())));
}
/*
@ -148,14 +144,14 @@ public class CondensedTest {
@Test
public void needsDrsWithMemory() throws ConfigurationException, NoSuchFieldException, IllegalAccessException {
overrideDefaultConfigValue(ClusterDrsMetric, "_defaultValue", "memory");
assertFalse(condensed.needsDrs(clusterId, cpuList, memoryList));
assertFalse(condensed.needsDrs(clusterId, new ArrayList<>(hostCpuFreeMap.values()), new ArrayList<>(hostMemoryFreeMap.values())));
}
/* 3. cluster with "unknown" metric */
@Test
public void needsDrsWithUnknown() throws ConfigurationException, NoSuchFieldException, IllegalAccessException {
public void needsDrsWithUnknown() throws NoSuchFieldException, IllegalAccessException {
overrideDefaultConfigValue(ClusterDrsMetric, "_defaultValue", "unknown");
assertThrows(ConfigurationException.class, () -> condensed.needsDrs(clusterId, cpuList, memoryList));
assertThrows(ConfigurationException.class, () -> condensed.needsDrs(clusterId, new ArrayList<>(hostCpuFreeMap.values()), new ArrayList<>(hostMemoryFreeMap.values())));
}
/**
@ -182,7 +178,7 @@ public class CondensedTest {
improvement = 0.3333 - 0.3333 = 0.0
*/
@Test
public void getMetricsWithCpu() throws NoSuchFieldException, IllegalAccessException {
public void getMetricsWithCpu() throws NoSuchFieldException, IllegalAccessException, ConfigurationException {
overrideDefaultConfigValue(ClusterDrsMetric, "_defaultValue", "cpu");
Ternary<Double, Double, Double> result = condensed.getMetrics(clusterId, vm3, serviceOffering, destHost,
hostCpuFreeMap, hostMemoryFreeMap, false);
@ -196,7 +192,7 @@ public class CondensedTest {
improvement = 0.2 - 0.6 = -0.4
*/
@Test
public void getMetricsWithMemory() throws NoSuchFieldException, IllegalAccessException {
public void getMetricsWithMemory() throws NoSuchFieldException, IllegalAccessException, ConfigurationException {
overrideDefaultConfigValue(ClusterDrsMetric, "_defaultValue", "memory");
Ternary<Double, Double, Double> result = condensed.getMetrics(clusterId, vm3, serviceOffering, destHost,
hostCpuFreeMap, hostMemoryFreeMap, false);
@ -204,18 +200,4 @@ public class CondensedTest {
assertEquals(0, result.second(), 0.0);
assertEquals(1, result.third(), 0.0);
}
/*
3. cluster with default metric
improvement = 0.3333 + 0.2 - 0.3333 - 0.6 = -0.4
*/
@Test
public void getMetricsWithDefault() throws NoSuchFieldException, IllegalAccessException {
overrideDefaultConfigValue(ClusterDrsMetric, "_defaultValue", "both");
Ternary<Double, Double, Double> result = condensed.getMetrics(clusterId, vm3, serviceOffering, destHost,
hostCpuFreeMap, hostMemoryFreeMap, false);
assertEquals(-0.4, result.first(), 0.0001);
assertEquals(0, result.second(), 0.0);
assertEquals(1, result.third(), 0.0);
}
}

View File

@ -2201,7 +2201,7 @@ public class LibvirtVMDef {
public static class WatchDogDef {
enum WatchDogModel {
I6300ESB("i6300esb"), IB700("ib700"), DIAG288("diag288");
I6300ESB("i6300esb"), IB700("ib700"), DIAG288("diag288"), ITCO("itco");
String model;
WatchDogModel(String model) {
@ -2215,7 +2215,7 @@ public class LibvirtVMDef {
}
enum WatchDogAction {
RESET("reset"), SHUTDOWN("shutdown"), POWEROFF("poweroff"), PAUSE("pause"), NONE("none"), DUMP("dump");
RESET("reset"), SHUTDOWN("shutdown"), POWEROFF("poweroff"), PAUSE("pause"), NONE("none"), DUMP("dump"), INJECT_NMI("inject-nmi");
String action;
WatchDogAction(String action) {

View File

@ -29,7 +29,9 @@ import java.util.Map;
import java.util.Properties;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import com.cloud.utils.Ternary;
import org.apache.cloudstack.api.ApiErrorCode;
import org.apache.cloudstack.api.ListClustersMetricsCmd;
import org.apache.cloudstack.api.ListDbMetricsCmd;
@ -55,6 +57,7 @@ import org.apache.cloudstack.api.response.StoragePoolResponse;
import org.apache.cloudstack.api.response.UserVmResponse;
import org.apache.cloudstack.api.response.VolumeResponse;
import org.apache.cloudstack.api.response.ZoneResponse;
import org.apache.cloudstack.cluster.ClusterDrsAlgorithm;
import org.apache.cloudstack.context.CallContext;
import org.apache.cloudstack.management.ManagementServerHost.State;
import org.apache.cloudstack.response.ClusterMetricsResponse;
@ -760,10 +763,13 @@ public class MetricsServiceImpl extends MutualExclusiveIdsManagerBase implements
final Long clusterId = cluster.getId();
// CPU and memory capacities
final CapacityDaoImpl.SummedCapacity cpuCapacity = getCapacity((int) Capacity.CAPACITY_TYPE_CPU, null, clusterId);
final CapacityDaoImpl.SummedCapacity memoryCapacity = getCapacity((int) Capacity.CAPACITY_TYPE_MEMORY, null, clusterId);
final CapacityDaoImpl.SummedCapacity cpuCapacity = getCapacity(Capacity.CAPACITY_TYPE_CPU, null, clusterId);
final CapacityDaoImpl.SummedCapacity memoryCapacity = getCapacity(Capacity.CAPACITY_TYPE_MEMORY, null, clusterId);
final HostMetrics hostMetrics = new HostMetrics(cpuCapacity, memoryCapacity);
List<Ternary<Long, Long, Long>> cpuList = new ArrayList<>();
List<Ternary<Long, Long, Long>> memoryList = new ArrayList<>();
for (final Host host: hostDao.findByClusterId(clusterId)) {
if (host == null || host.getType() != Host.Type.Routing) {
continue;
@ -772,7 +778,18 @@ public class MetricsServiceImpl extends MutualExclusiveIdsManagerBase implements
hostMetrics.incrUpResources();
}
hostMetrics.incrTotalResources();
updateHostMetrics(hostMetrics, hostJoinDao.findById(host.getId()));
HostJoinVO hostJoin = hostJoinDao.findById(host.getId());
updateHostMetrics(hostMetrics, hostJoin);
cpuList.add(new Ternary<>(hostJoin.getCpuUsedCapacity(), hostJoin.getCpuReservedCapacity(), hostJoin.getCpus() * hostJoin.getSpeed()));
memoryList.add(new Ternary<>(hostJoin.getMemUsedCapacity(), hostJoin.getMemReservedCapacity(), hostJoin.getTotalMemory()));
}
try {
Double imbalance = ClusterDrsAlgorithm.getClusterImbalance(clusterId, cpuList, memoryList, null);
metricsResponse.setDrsImbalance(imbalance.isNaN() ? null : 100.0 * imbalance);
} catch (ConfigurationException e) {
LOGGER.warn("Failed to get cluster imbalance for cluster " + clusterId, e);
}
metricsResponse.setState(clusterResponse.getAllocationState(), clusterResponse.getManagedState());

View File

@ -94,6 +94,10 @@ public class ClusterMetricsResponse extends ClusterResponse implements HostMetri
@Param(description = "memory allocated disable threshold exceeded")
private Boolean memoryAllocatedDisableThresholdExceeded;
@SerializedName("drsimbalance")
@Param(description = "DRS imbalance for the cluster")
private String drsImbalance;
public void setState(final String allocationState, final String managedState) {
this.state = allocationState;
if (managedState.equals("Unmanaged")) {
@ -208,4 +212,12 @@ public class ClusterMetricsResponse extends ClusterResponse implements HostMetri
this.memoryAllocatedDisableThresholdExceeded = (1.0 * memAllocated / memTotal) > threshold;
}
}
public void setDrsImbalance(Double drsImbalance) {
if (drsImbalance != null) {
this.drsImbalance = String.format("%.2f%%", drsImbalance);
} else {
this.drsImbalance = null;
}
}
}

View File

@ -16,18 +16,16 @@
// under the License.
package com.cloud.hypervisor.kvm.storage;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.StringJoiner;
import javax.annotation.Nonnull;
import com.cloud.storage.Storage;
import com.cloud.utils.exception.CloudRuntimeException;
import org.apache.cloudstack.storage.datastore.util.LinstorUtil;
import org.apache.cloudstack.utils.qemu.QemuImg;
import org.apache.cloudstack.utils.qemu.QemuImgException;
@ -36,8 +34,6 @@ import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
import org.libvirt.LibvirtException;
import com.cloud.storage.Storage;
import com.cloud.utils.exception.CloudRuntimeException;
import com.linbit.linstor.api.ApiClient;
import com.linbit.linstor.api.ApiException;
import com.linbit.linstor.api.Configuration;
@ -48,7 +44,6 @@ import com.linbit.linstor.api.model.Properties;
import com.linbit.linstor.api.model.ProviderKind;
import com.linbit.linstor.api.model.ResourceDefinition;
import com.linbit.linstor.api.model.ResourceDefinitionModify;
import com.linbit.linstor.api.model.ResourceGroup;
import com.linbit.linstor.api.model.ResourceGroupSpawn;
import com.linbit.linstor.api.model.ResourceMakeAvailable;
import com.linbit.linstor.api.model.ResourceWithVolumes;
@ -75,28 +70,6 @@ public class LinstorStorageAdaptor implements StorageAdaptor {
return LinstorUtil.RSC_PREFIX + name;
}
private String getHostname() {
// either there is already some function for that in the agent or a better way.
ProcessBuilder pb = new ProcessBuilder("/usr/bin/hostname");
try
{
String result;
Process p = pb.start();
final BufferedReader reader = new BufferedReader(new InputStreamReader(p.getInputStream()));
StringJoiner sj = new StringJoiner(System.getProperty("line.separator"));
reader.lines().iterator().forEachRemaining(sj::add);
result = sj.toString();
p.waitFor();
p.destroy();
return result.trim();
} catch (IOException | InterruptedException exc) {
Thread.currentThread().interrupt();
throw new CloudRuntimeException("Unable to run '/usr/bin/hostname' command.");
}
}
private void logLinstorAnswer(@Nonnull ApiCallRc answer) {
if (answer.isError()) {
logger.error(answer.getMessage());
@ -127,7 +100,7 @@ public class LinstorStorageAdaptor implements StorageAdaptor {
}
public LinstorStorageAdaptor() {
localNodeName = getHostname();
localNodeName = LinstorStoragePool.getHostname();
}
@Override
@ -515,25 +488,7 @@ public class LinstorStorageAdaptor implements StorageAdaptor {
DevelopersApi linstorApi = getLinstorAPI(pool);
final String rscGroupName = pool.getResourceGroup();
try {
List<ResourceGroup> rscGrps = linstorApi.resourceGroupList(
Collections.singletonList(rscGroupName),
null,
null,
null);
if (rscGrps.isEmpty()) {
final String errMsg = String.format("Linstor: Resource group '%s' not found", rscGroupName);
logger.error(errMsg);
throw new CloudRuntimeException(errMsg);
}
List<StoragePool> storagePools = linstorApi.viewStoragePools(
Collections.emptyList(),
rscGrps.get(0).getSelectFilter().getStoragePoolList(),
null,
null,
null
);
List<StoragePool> storagePools = LinstorUtil.getRscGroupStoragePools(linstorApi, rscGroupName);
final long free = storagePools.stream()
.filter(sp -> sp.getProviderKind() != ProviderKind.DISKLESS)
@ -551,25 +506,7 @@ public class LinstorStorageAdaptor implements StorageAdaptor {
DevelopersApi linstorApi = getLinstorAPI(pool);
final String rscGroupName = pool.getResourceGroup();
try {
List<ResourceGroup> rscGrps = linstorApi.resourceGroupList(
Collections.singletonList(rscGroupName),
null,
null,
null);
if (rscGrps.isEmpty()) {
final String errMsg = String.format("Linstor: Resource group '%s' not found", rscGroupName);
logger.error(errMsg);
throw new CloudRuntimeException(errMsg);
}
List<StoragePool> storagePools = linstorApi.viewStoragePools(
Collections.emptyList(),
rscGrps.get(0).getSelectFilter().getStoragePoolList(),
null,
null,
null
);
List<StoragePool> storagePools = LinstorUtil.getRscGroupStoragePools(linstorApi, rscGroupName);
final long used = storagePools.stream()
.filter(sp -> sp.getProviderKind() != ProviderKind.DISKLESS)

View File

@ -19,20 +19,33 @@ package com.cloud.hypervisor.kvm.storage;
import java.util.List;
import java.util.Map;
import org.apache.cloudstack.utils.qemu.QemuImg;
import org.joda.time.Duration;
import com.cloud.agent.api.to.HostTO;
import com.cloud.agent.properties.AgentProperties;
import com.cloud.agent.properties.AgentPropertiesFileHandler;
import com.cloud.hypervisor.kvm.resource.KVMHABase.HAStoragePool;
import com.cloud.storage.Storage;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.script.OutputInterpreter;
import com.cloud.utils.script.Script;
import com.google.gson.JsonArray;
import com.google.gson.JsonElement;
import com.google.gson.JsonIOException;
import com.google.gson.JsonObject;
import com.google.gson.JsonParser;
import com.google.gson.JsonSyntaxException;
import org.apache.cloudstack.utils.qemu.QemuImg;
import org.apache.log4j.Logger;
import org.joda.time.Duration;
public class LinstorStoragePool implements KVMStoragePool {
private static final Logger s_logger = Logger.getLogger(LinstorStoragePool.class);
private final String _uuid;
private final String _sourceHost;
private final int _sourcePort;
private final Storage.StoragePoolType _storagePoolType;
private final StorageAdaptor _storageAdaptor;
private final String _resourceGroup;
private final String localNodeName;
public LinstorStoragePool(String uuid, String host, int port, String resourceGroup,
Storage.StoragePoolType storagePoolType, StorageAdaptor storageAdaptor) {
@ -42,6 +55,7 @@ public class LinstorStoragePool implements KVMStoragePool {
_storagePoolType = storagePoolType;
_storageAdaptor = storageAdaptor;
_resourceGroup = resourceGroup;
localNodeName = getHostname();
}
@Override
@ -200,32 +214,132 @@ public class LinstorStoragePool implements KVMStoragePool {
@Override
public boolean isPoolSupportHA() {
return false;
return true;
}
@Override
public String getHearthBeatPath() {
return null;
String kvmScriptsDir = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.KVM_SCRIPTS_DIR);
return Script.findScript(kvmScriptsDir, "kvmspheartbeat.sh");
}
@Override
public String createHeartBeatCommand(HAStoragePool primaryStoragePool, String hostPrivateIp,
public String createHeartBeatCommand(HAStoragePool pool, String hostPrivateIp,
boolean hostValidation) {
return null;
s_logger.trace(String.format("Linstor.createHeartBeatCommand: %s, %s, %b", pool.getPoolIp(), hostPrivateIp, hostValidation));
boolean isStorageNodeUp = checkingHeartBeat(pool, null);
if (!isStorageNodeUp && !hostValidation) {
//restart the host
s_logger.debug(String.format("The host [%s] will be restarted because the health check failed for the storage pool [%s]", hostPrivateIp, pool.getPool().getType()));
Script cmd = new Script(pool.getPool().getHearthBeatPath(), Duration.millis(HeartBeatUpdateTimeout), s_logger);
cmd.add("-c");
cmd.execute();
return "Down";
}
return isStorageNodeUp ? null : "Down";
}
@Override
public String getStorageNodeId() {
// only called by storpool
return null;
}
static String getHostname() {
OutputInterpreter.AllLinesParser parser = new OutputInterpreter.AllLinesParser();
Script sc = new Script("hostname", Duration.millis(10000L), s_logger);
String res = sc.execute(parser);
if (res != null) {
throw new CloudRuntimeException(String.format("Unable to run 'hostname' command: %s", res));
}
String response = parser.getLines();
return response.trim();
}
@Override
public Boolean checkingHeartBeat(HAStoragePool pool, HostTO host) {
return null;
String hostName;
if (host == null) {
hostName = localNodeName;
} else {
hostName = host.getParent();
if (hostName == null) {
s_logger.error("No hostname set in host.getParent()");
return false;
}
}
return checkHostUpToDateAndConnected(hostName);
}
private String executeDrbdSetupStatus(OutputInterpreter.AllLinesParser parser) {
Script sc = new Script("drbdsetup", Duration.millis(HeartBeatUpdateTimeout), s_logger);
sc.add("status");
sc.add("--json");
return sc.execute(parser);
}
private boolean checkDrbdSetupStatusOutput(String output, String otherNodeName) {
JsonParser jsonParser = new JsonParser();
JsonArray jResources = (JsonArray) jsonParser.parse(output);
for (JsonElement jElem : jResources) {
JsonObject jRes = (JsonObject) jElem;
JsonArray jConnections = jRes.getAsJsonArray("connections");
for (JsonElement jConElem : jConnections) {
JsonObject jConn = (JsonObject) jConElem;
if (jConn.getAsJsonPrimitive("name").getAsString().equals(otherNodeName)
&& jConn.getAsJsonPrimitive("connection-state").getAsString().equalsIgnoreCase("Connected")) {
return true;
}
}
}
s_logger.warn(String.format("checkDrbdSetupStatusOutput: no resource connected to %s.", otherNodeName));
return false;
}
private String executeDrbdEventsNow(OutputInterpreter.AllLinesParser parser) {
Script sc = new Script("drbdsetup", Duration.millis(HeartBeatUpdateTimeout), s_logger);
sc.add("events2");
sc.add("--now");
return sc.execute(parser);
}
private boolean checkDrbdEventsNowOutput(String output) {
boolean healthy = output.lines().noneMatch(line -> line.matches(".*role:Primary .* promotion_score:0.*"));
if (!healthy) {
s_logger.warn("checkDrbdEventsNowOutput: primary resource with promotion score==0; HA false");
}
return healthy;
}
private boolean checkHostUpToDateAndConnected(String hostName) {
s_logger.trace(String.format("checkHostUpToDateAndConnected: %s/%s", localNodeName, hostName));
OutputInterpreter.AllLinesParser parser = new OutputInterpreter.AllLinesParser();
if (localNodeName.equalsIgnoreCase(hostName)) {
String res = executeDrbdEventsNow(parser);
if (res != null) {
return false;
}
return checkDrbdEventsNowOutput(parser.getLines());
} else {
// check drbd connections
String res = executeDrbdSetupStatus(parser);
if (res != null) {
return false;
}
try {
return checkDrbdSetupStatusOutput(parser.getLines(), hostName);
} catch (JsonIOException | JsonSyntaxException e) {
s_logger.error("Error parsing drbdsetup status --json", e);
}
}
return false;
}
@Override
public Boolean vmActivityCheck(HAStoragePool pool, HostTO host, Duration activityScriptTimeout, String volumeUUIDListString, String vmActivityCheckPath, long duration) {
return null;
s_logger.trace(String.format("Linstor.vmActivityCheck: %s, %s", pool.getPoolIp(), host.getPrivateNetwork().getIp()));
return checkingHeartBeat(pool, host);
}
}

View File

@ -1241,7 +1241,7 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
@Override
public boolean isStorageSupportHA(StoragePoolType type) {
return false;
return true;
}
@Override

View File

@ -0,0 +1,32 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.provider;
import com.cloud.exception.StorageConflictException;
import com.cloud.host.HostVO;
public class LinstorHostListener extends DefaultHostListener {
@Override
public boolean hostConnect(long hostId, long poolId) throws StorageConflictException {
HostVO host = hostDao.findById(hostId);
if (host.getParent() == null) {
host.setParent(host.getName());
hostDao.update(host.getId(), host);
}
return super.hostConnect(hostId, poolId);
}
}

View File

@ -48,7 +48,7 @@ public class LinstorPrimaryDatastoreProviderImpl implements PrimaryDataStoreProv
public boolean configure(Map<String, Object> params) {
lifecycle = ComponentContext.inject(LinstorPrimaryDataStoreLifeCycleImpl.class);
driver = ComponentContext.inject(LinstorPrimaryDataStoreDriverImpl.class);
listener = ComponentContext.inject(DefaultHostListener.class);
listener = ComponentContext.inject(LinstorHostListener.class);
return true;
}

View File

@ -137,10 +137,9 @@ public class LinstorUtil {
return path;
}
public static long getCapacityBytes(String linstorUrl, String rscGroupName) {
DevelopersApi linstorApi = getLinstorAPI(linstorUrl);
try {
List<ResourceGroup> rscGrps = linstorApi.resourceGroupList(
public static List<StoragePool> getRscGroupStoragePools(DevelopersApi api, String rscGroupName)
throws ApiException {
List<ResourceGroup> rscGrps = api.resourceGroupList(
Collections.singletonList(rscGroupName),
null,
null,
@ -152,13 +151,19 @@ public class LinstorUtil {
throw new CloudRuntimeException(errMsg);
}
List<StoragePool> storagePools = linstorApi.viewStoragePools(
return api.viewStoragePools(
Collections.emptyList(),
rscGrps.get(0).getSelectFilter().getStoragePoolList(),
null,
null,
null
);
}
public static long getCapacityBytes(String linstorUrl, String rscGroupName) {
DevelopersApi linstorApi = getLinstorAPI(linstorUrl);
try {
List<StoragePool> storagePools = getRscGroupStoragePools(linstorApi, rscGroupName);
return storagePools.stream()
.filter(sp -> sp.getProviderKind() != ProviderKind.DISKLESS)

View File

@ -575,6 +575,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
weightBasedParametersForValidation.add(Config.VmUserDispersionWeight.key());
weightBasedParametersForValidation.add(CapacityManager.SecondaryStorageCapacityThreshold.key());
weightBasedParametersForValidation.add(ClusterDrsService.ClusterDrsImbalanceThreshold.key());
weightBasedParametersForValidation.add(ClusterDrsService.ClusterDrsImbalanceSkipThreshold.key());
}

View File

@ -27,6 +27,7 @@ import java.util.Map;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import org.apache.commons.collections.CollectionUtils;
import org.springframework.stereotype.Component;
import com.cloud.agent.api.Answer;
@ -292,7 +293,23 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian
}
}
return super.finalizeVirtualMachineProfile(profile, dest, context);
super.finalizeVirtualMachineProfile(profile, dest, context);
appendSourceNatIpToBootArgs(profile);
return true;
}
private void appendSourceNatIpToBootArgs(final VirtualMachineProfile profile) {
final StringBuilder buf = profile.getBootArgsBuilder();
final DomainRouterVO router = _routerDao.findById(profile.getVirtualMachine().getId());
if (router != null && router.getVpcId() != null) {
List<IPAddressVO> vpcIps = _ipAddressDao.listByAssociatedVpc(router.getVpcId(), true);
if (CollectionUtils.isNotEmpty(vpcIps)) {
buf.append(String.format(" source_nat_ip=%s", vpcIps.get(0).getAddress().toString()));
if (s_logger.isDebugEnabled()) {
s_logger.debug("The final Boot Args for " + profile + ": " + buf);
}
}
}
}
@Override

View File

@ -768,20 +768,16 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager {
String[] hostPossibleValues = {host.getPrivateIpAddress(), host.getName()};
String[] datastoresPossibleValues = {datastore.getUuid(), datastore.getName()};
updateVmState(vm, VirtualMachine.Event.RestoringRequested, VirtualMachine.State.Restoring);
Pair<Boolean, String> result = restoreBackedUpVolume(backedUpVolumeUuid, backup, backupProvider, hostPossibleValues, datastoresPossibleValues);
if (BooleanUtils.isFalse(result.first())) {
updateVmState(vm, VirtualMachine.Event.RestoringFailed, VirtualMachine.State.Stopped);
throw new CloudRuntimeException(String.format("Error restoring volume [%s] of VM [%s] to host [%s] using backup provider [%s] due to: [%s].",
backedUpVolumeUuid, vm.getUuid(), host.getUuid(), backupProvider.getName(), result.second()));
}
if (!attachVolumeToVM(vm.getDataCenterId(), result.second(), vmFromBackup.getBackupVolumeList(),
backedUpVolumeUuid, vm, datastore.getUuid(), backup)) {
updateVmState(vm, VirtualMachine.Event.RestoringFailed, VirtualMachine.State.Stopped);
throw new CloudRuntimeException(String.format("Error attaching volume [%s] to VM [%s]." + backedUpVolumeUuid, vm.getUuid()));
}
updateVmState(vm, VirtualMachine.Event.RestoringSuccess, VirtualMachine.State.Stopped);
return true;
}

View File

@ -350,10 +350,10 @@ public class ClusterDrsServiceImpl extends ManagerBase implements ClusterDrsServ
List<HostJoinVO> hostJoinList = hostJoinDao.searchByIds(
hostList.stream().map(HostVO::getId).toArray(Long[]::new));
Map<Long, Long> hostCpuMap = hostJoinList.stream().collect(Collectors.toMap(HostJoinVO::getId,
hostJoin -> hostJoin.getCpus() * hostJoin.getSpeed() - hostJoin.getCpuReservedCapacity() - hostJoin.getCpuUsedCapacity()));
Map<Long, Long> hostMemoryMap = hostJoinList.stream().collect(Collectors.toMap(HostJoinVO::getId,
hostJoin -> hostJoin.getTotalMemory() - hostJoin.getMemUsedCapacity() - hostJoin.getMemReservedCapacity()));
Map<Long, Ternary<Long, Long, Long>> hostCpuMap = hostJoinList.stream().collect(Collectors.toMap(HostJoinVO::getId,
hostJoin -> new Ternary<>(hostJoin.getCpuUsedCapacity(), hostJoin.getCpuReservedCapacity(), hostJoin.getCpus() * hostJoin.getSpeed())));
Map<Long, Ternary<Long, Long, Long>> hostMemoryMap = hostJoinList.stream().collect(Collectors.toMap(HostJoinVO::getId,
hostJoin -> new Ternary<>(hostJoin.getMemUsedCapacity(), hostJoin.getMemReservedCapacity(), hostJoin.getTotalMemory())));
Map<Long, ServiceOffering> vmIdServiceOfferingMap = new HashMap<>();
@ -372,6 +372,8 @@ public class ClusterDrsServiceImpl extends ManagerBase implements ClusterDrsServ
logger.debug("VM migrating to it's original host or no host found for migration");
break;
}
logger.debug(String.format("Plan for VM %s to migrate from host %s to host %s", vm.getUuid(),
hostMap.get(vm.getHostId()).getUuid(), destHost.getUuid()));
ServiceOffering serviceOffering = vmIdServiceOfferingMap.get(vm.getId());
migrationPlan.add(new Ternary<>(vm, hostMap.get(vm.getHostId()), hostMap.get(destHost.getId())));
@ -384,10 +386,11 @@ public class ClusterDrsServiceImpl extends ManagerBase implements ClusterDrsServ
long vmCpu = (long) serviceOffering.getCpu() * serviceOffering.getSpeed();
long vmMemory = serviceOffering.getRamSize() * 1024L * 1024L;
hostCpuMap.put(vm.getHostId(), hostCpuMap.get(vm.getHostId()) + vmCpu);
hostCpuMap.put(destHost.getId(), hostCpuMap.get(destHost.getId()) - vmCpu);
hostMemoryMap.put(vm.getHostId(), hostMemoryMap.get(vm.getHostId()) + vmMemory);
hostMemoryMap.put(destHost.getId(), hostMemoryMap.get(destHost.getId()) - vmMemory);
// Updating the map as per the migration
hostCpuMap.get(vm.getHostId()).first(hostCpuMap.get(vm.getHostId()).first() - vmCpu);
hostCpuMap.get(destHost.getId()).first(hostCpuMap.get(destHost.getId()).first() + vmCpu);
hostMemoryMap.get(vm.getHostId()).first(hostMemoryMap.get(vm.getHostId()).first() - vmMemory);
hostMemoryMap.get(destHost.getId()).first(hostMemoryMap.get(destHost.getId()).first() + vmMemory);
vm.setHostId(destHost.getId());
iteration++;
}
@ -440,8 +443,8 @@ public class ClusterDrsServiceImpl extends ManagerBase implements ClusterDrsServ
Pair<VirtualMachine, Host> getBestMigration(Cluster cluster, ClusterDrsAlgorithm algorithm,
List<VirtualMachine> vmList,
Map<Long, ServiceOffering> vmIdServiceOfferingMap,
Map<Long, Long> hostCpuCapacityMap,
Map<Long, Long> hostMemoryCapacityMap) {
Map<Long, Ternary<Long, Long, Long>> hostCpuCapacityMap,
Map<Long, Ternary<Long, Long, Long>> hostMemoryCapacityMap) throws ConfigurationException {
double improvement = 0;
Pair<VirtualMachine, Host> bestMigration = new Pair<>(null, null);
@ -624,8 +627,9 @@ public class ClusterDrsServiceImpl extends ManagerBase implements ClusterDrsServ
@Override
public ConfigKey<?>[] getConfigKeys() {
return new ConfigKey<?>[]{ClusterDrsPlanExpireInterval, ClusterDrsEnabled, ClusterDrsInterval,
ClusterDrsMaxMigrations, ClusterDrsAlgorithm, ClusterDrsImbalanceThreshold, ClusterDrsMetric};
return new ConfigKey<?>[]{ClusterDrsPlanExpireInterval, ClusterDrsEnabled, ClusterDrsInterval, ClusterDrsMaxMigrations,
ClusterDrsAlgorithm, ClusterDrsImbalanceThreshold, ClusterDrsMetric, ClusterDrsMetricType, ClusterDrsMetricUseRatio,
ClusterDrsImbalanceSkipThreshold};
}
@Override

View File

@ -180,14 +180,12 @@ public class ClusterDrsServiceImplTest {
Mockito.when(hostJoin1.getCpuUsedCapacity()).thenReturn(1000L);
Mockito.when(hostJoin1.getCpuReservedCapacity()).thenReturn(0L);
Mockito.when(hostJoin1.getMemUsedCapacity()).thenReturn(1024L);
Mockito.when(hostJoin1.getMemReservedCapacity()).thenReturn(512L);
HostJoinVO hostJoin2 = Mockito.mock(HostJoinVO.class);
Mockito.when(hostJoin2.getId()).thenReturn(2L);
Mockito.when(hostJoin2.getCpuUsedCapacity()).thenReturn(1000L);
Mockito.when(hostJoin2.getCpuReservedCapacity()).thenReturn(0L);
Mockito.when(hostJoin2.getMemUsedCapacity()).thenReturn(1024L);
Mockito.when(hostJoin2.getMemReservedCapacity()).thenReturn(512L);
List<VMInstanceVO> vmList = new ArrayList<>();
vmList.add(vm1);
@ -350,7 +348,7 @@ public class ClusterDrsServiceImplTest {
}
@Test
public void testGetBestMigration() {
public void testGetBestMigration() throws ConfigurationException {
ClusterVO cluster = Mockito.mock(ClusterVO.class);
Mockito.when(cluster.getId()).thenReturn(1L);

View File

@ -453,8 +453,8 @@ class CsIP:
["", "", "-A NETWORK_STATS_%s -o %s ! -i eth0 -p tcp" % (self.dev, self.dev)])
self.fw.append(
["", "", "-A NETWORK_STATS_%s -i %s ! -o eth0 -p tcp" % (self.dev, self.dev)])
self.fw.append(["nat", "",
"-A POSTROUTING -o %s -j SNAT --to-source %s" % (self.dev, self.cl.get_eth2_ip())])
self.fw.append(
["nat", "", "-A POSTROUTING -o %s -j SNAT --to-source %s" % (self.dev, self.cl.get_eth2_ip())])
self.fw.append(["mangle", "",
"-A PREROUTING -i %s -m state --state NEW " % self.dev +
"-j CONNMARK --set-xmark %s/0xffffffff" % self.dnum])
@ -695,6 +695,9 @@ class CsIP:
["filter", 3, "-A FORWARD -s %s ! -d %s -j ACCEPT" % (vpccidr, vpccidr)])
self.fw.append(
["nat", "", "-A POSTROUTING -j SNAT -o %s --to-source %s" % (self.dev, self.address['public_ip'])])
elif cmdline.get_source_nat_ip() and not self.is_private_gateway():
self.fw.append(
["nat", "", "-A POSTROUTING -j SNAT -o %s --to-source %s" % (self.dev, cmdline.get_source_nat_ip())])
def list(self):
self.iplist = {}

View File

@ -181,6 +181,12 @@ class CsCmdLine(CsDataBag):
return False
return "%s/%s" % (self.idata()[ipkey], self.idata()[prelenkey])
def get_source_nat_ip(self):
if "source_nat_ip" in self.idata():
return self.idata()['source_nat_ip']
return False
class CsGuestNetwork(CsDataBag):
""" Get guestnetwork config parameters """

View File

@ -783,6 +783,7 @@
"label.dpd": "Dead peer detection",
"label.driver": "Driver",
"label.drs": "DRS",
"label.drsimbalance": "DRS imbalance",
"label.drs.plan": "DRS Plan",
"label.drs.generate.plan": "Generate DRS plan",
"label.drs.no.plan.generated": "No DRS plan has been generated as the cluster is not imbalanced according to the threshold set",
@ -1299,7 +1300,7 @@
"label.memoryallocatedgb": "Memory allocated",
"label.memorylimit": "Memory limits (MiB)",
"label.memorymaxdeviation": "Deviation",
"label.memorytotal": "Memory allocated",
"label.memorytotal": "Memory total",
"label.memorytotalgb": "Memory total",
"label.memoryused": "Used memory",
"label.memoryusedgb": "Memory used",
@ -3057,7 +3058,8 @@
"message.remove.vpc": "Please confirm that you want to remove the VPC",
"message.request.failed": "Request failed.",
"message.required.add.least.ip": "Please add at least 1 IP Range",
"message.required.traffic.type": "Error in configuration! All required traffic types should be added and with multiple physical Networks each Network should have a label.",
"message.required.traffic.type": "All required traffic types should be added and with multiple physical networks each network should have a label.",
"message.required.tagged.physical.network": "There can only be one untagged physical network with guest traffic type.",
"message.reset.vpn.connection": "Please confirm that you want to reset VPN connection.",
"message.resize.volume.failed": "Failed to resize volume.",
"message.resize.volume.processing": "Volume resize is in progress",
@ -3093,7 +3095,7 @@
"message.set.default.nic": "Please confirm that you would like to make this NIC the default for this Instance.",
"message.set.default.nic.manual": "Please manually update the default NIC on the Instance now.",
"message.setting.updated": "Setting Updated:",
"message.setup.physical.network.during.zone.creation": "When adding a zone, you need to set up one or more physical Networks. Each Network corresponds to a NIC on the hypervisor. Each physical Network can carry one or more types of traffic, with certain restrictions on how they may be combined. Add or remove one or more traffic types onto each physical Network.",
"message.setup.physical.network.during.zone.creation": "When adding a zone, you need to set up one or more physical networks. Each physical network can carry one or more types of traffic, with certain restrictions on how they may be combined. Add or remove one or more traffic types onto each physical network.",
"message.setup.physical.network.during.zone.creation.basic": "When adding a basic zone, you can set up one physical Network, which corresponds to a NIC on the hypervisor. The Network carries several types of traffic.<br/><br/>You may also <strong>add</strong> other traffic types onto the physical Network.",
"message.shared.network.offering.warning": "Domain admins and regular Users can only create shared Networks from Network offering with the setting specifyvlan=false. Please contact an administrator to create a Network offering if this list is empty.",
"message.shutdown.triggered": "A shutdown has been triggered. CloudStack will not accept new jobs",

View File

@ -337,6 +337,9 @@
<template v-if="column.key === 'templateversion'">
<span> {{ record.version }} </span>
</template>
<template v-if="column.key === 'drsimbalance'">
<span> {{ record.drsimbalance }} </span>
</template>
<template v-if="column.key === 'softwareversion'">
<span> {{ record.softwareversion ? record.softwareversion : 'N/A' }} </span>
</template>

View File

@ -603,30 +603,35 @@ export default {
return types
},
fetchState () {
const state = []
if (this.apiName.indexOf('listVolumes') > -1) {
state.push({
if (this.apiName.includes('listVolumes')) {
return [
{
id: 'Allocated',
name: 'label.allocated'
})
state.push({
},
{
id: 'Ready',
name: 'label.isready'
})
state.push({
},
{
id: 'Destroy',
name: 'label.destroy'
})
state.push({
},
{
id: 'Expunging',
name: 'label.expunging'
})
state.push({
},
{
id: 'Expunged',
name: 'label.expunged'
})
},
{
id: 'Migrating',
name: 'label.migrating'
}
return state
]
}
return []
},
fetchEntityType () {
const entityType = []

View File

@ -26,7 +26,7 @@ export default {
permission: ['listClustersMetrics'],
columns: () => {
const fields = ['name', 'state', 'allocationstate', 'clustertype', 'hypervisortype', 'hosts']
const metricsFields = ['cpuused', 'cpumaxdeviation', 'cpuallocated', 'cputotal', 'memoryused', 'memorymaxdeviation', 'memoryallocated', 'memorytotal']
const metricsFields = ['cpuused', 'cpumaxdeviation', 'cpuallocated', 'cputotal', 'memoryused', 'memorymaxdeviation', 'memoryallocated', 'memorytotal', 'drsimbalance']
if (store.getters.metrics) {
fields.push(...metricsFields)
}
@ -34,7 +34,7 @@ export default {
fields.push('zonename')
return fields
},
details: ['name', 'id', 'allocationstate', 'clustertype', 'managedstate', 'hypervisortype', 'podname', 'zonename'],
details: ['name', 'id', 'allocationstate', 'clustertype', 'managedstate', 'hypervisortype', 'podname', 'zonename', 'drsimbalance'],
related: [{
name: 'host',
title: 'label.hosts',

View File

@ -57,7 +57,8 @@
:columns="migrationColumns"
:dataSource="record.migrations"
:rowKey="(record, index) => index"
:pagination="{hideOnSinglePage: true, showSizeChanger: true}">
:pagination="{hideOnSinglePage: true, showSizeChanger: true}"
@resizeColumn="resizeColumn">
<template #bodyCell="{ column, text, record }">
<template v-if="column.key === 'vm'">
<router-link :to="{ path: '/vm/' + record.virtualmachineid }">
@ -117,7 +118,8 @@
:columns="generatedPlanMigrationColumns"
:dataSource="generatedMigrations"
:rowKey="(record, index) => index"
:pagination="{ showTotal: (total, range) => [range[0], '-', range[1], $t('label.of'), total, $t('label.items')].join(' ') }" >
:pagination="{ showTotal: (total, range) => [range[0], '-', range[1], $t('label.of'), total, $t('label.items')].join(' ') }"
@resizeColumn="resizeColumn" >
<template #bodyCell="{ column, text, record }">
<template v-if="column.key === 'vm'">
<router-link :to="{ path: '/vm/' + record.virtualmachineid }">
@ -166,19 +168,22 @@ export default {
key: 'vm',
title: this.$t('label.vm'),
dataIndex: 'vm',
ellipsis: true
ellipsis: true,
resizable: true
},
{
key: 'sourcehost',
title: this.$t('label.sourcehost'),
dataIndex: 'sourcehost',
ellipsis: true
ellipsis: true,
resizable: true
},
{
key: 'destinationhost',
title: this.$t('label.desthost'),
dataIndex: 'created',
ellipsis: true
ellipsis: true,
resizable: true
}
]
return {
@ -291,6 +296,9 @@ export default {
    // Hide the generated-DRS-plan modal and clear the migration rows it displayed,
    // so a stale plan is not shown when the modal is reopened.
    closeModal () {
      this.showModal = false
      this.generatedMigrations = reactive([])
    },
    // Handler for the table's @resizeColumn event: persist the user-dragged
    // width back onto the column definition so the resize sticks.
    resizeColumn (w, col) {
      col.width = w
    }
}
}

View File

@ -466,6 +466,9 @@ export default {
if (physicalNetwork.isolationMethod) {
params.isolationmethods = physicalNetwork.isolationMethod
}
if (physicalNetwork.tags) {
params.tags = physicalNetwork.tags
}
try {
if (!this.stepData.stepMove.includes('createPhysicalNetwork' + index)) {

View File

@ -66,7 +66,6 @@
<a-select-option value="VSP"> VSP </a-select-option>
<a-select-option value="VCS"> VCS </a-select-option>
<a-select-option value="TF"> TF </a-select-option>
<template #suffixIcon>
<a-tooltip
v-if="tungstenNetworkIndex > -1 && tungstenNetworkIndex !== index"
@ -78,14 +77,18 @@
</template>
<template v-if="column.key === 'traffics'">
<div v-for="traffic in record.traffics" :key="traffic.type">
<a-tooltip :title="traffic.type.toUpperCase() + ' (' + traffic.label + ')'">
<a-tag
:color="trafficColors[traffic.type]"
style="margin:2px"
>
{{ traffic.type.toUpperCase() }}
{{ (traffic.type.toUpperCase() + ' (' + traffic.label + ')').slice(0, 20) }}
{{ (traffic.type.toUpperCase() + ' (' + traffic.label + ')').length > 20 ? '...' : '' }}
<edit-outlined class="traffic-type-action" @click="editTraffic(record.key, traffic, $event)"/>
<delete-outlined class="traffic-type-action" @click="deleteTraffic(record.key, traffic, $event)"/>
</a-tag>
</a-tooltip>
</div>
<div v-if="isShowAddTraffic(record.traffics, index)">
<div class="traffic-select-item" v-if="addingTrafficForKey === record.key">
@ -143,6 +146,13 @@
icon="delete-outlined"
@onClick="onDelete(record)" />
</template>
<template v-if="column.key === 'tags'">
<a-input
:disabled="tungstenNetworkIndex > -1 && tungstenNetworkIndex !== index"
:value="text"
@change="e => onCellChange(record.key, 'tags', e.target.value)"
/>
</template>
</template>
<template #footer v-if="isAdvancedZone">
<a-button
@ -177,9 +187,17 @@
centered
>
<div v-ctrl-enter="() => showError = false" >
<span>{{ $t('message.required.traffic.type') }}</span>
<a-list item-layout="horizontal" :dataSource="errorList">
<template #renderItem="{ item }">
<a-list-item>
<exclamation-circle-outlined
:style="{ color: $config.theme['@error-color'], fontSize: '20px', marginRight: '10px' }"
/>
{{ item }}
</a-list-item>
</template>
</a-list>
<div :span="24" class="action-button">
<a-button @click="showError = false">{{ $t('label.cancel') }}</a-button>
<a-button type="primary" ref="submit" @click="showError = false">{{ $t('label.ok') }}</a-button>
</div>
</div>
@ -290,6 +308,7 @@ export default {
addingTrafficForKey: '-1',
trafficLabelSelected: null,
showError: false,
errorList: [],
defaultTrafficOptions: [],
isChangeHyperv: false
}
@ -307,7 +326,7 @@ export default {
key: 'isolationMethod',
title: this.$t('label.isolation.method'),
dataIndex: 'isolationMethod',
width: 150
width: 125
})
columns.push({
key: 'traffics',
@ -315,6 +334,12 @@ export default {
dataIndex: 'traffics',
width: 250
})
columns.push({
title: this.$t('label.tags'),
key: 'tags',
dataIndex: 'tags',
width: 175
})
if (this.isAdvancedZone) {
columns.push({
key: 'actions',
@ -406,7 +431,7 @@ export default {
return { type: item, label: '' }
})
this.count = 1
this.physicalNetworks = [{ key: this.randomKeyTraffic(this.count), name: 'Physical Network 1', isolationMethod: 'VLAN', traffics: traffics }]
this.physicalNetworks = [{ key: this.randomKeyTraffic(this.count), name: 'Physical Network 1', isolationMethod: 'VLAN', traffics: traffics, tags: null }]
}
if (this.isAdvancedZone) {
this.availableTrafficToAdd.push('guest')
@ -447,28 +472,32 @@ export default {
key: this.randomKeyTraffic(count + 1),
name: `Physical Network ${count + 1}`,
isolationMethod: 'VLAN',
traffics: []
traffics: [],
tags: null
}
this.physicalNetworks = [...physicalNetworks, newData]
this.count = count + 1
this.hasUnusedPhysicalNetwork = this.getHasUnusedPhysicalNetwork()
},
isValidSetup () {
this.errorList = []
let physicalNetworks = this.physicalNetworks
if (this.tungstenNetworkIndex > -1) {
physicalNetworks = [this.physicalNetworks[this.tungstenNetworkIndex]]
}
const shouldHaveLabels = physicalNetworks.length > 1
let isValid = true
let countPhysicalNetworkWithoutTags = 0
this.requiredTrafficTypes.forEach(type => {
if (!isValid) return false
let foundType = false
physicalNetworks.forEach(net => {
net.traffics.forEach(traffic => {
if (!isValid) return false
if (traffic.type === type) {
foundType = true
}
if (traffic.type === 'guest' && type === 'guest' && (!net.tags || net.tags.length === 0)) {
countPhysicalNetworkWithoutTags++
}
if (this.hypervisor !== 'VMware') {
if (shouldHaveLabels && (!traffic.label || traffic.label.length === 0)) {
isValid = false
@ -482,8 +511,15 @@ export default {
})
if (!foundType || !isValid) {
isValid = false
if (this.errorList.indexOf(this.$t('message.required.traffic.type')) === -1) {
this.errorList.push(this.$t('message.required.traffic.type'))
}
}
})
if (countPhysicalNetworkWithoutTags > 1) {
this.errorList.push(this.$t('message.required.tagged.physical.network'))
isValid = false
}
return isValid
},
handleSubmit (e) {