Merge release branch 4.18 to main

* 4.18:
  Storage and volumes statistics tasks for StorPool primary storage (#7404)
  proper storage construction (#6797)
  guarantee MAC uniqueness (#7634)
  server: allow migration of all VMs with local storage on KVM (#7656)
  Add L2 networks to Zones with SG (#7719)
This commit is contained in:
Daan Hoogland 2023-07-19 10:59:19 +02:00
commit 6bb95c0200
27 changed files with 323 additions and 108 deletions

View File

@ -90,7 +90,7 @@ public interface NetworkModel {
INSTANCE_ID_FILE, VM_ID_FILE, PUBLIC_KEYS_FILE, CLOUD_IDENTIFIER_FILE, HYPERVISOR_HOST_NAME_FILE));
static final ConfigKey<Integer> MACIdentifier = new ConfigKey<>("Advanced",Integer.class, "mac.identifier", "0",
"This value will be used while generating the mac addresses for isolated and shared networks. The hexadecimal equivalent value will be present at the 2nd octet of the mac address. Default value is null which means this feature is disabled.Its scope is global.", true, ConfigKey.Scope.Global);
"This value will be used while generating the mac addresses for isolated and shared networks. The hexadecimal equivalent value will be present at the 2nd octet of the mac address. Default value is zero (0) which means that the DB id of the zone will be used.", true, ConfigKey.Scope.Zone);
static final ConfigKey<Boolean> AdminIsAllowedToDeployAnywhere = new ConfigKey<>("Advanced",Boolean.class, "admin.is.allowed.to.deploy.anywhere", "false",
"This will determine if the root admin is allowed to deploy in networks in subdomains.", true, ConfigKey.Scope.Global);
@ -114,6 +114,13 @@ public interface NetworkModel {
List<? extends Nic> getNics(long vmId);
/**
* Gets the next available MAC and checks it for global uniqueness in the nics table. It will keep looking until it finds a MAC address that is unique.
*
* @param networkConfigurationId the id of the network to use the nic in. used for finding the zone
* @return a string containing a MAC address
* @throws InsufficientAddressCapacityException if no MAC can be returned
*/
String getNextAvailableMacAddressInNetwork(long networkConfigurationId) throws InsufficientAddressCapacityException;
PublicIpAddress getPublicIpAddress(long ipAddressId);

View File

@ -323,10 +323,10 @@ public class CreateNetworkCmd extends BaseCmd implements UserCmd {
}
}
if (physicalNetworkId != null) {
if (offering.getGuestType() == GuestType.Shared) {
if ((offering.getGuestType() == GuestType.Shared) || (offering.getGuestType() == GuestType.L2)) {
return physicalNetworkId;
} else {
throw new InvalidParameterValueException("Physical network ID can be specified for networks of guest IP type " + GuestType.Shared + " only.");
throw new InvalidParameterValueException("Physical network ID can be specified for networks of guest IP type " + GuestType.Shared + " or " + GuestType.L2 + " only.");
}
} else {
if (zoneId == null) {

View File

@ -251,7 +251,23 @@ public class CreateNetworkCmdTest {
try {
cmd.getPhysicalNetworkId();
} catch (Exception e) {
Assert.assertTrue(e.getMessage().startsWith("Physical network ID can be specified for networks of guest IP type Shared only"));
Assert.assertTrue(e.getMessage().startsWith("Physical network ID can be specified for networks of guest IP type Shared or L2 only."));
}
}
@Test
public void testGetPhysicalNetworkIdForL2Net() {
    // getPhysicalNetworkId() must return the supplied physical network id for
    // L2 network offerings (not only for Shared ones).
    Long physicalNetworkId = 1L;
    Long networkOfferingId = 1L;
    ReflectionTestUtils.setField(cmd, "networkOfferingId", networkOfferingId);
    NetworkOffering networkOffering = Mockito.mock(NetworkOffering.class);
    ReflectionTestUtils.setField(cmd, "physicalNetworkId", physicalNetworkId);
    Mockito.when(_entityMgr.findById(NetworkOffering.class, networkOfferingId)).thenReturn(networkOffering);
    Mockito.when(networkOffering.getGuestType()).thenReturn(Network.GuestType.L2);
    try {
        // JUnit's assertEquals is (expected, actual) — the original had the
        // arguments swapped, which yields a misleading message on failure.
        Assert.assertEquals(physicalNetworkId, cmd.getPhysicalNetworkId());
    } catch (Exception e) {
        Assert.fail("Failed to get physical network id");
    }
}

View File

@ -28,14 +28,6 @@ public interface EngineDataCenterDao extends GenericDao<EngineDataCenterVO, Long
StateDao<DataCenterResourceEntity.State, DataCenterResourceEntity.State.Event, DataCenterResourceEntity> {
EngineDataCenterVO findByName(String name);
/**
* @param id data center id
* @return a pair of mac address strings. The first one is private and second is public.
*/
String[] getNextAvailableMacAddressPair(long id);
String[] getNextAvailableMacAddressPair(long id, long mask);
List<EngineDataCenterVO> findZonesByDomainId(Long domainId);
List<EngineDataCenterVO> listPublicZones(String keyword);

View File

@ -23,7 +23,6 @@ import java.util.Random;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import javax.persistence.TableGenerator;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
@ -39,10 +38,8 @@ import com.cloud.utils.db.DB;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.SequenceFetcher;
import com.cloud.utils.db.TransactionLegacy;
import com.cloud.utils.db.UpdateBuilder;
import com.cloud.utils.net.NetUtils;
/**
* @config
@ -66,7 +63,6 @@ public class EngineDataCenterDaoImpl extends GenericDaoBase<EngineDataCenterVO,
protected long _prefix;
protected Random _rand = new Random(System.currentTimeMillis());
protected TableGenerator _tgMacAddress;
@Inject
protected DcDetailsDao _detailsDao;
@ -139,25 +135,6 @@ public class EngineDataCenterDaoImpl extends GenericDaoBase<EngineDataCenterVO,
return listBy(ssc);
}
@Override
public String[] getNextAvailableMacAddressPair(long id) {
return getNextAvailableMacAddressPair(id, 0);
}
@Override
public String[] getNextAvailableMacAddressPair(long id, long mask) {
SequenceFetcher fetch = SequenceFetcher.getInstance();
long seq = fetch.getNextSequence(Long.class, _tgMacAddress, id);
seq = seq | _prefix | ((id & 0x7f) << 32);
seq |= mask;
seq |= ((_rand.nextInt(Short.MAX_VALUE) << 16) & 0x00000000ffff0000l);
String[] pair = new String[2];
pair[0] = NetUtils.long2Mac(seq);
pair[1] = NetUtils.long2Mac(seq | 0x1l << 39);
return pair;
}
@Override
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
if (!super.configure(name, params)) {
@ -204,9 +181,6 @@ public class EngineDataCenterDaoImpl extends GenericDaoBase<EngineDataCenterVO,
UUIDSearch = createSearchBuilder();
UUIDSearch.and("uuid", UUIDSearch.entity().getUuid(), SearchCriteria.Op.EQ);
UUIDSearch.done();
_tgMacAddress = _tgs.get("macAddress");
assert _tgMacAddress != null : "Couldn't get mac address table generator";
}
@Override

View File

@ -2665,8 +2665,8 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
}
// Only Account specific Isolated network with sourceNat service disabled are allowed in security group
// enabled zone
if (ntwkOff.getGuestType() != GuestType.Shared) {
throw new InvalidParameterValueException("Only shared guest network can be created in security group enabled zone");
if ((ntwkOff.getGuestType() != GuestType.Shared) && (ntwkOff.getGuestType() != GuestType.L2)) {
throw new InvalidParameterValueException("Only shared or L2 guest network can be created in security group enabled zone");
}
if (_networkModel.areServicesSupportedByNetworkOffering(ntwkOff.getId(), Service.SourceNat)) {
throw new InvalidParameterValueException("Service SourceNat is not allowed in security group enabled zone");

View File

@ -52,14 +52,6 @@ public interface DataCenterDao extends GenericDao<DataCenterVO, Long> {
DataCenterVO findByName(String name);
/**
* @param id data center id
* @return a pair of mac address strings. The first one is private and second is public.
*/
String[] getNextAvailableMacAddressPair(long id);
String[] getNextAvailableMacAddressPair(long id, long mask);
PrivateAllocationData allocatePrivateIpAddress(long id, long podId, long instanceId, String reservationId, boolean forSystemVms);
DataCenterIpAddressVO allocatePrivateIpAddress(long id, String reservationId);

View File

@ -24,7 +24,6 @@ import java.util.stream.Collectors;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import javax.persistence.TableGenerator;
import org.apache.commons.collections.CollectionUtils;
import org.apache.log4j.Logger;
@ -45,9 +44,7 @@ import com.cloud.utils.db.DB;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.SequenceFetcher;
import com.cloud.utils.db.TransactionLegacy;
import com.cloud.utils.net.NetUtils;
/**
* @config
@ -83,7 +80,7 @@ public class DataCenterDaoImpl extends GenericDaoBase<DataCenterVO, Long> implem
protected long _prefix;
protected Random _rand = new Random(System.currentTimeMillis());
protected TableGenerator _tgMacAddress;
@Override
public DataCenterVO findByName(String name) {
@ -230,25 +227,6 @@ public class DataCenterDaoImpl extends GenericDaoBase<DataCenterVO, Long> implem
return vo.getVlan();
}
@Override
public String[] getNextAvailableMacAddressPair(long id) {
return getNextAvailableMacAddressPair(id, 0);
}
@Override
public String[] getNextAvailableMacAddressPair(long id, long mask) {
SequenceFetcher fetch = SequenceFetcher.getInstance();
long seq = fetch.getNextSequence(Long.class, _tgMacAddress, id);
seq = seq | _prefix | ((id & 0x7f) << 32);
seq |= mask;
seq |= ((_rand.nextInt(Short.MAX_VALUE) << 16) & 0x00000000ffff0000l);
String[] pair = new String[2];
pair[0] = NetUtils.long2Mac(seq);
pair[1] = NetUtils.long2Mac(seq | 0x1l << 39);
return pair;
}
@Override
public PrivateAllocationData allocatePrivateIpAddress(long dcId, long podId, long instanceId, String reservationId, boolean forSystemVms) {
_ipAllocDao.releaseIpAddress(instanceId);
@ -348,9 +326,6 @@ public class DataCenterDaoImpl extends GenericDaoBase<DataCenterVO, Long> implem
TokenSearch = createSearchBuilder();
TokenSearch.and("zoneToken", TokenSearch.entity().getZoneToken(), SearchCriteria.Op.EQ);
TokenSearch.done();
_tgMacAddress = _tgs.get("macAddress");
assert _tgMacAddress != null : "Couldn't get mac address table generator";
}
@Override

View File

@ -436,9 +436,6 @@ public class NetworkDaoImpl extends GenericDaoBase<NetworkVO, Long>implements Ne
if(zoneMacIdentifier != null && zoneMacIdentifier.intValue() != 0 ){
seq = seq | _prefix << 40 | (long)zoneMacIdentifier << 32 | networkConfigId << 16 & 0x00000000ffff0000l;
}
else {
seq = seq | _prefix << 40 | _rand.nextInt(Short.MAX_VALUE) << 16 & 0x00000000ffff0000l;
}
return NetUtils.long2Mac(seq);
}

View File

@ -87,7 +87,7 @@ public interface NicDao extends GenericDao<NicVO, Long> {
List<NicVO> listByVmIdAndKeyword(long instanceId, String keyword);
NicVO findByInstanceIdAndMacAddress(long instanceId, String macAddress);
NicVO findByMacAddress(String macAddress);
NicVO findByNetworkIdAndMacAddressIncludingRemoved(long networkId, String mac);

View File

@ -376,9 +376,8 @@ public class NicDaoImpl extends GenericDaoBase<NicVO, Long> implements NicDao {
}
@Override
public NicVO findByInstanceIdAndMacAddress(long instanceId, String macAddress) {
public NicVO findByMacAddress(String macAddress) {
SearchCriteria<NicVO> sc = AllFieldsSearch.create();
sc.setParameters("instance", instanceId);
sc.setParameters("macAddress", macAddress);
return findOneBy(sc);
}

View File

@ -59,7 +59,7 @@ public class SequenceFetcher {
}
public <T> T getNextSequence(Class<T> clazz, TableGenerator tg, Object key, boolean isRandom) {
Future<T> future = _executors.submit(new Fetcher<T>(clazz, tg, key, isRandom));
Future<T> future = _executors.submit(new Fetcher<>(clazz, tg, key, isRandom));
try {
return future.get();
} catch (Exception e) {
@ -69,7 +69,7 @@ public class SequenceFetcher {
}
protected SequenceFetcher() {
_executors = new ThreadPoolExecutor(100, 100, 120l, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(250), new NamedThreadFactory("SequenceFetcher"));
_executors = new ThreadPoolExecutor(100, 100, 120l, TimeUnit.SECONDS, new LinkedBlockingQueue<>(250), new NamedThreadFactory("SequenceFetcher"));
}
protected static final SequenceFetcher s_instance = new SequenceFetcher();

View File

@ -50,7 +50,7 @@ public class HypervGuru extends HypervisorGuruBase implements HypervisorGuru {
@Inject
private GuestOSDao _guestOsDao;
@Inject HypervManager _hypervMgr;
@Inject NetworkModel _networkMgr;
@Inject NetworkModel networkModel;
int MaxNicSupported = 8;
@Override
public final HypervisorType getHypervisorType() {
@ -120,7 +120,7 @@ public class HypervGuru extends HypervisorGuruBase implements HypervisorGuru {
nicTo.setName(profile.getName());
try {
String mac = _networkMgr.getNextAvailableMacAddressInNetwork(networkId);
String mac = networkModel.getNextAvailableMacAddressInNetwork(networkId);
nicTo.setMac(mac);
} catch (InsufficientAddressCapacityException e) {
throw new CloudRuntimeException("unable to allocate mac address on network: " + networkId);
@ -136,7 +136,7 @@ public class HypervGuru extends HypervisorGuruBase implements HypervisorGuru {
nicTo.setBroadcastUri(profile.getBroadCastUri());
nicTo.setIsolationuri(profile.getIsolationUri());
Integer networkRate = _networkMgr.getNetworkRate(network.getId(), null);
Integer networkRate = networkModel.getNetworkRate(network.getId(), null);
nicTo.setNetworkRateMbps(networkRate);
expandedNics[i] = nicTo;

View File

@ -113,7 +113,8 @@ public class KVMStoragePoolManager {
s_logger.warn(String.format("Duplicate StorageAdaptor type %s, not loading %s", info.storagePoolType().toString(), storageAdaptor.getName()));
} else {
try {
this._storageMapper.put(info.storagePoolType().toString(), storageAdaptor.newInstance());
s_logger.info(String.format("adding storage adaptor for %s", storageAdaptor.getName()));
this._storageMapper.put(info.storagePoolType().toString(), storageAdaptor.getDeclaredConstructor().newInstance());
} catch (Exception ex) {
throw new CloudRuntimeException(ex.toString());
}

View File

@ -72,7 +72,7 @@ class VmwareVmImplementer {
@Inject
NetworkDao networkDao;
@Inject
NetworkModel networkMgr;
NetworkModel networkModel;
@Inject
NicDao nicDao;
@Inject
@ -237,7 +237,7 @@ class VmwareVmImplementer {
nicTo.setNetmask("255.255.255.255");
try {
String mac = networkMgr.getNextAvailableMacAddressInNetwork(networkId);
String mac = networkModel.getNextAvailableMacAddressInNetwork(networkId);
nicTo.setMac(mac);
} catch (InsufficientAddressCapacityException e) {
throw new CloudRuntimeException("unable to allocate mac address on network: " + networkId);
@ -253,7 +253,7 @@ class VmwareVmImplementer {
nicTo.setBroadcastUri(publicNicProfile.getBroadCastUri());
nicTo.setIsolationuri(publicNicProfile.getIsolationUri());
Integer networkRate = networkMgr.getNetworkRate(network.getId(), null);
Integer networkRate = networkModel.getNetworkRate(network.getId(), null);
nicTo.setNetworkRateMbps(networkRate);
expandedNics[i] = nicTo;
@ -296,7 +296,7 @@ class VmwareVmImplementer {
for (NicProfile nicProfile : nicProfiles) {
if (nicProfile.getTrafficType() == Networks.TrafficType.Guest) {
if (networkMgr.isProviderSupportServiceInNetwork(nicProfile.getNetworkId(), Network.Service.Firewall, Network.Provider.CiscoVnmc)) {
if (networkModel.isProviderSupportServiceInNetwork(nicProfile.getNetworkId(), Network.Service.Firewall, Network.Provider.CiscoVnmc)) {
details.put("ConfigureVServiceInNexus", Boolean.TRUE.toString());
}
break;

View File

@ -324,7 +324,7 @@ public class NetScalerVMManagerImpl extends ManagerBase implements NetScalerVMMa
defaultNic2.setIPv4Address("");
defaultNic2.setIPv4Gateway("");
defaultNic2.setIPv4Netmask("");
String macAddress = _networkDao.getNextAvailableMacAddress(defaultPublicNetwork.getId(), null);
String macAddress = _networkModel.getNextAvailableMacAddressInNetwork(defaultPublicNetwork.getId());
defaultNic2.setMacAddress(macAddress);
networks.put(_networkMgr.setupNetwork(_accountMgr.getSystemAccount(), _networkOfferingDao.findByUniqueName(NetworkOffering.SystemPublicNetwork), plan, null, null, false).get(0),

View File

@ -82,6 +82,7 @@ import org.apache.cloudstack.storage.command.StorageSubSystemCommand;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao;
@ -97,6 +98,7 @@ import org.apache.cloudstack.storage.to.TemplateObjectTO;
import org.apache.cloudstack.storage.to.VolumeObjectTO;
import org.apache.cloudstack.storage.volume.VolumeObject;
import org.apache.commons.collections4.CollectionUtils;
import org.apache.commons.collections4.MapUtils;
import org.apache.log4j.Logger;
import javax.inject.Inject;
@ -1047,18 +1049,54 @@ public class StorPoolPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
}
public boolean canProvideStorageStats() {
return false;
return StorPoolConfigurationManager.StorageStatsInterval.value() > 0;
}
/**
 * Returns the cached (capacity, used) byte statistics for the StorPool template
 * backing the given pool, as collected periodically by StorPoolStatsCollector.
 * Also syncs the pool's capacity in the DB when StorPool reports a different
 * capacity than the one recorded.
 *
 * @param storagePool the pool to report on; may be null
 * @return a (capacity, used) pair, or null when no statistics have been
 *         collected yet for the pool's zone/template
 */
public Pair<Long, Long> getStorageStats(StoragePool storagePool) {
    if (storagePool == null) {
        return null;
    }
    Map<Long, Map<String, Pair<Long, Long>>> templatesStats = StorPoolStatsCollector.templatesStats;
    if (MapUtils.isNotEmpty(templatesStats) && templatesStats.containsKey(storagePool.getDataCenterId())) {
        Map<String, Pair<Long, Long>> storageStats = templatesStats.get(storagePool.getDataCenterId());
        StoragePoolDetailVO templateName = storagePoolDetailsDao.findDetail(storagePool.getId(), StorPoolUtil.SP_TEMPLATE);
        // Null-check templateName BEFORE dereferencing it. The original condition
        // called templateName.getValue() first and tested "!= null" second, so a
        // pool with no SP_TEMPLATE detail would NPE instead of returning null.
        if (templateName != null && storageStats.containsKey(templateName.getValue())) {
            Pair<Long, Long> stats = storageStats.get(templateName.getValue());
            // Keep the DB-recorded capacity in sync with what StorPool reports.
            if (stats.first() != storagePool.getCapacityBytes()) {
                primaryStoreDao.updateCapacityBytes(storagePool.getId(), stats.first());
            }
            return stats;
        }
    }
    return null;
}
public boolean canProvideVolumeStats() {
return false;
return StorPoolConfigurationManager.VolumesStatsInterval.value() > 0;
}
/**
 * Returns (used, provisioned) byte statistics for a single StorPool volume.
 * Prefers the cache maintained by StorPoolStatsCollector; when no statistics
 * have been collected yet, falls back to the volume's size recorded in the DB
 * (reported as both used and provisioned).
 *
 * @param storagePool the pool the volume resides on; only needed for the
 *                    DB fallback and may be null
 * @param volumeId    the volume path/id; null yields null
 * @return a (used, size) pair, or null when nothing is known about the volume
 */
public Pair<Long, Long> getVolumeStats(StoragePool storagePool, String volumeId) {
    if (volumeId == null) {
        return null;
    }
    Map<String, Pair<Long, Long>> volumesStats = StorPoolStatsCollector.volumesStats;
    if (MapUtils.isNotEmpty(volumesStats)) {
        Pair<Long, Long> volumeStats = volumesStats.get(StorPoolStorageAdaptor.getVolumeNameFromPath(volumeId, true));
        if (volumeStats != null) {
            return volumeStats;
        }
    } else if (storagePool != null) {
        // Guard storagePool before dereferencing it here (the sibling
        // getStorageStats() null-checks it; the original fallback did not and
        // could NPE). Fallback: report the DB-recorded size as both values.
        List<VolumeVO> volumes = volumeDao.findByPoolId(storagePool.getId());
        for (VolumeVO volume : volumes) {
            if (volume.getPath() != null && volume.getPath().equals(volumeId)) {
                long size = volume.getSize();
                StorPoolUtil.spLog("Volume [%s] doesn't have any statistics, returning its size [%s]", volumeId, size);
                return new Pair<>(size, size);
            }
        }
    }
    return null;
}

View File

@ -0,0 +1,188 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cloudstack.storage.datastore.driver;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import javax.inject.Inject;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.datastore.util.StorPoolUtil;
import org.apache.cloudstack.storage.snapshot.StorPoolConfigurationManager;
import org.apache.commons.collections.CollectionUtils;
import org.apache.log4j.Logger;
import com.cloud.utils.NumbersUtil;
import com.cloud.utils.Pair;
import com.cloud.utils.component.ManagerBase;
import com.cloud.utils.concurrency.NamedThreadFactory;
import com.google.gson.JsonArray;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
/**
 * Periodically collects StorPool volume and template (primary storage) usage
 * statistics through the StorPool multi-cluster API and caches them in static
 * maps, so that stats requests can be answered without calling StorPool on
 * every query.
 */
public class StorPoolStatsCollector extends ManagerBase {
    private static Logger log = Logger.getLogger(StorPoolStatsCollector.class);

    @Inject
    private PrimaryDataStoreDao storagePoolDao;
    @Inject
    private StoragePoolDetailsDao storagePoolDetailsDao;
    @Inject
    private ConfigurationDao configurationDao;

    // Runs the two periodic collection tasks; created lazily in start().
    private ScheduledExecutorService executor;

    // Shared caches read elsewhere (package-visible on purpose).
    // volumesStats: StorPool volume name -> (spaceUsed, size).
    static volatile Map<String, Pair<Long, Long>> volumesStats = new ConcurrentHashMap<>();
    // templatesStats: zone (data center) id -> (template name -> (capacity, used)).
    static volatile Map<Long, Map<String, Pair<Long, Long>>> templatesStats = new ConcurrentHashMap<>();

    // Discriminates which kind of StorPool object a JSON payload describes.
    enum StorPoolObject {
        VOLUME, TEMPLATE;
    }

    @Override
    public boolean start() {
        // Only schedule collection when at least one StorPool pool exists.
        List<StoragePoolVO> spPools = storagePoolDao.findPoolsByProvider(StorPoolUtil.SP_PROVIDER_NAME);
        if (CollectionUtils.isNotEmpty(spPools)) {
            executor = Executors.newScheduledThreadPool(2,new NamedThreadFactory("StorPoolStatsCollector"));
            // Each task is scheduled only when both the StorPool-specific interval
            // and the corresponding global stats interval are positive.
            long storageStatsInterval = NumbersUtil.parseLong(configurationDao.getValue("storage.stats.interval"), 60000L);
            long volumeStatsInterval = NumbersUtil.parseLong(configurationDao.getValue("volume.stats.interval"), 60000L);
            if (StorPoolConfigurationManager.VolumesStatsInterval.value() > 0 && volumeStatsInterval > 0) {
                executor.scheduleAtFixedRate(new StorPoolVolumeStatsMonitorTask(),120, StorPoolConfigurationManager.VolumesStatsInterval.value(), TimeUnit.SECONDS);
            }
            if (StorPoolConfigurationManager.StorageStatsInterval.value() > 0 && storageStatsInterval > 0) {
                executor.scheduleAtFixedRate(new StorPoolStorageStatsMonitorTask(), 120, StorPoolConfigurationManager.StorageStatsInterval.value(), TimeUnit.SECONDS);
            }
        }
        return true;
    }

    /** Refreshes {@link #volumesStats} with per-volume space usage, one API call per zone. */
    class StorPoolVolumeStatsMonitorTask implements Runnable {
        @Override
        public void run() {
            List<StoragePoolVO> spPools = storagePoolDao.findPoolsByProvider(StorPoolUtil.SP_PROVIDER_NAME);
            if (CollectionUtils.isNotEmpty(spPools)) {
                volumesStats.clear();
                log.debug("Collecting StorPool volumes used space");
                // One StorPool connection per zone suffices — keep an arbitrary
                // pool per data center id and query through it.
                Map<Long, StoragePoolVO> onePoolforZone = new HashMap<>();
                for (StoragePoolVO storagePoolVO : spPools) {
                    onePoolforZone.put(storagePoolVO.getDataCenterId(), storagePoolVO);
                }
                for (StoragePoolVO storagePool : onePoolforZone.values()) {
                    try {
                        log.debug(String.format("Collecting volumes statistics for zone [%s]", storagePool.getDataCenterId()));
                        JsonArray arr = StorPoolUtil.volumesSpace(StorPoolUtil.getSpConnection(storagePool.getUuid(),
                                storagePool.getId(), storagePoolDetailsDao, storagePoolDao));
                        volumesStats.putAll(getClusterVolumeOrTemplateSpace(arr, StorPoolObject.VOLUME));
                    } catch (Exception e) {
                        // Best effort: one failing zone must not stop collection for the others.
                        log.debug(String.format("Could not collect StorPool volumes statistics due to %s", e.getMessage()));
                    }
                }
            }
        }
    }

    /** Refreshes {@link #templatesStats} with per-template capacity/usage, keyed by zone id. */
    class StorPoolStorageStatsMonitorTask implements Runnable {
        @Override
        public void run() {
            List<StoragePoolVO> spPools = storagePoolDao.findPoolsByProvider(StorPoolUtil.SP_PROVIDER_NAME);
            if (CollectionUtils.isNotEmpty(spPools)) {
                templatesStats.clear();
                Map<Long, StoragePoolVO> onePoolforZone = new HashMap<>();
                for (StoragePoolVO storagePoolVO : spPools) {
                    onePoolforZone.put(storagePoolVO.getDataCenterId(), storagePoolVO);
                }
                for (StoragePoolVO storagePool : onePoolforZone.values()) {
                    try {
                        log.debug(String.format("Collecting templates statistics for zone [%s]", storagePool.getDataCenterId()));
                        JsonArray arr = StorPoolUtil.templatesStats(StorPoolUtil.getSpConnection(storagePool.getUuid(),
                                storagePool.getId(), storagePoolDetailsDao, storagePoolDao));
                        templatesStats.put(storagePool.getDataCenterId(), getClusterVolumeOrTemplateSpace(arr, StorPoolObject.TEMPLATE));
                    } catch (Exception e) {
                        log.debug(String.format("Could not collect StorPool templates statistics %s", e.getMessage()));
                    }
                }
            }
        }
    }

    /**
     * Flattens the per-cluster "response"/"data" payloads in {@code arr} into a
     * single name -> (first, second) map; for VOLUME the pair is (spaceUsed, size),
     * for TEMPLATE it is (capacity, used) aggregated across clusters.
     */
    private Map<String, Pair<Long, Long>> getClusterVolumeOrTemplateSpace(JsonArray arr, StorPoolObject spObject) {
        Map<String, Pair<Long, Long>> map = new HashMap<>();
        for (JsonElement jsonElement : arr) {
            JsonObject name = jsonElement.getAsJsonObject().getAsJsonObject("response");
            if (name != null) {
                JsonArray data = name.getAsJsonObject().getAsJsonArray("data");
                if (StorPoolObject.VOLUME == spObject) {
                    map.putAll(getStatsForVolumes(data));
                } else if (StorPoolObject.TEMPLATE == spObject) {
                    getClusterStats(data, map);
                }
            } else if (StorPoolObject.TEMPLATE == spObject) {
                // NOTE(review): for TEMPLATE this returns as soon as one cluster
                // entry has no "response", discarding clusters already aggregated
                // and skipping the rest — presumably intentional? TODO confirm.
                return map;
            }
        }
        return map;
    }

    /**
     * Extracts (spaceUsed, size) per volume name, skipping StorPool-internal
     * names (starting with "*") and snapshots (containing "@").
     */
    private Map<String, Pair<Long, Long>> getStatsForVolumes(JsonArray arr) {
        Map<String, Pair<Long, Long>> map = new HashMap<>();
        for (int i = 0; i < arr.size(); i++) {
            String name = arr.get(i).getAsJsonObject().get("name").getAsString();
            if (!name.startsWith("*") && !name.contains("@")) {
                Long spaceUsed = arr.get(i).getAsJsonObject().get("spaceUsed").getAsLong();
                Long size = arr.get(i).getAsJsonObject().get("size").getAsLong();
                map.put(name, new Pair<>(spaceUsed, size));
            }
        }
        return map;
    }

    /**
     * Accumulates per-template (capacity, used = capacity - free) into
     * {@code map}, summing across clusters when a template name repeats.
     */
    private void getClusterStats(JsonArray data, Map<String, Pair<Long, Long>> map) {
        for (JsonElement dat : data) {
            long capacity = dat.getAsJsonObject().get("stored").getAsJsonObject().get("capacity").getAsLong();
            long free = dat.getAsJsonObject().get("stored").getAsJsonObject().get("free").getAsLong();
            long used = capacity - free;
            String templateName = dat.getAsJsonObject().get("name").getAsString();
            if (!map.containsKey(templateName)) {
                map.put(templateName, new Pair<>(capacity, used));
            } else {
                Pair<Long, Long> template = map.get(templateName);
                template.first(template.first() + capacity);
                template.second(template.second() + used);
                map.put(templateName, template);
            }
        }
    }
}

View File

@ -411,6 +411,18 @@ public class StorPoolUtil {
return data;
}
/**
 * Queries the StorPool multi-cluster API for per-volume space usage across
 * all clusters.
 *
 * @param conn the StorPool API connection descriptor
 * @return the "clusters" JSON array from the response's "data" object
 */
public static JsonArray volumesSpace(SpConnectionDesc conn) {
    SpApiResponse resp = GET("MultiCluster/AllClusters/VolumesSpace", conn);
    JsonObject obj = resp.fullJson.getAsJsonObject();
    return obj.getAsJsonObject("data").getAsJsonArray("clusters");
}
/**
 * Queries the StorPool multi-cluster API for volume-template status
 * (capacity/free per template) across all clusters.
 *
 * @param conn the StorPool API connection descriptor
 * @return the "clusters" JSON array from the response's "data" object
 */
public static JsonArray templatesStats(SpConnectionDesc conn) {
    SpApiResponse resp = GET("MultiCluster/AllClusters/VolumeTemplatesStatus", conn);
    JsonObject obj = resp.fullJson.getAsJsonObject();
    return obj.getAsJsonObject("data").getAsJsonArray("clusters");
}
private static boolean objectExists(SpApiError err) {
if (!err.getName().equals("objectDoesNotExist")) {
throw new CloudRuntimeException(err.getDescr());

View File

@ -34,6 +34,16 @@ public class StorPoolConfigurationManager implements Configurable {
public static final ConfigKey<String> AlternativeEndpoint = new ConfigKey<String>(String.class, "sp.alternative.endpoint", "Advanced", "",
"Used for StorPool primary storage for an alternative endpoint. Structure of the endpoint is - SP_API_HTTP=address:port;SP_AUTH_TOKEN=token;SP_TEMPLATE=template_name", true, ConfigKey.Scope.StoragePool, null);
public static final ConfigKey<Integer> VolumesStatsInterval = new ConfigKey<>("Advanced", Integer.class,
"storpool.volumes.stats.interval", "3600",
"The interval in seconds to get StorPool volumes statistics",
false);
public static final ConfigKey<Integer> StorageStatsInterval = new ConfigKey<>("Advanced", Integer.class,
"storpool.storage.stats.interval", "3600",
"The interval in seconds to get StorPool template statistics",
false);
@Override
public String getConfigComponentName() {
return StorPoolConfigurationManager.class.getSimpleName();
@ -41,6 +51,6 @@ public class StorPoolConfigurationManager implements Configurable {
@Override
public ConfigKey<?>[] getConfigKeys() {
return new ConfigKey<?>[] { BypassSecondaryStorage, StorPoolClusterId, AlternativeEndPointEnabled, AlternativeEndpoint };
return new ConfigKey<?>[] { BypassSecondaryStorage, StorPoolClusterId, AlternativeEndPointEnabled, AlternativeEndpoint, VolumesStatsInterval, StorageStatsInterval };
}
}

View File

@ -35,4 +35,7 @@
<bean id="cleanupTags"
class="org.apache.cloudstack.storage.collector.StorPoolAbandonObjectsCollector" />
<bean id="statistics"
class="org.apache.cloudstack.storage.datastore.driver.StorPoolStatsCollector" />
</beans>

View File

@ -80,7 +80,6 @@ import com.cloud.network.dao.IPAddressVO;
import com.cloud.network.dao.NetworkAccountDao;
import com.cloud.network.dao.NetworkAccountVO;
import com.cloud.network.dao.NetworkDao;
import com.cloud.network.dao.NetworkDetailsDao;
import com.cloud.network.dao.NetworkDomainDao;
import com.cloud.network.dao.NetworkDomainVO;
import com.cloud.network.dao.NetworkServiceMapDao;
@ -172,8 +171,6 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel, Confi
@Inject
NetworkDao _networksDao = null;
@Inject
NetworkDetailsDao networkDetailsDao;
@Inject
NicDao _nicDao = null;
@Inject
PodVlanMapDao _podVlanMapDao;
@ -593,13 +590,24 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel, Confi
@Override
public String getNextAvailableMacAddressInNetwork(long networkId) throws InsufficientAddressCapacityException {
NetworkVO network = _networksDao.findById(networkId);
String mac = _networksDao.getNextAvailableMacAddress(networkId, MACIdentifier.value());
Integer zoneIdentifier = MACIdentifier.value();
if (zoneIdentifier.intValue() == 0) {
zoneIdentifier = Long.valueOf(network.getDataCenterId()).intValue();
}
String mac;
do {
mac = _networksDao.getNextAvailableMacAddress(networkId, zoneIdentifier);
if (mac == null) {
throw new InsufficientAddressCapacityException("Unable to create another mac address", Network.class, networkId);
}
} while(! isMACUnique(mac));
return mac;
}
private boolean isMACUnique(String mac) {
return (_nicDao.findByMacAddress(mac) == null);
}
@Override
@DB
public Network getNetwork(long id) {

View File

@ -59,7 +59,7 @@ public class ControlNetworkGuru extends PodBasedNetworkGuru implements NetworkGu
@Inject
ConfigurationDao _configDao;
@Inject
NetworkModel _networkMgr;
NetworkModel networkModel;
String _cidr;
String _gateway;
@ -114,7 +114,7 @@ public class ControlNetworkGuru extends PodBasedNetworkGuru implements NetworkGu
if (vm.getHypervisorType() == HypervisorType.VMware && !isRouterVm(vm)) {
NicProfile nicProf = new NicProfile(Nic.ReservationStrategy.Create, null, null, null, null);
String mac = _networkMgr.getNextAvailableMacAddressInNetwork(config.getId());
String mac = networkModel.getNextAvailableMacAddressInNetwork(config.getId());
nicProf.setMacAddress(mac);
return nicProf;
}
@ -140,7 +140,7 @@ public class ControlNetworkGuru extends PodBasedNetworkGuru implements NetworkGu
if (((hType == HypervisorType.VMware) || (hType == HypervisorType.Hyperv)) && isRouterVm(vm)) {
super.reserve(nic, config, vm, dest, context);
String mac = _networkMgr.getNextAvailableMacAddressInNetwork(config.getId());
String mac = networkModel.getNextAvailableMacAddressInNetwork(config.getId());
nic.setMacAddress(mac);
return;
}

View File

@ -1417,7 +1417,8 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
// Check if the vm can be migrated with storage.
boolean canMigrateWithStorage = false;
if (VirtualMachine.Type.User.equals(vm.getType()) || HypervisorType.VMware.equals(vm.getHypervisorType())) {
List<HypervisorType> hypervisorTypes = Arrays.asList(new HypervisorType[]{HypervisorType.VMware, HypervisorType.KVM});
if (VirtualMachine.Type.User.equals(vm.getType()) || hypervisorTypes.contains(vm.getHypervisorType())) {
canMigrateWithStorage = _hypervisorCapabilitiesDao.isStorageMotionSupported(srcHost.getHypervisorType(), srcHostVersion);
}

View File

@ -238,6 +238,7 @@ import com.cloud.hypervisor.dao.HypervisorCapabilitiesDao;
import com.cloud.hypervisor.kvm.dpdk.DpdkHelper;
import com.cloud.network.IpAddressManager;
import com.cloud.network.Network;
import com.cloud.network.Network.GuestType;
import com.cloud.network.Network.IpAddresses;
import com.cloud.network.Network.Provider;
import com.cloud.network.Network.Service;
@ -3594,13 +3595,14 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
for (Long networkId : networkIdList) {
NetworkVO network = _networkDao.findById(networkId);
NetworkOffering ntwkOffering = _networkOfferingDao.findById(network.getNetworkOfferingId());
if (network == null) {
throw new InvalidParameterValueException("Unable to find network by id " + networkId);
}
if (!_networkModel.isSecurityGroupSupportedInNetwork(network)) {
throw new InvalidParameterValueException("Network is not security group enabled: " + network.getId());
if (!_networkModel.isSecurityGroupSupportedInNetwork(network) && (ntwkOffering.getGuestType() != GuestType.L2)) {
throw new InvalidParameterValueException("Network is not security group enabled or not L2 network: " + network.getId());
}
_accountMgr.checkAccess(owner, AccessType.UseEntry, false, network);

View File

@ -315,7 +315,7 @@ export default {
api('listZones', params).then(json => {
for (const i in json.listzonesresponse.zone) {
const zone = json.listzonesresponse.zone[i]
if (zone.networktype === 'Advanced' && zone.securitygroupsenabled !== true) {
if (zone.networktype === 'Advanced') {
this.zones.push(zone)
}
}

View File

@ -26,7 +26,7 @@
@refresh-data="refreshParent"
@refresh="handleRefresh"/>
</a-tab-pane>
<a-tab-pane :tab="$t('label.l2')" key="3" v-if="isAdvancedZoneWithoutSGAvailable">
<a-tab-pane :tab="$t('label.l2')" key="3">
<CreateL2NetworkForm
:loading="loading"
:resource="resource"