Merge release branch 4.18 to main
* 4.18:
  Storage and volumes statistics tasks for StorPool primary storage (#7404)
  proper storage construction (#6797)
  guarantee MAC uniqueness (#7634)
  server: allow migration of all VMs with local storage on KVM (#7656)
  Add L2 networks to Zones with SG (#7719)
commit 6bb95c0200
@@ -90,7 +90,7 @@ public interface NetworkModel {
             INSTANCE_ID_FILE, VM_ID_FILE, PUBLIC_KEYS_FILE, CLOUD_IDENTIFIER_FILE, HYPERVISOR_HOST_NAME_FILE));

     static final ConfigKey<Integer> MACIdentifier = new ConfigKey<>("Advanced",Integer.class, "mac.identifier", "0",
-            "This value will be used while generating the mac addresses for isolated and shared networks. The hexadecimal equivalent value will be present at the 2nd octet of the mac address. Default value is null which means this feature is disabled.Its scope is global.", true, ConfigKey.Scope.Global);
+            "This value will be used while generating the mac addresses for isolated and shared networks. The hexadecimal equivalent value will be present at the 2nd octet of the mac address. Default value is zero (0) which means that the DB id of the zone will be used.", true, ConfigKey.Scope.Zone);

     static final ConfigKey<Boolean> AdminIsAllowedToDeployAnywhere = new ConfigKey<>("Advanced",Boolean.class, "admin.is.allowed.to.deploy.anywhere", "false",
             "This will determine if the root admin is allowed to deploy in networks in subdomains.", true, ConfigKey.Scope.Global);
@@ -114,6 +114,13 @@ public interface NetworkModel {

     List<? extends Nic> getNics(long vmId);

+    /**
+     * Gets the next available MAC and checks it for global uniqueness in the nics table. It will keep looking until it finds a MAC address that is unique.
+     *
+     * @param networkConfigurationId the id of the network to use the nic in. used for finding the zone
+     * @return a string containing a MAC address
+     * @throws InsufficientAddressCapacityException if no MAC can be returned
+     */
     String getNextAvailableMacAddressInNetwork(long networkConfigurationId) throws InsufficientAddressCapacityException;

     PublicIpAddress getPublicIpAddress(long ipAddressId);
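For orientation, a minimal hypothetical caller of the revised contract might look like the sketch below. Only getNextAvailableMacAddressInNetwork and InsufficientAddressCapacityException come from the interface above; the surrounding class and field names are illustrative, mirroring how the hypervisor gurus changed later in this commit consume the method.

import javax.inject.Inject;

import com.cloud.exception.InsufficientAddressCapacityException;
import com.cloud.network.NetworkModel;
import com.cloud.utils.exception.CloudRuntimeException;

// Hypothetical consumer: asks NetworkModel for a MAC that the new contract
// guarantees to be globally unique across the nics table.
public class ExampleNicAllocator {

    @Inject
    NetworkModel networkModel;

    public String allocateMac(long networkId) {
        try {
            // Zone lookup and the uniqueness retry loop happen inside NetworkModelImpl.
            return networkModel.getNextAvailableMacAddressInNetwork(networkId);
        } catch (InsufficientAddressCapacityException e) {
            throw new CloudRuntimeException("unable to allocate mac address on network: " + networkId);
        }
    }
}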
@@ -323,10 +323,10 @@ public class CreateNetworkCmd extends BaseCmd implements UserCmd {
             }
         }
         if (physicalNetworkId != null) {
-            if (offering.getGuestType() == GuestType.Shared) {
+            if ((offering.getGuestType() == GuestType.Shared) || (offering.getGuestType() == GuestType.L2)) {
                 return physicalNetworkId;
             } else {
-                throw new InvalidParameterValueException("Physical network ID can be specified for networks of guest IP type " + GuestType.Shared + " only.");
+                throw new InvalidParameterValueException("Physical network ID can be specified for networks of guest IP type " + GuestType.Shared + " or " + GuestType.L2 + " only.");
             }
         } else {
             if (zoneId == null) {
@@ -251,7 +251,23 @@ public class CreateNetworkCmdTest {
         try {
             cmd.getPhysicalNetworkId();
         } catch (Exception e) {
-            Assert.assertTrue(e.getMessage().startsWith("Physical network ID can be specified for networks of guest IP type Shared only"));
+            Assert.assertTrue(e.getMessage().startsWith("Physical network ID can be specified for networks of guest IP type Shared or L2 only."));
+        }
+    }
+
+    @Test
+    public void testGetPhysicalNetworkIdForL2Net() {
+        Long physicalNetworkId = 1L;
+        Long networkOfferingId = 1L;
+        ReflectionTestUtils.setField(cmd, "networkOfferingId", networkOfferingId);
+        NetworkOffering networkOffering = Mockito.mock(NetworkOffering.class);
+        ReflectionTestUtils.setField(cmd, "physicalNetworkId", physicalNetworkId);
+        Mockito.when(_entityMgr.findById(NetworkOffering.class, networkOfferingId)).thenReturn(networkOffering);
+        Mockito.when(networkOffering.getGuestType()).thenReturn(Network.GuestType.L2);
+        try {
+            Assert.assertEquals(cmd.getPhysicalNetworkId(), physicalNetworkId);
+        } catch (Exception e) {
+            Assert.fail("Failed to get physical network id");
         }
     }
 }

@@ -28,14 +28,6 @@ public interface EngineDataCenterDao extends GenericDao<EngineDataCenterVO, Long
     StateDao<DataCenterResourceEntity.State, DataCenterResourceEntity.State.Event, DataCenterResourceEntity> {
     EngineDataCenterVO findByName(String name);

-    /**
-     * @param id data center id
-     * @return a pair of mac address strings. The first one is private and second is public.
-     */
-    String[] getNextAvailableMacAddressPair(long id);
-
-    String[] getNextAvailableMacAddressPair(long id, long mask);
-
     List<EngineDataCenterVO> findZonesByDomainId(Long domainId);

     List<EngineDataCenterVO> listPublicZones(String keyword);
@@ -23,7 +23,6 @@ import java.util.Random;

 import javax.inject.Inject;
 import javax.naming.ConfigurationException;
-import javax.persistence.TableGenerator;

 import org.apache.log4j.Logger;
 import org.springframework.stereotype.Component;
@@ -39,10 +38,8 @@ import com.cloud.utils.db.DB;
 import com.cloud.utils.db.GenericDaoBase;
 import com.cloud.utils.db.SearchBuilder;
 import com.cloud.utils.db.SearchCriteria;
-import com.cloud.utils.db.SequenceFetcher;
 import com.cloud.utils.db.TransactionLegacy;
 import com.cloud.utils.db.UpdateBuilder;
-import com.cloud.utils.net.NetUtils;

 /**
  * @config
@@ -66,7 +63,6 @@ public class EngineDataCenterDaoImpl extends GenericDaoBase<EngineDataCenterVO,

     protected long _prefix;
     protected Random _rand = new Random(System.currentTimeMillis());
-    protected TableGenerator _tgMacAddress;

     @Inject
     protected DcDetailsDao _detailsDao;
@@ -139,25 +135,6 @@ public class EngineDataCenterDaoImpl extends GenericDaoBase<EngineDataCenterVO,
         return listBy(ssc);
     }

-    @Override
-    public String[] getNextAvailableMacAddressPair(long id) {
-        return getNextAvailableMacAddressPair(id, 0);
-    }
-
-    @Override
-    public String[] getNextAvailableMacAddressPair(long id, long mask) {
-        SequenceFetcher fetch = SequenceFetcher.getInstance();
-
-        long seq = fetch.getNextSequence(Long.class, _tgMacAddress, id);
-        seq = seq | _prefix | ((id & 0x7f) << 32);
-        seq |= mask;
-        seq |= ((_rand.nextInt(Short.MAX_VALUE) << 16) & 0x00000000ffff0000l);
-        String[] pair = new String[2];
-        pair[0] = NetUtils.long2Mac(seq);
-        pair[1] = NetUtils.long2Mac(seq | 0x1l << 39);
-        return pair;
-    }
-
     @Override
     public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
         if (!super.configure(name, params)) {
@@ -204,9 +181,6 @@ public class EngineDataCenterDaoImpl extends GenericDaoBase<EngineDataCenterVO,
         UUIDSearch = createSearchBuilder();
         UUIDSearch.and("uuid", UUIDSearch.entity().getUuid(), SearchCriteria.Op.EQ);
         UUIDSearch.done();
-
-        _tgMacAddress = _tgs.get("macAddress");
-        assert _tgMacAddress != null : "Couldn't get mac address table generator";
     }

     @Override
@@ -2665,8 +2665,8 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
             }
             // Only Account specific Isolated network with sourceNat service disabled are allowed in security group
             // enabled zone
-            if (ntwkOff.getGuestType() != GuestType.Shared) {
-                throw new InvalidParameterValueException("Only shared guest network can be created in security group enabled zone");
+            if ((ntwkOff.getGuestType() != GuestType.Shared) && (ntwkOff.getGuestType() != GuestType.L2)) {
+                throw new InvalidParameterValueException("Only shared or L2 guest network can be created in security group enabled zone");
             }
             if (_networkModel.areServicesSupportedByNetworkOffering(ntwkOff.getId(), Service.SourceNat)) {
                 throw new InvalidParameterValueException("Service SourceNat is not allowed in security group enabled zone");
@@ -52,14 +52,6 @@ public interface DataCenterDao extends GenericDao<DataCenterVO, Long> {

     DataCenterVO findByName(String name);

-    /**
-     * @param id data center id
-     * @return a pair of mac address strings. The first one is private and second is public.
-     */
-    String[] getNextAvailableMacAddressPair(long id);
-
-    String[] getNextAvailableMacAddressPair(long id, long mask);
-
     PrivateAllocationData allocatePrivateIpAddress(long id, long podId, long instanceId, String reservationId, boolean forSystemVms);

     DataCenterIpAddressVO allocatePrivateIpAddress(long id, String reservationId);
@@ -24,7 +24,6 @@ import java.util.stream.Collectors;

 import javax.inject.Inject;
 import javax.naming.ConfigurationException;
-import javax.persistence.TableGenerator;

 import org.apache.commons.collections.CollectionUtils;
 import org.apache.log4j.Logger;
@@ -45,9 +44,7 @@ import com.cloud.utils.db.DB;
 import com.cloud.utils.db.GenericDaoBase;
 import com.cloud.utils.db.SearchBuilder;
 import com.cloud.utils.db.SearchCriteria;
-import com.cloud.utils.db.SequenceFetcher;
 import com.cloud.utils.db.TransactionLegacy;
-import com.cloud.utils.net.NetUtils;

 /**
  * @config
@@ -83,7 +80,7 @@ public class DataCenterDaoImpl extends GenericDaoBase<DataCenterVO, Long> implem

     protected long _prefix;
     protected Random _rand = new Random(System.currentTimeMillis());
-    protected TableGenerator _tgMacAddress;

     @Override
     public DataCenterVO findByName(String name) {
@@ -230,25 +227,6 @@ public class DataCenterDaoImpl extends GenericDaoBase<DataCenterVO, Long> implem
         return vo.getVlan();
     }

-    @Override
-    public String[] getNextAvailableMacAddressPair(long id) {
-        return getNextAvailableMacAddressPair(id, 0);
-    }
-
-    @Override
-    public String[] getNextAvailableMacAddressPair(long id, long mask) {
-        SequenceFetcher fetch = SequenceFetcher.getInstance();
-
-        long seq = fetch.getNextSequence(Long.class, _tgMacAddress, id);
-        seq = seq | _prefix | ((id & 0x7f) << 32);
-        seq |= mask;
-        seq |= ((_rand.nextInt(Short.MAX_VALUE) << 16) & 0x00000000ffff0000l);
-        String[] pair = new String[2];
-        pair[0] = NetUtils.long2Mac(seq);
-        pair[1] = NetUtils.long2Mac(seq | 0x1l << 39);
-        return pair;
-    }
-
     @Override
     public PrivateAllocationData allocatePrivateIpAddress(long dcId, long podId, long instanceId, String reservationId, boolean forSystemVms) {
         _ipAllocDao.releaseIpAddress(instanceId);
@@ -348,9 +326,6 @@ public class DataCenterDaoImpl extends GenericDaoBase<DataCenterVO, Long> implem
         TokenSearch = createSearchBuilder();
         TokenSearch.and("zoneToken", TokenSearch.entity().getZoneToken(), SearchCriteria.Op.EQ);
         TokenSearch.done();
-
-        _tgMacAddress = _tgs.get("macAddress");
-        assert _tgMacAddress != null : "Couldn't get mac address table generator";
     }

     @Override
@@ -436,9 +436,6 @@ public class NetworkDaoImpl extends GenericDaoBase<NetworkVO, Long>implements Ne
         if(zoneMacIdentifier != null && zoneMacIdentifier.intValue() != 0 ){
             seq = seq | _prefix << 40 | (long)zoneMacIdentifier << 32 | networkConfigId << 16 & 0x00000000ffff0000l;
         }
-        else {
-            seq = seq | _prefix << 40 | _rand.nextInt(Short.MAX_VALUE) << 16 & 0x00000000ffff0000l;
-        }
         return NetUtils.long2Mac(seq);
     }

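To make the bit layout in the context line above concrete: the top octet carries _prefix, the second octet carries the zone MAC identifier, octets three and four carry the low 16 bits of the network id, and the low 16 bits come from the allocation sequence; the removed else branch substituted a random value for the network-id octets. The standalone sketch below recomputes that layout with made-up sample values, and a tiny formatter stands in for CloudStack's NetUtils.long2Mac.

// Illustrative only: all sample values are assumptions, not taken from a real deployment.
public class MacLayoutExample {

    static final long PREFIX = 0x06;        // plays the role of _prefix
    static final long ZONE_IDENTIFIER = 1;  // mac.identifier, or the zone DB id when the setting is 0
    static final long NETWORK_CONFIG_ID = 204;
    static final long SEQUENCE = 42;        // stands in for the per-network sequence value

    static String toMac(long value) {
        StringBuilder sb = new StringBuilder();
        for (int shift = 40; shift >= 0; shift -= 8) {
            if (sb.length() > 0) {
                sb.append(':');
            }
            sb.append(String.format("%02x", (value >> shift) & 0xff));
        }
        return sb.toString();
    }

    public static void main(String[] args) {
        // Same bit layout as the kept branch above: octet 1 = prefix, octet 2 = zone
        // identifier, octets 3-4 = masked network id, octets 5-6 = sequence bits.
        long seq = SEQUENCE | PREFIX << 40 | ZONE_IDENTIFIER << 32
                | NETWORK_CONFIG_ID << 16 & 0x00000000ffff0000L;
        System.out.println(toMac(seq)); // prints 06:01:00:cc:00:2a
    }
}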
@@ -87,7 +87,7 @@ public interface NicDao extends GenericDao<NicVO, Long> {

     List<NicVO> listByVmIdAndKeyword(long instanceId, String keyword);

-    NicVO findByInstanceIdAndMacAddress(long instanceId, String macAddress);
+    NicVO findByMacAddress(String macAddress);

     NicVO findByNetworkIdAndMacAddressIncludingRemoved(long networkId, String mac);

@@ -376,9 +376,8 @@ public class NicDaoImpl extends GenericDaoBase<NicVO, Long> implements NicDao {
     }

     @Override
-    public NicVO findByInstanceIdAndMacAddress(long instanceId, String macAddress) {
+    public NicVO findByMacAddress(String macAddress) {
         SearchCriteria<NicVO> sc = AllFieldsSearch.create();
-        sc.setParameters("instance", instanceId);
         sc.setParameters("macAddress", macAddress);
         return findOneBy(sc);
     }
@@ -59,7 +59,7 @@ public class SequenceFetcher {
     }

     public <T> T getNextSequence(Class<T> clazz, TableGenerator tg, Object key, boolean isRandom) {
-        Future<T> future = _executors.submit(new Fetcher<T>(clazz, tg, key, isRandom));
+        Future<T> future = _executors.submit(new Fetcher<>(clazz, tg, key, isRandom));
         try {
             return future.get();
         } catch (Exception e) {
@@ -69,7 +69,7 @@ public class SequenceFetcher {
     }

     protected SequenceFetcher() {
-        _executors = new ThreadPoolExecutor(100, 100, 120l, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(250), new NamedThreadFactory("SequenceFetcher"));
+        _executors = new ThreadPoolExecutor(100, 100, 120l, TimeUnit.SECONDS, new LinkedBlockingQueue<>(250), new NamedThreadFactory("SequenceFetcher"));
     }

     protected static final SequenceFetcher s_instance = new SequenceFetcher();
@@ -50,7 +50,7 @@ public class HypervGuru extends HypervisorGuruBase implements HypervisorGuru {
     @Inject
     private GuestOSDao _guestOsDao;
     @Inject HypervManager _hypervMgr;
-    @Inject NetworkModel _networkMgr;
+    @Inject NetworkModel networkModel;
     int MaxNicSupported = 8;
     @Override
     public final HypervisorType getHypervisorType() {
@@ -120,7 +120,7 @@ public class HypervGuru extends HypervisorGuruBase implements HypervisorGuru {
                 nicTo.setName(profile.getName());

                 try {
-                    String mac = _networkMgr.getNextAvailableMacAddressInNetwork(networkId);
+                    String mac = networkModel.getNextAvailableMacAddressInNetwork(networkId);
                     nicTo.setMac(mac);
                 } catch (InsufficientAddressCapacityException e) {
                     throw new CloudRuntimeException("unable to allocate mac address on network: " + networkId);
@@ -136,7 +136,7 @@ public class HypervGuru extends HypervisorGuruBase implements HypervisorGuru {
                 nicTo.setBroadcastUri(profile.getBroadCastUri());
                 nicTo.setIsolationuri(profile.getIsolationUri());

-                Integer networkRate = _networkMgr.getNetworkRate(network.getId(), null);
+                Integer networkRate = networkModel.getNetworkRate(network.getId(), null);
                 nicTo.setNetworkRateMbps(networkRate);

                 expandedNics[i] = nicTo;
@@ -113,7 +113,8 @@ public class KVMStoragePoolManager {
                 s_logger.warn(String.format("Duplicate StorageAdaptor type %s, not loading %s", info.storagePoolType().toString(), storageAdaptor.getName()));
             } else {
                 try {
-                    this._storageMapper.put(info.storagePoolType().toString(), storageAdaptor.newInstance());
+                    s_logger.info(String.format("adding storage adaptor for %s", storageAdaptor.getName()));
+                    this._storageMapper.put(info.storagePoolType().toString(), storageAdaptor.getDeclaredConstructor().newInstance());
                 } catch (Exception ex) {
                     throw new CloudRuntimeException(ex.toString());
                 }
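The switch from Class.newInstance() to getDeclaredConstructor().newInstance() follows the standard replacement for the API deprecated since Java 9. A minimal, self-contained illustration follows; the adaptor class in it is made up.

// Illustrative only: shows the reflective construction pattern adopted above.
public class ReflectiveConstructionExample {

    public static class DummyAdaptor {
        public DummyAdaptor() { }
    }

    public static void main(String[] args) throws ReflectiveOperationException {
        Class<DummyAdaptor> clazz = DummyAdaptor.class;
        // clazz.newInstance() is deprecated: it rethrows any checked exception from the
        // constructor without declaring it. The replacement wraps such exceptions in
        // InvocationTargetException instead, keeping them visible to the caller.
        DummyAdaptor adaptor = clazz.getDeclaredConstructor().newInstance();
        System.out.println(adaptor.getClass().getSimpleName());
    }
}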
@@ -72,7 +72,7 @@ class VmwareVmImplementer {
     @Inject
     NetworkDao networkDao;
     @Inject
-    NetworkModel networkMgr;
+    NetworkModel networkModel;
     @Inject
     NicDao nicDao;
     @Inject
@@ -237,7 +237,7 @@ class VmwareVmImplementer {
                 nicTo.setNetmask("255.255.255.255");

                 try {
-                    String mac = networkMgr.getNextAvailableMacAddressInNetwork(networkId);
+                    String mac = networkModel.getNextAvailableMacAddressInNetwork(networkId);
                     nicTo.setMac(mac);
                 } catch (InsufficientAddressCapacityException e) {
                     throw new CloudRuntimeException("unable to allocate mac address on network: " + networkId);
@@ -253,7 +253,7 @@ class VmwareVmImplementer {
                 nicTo.setBroadcastUri(publicNicProfile.getBroadCastUri());
                 nicTo.setIsolationuri(publicNicProfile.getIsolationUri());

-                Integer networkRate = networkMgr.getNetworkRate(network.getId(), null);
+                Integer networkRate = networkModel.getNetworkRate(network.getId(), null);
                 nicTo.setNetworkRateMbps(networkRate);

                 expandedNics[i] = nicTo;
@@ -296,7 +296,7 @@ class VmwareVmImplementer {

         for (NicProfile nicProfile : nicProfiles) {
             if (nicProfile.getTrafficType() == Networks.TrafficType.Guest) {
-                if (networkMgr.isProviderSupportServiceInNetwork(nicProfile.getNetworkId(), Network.Service.Firewall, Network.Provider.CiscoVnmc)) {
+                if (networkModel.isProviderSupportServiceInNetwork(nicProfile.getNetworkId(), Network.Service.Firewall, Network.Provider.CiscoVnmc)) {
                     details.put("ConfigureVServiceInNexus", Boolean.TRUE.toString());
                 }
                 break;
@@ -324,7 +324,7 @@ public class NetScalerVMManagerImpl extends ManagerBase implements NetScalerVMMa
         defaultNic2.setIPv4Address("");
         defaultNic2.setIPv4Gateway("");
         defaultNic2.setIPv4Netmask("");
-        String macAddress = _networkDao.getNextAvailableMacAddress(defaultPublicNetwork.getId(), null);
+        String macAddress = _networkModel.getNextAvailableMacAddressInNetwork(defaultPublicNetwork.getId());
         defaultNic2.setMacAddress(macAddress);

         networks.put(_networkMgr.setupNetwork(_accountMgr.getSystemAccount(), _networkOfferingDao.findByUniqueName(NetworkOffering.SystemPublicNetwork), plan, null, null, false).get(0),
@@ -82,6 +82,7 @@ import org.apache.cloudstack.storage.command.StorageSubSystemCommand;
 import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
 import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao;
 import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO;
 import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
 import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
 import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao;
@@ -97,6 +98,7 @@ import org.apache.cloudstack.storage.to.TemplateObjectTO;
 import org.apache.cloudstack.storage.to.VolumeObjectTO;
 import org.apache.cloudstack.storage.volume.VolumeObject;
 import org.apache.commons.collections4.CollectionUtils;
+import org.apache.commons.collections4.MapUtils;
 import org.apache.log4j.Logger;

 import javax.inject.Inject;
@@ -1047,18 +1049,54 @@ public class StorPoolPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
     }

     public boolean canProvideStorageStats() {
-        return false;
+        return StorPoolConfigurationManager.StorageStatsInterval.value() > 0;
     }

     public Pair<Long, Long> getStorageStats(StoragePool storagePool) {
+        if (storagePool == null) {
+            return null;
+        }
+        Map<Long, Map<String, Pair<Long, Long>>> templatesStats = StorPoolStatsCollector.templatesStats;
+        if (MapUtils.isNotEmpty(templatesStats) && templatesStats.containsKey(storagePool.getDataCenterId())) {
+            Map<String, Pair<Long, Long>> storageStats = templatesStats.get(storagePool.getDataCenterId());
+            StoragePoolDetailVO templateName = storagePoolDetailsDao.findDetail(storagePool.getId(), StorPoolUtil.SP_TEMPLATE);
+            if (storageStats.containsKey(templateName.getValue()) && templateName != null) {
+                Pair<Long, Long> stats = storageStats.get(templateName.getValue());
+                if (stats.first() != storagePool.getCapacityBytes()) {
+                    primaryStoreDao.updateCapacityBytes(storagePool.getId(), stats.first());
+                }
+                return storageStats.get(templateName.getValue());
+            }
+        }
         return null;
     }

     public boolean canProvideVolumeStats() {
-        return false;
+        return StorPoolConfigurationManager.VolumesStatsInterval.value() > 0;
     }

     public Pair<Long, Long> getVolumeStats(StoragePool storagePool, String volumeId) {
+
+        if (volumeId == null) {
+            return null;
+        }
+
+        Map<String, Pair<Long, Long>> volumesStats = StorPoolStatsCollector.volumesStats;
+        if (MapUtils.isNotEmpty(volumesStats)) {
+            Pair<Long, Long> volumeStats = volumesStats.get(StorPoolStorageAdaptor.getVolumeNameFromPath(volumeId, true));
+            if (volumeStats != null) {
+                return volumeStats;
+            }
+        } else {
+            List<VolumeVO> volumes = volumeDao.findByPoolId(storagePool.getId());
+            for (VolumeVO volume : volumes) {
+                if (volume.getPath() != null && volume.getPath().equals(volumeId)) {
+                    long size = volume.getSize();
+                    StorPoolUtil.spLog("Volume [%s] doesn't have any statistics, returning its size [%s]", volumeId, size);
+                    return new Pair<>(size, size);
+                }
+            }
+        }
         return null;
     }

@@ -0,0 +1,188 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.cloudstack.storage.datastore.driver;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+
+import javax.inject.Inject;
+
+import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+import org.apache.cloudstack.storage.datastore.util.StorPoolUtil;
+import org.apache.cloudstack.storage.snapshot.StorPoolConfigurationManager;
+import org.apache.commons.collections.CollectionUtils;
+import org.apache.log4j.Logger;
+
+import com.cloud.utils.NumbersUtil;
+import com.cloud.utils.Pair;
+import com.cloud.utils.component.ManagerBase;
+import com.cloud.utils.concurrency.NamedThreadFactory;
+import com.google.gson.JsonArray;
+import com.google.gson.JsonElement;
+import com.google.gson.JsonObject;
+
+public class StorPoolStatsCollector extends ManagerBase {
+
+    private static Logger log = Logger.getLogger(StorPoolStatsCollector.class);
+
+    @Inject
+    private PrimaryDataStoreDao storagePoolDao;
+    @Inject
+    private StoragePoolDetailsDao storagePoolDetailsDao;
+    @Inject
+    private ConfigurationDao configurationDao;
+
+    private ScheduledExecutorService executor;
+
+    static volatile Map<String, Pair<Long, Long>> volumesStats = new ConcurrentHashMap<>();
+    static volatile Map<Long, Map<String, Pair<Long, Long>>> templatesStats = new ConcurrentHashMap<>();
+
+
+    enum StorPoolObject {
+        VOLUME, TEMPLATE;
+    }
+
+    @Override
+    public boolean start() {
+        List<StoragePoolVO> spPools = storagePoolDao.findPoolsByProvider(StorPoolUtil.SP_PROVIDER_NAME);
+        if (CollectionUtils.isNotEmpty(spPools)) {
+            executor = Executors.newScheduledThreadPool(2,new NamedThreadFactory("StorPoolStatsCollector"));
+            long storageStatsInterval = NumbersUtil.parseLong(configurationDao.getValue("storage.stats.interval"), 60000L);
+            long volumeStatsInterval = NumbersUtil.parseLong(configurationDao.getValue("volume.stats.interval"), 60000L);
+
+            if (StorPoolConfigurationManager.VolumesStatsInterval.value() > 0 && volumeStatsInterval > 0) {
+                executor.scheduleAtFixedRate(new StorPoolVolumeStatsMonitorTask(),120, StorPoolConfigurationManager.VolumesStatsInterval.value(), TimeUnit.SECONDS);
+            }
+            if (StorPoolConfigurationManager.StorageStatsInterval.value() > 0 && storageStatsInterval > 0) {
+                executor.scheduleAtFixedRate(new StorPoolStorageStatsMonitorTask(), 120, StorPoolConfigurationManager.StorageStatsInterval.value(), TimeUnit.SECONDS);
+            }
+        }
+
+        return true;
+    }
+
+    class StorPoolVolumeStatsMonitorTask implements Runnable {
+
+        @Override
+        public void run() {
+            List<StoragePoolVO> spPools = storagePoolDao.findPoolsByProvider(StorPoolUtil.SP_PROVIDER_NAME);
+            if (CollectionUtils.isNotEmpty(spPools)) {
+                volumesStats.clear();
+
+                log.debug("Collecting StorPool volumes used space");
+                Map<Long, StoragePoolVO> onePoolforZone = new HashMap<>();
+                for (StoragePoolVO storagePoolVO : spPools) {
+                    onePoolforZone.put(storagePoolVO.getDataCenterId(), storagePoolVO);
+                }
+                for (StoragePoolVO storagePool : onePoolforZone.values()) {
+                    try {
+                        log.debug(String.format("Collecting volumes statistics for zone [%s]", storagePool.getDataCenterId()));
+                        JsonArray arr = StorPoolUtil.volumesSpace(StorPoolUtil.getSpConnection(storagePool.getUuid(),
+                                storagePool.getId(), storagePoolDetailsDao, storagePoolDao));
+                        volumesStats.putAll(getClusterVolumeOrTemplateSpace(arr, StorPoolObject.VOLUME));
+                    } catch (Exception e) {
+                        log.debug(String.format("Could not collect StorPool volumes statistics due to %s", e.getMessage()));
+                    }
+                }
+            }
+        }
+    }
+
+    class StorPoolStorageStatsMonitorTask implements Runnable {
+
+        @Override
+        public void run() {
+            List<StoragePoolVO> spPools = storagePoolDao.findPoolsByProvider(StorPoolUtil.SP_PROVIDER_NAME);
+            if (CollectionUtils.isNotEmpty(spPools)) {
+                templatesStats.clear();
+
+                Map<Long, StoragePoolVO> onePoolforZone = new HashMap<>();
+                for (StoragePoolVO storagePoolVO : spPools) {
+                    onePoolforZone.put(storagePoolVO.getDataCenterId(), storagePoolVO);
+                }
+                for (StoragePoolVO storagePool : onePoolforZone.values()) {
+                    try {
+                        log.debug(String.format("Collecting templates statistics for zone [%s]", storagePool.getDataCenterId()));
+                        JsonArray arr = StorPoolUtil.templatesStats(StorPoolUtil.getSpConnection(storagePool.getUuid(),
+                                storagePool.getId(), storagePoolDetailsDao, storagePoolDao));
+                        templatesStats.put(storagePool.getDataCenterId(), getClusterVolumeOrTemplateSpace(arr, StorPoolObject.TEMPLATE));
+                    } catch (Exception e) {
+                        log.debug(String.format("Could not collect StorPool templates statistics %s", e.getMessage()));
+                    }
+                }
+            }
+        }
+    }
+
+    private Map<String, Pair<Long, Long>> getClusterVolumeOrTemplateSpace(JsonArray arr, StorPoolObject spObject) {
+        Map<String, Pair<Long, Long>> map = new HashMap<>();
+        for (JsonElement jsonElement : arr) {
+            JsonObject name = jsonElement.getAsJsonObject().getAsJsonObject("response");
+            if (name != null) {
+                JsonArray data = name.getAsJsonObject().getAsJsonArray("data");
+                if (StorPoolObject.VOLUME == spObject) {
+                    map.putAll(getStatsForVolumes(data));
+                } else if (StorPoolObject.TEMPLATE == spObject) {
+                    getClusterStats(data, map);
+                }
+            } else if (StorPoolObject.TEMPLATE == spObject) {
+                return map;
+            }
+        }
+        return map;
+    }
+
+    private Map<String, Pair<Long, Long>> getStatsForVolumes(JsonArray arr) {
+        Map<String, Pair<Long, Long>> map = new HashMap<>();
+        for (int i = 0; i < arr.size(); i++) {
+            String name = arr.get(i).getAsJsonObject().get("name").getAsString();
+            if (!name.startsWith("*") && !name.contains("@")) {
+                Long spaceUsed = arr.get(i).getAsJsonObject().get("spaceUsed").getAsLong();
+                Long size = arr.get(i).getAsJsonObject().get("size").getAsLong();
+                map.put(name, new Pair<>(spaceUsed, size));
+            }
+        }
+        return map;
+    }
+
+    private void getClusterStats(JsonArray data, Map<String, Pair<Long, Long>> map) {
+        for (JsonElement dat : data) {
+            long capacity = dat.getAsJsonObject().get("stored").getAsJsonObject().get("capacity").getAsLong();
+            long free = dat.getAsJsonObject().get("stored").getAsJsonObject().get("free").getAsLong();
+            long used = capacity - free;
+            String templateName = dat.getAsJsonObject().get("name").getAsString();
+            if (!map.containsKey(templateName)) {
+                map.put(templateName, new Pair<>(capacity, used));
+            } else {
+                Pair<Long, Long> template = map.get(templateName);
+                template.first(template.first() + capacity);
+                template.second(template.second() + used);
+                map.put(templateName, template);
+            }
+        }
+    }
+}
@@ -411,6 +411,18 @@ public class StorPoolUtil {
         return data;
     }

+    public static JsonArray volumesSpace(SpConnectionDesc conn) {
+        SpApiResponse resp = GET("MultiCluster/AllClusters/VolumesSpace", conn);
+        JsonObject obj = resp.fullJson.getAsJsonObject();
+        return obj.getAsJsonObject("data").getAsJsonArray("clusters");
+    }
+
+    public static JsonArray templatesStats(SpConnectionDesc conn) {
+        SpApiResponse resp = GET("MultiCluster/AllClusters/VolumeTemplatesStatus", conn);
+        JsonObject obj = resp.fullJson.getAsJsonObject();
+        return obj.getAsJsonObject("data").getAsJsonArray("clusters");
+    }
+
     private static boolean objectExists(SpApiError err) {
         if (!err.getName().equals("objectDoesNotExist")) {
             throw new CloudRuntimeException(err.getDescr());
@@ -34,6 +34,16 @@ public class StorPoolConfigurationManager implements Configurable {
     public static final ConfigKey<String> AlternativeEndpoint = new ConfigKey<String>(String.class, "sp.alternative.endpoint", "Advanced", "",
             "Used for StorPool primary storage for an alternative endpoint. Structure of the endpoint is - SP_API_HTTP=address:port;SP_AUTH_TOKEN=token;SP_TEMPLATE=template_name", true, ConfigKey.Scope.StoragePool, null);

+    public static final ConfigKey<Integer> VolumesStatsInterval = new ConfigKey<>("Advanced", Integer.class,
+            "storpool.volumes.stats.interval", "3600",
+            "The interval in seconds to get StorPool volumes statistics",
+            false);
+
+    public static final ConfigKey<Integer> StorageStatsInterval = new ConfigKey<>("Advanced", Integer.class,
+            "storpool.storage.stats.interval", "3600",
+            "The interval in seconds to get StorPool template statistics",
+            false);
+
     @Override
     public String getConfigComponentName() {
         return StorPoolConfigurationManager.class.getSimpleName();
@@ -41,6 +51,6 @@ public class StorPoolConfigurationManager implements Configurable {

     @Override
     public ConfigKey<?>[] getConfigKeys() {
-        return new ConfigKey<?>[] { BypassSecondaryStorage, StorPoolClusterId, AlternativeEndPointEnabled, AlternativeEndpoint };
+        return new ConfigKey<?>[] { BypassSecondaryStorage, StorPoolClusterId, AlternativeEndPointEnabled, AlternativeEndpoint, VolumesStatsInterval, StorageStatsInterval };
     }
 }
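A minimal sketch of how these keys are intended to be read follows; the guard mirrors canProvideVolumeStats/canProvideStorageStats and StorPoolStatsCollector.start() earlier in this diff, while the wrapper class itself is illustrative.

import java.util.concurrent.TimeUnit;

import org.apache.cloudstack.storage.snapshot.StorPoolConfigurationManager;

// Illustrative reader for the new interval settings; a value of 0 or less
// effectively disables the corresponding StorPool statistics task.
public class StorPoolStatsSettingsExample {

    public static boolean volumeStatsEnabled() {
        return StorPoolConfigurationManager.VolumesStatsInterval.value() > 0;
    }

    public static long storageStatsPeriodMillis() {
        // The keys are expressed in seconds, as their descriptions state.
        return TimeUnit.SECONDS.toMillis(StorPoolConfigurationManager.StorageStatsInterval.value());
    }
}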
@@ -35,4 +35,7 @@

     <bean id="cleanupTags"
           class="org.apache.cloudstack.storage.collector.StorPoolAbandonObjectsCollector" />
+
+    <bean id="statistics"
+          class="org.apache.cloudstack.storage.datastore.driver.StorPoolStatsCollector" />
 </beans>
@@ -80,7 +80,6 @@ import com.cloud.network.dao.IPAddressVO;
 import com.cloud.network.dao.NetworkAccountDao;
 import com.cloud.network.dao.NetworkAccountVO;
 import com.cloud.network.dao.NetworkDao;
-import com.cloud.network.dao.NetworkDetailsDao;
 import com.cloud.network.dao.NetworkDomainDao;
 import com.cloud.network.dao.NetworkDomainVO;
 import com.cloud.network.dao.NetworkServiceMapDao;
@@ -172,8 +171,6 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel, Confi
     @Inject
     NetworkDao _networksDao = null;
     @Inject
-    NetworkDetailsDao networkDetailsDao;
-    @Inject
     NicDao _nicDao = null;
     @Inject
     PodVlanMapDao _podVlanMapDao;
@@ -593,13 +590,24 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel, Confi
     @Override
     public String getNextAvailableMacAddressInNetwork(long networkId) throws InsufficientAddressCapacityException {
         NetworkVO network = _networksDao.findById(networkId);
-        String mac = _networksDao.getNextAvailableMacAddress(networkId, MACIdentifier.value());
-        if (mac == null) {
-            throw new InsufficientAddressCapacityException("Unable to create another mac address", Network.class, networkId);
+        Integer zoneIdentifier = MACIdentifier.value();
+        if (zoneIdentifier.intValue() == 0) {
+            zoneIdentifier = Long.valueOf(network.getDataCenterId()).intValue();
         }
+        String mac;
+        do {
+            mac = _networksDao.getNextAvailableMacAddress(networkId, zoneIdentifier);
+            if (mac == null) {
+                throw new InsufficientAddressCapacityException("Unable to create another mac address", Network.class, networkId);
+            }
+        } while(! isMACUnique(mac));
         return mac;
     }

+    private boolean isMACUnique(String mac) {
+        return (_nicDao.findByMacAddress(mac) == null);
+    }
+
     @Override
     @DB
     public Network getNetwork(long id) {
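The allocation loop above can be exercised in isolation. The sketch below reproduces the same do/while shape with plain functional interfaces in place of the DAOs; everything in it is illustrative and none of it is CloudStack API.

import java.util.HashSet;
import java.util.Set;
import java.util.function.Predicate;
import java.util.function.Supplier;

// Standalone illustration of the allocation loop: keep drawing candidate MACs
// until one passes the uniqueness predicate, failing fast when no candidate is available.
public class MacRetryExample {

    static String nextUniqueMac(Supplier<String> nextCandidate, Predicate<String> isUnique) {
        String mac;
        do {
            mac = nextCandidate.get();
            if (mac == null) {
                throw new IllegalStateException("Unable to create another mac address");
            }
        } while (!isUnique.test(mac));
        return mac;
    }

    public static void main(String[] args) {
        // Pretend the first candidate already exists in the nics table.
        Set<String> existing = new HashSet<>();
        existing.add("06:01:00:cc:00:2a");
        String[] candidates = {"06:01:00:cc:00:2a", "06:01:00:cc:00:2b"};
        int[] next = {0};
        String mac = nextUniqueMac(() -> candidates[next[0]++], m -> !existing.contains(m));
        System.out.println(mac); // prints 06:01:00:cc:00:2b
    }
}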
@@ -59,7 +59,7 @@ public class ControlNetworkGuru extends PodBasedNetworkGuru implements NetworkGu
     @Inject
     ConfigurationDao _configDao;
     @Inject
-    NetworkModel _networkMgr;
+    NetworkModel networkModel;
     String _cidr;
     String _gateway;

@@ -114,7 +114,7 @@ public class ControlNetworkGuru extends PodBasedNetworkGuru implements NetworkGu

         if (vm.getHypervisorType() == HypervisorType.VMware && !isRouterVm(vm)) {
             NicProfile nicProf = new NicProfile(Nic.ReservationStrategy.Create, null, null, null, null);
-            String mac = _networkMgr.getNextAvailableMacAddressInNetwork(config.getId());
+            String mac = networkModel.getNextAvailableMacAddressInNetwork(config.getId());
             nicProf.setMacAddress(mac);
             return nicProf;
         }
@@ -140,7 +140,7 @@ public class ControlNetworkGuru extends PodBasedNetworkGuru implements NetworkGu
         if (((hType == HypervisorType.VMware) || (hType == HypervisorType.Hyperv)) && isRouterVm(vm)) {
             super.reserve(nic, config, vm, dest, context);

-            String mac = _networkMgr.getNextAvailableMacAddressInNetwork(config.getId());
+            String mac = networkModel.getNextAvailableMacAddressInNetwork(config.getId());
             nic.setMacAddress(mac);
             return;
         }
@@ -1417,7 +1417,8 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
         // Check if the vm can be migrated with storage.
         boolean canMigrateWithStorage = false;

-        if (VirtualMachine.Type.User.equals(vm.getType()) || HypervisorType.VMware.equals(vm.getHypervisorType())) {
+        List<HypervisorType> hypervisorTypes = Arrays.asList(new HypervisorType[]{HypervisorType.VMware, HypervisorType.KVM});
+        if (VirtualMachine.Type.User.equals(vm.getType()) || hypervisorTypes.contains(vm.getHypervisorType())) {
             canMigrateWithStorage = _hypervisorCapabilitiesDao.isStorageMotionSupported(srcHost.getHypervisorType(), srcHostVersion);
         }

@@ -238,6 +238,7 @@ import com.cloud.hypervisor.dao.HypervisorCapabilitiesDao;
 import com.cloud.hypervisor.kvm.dpdk.DpdkHelper;
 import com.cloud.network.IpAddressManager;
 import com.cloud.network.Network;
+import com.cloud.network.Network.GuestType;
 import com.cloud.network.Network.IpAddresses;
 import com.cloud.network.Network.Provider;
 import com.cloud.network.Network.Service;
@@ -3594,13 +3595,14 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir

             for (Long networkId : networkIdList) {
                 NetworkVO network = _networkDao.findById(networkId);
+                NetworkOffering ntwkOffering = _networkOfferingDao.findById(network.getNetworkOfferingId());

                 if (network == null) {
                     throw new InvalidParameterValueException("Unable to find network by id " + networkId);
                 }

-                if (!_networkModel.isSecurityGroupSupportedInNetwork(network)) {
-                    throw new InvalidParameterValueException("Network is not security group enabled: " + network.getId());
+                if (!_networkModel.isSecurityGroupSupportedInNetwork(network) && (ntwkOffering.getGuestType() != GuestType.L2)) {
+                    throw new InvalidParameterValueException("Network is not security group enabled or not L2 network: " + network.getId());
                 }

                 _accountMgr.checkAccess(owner, AccessType.UseEntry, false, network);
@@ -315,7 +315,7 @@ export default {
       api('listZones', params).then(json => {
         for (const i in json.listzonesresponse.zone) {
           const zone = json.listzonesresponse.zone[i]
-          if (zone.networktype === 'Advanced' && zone.securitygroupsenabled !== true) {
+          if (zone.networktype === 'Advanced') {
             this.zones.push(zone)
           }
         }
@@ -26,7 +26,7 @@
         @refresh-data="refreshParent"
         @refresh="handleRefresh"/>
       </a-tab-pane>
-      <a-tab-pane :tab="$t('label.l2')" key="3" v-if="isAdvancedZoneWithoutSGAvailable">
+      <a-tab-pane :tab="$t('label.l2')" key="3">
         <CreateL2NetworkForm
           :loading="loading"
           :resource="resource"