Mirror of https://github.com/apache/cloudstack.git (synced 2025-11-03 04:12:31 +01:00)

Commit 2b05dd93a1: Merge branch 'main' into nsx-integration
@@ -56,10 +56,9 @@ github:
- alexandremattioli
- vishesh92
- GaOrtiga
- BryanMLima
- SadiJr
- JoaoJandre
- winterhazel
- rp-

protected_branches: ~

@@ -65,18 +65,18 @@ public interface ClusterDrsAlgorithm extends Adapter {
* the service offering for the virtual machine
* @param destHost
* the destination host for the virtual machine
* @param hostCpuUsedMap
* a map of host IDs to the amount of CPU used on each host
* @param hostMemoryUsedMap
* a map of host IDs to the amount of memory used on each host
* @param hostCpuFreeMap
* a map of host IDs to the amount of CPU free on each host
* @param hostMemoryFreeMap
* a map of host IDs to the amount of memory free on each host
* @param requiresStorageMotion
* whether storage motion is required for the virtual machine
*
* @return a ternary containing improvement, cost, benefit
*/
Ternary<Double, Double, Double> getMetrics(long clusterId, VirtualMachine vm, ServiceOffering serviceOffering,
Host destHost, Map<Long, Long> hostCpuUsedMap,
Map<Long, Long> hostMemoryUsedMap, Boolean requiresStorageMotion);
Host destHost, Map<Long, Long> hostCpuFreeMap,
Map<Long, Long> hostMemoryFreeMap, Boolean requiresStorageMotion);

/**
* Calculates the imbalance of the cluster after a virtual machine migration.
@@ -87,30 +87,30 @@ public interface ClusterDrsAlgorithm extends Adapter {
* the virtual machine being migrated
* @param destHost
* the destination host for the virtual machine
* @param hostCpuUsedMap
* a map of host IDs to the amount of CPU used on each host
* @param hostMemoryUsedMap
* a map of host IDs to the amount of memory used on each host
* @param hostCpuFreeMap
* a map of host IDs to the amount of CPU free on each host
* @param hostMemoryFreeMap
* a map of host IDs to the amount of memory free on each host
*
* @return a pair containing the CPU and memory imbalance of the cluster after the migration
*/
default Pair<Double, Double> getImbalancePostMigration(ServiceOffering serviceOffering, VirtualMachine vm,
Host destHost, Map<Long, Long> hostCpuUsedMap,
Map<Long, Long> hostMemoryUsedMap) {
Host destHost, Map<Long, Long> hostCpuFreeMap,
Map<Long, Long> hostMemoryFreeMap) {
List<Long> postCpuList = new ArrayList<>();
List<Long> postMemoryList = new ArrayList<>();
final int vmCpu = serviceOffering.getCpu() * serviceOffering.getSpeed();
final long vmRam = serviceOffering.getRamSize() * 1024L * 1024L;

for (Long hostId : hostCpuUsedMap.keySet()) {
long cpu = hostCpuUsedMap.get(hostId);
long memory = hostMemoryUsedMap.get(hostId);
for (Long hostId : hostCpuFreeMap.keySet()) {
long cpu = hostCpuFreeMap.get(hostId);
long memory = hostMemoryFreeMap.get(hostId);
if (hostId == destHost.getId()) {
postCpuList.add(cpu + vmCpu);
postMemoryList.add(memory + vmRam);
} else if (hostId.equals(vm.getHostId())) {
postCpuList.add(cpu - vmCpu);
postMemoryList.add(memory - vmRam);
} else if (hostId.equals(vm.getHostId())) {
postCpuList.add(cpu + vmCpu);
postMemoryList.add(memory + vmRam);
} else {
postCpuList.add(cpu);
postMemoryList.add(memory);

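The hunks above switch the DRS interface from used-capacity maps to free-capacity maps. As a rough illustration of what the per-host lists built in getImbalancePostMigration feed into, the sketch below scores imbalance as the standard deviation of the per-host values divided by their mean; the class name, helper name, and the exact formula are assumptions for illustration, not necessarily CloudStack's implementation.

import java.util.Arrays;
import java.util.List;

// Illustrative only: turns a per-host metric list (such as postCpuList above) into a
// single imbalance score using the coefficient of variation (stddev / mean).
public class ImbalanceSketch {
    static double imbalance(List<Long> perHostValues) {
        double mean = perHostValues.stream().mapToLong(Long::longValue).average().orElse(0.0);
        if (mean == 0.0) {
            return 0.0;
        }
        double variance = perHostValues.stream()
                .mapToDouble(v -> (v - mean) * (v - mean))
                .average()
                .orElse(0.0);
        return Math.sqrt(variance) / mean;
    }

    public static void main(String[] args) {
        // Two hosts with 2000 and 1000 MHz free -> mean 1500, stddev 500, imbalance ~0.33
        System.out.println(imbalance(Arrays.asList(2000L, 1000L)));
    }
}
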
@@ -29,6 +29,9 @@ http.port=8080
# Max inactivity time in minutes for the session
session.timeout=30

# Max allowed API request payload / content size in bytes
request.content.size=1048576

# Options to configure and enable HTTPS on management server
#
# For management server to pickup these configuration settings, the configured

@@ -26,10 +26,10 @@ import java.lang.management.ManagementFactory;
import java.net.URL;
import java.util.Properties;

import com.cloud.utils.Pair;
import com.cloud.utils.server.ServerProperties;
import org.apache.commons.daemon.Daemon;
import org.apache.commons.daemon.DaemonContext;
import org.apache.commons.lang3.StringUtils;
import org.apache.log4j.Logger;
import org.eclipse.jetty.jmx.MBeanContainer;
import org.eclipse.jetty.server.ForwardedRequestCustomizer;
import org.eclipse.jetty.server.HttpConfiguration;
@@ -40,6 +40,7 @@ import org.eclipse.jetty.server.SecureRequestCustomizer;
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.server.ServerConnector;
import org.eclipse.jetty.server.SslConnectionFactory;
import org.eclipse.jetty.server.handler.ContextHandler;
import org.eclipse.jetty.server.handler.HandlerCollection;
import org.eclipse.jetty.server.handler.MovedContextHandler;
import org.eclipse.jetty.server.handler.RequestLogHandler;
@@ -50,10 +51,10 @@ import org.eclipse.jetty.util.ssl.SslContextFactory;
import org.eclipse.jetty.util.thread.QueuedThreadPool;
import org.eclipse.jetty.util.thread.ScheduledExecutorScheduler;
import org.eclipse.jetty.webapp.WebAppContext;
import org.apache.log4j.Logger;

import com.cloud.utils.Pair;
import com.cloud.utils.PropertiesUtil;
import org.apache.commons.lang3.StringUtils;
import com.cloud.utils.server.ServerProperties;

/***
* The ServerDaemon class implements the embedded server, it can be started either
@@ -79,6 +80,8 @@ public class ServerDaemon implements Daemon {
private static final String KEYSTORE_PASSWORD = "https.keystore.password";
private static final String WEBAPP_DIR = "webapp.dir";
private static final String ACCESS_LOG = "access.log";
private static final String REQUEST_CONTENT_SIZE_KEY = "request.content.size";
private static final int DEFAULT_REQUEST_CONTENT_SIZE = 1048576;

////////////////////////////////////////////////////////
/////////////// Server Configuration ///////////////////
@@ -90,6 +93,7 @@ public class ServerDaemon implements Daemon {
private int httpPort = 8080;
private int httpsPort = 8443;
private int sessionTimeout = 30;
private int maxFormContentSize = DEFAULT_REQUEST_CONTENT_SIZE;
private boolean httpsEnable = false;
private String accessLogFile = "access.log";
private String bindInterface = null;
@@ -136,6 +140,7 @@ public class ServerDaemon implements Daemon {
setWebAppLocation(properties.getProperty(WEBAPP_DIR));
setAccessLogFile(properties.getProperty(ACCESS_LOG, "access.log"));
setSessionTimeout(Integer.valueOf(properties.getProperty(SESSION_TIMEOUT, "30")));
setMaxFormContentSize(Integer.valueOf(properties.getProperty(REQUEST_CONTENT_SIZE_KEY, String.valueOf(DEFAULT_REQUEST_CONTENT_SIZE))));
} catch (final IOException e) {
LOG.warn("Failed to read configuration from server.properties file", e);
} finally {
@@ -186,6 +191,7 @@ public class ServerDaemon implements Daemon {

// Extra config options
server.setStopAtShutdown(true);
server.setAttribute(ContextHandler.MAX_FORM_CONTENT_SIZE_KEY, maxFormContentSize);

// HTTPS Connector
createHttpsConnector(httpConfig);
@@ -257,6 +263,7 @@ public class ServerDaemon implements Daemon {
final WebAppContext webApp = new WebAppContext();
webApp.setContextPath(contextPath);
webApp.setInitParameter("org.eclipse.jetty.servlet.Default.dirAllowed", "false");
webApp.setMaxFormContentSize(maxFormContentSize);

// GZIP handler
final GzipHandler gzipHandler = new GzipHandler();
@@ -355,4 +362,8 @@ public class ServerDaemon implements Daemon {
public void setSessionTimeout(int sessionTimeout) {
this.sessionTimeout = sessionTimeout;
}

public void setMaxFormContentSize(int maxFormContentSize) {
this.maxFormContentSize = maxFormContentSize;
}
}

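The ServerDaemon changes wire the new request.content.size property into Jetty in two places: as a server attribute and on the web app context. The standalone sketch below shows the same pattern outside CloudStack; the port, context path, and WAR path are illustrative, and the 1048576 value simply mirrors the default introduced above (without such a setting, Jetty applies its own form-content limit of roughly 200 kB).

import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.server.handler.ContextHandler;
import org.eclipse.jetty.webapp.WebAppContext;

// Minimal sketch of raising Jetty's maximum form/request content size, assuming a plain
// embedded Jetty setup; not CloudStack code.
public class MaxFormContentSizeExample {
    public static void main(String[] args) throws Exception {
        int maxFormContentSize = 1048576; // matches the request.content.size default above

        Server server = new Server(8080);
        server.setAttribute(ContextHandler.MAX_FORM_CONTENT_SIZE_KEY, maxFormContentSize);

        WebAppContext webApp = new WebAppContext();
        webApp.setContextPath("/client");      // illustrative context path
        webApp.setWar("client.war");           // illustrative WAR location
        webApp.setMaxFormContentSize(maxFormContentSize);

        server.setHandler(webApp);
        server.start();
        server.join();
    }
}
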
@@ -20,6 +20,7 @@ import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

import com.cloud.dc.DataCenter;
import org.apache.cloudstack.acl.ControlledEntity.ACLType;
import org.apache.cloudstack.framework.config.ConfigKey;
import org.apache.cloudstack.framework.config.ConfigKey.Scope;
@@ -341,7 +342,7 @@ public interface NetworkOrchestrationService {
*/
void cleanupNicDhcpDnsEntry(Network network, VirtualMachineProfile vmProfile, NicProfile nicProfile);

Pair<NicProfile, Integer> importNic(final String macAddress, int deviceId, final Network network, final Boolean isDefaultNic, final VirtualMachine vm, final Network.IpAddresses ipAddresses, boolean forced) throws InsufficientVirtualNetworkCapacityException, InsufficientAddressCapacityException;
Pair<NicProfile, Integer> importNic(final String macAddress, int deviceId, final Network network, final Boolean isDefaultNic, final VirtualMachine vm, final Network.IpAddresses ipAddresses, final DataCenter datacenter, boolean forced) throws InsufficientVirtualNetworkCapacityException, InsufficientAddressCapacityException;

void unmanageNics(VirtualMachineProfile vm);
}

@@ -153,7 +153,7 @@ public interface VolumeOrchestrationService {
* @param type Type of the volume - ROOT, DATADISK, etc
* @param name Name of the volume
* @param offering DiskOffering for the volume
* @param size DiskOffering for the volume
* @param sizeInBytes size of the volume in bytes
* @param minIops minimum IOPS for the disk, if not passed DiskOffering value will be used
* @param maxIops maximum IOPS for the disk, if not passed DiskOffering value will be used
* @param vm VirtualMachine this volume is attached to
@@ -165,7 +165,7 @@ public interface VolumeOrchestrationService {
* @param chainInfo chain info for the volume. Hypervisor specific.
* @return DiskProfile of imported volume
*/
DiskProfile importVolume(Type type, String name, DiskOffering offering, Long size, Long minIops, Long maxIops, VirtualMachine vm, VirtualMachineTemplate template,
DiskProfile importVolume(Type type, String name, DiskOffering offering, Long sizeInBytes, Long minIops, Long maxIops, VirtualMachine vm, VirtualMachineTemplate template,
Account owner, Long deviceId, Long poolId, String path, String chainInfo);

DiskProfile updateImportedVolume(Type type, DiskOffering offering, VirtualMachine vm, VirtualMachineTemplate template,

@@ -614,7 +614,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
private boolean isVmDestroyed(VMInstanceVO vm) {
if (vm == null || vm.getRemoved() != null) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Unable to find vm or vm is destroyed: " + vm);
s_logger.debug("Unable to find vm or vm is expunged: " + vm);
}
return true;
}
@@ -631,17 +631,17 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac

try {
if (!stateTransitTo(vm, VirtualMachine.Event.ExpungeOperation, vm.getHostId())) {
s_logger.debug("Unable to destroy the vm because it is not in the correct state: " + vm);
throw new CloudRuntimeException("Unable to destroy " + vm);
s_logger.debug("Unable to expunge the vm because it is not in the correct state: " + vm);
throw new CloudRuntimeException("Unable to expunge " + vm);

}
} catch (final NoTransitionException e) {
s_logger.debug("Unable to destroy the vm because it is not in the correct state: " + vm);
throw new CloudRuntimeException("Unable to destroy " + vm, e);
s_logger.debug("Unable to expunge the vm because it is not in the correct state: " + vm);
throw new CloudRuntimeException("Unable to expunge " + vm, e);
}

if (s_logger.isDebugEnabled()) {
s_logger.debug("Destroying vm " + vm);
s_logger.debug("Expunging vm " + vm);
}

final VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm);

@@ -4627,25 +4627,21 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra

@DB
@Override
public Pair<NicProfile, Integer> importNic(final String macAddress, int deviceId, final Network network, final Boolean isDefaultNic, final VirtualMachine vm, final Network.IpAddresses ipAddresses, final boolean forced)
public Pair<NicProfile, Integer> importNic(final String macAddress, int deviceId, final Network network, final Boolean isDefaultNic, final VirtualMachine vm, final Network.IpAddresses ipAddresses, final DataCenter dataCenter, final boolean forced)
throws ConcurrentOperationException, InsufficientVirtualNetworkCapacityException, InsufficientAddressCapacityException {
s_logger.debug("Allocating nic for vm " + vm.getUuid() + " in network " + network + " during import");
String guestIp = null;
String selectedIp = null;
if (ipAddresses != null && StringUtils.isNotEmpty(ipAddresses.getIp4Address())) {
if (ipAddresses.getIp4Address().equals("auto")) {
ipAddresses.setIp4Address(null);
}
if (network.getGuestType() != GuestType.L2) {
guestIp = _ipAddrMgr.acquireGuestIpAddress(network, ipAddresses.getIp4Address());
} else {
guestIp = null;
}
if (guestIp == null && network.getGuestType() != GuestType.L2 && !_networkModel.listNetworkOfferingServices(network.getNetworkOfferingId()).isEmpty()) {
selectedIp = getSelectedIpForNicImport(network, dataCenter, ipAddresses);
if (selectedIp == null && network.getGuestType() != GuestType.L2 && !_networkModel.listNetworkOfferingServices(network.getNetworkOfferingId()).isEmpty()) {
throw new InsufficientVirtualNetworkCapacityException("Unable to acquire Guest IP address for network " + network, DataCenter.class,
network.getDataCenterId());
}
}
final String finalGuestIp = guestIp;
final String finalSelectedIp = selectedIp;
final NicVO vo = Transaction.execute(new TransactionCallback<NicVO>() {
@Override
public NicVO doInTransaction(TransactionStatus status) {
@@ -4657,12 +4653,13 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
NicVO vo = new NicVO(network.getGuruName(), vm.getId(), network.getId(), vm.getType());
vo.setMacAddress(macAddressToPersist);
vo.setAddressFormat(Networks.AddressFormat.Ip4);
if (NetUtils.isValidIp4(finalGuestIp) && StringUtils.isNotEmpty(network.getGateway())) {
vo.setIPv4Address(finalGuestIp);
vo.setIPv4Gateway(network.getGateway());
if (StringUtils.isNotEmpty(network.getCidr())) {
vo.setIPv4Netmask(NetUtils.cidr2Netmask(network.getCidr()));
}
Pair<String, String> pair = getNetworkGatewayAndNetmaskForNicImport(network, dataCenter, finalSelectedIp);
String gateway = pair.first();
String netmask = pair.second();
if (NetUtils.isValidIp4(finalSelectedIp) && StringUtils.isNotEmpty(gateway)) {
vo.setIPv4Address(finalSelectedIp);
vo.setIPv4Gateway(gateway);
vo.setIPv4Netmask(netmask);
}
vo.setBroadcastUri(network.getBroadcastUri());
vo.setMode(network.getMode());
@@ -4699,6 +4696,45 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
return new Pair<NicProfile, Integer>(vmNic, Integer.valueOf(deviceId));
}

protected String getSelectedIpForNicImport(Network network, DataCenter dataCenter, Network.IpAddresses ipAddresses) {
if (network.getGuestType() == GuestType.L2) {
return null;
}
return dataCenter.getNetworkType() == NetworkType.Basic ?
getSelectedIpForNicImportOnBasicZone(ipAddresses.getIp4Address(), network, dataCenter):
_ipAddrMgr.acquireGuestIpAddress(network, ipAddresses.getIp4Address());
}

protected String getSelectedIpForNicImportOnBasicZone(String requestedIp, Network network, DataCenter dataCenter) {
IPAddressVO ipAddressVO = StringUtils.isBlank(requestedIp) ?
_ipAddressDao.findBySourceNetworkIdAndDatacenterIdAndState(network.getId(), dataCenter.getId(), IpAddress.State.Free):
_ipAddressDao.findByIp(requestedIp);
if (ipAddressVO == null || ipAddressVO.getState() != IpAddress.State.Free) {
String msg = String.format("Cannot find a free IP to assign to VM NIC on network %s", network.getName());
s_logger.error(msg);
throw new CloudRuntimeException(msg);
}
return ipAddressVO.getAddress() != null ? ipAddressVO.getAddress().addr() : null;
}

/**
* Obtain the gateway and netmask for a VM NIC to import
* If the VM to import is on a Basic Zone, then obtain the information from the vlan table instead of the network
*/
protected Pair<String, String> getNetworkGatewayAndNetmaskForNicImport(Network network, DataCenter dataCenter, String selectedIp) {
String gateway = network.getGateway();
String netmask = StringUtils.isNotEmpty(network.getCidr()) ? NetUtils.cidr2Netmask(network.getCidr()) : null;
if (dataCenter.getNetworkType() == NetworkType.Basic) {
IPAddressVO freeIp = _ipAddressDao.findByIp(selectedIp);
if (freeIp != null) {
VlanVO vlan = _vlanDao.findById(freeIp.getVlanId());
gateway = vlan != null ? vlan.getVlanGateway() : null;
netmask = vlan != null ? vlan.getVlanNetmask() : null;
}
}
return new Pair<>(gateway, netmask);
}

private String generateNewMacAddressIfForced(Network network, String macAddress, boolean forced) {
if (!forced) {
throw new CloudRuntimeException("NIC with MAC address = " + macAddress + " exists on network with ID = " + network.getId() +

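In the Advanced-zone path of getNetworkGatewayAndNetmaskForNicImport above, the netmask still comes from the network CIDR via NetUtils.cidr2Netmask; only Basic zones fall back to the vlan table. A minimal standalone illustration of that conversion, assuming CloudStack's com.cloud.utils.net.NetUtils utility on the classpath (the CIDR value is an example):

import com.cloud.utils.net.NetUtils;

// Tiny sketch of the CIDR-to-netmask conversion used in the Advanced-zone branch above.
public class CidrNetmaskExample {
    public static void main(String[] args) {
        String netmask = NetUtils.cidr2Netmask("10.1.1.0/24");
        System.out.println(netmask); // 255.255.255.0
    }
}
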
@@ -2172,19 +2172,17 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
}

@Override
public DiskProfile importVolume(Type type, String name, DiskOffering offering, Long size, Long minIops, Long maxIops,
public DiskProfile importVolume(Type type, String name, DiskOffering offering, Long sizeInBytes, Long minIops, Long maxIops,
VirtualMachine vm, VirtualMachineTemplate template, Account owner,
Long deviceId, Long poolId, String path, String chainInfo) {
if (size == null) {
size = offering.getDiskSize();
} else {
size = (size * 1024 * 1024 * 1024);
if (sizeInBytes == null) {
sizeInBytes = offering.getDiskSize();
}

minIops = minIops != null ? minIops : offering.getMinIops();
maxIops = maxIops != null ? maxIops : offering.getMaxIops();

VolumeVO vol = new VolumeVO(type, name, vm.getDataCenterId(), owner.getDomainId(), owner.getId(), offering.getId(), offering.getProvisioningType(), size, minIops, maxIops, null);
VolumeVO vol = new VolumeVO(type, name, vm.getDataCenterId(), owner.getDomainId(), owner.getId(), offering.getId(), offering.getProvisioningType(), sizeInBytes, minIops, maxIops, null);
if (vm != null) {
vol.setInstanceId(vm.getId());
}

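The importVolume refactor above also changes the unit convention: the old code received a size in GiB and multiplied it by 1024^3, while the renamed sizeInBytes parameter is used as-is and only defaulted from the disk offering when null. A small caller-side illustration of the conversion now done by the caller (the 20 GiB figure and class name are examples, not CloudStack code):

public class ImportVolumeSizeExample {
    public static void main(String[] args) {
        long sizeInGiB = 20L;                              // example input
        long sizeInBytes = sizeInGiB * 1024 * 1024 * 1024; // value now passed as sizeInBytes
        System.out.println(sizeInBytes);                   // 21474836480
    }
}
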
@@ -29,6 +29,9 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;

import com.cloud.dc.DataCenter;
import com.cloud.network.IpAddressManager;
import com.cloud.utils.Pair;
import org.apache.log4j.Logger;
import org.junit.Assert;
import org.junit.Before;
@@ -125,6 +128,7 @@ public class NetworkOrchestratorTest extends TestCase {
testOrchastrator.routerNetworkDao = mock(RouterNetworkDao.class);
testOrchastrator._vpcMgr = mock(VpcManager.class);
testOrchastrator.routerJoinDao = mock(DomainRouterJoinDao.class);
testOrchastrator._ipAddrMgr = mock(IpAddressManager.class);
DhcpServiceProvider provider = mock(DhcpServiceProvider.class);

Map<Network.Capability, String> capabilities = new HashMap<Network.Capability, String>();
@@ -708,4 +712,134 @@ public class NetworkOrchestratorTest extends TestCase {
Assert.assertEquals(ip6Dns[0], profile.getIPv6Dns1());
Assert.assertEquals(ip6Dns[1], profile.getIPv6Dns2());
}

@Test
public void testGetNetworkGatewayAndNetmaskForNicImportAdvancedZone() {
Network network = Mockito.mock(Network.class);
DataCenter dataCenter = Mockito.mock(DataCenter.class);
String ipAddress = "10.1.1.10";

String networkGateway = "10.1.1.1";
String networkNetmask = "255.255.255.0";
String networkCidr = "10.1.1.0/24";
Mockito.when(dataCenter.getNetworkType()).thenReturn(DataCenter.NetworkType.Advanced);
Mockito.when(network.getGateway()).thenReturn(networkGateway);
Mockito.when(network.getCidr()).thenReturn(networkCidr);
Pair<String, String> pair = testOrchastrator.getNetworkGatewayAndNetmaskForNicImport(network, dataCenter, ipAddress);
Assert.assertNotNull(pair);
Assert.assertEquals(networkGateway, pair.first());
Assert.assertEquals(networkNetmask, pair.second());
}

@Test
public void testGetNetworkGatewayAndNetmaskForNicImportBasicZone() {
Network network = Mockito.mock(Network.class);
DataCenter dataCenter = Mockito.mock(DataCenter.class);
IPAddressVO ipAddressVO = Mockito.mock(IPAddressVO.class);
String ipAddress = "172.1.1.10";

String defaultNetworkGateway = "172.1.1.1";
String defaultNetworkNetmask = "255.255.255.0";
VlanVO vlan = Mockito.mock(VlanVO.class);
Mockito.when(vlan.getVlanGateway()).thenReturn(defaultNetworkGateway);
Mockito.when(vlan.getVlanNetmask()).thenReturn(defaultNetworkNetmask);
Mockito.when(dataCenter.getNetworkType()).thenReturn(DataCenter.NetworkType.Basic);
Mockito.when(ipAddressVO.getVlanId()).thenReturn(1L);
Mockito.when(testOrchastrator._vlanDao.findById(1L)).thenReturn(vlan);
Mockito.when(testOrchastrator._ipAddressDao.findByIp(ipAddress)).thenReturn(ipAddressVO);
Pair<String, String> pair = testOrchastrator.getNetworkGatewayAndNetmaskForNicImport(network, dataCenter, ipAddress);
Assert.assertNotNull(pair);
Assert.assertEquals(defaultNetworkGateway, pair.first());
Assert.assertEquals(defaultNetworkNetmask, pair.second());
}

@Test
public void testGetGuestIpForNicImportL2Network() {
Network network = Mockito.mock(Network.class);
DataCenter dataCenter = Mockito.mock(DataCenter.class);
Network.IpAddresses ipAddresses = Mockito.mock(Network.IpAddresses.class);
Mockito.when(network.getGuestType()).thenReturn(GuestType.L2);
Assert.assertNull(testOrchastrator.getSelectedIpForNicImport(network, dataCenter, ipAddresses));
}

@Test
public void testGetGuestIpForNicImportAdvancedZone() {
Network network = Mockito.mock(Network.class);
DataCenter dataCenter = Mockito.mock(DataCenter.class);
Network.IpAddresses ipAddresses = Mockito.mock(Network.IpAddresses.class);
Mockito.when(network.getGuestType()).thenReturn(GuestType.Isolated);
Mockito.when(dataCenter.getNetworkType()).thenReturn(DataCenter.NetworkType.Advanced);
String ipAddress = "10.1.10.10";
Mockito.when(ipAddresses.getIp4Address()).thenReturn(ipAddress);
Mockito.when(testOrchastrator._ipAddrMgr.acquireGuestIpAddress(network, ipAddress)).thenReturn(ipAddress);
String guestIp = testOrchastrator.getSelectedIpForNicImport(network, dataCenter, ipAddresses);
Assert.assertEquals(ipAddress, guestIp);
}

@Test
public void testGetGuestIpForNicImportBasicZoneAutomaticIP() {
Network network = Mockito.mock(Network.class);
DataCenter dataCenter = Mockito.mock(DataCenter.class);
Network.IpAddresses ipAddresses = Mockito.mock(Network.IpAddresses.class);
Mockito.when(network.getGuestType()).thenReturn(GuestType.Shared);
Mockito.when(dataCenter.getNetworkType()).thenReturn(DataCenter.NetworkType.Basic);
long networkId = 1L;
long dataCenterId = 1L;
String freeIp = "172.10.10.10";
IPAddressVO ipAddressVO = Mockito.mock(IPAddressVO.class);
Ip ip = mock(Ip.class);
Mockito.when(ip.addr()).thenReturn(freeIp);
Mockito.when(ipAddressVO.getAddress()).thenReturn(ip);
Mockito.when(ipAddressVO.getState()).thenReturn(State.Free);
Mockito.when(network.getId()).thenReturn(networkId);
Mockito.when(dataCenter.getId()).thenReturn(dataCenterId);
Mockito.when(testOrchastrator._ipAddressDao.findBySourceNetworkIdAndDatacenterIdAndState(networkId, dataCenterId, State.Free)).thenReturn(ipAddressVO);
String ipAddress = testOrchastrator.getSelectedIpForNicImport(network, dataCenter, ipAddresses);
Assert.assertEquals(freeIp, ipAddress);
}

@Test
public void testGetGuestIpForNicImportBasicZoneManualIP() {
Network network = Mockito.mock(Network.class);
DataCenter dataCenter = Mockito.mock(DataCenter.class);
Network.IpAddresses ipAddresses = Mockito.mock(Network.IpAddresses.class);
Mockito.when(network.getGuestType()).thenReturn(GuestType.Shared);
Mockito.when(dataCenter.getNetworkType()).thenReturn(DataCenter.NetworkType.Basic);
long networkId = 1L;
long dataCenterId = 1L;
String requestedIp = "172.10.10.10";
IPAddressVO ipAddressVO = Mockito.mock(IPAddressVO.class);
Ip ip = mock(Ip.class);
Mockito.when(ip.addr()).thenReturn(requestedIp);
Mockito.when(ipAddressVO.getAddress()).thenReturn(ip);
Mockito.when(ipAddressVO.getState()).thenReturn(State.Free);
Mockito.when(network.getId()).thenReturn(networkId);
Mockito.when(dataCenter.getId()).thenReturn(dataCenterId);
Mockito.when(ipAddresses.getIp4Address()).thenReturn(requestedIp);
Mockito.when(testOrchastrator._ipAddressDao.findByIp(requestedIp)).thenReturn(ipAddressVO);
String ipAddress = testOrchastrator.getSelectedIpForNicImport(network, dataCenter, ipAddresses);
Assert.assertEquals(requestedIp, ipAddress);
}

@Test(expected = CloudRuntimeException.class)
public void testGetGuestIpForNicImportBasicUsedIP() {
Network network = Mockito.mock(Network.class);
DataCenter dataCenter = Mockito.mock(DataCenter.class);
Network.IpAddresses ipAddresses = Mockito.mock(Network.IpAddresses.class);
Mockito.when(network.getGuestType()).thenReturn(GuestType.Shared);
Mockito.when(dataCenter.getNetworkType()).thenReturn(DataCenter.NetworkType.Basic);
long networkId = 1L;
long dataCenterId = 1L;
String requestedIp = "172.10.10.10";
IPAddressVO ipAddressVO = Mockito.mock(IPAddressVO.class);
Ip ip = mock(Ip.class);
Mockito.when(ip.addr()).thenReturn(requestedIp);
Mockito.when(ipAddressVO.getAddress()).thenReturn(ip);
Mockito.when(ipAddressVO.getState()).thenReturn(State.Allocated);
Mockito.when(network.getId()).thenReturn(networkId);
Mockito.when(dataCenter.getId()).thenReturn(dataCenterId);
Mockito.when(ipAddresses.getIp4Address()).thenReturn(requestedIp);
Mockito.when(testOrchastrator._ipAddressDao.findByIp(requestedIp)).thenReturn(ipAddressVO);
testOrchastrator.getSelectedIpForNicImport(network, dataCenter, ipAddresses);
}
}

@@ -75,11 +75,17 @@ public class HypervisorCapabilitiesDaoImpl extends GenericDaoBase<HypervisorCapa
sc.setParameters("hypervisorType", hypervisorType);
sc.setParameters("hypervisorVersion", hypervisorVersion);
HypervisorCapabilitiesVO result = findOneBy(sc);
String parentVersion = CloudStackVersion.getVMwareParentVersion(hypervisorVersion);
if (result != null || !HypervisorType.VMware.equals(hypervisorType) ||
CloudStackVersion.getVMwareParentVersion(hypervisorVersion) == null) {
parentVersion == null) {
return result;
}
sc.setParameters("hypervisorVersion", CloudStackVersion.getVMwareParentVersion(hypervisorVersion));
if (s_logger.isDebugEnabled()) {
s_logger.debug(String.format("Hypervisor capabilities for hypervisor: %s, version: %s can not be found. " +
"Trying to find capabilities for the parent version: %s",
hypervisorType, hypervisorVersion, parentVersion));
}
sc.setParameters("hypervisorVersion", parentVersion);
return findOneBy(sc);
}

@@ -103,4 +103,6 @@ public interface IPAddressDao extends GenericDao<IPAddressVO, Long> {
List<IPAddressVO> listByNetworkId(long networkId);

void buildQuarantineSearchCriteria(SearchCriteria<IPAddressVO> sc);

IPAddressVO findBySourceNetworkIdAndDatacenterIdAndState(long sourceNetworkId, long dataCenterId, State state);
}

@@ -554,4 +554,13 @@ public class IPAddressDaoImpl extends GenericDaoBase<IPAddressVO, Long> implemen

sc.setParametersIfNotNull("quarantinedPublicIpsIdsNIN", quarantinedIpsIdsAllowedToUser);
}

@Override
public IPAddressVO findBySourceNetworkIdAndDatacenterIdAndState(long sourceNetworkId, long dataCenterId, State state) {
SearchCriteria<IPAddressVO> sc = AllFieldsSearch.create();
sc.setParameters("sourcenetwork", sourceNetworkId);
sc.setParameters("dataCenterId", dataCenterId);
sc.setParameters("state", State.Free);
return findOneBy(sc);
}
}

@@ -20,9 +20,12 @@ import java.util.ArrayList;
import java.util.Date;
import java.util.List;

import org.apache.cloudstack.utils.CloudStackVersion;
import org.apache.commons.collections.CollectionUtils;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;

import com.cloud.hypervisor.Hypervisor;
import com.cloud.storage.GuestOSHypervisorVO;
import com.cloud.utils.db.Filter;
import com.cloud.utils.db.QueryBuilder;
@@ -32,6 +35,7 @@ import com.cloud.utils.db.SearchCriteria;

@Component
public class GuestOSHypervisorDaoImpl extends GenericDaoBase<GuestOSHypervisorVO, Long> implements GuestOSHypervisorDao {
private static final Logger s_logger = Logger.getLogger(GuestOSHypervisorDaoImpl.class);

protected final SearchBuilder<GuestOSHypervisorVO> guestOsSearch;
protected final SearchBuilder<GuestOSHypervisorVO> mappingSearch;
@@ -80,6 +84,29 @@ public class GuestOSHypervisorDaoImpl extends GenericDaoBase<GuestOSHypervisorVO
hypervisorTypeAndVersionSearch.done();
}

private GuestOSHypervisorVO getMappingForHypervisorVersionOrParentVersionIfNeeded(GuestOSHypervisorVO mapping,
String hypervisorType, String hypervisorVersion, Long guestOsId, String guestOsName) {
if (mapping != null || !Hypervisor.HypervisorType.VMware.toString().equals(hypervisorType)) {
return mapping;
}
String guestOs = guestOsId != null ? String.format("guest OS ID: %d", guestOsId) : String.format("guest OS ID: %s", guestOsName);
String parentVersion = CloudStackVersion.getVMwareParentVersion(hypervisorVersion);
if (parentVersion == null) {
if (s_logger.isDebugEnabled()) {
s_logger.info(String.format("Mapping for %s for hypervisor: %s with version: %s can not be found. Parent version is also null",
guestOs, hypervisorType, hypervisorVersion));
}
return null;
}
if (s_logger.isDebugEnabled()) {
s_logger.debug(String.format("Mapping for %s for hypervisor: %s with version: %s can not be found. " +
"Trying to find one for the parent version: %s", guestOs, hypervisorType, hypervisorVersion, parentVersion));
}
return guestOsId != null ?
findByOsIdAndHypervisorInternal(guestOsId, hypervisorType, parentVersion):
findByOsNameAndHypervisorInternal(guestOsName, hypervisorType, parentVersion);
}

@Override
public List<GuestOSHypervisorVO> listByGuestOsId(long guestOsId) {
SearchCriteria<GuestOSHypervisorVO> sc = guestOsSearch.create();
@@ -87,8 +114,7 @@ public class GuestOSHypervisorDaoImpl extends GenericDaoBase<GuestOSHypervisorVO
return listBy(sc);
}

@Override
public GuestOSHypervisorVO findByOsIdAndHypervisor(long guestOsId, String hypervisorType, String hypervisorVersion) {
private GuestOSHypervisorVO findByOsIdAndHypervisorInternal(long guestOsId, String hypervisorType, String hypervisorVersion) {
SearchCriteria<GuestOSHypervisorVO> sc = mappingSearch.create();
String version = "default";
if (!(hypervisorVersion == null || hypervisorVersion.isEmpty())) {
@@ -100,6 +126,12 @@ public class GuestOSHypervisorDaoImpl extends GenericDaoBase<GuestOSHypervisorVO
return findOneBy(sc);
}

@Override
public GuestOSHypervisorVO findByOsIdAndHypervisor(long guestOsId, String hypervisorType, String hypervisorVersion) {
GuestOSHypervisorVO mapping = findByOsIdAndHypervisorInternal(guestOsId, hypervisorType, hypervisorVersion);
return getMappingForHypervisorVersionOrParentVersionIfNeeded(mapping, hypervisorType, hypervisorVersion, guestOsId, null);
}

@Override
public GuestOSHypervisorVO findByOsIdAndHypervisorAndUserDefined(long guestOsId, String hypervisorType, String hypervisorVersion, boolean isUserDefined) {
SearchCriteria<GuestOSHypervisorVO> sc = userDefinedMappingSearch.create();
@@ -123,8 +155,7 @@ public class GuestOSHypervisorDaoImpl extends GenericDaoBase<GuestOSHypervisorVO
return super.remove(id);
}

@Override
public GuestOSHypervisorVO findByOsNameAndHypervisor(String guestOsName, String hypervisorType, String hypervisorVersion) {
private GuestOSHypervisorVO findByOsNameAndHypervisorInternal(String guestOsName, String hypervisorType, String hypervisorVersion) {
SearchCriteria<GuestOSHypervisorVO> sc = guestOsNameSearch.create();
String version = "default";
if (!(hypervisorVersion == null || hypervisorVersion.isEmpty())) {
@@ -138,6 +169,12 @@ public class GuestOSHypervisorDaoImpl extends GenericDaoBase<GuestOSHypervisorVO
return CollectionUtils.isNotEmpty(results) ? results.get(0) : null;
}

@Override
public GuestOSHypervisorVO findByOsNameAndHypervisor(String guestOsName, String hypervisorType, String hypervisorVersion) {
GuestOSHypervisorVO mapping = findByOsNameAndHypervisorInternal(guestOsName, hypervisorType, hypervisorVersion);
return getMappingForHypervisorVersionOrParentVersionIfNeeded(mapping, hypervisorType, hypervisorVersion, null, guestOsName);
}

@Override
public GuestOSHypervisorVO findByOsNameAndHypervisorOrderByCreatedDesc(String guestOsName, String hypervisorType, String hypervisorVersion) {
SearchCriteria<GuestOSHypervisorVO> sc = guestOsNameSearch.create();

@@ -83,8 +83,9 @@ public class VolumeDaoImpl extends GenericDaoBase<VolumeVO, Long> implements Vol
protected static final String SELECT_HYPERTYPE_FROM_ZONE_VOLUME = "SELECT s.hypervisor from volumes v, storage_pool s where v.pool_id = s.id and v.id = ?";
protected static final String SELECT_POOLSCOPE = "SELECT s.scope from storage_pool s, volumes v where s.id = v.pool_id and v.id = ?";

private static final String ORDER_POOLS_NUMBER_OF_VOLUMES_FOR_ACCOUNT = "SELECT pool.id, SUM(IF(vol.state='Ready' AND vol.account_id = ?, 1, 0)) FROM `cloud`.`storage_pool` pool LEFT JOIN `cloud`.`volumes` vol ON pool.id = vol.pool_id WHERE pool.data_center_id = ? "
+ " AND pool.pod_id = ? AND pool.cluster_id = ? " + " GROUP BY pool.id ORDER BY 2 ASC ";
private static final String ORDER_POOLS_NUMBER_OF_VOLUMES_FOR_ACCOUNT_PART1 = "SELECT pool.id, SUM(IF(vol.state='Ready' AND vol.account_id = ?, 1, 0)) FROM `cloud`.`storage_pool` pool LEFT JOIN `cloud`.`volumes` vol ON pool.id = vol.pool_id WHERE pool.data_center_id = ? ";
private static final String ORDER_POOLS_NUMBER_OF_VOLUMES_FOR_ACCOUNT_PART2 = " GROUP BY pool.id ORDER BY 2 ASC ";

private static final String ORDER_ZONE_WIDE_POOLS_NUMBER_OF_VOLUMES_FOR_ACCOUNT = "SELECT pool.id, SUM(IF(vol.state='Ready' AND vol.account_id = ?, 1, 0)) FROM `cloud`.`storage_pool` pool LEFT JOIN `cloud`.`volumes` vol ON pool.id = vol.pool_id WHERE pool.data_center_id = ? "
+ " AND pool.scope = 'ZONE' AND pool.status='Up' " + " GROUP BY pool.id ORDER BY 2 ASC ";

@@ -612,14 +613,27 @@ public class VolumeDaoImpl extends GenericDaoBase<VolumeVO, Long> implements Vol
public List<Long> listPoolIdsByVolumeCount(long dcId, Long podId, Long clusterId, long accountId) {
TransactionLegacy txn = TransactionLegacy.currentTxn();
PreparedStatement pstmt = null;
List<Long> result = new ArrayList<Long>();
List<Long> result = new ArrayList<>();
StringBuilder sql = new StringBuilder(ORDER_POOLS_NUMBER_OF_VOLUMES_FOR_ACCOUNT_PART1);
try {
String sql = ORDER_POOLS_NUMBER_OF_VOLUMES_FOR_ACCOUNT;
pstmt = txn.prepareAutoCloseStatement(sql);
pstmt.setLong(1, accountId);
pstmt.setLong(2, dcId);
pstmt.setLong(3, podId);
pstmt.setLong(4, clusterId);
List<Long> resourceIdList = new ArrayList<>();
resourceIdList.add(accountId);
resourceIdList.add(dcId);

if (podId != null) {
sql.append(" AND pool.pod_id = ?");
resourceIdList.add(podId);
}
if (clusterId != null) {
sql.append(" AND pool.cluster_id = ?");
resourceIdList.add(clusterId);
}
sql.append(ORDER_POOLS_NUMBER_OF_VOLUMES_FOR_ACCOUNT_PART2);

pstmt = txn.prepareAutoCloseStatement(sql.toString());
for (int i = 0; i < resourceIdList.size(); i++) {
pstmt.setLong(i + 1, resourceIdList.get(i));
}

ResultSet rs = pstmt.executeQuery();
while (rs.next()) {
@@ -627,9 +641,11 @@ public class VolumeDaoImpl extends GenericDaoBase<VolumeVO, Long> implements Vol
}
return result;
} catch (SQLException e) {
throw new CloudRuntimeException("DB Exception on: " + ORDER_POOLS_NUMBER_OF_VOLUMES_FOR_ACCOUNT, e);
s_logger.debug("DB Exception on: " + sql.toString(), e);
throw new CloudRuntimeException(e);
} catch (Throwable e) {
throw new CloudRuntimeException("Caught: " + ORDER_POOLS_NUMBER_OF_VOLUMES_FOR_ACCOUNT, e);
s_logger.debug("Caught: " + sql.toString(), e);
throw new CloudRuntimeException(e);
}
}

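The VolumeDaoImpl change above splits the fixed query into PART1/PART2 and appends the pod and cluster filters only when those IDs are present, collecting the bind values in the same order. The generic JDBC sketch below shows that pattern in isolation; the table and column names are illustrative, not CloudStack's schema.

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;

// Generic sketch of a conditional WHERE clause with an ordered parameter list, so the
// 1-based JDBC parameter indexes always line up with the appended placeholders.
class ConditionalQuerySketch {
    List<Long> listPoolIds(Connection conn, long dcId, Long podId, Long clusterId) throws SQLException {
        StringBuilder sql = new StringBuilder("SELECT id FROM storage_pool WHERE data_center_id = ?");
        List<Long> params = new ArrayList<>();
        params.add(dcId);
        if (podId != null) {
            sql.append(" AND pod_id = ?");
            params.add(podId);
        }
        if (clusterId != null) {
            sql.append(" AND cluster_id = ?");
            params.add(clusterId);
        }
        sql.append(" ORDER BY id");

        List<Long> ids = new ArrayList<>();
        try (PreparedStatement ps = conn.prepareStatement(sql.toString())) {
            for (int i = 0; i < params.size(); i++) {
                ps.setLong(i + 1, params.get(i)); // JDBC parameters are 1-based
            }
            try (ResultSet rs = ps.executeQuery()) {
                while (rs.next()) {
                    ids.add(rs.getLong(1));
                }
            }
        }
        return ids;
    }
}
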
@@ -356,3 +356,7 @@ CREATE TABLE `cloud_usage`.`bucket_statistics` (

-- Add remover account ID to quarantined IPs table.
CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.quarantined_ips', 'remover_account_id', 'bigint(20) unsigned DEFAULT NULL COMMENT "ID of the account that removed the IP from quarantine, foreign key to `account` table"');

-- Explicitly add support for VMware 8.0b (8.0.0.2), 8.0c (8.0.0.3)
INSERT IGNORE INTO `cloud`.`hypervisor_capabilities` (uuid, hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled, max_data_volumes_limit, max_hosts_per_cluster, storage_motion_supported, vm_snapshot_enabled) values (UUID(), 'VMware', '8.0.0.2', 1024, 0, 59, 64, 1, 1);
INSERT IGNORE INTO `cloud`.`hypervisor_capabilities` (uuid, hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled, max_data_volumes_limit, max_hosts_per_cluster, storage_motion_supported, vm_snapshot_enabled) values (UUID(), 'VMware', '8.0.0.3', 1024, 0, 59, 64, 1, 1);

@@ -0,0 +1,105 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.storage.dao;

import static org.mockito.ArgumentMatchers.anyInt;
import static org.mockito.ArgumentMatchers.anyLong;
import static org.mockito.ArgumentMatchers.startsWith;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;

import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.MockedStatic;
import org.mockito.Mockito;
import org.mockito.junit.MockitoJUnitRunner;

import com.cloud.utils.db.TransactionLegacy;

@RunWith(MockitoJUnitRunner.class)
public class VolumeDaoImplTest {
@Mock
private PreparedStatement preparedStatementMock;

@Mock
private TransactionLegacy transactionMock;

private static MockedStatic<TransactionLegacy> mockedTransactionLegacy;

private final VolumeDaoImpl volumeDao = new VolumeDaoImpl();

@BeforeClass
public static void init() {
mockedTransactionLegacy = Mockito.mockStatic(TransactionLegacy.class);
}

@AfterClass
public static void close() {
mockedTransactionLegacy.close();
}

@Test
public void testListPoolIdsByVolumeCount_with_cluster_details() throws SQLException {
final String ORDER_POOLS_NUMBER_OF_VOLUMES_FOR_ACCOUNT_QUERY_WITH_CLUSTER =
"SELECT pool.id, SUM(IF(vol.state='Ready' AND vol.account_id = ?, 1, 0)) FROM `cloud`.`storage_pool` pool LEFT JOIN `cloud`.`volumes` vol ON pool.id = vol.pool_id WHERE pool.data_center_id = ? AND pool.pod_id = ? AND pool.cluster_id = ? GROUP BY pool.id ORDER BY 2 ASC ";
final long dcId = 1, accountId = 1;
final Long podId = 1L, clusterId = 1L;

when(TransactionLegacy.currentTxn()).thenReturn(transactionMock);
when(transactionMock.prepareAutoCloseStatement(startsWith(ORDER_POOLS_NUMBER_OF_VOLUMES_FOR_ACCOUNT_QUERY_WITH_CLUSTER))).thenReturn(preparedStatementMock);
ResultSet rs = Mockito.mock(ResultSet.class);
when(preparedStatementMock.executeQuery()).thenReturn(rs, rs);

volumeDao.listPoolIdsByVolumeCount(dcId, podId, clusterId, accountId);

verify(transactionMock, times(1)).prepareAutoCloseStatement(ORDER_POOLS_NUMBER_OF_VOLUMES_FOR_ACCOUNT_QUERY_WITH_CLUSTER);
verify(preparedStatementMock, times(1)).setLong(1, accountId);
verify(preparedStatementMock, times(1)).setLong(2, dcId);
verify(preparedStatementMock, times(1)).setLong(3, podId);
verify(preparedStatementMock, times(1)).setLong(4, clusterId);
verify(preparedStatementMock, times(4)).setLong(anyInt(), anyLong());
verify(preparedStatementMock, times(1)).executeQuery();
}

@Test
public void testListPoolIdsByVolumeCount_without_cluster_details() throws SQLException {
final String ORDER_POOLS_NUMBER_OF_VOLUMES_FOR_ACCOUNT_QUERY_WITHOUT_CLUSTER =
"SELECT pool.id, SUM(IF(vol.state='Ready' AND vol.account_id = ?, 1, 0)) FROM `cloud`.`storage_pool` pool LEFT JOIN `cloud`.`volumes` vol ON pool.id = vol.pool_id WHERE pool.data_center_id = ? GROUP BY pool.id ORDER BY 2 ASC ";
final long dcId = 1, accountId = 1;

when(TransactionLegacy.currentTxn()).thenReturn(transactionMock);
when(transactionMock.prepareAutoCloseStatement(startsWith(ORDER_POOLS_NUMBER_OF_VOLUMES_FOR_ACCOUNT_QUERY_WITHOUT_CLUSTER))).thenReturn(preparedStatementMock);
ResultSet rs = Mockito.mock(ResultSet.class);
when(preparedStatementMock.executeQuery()).thenReturn(rs, rs);

volumeDao.listPoolIdsByVolumeCount(dcId, null, null, accountId);

verify(transactionMock, times(1)).prepareAutoCloseStatement(ORDER_POOLS_NUMBER_OF_VOLUMES_FOR_ACCOUNT_QUERY_WITHOUT_CLUSTER);
verify(preparedStatementMock, times(1)).setLong(1, accountId);
verify(preparedStatementMock, times(1)).setLong(2, dcId);
verify(preparedStatementMock, times(2)).setLong(anyInt(), anyLong());
verify(preparedStatementMock, times(1)).executeQuery();
}
}

@@ -17,13 +17,13 @@
package org.apache.cloudstack.storage.allocator;

import com.cloud.deploy.DeploymentPlan;
import com.cloud.deploy.DeploymentPlanner;
import com.cloud.storage.Storage;
import com.cloud.storage.StoragePool;
import com.cloud.user.Account;
import com.cloud.vm.DiskProfile;
import com.cloud.vm.VirtualMachineProfile;
import static org.mockito.Mockito.when;

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.junit.After;
import org.junit.Assert;
@@ -34,10 +34,14 @@ import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.junit.MockitoJUnitRunner;

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import com.cloud.deploy.DeploymentPlan;
import com.cloud.deploy.DeploymentPlanner;
import com.cloud.storage.Storage;
import com.cloud.storage.StoragePool;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.user.Account;
import com.cloud.vm.DiskProfile;
import com.cloud.vm.VirtualMachineProfile;

@RunWith(MockitoJUnitRunner.class)
public class AbstractStoragePoolAllocatorTest {
@@ -51,6 +55,9 @@ public class AbstractStoragePoolAllocatorTest {
Account account;
private List<StoragePool> pools;

@Mock
VolumeDao volumeDao;

@Before
public void setUp() {
pools = new ArrayList<>();
@@ -83,6 +90,29 @@ public class AbstractStoragePoolAllocatorTest {
Mockito.verify(allocator, Mockito.times(0)).reorderRandomPools(pools);
}

@Test
public void reorderStoragePoolsBasedOnAlgorithm_userdispersing_reorder_check() {
allocator.allocationAlgorithm = "userdispersing";
allocator.volumeDao = volumeDao;

when(plan.getDataCenterId()).thenReturn(1l);
when(plan.getPodId()).thenReturn(1l);
when(plan.getClusterId()).thenReturn(1l);
when(account.getAccountId()).thenReturn(1l);
List<Long> poolIds = new ArrayList<>();
poolIds.add(1l);
poolIds.add(9l);
when(volumeDao.listPoolIdsByVolumeCount(1l,1l,1l,1l)).thenReturn(poolIds);

List<StoragePool> reorderedPools = allocator.reorderStoragePoolsBasedOnAlgorithm(pools, plan, account);
Assert.assertEquals(poolIds.size(),reorderedPools.size());

Mockito.verify(allocator, Mockito.times(0)).reorderPoolsByCapacity(plan, pools);
Mockito.verify(allocator, Mockito.times(1)).reorderPoolsByNumberOfVolumes(plan, pools, account);
Mockito.verify(allocator, Mockito.times(0)).reorderRandomPools(pools);
Mockito.verify(volumeDao, Mockito.times(1)).listPoolIdsByVolumeCount(1l,1l,1l,1l);
}

@Test
public void reorderStoragePoolsBasedOnAlgorithm_firstfitleastconsumed() {
allocator.allocationAlgorithm = "firstfitleastconsumed";

@@ -101,9 +101,13 @@ public class DefaultModuleDefinitionSet implements ModuleDefinitionSet {
log.debug(String.format("Trying to obtain module [%s] context.", moduleDefinitionName));
ApplicationContext context = getApplicationContext(moduleDefinitionName);
try {
Runnable runnable = context.getBean("moduleStartup", Runnable.class);
log.info(String.format("Starting module [%s].", moduleDefinitionName));
runnable.run();
if (context.containsBean("moduleStartup")) {
Runnable runnable = context.getBean("moduleStartup", Runnable.class);
log.info(String.format("Starting module [%s].", moduleDefinitionName));
runnable.run();
} else {
log.debug(String.format("Could not get module [%s] context bean.", moduleDefinitionName));
}
} catch (BeansException e) {
log.warn(String.format("Failed to start module [%s] due to: [%s].", moduleDefinitionName, e.getMessage()));
if (log.isDebugEnabled()) {
@@ -126,6 +130,10 @@ public class DefaultModuleDefinitionSet implements ModuleDefinitionSet {
public void with(ModuleDefinition def, Stack<ModuleDefinition> parents) {
try {
String moduleDefinitionName = def.getName();
if (parents.isEmpty()) {
log.debug(String.format("Could not find module [%s] context as they have no parents.", moduleDefinitionName));
return;
}
log.debug(String.format("Trying to obtain module [%s] context.", moduleDefinitionName));
ApplicationContext parent = getApplicationContext(parents.peek().getName());
log.debug(String.format("Trying to load module [%s] context.", moduleDefinitionName));

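The containsBean() guard added above matters because getBean() throws NoSuchBeanDefinitionException (a BeansException) when no such bean is defined, so modules without a "moduleStartup" bean previously fell through to the warn path. A minimal standalone Spring sketch of the same guard, assuming an XML context file whose name here is illustrative:

import org.springframework.context.ApplicationContext;
import org.springframework.context.support.ClassPathXmlApplicationContext;

// Sketch only: look up an optional startup bean without triggering an exception when
// the module does not define one.
public class ModuleStartupGuardSketch {
    public static void main(String[] args) {
        ApplicationContext context = new ClassPathXmlApplicationContext("module-context.xml");
        if (context.containsBean("moduleStartup")) {
            Runnable startup = context.getBean("moduleStartup", Runnable.class);
            startup.run();
        }
    }
}
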
@@ -68,7 +68,7 @@ public class BalancedTest {

List<Long> cpuList, memoryList;

Map<Long, Long> hostCpuUsedMap, hostMemoryUsedMap;
Map<Long, Long> hostCpuFreeMap, hostMemoryFreeMap;

@Mock
@@ -105,13 +105,13 @@ public class BalancedTest {
cpuList = Arrays.asList(1L, 2L);
memoryList = Arrays.asList(512L, 2048L);

hostCpuUsedMap = new HashMap<>();
hostCpuUsedMap.put(1L, 1000L);
hostCpuUsedMap.put(2L, 2000L);
hostCpuFreeMap = new HashMap<>();
hostCpuFreeMap.put(1L, 2000L);
hostCpuFreeMap.put(2L, 1000L);

hostMemoryUsedMap = new HashMap<>();
hostMemoryUsedMap.put(1L, 512L * 1024L * 1024L);
hostMemoryUsedMap.put(2L, 2048L * 1024L * 1024L);
hostMemoryFreeMap = new HashMap<>();
hostMemoryFreeMap.put(1L, 2048L * 1024L * 1024L);
hostMemoryFreeMap.put(2L, 512L * 1024L * 1024L);
}

private void overrideDefaultConfigValue(final ConfigKey configKey, final String name,
@@ -191,7 +191,7 @@ public class BalancedTest {
public void getMetricsWithCpu() throws NoSuchFieldException, IllegalAccessException {
overrideDefaultConfigValue(ClusterDrsMetric, "_defaultValue", "cpu");
Ternary<Double, Double, Double> result = balanced.getMetrics(clusterId, vm3, serviceOffering, destHost,
hostCpuUsedMap, hostMemoryUsedMap, false);
hostCpuFreeMap, hostMemoryFreeMap, false);
assertEquals(0.0, result.first(), 0.01);
assertEquals(0.0, result.second(), 0.0);
assertEquals(1.0, result.third(), 0.0);
@@ -205,7 +205,7 @@ public class BalancedTest {
public void getMetricsWithMemory() throws NoSuchFieldException, IllegalAccessException {
overrideDefaultConfigValue(ClusterDrsMetric, "_defaultValue", "memory");
Ternary<Double, Double, Double> result = balanced.getMetrics(clusterId, vm3, serviceOffering, destHost,
hostCpuUsedMap, hostMemoryUsedMap, false);
hostCpuFreeMap, hostMemoryFreeMap, false);
assertEquals(0.4, result.first(), 0.01);
assertEquals(0, result.second(), 0.0);
assertEquals(1, result.third(), 0.0);
@@ -219,7 +219,7 @@ public class BalancedTest {
public void getMetricsWithDefault() throws NoSuchFieldException, IllegalAccessException {
overrideDefaultConfigValue(ClusterDrsMetric, "_defaultValue", "both");
Ternary<Double, Double, Double> result = balanced.getMetrics(clusterId, vm3, serviceOffering, destHost,
hostCpuUsedMap, hostMemoryUsedMap, false);
hostCpuFreeMap, hostMemoryFreeMap, false);
assertEquals(0.4, result.first(), 0.01);
assertEquals(0, result.second(), 0.0);
assertEquals(1, result.third(), 0.0);

@@ -66,7 +66,7 @@ public class CondensedTest {

List<Long> cpuList, memoryList;

Map<Long, Long> hostCpuUsedMap, hostMemoryUsedMap;
Map<Long, Long> hostCpuFreeMap, hostMemoryFreeMap;

private AutoCloseable closeable;
@@ -98,13 +98,13 @@ public class CondensedTest {
cpuList = Arrays.asList(1L, 2L);
memoryList = Arrays.asList(512L, 2048L);

hostCpuUsedMap = new HashMap<>();
hostCpuUsedMap.put(1L, 1000L);
hostCpuUsedMap.put(2L, 2000L);
hostCpuFreeMap = new HashMap<>();
hostCpuFreeMap.put(1L, 2000L);
hostCpuFreeMap.put(2L, 1000L);

hostMemoryUsedMap = new HashMap<>();
hostMemoryUsedMap.put(1L, 512L * 1024L * 1024L);
hostMemoryUsedMap.put(2L, 2048L * 1024L * 1024L);
hostMemoryFreeMap = new HashMap<>();
hostMemoryFreeMap.put(1L, 2048L * 1024L * 1024L);
hostMemoryFreeMap.put(2L, 512L * 1024L * 1024L);
}

private void overrideDefaultConfigValue(final ConfigKey configKey,
@@ -185,7 +185,7 @@ public class CondensedTest {
public void getMetricsWithCpu() throws NoSuchFieldException, IllegalAccessException {
overrideDefaultConfigValue(ClusterDrsMetric, "_defaultValue", "cpu");
Ternary<Double, Double, Double> result = condensed.getMetrics(clusterId, vm3, serviceOffering, destHost,
hostCpuUsedMap, hostMemoryUsedMap, false);
hostCpuFreeMap, hostMemoryFreeMap, false);
assertEquals(0.0, result.first(), 0.0);
assertEquals(0, result.second(), 0.0);
assertEquals(1, result.third(), 0.0);
@@ -199,7 +199,7 @@ public class CondensedTest {
public void getMetricsWithMemory() throws NoSuchFieldException, IllegalAccessException {
overrideDefaultConfigValue(ClusterDrsMetric, "_defaultValue", "memory");
Ternary<Double, Double, Double> result = condensed.getMetrics(clusterId, vm3, serviceOffering, destHost,
hostCpuUsedMap, hostMemoryUsedMap, false);
hostCpuFreeMap, hostMemoryFreeMap, false);
assertEquals(-0.4, result.first(), 0.01);
assertEquals(0, result.second(), 0.0);
assertEquals(1, result.third(), 0.0);
@@ -213,7 +213,7 @@ public class CondensedTest {
public void getMetricsWithDefault() throws NoSuchFieldException, IllegalAccessException {
overrideDefaultConfigValue(ClusterDrsMetric, "_defaultValue", "both");
Ternary<Double, Double, Double> result = condensed.getMetrics(clusterId, vm3, serviceOffering, destHost,
hostCpuUsedMap, hostMemoryUsedMap, false);
hostCpuFreeMap, hostMemoryFreeMap, false);
assertEquals(-0.4, result.first(), 0.0001);
assertEquals(0, result.second(), 0.0);
assertEquals(1, result.third(), 0.0);

@@ -27,25 +27,25 @@ import com.cloud.resource.ResourceWrapper;
import com.cloud.utils.Pair;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.vm.VirtualMachine;
import org.apache.cloudstack.utils.qemu.QemuImg;
import org.apache.cloudstack.utils.qemu.QemuImgException;
import org.apache.cloudstack.utils.qemu.QemuImgFile;
import org.apache.cloudstack.vm.UnmanagedInstanceTO;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.log4j.Logger;
import org.libvirt.Connect;
import org.libvirt.Domain;
import org.libvirt.DomainBlockInfo;
import org.libvirt.LibvirtException;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

@ResourceWrapper(handles=GetUnmanagedInstancesCommand.class)
public final class LibvirtGetUnmanagedInstancesCommandWrapper extends CommandWrapper<GetUnmanagedInstancesCommand, GetUnmanagedInstancesAnswer, LibvirtComputingResource> {
private static final Logger LOGGER = Logger.getLogger(LibvirtGetUnmanagedInstancesCommandWrapper.class);

private static final int requiredVncPasswordLength = 22;

@Override
public GetUnmanagedInstancesAnswer execute(GetUnmanagedInstancesCommand command, LibvirtComputingResource libvirtComputingResource) {
LOGGER.info("Fetching unmanaged instance on host");
@@ -132,8 +132,8 @@ public final class LibvirtGetUnmanagedInstancesCommandWrapper extends CommandWra
instance.setPowerState(getPowerState(libvirtComputingResource.getVmState(conn,domain.getName())));
instance.setMemory((int) LibvirtComputingResource.getDomainMemory(domain) / 1024);
instance.setNics(getUnmanagedInstanceNics(parser.getInterfaces()));
instance.setDisks(getUnmanagedInstanceDisks(parser.getDisks(),libvirtComputingResource));
instance.setVncPassword(parser.getVncPasswd() + "aaaaaaaaaaaaaa"); // Suffix back extra characters for DB compatibility
instance.setDisks(getUnmanagedInstanceDisks(parser.getDisks(),libvirtComputingResource, conn, domain.getName()));
instance.setVncPassword(getFormattedVncPassword(parser.getVncPasswd()));

return instance;
} catch (Exception e) {
@@ -142,6 +142,14 @@ public final class LibvirtGetUnmanagedInstancesCommandWrapper extends CommandWra
}
}

protected String getFormattedVncPassword(String vncPasswd) {
if (StringUtils.isBlank(vncPasswd)) {
return null;
}
String randomChars = RandomStringUtils.random(requiredVncPasswordLength - vncPasswd.length(), true, false);
return String.format("%s%s", vncPasswd, randomChars);
}

private UnmanagedInstanceTO.PowerState getPowerState(VirtualMachine.PowerState vmPowerState) {
|
||||
switch (vmPowerState) {
|
||||
case PowerOn:
|
||||
@ -170,7 +178,7 @@ public final class LibvirtGetUnmanagedInstancesCommandWrapper extends CommandWra
|
||||
return nics;
|
||||
}
|
||||
|
||||
private List<UnmanagedInstanceTO.Disk> getUnmanagedInstanceDisks(List<LibvirtVMDef.DiskDef> disksInfo, LibvirtComputingResource libvirtComputingResource){
|
||||
private List<UnmanagedInstanceTO.Disk> getUnmanagedInstanceDisks(List<LibvirtVMDef.DiskDef> disksInfo, LibvirtComputingResource libvirtComputingResource, Connect conn, String domainName) {
|
||||
final ArrayList<UnmanagedInstanceTO.Disk> disks = new ArrayList<>(disksInfo.size());
|
||||
int counter = 0;
|
||||
for (LibvirtVMDef.DiskDef diskDef : disksInfo) {
|
||||
@ -180,14 +188,11 @@ public final class LibvirtGetUnmanagedInstancesCommandWrapper extends CommandWra
|
||||
|
||||
final UnmanagedInstanceTO.Disk disk = new UnmanagedInstanceTO.Disk();
|
||||
Long size = null;
|
||||
String imagePath = null;
|
||||
try {
|
||||
QemuImgFile file = new QemuImgFile(diskDef.getSourcePath());
|
||||
QemuImg qemu = new QemuImg(0);
|
||||
Map<String, String> info = qemu.info(file);
|
||||
size = Long.parseLong(info.getOrDefault("virtual_size", "0"));
|
||||
imagePath = info.getOrDefault("image", null);
|
||||
} catch (QemuImgException | LibvirtException e) {
|
||||
Domain dm = conn.domainLookupByName(domainName);
|
||||
DomainBlockInfo blockInfo = dm.blockInfo(diskDef.getDiskLabel());
|
||||
size = blockInfo.getCapacity();
|
||||
} catch (LibvirtException e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
|
||||
@ -197,25 +202,44 @@ public final class LibvirtGetUnmanagedInstancesCommandWrapper extends CommandWra
|
||||
disk.setLabel(diskDef.getDiskLabel());
|
||||
disk.setController(diskDef.getBusType().toString());
|
||||
|
||||
|
||||
Pair<String, String> sourceHostPath = getSourceHostPath(libvirtComputingResource, diskDef.getSourcePath());
|
||||
if (sourceHostPath != null) {
|
||||
disk.setDatastoreHost(sourceHostPath.first());
|
||||
disk.setDatastorePath(sourceHostPath.second());
|
||||
} else {
|
||||
disk.setDatastorePath(diskDef.getSourcePath());
|
||||
int pathEnd = diskDef.getSourcePath().lastIndexOf("/");
|
||||
if (pathEnd >= 0) {
|
||||
disk.setDatastorePath(diskDef.getSourcePath().substring(0, pathEnd));
|
||||
} else {
|
||||
disk.setDatastorePath(diskDef.getSourcePath());
|
||||
}
|
||||
disk.setDatastoreHost(diskDef.getSourceHost());
|
||||
}
|
||||
|
||||
disk.setDatastoreType(diskDef.getDiskType().toString());
|
||||
disk.setDatastorePort(diskDef.getSourceHostPort());
|
||||
disk.setImagePath(imagePath);
|
||||
disk.setDatastoreName(imagePath.substring(imagePath.lastIndexOf("/")));
|
||||
disk.setImagePath(diskDef.getSourcePath());
|
||||
disk.setDatastoreName(disk.getDatastorePath());
|
||||
disk.setFileBaseName(getDiskRelativePath(diskDef));
|
||||
disks.add(disk);
|
||||
}
|
||||
return disks;
|
||||
}
|
||||
|
||||
protected String getDiskRelativePath(LibvirtVMDef.DiskDef diskDef) {
|
||||
if (diskDef == null || diskDef.getDiskType() == null || diskDef.getDiskType() == LibvirtVMDef.DiskDef.DiskType.BLOCK) {
|
||||
return null;
|
||||
}
|
||||
String sourcePath = diskDef.getSourcePath();
|
||||
if (StringUtils.isBlank(sourcePath)) {
|
||||
return null;
|
||||
}
|
||||
if (!sourcePath.contains("/")) {
|
||||
return sourcePath;
|
||||
}
|
||||
return sourcePath.substring(sourcePath.lastIndexOf("/") + 1);
|
||||
}
|
||||
|
||||
private Pair<String, String> getSourceHostPath(LibvirtComputingResource libvirtComputingResource, String diskPath) {
|
||||
int pathEnd = diskPath.lastIndexOf("/");
|
||||
if (pathEnd >= 0) {
|
||||
|
||||
@ -0,0 +1,73 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.hypervisor.kvm.resource.wrapper;

import com.cloud.hypervisor.kvm.resource.LibvirtVMDef;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mockito;
import org.mockito.Spy;
import org.mockito.junit.MockitoJUnitRunner;

import java.util.UUID;

@RunWith(MockitoJUnitRunner.class)
public class LibvirtGetUnmanagedInstancesCommandWrapperTest {

@Spy
private LibvirtGetUnmanagedInstancesCommandWrapper wrapper = new LibvirtGetUnmanagedInstancesCommandWrapper();

@Test
public void testGetDiskRelativePathNullDisk() {
Assert.assertNull(wrapper.getDiskRelativePath(null));
}

@Test
public void testGetDiskRelativePathBlockType() {
LibvirtVMDef.DiskDef diskDef = Mockito.mock(LibvirtVMDef.DiskDef.class);
Mockito.when(diskDef.getDiskType()).thenReturn(LibvirtVMDef.DiskDef.DiskType.BLOCK);
Assert.assertNull(wrapper.getDiskRelativePath(diskDef));
}

@Test
public void testGetDiskRelativePathNullPath() {
LibvirtVMDef.DiskDef diskDef = Mockito.mock(LibvirtVMDef.DiskDef.class);
Mockito.when(diskDef.getDiskType()).thenReturn(LibvirtVMDef.DiskDef.DiskType.FILE);
Mockito.when(diskDef.getSourcePath()).thenReturn(null);
Assert.assertNull(wrapper.getDiskRelativePath(diskDef));
}

@Test
public void testGetDiskRelativePathWithoutSlashes() {
LibvirtVMDef.DiskDef diskDef = Mockito.mock(LibvirtVMDef.DiskDef.class);
Mockito.when(diskDef.getDiskType()).thenReturn(LibvirtVMDef.DiskDef.DiskType.FILE);
String imagePath = UUID.randomUUID().toString();
Mockito.when(diskDef.getSourcePath()).thenReturn(imagePath);
Assert.assertEquals(imagePath, wrapper.getDiskRelativePath(diskDef));
}

@Test
public void testGetDiskRelativePathFullPath() {
LibvirtVMDef.DiskDef diskDef = Mockito.mock(LibvirtVMDef.DiskDef.class);
Mockito.when(diskDef.getDiskType()).thenReturn(LibvirtVMDef.DiskDef.DiskType.FILE);
String relativePath = "ea4b2296-d349-4968-ab72-c8eb523b556e";
String imagePath = String.format("/mnt/97e4c9ed-e3bc-3e26-b103-7967fc9feae1/%s", relativePath);
Mockito.when(diskDef.getSourcePath()).thenReturn(imagePath);
Assert.assertEquals(relativePath, wrapper.getDiskRelativePath(diskDef));
}
}

@ -131,9 +131,9 @@ import com.cloud.network.dao.PhysicalNetworkDao;
import com.cloud.network.router.NetworkHelper;
import com.cloud.network.rules.FirewallRule;
import com.cloud.network.rules.FirewallRuleVO;
import com.cloud.network.security.SecurityGroup;
import com.cloud.network.security.SecurityGroupManager;
import com.cloud.network.security.SecurityGroupService;
import com.cloud.network.security.SecurityGroupVO;
import com.cloud.network.security.SecurityRule;
import com.cloud.network.vpc.NetworkACL;
import com.cloud.offering.NetworkOffering;
@ -1218,22 +1218,9 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
logAndThrow(Level.ERROR, String.format("Creating Kubernetes cluster failed due to error while finding suitable deployment plan for cluster in zone : %s", zone.getName()));
}

SecurityGroupVO securityGroupVO = null;
SecurityGroup securityGroup = null;
if (zone.isSecurityGroupEnabled()) {
securityGroupVO = securityGroupManager.createSecurityGroup(KubernetesClusterActionWorker.CKS_CLUSTER_SECURITY_GROUP_NAME.concat(Long.toHexString(System.currentTimeMillis())), "Security group for CKS nodes", owner.getDomainId(), owner.getId(), owner.getAccountName());
if (securityGroupVO == null) {
throw new CloudRuntimeException(String.format("Failed to create security group: %s", KubernetesClusterActionWorker.CKS_CLUSTER_SECURITY_GROUP_NAME));
}
List<String> cidrList = new ArrayList<>();
cidrList.add(NetUtils.ALL_IP4_CIDRS);
securityGroupService.authorizeSecurityGroupRule(securityGroupVO.getId(), NetUtils.TCP_PROTO,
KubernetesClusterActionWorker.CLUSTER_NODES_DEFAULT_SSH_PORT_SG, KubernetesClusterActionWorker.CLUSTER_NODES_DEFAULT_SSH_PORT_SG,
null, null, cidrList, null, SecurityRule.SecurityRuleType.IngressRule);
securityGroupService.authorizeSecurityGroupRule(securityGroupVO.getId(), NetUtils.TCP_PROTO,
KubernetesClusterActionWorker.CLUSTER_API_PORT, KubernetesClusterActionWorker.CLUSTER_API_PORT,
null, null, cidrList, null, SecurityRule.SecurityRuleType.IngressRule);
securityGroupService.authorizeSecurityGroupRule(securityGroupVO.getId(), NetUtils.ALL_PROTO,
null, null, null, null, cidrList, null, SecurityRule.SecurityRuleType.EgressRule);
securityGroup = getOrCreateSecurityGroupForAccount(owner);
}

final Network defaultNetwork = getKubernetesClusterNetworkIfMissing(cmd.getName(), zone, owner, (int)controlNodeCount, (int)clusterSize, cmd.getExternalLoadBalancerIpAddress(), cmd.getNetworkId());
@ -1241,7 +1228,7 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
final long cores = serviceOffering.getCpu() * (controlNodeCount + clusterSize);
final long memory = serviceOffering.getRamSize() * (controlNodeCount + clusterSize);

SecurityGroupVO finalSecurityGroupVO = securityGroupVO;
final SecurityGroup finalSecurityGroup = securityGroup;
final KubernetesClusterVO cluster = Transaction.execute(new TransactionCallback<KubernetesClusterVO>() {
@Override
public KubernetesClusterVO doInTransaction(TransactionStatus status) {
@ -1250,7 +1237,7 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
owner.getAccountId(), controlNodeCount, clusterSize, KubernetesCluster.State.Created, cmd.getSSHKeyPairName(), cores, memory,
cmd.getNodeRootDiskSize(), "", KubernetesCluster.ClusterType.CloudManaged);
if (zone.isSecurityGroupEnabled()) {
newCluster.setSecurityGroupId(finalSecurityGroupVO.getId());
newCluster.setSecurityGroupId(finalSecurityGroup.getId());
}
kubernetesClusterDao.persist(newCluster);
return newCluster;
@ -1265,6 +1252,29 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
return cluster;
}

private SecurityGroup getOrCreateSecurityGroupForAccount(Account owner) {
String securityGroupName = String.format("%s-%s", KubernetesClusterActionWorker.CKS_CLUSTER_SECURITY_GROUP_NAME, owner.getUuid());
String securityGroupDesc = String.format("%s and account %s", KubernetesClusterActionWorker.CKS_SECURITY_GROUP_DESCRIPTION, owner.getName());
SecurityGroup securityGroup = securityGroupManager.getSecurityGroup(securityGroupName, owner.getId());
if (securityGroup == null) {
securityGroup = securityGroupManager.createSecurityGroup(securityGroupName, securityGroupDesc, owner.getDomainId(), owner.getId(), owner.getAccountName());
if (securityGroup == null) {
throw new CloudRuntimeException(String.format("Failed to create security group: %s", KubernetesClusterActionWorker.CKS_CLUSTER_SECURITY_GROUP_NAME));
}
List<String> cidrList = new ArrayList<>();
cidrList.add(NetUtils.ALL_IP4_CIDRS);
securityGroupService.authorizeSecurityGroupRule(securityGroup.getId(), NetUtils.TCP_PROTO,
KubernetesClusterActionWorker.CLUSTER_NODES_DEFAULT_SSH_PORT_SG, KubernetesClusterActionWorker.CLUSTER_NODES_DEFAULT_SSH_PORT_SG,
null, null, cidrList, null, SecurityRule.SecurityRuleType.IngressRule);
securityGroupService.authorizeSecurityGroupRule(securityGroup.getId(), NetUtils.TCP_PROTO,
KubernetesClusterActionWorker.CLUSTER_API_PORT, KubernetesClusterActionWorker.CLUSTER_API_PORT,
null, null, cidrList, null, SecurityRule.SecurityRuleType.IngressRule);
securityGroupService.authorizeSecurityGroupRule(securityGroup.getId(), NetUtils.ALL_PROTO,
null, null, null, null, cidrList, null, SecurityRule.SecurityRuleType.EgressRule);
}
return securityGroup;
}

/**
* Start operation can be performed at two different life stages of Kubernetes cluster. First when a freshly created cluster
* in which case there are no resources provisioned for the Kubernetes cluster. So during start all the resources

@ -106,6 +106,7 @@ public class KubernetesClusterActionWorker {
public static final int CLUSTER_NODES_DEFAULT_SSH_PORT_SG = DEFAULT_SSH_PORT;

public static final String CKS_CLUSTER_SECURITY_GROUP_NAME = "CKSSecurityGroup";
public static final String CKS_SECURITY_GROUP_DESCRIPTION = "Security group for CKS nodes";

protected static final Logger LOGGER = Logger.getLogger(KubernetesClusterActionWorker.class);

@ -18,16 +18,38 @@
|
||||
*/
|
||||
package org.apache.cloudstack.storage.datastore.driver;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.security.InvalidKeyException;
|
||||
import java.security.NoSuchAlgorithmException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import javax.crypto.KeyGenerator;
|
||||
import javax.crypto.SecretKey;
|
||||
import javax.inject.Inject;
|
||||
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
|
||||
import org.apache.cloudstack.storage.datastore.db.ObjectStoreDao;
|
||||
import org.apache.cloudstack.storage.datastore.db.ObjectStoreDetailsDao;
|
||||
import org.apache.cloudstack.storage.datastore.db.ObjectStoreVO;
|
||||
import org.apache.cloudstack.storage.object.BaseObjectStoreDriverImpl;
|
||||
import org.apache.cloudstack.storage.object.Bucket;
|
||||
import org.apache.cloudstack.storage.object.BucketObject;
|
||||
import org.apache.commons.codec.binary.Base64;
|
||||
import org.apache.commons.lang3.StringUtils;
|
||||
import org.apache.log4j.Logger;
|
||||
|
||||
import com.amazonaws.services.s3.model.AccessControlList;
|
||||
import com.amazonaws.services.s3.model.BucketPolicy;
|
||||
import com.cloud.agent.api.to.DataStoreTO;
|
||||
import org.apache.cloudstack.storage.object.Bucket;
|
||||
import com.cloud.storage.BucketVO;
|
||||
import com.cloud.storage.dao.BucketDao;
|
||||
import com.cloud.user.Account;
|
||||
import com.cloud.user.AccountDetailsDao;
|
||||
import com.cloud.user.dao.AccountDao;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
|
||||
import io.minio.BucketExistsArgs;
|
||||
import io.minio.DeleteBucketEncryptionArgs;
|
||||
import io.minio.MakeBucketArgs;
|
||||
@ -42,26 +64,10 @@ import io.minio.admin.UserInfo;
|
||||
import io.minio.admin.messages.DataUsageInfo;
|
||||
import io.minio.messages.SseConfiguration;
|
||||
import io.minio.messages.VersioningConfiguration;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
|
||||
import org.apache.cloudstack.storage.datastore.db.ObjectStoreDao;
|
||||
import org.apache.cloudstack.storage.datastore.db.ObjectStoreDetailsDao;
|
||||
import org.apache.cloudstack.storage.datastore.db.ObjectStoreVO;
|
||||
import org.apache.cloudstack.storage.object.BaseObjectStoreDriverImpl;
|
||||
import org.apache.cloudstack.storage.object.BucketObject;
|
||||
import org.apache.commons.codec.binary.Base64;
|
||||
import org.apache.log4j.Logger;
|
||||
|
||||
import javax.crypto.KeyGenerator;
|
||||
import javax.crypto.SecretKey;
|
||||
import javax.inject.Inject;
|
||||
import java.security.NoSuchAlgorithmException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
public class MinIOObjectStoreDriverImpl extends BaseObjectStoreDriverImpl {
|
||||
private static final Logger s_logger = Logger.getLogger(MinIOObjectStoreDriverImpl.class);
|
||||
protected static final String ACS_PREFIX = "acs";
|
||||
|
||||
@Inject
|
||||
AccountDao _accountDao;
|
||||
@ -81,14 +87,18 @@ public class MinIOObjectStoreDriverImpl extends BaseObjectStoreDriverImpl {
|
||||
private static final String ACCESS_KEY = "accesskey";
|
||||
private static final String SECRET_KEY = "secretkey";
|
||||
|
||||
private static final String MINIO_ACCESS_KEY = "minio-accesskey";
|
||||
private static final String MINIO_SECRET_KEY = "minio-secretkey";
|
||||
protected static final String MINIO_ACCESS_KEY = "minio-accesskey";
|
||||
protected static final String MINIO_SECRET_KEY = "minio-secretkey";
|
||||
|
||||
@Override
|
||||
public DataStoreTO getStoreTO(DataStore store) {
|
||||
return null;
|
||||
}
|
||||
|
||||
protected String getUserOrAccessKeyForAccount(Account account) {
|
||||
return String.format("%s-%s", ACS_PREFIX, account.getUuid());
|
||||
}
|
||||
|
||||
@Override
|
||||
public Bucket createBucket(Bucket bucket, boolean objectLock) {
|
||||
//ToDo Client pool mgmt
|
||||
@ -135,8 +145,8 @@ public class MinIOObjectStoreDriverImpl extends BaseObjectStoreDriverImpl {
|
||||
" \"Version\": \"2012-10-17\"\n" +
|
||||
" }";
|
||||
MinioAdminClient minioAdminClient = getMinIOAdminClient(storeId);
|
||||
String policyName = "acs-"+account.getAccountName()+"-policy";
|
||||
String userName = "acs-"+account.getAccountName();
|
||||
String policyName = getUserOrAccessKeyForAccount(account) + "-policy";
|
||||
String userName = getUserOrAccessKeyForAccount(account);
|
||||
try {
|
||||
minioAdminClient.addCannedPolicy(policyName, policy);
|
||||
minioAdminClient.setPolicy(userName, false, policyName);
|
||||
@ -250,22 +260,53 @@ public class MinIOObjectStoreDriverImpl extends BaseObjectStoreDriverImpl {
|
||||
|
||||
}
|
||||
|
||||
protected void updateAccountCredentials(final long accountId, final String accessKey, final String secretKey, final boolean checkIfNotPresent) {
|
||||
Map<String, String> details = _accountDetailsDao.findDetails(accountId);
|
||||
boolean updateNeeded = false;
|
||||
if (!checkIfNotPresent || StringUtils.isBlank(details.get(MINIO_ACCESS_KEY))) {
|
||||
details.put(MINIO_ACCESS_KEY, accessKey);
|
||||
updateNeeded = true;
|
||||
}
|
||||
if (StringUtils.isAllBlank(secretKey, details.get(MINIO_SECRET_KEY))) {
|
||||
s_logger.error(String.format("Failed to retrieve secret key for MinIO user: %s from store and account details", accessKey));
|
||||
}
|
||||
if (StringUtils.isNotBlank(secretKey) && (!checkIfNotPresent || StringUtils.isBlank(details.get(MINIO_SECRET_KEY)))) {
|
||||
details.put(MINIO_SECRET_KEY, secretKey);
|
||||
updateNeeded = true;
|
||||
}
|
||||
if (!updateNeeded) {
|
||||
return;
|
||||
}
|
||||
_accountDetailsDao.persist(accountId, details);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean createUser(long accountId, long storeId) {
|
||||
Account account = _accountDao.findById(accountId);
|
||||
MinioAdminClient minioAdminClient = getMinIOAdminClient(storeId);
|
||||
String accessKey = "acs-"+account.getAccountName();
|
||||
String accessKey = getUserOrAccessKeyForAccount(account);
|
||||
// Check user exists
|
||||
try {
|
||||
UserInfo userInfo = minioAdminClient.getUserInfo(accessKey);
|
||||
if(userInfo != null) {
|
||||
s_logger.debug("User already exists in MinIO store: "+accessKey);
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug(String.format("Skipping user creation as the user already exists in MinIO store: %s", accessKey));
|
||||
}
|
||||
updateAccountCredentials(accountId, accessKey, userInfo.secretKey(), true);
|
||||
return true;
|
||||
}
|
||||
} catch (Exception e) {
|
||||
s_logger.debug("User does not exist. Creating user: "+accessKey);
|
||||
} catch (NoSuchAlgorithmException | IOException | InvalidKeyException e) {
|
||||
s_logger.error(String.format("Error encountered while retrieving user: %s for existing MinIO store user check", accessKey), e);
|
||||
return false;
|
||||
} catch (RuntimeException e) { // MinIO lib may throw RuntimeException with code: XMinioAdminNoSuchUser
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug(String.format("Ignoring error encountered while retrieving user: %s for existing MinIO store user check", accessKey));
|
||||
}
|
||||
s_logger.trace("Exception during MinIO user check", e);
|
||||
}
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug(String.format("MinIO store user does not exist. Creating user: %s", accessKey));
|
||||
}
|
||||
|
||||
KeyGenerator generator = null;
|
||||
try {
|
||||
generator = KeyGenerator.getInstance("HmacSHA1");
|
||||
@ -280,10 +321,7 @@ public class MinIOObjectStoreDriverImpl extends BaseObjectStoreDriverImpl {
|
||||
throw new CloudRuntimeException(e);
|
||||
}
|
||||
// Store user credentials
|
||||
Map<String, String> details = new HashMap<>();
|
||||
details.put(MINIO_ACCESS_KEY, accessKey);
|
||||
details.put(MINIO_SECRET_KEY, secretKey);
|
||||
_accountDetailsDao.persist(accountId, details);
|
||||
updateAccountCredentials(accountId, accessKey, secretKey, false);
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
@ -16,16 +16,23 @@
|
||||
// under the License.
|
||||
package org.apache.cloudstack.storage.datastore.driver;
|
||||
|
||||
import com.cloud.storage.BucketVO;
|
||||
import com.cloud.storage.dao.BucketDao;
|
||||
import com.cloud.user.AccountDetailVO;
|
||||
import com.cloud.user.AccountDetailsDao;
|
||||
import com.cloud.user.AccountVO;
|
||||
import com.cloud.user.dao.AccountDao;
|
||||
import io.minio.BucketExistsArgs;
|
||||
import io.minio.MinioClient;
|
||||
import io.minio.RemoveBucketArgs;
|
||||
import io.minio.admin.MinioAdminClient;
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
import static org.mockito.Mockito.any;
|
||||
import static org.mockito.Mockito.anyLong;
|
||||
import static org.mockito.Mockito.anyString;
|
||||
import static org.mockito.Mockito.doNothing;
|
||||
import static org.mockito.Mockito.doReturn;
|
||||
import static org.mockito.Mockito.mock;
|
||||
import static org.mockito.Mockito.times;
|
||||
import static org.mockito.Mockito.verify;
|
||||
import static org.mockito.Mockito.when;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
import java.util.UUID;
|
||||
|
||||
import org.apache.cloudstack.storage.datastore.db.ObjectStoreDao;
|
||||
import org.apache.cloudstack.storage.datastore.db.ObjectStoreDetailsDao;
|
||||
import org.apache.cloudstack.storage.datastore.db.ObjectStoreVO;
|
||||
@ -34,22 +41,24 @@ import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
import org.junit.runner.RunWith;
|
||||
import org.mockito.Mock;
|
||||
import org.mockito.Mockito;
|
||||
import org.mockito.MockitoAnnotations;
|
||||
import org.mockito.Spy;
|
||||
import org.mockito.junit.MockitoJUnitRunner;
|
||||
import org.mockito.stubbing.Answer;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import com.cloud.storage.BucketVO;
|
||||
import com.cloud.storage.dao.BucketDao;
|
||||
import com.cloud.user.AccountDetailVO;
|
||||
import com.cloud.user.AccountDetailsDao;
|
||||
import com.cloud.user.AccountVO;
|
||||
import com.cloud.user.dao.AccountDao;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
import static org.mockito.Mockito.any;
|
||||
import static org.mockito.Mockito.anyLong;
|
||||
import static org.mockito.Mockito.anyString;
|
||||
import static org.mockito.Mockito.doNothing;
|
||||
import static org.mockito.Mockito.doReturn;
|
||||
import static org.mockito.Mockito.times;
|
||||
import static org.mockito.Mockito.verify;
|
||||
import static org.mockito.Mockito.when;
|
||||
import io.minio.BucketExistsArgs;
|
||||
import io.minio.MinioClient;
|
||||
import io.minio.RemoveBucketArgs;
|
||||
import io.minio.admin.MinioAdminClient;
|
||||
import io.minio.admin.UserInfo;
|
||||
|
||||
@RunWith(MockitoJUnitRunner.class)
|
||||
public class MinIOObjectStoreDriverImplTest {
|
||||
@ -97,7 +106,7 @@ public class MinIOObjectStoreDriverImplTest {
|
||||
doReturn(minioClient).when(minioObjectStoreDriverImpl).getMinIOClient(anyLong());
|
||||
doReturn(minioAdminClient).when(minioObjectStoreDriverImpl).getMinIOAdminClient(anyLong());
|
||||
when(bucketDao.listByObjectStoreIdAndAccountId(anyLong(), anyLong())).thenReturn(new ArrayList<BucketVO>());
|
||||
when(account.getAccountName()).thenReturn("admin");
|
||||
when(account.getUuid()).thenReturn(UUID.randomUUID().toString());
|
||||
when(accountDao.findById(anyLong())).thenReturn(account);
|
||||
when(accountDetailsDao.findDetail(anyLong(),anyString())).
|
||||
thenReturn(new AccountDetailVO(1L, "abc","def"));
|
||||
@ -119,4 +128,27 @@ public class MinIOObjectStoreDriverImplTest {
|
||||
verify(minioClient, times(1)).bucketExists(any());
|
||||
verify(minioClient, times(1)).removeBucket(any());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testCreateUserExisting() throws Exception {
|
||||
String uuid = "uuid";
|
||||
String accessKey = MinIOObjectStoreDriverImpl.ACS_PREFIX + "-" + uuid;
|
||||
String secretKey = "secret";
|
||||
|
||||
doReturn(minioAdminClient).when(minioObjectStoreDriverImpl).getMinIOAdminClient(anyLong());
|
||||
when(accountDao.findById(anyLong())).thenReturn(account);
|
||||
when(account.getUuid()).thenReturn(uuid);
|
||||
UserInfo info = mock(UserInfo.class);
|
||||
when(info.secretKey()).thenReturn(secretKey);
|
||||
when(minioAdminClient.getUserInfo(accessKey)).thenReturn(info);
|
||||
final Map<String, String> persistedMap = new HashMap<>();
|
||||
Mockito.doAnswer((Answer<Void>) invocation -> {
|
||||
persistedMap.putAll((Map<String, String>)invocation.getArguments()[1]);
|
||||
return null;
|
||||
}).when(accountDetailsDao).persist(Mockito.anyLong(), Mockito.anyMap());
|
||||
boolean result = minioObjectStoreDriverImpl.createUser(1L, 1L);
|
||||
assertTrue(result);
|
||||
assertEquals(accessKey, persistedMap.get(MinIOObjectStoreDriverImpl.MINIO_ACCESS_KEY));
|
||||
assertEquals(secretKey, persistedMap.get(MinIOObjectStoreDriverImpl.MINIO_SECRET_KEY));
|
||||
}
|
||||
}
|
||||
|
||||
@ -63,7 +63,6 @@ import com.cloud.agent.api.SecurityGroupRulesCmd;
|
||||
import com.cloud.agent.api.SecurityGroupRulesCmd.IpPortAndProto;
|
||||
import com.cloud.agent.api.to.VirtualMachineTO;
|
||||
import com.cloud.agent.manager.Commands;
|
||||
import com.cloud.api.query.dao.SecurityGroupJoinDao;
|
||||
import com.cloud.configuration.Config;
|
||||
import com.cloud.domain.dao.DomainDao;
|
||||
import com.cloud.event.ActionEvent;
|
||||
@ -131,8 +130,6 @@ public class SecurityGroupManagerImpl extends ManagerBase implements SecurityGro
|
||||
@Inject
|
||||
SecurityGroupDao _securityGroupDao;
|
||||
@Inject
|
||||
SecurityGroupJoinDao _securityGroupJoinDao;
|
||||
@Inject
|
||||
SecurityGroupRuleDao _securityGroupRuleDao;
|
||||
@Inject
|
||||
SecurityGroupVMMapDao _securityGroupVMMapDao;
|
||||
@ -1405,7 +1402,7 @@ public class SecurityGroupManagerImpl extends ManagerBase implements SecurityGro
|
||||
}
|
||||
|
||||
@Override
|
||||
public SecurityGroupVO getDefaultSecurityGroup(long accountId) {
|
||||
public SecurityGroup getDefaultSecurityGroup(long accountId) {
|
||||
return _securityGroupDao.findByAccountAndName(accountId, DEFAULT_GROUP_NAME);
|
||||
}
|
||||
|
||||
|
||||
@ -647,13 +647,12 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc
|
||||
@Override
|
||||
protected void runInContext() {
|
||||
try {
|
||||
LOGGER.debug("HostStatsCollector is running...");
|
||||
|
||||
SearchCriteria<HostVO> sc = createSearchCriteriaForHostTypeRoutingStateUpAndNotInMaintenance();
|
||||
|
||||
Map<Object, Object> metrics = new HashMap<>();
|
||||
List<HostVO> hosts = _hostDao.search(sc, null);
|
||||
|
||||
LOGGER.debug(String.format("HostStatsCollector is running to process %d UP hosts", hosts.size()));
|
||||
|
||||
Map<Object, Object> metrics = new HashMap<>();
|
||||
for (HostVO host : hosts) {
|
||||
HostStatsEntry hostStatsEntry = (HostStatsEntry) _resourceMgr.getHostStatistics(host.getId());
|
||||
if (hostStatsEntry != null) {
|
||||
@ -1195,13 +1194,12 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc
|
||||
@Override
|
||||
protected void runInContext() {
|
||||
try {
|
||||
LOGGER.trace("VmStatsCollector is running...");
|
||||
|
||||
SearchCriteria<HostVO> sc = createSearchCriteriaForHostTypeRoutingStateUpAndNotInMaintenance();
|
||||
List<HostVO> hosts = _hostDao.search(sc, null);
|
||||
|
||||
Map<Object, Object> metrics = new HashMap<>();
|
||||
LOGGER.debug(String.format("VmStatsCollector is running to process VMs across %d UP hosts", hosts.size()));
|
||||
|
||||
Map<Object, Object> metrics = new HashMap<>();
|
||||
for (HostVO host : hosts) {
|
||||
Date timestamp = new Date();
|
||||
Map<Long, VMInstanceVO> vmMap = getVmMapForStatsForHost(host);
|
||||
|
||||
@ -27,6 +27,12 @@ public class PasswordPolicyImpl implements PasswordPolicy, Configurable {
|
||||
private Logger logger = Logger.getLogger(PasswordPolicyImpl.class);
|
||||
|
||||
public void verifyIfPasswordCompliesWithPasswordPolicies(String password, String username, Long domainId) {
|
||||
if (StringUtils.isEmpty(password)) {
|
||||
logger.warn(String.format("User [%s] has an empty password, skipping password policy checks. " +
|
||||
"If this is not a LDAP user, there is something wrong.", username));
|
||||
return;
|
||||
}
|
||||
|
||||
int numberOfSpecialCharactersInPassword = 0;
|
||||
int numberOfUppercaseLettersInPassword = 0;
|
||||
int numberOfLowercaseLettersInPassword = 0;
|
||||
@ -188,12 +194,12 @@ public class PasswordPolicyImpl implements PasswordPolicy, Configurable {
|
||||
logger.trace(String.format("Validating if the new password for user [%s] matches regex [%s] defined in the configuration [%s].",
|
||||
username, passwordPolicyRegex, PasswordPolicyRegex.key()));
|
||||
|
||||
if (passwordPolicyRegex == null){
|
||||
logger.trace(String.format("Regex is null; therefore, we will not validate if the new password matches with regex for user [%s].", username));
|
||||
if (StringUtils.isEmpty(passwordPolicyRegex)) {
|
||||
logger.trace(String.format("Regex is empty; therefore, we will not validate if the new password matches with regex for user [%s].", username));
|
||||
return;
|
||||
}
|
||||
|
||||
if (!password.matches(passwordPolicyRegex)){
|
||||
if (!password.matches(passwordPolicyRegex)) {
|
||||
logger.error(String.format("User [%s] informed a new password that does not match with regex [%s]. Refusing the user's new password.", username, passwordPolicyRegex));
|
||||
throw new InvalidParameterValueException("User password does not match with password policy regex.");
|
||||
}
|
||||
|
||||
@ -4581,7 +4581,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
|
||||
Host host, Host lastHost, VirtualMachine.PowerState powerState) {
|
||||
if (isImport) {
|
||||
vm.setDataCenterId(zone.getId());
|
||||
if (hypervisorType == HypervisorType.VMware) {
|
||||
if (List.of(HypervisorType.VMware, HypervisorType.KVM).contains(hypervisorType) && host != null) {
|
||||
vm.setHostId(host.getId());
|
||||
}
|
||||
if (lastHost != null) {
|
||||
|
||||
@ -354,9 +354,9 @@ public class ClusterDrsServiceImpl extends ManagerBase implements ClusterDrsServ
|
||||
hostList.stream().map(HostVO::getId).toArray(Long[]::new));
|
||||
|
||||
Map<Long, Long> hostCpuMap = hostJoinList.stream().collect(Collectors.toMap(HostJoinVO::getId,
|
||||
hostJoin -> hostJoin.getCpuUsedCapacity() + hostJoin.getCpuReservedCapacity()));
|
||||
hostJoin -> hostJoin.getCpus() * hostJoin.getSpeed() - hostJoin.getCpuReservedCapacity() - hostJoin.getCpuUsedCapacity()));
|
||||
Map<Long, Long> hostMemoryMap = hostJoinList.stream().collect(Collectors.toMap(HostJoinVO::getId,
|
||||
hostJoin -> hostJoin.getMemUsedCapacity() + hostJoin.getMemReservedCapacity()));
|
||||
hostJoin -> hostJoin.getTotalMemory() - hostJoin.getMemUsedCapacity() - hostJoin.getMemReservedCapacity()));
|
||||
|
||||
Map<Long, ServiceOffering> vmIdServiceOfferingMap = new HashMap<>();
|
||||
|
||||
@ -387,10 +387,10 @@ public class ClusterDrsServiceImpl extends ManagerBase implements ClusterDrsServ
|
||||
long vmCpu = (long) serviceOffering.getCpu() * serviceOffering.getSpeed();
|
||||
long vmMemory = serviceOffering.getRamSize() * 1024L * 1024L;
|
||||
|
||||
hostCpuMap.put(vm.getHostId(), hostCpuMap.get(vm.getHostId()) - vmCpu);
|
||||
hostCpuMap.put(destHost.getId(), hostCpuMap.get(destHost.getId()) + vmCpu);
|
||||
hostMemoryMap.put(vm.getHostId(), hostMemoryMap.get(vm.getHostId()) - vmMemory);
|
||||
hostMemoryMap.put(destHost.getId(), hostMemoryMap.get(destHost.getId()) + vmMemory);
|
||||
hostCpuMap.put(vm.getHostId(), hostCpuMap.get(vm.getHostId()) + vmCpu);
|
||||
hostCpuMap.put(destHost.getId(), hostCpuMap.get(destHost.getId()) - vmCpu);
|
||||
hostMemoryMap.put(vm.getHostId(), hostMemoryMap.get(vm.getHostId()) + vmMemory);
|
||||
hostMemoryMap.put(destHost.getId(), hostMemoryMap.get(destHost.getId()) - vmMemory);
|
||||
vm.setHostId(destHost.getId());
|
||||
iteration++;
|
||||
}
|
||||
|
||||
@ -551,7 +551,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
|
||||
List<StoragePoolVO> pools = primaryDataStoreDao.listPoolsByCluster(cluster.getId());
|
||||
pools.addAll(primaryDataStoreDao.listByDataCenterId(zone.getId()));
|
||||
for (StoragePool pool : pools) {
|
||||
if (pool.getPath().endsWith(dsName)) {
|
||||
if (StringUtils.contains(pool.getPath(), dsPath)) {
|
||||
storagePool = pool;
|
||||
break;
|
||||
}
|
||||
@ -855,7 +855,8 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
|
||||
}
|
||||
|
||||
private NicProfile importNic(UnmanagedInstanceTO.Nic nic, VirtualMachine vm, Network network, Network.IpAddresses ipAddresses, int deviceId, boolean isDefaultNic, boolean forced) throws InsufficientVirtualNetworkCapacityException, InsufficientAddressCapacityException {
|
||||
Pair<NicProfile, Integer> result = networkOrchestrationService.importNic(nic.getMacAddress(), deviceId, network, isDefaultNic, vm, ipAddresses, forced);
|
||||
DataCenterVO dataCenterVO = dataCenterDao.findById(network.getDataCenterId());
|
||||
Pair<NicProfile, Integer> result = networkOrchestrationService.importNic(nic.getMacAddress(), deviceId, network, isDefaultNic, vm, ipAddresses, dataCenterVO, forced);
|
||||
if (result == null) {
|
||||
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("NIC ID: %s import failed", nic.getNicId()));
|
||||
}
|
||||
@ -1063,7 +1064,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
|
||||
final VirtualMachineTemplate template, final String displayName, final String hostName, final Account caller, final Account owner, final Long userId,
|
||||
final ServiceOfferingVO serviceOffering, final Map<String, Long> dataDiskOfferingMap,
|
||||
final Map<String, Long> nicNetworkMap, final Map<String, Network.IpAddresses> callerNicIpAddressMap,
|
||||
final Map<String, String> details, final boolean migrateAllowed, final boolean forced) {
|
||||
final Map<String, String> details, final boolean migrateAllowed, final boolean forced, final boolean isImportUnmanagedFromSameHypervisor) {
|
||||
LOGGER.debug(LogUtils.logGsonWithoutException("Trying to import VM [%s] with name [%s], in zone [%s], cluster [%s], and host [%s], using template [%s], service offering [%s], disks map [%s], NICs map [%s] and details [%s].",
|
||||
unmanagedInstance, instanceName, zone, cluster, host, template, serviceOffering, dataDiskOfferingMap, nicNetworkMap, details));
|
||||
UserVm userVm = null;
|
||||
@ -1111,7 +1112,10 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
|
||||
}
|
||||
}
|
||||
allDetails.put(VmDetailConstants.ROOT_DISK_CONTROLLER, rootDisk.getController());
|
||||
allDetails.put(VmDetailConstants.ROOT_DISK_SIZE, String.valueOf(rootDisk.getCapacity() / Resource.ResourceType.bytesToGiB));
|
||||
if (cluster.getHypervisorType() == Hypervisor.HypervisorType.KVM && isImportUnmanagedFromSameHypervisor) {
|
||||
long size = Double.valueOf(Math.ceil((double)rootDisk.getCapacity() / Resource.ResourceType.bytesToGiB)).longValue();
|
||||
allDetails.put(VmDetailConstants.ROOT_DISK_SIZE, String.valueOf(size));
|
||||
}
|
||||
|
||||
try {
|
||||
checkUnmanagedDiskAndOfferingForImport(unmanagedInstance.getName(), rootDisk, null, validatedServiceOffering, owner, zone, cluster, migrateAllowed);
|
||||
@ -1169,8 +1173,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
|
||||
}
|
||||
DiskOfferingVO diskOffering = diskOfferingDao.findById(serviceOffering.getDiskOfferingId());
|
||||
diskProfileStoragePoolList.add(importDisk(rootDisk, userVm, cluster, diskOffering, Volume.Type.ROOT, String.format("ROOT-%d", userVm.getId()),
|
||||
(rootDisk.getCapacity() / Resource.ResourceType.bytesToGiB), minIops, maxIops,
|
||||
template, owner, null));
|
||||
rootDisk.getCapacity(), minIops, maxIops, template, owner, null));
|
||||
long deviceId = 1L;
|
||||
for (UnmanagedInstanceTO.Disk disk : dataDisks) {
|
||||
if (disk.getCapacity() == null || disk.getCapacity() == 0) {
|
||||
@ -1178,7 +1181,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
|
||||
}
|
||||
DiskOffering offering = diskOfferingDao.findById(dataDiskOfferingMap.get(disk.getDiskId()));
|
||||
diskProfileStoragePoolList.add(importDisk(disk, userVm, cluster, offering, Volume.Type.DATADISK, String.format("DATA-%d-%s", userVm.getId(), disk.getDiskId()),
|
||||
(disk.getCapacity() / Resource.ResourceType.bytesToGiB), offering.getMinIops(), offering.getMaxIops(),
|
||||
disk.getCapacity(), offering.getMinIops(), offering.getMaxIops(),
|
||||
template, owner, deviceId));
|
||||
deviceId++;
|
||||
}
|
||||
@ -1320,7 +1323,6 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
|
||||
ActionEventUtils.onStartedActionEvent(userId, owner.getId(), EventTypes.EVENT_VM_IMPORT,
|
||||
cmd.getEventDescription(), null, null, true, 0);
|
||||
|
||||
//TODO: Placeholder for integration with KVM ingestion and KVM extend unmanage/manage VMs
|
||||
if (cmd instanceof ImportVmCmd) {
|
||||
ImportVmCmd importVmCmd = (ImportVmCmd) cmd;
|
||||
if (StringUtils.isBlank(importVmCmd.getImportSource())) {
|
||||
@ -1336,8 +1338,8 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
|
||||
details, importVmCmd, forced);
|
||||
}
|
||||
} else {
|
||||
if (cluster.getHypervisorType() == Hypervisor.HypervisorType.VMware) {
|
||||
userVm = importUnmanagedInstanceFromVmwareToVmware(zone, cluster, hosts, additionalNameFilters,
|
||||
if (List.of(Hypervisor.HypervisorType.VMware, Hypervisor.HypervisorType.KVM).contains(cluster.getHypervisorType())) {
|
||||
userVm = importUnmanagedInstanceFromHypervisor(zone, cluster, hosts, additionalNameFilters,
|
||||
template, instanceName, displayName, hostName, caller, owner, userId,
|
||||
serviceOffering, dataDiskOfferingMap,
|
||||
nicNetworkMap, nicIpAddressMap,
|
||||
@ -1456,13 +1458,13 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
|
||||
}
|
||||
}
|
||||
|
||||
private UserVm importUnmanagedInstanceFromVmwareToVmware(DataCenter zone, Cluster cluster,
|
||||
List<HostVO> hosts, List<String> additionalNameFilters,
|
||||
VMTemplateVO template, String instanceName, String displayName,
|
||||
String hostName, Account caller, Account owner, long userId,
|
||||
ServiceOfferingVO serviceOffering, Map<String, Long> dataDiskOfferingMap,
|
||||
Map<String, Long> nicNetworkMap, Map<String, Network.IpAddresses> nicIpAddressMap,
|
||||
Map<String, String> details, Boolean migrateAllowed, List<String> managedVms, boolean forced) {
|
||||
private UserVm importUnmanagedInstanceFromHypervisor(DataCenter zone, Cluster cluster,
|
||||
List<HostVO> hosts, List<String> additionalNameFilters,
|
||||
VMTemplateVO template, String instanceName, String displayName,
|
||||
String hostName, Account caller, Account owner, long userId,
|
||||
ServiceOfferingVO serviceOffering, Map<String, Long> dataDiskOfferingMap,
|
||||
Map<String, Long> nicNetworkMap, Map<String, Network.IpAddresses> nicIpAddressMap,
|
||||
Map<String, String> details, Boolean migrateAllowed, List<String> managedVms, boolean forced) {
|
||||
UserVm userVm = null;
|
||||
for (HostVO host : hosts) {
|
||||
HashMap<String, UnmanagedInstanceTO> unmanagedInstances = getUnmanagedInstancesForHost(host, instanceName, managedVms);
|
||||
@ -1510,7 +1512,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
|
||||
template, displayName, hostName, CallContext.current().getCallingAccount(), owner, userId,
|
||||
serviceOffering, dataDiskOfferingMap,
|
||||
nicNetworkMap, nicIpAddressMap,
|
||||
details, migrateAllowed, forced);
|
||||
details, migrateAllowed, forced, true);
|
||||
break;
|
||||
}
|
||||
if (userVm != null) {
|
||||
@ -1580,7 +1582,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
|
||||
template, displayName, hostName, caller, owner, userId,
|
||||
serviceOffering, dataDiskOfferingMap,
|
||||
nicNetworkMap, nicIpAddressMap,
|
||||
details, false, forced);
|
||||
details, false, forced, false);
|
||||
LOGGER.debug(String.format("VM %s imported successfully", sourceVM));
|
||||
return userVm;
|
||||
} catch (CloudRuntimeException e) {
|
||||
@ -2370,7 +2372,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
|
||||
cleanupFailedImportVM(userVm);
|
||||
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to import volumes while importing vm: %s. %s", instanceName, StringUtils.defaultString(e.getMessage())));
|
||||
}
|
||||
networkOrchestrationService.importNic(macAddress,0,network, true, userVm, requestedIpPair, true);
|
||||
networkOrchestrationService.importNic(macAddress,0,network, true, userVm, requestedIpPair, zone, true);
|
||||
publishVMUsageUpdateResourceCount(userVm, serviceOffering);
|
||||
return userVm;
|
||||
}
|
||||
|
||||
@ -1034,7 +1034,7 @@ public class MockNetworkManagerImpl extends ManagerBase implements NetworkOrches
|
||||
}
|
||||
|
||||
@Override
|
||||
public Pair<NicProfile, Integer> importNic(String macAddress, int deviceId, Network network, Boolean isDefaultNic, VirtualMachine vm, IpAddresses ipAddresses, boolean forced) {
|
||||
public Pair<NicProfile, Integer> importNic(String macAddress, int deviceId, Network network, Boolean isDefaultNic, VirtualMachine vm, IpAddresses ipAddresses, DataCenter dataCenter, boolean forced) {
|
||||
return null;
|
||||
}
|
||||
|
||||
|
||||
@ -367,7 +367,7 @@ public class UnmanagedVMsManagerImplTest {
|
||||
NicProfile profile = Mockito.mock(NicProfile.class);
|
||||
Integer deviceId = 100;
|
||||
Pair<NicProfile, Integer> pair = new Pair<NicProfile, Integer>(profile, deviceId);
|
||||
when(networkOrchestrationService.importNic(nullable(String.class), nullable(Integer.class), nullable(Network.class), nullable(Boolean.class), nullable(VirtualMachine.class), nullable(Network.IpAddresses.class), Mockito.anyBoolean())).thenReturn(pair);
|
||||
when(networkOrchestrationService.importNic(nullable(String.class), nullable(Integer.class), nullable(Network.class), nullable(Boolean.class), nullable(VirtualMachine.class), nullable(Network.IpAddresses.class), nullable(DataCenter.class), Mockito.anyBoolean())).thenReturn(pair);
|
||||
when(volumeDao.findByInstance(Mockito.anyLong())).thenReturn(volumes);
|
||||
List<UserVmResponse> userVmResponses = new ArrayList<>();
|
||||
UserVmResponse userVmResponse = new UserVmResponse();
|
||||
|
||||
@ -40,9 +40,14 @@ class TestHostPing(cloudstackTestCase):
self.services = self.testClient.getParsedTestDataConfig()
self.zone = get_zone(self.apiclient, self.testClient.getZoneForTests())
self.pod = get_pod(self.apiclient, self.zone.id)
self.original_host_state_map = {}
self.cleanup = []

def tearDown(self):
for host_id in self.original_host_state_map:
state = self.original_host_state_map[host_id]
sql_query = "UPDATE host SET status = '" + state + "' WHERE uuid = '" + host_id + "'"
self.dbConnection.execute(sql_query)
super(TestHostPing, self).tearDown()

def checkHostStateInCloudstack(self, state, host_id):
@ -92,6 +97,7 @@ class TestHostPing(cloudstackTestCase):
self.logger.debug('Hypervisor = {}'.format(host.id))

hostToTest = listHost[0]
self.original_host_state_map[hostToTest.id] = hostToTest.state
sql_query = "UPDATE host SET status = 'Alert' WHERE uuid = '" + hostToTest.id + "'"
self.dbConnection.execute(sql_query)


@ -148,7 +148,7 @@ class TestImageStoreObjectMigration(cloudstackTestCase):

storeObjects = originalSecondaryStore.listObjects(self.apiclient, path="template/tmpl/" + str(account_id) + "/" + str(template_id))

self.assertEqual(len(storeObjects), 2, "Check template is uploaded on secondary storage")
self.assertGreaterEqual(len(storeObjects), 2, "Check template is uploaded on secondary storage")

# Migrate template to another secondary storage
secondaryStores = ImageStore.list(self.apiclient, zoneid=self.zone.id)
@ -173,7 +173,7 @@ class TestImageStoreObjectMigration(cloudstackTestCase):

storeObjects = destSecondaryStore.listObjects(self.apiclient, path="template/tmpl/" + str(account_id) + "/" + str(template_id))

self.assertEqual(len(storeObjects), 2, "Check template is uploaded on destination secondary storage")
self.assertGreaterEqual(len(storeObjects), 2, "Check template is uploaded on destination secondary storage")

def registerTemplate(self, cmd):
temp = self.apiclient.registerTemplate(cmd)[0]

@ -1011,8 +1011,37 @@ class TestSecuredVmMigration(cloudstackTestCase):

@classmethod
def tearDownClass(cls):
if cls.hypervisor.lower() in ["kvm"]:
cls.ensure_all_hosts_are_up()
super(TestSecuredVmMigration, cls).tearDownClass()

@classmethod
def ensure_all_hosts_are_up(cls):
hosts = Host.list(
cls.apiclient,
zoneid=cls.zone.id,
type='Routing',
hypervisor='KVM'
)
for host in hosts:
if host.state != "Up":
SshClient(host.ipaddress, port=22, user=cls.hostConfig["username"], passwd=cls.hostConfig["password"]) \
.execute("service cloudstack-agent stop ; \
sleep 10 ; \
service cloudstack-agent start")
interval = 5
retries = 10
while retries > -1:
time.sleep(interval)
restarted_host = Host.list(
cls.apiclient,
hostid=host.id,
type='Routing'
)[0]
if restarted_host.state == "Up":
break
retries = retries - 1

def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()

@ -2958,6 +2958,7 @@
|
||||
"message.installwizard.tooltip.tungsten.provider.vrouterport": "Tungsten provider vrouter port is required",
|
||||
"message.instances.managed": "Instances controlled by CloudStack.",
|
||||
"message.instances.unmanaged": "Instances not controlled by CloudStack.",
|
||||
"message.instances.migrate.vmware": "Instances that can be migrated from VMware.",
|
||||
"message.interloadbalance.not.return.elementid": "error: listInternalLoadBalancerElements API doesn't return internal LB element ID.",
|
||||
"message.ip.address.changes.effect.after.vm.restart": "IP address changes takes effect only after Instance restart.",
|
||||
"message.ip.v6.prefix.delete": "IPv6 prefix deleted",
|
||||
|
||||
@ -199,8 +199,10 @@ export default {
|
||||
created () {
|
||||
this.menus = this.mainMenu.find((item) => item.path === '/').children
|
||||
this.collapsed = !this.sidebarOpened
|
||||
const readyForShutdownPollingJob = setInterval(this.checkShutdown, 5000)
|
||||
this.$store.commit('SET_READY_FOR_SHUTDOWN_POLLING_JOB', readyForShutdownPollingJob)
|
||||
if ('readyForShutdown' in this.$store.getters.apis) {
|
||||
const readyForShutdownPollingJob = setInterval(this.checkShutdown, 5000)
|
||||
this.$store.commit('SET_READY_FOR_SHUTDOWN_POLLING_JOB', readyForShutdownPollingJob)
|
||||
}
|
||||
},
|
||||
mounted () {
|
||||
const layoutMode = this.$config.theme['@layout-mode'] || 'light'
|
||||
|
||||
@ -449,7 +449,7 @@ export default {
|
||||
initMinioClient () {
|
||||
if (!this.client) {
|
||||
const url = /https?:\/\/([^/]+)\/?/.exec(this.resource.url.split(this.resource.name)[0])[1]
|
||||
const isHttps = /^https/.test(url)
|
||||
const isHttps = /^https/.test(this.resource.url)
|
||||
this.client = new Minio.Client({
|
||||
endPoint: url.split(':')[0],
|
||||
port: url.split(':').length > 1 ? parseInt(url.split(':')[1]) : isHttps ? 443 : 80,
|
||||
|
||||
@ -97,7 +97,7 @@ export default {
|
||||
label: 'label.action.force.reconnect',
|
||||
message: 'message.confirm.action.force.reconnect',
|
||||
dataView: true,
|
||||
show: (record) => { return ['Disconnected', 'Up'].includes(record.state) }
|
||||
show: (record) => { return ['Disconnected', 'Up', 'Alert'].includes(record.state) }
|
||||
},
|
||||
{
|
||||
api: 'updateHost',
|
||||
|
||||
@ -211,7 +211,7 @@ export default {
|
||||
params.params = userdataparams
|
||||
}
|
||||
|
||||
api('registerUserData', params).then(json => {
|
||||
api('registerUserData', {}, 'POST', params).then(json => {
|
||||
this.$message.success(this.$t('message.success.register.user.data') + ' ' + values.name)
|
||||
}).catch(error => {
|
||||
this.$notifyError(error)
|
||||
|
||||
@ -303,7 +303,6 @@ export default {
|
||||
} else {
|
||||
this.loadMore(apiToCall, page + 1, sema)
|
||||
}
|
||||
this.form.domainid = 0
|
||||
})
|
||||
},
|
||||
fetchRoles () {
|
||||
|
||||
@ -429,8 +429,10 @@
|
||||
</a-form-item>
|
||||
</a-col>
|
||||
</a-row>
|
||||
<a-button @click="closeAction">{{ $t('label.cancel') }}</a-button>
|
||||
<a-button :loading="loading" ref="submit" type="primary" @click="handleSubmit">{{ $t('label.ok') }}</a-button>
|
||||
<div :span="24" class="action-button">
|
||||
<a-button @click="closeAction">{{ $t('label.cancel') }}</a-button>
|
||||
<a-button :loading="loading" ref="submit" type="primary" @click="handleSubmit">{{ $t('label.ok') }}</a-button>
|
||||
</div>
|
||||
</a-form>
|
||||
</a-spin>
|
||||
</div>
|
||||
|
||||
@ -111,7 +111,7 @@
|
||||
</a-select-option>
|
||||
</a-select>
|
||||
</a-form-item>
|
||||
<a-form-item name="templateid" ref="templateid" v-if="cluster.hypervisortype === 'KVM' && !selectedVmwareVcenter && !isDiskImport && !isExternalImport">
|
||||
<a-form-item name="templateid" ref="templateid" v-if="cluster.hypervisortype === 'VMware' || (cluster.hypervisortype === 'KVM' && !selectedVmwareVcenter && !isDiskImport && !isExternalImport)">
|
||||
<template #label>
|
||||
<tooltip-label :title="$t('label.templatename')" :tooltip="apiParams.templateid.description + '. ' + $t('message.template.import.vm.temporary')"/>
|
||||
</template>
|
||||
@ -120,7 +120,7 @@
|
||||
:value="templateType"
|
||||
@change="changeTemplateType">
|
||||
<a-row :gutter="12">
|
||||
<a-col :md="24" :lg="12" v-if="this.cluster.hypervisortype === 'VMWare'">
|
||||
<a-col :md="24" :lg="12" v-if="this.cluster.hypervisortype === 'VMware'">
|
||||
<a-radio value="auto">
|
||||
{{ $t('label.template.temporary.import') }}
|
||||
</a-radio>
|
||||
@ -667,7 +667,7 @@ export default {
|
||||
nic.broadcasturi = 'pvlan://' + nic.vlanid + '-i' + nic.isolatedpvlan
|
||||
}
|
||||
}
|
||||
if (this.cluster.hypervisortype === 'VMWare') {
|
||||
if (this.cluster.hypervisortype === 'VMware') {
|
||||
nic.meta = this.getMeta(nic, { macaddress: 'mac', vlanid: 'vlan', networkname: 'network' })
|
||||
} else {
|
||||
nic.meta = this.getMeta(nic, { macaddress: 'mac', vlanid: 'vlan' })
|
||||
@ -849,7 +849,7 @@ export default {
|
||||
this.nicsNetworksMapping = data
|
||||
},
|
||||
defaultTemplateType () {
|
||||
if (this.cluster.hypervisortype === 'VMWare') {
|
||||
if (this.cluster.hypervisortype === 'VMware') {
|
||||
return 'auto'
|
||||
}
|
||||
return 'custom'
|
||||
|
||||
@ -238,6 +238,7 @@
|
||||
</a-form-item>
|
||||
<a-form-item v-if="isDestinationKVM && isMigrateFromVmware && clusterId != undefined">
|
||||
<SelectVmwareVcenter
|
||||
@onVcenterTypeChanged="updateVmwareVcenterType"
|
||||
@loadingVmwareUnmanagedInstances="() => this.unmanagedInstancesLoading = true"
|
||||
@listedVmwareUnmanagedInstances="($e) => onListUnmanagedInstancesFromVmware($e)"
|
||||
/>
|
||||
@ -322,8 +323,8 @@
|
||||
<a-col v-if="!isDiskImport" :md="24" :lg="(!isMigrateFromVmware && showManagedInstances) ? 12 : 24">
|
||||
<a-card class="instances-card">
|
||||
<template #title>
|
||||
{{ $t('label.unmanaged.instances') }}
|
||||
<a-tooltip :title="$t('message.instances.unmanaged')">
|
||||
{{ (isMigrateFromVmware && vmwareVcenterType === 'existing') ? $t('label.instances') : $t('label.unmanaged.instances') }}
|
||||
<a-tooltip :title="(isMigrateFromVmware && vmwareVcenterType === 'existing') ? $t('message.instances.migrate.vmware') : $t('message.instances.unmanaged')">
|
||||
<info-circle-outlined />
|
||||
</a-tooltip>
|
||||
<a-button
|
||||
@ -731,6 +732,7 @@ export default {
|
||||
showUnmanageForm: false,
|
||||
selectedUnmanagedInstance: {},
|
||||
query: {},
|
||||
vmwareVcenterType: undefined,
|
||||
selectedVmwareVcenter: undefined
|
||||
}
|
||||
},
|
||||
@ -1409,6 +1411,9 @@ export default {
|
||||
this.unmanagedInstances = obj.response.unmanagedinstance
|
||||
this.itemCount.unmanaged = obj.response.count
|
||||
this.unmanagedInstancesLoading = false
|
||||
},
|
||||
updateVmwareVcenterType (type) {
|
||||
this.vmwareVcenterType = type
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -28,7 +28,8 @@
|
||||
<a-radio-group
|
||||
style="text-align: center; width: 100%"
|
||||
v-model:value="vcenterSelectedOption"
|
||||
buttonStyle="solid">
|
||||
buttonStyle="solid"
|
||||
@change="onVcenterTypeChange">
|
||||
<a-radio-button value="existing" style="width: 50%; text-align: center">
|
||||
{{ $t('label.existing') }}
|
||||
</a-radio-button>
|
||||
@ -256,6 +257,9 @@ export default {
|
||||
},
|
||||
onSelectExistingVmwareDatacenter (value) {
|
||||
this.selectedExistingVcenterId = value
|
||||
},
|
||||
onVcenterTypeChange () {
|
||||
this.$emit('onVcenterTypeChanged', this.vcenterSelectedOption)
|
||||
}
|
||||
}
|
||||
}
|