diff --git a/.python-version b/.python-version index d70c8f8d89f..c8cfe395918 100644 --- a/.python-version +++ b/.python-version @@ -1 +1 @@ -3.6 +3.10 diff --git a/agent/conf/agent.properties b/agent/conf/agent.properties index 515614fff16..773ef963ef7 100644 --- a/agent/conf/agent.properties +++ b/agent/conf/agent.properties @@ -434,3 +434,10 @@ iscsi.session.cleanup.enabled=false # Implicit host tags managed by agent.properties # host.tags= + +# Timeout(in seconds) for SSL handshake when agent connects to server. When no value is set then default value of 30s +# will be used +#ssl.handshake.timeout= + +# Wait(in seconds) during agent reconnections. When no value is set then default value of 5s will be used +#backoff.seconds= diff --git a/agent/src/main/java/com/cloud/agent/Agent.java b/agent/src/main/java/com/cloud/agent/Agent.java index c84179d6660..2e7b61fbd51 100644 --- a/agent/src/main/java/com/cloud/agent/Agent.java +++ b/agent/src/main/java/com/cloud/agent/Agent.java @@ -26,24 +26,25 @@ import java.net.Socket; import java.net.UnknownHostException; import java.nio.channels.ClosedChannelException; import java.nio.charset.Charset; -import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Timer; +import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; import java.util.concurrent.SynchronousQueue; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; import javax.naming.ConfigurationException; -import com.cloud.resource.AgentStatusUpdater; -import com.cloud.resource.ResourceStatusUpdater; -import com.cloud.agent.api.PingAnswer; -import com.cloud.utils.NumbersUtil; import org.apache.cloudstack.agent.lb.SetupMSListAnswer; import org.apache.cloudstack.agent.lb.SetupMSListCommand; import org.apache.cloudstack.ca.PostCertificateRenewalCommand; @@ -55,10 +56,11 @@ import org.apache.cloudstack.managed.context.ManagedContextTimerTask; import org.apache.cloudstack.utils.security.KeyStoreUtils; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.io.FileUtils; -import org.apache.commons.lang.ObjectUtils; +import org.apache.commons.lang3.ObjectUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.ThreadContext; import com.cloud.agent.api.AgentControlAnswer; import com.cloud.agent.api.AgentControlCommand; @@ -67,6 +69,7 @@ import com.cloud.agent.api.Command; import com.cloud.agent.api.CronCommand; import com.cloud.agent.api.MaintainAnswer; import com.cloud.agent.api.MaintainCommand; +import com.cloud.agent.api.PingAnswer; import com.cloud.agent.api.PingCommand; import com.cloud.agent.api.ReadyCommand; import com.cloud.agent.api.ShutdownCommand; @@ -76,9 +79,11 @@ import com.cloud.agent.transport.Request; import com.cloud.agent.transport.Response; import com.cloud.exception.AgentControlChannelException; import com.cloud.host.Host; +import com.cloud.resource.AgentStatusUpdater; +import 
com.cloud.resource.ResourceStatusUpdater; import com.cloud.resource.ServerResource; +import com.cloud.utils.NumbersUtil; import com.cloud.utils.PropertiesUtil; -import com.cloud.utils.backoff.BackoffAlgorithm; import com.cloud.utils.concurrency.NamedThreadFactory; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.exception.NioConnectionException; @@ -90,7 +95,6 @@ import com.cloud.utils.nio.NioConnection; import com.cloud.utils.nio.Task; import com.cloud.utils.script.OutputInterpreter; import com.cloud.utils.script.Script; -import org.apache.logging.log4j.ThreadContext; /** * @config @@ -114,7 +118,7 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater Configuration(66), // Exiting due to configuration problems. Error(67); // Exiting because of error. - int value; + final int value; ExitStatus(final int value) { this.value = value; @@ -125,133 +129,162 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater } } - List _controlListeners = new ArrayList(); + CopyOnWriteArrayList controlListeners = new CopyOnWriteArrayList<>(); - IAgentShell _shell; - NioConnection _connection; - ServerResource _resource; - Link _link; - Long _id; + IAgentShell shell; + NioConnection connection; + ServerResource serverResource; + Link link; + Long id; String _uuid; String _name; - Timer _timer = new Timer("Agent Timer"); - Timer certTimer; - Timer hostLBTimer; + ScheduledExecutorService selfTaskExecutor; + ScheduledExecutorService certExecutor; + ScheduledExecutorService hostLbCheckExecutor; - List _watchList = new ArrayList(); - long _sequence = 0; - long _lastPingResponseTime = 0; - long _pingInterval = 0; - AtomicInteger _inProgress = new AtomicInteger(); + CopyOnWriteArrayList> watchList = new CopyOnWriteArrayList<>(); + AtomicLong sequence = new AtomicLong(0); + AtomicLong lastPingResponseTime = new AtomicLong(0L); + long pingInterval = 0; + AtomicInteger commandsInProgress = new AtomicInteger(0); - StartupTask _startup = null; - long _startupWaitDefault = 180000; - long _startupWait = _startupWaitDefault; - boolean _reconnectAllowed = true; - //For time sentitive task, e.g. PingTask - ThreadPoolExecutor _ugentTaskPool; - ExecutorService _executor; + private final AtomicReference startupTask = new AtomicReference<>(); + private static final long DEFAULT_STARTUP_WAIT = 180; + long startupWait = DEFAULT_STARTUP_WAIT; + boolean reconnectAllowed = true; - Thread _shutdownThread = new ShutdownThread(this); + //For time sensitive task, e.g. PingTask + ThreadPoolExecutor outRequestHandler; + ExecutorService requestHandler; - private String _keystoreSetupPath; - private String _keystoreCertImportPath; + Thread shutdownThread = new ShutdownThread(this); - // for simulator use only + private String keystoreSetupSetupPath; + private String keystoreCertImportScriptPath; + + private String hostname; + + protected String getLinkLog(final Link link) { + if (link == null) { + return ""; + } + StringBuilder str = new StringBuilder(); + if (logger.isTraceEnabled()) { + str.append(System.identityHashCode(link)).append("-"); + } + str.append(link.getSocketAddress()); + return str.toString(); + } + + protected String getAgentName() { + return (serverResource != null && serverResource.isAppendAgentNameToLogs() && + StringUtils.isNotBlank(serverResource.getName())) ? 
+ serverResource.getName() : + "Agent"; + } + + protected void setupShutdownHookAndInitExecutors() { + logger.trace("Adding shutdown hook"); + Runtime.getRuntime().addShutdownHook(shutdownThread); + selfTaskExecutor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("Agent-SelfTask")); + outRequestHandler = new ThreadPoolExecutor(shell.getPingRetries(), 2 * shell.getPingRetries(), 10, TimeUnit.MINUTES, + new SynchronousQueue<>(), new NamedThreadFactory("AgentOutRequest-Handler")); + requestHandler = new ThreadPoolExecutor(shell.getWorkers(), 5 * shell.getWorkers(), 1, TimeUnit.DAYS, + new LinkedBlockingQueue<>(), new NamedThreadFactory("AgentRequest-Handler")); + } + + /** + * Constructor for the {@code Agent} class, intended for simulator use only. + * + *

This constructor initializes the agent with a provided {@link IAgentShell}. + * It sets up the necessary NIO client connection, establishes a shutdown hook, + * and initializes the thread executors. + * + * @param shell the {@link IAgentShell} instance that provides agent configuration and runtime information. + */ public Agent(final IAgentShell shell) { - _shell = shell; - _link = null; - - _connection = new NioClient("Agent", _shell.getNextHost(), _shell.getPort(), _shell.getWorkers(), this); - - Runtime.getRuntime().addShutdownHook(_shutdownThread); - - _ugentTaskPool = - new ThreadPoolExecutor(shell.getPingRetries(), 2 * shell.getPingRetries(), 10, TimeUnit.MINUTES, new SynchronousQueue(), new NamedThreadFactory( - "UgentTask")); - - _executor = - new ThreadPoolExecutor(_shell.getWorkers(), 5 * _shell.getWorkers(), 1, TimeUnit.DAYS, new LinkedBlockingQueue(), new NamedThreadFactory( - "agentRequest-Handler")); + this.shell = shell; + this.link = null; + this.connection = new NioClient( + getAgentName(), + this.shell.getNextHost(), + this.shell.getPort(), + this.shell.getWorkers(), + this.shell.getSslHandshakeTimeout(), + this + ); + setupShutdownHookAndInitExecutors(); } public Agent(final IAgentShell shell, final int localAgentId, final ServerResource resource) throws ConfigurationException { - _shell = shell; - _resource = resource; - _link = null; - + this.shell = shell; + serverResource = resource; + link = null; resource.setAgentControl(this); - - final String value = _shell.getPersistentProperty(getResourceName(), "id"); - _uuid = _shell.getPersistentProperty(getResourceName(), "uuid"); - _name = _shell.getPersistentProperty(getResourceName(), "name"); - _id = value != null ? Long.parseLong(value) : null; - logger.info("Initialising agent [id: {}, uuid: {}, name: {}]", ObjectUtils.defaultIfNull(_id, ""), _uuid, _name); + final String value = shell.getPersistentProperty(getResourceName(), "id"); + _uuid = shell.getPersistentProperty(getResourceName(), "uuid"); + _name = shell.getPersistentProperty(getResourceName(), "name"); + id = value != null ? 
Long.parseLong(value) : null; + logger.info("Initialising agent [id: {}, uuid: {}, name: {}]", ObjectUtils.defaultIfNull(id, ""), _uuid, _name); final Map params = new HashMap<>(); - // merge with properties from command line to let resource access command line parameters - for (final Map.Entry cmdLineProp : _shell.getCmdLineProperties().entrySet()) { + for (final Map.Entry cmdLineProp : this.shell.getCmdLineProperties().entrySet()) { params.put(cmdLineProp.getKey(), cmdLineProp.getValue()); } - - if (!_resource.configure(getResourceName(), params)) { - throw new ConfigurationException("Unable to configure " + _resource.getName()); + if (!serverResource.configure(getResourceName(), params)) { + throw new ConfigurationException("Unable to configure " + serverResource.getName()); } + ThreadContext.put("agentname", getAgentName()); + final String host = this.shell.getNextHost(); + connection = new NioClient(getAgentName(), host, this.shell.getPort(), this.shell.getWorkers(), + this.shell.getSslHandshakeTimeout(), this); + setupShutdownHookAndInitExecutors(); + logger.info("{} with host = {}, local id = {}", this, host, localAgentId); + } - final String host = _shell.getNextHost(); - _connection = new NioClient("Agent", host, _shell.getPort(), _shell.getWorkers(), this); - // ((NioClient)_connection).setBindAddress(_shell.getPrivateIp()); - - logger.debug("Adding shutdown hook"); - Runtime.getRuntime().addShutdownHook(_shutdownThread); - - _ugentTaskPool = - new ThreadPoolExecutor(shell.getPingRetries(), 2 * shell.getPingRetries(), 10, TimeUnit.MINUTES, new SynchronousQueue(), new NamedThreadFactory( - "UgentTask")); - - _executor = - new ThreadPoolExecutor(_shell.getWorkers(), 5 * _shell.getWorkers(), 1, TimeUnit.DAYS, new LinkedBlockingQueue(), new NamedThreadFactory( - "agentRequest-Handler")); - - logger.info("Agent [id = {}, uuid: {}, name: {}] : type = {} : zone = {} : pod = {} : workers = {} : host = {} : port = {}", - ObjectUtils.defaultIfNull(_id, "new"), _uuid, _name, getResourceName(), - _shell.getZone(), _shell.getPod(), _shell.getWorkers(), host, _shell.getPort()); + @Override + public String toString() { + return String.format("Agent [id = %s, uuid = %s, name = %s, type = %s, zone = %s, pod = %s, workers = %d, port = %d]", + ObjectUtils.defaultIfNull(id, "new"), + _uuid, + _name, + getResourceName(), + this.shell.getZone(), + this.shell.getPod(), + this.shell.getWorkers(), + this.shell.getPort()); } public String getVersion() { - return _shell.getVersion(); + return shell.getVersion(); } public String getResourceGuid() { - final String guid = _shell.getGuid(); + final String guid = shell.getGuid(); return guid + "-" + getResourceName(); } public String getZone() { - return _shell.getZone(); + return shell.getZone(); } public String getPod() { - return _shell.getPod(); + return shell.getPod(); } protected void setLink(final Link link) { - _link = link; + this.link = link; } public ServerResource getResource() { - return _resource; - } - - public BackoffAlgorithm getBackoffAlgorithm() { - return _shell.getBackoffAlgorithm(); + return serverResource; } public String getResourceName() { - return _resource.getClass().getSimpleName(); + return serverResource.getClass().getSimpleName(); } /** @@ -260,71 +293,65 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater * agent instances and its inner objects. 
*/ private void scavengeOldAgentObjects() { - _executor.submit(new Runnable() { - @Override - public void run() { - try { - Thread.sleep(2000L); - } catch (final InterruptedException ignored) { - } finally { - System.gc(); - } + requestHandler.submit(() -> { + try { + Thread.sleep(2000L); + } catch (final InterruptedException ignored) { + } finally { + System.gc(); } }); } public void start() { - if (!_resource.start()) { - logger.error("Unable to start the resource: {}", _resource.getName()); - throw new CloudRuntimeException("Unable to start the resource: " + _resource.getName()); + if (!serverResource.start()) { + String msg = String.format("Unable to start the resource: %s", serverResource.getName()); + logger.error(msg); + throw new CloudRuntimeException(msg); } - _keystoreSetupPath = Script.findScript("scripts/util/", KeyStoreUtils.KS_SETUP_SCRIPT); - if (_keystoreSetupPath == null) { + keystoreSetupSetupPath = Script.findScript("scripts/util/", KeyStoreUtils.KS_SETUP_SCRIPT); + if (keystoreSetupSetupPath == null) { throw new CloudRuntimeException(String.format("Unable to find the '%s' script", KeyStoreUtils.KS_SETUP_SCRIPT)); } - _keystoreCertImportPath = Script.findScript("scripts/util/", KeyStoreUtils.KS_IMPORT_SCRIPT); - if (_keystoreCertImportPath == null) { + keystoreCertImportScriptPath = Script.findScript("scripts/util/", KeyStoreUtils.KS_IMPORT_SCRIPT); + if (keystoreCertImportScriptPath == null) { throw new CloudRuntimeException(String.format("Unable to find the '%s' script", KeyStoreUtils.KS_IMPORT_SCRIPT)); } try { - _connection.start(); + connection.start(); } catch (final NioConnectionException e) { logger.warn("Attempt to connect to server generated NIO Connection Exception {}, trying again", e.getLocalizedMessage()); } - while (!_connection.isStartup()) { - final String host = _shell.getNextHost(); - _shell.getBackoffAlgorithm().waitBeforeRetry(); - _connection = new NioClient("Agent", host, _shell.getPort(), _shell.getWorkers(), this); - logger.info("Connecting to host:{}", host); + while (!connection.isStartup()) { + final String host = shell.getNextHost(); + shell.getBackoffAlgorithm().waitBeforeRetry(); + connection = new NioClient(getAgentName(), host, shell.getPort(), shell.getWorkers(), + shell.getSslHandshakeTimeout(), this); + logger.info("Connecting to host: {}", host); try { - _connection.start(); + connection.start(); } catch (final NioConnectionException e) { - _connection.stop(); - try { - _connection.cleanUp(); - } catch (final IOException ex) { - logger.warn("Fail to clean up old connection. {}", ex); - } + stopAndCleanupConnection(false); logger.info("Attempted to connect to the server, but received an unexpected exception, trying again...", e); } } - _shell.updateConnectedHost(); + shell.updateConnectedHost(); scavengeOldAgentObjects(); } public void stop(final String reason, final String detail) { - logger.info("Stopping the agent: Reason = {} {}", reason, ": Detail = " + ObjectUtils.defaultIfNull(detail, "")); - _reconnectAllowed = false; - if (_connection != null) { + logger.info("Stopping the agent: Reason = {}{}", reason, (detail != null ? ": Detail = " + detail : "")); + reconnectAllowed = false; + if (connection != null) { final ShutdownCommand cmd = new ShutdownCommand(reason, detail); try { - if (_link != null) { - final Request req = new Request(_id != null ? _id : -1, -1, cmd, false); - _link.send(req.toBytes()); + if (link != null) { + final Request req = new Request(id != null ? 
id : -1, -1, cmd, false); + link.send(req.toBytes()); } } catch (final ClosedChannelException e) { logger.warn("Unable to send: {}", cmd.toString()); @@ -337,53 +364,54 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater } catch (final InterruptedException e) { logger.debug("Who the heck interrupted me here?"); } - _connection.stop(); - _connection = null; - _link = null; + connection.stop(); + connection = null; + link = null; } - if (_resource != null) { - _resource.stop(); - _resource = null; + if (serverResource != null) { + serverResource.stop(); + serverResource = null; } - if (_startup != null) { - _startup = null; + if (startupTask.get() != null) { + startupTask.set(null); } - if (_ugentTaskPool != null) { - _ugentTaskPool.shutdownNow(); - _ugentTaskPool = null; + if (outRequestHandler != null) { + outRequestHandler.shutdownNow(); + outRequestHandler = null; } - if (_executor != null) { - _executor.shutdown(); - _executor = null; + if (requestHandler != null) { + requestHandler.shutdown(); + requestHandler = null; } - if (_timer != null) { - _timer.cancel(); - _timer = null; + if (selfTaskExecutor != null) { + selfTaskExecutor.shutdown(); + selfTaskExecutor = null; } - if (hostLBTimer != null) { - hostLBTimer.cancel(); - hostLBTimer = null; + if (hostLbCheckExecutor != null) { + hostLbCheckExecutor.shutdown(); + hostLbCheckExecutor = null; } - if (certTimer != null) { - certTimer.cancel(); - certTimer = null; + if (certExecutor != null) { + certExecutor.shutdown(); + certExecutor = null; } } public Long getId() { - return _id; + return id; } public void setId(final Long id) { - _id = id; - _shell.setPersistentProperty(getResourceName(), "id", Long.toString(id)); + logger.debug("Set agent id {}", id); + this.id = id; + shell.setPersistentProperty(getResourceName(), "id", Long.toString(id)); } public String getUuid() { @@ -392,7 +420,7 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater public void setUuid(String uuid) { this._uuid = uuid; - _shell.setPersistentProperty(getResourceName(), "uuid", uuid); + shell.setPersistentProperty(getResourceName(), "uuid", uuid); } public String getName() { @@ -401,61 +429,75 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater public void setName(String name) { this._name = name; - _shell.setPersistentProperty(getResourceName(), "name", name); + shell.setPersistentProperty(getResourceName(), "name", name); } - private synchronized void scheduleServicesRestartTask() { - if (certTimer != null) { - certTimer.cancel(); - certTimer.purge(); + private void scheduleCertificateRenewalTask() { + String name = "CertificateRenewalTask"; + if (certExecutor != null && !certExecutor.isShutdown()) { + certExecutor.shutdown(); + try { + if (!certExecutor.awaitTermination(1, TimeUnit.SECONDS)) { + certExecutor.shutdownNow(); + } + } catch (InterruptedException e) { + logger.debug("Forcing {} shutdown as it did not shutdown in the desired time due to: {}", + name, e.getMessage()); + certExecutor.shutdownNow(); + } } - certTimer = new Timer("Certificate Renewal Timer"); - certTimer.schedule(new PostCertificateRenewalTask(this), 5000L); + certExecutor = Executors.newSingleThreadScheduledExecutor((new NamedThreadFactory(name))); + certExecutor.schedule(new PostCertificateRenewalTask(this), 5, TimeUnit.SECONDS); } - private synchronized void scheduleHostLBCheckerTask(final long checkInterval) { - if (hostLBTimer != null) { - hostLBTimer.cancel(); + private void 
scheduleHostLBCheckerTask(final long checkInterval) { + String name = "HostLBCheckerTask"; + if (hostLbCheckExecutor != null && !hostLbCheckExecutor.isShutdown()) { + hostLbCheckExecutor.shutdown(); + try { + if (!hostLbCheckExecutor.awaitTermination(1, TimeUnit.SECONDS)) { + hostLbCheckExecutor.shutdownNow(); + } + } catch (InterruptedException e) { + logger.debug("Forcing {} shutdown as it did not shutdown in the desired time due to: {}", + name, e.getMessage()); + hostLbCheckExecutor.shutdownNow(); + } } if (checkInterval > 0L) { - logger.info("Scheduling preferred host timer task with host.lb.interval={}ms", checkInterval); - hostLBTimer = new Timer("Host LB Timer"); - hostLBTimer.scheduleAtFixedRate(new PreferredHostCheckerTask(), checkInterval, checkInterval); + logger.info("Scheduling preferred host task with host.lb.interval={}ms", checkInterval); + hostLbCheckExecutor = Executors.newSingleThreadScheduledExecutor((new NamedThreadFactory(name))); + hostLbCheckExecutor.scheduleAtFixedRate(new PreferredHostCheckerTask(), checkInterval, checkInterval, + TimeUnit.MILLISECONDS); } } public void scheduleWatch(final Link link, final Request request, final long delay, final long period) { - synchronized (_watchList) { - logger.debug("Adding task with request: {} to watch list", request.toString()); - - final WatchTask task = new WatchTask(link, request, this); - _timer.schedule(task, 0, period); - _watchList.add(task); - } + logger.debug("Adding a watch list"); + final WatchTask task = new WatchTask(link, request, this); + final ScheduledFuture future = selfTaskExecutor.scheduleAtFixedRate(task, delay, period, TimeUnit.MILLISECONDS); + watchList.add(future); } public void triggerUpdate() { - PingCommand command = _resource.getCurrentStatus(getId()); + PingCommand command = serverResource.getCurrentStatus(getId()); command.setOutOfBand(true); logger.debug("Sending out of band ping"); - - final Request request = new Request(_id, -1, command, false); + final Request request = new Request(id, -1, command, false); request.setSequence(getNextSequence()); try { - _link.send(request.toBytes()); + link.send(request.toBytes()); } catch (final ClosedChannelException e) { logger.warn("Unable to send ping update: {}", request.toString()); } } protected void cancelTasks() { - synchronized (_watchList) { - for (final WatchTask task : _watchList) { - task.cancel(); - } - logger.debug("Clearing {} tasks of watch list", _watchList.size()); - _watchList.clear(); + for (final ScheduledFuture task : watchList) { + task.cancel(true); } + logger.debug("Clearing watch list: {}", () -> watchList.size()); + watchList.clear(); } /** @@ -466,27 +508,47 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater * when host is added back */ protected void cleanupAgentZoneProperties() { - _shell.setPersistentProperty(null, "zone", ""); - _shell.setPersistentProperty(null, "cluster", ""); - _shell.setPersistentProperty(null, "pod", ""); + shell.setPersistentProperty(null, "zone", ""); + shell.setPersistentProperty(null, "cluster", ""); + shell.setPersistentProperty(null, "pod", ""); } - public synchronized void lockStartupTask(final Link link) { - _startup = new StartupTask(link); - _timer.schedule(_startup, _startupWait); + public void lockStartupTask(final Link link) { + logger.debug("Creating startup task for link: {}", () -> getLinkLog(link)); + StartupTask currentTask = startupTask.get(); + if (currentTask != null) { + logger.warn("A Startup task is already locked or in progress, cannot create 
for link {}", + getLinkLog(link)); + return; + } + currentTask = new StartupTask(link); + if (startupTask.compareAndSet(null, currentTask)) { + selfTaskExecutor.schedule(currentTask, startupWait, TimeUnit.SECONDS); + return; + } + logger.warn("Failed to lock a StartupTask for link: {}", getLinkLog(link)); + } + + protected boolean cancelStartupTask() { + StartupTask task = startupTask.getAndSet(null); + if (task != null) { + task.cancel(); + return true; + } + return false; } public void sendStartup(final Link link) { - final StartupCommand[] startup = _resource.initialize(); + final StartupCommand[] startup = serverResource.initialize(); if (startup != null) { - final String msHostList = _shell.getPersistentProperty(null, "host"); + final String msHostList = shell.getPersistentProperty(null, "host"); final Command[] commands = new Command[startup.length]; for (int i = 0; i < startup.length; i++) { setupStartupCommand(startup[i]); startup[i].setMSHostList(msHostList); commands[i] = startup[i]; } - final Request request = new Request(_id != null ? _id : -1, -1, commands, false, false); + final Request request = new Request(id != null ? id : -1, -1, commands, false, false); request.setSequence(getNextSequence()); logger.debug("Sending Startup: {}", request.toString()); @@ -494,31 +556,37 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater try { link.send(request.toBytes()); } catch (final ClosedChannelException e) { - logger.warn("Unable to send request: {}", request.toString()); + logger.warn("Unable to send request to {} due to '{}', request: {}", + getLinkLog(link), e.getMessage(), request); } - if (_resource instanceof ResourceStatusUpdater) { - ((ResourceStatusUpdater) _resource).registerStatusUpdater(this); + if (serverResource instanceof ResourceStatusUpdater) { + ((ResourceStatusUpdater) serverResource).registerStatusUpdater(this); } } } - protected void setupStartupCommand(final StartupCommand startup) { - InetAddress addr; + protected String retrieveHostname() { + logger.trace("Retrieving hostname with resource={}", () -> serverResource.getClass().getSimpleName()); + final String result = Script.runSimpleBashScript(Script.getExecutableAbsolutePath("hostname"), 500); + if (StringUtils.isNotBlank(result)) { + return result; + } try { - addr = InetAddress.getLocalHost(); + InetAddress address = InetAddress.getLocalHost(); + return address.toString(); } catch (final UnknownHostException e) { logger.warn("unknown host? ", e); throw new CloudRuntimeException("Cannot get local IP address"); } + } - final Script command = new Script("hostname", 500, logger); - final OutputInterpreter.OneLineParser parser = new OutputInterpreter.OneLineParser(); - final String result = command.execute(parser); - final String hostname = result == null ? 
parser.getLine() : addr.toString(); - + protected void setupStartupCommand(final StartupCommand startup) { startup.setId(getId()); - if (startup.getName() == null) { + if (StringUtils.isBlank(startup.getName())) { + if (StringUtils.isBlank(hostname)) { + hostname = retrieveHostname(); + } startup.setName(hostname); } startup.setDataCenter(getZone()); @@ -540,78 +608,75 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater return new ServerHandler(type, link, data); } - protected void reconnect(final Link link) { - if (!_reconnectAllowed) { + protected void closeAndTerminateLink(final Link link) { + if (link == null) { return; } - synchronized (this) { - if (_startup != null) { - _startup.cancel(); - _startup = null; - } + link.close(); + link.terminated(); + } + + protected void stopAndCleanupConnection(boolean waitForStop) { + if (connection == null) { + return; } - - if (link != null) { - link.close(); - link.terminated(); - } - - setLink(null); - cancelTasks(); - - _resource.disconnected(); - - logger.info("Lost connection to host: {}. Attempting reconnection while we still have {} commands in progress.", _shell.getConnectedHost(), _inProgress.get()); - - _connection.stop(); - + connection.stop(); try { - _connection.cleanUp(); + connection.cleanUp(); } catch (final IOException e) { logger.warn("Fail to clean up old connection. {}", e); } - - while (_connection.isStartup()) { - _shell.getBackoffAlgorithm().waitBeforeRetry(); + if (!waitForStop) { + return; } - do { - final String host = _shell.getNextHost(); - _connection = new NioClient("Agent", host, _shell.getPort(), _shell.getWorkers(), this); - logger.info("Reconnecting to host:{}", host); + shell.getBackoffAlgorithm().waitBeforeRetry(); + } while (connection.isStartup()); + } + + protected void reconnect(final Link link) { + if (!reconnectAllowed) { + logger.debug("Reconnect requested but it is not allowed {}", () -> getLinkLog(link)); + return; + } + cancelStartupTask(); + closeAndTerminateLink(link); + closeAndTerminateLink(this.link); + setLink(null); + cancelTasks(); + serverResource.disconnected(); + logger.info("Lost connection to host: {}. Attempting reconnection while we still have {} commands in progress.", shell.getConnectedHost(), commandsInProgress.get()); + stopAndCleanupConnection(true); + do { + final String host = shell.getNextHost(); + connection = new NioClient(getAgentName(), host, shell.getPort(), shell.getWorkers(), shell.getSslHandshakeTimeout(), this); + logger.info("Reconnecting to host: {}", host); try { - _connection.start(); + connection.start(); } catch (final NioConnectionException e) { logger.info("Attempted to re-connect to the server, but received an unexpected exception, trying again...", e); - _connection.stop(); - try { - _connection.cleanUp(); - } catch (final IOException ex) { - logger.warn("Fail to clean up old connection. 
{}", ex); - } + stopAndCleanupConnection(false); } - _shell.getBackoffAlgorithm().waitBeforeRetry(); - } while (!_connection.isStartup()); - _shell.updateConnectedHost(); - logger.info("Connected to the host: {}", _shell.getConnectedHost()); + shell.getBackoffAlgorithm().waitBeforeRetry(); + } while (!connection.isStartup()); + shell.updateConnectedHost(); + logger.info("Connected to the host: {}", shell.getConnectedHost()); } public void processStartupAnswer(final Answer answer, final Response response, final Link link) { - boolean cancelled = false; - synchronized (this) { - if (_startup != null) { - _startup.cancel(); - _startup = null; - } else { - cancelled = true; - } - } + boolean answerValid = cancelStartupTask(); final StartupAnswer startup = (StartupAnswer)answer; if (!startup.getResult()) { logger.error("Not allowed to connect to the server: {}", answer.getDetails()); + if (serverResource != null && !serverResource.isExitOnFailures()) { + logger.trace("{} does not allow exit on failure, reconnecting", + serverResource.getClass().getSimpleName()); + reconnect(link); + return; + } System.exit(1); } - if (cancelled) { + if (!answerValid) { logger.warn("Threw away a startup answer because we're reconnecting."); return; } @@ -622,12 +687,12 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater setId(startup.getHostId()); setUuid(startup.getHostUuid()); setName(startup.getHostName()); - _pingInterval = (long)startup.getPingInterval() * 1000; // change to ms. + pingInterval = startup.getPingInterval() * 1000L; // change to ms. - setLastPingResponseTime(); - scheduleWatch(link, response, _pingInterval, _pingInterval); + updateLastPingResponseTime(); + scheduleWatch(link, response, pingInterval, pingInterval); - _ugentTaskPool.setKeepAliveTime(2 * _pingInterval, TimeUnit.MILLISECONDS); + outRequestHandler.setKeepAliveTime(2 * pingInterval, TimeUnit.MILLISECONDS); logger.info("Startup Response Received: agent [id: {}, uuid: {}, name: {}]", startup.getHostId(), startup.getHostUuid(), startup.getHostName()); @@ -644,9 +709,6 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater final Command cmd = cmds[i]; Answer answer; try { - if (cmd.getContextParam("logid") != null) { - ThreadContext.put("logcontextid", cmd.getContextParam("logid")); - } if (logger.isDebugEnabled()) { if (!requestLogged) // ensures request is logged only once per method call { @@ -661,7 +723,7 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater if (cmd instanceof CronCommand) { final CronCommand watch = (CronCommand)cmd; - scheduleWatch(link, request, (long)watch.getInterval() * 1000, watch.getInterval() * 1000); + scheduleWatch(link, request, watch.getInterval() * 1000L, watch.getInterval() * 1000L); answer = new Answer(cmd, true, null); } else if (cmd instanceof ShutdownCommand) { final ShutdownCommand shutdown = (ShutdownCommand)cmd; @@ -670,10 +732,17 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater if (shutdown.isRemoveHost()) { cleanupAgentZoneProperties(); } - _reconnectAllowed = false; + reconnectAllowed = false; answer = new Answer(cmd, true, null); } else if (cmd instanceof ReadyCommand && ((ReadyCommand)cmd).getDetails() != null) { + logger.debug("Not ready to connect to mgt server: {}", ((ReadyCommand)cmd).getDetails()); + if (serverResource != null && !serverResource.isExitOnFailures()) { + logger.trace("{} does not allow exit on failure, reconnecting", + 
serverResource.getClass().getSimpleName()); + reconnect(link); + return; + } System.exit(1); return; } else if (cmd instanceof MaintainCommand) { @@ -681,12 +750,10 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater answer = new MaintainAnswer((MaintainCommand)cmd); } else if (cmd instanceof AgentControlCommand) { answer = null; - synchronized (_controlListeners) { - for (final IAgentControlListener listener : _controlListeners) { - answer = listener.processControlRequest(request, (AgentControlCommand)cmd); - if (answer != null) { - break; - } + for (final IAgentControlListener listener : controlListeners) { + answer = listener.processControlRequest(request, (AgentControlCommand)cmd); + if (answer != null) { + break; } } @@ -698,8 +765,8 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater answer = setupAgentKeystore((SetupKeyStoreCommand) cmd); } else if (cmd instanceof SetupCertificateCommand && ((SetupCertificateCommand) cmd).isHandleByAgent()) { answer = setupAgentCertificate((SetupCertificateCommand) cmd); - if (Host.Type.Routing.equals(_resource.getType())) { - scheduleServicesRestartTask(); + if (Host.Type.Routing.equals(serverResource.getType())) { + scheduleCertificateRenewalTask(); } } else if (cmd instanceof SetupMSListCommand) { answer = setupManagementServerList((SetupMSListCommand) cmd); @@ -707,11 +774,11 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater if (cmd instanceof ReadyCommand) { processReadyCommand(cmd); } - _inProgress.incrementAndGet(); + commandsInProgress.incrementAndGet(); try { - answer = _resource.executeRequest(cmd); + answer = serverResource.executeRequest(cmd); } finally { - _inProgress.decrementAndGet(); + commandsInProgress.decrementAndGet(); } if (answer == null) { logger.debug("Response: unsupported command {}", cmd.toString()); @@ -765,13 +832,13 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater final String keyStoreFile = agentFile.getParent() + "/" + KeyStoreUtils.KS_FILENAME; final String csrFile = agentFile.getParent() + "/" + KeyStoreUtils.CSR_FILENAME; - String storedPassword = _shell.getPersistentProperty(null, KeyStoreUtils.KS_PASSPHRASE_PROPERTY); + String storedPassword = shell.getPersistentProperty(null, KeyStoreUtils.KS_PASSPHRASE_PROPERTY); if (StringUtils.isEmpty(storedPassword)) { storedPassword = keyStorePassword; - _shell.setPersistentProperty(null, KeyStoreUtils.KS_PASSPHRASE_PROPERTY, storedPassword); + shell.setPersistentProperty(null, KeyStoreUtils.KS_PASSPHRASE_PROPERTY, storedPassword); } - Script script = new Script(_keystoreSetupPath, 300000, logger); + Script script = new Script(keystoreSetupSetupPath, 300000, logger); script.add(agentFile.getAbsolutePath()); script.add(keyStoreFile); script.add(storedPassword); @@ -815,8 +882,8 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater throw new CloudRuntimeException("Unable to save received agent client and ca certificates", e); } - String ksPassphrase = _shell.getPersistentProperty(null, KeyStoreUtils.KS_PASSPHRASE_PROPERTY); - Script script = new Script(_keystoreCertImportPath, 300000, logger); + String ksPassphrase = shell.getPersistentProperty(null, KeyStoreUtils.KS_PASSPHRASE_PROPERTY); + Script script = new Script(keystoreCertImportScriptPath, 300000, logger); script.add(agentFile.getAbsolutePath()); script.add(ksPassphrase); script.add(keyStoreFile); @@ -838,9 +905,9 @@ public class Agent implements HandlerFactory, 
IAgentControl, AgentStatusUpdater if (CollectionUtils.isNotEmpty(msList) && StringUtils.isNotEmpty(lbAlgorithm)) { try { final String newMSHosts = String.format("%s%s%s", com.cloud.utils.StringUtils.toCSVList(msList), IAgentShell.hostLbAlgorithmSeparator, lbAlgorithm); - _shell.setPersistentProperty(null, "host", newMSHosts); - _shell.setHosts(newMSHosts); - _shell.resetHostCounter(); + shell.setPersistentProperty(null, "host", newMSHosts); + shell.setHosts(newMSHosts); + shell.resetHostCounter(); logger.info("Processed new management server list: {}", newMSHosts); } catch (final Exception e) { throw new CloudRuntimeException("Could not persist received management servers list", e); @@ -849,7 +916,7 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater if ("shuffle".equals(lbAlgorithm)) { scheduleHostLBCheckerTask(0); } else { - scheduleHostLBCheckerTask(_shell.getLbCheckerInterval(lbCheckInterval)); + scheduleHostLBCheckerTask(shell.getLbCheckerInterval(lbCheckInterval)); } } @@ -865,16 +932,14 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater processStartupAnswer(answer, response, link); } else if (answer instanceof AgentControlAnswer) { // Notice, we are doing callback while holding a lock! - synchronized (_controlListeners) { - for (final IAgentControlListener listener : _controlListeners) { - listener.processControlResponse(response, (AgentControlAnswer)answer); - } + for (final IAgentControlListener listener : controlListeners) { + listener.processControlResponse(response, (AgentControlAnswer)answer); } - } else if (answer instanceof PingAnswer && (((PingAnswer) answer).isSendStartup()) && _reconnectAllowed) { + } else if (answer instanceof PingAnswer && (((PingAnswer) answer).isSendStartup()) && reconnectAllowed) { logger.info("Management server requested startup command to reinitialize the agent"); sendStartup(link); } else { - setLastPingResponseTime(); + updateLastPingResponseTime(); } } @@ -911,22 +976,24 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater public void processOtherTask(final Task task) { final Object obj = task.get(); if (obj instanceof Response) { - if (System.currentTimeMillis() - _lastPingResponseTime > _pingInterval * _shell.getPingRetries()) { - logger.error("Ping Interval has gone past {}. Won't reconnect to mgt server, as connection is still alive", _pingInterval * _shell.getPingRetries()); + if (System.currentTimeMillis() - lastPingResponseTime.get() > pingInterval * shell.getPingRetries()) { + logger.error("Ping Interval has gone past {}. 
Won't reconnect to mgt server, as connection is still alive", + pingInterval * shell.getPingRetries()); return; } - final PingCommand ping = _resource.getCurrentStatus(getId()); - final Request request = new Request(_id, -1, ping, false); + final PingCommand ping = serverResource.getCurrentStatus(getId()); + final Request request = new Request(id, -1, ping, false); request.setSequence(getNextSequence()); logger.debug("Sending ping: {}", request.toString()); try { task.getLink().send(request.toBytes()); //if i can send pingcommand out, means the link is ok - setLastPingResponseTime(); + updateLastPingResponseTime(); } catch (final ClosedChannelException e) { - logger.warn("Unable to send request: {}", request.toString()); + logger.warn("Unable to send request to {} due to '{}', request: {}", + getLinkLog(task.getLink()), e.getMessage(), request); } } else if (obj instanceof Request) { @@ -936,11 +1003,11 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater ThreadContext.put("logcontextid", command.getContextParam("logid")); } Answer answer = null; - _inProgress.incrementAndGet(); + commandsInProgress.incrementAndGet(); try { - answer = _resource.executeRequest(command); + answer = serverResource.executeRequest(command); } finally { - _inProgress.decrementAndGet(); + commandsInProgress.decrementAndGet(); } if (answer != null) { final Response response = new Response(req, answer); @@ -957,35 +1024,29 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater } } - public synchronized void setLastPingResponseTime() { - _lastPingResponseTime = System.currentTimeMillis(); + public void updateLastPingResponseTime() { + lastPingResponseTime.set(System.currentTimeMillis()); } - protected synchronized long getNextSequence() { - return _sequence++; + protected long getNextSequence() { + return sequence.getAndIncrement(); } @Override public void registerControlListener(final IAgentControlListener listener) { - synchronized (_controlListeners) { - _controlListeners.add(listener); - } + controlListeners.add(listener); } @Override public void unregisterControlListener(final IAgentControlListener listener) { - synchronized (_controlListeners) { - _controlListeners.remove(listener); - } + controlListeners.remove(listener); } @Override public AgentControlAnswer sendRequest(final AgentControlCommand cmd, final int timeoutInMilliseconds) throws AgentControlChannelException { final Request request = new Request(getId(), -1, new Command[] {cmd}, true, false); request.setSequence(getNextSequence()); - final AgentControlListener listener = new AgentControlListener(request); - registerControlListener(listener); try { postRequest(request); @@ -996,7 +1057,6 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater logger.warn("sendRequest is interrupted, exit waiting"); } } - return listener.getAnswer(); } finally { unregisterControlListener(listener); @@ -1011,9 +1071,9 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater } private void postRequest(final Request request) throws AgentControlChannelException { - if (_link != null) { + if (link != null) { try { - _link.send(request.toBytes()); + link.send(request.toBytes()); } catch (final ClosedChannelException e) { logger.warn("Unable to post agent control request: {}", request.toString()); throw new AgentControlChannelException("Unable to post agent control request due to " + e.getMessage()); @@ -1065,7 +1125,7 @@ public class Agent implements 
HandlerFactory, IAgentControl, AgentStatusUpdater } } - public class WatchTask extends ManagedContextTimerTask { + public class WatchTask implements Runnable { protected Request _request; protected Agent _agent; protected Link _link; @@ -1078,11 +1138,11 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater } @Override - protected void runInContext() { + public void run() { logger.trace("Scheduling {}", (_request instanceof Response ? "Ping" : "Watch Task")); try { if (_request instanceof Response) { - _ugentTaskPool.submit(new ServerHandler(Task.Type.OTHER, _link, _request)); + outRequestHandler.submit(new ServerHandler(Task.Type.OTHER, _link, _request)); } else { _link.schedule(new ServerHandler(Task.Type.OTHER, _link, _request)); } @@ -1092,34 +1152,31 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater } } - public class StartupTask extends ManagedContextTimerTask { + public class StartupTask implements Runnable { protected Link _link; - protected volatile boolean cancelled = false; + private final AtomicBoolean cancelled = new AtomicBoolean(false); public StartupTask(final Link link) { logger.debug("Startup task created"); _link = link; } - @Override - public synchronized boolean cancel() { + public boolean cancel() { // TimerTask.cancel may fail depends on the calling context - if (!cancelled) { - cancelled = true; - _startupWait = _startupWaitDefault; + if (cancelled.compareAndSet(false, true)) { + startupWait = DEFAULT_STARTUP_WAIT; logger.debug("Startup task cancelled"); - return super.cancel(); } return true; } @Override - protected synchronized void runInContext() { - if (!cancelled) { - logger.info("The startup command is now cancelled"); - cancelled = true; - _startup = null; - _startupWait = _startupWaitDefault * 2; + public void run() { + if (cancelled.compareAndSet(false, true)) { + logger.info("The running startup command is now invalid. Attempting reconnect"); + startupTask.set(null); + startupWait = DEFAULT_STARTUP_WAIT * 2; + logger.debug("Executing reconnect from task - {}", () -> getLinkLog(_link)); reconnect(_link); } } @@ -1151,7 +1208,7 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater @Override public void doTask(final Task task) throws TaskExecutionException { if (task.getType() == Task.Type.CONNECT) { - _shell.getBackoffAlgorithm().reset(); + shell.getBackoffAlgorithm().reset(); setLink(task.getLink()); sendStartup(task.getLink()); } else if (task.getType() == Task.Type.DATA) { @@ -1164,7 +1221,7 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater } else { //put the requests from mgt server into another thread pool, as the request may take a longer time to finish. Don't block the NIO main thread pool //processRequest(request, task.getLink()); - _executor.submit(new AgentRequestHandler(getType(), getLink(), request)); + requestHandler.submit(new AgentRequestHandler(getType(), getLink(), request)); } } catch (final ClassNotFoundException e) { logger.error("Unable to find this request "); @@ -1172,14 +1229,8 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater logger.error("Error parsing task", e); } } else if (task.getType() == Task.Type.DISCONNECT) { - try { - // an issue has been found if reconnect immediately after disconnecting. 
please refer to https://github.com/apache/cloudstack/issues/8517 - // wait 5 seconds before reconnecting - Thread.sleep(5000); - } catch (InterruptedException e) { - } + logger.debug("Executing disconnect task - {}", () -> getLinkLog(task.getLink())); reconnect(task.getLink()); - return; } else if (task.getType() == Task.Type.OTHER) { processOtherTask(task); } @@ -1202,26 +1253,26 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater protected void runInContext() { while (true) { try { - if (_inProgress.get() == 0) { + if (commandsInProgress.get() == 0) { logger.debug("Running post certificate renewal task to restart services."); // Let the resource perform any post certificate renewal cleanups - _resource.executeRequest(new PostCertificateRenewalCommand()); + serverResource.executeRequest(new PostCertificateRenewalCommand()); - IAgentShell shell = agent._shell; - ServerResource resource = agent._resource.getClass().newInstance(); + IAgentShell shell = agent.shell; + ServerResource resource = agent.serverResource.getClass().getDeclaredConstructor().newInstance(); // Stop current agent agent.cancelTasks(); - agent._reconnectAllowed = false; - Runtime.getRuntime().removeShutdownHook(agent._shutdownThread); + agent.reconnectAllowed = false; + Runtime.getRuntime().removeShutdownHook(agent.shutdownThread); agent.stop(ShutdownCommand.Requested, "Restarting due to new X509 certificates"); // Nullify references for GC - agent._shell = null; - agent._watchList = null; - agent._shutdownThread = null; - agent._controlListeners = null; + agent.shell = null; + agent.watchList = null; + agent.shutdownThread = null; + agent.controlListeners = null; agent = null; // Start a new agent instance @@ -1229,7 +1280,6 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater return; } logger.debug("Other tasks are in progress, will retry post certificate renewal command after few seconds"); - Thread.sleep(5000); } catch (final Exception e) { logger.warn("Failed to execute post certificate renewal command:", e); @@ -1244,35 +1294,34 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater @Override protected void runInContext() { try { - final String[] msList = _shell.getHosts(); + final String[] msList = shell.getHosts(); if (msList == null || msList.length < 1) { return; } final String preferredHost = msList[0]; - final String connectedHost = _shell.getConnectedHost(); - logger.trace("Running preferred host checker task, connected host={}, preferred host={}", connectedHost, preferredHost); - - if (preferredHost != null && !preferredHost.equals(connectedHost) && _link != null) { - boolean isHostUp = true; - try (final Socket socket = new Socket()) { - socket.connect(new InetSocketAddress(preferredHost, _shell.getPort()), 5000); - } catch (final IOException e) { - isHostUp = false; - logger.trace("Host: {} is not reachable", preferredHost); - - } - if (isHostUp && _link != null && _inProgress.get() == 0) { + final String connectedHost = shell.getConnectedHost(); + logger.debug("Running preferred host checker task, connected host={}, preferred host={}", + connectedHost, preferredHost); + if (preferredHost == null || preferredHost.equals(connectedHost) || link == null) { + return; + } + boolean isHostUp = false; + try (final Socket socket = new Socket()) { + socket.connect(new InetSocketAddress(preferredHost, shell.getPort()), 5000); + isHostUp = true; + } catch (final IOException e) { + logger.debug("Host: {} is not reachable", 
preferredHost); + } + if (isHostUp && link != null && commandsInProgress.get() == 0) { + if (logger.isDebugEnabled()) { logger.debug("Preferred host {} is found to be reachable, trying to reconnect", preferredHost); - - _shell.resetHostCounter(); - reconnect(_link); } + shell.resetHostCounter(); + reconnect(link); } } catch (Throwable t) { logger.error("Error caught while attempting to connect to preferred host", t); } } - } - } diff --git a/agent/src/main/java/com/cloud/agent/AgentShell.java b/agent/src/main/java/com/cloud/agent/AgentShell.java index 0699e00250b..c5257b95b7c 100644 --- a/agent/src/main/java/com/cloud/agent/AgentShell.java +++ b/agent/src/main/java/com/cloud/agent/AgentShell.java @@ -16,29 +16,6 @@ // under the License. package com.cloud.agent; -import com.cloud.agent.Agent.ExitStatus; -import com.cloud.agent.dao.StorageComponent; -import com.cloud.agent.dao.impl.PropertiesStorage; -import com.cloud.agent.properties.AgentProperties; -import com.cloud.agent.properties.AgentPropertiesFileHandler; -import com.cloud.resource.ServerResource; -import com.cloud.utils.LogUtils; -import com.cloud.utils.ProcessUtil; -import com.cloud.utils.PropertiesUtil; -import com.cloud.utils.backoff.BackoffAlgorithm; -import com.cloud.utils.backoff.impl.ConstantTimeBackoff; -import com.cloud.utils.exception.CloudRuntimeException; -import org.apache.commons.daemon.Daemon; -import org.apache.commons.daemon.DaemonContext; -import org.apache.commons.daemon.DaemonInitException; -import org.apache.commons.lang.math.NumberUtils; -import org.apache.commons.lang3.BooleanUtils; -import org.apache.commons.lang3.StringUtils; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.core.config.Configurator; - -import javax.naming.ConfigurationException; import java.io.File; import java.io.FileNotFoundException; import java.io.IOException; @@ -53,6 +30,31 @@ import java.util.Map; import java.util.Properties; import java.util.UUID; +import javax.naming.ConfigurationException; + +import org.apache.commons.daemon.Daemon; +import org.apache.commons.daemon.DaemonContext; +import org.apache.commons.daemon.DaemonInitException; +import org.apache.commons.lang.math.NumberUtils; +import org.apache.commons.lang3.BooleanUtils; +import org.apache.commons.lang3.StringUtils; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.core.config.Configurator; + +import com.cloud.agent.Agent.ExitStatus; +import com.cloud.agent.dao.StorageComponent; +import com.cloud.agent.dao.impl.PropertiesStorage; +import com.cloud.agent.properties.AgentProperties; +import com.cloud.agent.properties.AgentPropertiesFileHandler; +import com.cloud.resource.ServerResource; +import com.cloud.utils.LogUtils; +import com.cloud.utils.ProcessUtil; +import com.cloud.utils.PropertiesUtil; +import com.cloud.utils.backoff.BackoffAlgorithm; +import com.cloud.utils.backoff.impl.ConstantTimeBackoff; +import com.cloud.utils.exception.CloudRuntimeException; + public class AgentShell implements IAgentShell, Daemon { protected static Logger LOGGER = LogManager.getLogger(AgentShell.class); @@ -406,7 +408,9 @@ public class AgentShell implements IAgentShell, Daemon { LOGGER.info("Defaulting to the constant time backoff algorithm"); _backoff = new ConstantTimeBackoff(); - _backoff.configure("ConstantTimeBackoff", new HashMap()); + Map map = new HashMap<>(); + map.put("seconds", _properties.getProperty("backoff.seconds")); + 
_backoff.configure("ConstantTimeBackoff", map); } private void launchAgent() throws ConfigurationException { @@ -455,6 +459,11 @@ public class AgentShell implements IAgentShell, Daemon { agent.start(); } + @Override + public Integer getSslHandshakeTimeout() { + return AgentPropertiesFileHandler.getPropertyValue(AgentProperties.SSL_HANDSHAKE_TIMEOUT); + } + public synchronized int getNextAgentId() { return _nextAgentId++; } diff --git a/agent/src/main/java/com/cloud/agent/IAgentShell.java b/agent/src/main/java/com/cloud/agent/IAgentShell.java index 2dd08fffd45..7f04048795d 100644 --- a/agent/src/main/java/com/cloud/agent/IAgentShell.java +++ b/agent/src/main/java/com/cloud/agent/IAgentShell.java @@ -70,4 +70,6 @@ public interface IAgentShell { String getConnectedHost(); void launchNewAgent(ServerResource resource) throws ConfigurationException; + + Integer getSslHandshakeTimeout(); } diff --git a/agent/src/main/java/com/cloud/agent/properties/AgentProperties.java b/agent/src/main/java/com/cloud/agent/properties/AgentProperties.java index 52679811f7c..e5593f10460 100644 --- a/agent/src/main/java/com/cloud/agent/properties/AgentProperties.java +++ b/agent/src/main/java/com/cloud/agent/properties/AgentProperties.java @@ -811,6 +811,13 @@ public class AgentProperties{ */ public static final Property HOST_TAGS = new Property<>("host.tags", null, String.class); + /** + * Timeout for SSL handshake in seconds + * Data type: Integer.
+ * Default value: null + */ + public static final Property SSL_HANDSHAKE_TIMEOUT = new Property<>("ssl.handshake.timeout", null, Integer.class); + public static class Property { private String name; private T defaultValue; diff --git a/agent/src/test/java/com/cloud/agent/AgentShellTest.java b/agent/src/test/java/com/cloud/agent/AgentShellTest.java index 4126692546f..6d9758cc3dc 100644 --- a/agent/src/test/java/com/cloud/agent/AgentShellTest.java +++ b/agent/src/test/java/com/cloud/agent/AgentShellTest.java @@ -362,4 +362,11 @@ public class AgentShellTest { Assert.assertEquals(expected, shell.getConnectedHost()); } + + @Test + public void testGetSslHandshakeTimeout() { + Integer expected = 1; + agentPropertiesFileHandlerMocked.when(() -> AgentPropertiesFileHandler.getPropertyValue(Mockito.eq(AgentProperties.SSL_HANDSHAKE_TIMEOUT))).thenReturn(expected); + Assert.assertEquals(expected, agentShellSpy.getSslHandshakeTimeout()); + } } diff --git a/agent/src/test/java/com/cloud/agent/AgentTest.java b/agent/src/test/java/com/cloud/agent/AgentTest.java new file mode 100644 index 00000000000..65dc030ebd7 --- /dev/null +++ b/agent/src/test/java/com/cloud/agent/AgentTest.java @@ -0,0 +1,257 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.agent; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.io.IOException; +import java.net.InetSocketAddress; + +import javax.naming.ConfigurationException; + +import org.apache.logging.log4j.Logger; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; + +import com.cloud.resource.ServerResource; +import com.cloud.utils.backoff.impl.ConstantTimeBackoff; +import com.cloud.utils.nio.Link; +import com.cloud.utils.nio.NioConnection; + +@RunWith(MockitoJUnitRunner.class) +public class AgentTest { + Agent agent; + private AgentShell shell; + private ServerResource serverResource; + private Logger logger; + + @Before + public void setUp() throws ConfigurationException { + shell = mock(AgentShell.class); + serverResource = mock(ServerResource.class); + doReturn(true).when(serverResource).configure(any(), any()); + doReturn(1).when(shell).getWorkers(); + doReturn(1).when(shell).getPingRetries(); + agent = new Agent(shell, 1, serverResource); + logger = mock(Logger.class); + ReflectionTestUtils.setField(agent, "logger", logger); + } + + @Test + public void testGetLinkLogNullLinkReturnsEmptyString() { + Link link = null; + String result = agent.getLinkLog(link); + assertEquals("", result); + } + + @Test + public void testGetLinkLogLinkWithTraceEnabledReturnsLinkLogWithHashCode() { + Link link = mock(Link.class); + InetSocketAddress socketAddress = new InetSocketAddress("192.168.1.100", 1111); + when(link.getSocketAddress()).thenReturn(socketAddress); + when(logger.isTraceEnabled()).thenReturn(true); + + String result = agent.getLinkLog(link); + System.out.println(result); + assertTrue(result.startsWith(System.identityHashCode(link) + "-")); + assertTrue(result.contains("192.168.1.100")); + } + + @Test + public void testGetAgentNameWhenServerResourceIsNull() { + ReflectionTestUtils.setField(agent, "serverResource", null); + assertEquals("Agent", agent.getAgentName()); + } + + @Test + public void testGetAgentNameWhenAppendAgentNameIsTrue() { + when(serverResource.isAppendAgentNameToLogs()).thenReturn(true); + when(serverResource.getName()).thenReturn("TestAgent"); + + String agentName = agent.getAgentName(); + assertEquals("TestAgent", agentName); + } + + @Test + public void testGetAgentNameWhenAppendAgentNameIsFalse() { + when(serverResource.isAppendAgentNameToLogs()).thenReturn(false); + + String agentName = agent.getAgentName(); + assertEquals("Agent", agentName); + } + + @Test + public void testAgentInitialization() { + Runtime.getRuntime().removeShutdownHook(agent.shutdownThread); + when(shell.getPingRetries()).thenReturn(3); + when(shell.getWorkers()).thenReturn(5); + agent.setupShutdownHookAndInitExecutors(); + assertNotNull(agent.selfTaskExecutor); + assertNotNull(agent.outRequestHandler); + assertNotNull(agent.requestHandler); + } + + @Test + public void testAgentShutdownHookAdded() { + 
Runtime.getRuntime().removeShutdownHook(agent.shutdownThread); + agent.setupShutdownHookAndInitExecutors(); + verify(logger).trace("Adding shutdown hook"); + } + + @Test + public void testGetResourceGuidValidGuidAndResourceName() { + when(shell.getGuid()).thenReturn("12345"); + String result = agent.getResourceGuid(); + assertTrue(result.startsWith("12345-" + ServerResource.class.getSimpleName())); + } + + @Test + public void testGetZoneReturnsValidZone() { + when(shell.getZone()).thenReturn("ZoneA"); + String result = agent.getZone(); + assertEquals("ZoneA", result); + } + + @Test + public void testGetPodReturnsValidPod() { + when(shell.getPod()).thenReturn("PodA"); + String result = agent.getPod(); + assertEquals("PodA", result); + } + + @Test + public void testSetLinkAssignsLink() { + Link mockLink = mock(Link.class); + agent.setLink(mockLink); + assertEquals(mockLink, agent.link); + } + + @Test + public void testGetResourceReturnsServerResource() { + ServerResource mockResource = mock(ServerResource.class); + ReflectionTestUtils.setField(agent, "serverResource", mockResource); + ServerResource result = agent.getResource(); + assertSame(mockResource, result); + } + + @Test + public void testGetResourceName() { + String result = agent.getResourceName(); + assertTrue(result.startsWith(ServerResource.class.getSimpleName())); + } + + @Test + public void testUpdateLastPingResponseTimeUpdatesCurrentTime() { + long beforeUpdate = System.currentTimeMillis(); + agent.updateLastPingResponseTime(); + long updatedTime = agent.lastPingResponseTime.get(); + assertTrue(updatedTime >= beforeUpdate); + assertTrue(updatedTime <= System.currentTimeMillis()); + } + + @Test + public void testGetNextSequenceIncrementsSequence() { + long initialSequence = agent.getNextSequence(); + long nextSequence = agent.getNextSequence(); + assertEquals(initialSequence + 1, nextSequence); + long thirdSequence = agent.getNextSequence(); + assertEquals(nextSequence + 1, thirdSequence); + } + + @Test + public void testRegisterControlListenerAddsListener() { + IAgentControlListener listener = mock(IAgentControlListener.class); + agent.registerControlListener(listener); + assertTrue(agent.controlListeners.contains(listener)); + } + + @Test + public void testUnregisterControlListenerRemovesListener() { + IAgentControlListener listener = mock(IAgentControlListener.class); + agent.registerControlListener(listener); + assertTrue(agent.controlListeners.contains(listener)); + agent.unregisterControlListener(listener); + assertFalse(agent.controlListeners.contains(listener)); + } + + @Test + public void testCloseAndTerminateLinkLinkIsNullDoesNothing() { + agent.closeAndTerminateLink(null); + } + + @Test + public void testCloseAndTerminateLinkValidLinkCallsCloseAndTerminate() { + Link mockLink = mock(Link.class); + agent.closeAndTerminateLink(mockLink); + verify(mockLink).close(); + verify(mockLink).terminated(); + } + + @Test + public void testStopAndCleanupConnectionConnectionIsNullDoesNothing() { + agent.connection = null; + agent.stopAndCleanupConnection(false); + } + + @Test + public void testStopAndCleanupConnectionValidConnectionNoWaitStopsAndCleansUp() throws IOException { + NioConnection mockConnection = mock(NioConnection.class); + agent.connection = mockConnection; + agent.stopAndCleanupConnection(false); + verify(mockConnection).stop(); + verify(mockConnection).cleanUp(); + } + + @Test + public void testStopAndCleanupConnectionCleanupThrowsIOExceptionLogsWarning() throws IOException { + NioConnection mockConnection = 
mock(NioConnection.class); + agent.connection = mockConnection; + doThrow(new IOException("Cleanup failed")).when(mockConnection).cleanUp(); + agent.stopAndCleanupConnection(false); + verify(mockConnection).stop(); + verify(logger).warn(eq("Fail to clean up old connection. {}"), any(IOException.class)); + } + + @Test + public void testStopAndCleanupConnectionValidConnectionWaitForStopWaitsForStartupToStop() throws IOException { + NioConnection mockConnection = mock(NioConnection.class); + ConstantTimeBackoff mockBackoff = mock(ConstantTimeBackoff.class); + mockBackoff.setTimeToWait(0); + agent.connection = mockConnection; + when(shell.getBackoffAlgorithm()).thenReturn(mockBackoff); + when(mockConnection.isStartup()).thenReturn(true, true, false); + agent.stopAndCleanupConnection(true); + verify(mockConnection).stop(); + verify(mockConnection).cleanUp(); + verify(mockBackoff, times(3)).waitBeforeRetry(); + } +} diff --git a/api/src/main/java/org/apache/cloudstack/acl/RoleService.java b/api/src/main/java/org/apache/cloudstack/acl/RoleService.java index 68204d43253..f041c8342ae 100644 --- a/api/src/main/java/org/apache/cloudstack/acl/RoleService.java +++ b/api/src/main/java/org/apache/cloudstack/acl/RoleService.java @@ -30,6 +30,11 @@ public interface RoleService { ConfigKey EnableDynamicApiChecker = new ConfigKey<>("Advanced", Boolean.class, "dynamic.apichecker.enabled", "false", "If set to true, this enables the dynamic role-based api access checker and disables the default static role-based api access checker.", true); + ConfigKey DynamicApiCheckerCachePeriod = new ConfigKey<>("Advanced", Integer.class, + "dynamic.apichecker.cache.period", "0", + "Defines the expiration time in seconds for the Dynamic API Checker cache, determining how long cached data is retained before being refreshed. 
If set to zero then caching will be disabled", + false); + boolean isEnabled(); /** diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/domain/ListDomainsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/domain/ListDomainsCmd.java index b91e56dcaef..895e9328992 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/domain/ListDomainsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/domain/ListDomainsCmd.java @@ -100,7 +100,7 @@ public class ListDomainsCmd extends BaseListCmd implements UserCmd { dv = EnumSet.of(DomainDetails.all); } else { try { - ArrayList dc = new ArrayList(); + ArrayList dc = new ArrayList<>(); for (String detail : viewDetails) { dc.add(DomainDetails.valueOf(detail)); } @@ -142,7 +142,10 @@ public class ListDomainsCmd extends BaseListCmd implements UserCmd { if (CollectionUtils.isEmpty(response)) { return; } - _resourceLimitService.updateTaggedResourceLimitsAndCountsForDomains(response, getTag()); + EnumSet details = getDetails(); + if (details.contains(DomainDetails.all) || details.contains(DomainDetails.resource)) { + _resourceLimitService.updateTaggedResourceLimitsAndCountsForDomains(response, getTag()); + } if (!getShowIcon()) { return; } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/account/ListAccountsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/account/ListAccountsCmd.java index 9157188fdee..bd9ab30f4f1 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/account/ListAccountsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/account/ListAccountsCmd.java @@ -157,7 +157,10 @@ public class ListAccountsCmd extends BaseListDomainResourcesCmd implements UserC if (CollectionUtils.isEmpty(response)) { return; } - _resourceLimitService.updateTaggedResourceLimitsAndCountsForAccounts(response, getTag()); + EnumSet details = getDetails(); + if (details.contains(DomainDetails.all) || details.contains(DomainDetails.resource)) { + _resourceLimitService.updateTaggedResourceLimitsAndCountsForAccounts(response, getTag()); + } if (!getShowIcon()) { return; } diff --git a/api/src/main/java/org/apache/cloudstack/outofbandmanagement/OutOfBandManagementService.java b/api/src/main/java/org/apache/cloudstack/outofbandmanagement/OutOfBandManagementService.java index d670e4d3a88..4f6f1ad66c9 100644 --- a/api/src/main/java/org/apache/cloudstack/outofbandmanagement/OutOfBandManagementService.java +++ b/api/src/main/java/org/apache/cloudstack/outofbandmanagement/OutOfBandManagementService.java @@ -39,7 +39,7 @@ public interface OutOfBandManagementService { long getId(); boolean isOutOfBandManagementEnabled(Host host); void submitBackgroundPowerSyncTask(Host host); - boolean transitionPowerStateToDisabled(List hosts); + boolean transitionPowerStateToDisabled(List hostIds); OutOfBandManagementResponse enableOutOfBandManagement(DataCenter zone); OutOfBandManagementResponse enableOutOfBandManagement(Cluster cluster); diff --git a/api/src/test/java/org/apache/cloudstack/api/command/admin/domain/ListDomainsCmdTest.java b/api/src/test/java/org/apache/cloudstack/api/command/admin/domain/ListDomainsCmdTest.java index 3c9d4cb67ae..45f175e9a81 100644 --- a/api/src/test/java/org/apache/cloudstack/api/command/admin/domain/ListDomainsCmdTest.java +++ b/api/src/test/java/org/apache/cloudstack/api/command/admin/domain/ListDomainsCmdTest.java @@ -18,6 +18,7 @@ package org.apache.cloudstack.api.command.admin.domain; import java.util.List; +import 
org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.response.DomainResponse; import org.junit.Assert; import org.junit.Test; @@ -71,7 +72,17 @@ public class ListDomainsCmdTest { cmd._resourceLimitService = resourceLimitService; ReflectionTestUtils.setField(cmd, "tag", "abc"); cmd.updateDomainResponse(List.of(Mockito.mock(DomainResponse.class))); - Mockito.verify(resourceLimitService, Mockito.times(1)).updateTaggedResourceLimitsAndCountsForDomains(Mockito.any(), Mockito.any()); + Mockito.verify(resourceLimitService).updateTaggedResourceLimitsAndCountsForDomains(Mockito.any(), Mockito.any()); + } + + @Test + public void testUpdateDomainResponseWithDomainsMinDetails() { + ListDomainsCmd cmd = new ListDomainsCmd(); + ReflectionTestUtils.setField(cmd, "viewDetails", List.of(ApiConstants.DomainDetails.min.toString())); + cmd._resourceLimitService = resourceLimitService; + ReflectionTestUtils.setField(cmd, "tag", "abc"); + cmd.updateDomainResponse(List.of(Mockito.mock(DomainResponse.class))); + Mockito.verify(resourceLimitService, Mockito.never()).updateTaggedResourceLimitsAndCountsForDomains(Mockito.any(), Mockito.any()); } } diff --git a/api/src/test/java/org/apache/cloudstack/api/command/user/account/ListAccountsCmdTest.java b/api/src/test/java/org/apache/cloudstack/api/command/user/account/ListAccountsCmdTest.java index 896a7a6c826..a1ba9270345 100644 --- a/api/src/test/java/org/apache/cloudstack/api/command/user/account/ListAccountsCmdTest.java +++ b/api/src/test/java/org/apache/cloudstack/api/command/user/account/ListAccountsCmdTest.java @@ -18,6 +18,7 @@ package org.apache.cloudstack.api.command.user.account; import java.util.List; +import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.response.AccountResponse; import org.junit.Assert; import org.junit.Test; @@ -58,7 +59,7 @@ public class ListAccountsCmdTest { } @Test - public void testUpdateDomainResponseNoDomains() { + public void testUpdateAccountResponseNoAccounts() { ListAccountsCmd cmd = new ListAccountsCmd(); cmd._resourceLimitService = resourceLimitService; cmd.updateAccountResponse(null); @@ -66,11 +67,21 @@ public class ListAccountsCmdTest { } @Test - public void testUpdateDomainResponseWithDomains() { + public void testUpdateDomainResponseWithAccounts() { ListAccountsCmd cmd = new ListAccountsCmd(); cmd._resourceLimitService = resourceLimitService; ReflectionTestUtils.setField(cmd, "tag", "abc"); cmd.updateAccountResponse(List.of(Mockito.mock(AccountResponse.class))); Mockito.verify(resourceLimitService, Mockito.times(1)).updateTaggedResourceLimitsAndCountsForAccounts(Mockito.any(), Mockito.any()); } + + @Test + public void testUpdateDomainResponseWithAccountsMinDetails() { + ListAccountsCmd cmd = new ListAccountsCmd(); + ReflectionTestUtils.setField(cmd, "viewDetails", List.of(ApiConstants.DomainDetails.min.toString())); + cmd._resourceLimitService = resourceLimitService; + ReflectionTestUtils.setField(cmd, "tag", "abc"); + cmd.updateAccountResponse(List.of(Mockito.mock(AccountResponse.class))); + Mockito.verify(resourceLimitService, Mockito.never()).updateTaggedResourceLimitsAndCountsForAccounts(Mockito.any(), Mockito.any()); + } } diff --git a/core/src/main/java/com/cloud/resource/ServerResource.java b/core/src/main/java/com/cloud/resource/ServerResource.java index 1602a78d9a4..092019e7f21 100644 --- a/core/src/main/java/com/cloud/resource/ServerResource.java +++ b/core/src/main/java/com/cloud/resource/ServerResource.java @@ -78,4 +78,12 @@ public interface ServerResource extends 
Manager { void setAgentControl(IAgentControl agentControl); + default boolean isExitOnFailures() { + return true; + } + + default boolean isAppendAgentNameToLogs() { + return false; + } + } diff --git a/engine/api/src/main/java/com/cloud/vm/VirtualMachineManager.java b/engine/api/src/main/java/com/cloud/vm/VirtualMachineManager.java index e8ffd86ac4f..94c73d8f4d6 100644 --- a/engine/api/src/main/java/com/cloud/vm/VirtualMachineManager.java +++ b/engine/api/src/main/java/com/cloud/vm/VirtualMachineManager.java @@ -22,7 +22,6 @@ import java.util.LinkedHashMap; import java.util.List; import java.util.Map; -import com.cloud.exception.ResourceAllocationException; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.framework.config.ConfigKey; @@ -38,6 +37,7 @@ import com.cloud.exception.ConcurrentOperationException; import com.cloud.exception.InsufficientCapacityException; import com.cloud.exception.InsufficientServerCapacityException; import com.cloud.exception.OperationTimedoutException; +import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.ResourceUnavailableException; import com.cloud.host.Host; import com.cloud.hypervisor.Hypervisor.HypervisorType; @@ -101,6 +101,10 @@ public interface VirtualMachineManager extends Manager { "refer documentation", true, ConfigKey.Scope.Zone); + ConfigKey VmSyncPowerStateTransitioning = new ConfigKey<>("Advanced", Boolean.class, "vm.sync.power.state.transitioning", "true", + "Whether to sync power states of the transitioning and stalled VMs while processing VM power reports.", false); + + interface Topics { String VM_POWER_STATE = "vm.powerstate"; } @@ -286,24 +290,22 @@ public interface VirtualMachineManager extends Manager { /** * Obtains statistics for a list of VMs; CPU and network utilization - * @param hostId ID of the host - * @param hostName name of the host + * @param host host * @param vmIds list of VM IDs * @return map of VM ID and stats entry for the VM */ - HashMap getVirtualMachineStatistics(long hostId, String hostName, List vmIds); + HashMap getVirtualMachineStatistics(Host host, List vmIds); /** * Obtains statistics for a list of VMs; CPU and network utilization - * @param hostId ID of the host - * @param hostName name of the host - * @param vmMap map of VM IDs and the corresponding VirtualMachine object + * @param host host + * @param vmMap map of VM instanceName and its ID * @return map of VM ID and stats entry for the VM */ - HashMap getVirtualMachineStatistics(long hostId, String hostName, Map vmMap); + HashMap getVirtualMachineStatistics(Host host, Map vmMap); - HashMap> getVmDiskStatistics(long hostId, String hostName, Map vmMap); + HashMap> getVmDiskStatistics(Host host, Map vmInstanceNameIdMap); - HashMap> getVmNetworkStatistics(long hostId, String hostName, Map vmMap); + HashMap> getVmNetworkStatistics(Host host, Map vmInstanceNameIdMap); Map getDiskOfferingSuitabilityForVm(long vmId, List diskOfferingIds); diff --git a/engine/components-api/src/main/java/com/cloud/capacity/CapacityManager.java b/engine/components-api/src/main/java/com/cloud/capacity/CapacityManager.java index cbd137e8682..c3d45b98b00 100644 --- a/engine/components-api/src/main/java/com/cloud/capacity/CapacityManager.java +++ b/engine/components-api/src/main/java/com/cloud/capacity/CapacityManager.java @@ -16,14 +16,11 @@ // under the License. 
package com.cloud.capacity; -import java.util.Map; - import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import com.cloud.host.Host; import com.cloud.offering.ServiceOffering; -import com.cloud.service.ServiceOfferingVO; import com.cloud.storage.VMTemplateVO; import com.cloud.utils.Pair; import com.cloud.vm.VirtualMachine; @@ -130,6 +127,10 @@ public interface CapacityManager { true, ConfigKey.Scope.Zone); + ConfigKey CapacityCalculateWorkers = new ConfigKey<>(ConfigKey.CATEGORY_ADVANCED, Integer.class, + "capacity.calculate.workers", "1", + "Number of worker threads to be used for capacities calculation", true); + public boolean releaseVmCapacity(VirtualMachine vm, boolean moveFromReserved, boolean moveToReservered, Long hostId); void allocateVmCapacity(VirtualMachine vm, boolean fromLastHost); @@ -145,8 +146,6 @@ public interface CapacityManager { void updateCapacityForHost(Host host); - void updateCapacityForHost(Host host, Map offeringsMap); - /** * @param pool storage pool * @param templateForVmCreation template that will be used for vm creation @@ -163,12 +162,12 @@ public interface CapacityManager { /** * Check if specified host has capability to support cpu cores and speed freq - * @param hostId the host to be checked + * @param host the host to be checked * @param cpuNum cpu number to check * @param cpuSpeed cpu Speed to check * @return true if the count of host's running VMs >= hypervisor limit */ - boolean checkIfHostHasCpuCapability(long hostId, Integer cpuNum, Integer cpuSpeed); + boolean checkIfHostHasCpuCapability(Host host, Integer cpuNum, Integer cpuSpeed); /** * Check if cluster will cross threshold if the cpu/memory requested are accommodated diff --git a/engine/components-api/src/main/java/com/cloud/resource/ResourceManager.java b/engine/components-api/src/main/java/com/cloud/resource/ResourceManager.java index 343ad0fa212..8eb6462b483 100755 --- a/engine/components-api/src/main/java/com/cloud/resource/ResourceManager.java +++ b/engine/components-api/src/main/java/com/cloud/resource/ResourceManager.java @@ -138,13 +138,13 @@ public interface ResourceManager extends ResourceService, Configurable { public List listAllHostsInOneZoneNotInClusterByHypervisors(List types, long dcId, long clusterId); - public List listAvailHypervisorInZone(Long hostId, Long zoneId); + public List listAvailHypervisorInZone(Long zoneId); public HostVO findHostByGuid(String guid); public HostVO findHostByName(String name); - HostStats getHostStatistics(long hostId); + HostStats getHostStatistics(Host host); Long getGuestOSCategoryId(long hostId); diff --git a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java index 0b9f7bcb7db..7b31ec6a81b 100644 --- a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java +++ b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java @@ -22,6 +22,7 @@ import java.util.Map; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; +import org.apache.cloudstack.engine.subsystem.api.storage.Scope; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; @@ -42,6 +43,7 @@ import com.cloud.offering.DiskOffering; import com.cloud.offering.ServiceOffering; import com.cloud.storage.Storage.ImageFormat; import 
com.cloud.utils.Pair; +import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.DiskProfile; import com.cloud.vm.VMInstanceVO; @@ -214,6 +216,10 @@ public interface StorageManager extends StorageService { "when resize a volume upto resize capacity disable threshold (pool.storage.allocated.resize.capacity.disablethreshold)", true, ConfigKey.Scope.Zone); + ConfigKey StoragePoolHostConnectWorkers = new ConfigKey<>("Storage", Integer.class, + "storage.pool.host.connect.workers", "1", + "Number of worker threads to be used to connect hosts to a primary storage", true); + /** * should we execute in sequence not involving any storages? * @return tru if commands should execute in sequence @@ -365,6 +371,9 @@ public interface StorageManager extends StorageService { String getStoragePoolMountFailureReason(String error); + void connectHostsToPool(DataStore primaryStore, List hostIds, Scope scope, + boolean handleStorageConflictException, boolean errorOnNoUpHost) throws CloudRuntimeException; + boolean connectHostToSharedPool(Host host, long poolId) throws StorageUnavailableException, StorageConflictException; void disconnectHostFromSharedPool(Host host, StoragePool pool) throws StorageUnavailableException, StorageConflictException; diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java index f3add1557ce..1ab3b7ff892 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java @@ -18,6 +18,7 @@ package com.cloud.agent.manager; import java.lang.reflect.Constructor; import java.lang.reflect.InvocationTargetException; +import java.net.SocketAddress; import java.nio.channels.ClosedChannelException; import java.util.ArrayList; import java.util.Arrays; @@ -25,23 +26,20 @@ import java.util.Date; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ScheduledThreadPoolExecutor; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; +import java.util.stream.Collectors; import javax.inject.Inject; import javax.naming.ConfigurationException; -import com.cloud.configuration.Config; -import com.cloud.org.Cluster; -import com.cloud.utils.NumbersUtil; -import com.cloud.utils.db.GlobalLock; import org.apache.cloudstack.agent.lb.IndirectAgentLB; import org.apache.cloudstack.ca.CAManager; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; @@ -56,6 +54,8 @@ import org.apache.cloudstack.utils.identity.ManagementServerNode; import org.apache.commons.collections.MapUtils; import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import org.apache.commons.lang3.BooleanUtils; +import org.apache.commons.lang3.StringUtils; +import org.apache.logging.log4j.ThreadContext; import com.cloud.agent.AgentManager; import com.cloud.agent.Listener; @@ -82,6 +82,7 @@ import com.cloud.agent.api.UnsupportedAnswer; import com.cloud.agent.transport.Request; import 
com.cloud.agent.transport.Response; import com.cloud.alert.AlertManager; +import com.cloud.configuration.Config; import com.cloud.configuration.ManagementServiceConfiguration; import com.cloud.dc.ClusterVO; import com.cloud.dc.DataCenterVO; @@ -101,15 +102,18 @@ import com.cloud.host.Status.Event; import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.hypervisor.HypervisorGuruManager; +import com.cloud.org.Cluster; import com.cloud.resource.Discoverer; import com.cloud.resource.ResourceManager; import com.cloud.resource.ResourceState; import com.cloud.resource.ServerResource; +import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.concurrency.NamedThreadFactory; import com.cloud.utils.db.DB; import com.cloud.utils.db.EntityManager; +import com.cloud.utils.db.GlobalLock; import com.cloud.utils.db.QueryBuilder; import com.cloud.utils.db.SearchCriteria.Op; import com.cloud.utils.db.TransactionLegacy; @@ -124,8 +128,6 @@ import com.cloud.utils.nio.Link; import com.cloud.utils.nio.NioServer; import com.cloud.utils.nio.Task; import com.cloud.utils.time.InaccurateClock; -import org.apache.commons.lang3.StringUtils; -import org.apache.logging.log4j.ThreadContext; /** * Implementation of the Agent Manager. This class controls the connection to the agents. @@ -143,7 +145,6 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl protected List _loadingAgents = new ArrayList(); protected Map _commandTimeouts = new HashMap<>(); private int _monitorId = 0; - private final Lock _agentStatusLock = new ReentrantLock(); @Inject protected CAManager caService; @@ -189,6 +190,9 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl protected StateMachine2 _statusStateMachine = Status.getStateMachine(); private final ConcurrentHashMap _pingMap = new ConcurrentHashMap(10007); + private int maxConcurrentNewAgentConnections; + private final ConcurrentHashMap newAgentConnections = new ConcurrentHashMap<>(); + protected ScheduledExecutorService newAgentConnectionsMonitor; @Inject ResourceManager _resourceMgr; @@ -198,6 +202,14 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl protected final ConfigKey Workers = new ConfigKey("Advanced", Integer.class, "workers", "5", "Number of worker threads handling remote agent connections.", false); protected final ConfigKey Port = new ConfigKey("Advanced", Integer.class, "port", "8250", "Port to listen on for remote agent connections.", false); + protected final ConfigKey RemoteAgentSslHandshakeTimeout = new ConfigKey<>("Advanced", + Integer.class, "agent.ssl.handshake.timeout", "30", + "Seconds after which SSL handshake times out during remote agent connections.", false); + protected final ConfigKey RemoteAgentMaxConcurrentNewConnections = new ConfigKey<>("Advanced", + Integer.class, "agent.max.concurrent.new.connections", "0", + "Number of maximum concurrent new connections server allows for remote agents. 
" + + "If set to zero (default value) then no limit will be enforced on concurrent new connections", + false); protected final ConfigKey AlertWait = new ConfigKey("Advanced", Integer.class, "alert.wait", "1800", "Seconds to wait before alerting on a disconnected agent", true); protected final ConfigKey DirectAgentLoadSize = new ConfigKey("Advanced", Integer.class, "direct.agent.load.size", "16", @@ -214,8 +226,6 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl logger.info("Ping Timeout is {}.", mgmtServiceConf.getPingTimeout()); - final int threads = DirectAgentLoadSize.value(); - _nodeId = ManagementServerNode.getManagementServerId(); logger.info("Configuring AgentManagerImpl. management server node id(msid): {}.", _nodeId); @@ -226,24 +236,31 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl registerForHostEvents(new SetHostParamsListener(), true, true, false); - _executor = new ThreadPoolExecutor(threads, threads, 60l, TimeUnit.SECONDS, new LinkedBlockingQueue(), new NamedThreadFactory("AgentTaskPool")); + final int agentTaskThreads = DirectAgentLoadSize.value(); + _executor = new ThreadPoolExecutor(agentTaskThreads, agentTaskThreads, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue(), new NamedThreadFactory("AgentTaskPool")); _connectExecutor = new ThreadPoolExecutor(100, 500, 60l, TimeUnit.SECONDS, new LinkedBlockingQueue(), new NamedThreadFactory("AgentConnectTaskPool")); // allow core threads to time out even when there are no items in the queue _connectExecutor.allowCoreThreadTimeOut(true); - _connection = new NioServer("AgentManager", Port.value(), Workers.value() + 10, this, caService); + maxConcurrentNewAgentConnections = RemoteAgentMaxConcurrentNewConnections.value(); + + _connection = new NioServer("AgentManager", Port.value(), Workers.value() + 10, + this, caService, RemoteAgentSslHandshakeTimeout.value()); logger.info("Listening on {} with {} workers.", Port.value(), Workers.value()); + final int directAgentPoolSize = DirectAgentPoolSize.value(); // executes all agent commands other than cron and ping - _directAgentExecutor = new ScheduledThreadPoolExecutor(DirectAgentPoolSize.value(), new NamedThreadFactory("DirectAgent")); + _directAgentExecutor = new ScheduledThreadPoolExecutor(directAgentPoolSize, new NamedThreadFactory("DirectAgent")); // executes cron and ping agent commands - _cronJobExecutor = new ScheduledThreadPoolExecutor(DirectAgentPoolSize.value(), new NamedThreadFactory("DirectAgentCronJob")); - logger.debug("Created DirectAgentAttache pool with size: {}.", DirectAgentPoolSize.value()); - _directAgentThreadCap = Math.round(DirectAgentPoolSize.value() * DirectAgentThreadCap.value()) + 1; // add 1 to always make the value > 0 + _cronJobExecutor = new ScheduledThreadPoolExecutor(directAgentPoolSize, new NamedThreadFactory("DirectAgentCronJob")); + logger.debug("Created DirectAgentAttache pool with size: {}.", directAgentPoolSize); + _directAgentThreadCap = Math.round(directAgentPoolSize * DirectAgentThreadCap.value()) + 1; // add 1 to always make the value > 0 _monitorExecutor = new ScheduledThreadPoolExecutor(1, new NamedThreadFactory("AgentMonitor")); + newAgentConnectionsMonitor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("NewAgentConnectionsMonitor")); + initializeCommandTimeouts(); return true; @@ -254,6 +271,28 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl return new AgentHandler(type, link, data); } + @Override + public int 
getMaxConcurrentNewConnectionsCount() { + return maxConcurrentNewAgentConnections; + } + + @Override + public int getNewConnectionsCount() { + return newAgentConnections.size(); + } + + @Override + public void registerNewConnection(SocketAddress address) { + logger.trace(String.format("Adding new agent connection from %s", address.toString())); + newAgentConnections.putIfAbsent(address.toString(), System.currentTimeMillis()); + } + + @Override + public void unregisterNewConnection(SocketAddress address) { + logger.trace(String.format("Removing new agent connection for %s", address.toString())); + newAgentConnections.remove(address.toString()); + } + @Override public int registerForHostEvents(final Listener listener, final boolean connections, final boolean commands, final boolean priority) { synchronized (_hostMonitors) { @@ -687,6 +726,10 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl _monitorExecutor.scheduleWithFixedDelay(new MonitorTask(), mgmtServiceConf.getPingInterval(), mgmtServiceConf.getPingInterval(), TimeUnit.SECONDS); + final int cleanupTime = Wait.value(); + newAgentConnectionsMonitor.scheduleAtFixedRate(new AgentNewConnectionsMonitorTask(), cleanupTime, + cleanupTime, TimeUnit.MINUTES); + return true; } @@ -844,6 +887,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl _connectExecutor.shutdownNow(); _monitorExecutor.shutdownNow(); + newAgentConnectionsMonitor.shutdownNow(); return true; } @@ -1312,6 +1356,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl if (attache == null) { logger.warn("Unable to create attache for agent: {}", _request); } + unregisterNewConnection(_link.getSocketAddress()); } } @@ -1594,21 +1639,16 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl @Override public boolean agentStatusTransitTo(final HostVO host, final Status.Event e, final long msId) { - try { - _agentStatusLock.lock(); - logger.debug("[Resource state = {}, Agent event = , Host = {}]", - host.getResourceState(), e.toString(), host); + logger.debug("[Resource state = {}, Agent event = , Host = {}]", + host.getResourceState(), e.toString(), host); - host.setManagementServerId(msId); - try { - return _statusStateMachine.transitTo(host, e, host.getId(), _hostDao); - } catch (final NoTransitionException e1) { - logger.debug("Cannot transit agent status with event {} for host {}, management server id is {}", e, host, msId); - throw new CloudRuntimeException(String.format( - "Cannot transit agent status with event %s for host %s, management server id is %d, %s", e, host, msId, e1.getMessage())); - } - } finally { - _agentStatusLock.unlock(); + host.setManagementServerId(msId); + try { + return _statusStateMachine.transitTo(host, e, host.getId(), _hostDao); + } catch (final NoTransitionException e1) { + logger.debug("Cannot transit agent status with event {} for host {}, management server id is {}", e, host, msId); + throw new CloudRuntimeException(String.format( + "Cannot transit agent status with event %s for host %s, management server id is %d, %s", e, host, msId, e1.getMessage())); } } @@ -1813,6 +1853,35 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } } + protected class AgentNewConnectionsMonitorTask extends ManagedContextRunnable { + @Override + protected void runInContext() { + logger.trace("Agent New Connections Monitor is started."); + final int cleanupTime = Wait.value(); + Set> entrySet = 
newAgentConnections.entrySet(); + long cutOff = System.currentTimeMillis() - (cleanupTime * 60 * 1000L); + if (logger.isDebugEnabled()) { + List expiredConnections = newAgentConnections.entrySet() + .stream() + .filter(e -> e.getValue() <= cutOff) + .map(Map.Entry::getKey) + .collect(Collectors.toList()); + logger.debug(String.format("Currently %d active new connections, of which %d have expired - %s", + entrySet.size(), + expiredConnections.size(), + StringUtils.join(expiredConnections))); + } + for (Map.Entry entry : entrySet) { + if (entry.getValue() <= cutOff) { + if (logger.isTraceEnabled()) { + logger.trace(String.format("Cleaning up new agent connection for %s", entry.getKey())); + } + newAgentConnections.remove(entry.getKey()); + } + } + } + } + protected class BehindOnPingListener implements Listener { @Override public boolean isRecurring() { @@ -1888,7 +1957,8 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl @Override public ConfigKey[] getConfigKeys() { return new ConfigKey[] { CheckTxnBeforeSending, Workers, Port, Wait, AlertWait, DirectAgentLoadSize, - DirectAgentPoolSize, DirectAgentThreadCap, EnableKVMAutoEnableDisable, ReadyCommandWait, GranularWaitTimeForCommands }; + DirectAgentPoolSize, DirectAgentThreadCap, EnableKVMAutoEnableDisable, ReadyCommandWait, + GranularWaitTimeForCommands, RemoteAgentSslHandshakeTimeout, RemoteAgentMaxConcurrentNewConnections }; } protected class SetHostParamsListener implements Listener { diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java index be327418205..dd3666e5561 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java @@ -49,11 +49,12 @@ import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.ha.dao.HAConfigDao; import org.apache.cloudstack.managed.context.ManagedContextRunnable; import org.apache.cloudstack.managed.context.ManagedContextTimerTask; +import org.apache.cloudstack.management.ManagementServerHost; import org.apache.cloudstack.outofbandmanagement.dao.OutOfBandManagementDao; import org.apache.cloudstack.shutdown.ShutdownManager; +import org.apache.cloudstack.shutdown.command.BaseShutdownManagementServerHostCommand; import org.apache.cloudstack.shutdown.command.CancelShutdownManagementServerHostCommand; import org.apache.cloudstack.shutdown.command.PrepareForShutdownManagementServerHostCommand; -import org.apache.cloudstack.shutdown.command.BaseShutdownManagementServerHostCommand; import org.apache.cloudstack.shutdown.command.TriggerShutdownManagementServerHostCommand; import org.apache.cloudstack.utils.identity.ManagementServerNode; import org.apache.cloudstack.utils.security.SSLUtils; @@ -73,7 +74,6 @@ import com.cloud.cluster.ClusterManager; import com.cloud.cluster.ClusterManagerListener; import com.cloud.cluster.ClusterServicePdu; import com.cloud.cluster.ClusteredAgentRebalanceService; -import org.apache.cloudstack.management.ManagementServerHost; import com.cloud.cluster.ManagementServerHostVO; import com.cloud.cluster.agentlb.AgentLoadBalancerPlanner; import com.cloud.cluster.agentlb.HostTransferMapVO; @@ -215,12 +215,10 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust continue; } } - - logger.debug("Loading directly connected 
host {}", host); + logger.debug("Loading directly connected {}", host); loadDirectlyConnectedHost(host, false); } catch (final Throwable e) { - logger.warn(" can not load directly connected host {}({}) due to ", - host, e); + logger.warn(" can not load directly connected {} due to ", host, e); } } } @@ -250,8 +248,8 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust final AgentAttache attache = new ClusteredAgentAttache(this, id, host.getUuid(), host.getName()); AgentAttache old = null; synchronized (_agents) { - old = _agents.get(id); - _agents.put(id, attache); + old = _agents.get(host.getId()); + _agents.put(host.getId(), attache); } if (old != null) { logger.debug("Remove stale agent attache from current management server"); @@ -550,13 +548,13 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust AgentAttache agent = findAttache(hostId); if (agent == null || !agent.forForward()) { if (isHostOwnerSwitched(host)) { - logger.debug("Host {} has switched to another management server, need to update agent map with a forwarding agent attache", host); + logger.debug("{} has switched to another management server, need to update agent map with a forwarding agent attache", host); agent = createAttache(host); } } if (agent == null) { final AgentUnavailableException ex = new AgentUnavailableException("Host with specified id is not in the right state: " + host.getStatus(), hostId); - ex.addProxyObject(_entityMgr.findById(Host.class, hostId).getUuid()); + ex.addProxyObject(host.getUuid()); throw ex; } @@ -1034,7 +1032,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust } else if (futureOwnerId == _nodeId) { final HostVO host = _hostDao.findById(hostId); try { - logger.debug("Disconnecting host {} as a part of rebalance process without notification", host); + logger.debug("Disconnecting {} as a part of rebalance process without notification", host); final AgentAttache attache = findAttache(hostId); if (attache != null) { @@ -1042,7 +1040,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust } if (result) { - logger.debug("Loading directly connected host {} to the management server {} as a part of rebalance process", host, _nodeId); + logger.debug("Loading directly connected {} to the management server {} as a part of rebalance process", host, _nodeId); result = loadDirectlyConnectedHost(host, true); } else { logger.warn("Failed to disconnect {} as a part of rebalance process without notification", host); @@ -1054,9 +1052,9 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust } if (result) { - logger.debug("Successfully loaded directly connected host {} to the management server {} a part of rebalance process without notification", host, _nodeId); + logger.debug("Successfully loaded directly connected {} to the management server {} a part of rebalance process without notification", host, _nodeId); } else { - logger.warn("Failed to load directly connected host {} to the management server {} a part of rebalance process without notification", host, _nodeId); + logger.warn("Failed to load directly connected {} to the management server {} a part of rebalance process without notification", host, _nodeId); } } diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java index a8b0130bdbc..6d27b0efed3 100755 --- 
a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java @@ -85,6 +85,7 @@ import org.apache.cloudstack.resource.ResourceCleanupService; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.to.VolumeObjectTO; +import org.apache.cloudstack.utils.cache.SingleCache; import org.apache.cloudstack.utils.identity.ManagementServerNode; import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import org.apache.cloudstack.vm.UnmanagedVMsManager; @@ -406,6 +407,10 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac private DomainDao domainDao; @Inject ResourceCleanupService resourceCleanupService; + @Inject + VmWorkJobDao vmWorkJobDao; + + private SingleCache> vmIdsInProgressCache; VmWorkJobHandlerProxy _jobHandlerProxy = new VmWorkJobHandlerProxy(this); @@ -450,6 +455,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac Long.class, "systemvm.root.disk.size", "-1", "Size of root volume (in GB) of system VMs and virtual routers", true); + private boolean syncTransitioningVmPowerState; + ScheduledExecutorService _executor = null; private long _nodeId; @@ -816,6 +823,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac @Override public boolean start() { + vmIdsInProgressCache = new SingleCache<>(10, vmWorkJobDao::listVmIdsWithPendingJob); _executor.scheduleAtFixedRate(new CleanupTask(), 5, VmJobStateReportInterval.value(), TimeUnit.SECONDS); _executor.scheduleAtFixedRate(new TransitionTask(), VmOpCleanupInterval.value(), VmOpCleanupInterval.value(), TimeUnit.SECONDS); cancelWorkItems(_nodeId); @@ -843,6 +851,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac _messageBus.subscribe(VirtualMachineManager.Topics.VM_POWER_STATE, MessageDispatcher.getDispatcher(this)); + syncTransitioningVmPowerState = Boolean.TRUE.equals(VmSyncPowerStateTransitioning.value()); + return true; } @@ -3506,7 +3516,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac if (MIGRATE_VM_ACROSS_CLUSTERS.valueIn(host.getDataCenterId()) && (HypervisorType.VMware.equals(host.getHypervisorType()) || !checkIfVmHasClusterWideVolumes(vm.getId()))) { logger.info("Searching for hosts in the zone for vm migration"); - List clustersToExclude = _clusterDao.listAllClusters(host.getDataCenterId()); + List clustersToExclude = _clusterDao.listAllClusterIds(host.getDataCenterId()); List clusterList = _clusterDao.listByDcHyType(host.getDataCenterId(), host.getHypervisorType().toString()); for (ClusterVO cluster : clusterList) { clustersToExclude.remove(cluster.getId()); @@ -3800,7 +3810,6 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac if (ping.getHostVmStateReport() != null) { _syncMgr.processHostVmStatePingReport(agentId, ping.getHostVmStateReport(), ping.getOutOfBand()); } - scanStalledVMInTransitionStateOnUpHost(agentId); processed = true; } @@ -4757,7 +4766,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac VmOpLockStateRetry, VmOpWaitInterval, ExecuteInSequence, VmJobCheckInterval, VmJobTimeout, VmJobStateReportInterval, VmConfigDriveLabel, VmConfigDriveOnPrimaryPool, VmConfigDriveForceHostCacheUse, VmConfigDriveUseHostCacheOnUnsupportedPool, 
HaVmRestartHostUp, ResourceCountRunningVMsonly, AllowExposeHypervisorHostname, AllowExposeHypervisorHostnameAccountLevel, SystemVmRootDiskSize, - AllowExposeDomainInMetadata, MetadataCustomCloudName, VmMetadataManufacturer, VmMetadataProductName + AllowExposeDomainInMetadata, MetadataCustomCloudName, VmMetadataManufacturer, VmMetadataProductName, + VmSyncPowerStateTransitioning }; } @@ -4955,20 +4965,46 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } } + /** + * Scans stalled VMs in transition states on an UP host and processes them accordingly. + * + *

<p>This method is executed only when the {@code syncTransitioningVmPowerState} flag is enabled. It identifies + * VMs stuck in specific states (e.g., Starting, Stopping, Migrating) on a host that is UP, except for those + * in the Expunging state, which require special handling.</p> + * + *
<p>The following conditions are checked during the scan: + * <ul> + *   <li>No pending {@code VmWork} job exists for the VM.</li> + *   <li>The VM is associated with the given {@code hostId}, and the host is UP.</li> + * </ul> + * </p> + * + *
<p>When a host is UP, a state report for the VMs will typically be received. However, certain scenarios + * (e.g., out-of-band changes or behavior specific to hypervisors like XenServer or KVM) might result in + * missing reports, preventing the state-sync logic from running. To address this, the method scans VMs + * based on their last update timestamp. If a VM remains stalled without a status update while its host is UP, + * it is assumed to be powered off, which is generally a safe assumption.</p>
+ * + * @param hostId the ID of the host to scan for stalled VMs in transition states. + */ private void scanStalledVMInTransitionStateOnUpHost(final long hostId) { - final long stallThresholdInMs = VmJobStateReportInterval.value() + (VmJobStateReportInterval.value() >> 1); - final Date cutTime = new Date(DateUtil.currentGMTTime().getTime() - stallThresholdInMs); - final List mostlikelyStoppedVMs = listStalledVMInTransitionStateOnUpHost(hostId, cutTime); - for (final Long vmId : mostlikelyStoppedVMs) { - final VMInstanceVO vm = _vmDao.findById(vmId); - assert vm != null; + if (!syncTransitioningVmPowerState) { + return; + } + if (!_hostDao.isHostUp(hostId)) { + return; + } + final long stallThresholdInMs = VmJobStateReportInterval.value() * 2; + final long cutTime = new Date(DateUtil.currentGMTTime().getTime() - stallThresholdInMs).getTime(); + final List hostTransitionVms = _vmDao.listByHostAndState(hostId, State.Starting, State.Stopping, State.Migrating); + + final List mostLikelyStoppedVMs = listStalledVMInTransitionStateOnUpHost(hostTransitionVms, cutTime); + for (final VMInstanceVO vm : mostLikelyStoppedVMs) { handlePowerOffReportWithNoPendingJobsOnVM(vm); } - final List vmsWithRecentReport = listVMInTransitionStateWithRecentReportOnUpHost(hostId, cutTime); - for (final Long vmId : vmsWithRecentReport) { - final VMInstanceVO vm = _vmDao.findById(vmId); - assert vm != null; + final List vmsWithRecentReport = listVMInTransitionStateWithRecentReportOnUpHost(hostTransitionVms, cutTime); + for (final VMInstanceVO vm : vmsWithRecentReport) { if (vm.getPowerState() == PowerState.PowerOn) { handlePowerOnReportWithNoPendingJobsOnVM(vm); } else { @@ -4977,6 +5013,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } } + private void scanStalledVMInTransitionStateOnDisconnectedHosts() { final Date cutTime = new Date(DateUtil.currentGMTTime().getTime() - VmOpWaitInterval.value() * 1000); final List stuckAndUncontrollableVMs = listStalledVMInTransitionStateOnDisconnectedHosts(cutTime); @@ -4989,89 +5026,58 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } } - private List listStalledVMInTransitionStateOnUpHost(final long hostId, final Date cutTime) { - final String sql = "SELECT i.* FROM vm_instance as i, host as h WHERE h.status = 'UP' " + - "AND h.id = ? AND i.power_state_update_time < ? 
AND i.host_id = h.id " + - "AND (i.state ='Starting' OR i.state='Stopping' OR i.state='Migrating') " + - "AND i.id NOT IN (SELECT w.vm_instance_id FROM vm_work_job AS w JOIN async_job AS j ON w.id = j.id WHERE j.job_status = ?)" + - "AND i.removed IS NULL"; - - final List l = new ArrayList<>(); - try (TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB)) { - String cutTimeStr = DateUtil.getDateDisplayString(TimeZone.getTimeZone("GMT"), cutTime); - - try { - PreparedStatement pstmt = txn.prepareAutoCloseStatement(sql); - - pstmt.setLong(1, hostId); - pstmt.setString(2, cutTimeStr); - pstmt.setInt(3, JobInfo.Status.IN_PROGRESS.ordinal()); - final ResultSet rs = pstmt.executeQuery(); - while (rs.next()) { - l.add(rs.getLong(1)); - } - } catch (SQLException e) { - logger.error("Unable to execute SQL [{}] with params {\"h.id\": {}, \"i.power_state_update_time\": \"{}\"} due to [{}].", sql, hostId, cutTimeStr, e.getMessage(), e); - } + private List listStalledVMInTransitionStateOnUpHost( + final List transitioningVms, final long cutTime) { + if (CollectionUtils.isEmpty(transitioningVms)) { + return transitioningVms; } - return l; + List vmIdsInProgress = vmIdsInProgressCache.get(); + return transitioningVms.stream() + .filter(v -> v.getPowerStateUpdateTime().getTime() < cutTime && !vmIdsInProgress.contains(v.getId())) + .collect(Collectors.toList()); } - private List listVMInTransitionStateWithRecentReportOnUpHost(final long hostId, final Date cutTime) { - final String sql = "SELECT i.* FROM vm_instance as i, host as h WHERE h.status = 'UP' " + - "AND h.id = ? AND i.power_state_update_time > ? AND i.host_id = h.id " + - "AND (i.state ='Starting' OR i.state='Stopping' OR i.state='Migrating') " + - "AND i.id NOT IN (SELECT w.vm_instance_id FROM vm_work_job AS w JOIN async_job AS j ON w.id = j.id WHERE j.job_status = ?)" + - "AND i.removed IS NULL"; - - final List l = new ArrayList<>(); - try (TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB)) { - String cutTimeStr = DateUtil.getDateDisplayString(TimeZone.getTimeZone("GMT"), cutTime); - int jobStatusInProgress = JobInfo.Status.IN_PROGRESS.ordinal(); - - try { - PreparedStatement pstmt = txn.prepareAutoCloseStatement(sql); - - pstmt.setLong(1, hostId); - pstmt.setString(2, cutTimeStr); - pstmt.setInt(3, jobStatusInProgress); - final ResultSet rs = pstmt.executeQuery(); - while (rs.next()) { - l.add(rs.getLong(1)); - } - } catch (final SQLException e) { - logger.error("Unable to execute SQL [{}] with params {\"h.id\": {}, \"i.power_state_update_time\": \"{}\", \"j.job_status\": {}} due to [{}].", sql, hostId, cutTimeStr, jobStatusInProgress, e.getMessage(), e); - } - return l; + private List listVMInTransitionStateWithRecentReportOnUpHost( + final List transitioningVms, final long cutTime) { + if (CollectionUtils.isEmpty(transitioningVms)) { + return transitioningVms; } + List vmIdsInProgress = vmIdsInProgressCache.get(); + return transitioningVms.stream() + .filter(v -> v.getPowerStateUpdateTime().getTime() > cutTime && !vmIdsInProgress.contains(v.getId())) + .collect(Collectors.toList()); } private List listStalledVMInTransitionStateOnDisconnectedHosts(final Date cutTime) { - final String sql = "SELECT i.* FROM vm_instance as i, host as h WHERE h.status != 'UP' " + - "AND i.power_state_update_time < ? 
AND i.host_id = h.id " + - "AND (i.state ='Starting' OR i.state='Stopping' OR i.state='Migrating') " + - "AND i.id NOT IN (SELECT w.vm_instance_id FROM vm_work_job AS w JOIN async_job AS j ON w.id = j.id WHERE j.job_status = ?)" + - "AND i.removed IS NULL"; + final String sql = "SELECT i.* " + + "FROM vm_instance AS i " + + "INNER JOIN host AS h ON i.host_id = h.id " + + "WHERE h.status != 'UP' " + + " AND i.power_state_update_time < ? " + + " AND i.state IN ('Starting', 'Stopping', 'Migrating') " + + " AND i.id NOT IN (SELECT vm_instance_id FROM vm_work_job AS w " + + " INNER JOIN async_job AS j ON w.id = j.id " + + " WHERE j.job_status = ?) " + + " AND i.removed IS NULL"; final List l = new ArrayList<>(); - try (TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB)) { - String cutTimeStr = DateUtil.getDateDisplayString(TimeZone.getTimeZone("GMT"), cutTime); - int jobStatusInProgress = JobInfo.Status.IN_PROGRESS.ordinal(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); + String cutTimeStr = DateUtil.getDateDisplayString(TimeZone.getTimeZone("GMT"), cutTime); + int jobStatusInProgress = JobInfo.Status.IN_PROGRESS.ordinal(); - try { - PreparedStatement pstmt = txn.prepareAutoCloseStatement(sql); + try { + PreparedStatement pstmt = txn.prepareAutoCloseStatement(sql); - pstmt.setString(1, cutTimeStr); - pstmt.setInt(2, jobStatusInProgress); - final ResultSet rs = pstmt.executeQuery(); - while (rs.next()) { - l.add(rs.getLong(1)); - } - } catch (final SQLException e) { - logger.error("Unable to execute SQL [{}] with params {\"i.power_state_update_time\": \"{}\", \"j.job_status\": {}} due to [{}].", sql, cutTimeStr, jobStatusInProgress, e.getMessage(), e); + pstmt.setString(1, cutTimeStr); + pstmt.setInt(2, jobStatusInProgress); + final ResultSet rs = pstmt.executeQuery(); + while (rs.next()) { + l.add(rs.getLong(1)); } - return l; + } catch (final SQLException e) { + logger.error("Unable to execute SQL [{}] with params {\"i.power_state_update_time\": \"{}\", \"j.job_status\": {}} due to [{}].", sql, cutTimeStr, jobStatusInProgress, e.getMessage(), e); } + return l; } public class VmStateSyncOutcome extends OutcomeImpl { @@ -5953,29 +5959,23 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } @Override - public HashMap getVirtualMachineStatistics(long hostId, String hostName, List vmIds) { + public HashMap getVirtualMachineStatistics(Host host, List vmIds) { HashMap vmStatsById = new HashMap<>(); if (CollectionUtils.isEmpty(vmIds)) { return vmStatsById; } - Map vmMap = new HashMap<>(); - for (Long vmId : vmIds) { - vmMap.put(vmId, _vmDao.findById(vmId)); - } - return getVirtualMachineStatistics(hostId, hostName, vmMap); + Map vmMap = _vmDao.getNameIdMapForVmIds(vmIds); + return getVirtualMachineStatistics(host, vmMap); } @Override - public HashMap getVirtualMachineStatistics(long hostId, String hostName, Map vmMap) { + public HashMap getVirtualMachineStatistics(Host host, Map vmInstanceNameIdMap) { HashMap vmStatsById = new HashMap<>(); - if (MapUtils.isEmpty(vmMap)) { + if (MapUtils.isEmpty(vmInstanceNameIdMap)) { return vmStatsById; } - Map vmNames = new HashMap<>(); - for (Map.Entry vmEntry : vmMap.entrySet()) { - vmNames.put(vmEntry.getValue().getInstanceName(), vmEntry.getKey()); - } - Answer answer = _agentMgr.easySend(hostId, new GetVmStatsCommand(new ArrayList<>(vmNames.keySet()), _hostDao.findById(hostId).getGuid(), hostName)); + Answer answer = _agentMgr.easySend(host.getId(), new GetVmStatsCommand( + new 
ArrayList<>(vmInstanceNameIdMap.keySet()), host.getGuid(), host.getName())); if (answer == null || !answer.getResult()) { logger.warn("Unable to obtain VM statistics."); return vmStatsById; @@ -5986,23 +5986,20 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac return vmStatsById; } for (Map.Entry entry : vmStatsByName.entrySet()) { - vmStatsById.put(vmNames.get(entry.getKey()), entry.getValue()); + vmStatsById.put(vmInstanceNameIdMap.get(entry.getKey()), entry.getValue()); } } return vmStatsById; } @Override - public HashMap> getVmDiskStatistics(long hostId, String hostName, Map vmMap) { + public HashMap> getVmDiskStatistics(Host host, Map vmInstanceNameIdMap) { HashMap> vmDiskStatsById = new HashMap<>(); - if (MapUtils.isEmpty(vmMap)) { + if (MapUtils.isEmpty(vmInstanceNameIdMap)) { return vmDiskStatsById; } - Map vmNames = new HashMap<>(); - for (Map.Entry vmEntry : vmMap.entrySet()) { - vmNames.put(vmEntry.getValue().getInstanceName(), vmEntry.getKey()); - } - Answer answer = _agentMgr.easySend(hostId, new GetVmDiskStatsCommand(new ArrayList<>(vmNames.keySet()), _hostDao.findById(hostId).getGuid(), hostName)); + Answer answer = _agentMgr.easySend(host.getId(), new GetVmDiskStatsCommand( + new ArrayList<>(vmInstanceNameIdMap.keySet()), host.getGuid(), host.getName())); if (answer == null || !answer.getResult()) { logger.warn("Unable to obtain VM disk statistics."); return vmDiskStatsById; @@ -6013,23 +6010,20 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac return vmDiskStatsById; } for (Map.Entry> entry: vmDiskStatsByName.entrySet()) { - vmDiskStatsById.put(vmNames.get(entry.getKey()), entry.getValue()); + vmDiskStatsById.put(vmInstanceNameIdMap.get(entry.getKey()), entry.getValue()); } } return vmDiskStatsById; } @Override - public HashMap> getVmNetworkStatistics(long hostId, String hostName, Map vmMap) { + public HashMap> getVmNetworkStatistics(Host host, Map vmInstanceNameIdMap) { HashMap> vmNetworkStatsById = new HashMap<>(); - if (MapUtils.isEmpty(vmMap)) { + if (MapUtils.isEmpty(vmInstanceNameIdMap)) { return vmNetworkStatsById; } - Map vmNames = new HashMap<>(); - for (Map.Entry vmEntry : vmMap.entrySet()) { - vmNames.put(vmEntry.getValue().getInstanceName(), vmEntry.getKey()); - } - Answer answer = _agentMgr.easySend(hostId, new GetVmNetworkStatsCommand(new ArrayList<>(vmNames.keySet()), _hostDao.findById(hostId).getGuid(), hostName)); + Answer answer = _agentMgr.easySend(host.getId(), new GetVmNetworkStatsCommand( + new ArrayList<>(vmInstanceNameIdMap.keySet()), host.getGuid(), host.getName())); if (answer == null || !answer.getResult()) { logger.warn("Unable to obtain VM network statistics."); return vmNetworkStatsById; @@ -6040,7 +6034,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac return vmNetworkStatsById; } for (Map.Entry> entry: vmNetworkStatsByName.entrySet()) { - vmNetworkStatsById.put(vmNames.get(entry.getKey()), entry.getValue()); + vmNetworkStatsById.put(vmInstanceNameIdMap.get(entry.getKey()), entry.getValue()); } } return vmNetworkStatsById; diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSyncImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSyncImpl.java index 94dddfdf18a..4b344ac4299 100644 --- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSyncImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSyncImpl.java @@ -16,27 +16,29 
@@ // under the License. package com.cloud.vm; -import java.text.SimpleDateFormat; import java.util.Date; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; import javax.inject.Inject; -import com.cloud.host.Host; -import com.cloud.host.HostVO; -import com.cloud.host.dao.HostDao; -import com.cloud.utils.Pair; import org.apache.cloudstack.framework.messagebus.MessageBus; import org.apache.cloudstack.framework.messagebus.PublishScope; -import org.apache.logging.log4j.Logger; +import org.apache.cloudstack.utils.cache.LazyCache; +import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.collections.MapUtils; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import com.cloud.agent.api.HostVmStateReportEntry; import com.cloud.configuration.ManagementServiceConfiguration; +import com.cloud.host.Host; +import com.cloud.host.HostVO; +import com.cloud.host.dao.HostDao; import com.cloud.utils.DateUtil; -import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.dao.VMInstanceDao; public class VirtualMachinePowerStateSyncImpl implements VirtualMachinePowerStateSync { @@ -47,7 +49,12 @@ public class VirtualMachinePowerStateSyncImpl implements VirtualMachinePowerStat @Inject HostDao hostDao; @Inject ManagementServiceConfiguration mgmtServiceConf; + private LazyCache vmCache; + private LazyCache hostCache; + public VirtualMachinePowerStateSyncImpl() { + vmCache = new LazyCache<>(16, 10, this::getVmFromId); + hostCache = new LazyCache<>(16, 10, this::getHostFromId); } @Override @@ -58,130 +65,141 @@ public class VirtualMachinePowerStateSyncImpl implements VirtualMachinePowerStat @Override public void processHostVmStateReport(long hostId, Map report) { - HostVO host = hostDao.findById(hostId); - logger.debug("Process host VM state report. host: {}", host); - - Map> translatedInfo = convertVmStateReport(report); - processReport(host, translatedInfo, false); + logger.debug("Process host VM state report. host: {}", hostCache.get(hostId)); + Map translatedInfo = convertVmStateReport(report); + processReport(hostId, translatedInfo, false); } @Override public void processHostVmStatePingReport(long hostId, Map report, boolean force) { - HostVO host = hostDao.findById(hostId); - logger.debug("Process host VM state report from ping process. host: {}", host); - - Map> translatedInfo = convertVmStateReport(report); - processReport(host, translatedInfo, force); + logger.debug("Process host VM state report from ping process. host: {}", hostCache.get(hostId)); + Map translatedInfo = convertVmStateReport(report); + processReport(hostId, translatedInfo, force); } - private void processReport(HostVO host, Map> translatedInfo, boolean force) { - - logger.debug("Process VM state report. host: {}, number of records in report: {}.", host, translatedInfo.size()); - - for (Map.Entry> entry : translatedInfo.entrySet()) { - - logger.debug("VM state report. host: {}, vm: {}, power state: {}", host, entry.getValue().second(), entry.getValue().first()); - - if (_instanceDao.updatePowerState(entry.getKey(), host.getId(), entry.getValue().first(), DateUtil.currentGMTTime())) { - logger.debug("VM state report is updated. 
host: {}, vm: {}, power state: {}", host, entry.getValue().second(), entry.getValue().first()); - - _messageBus.publish(null, VirtualMachineManager.Topics.VM_POWER_STATE, PublishScope.GLOBAL, entry.getKey()); - } else { - logger.trace("VM power state does not change, skip DB writing. vm: {}", entry.getValue().second()); - } + private void updateAndPublishVmPowerStates(long hostId, Map instancePowerStates, + Date updateTime) { + if (instancePowerStates.isEmpty()) { + return; } + Set vmIds = instancePowerStates.keySet(); + Map notUpdated = _instanceDao.updatePowerState(instancePowerStates, hostId, + updateTime); + if (notUpdated.size() > vmIds.size()) { + return; + } + for (Long vmId : vmIds) { + if (!notUpdated.isEmpty() && !notUpdated.containsKey(vmId)) { + logger.debug("VM state report is updated. {}, {}, power state: {}", + () -> hostCache.get(hostId), () -> vmCache.get(vmId), () -> instancePowerStates.get(vmId)); + _messageBus.publish(null, VirtualMachineManager.Topics.VM_POWER_STATE, + PublishScope.GLOBAL, vmId); + continue; + } + logger.trace("VM power state does not change, skip DB writing. {}", () -> vmCache.get(vmId)); + } + } + private List filterOutdatedFromMissingVmReport(List vmsThatAreMissingReport) { + List outdatedVms = vmsThatAreMissingReport.stream() + .filter(v -> !_instanceDao.isPowerStateUpToDate(v)) + .map(VMInstanceVO::getId) + .collect(Collectors.toList()); + if (CollectionUtils.isEmpty(outdatedVms)) { + return vmsThatAreMissingReport; + } + _instanceDao.resetVmPowerStateTracking(outdatedVms); + return vmsThatAreMissingReport.stream() + .filter(v -> !outdatedVms.contains(v.getId())) + .collect(Collectors.toList()); + } + + private void processMissingVmReport(long hostId, Set vmIds, boolean force) { // any state outdates should be checked against the time before this list was retrieved Date startTime = DateUtil.currentGMTTime(); // for all running/stopping VMs, we provide monitoring of missing report - List vmsThatAreMissingReport = _instanceDao.findByHostInStates(host.getId(), VirtualMachine.State.Running, - VirtualMachine.State.Stopping, VirtualMachine.State.Starting); - java.util.Iterator it = vmsThatAreMissingReport.iterator(); - while (it.hasNext()) { - VMInstanceVO instance = it.next(); - if (translatedInfo.get(instance.getId()) != null) - it.remove(); + List vmsThatAreMissingReport = _instanceDao.findByHostInStatesExcluding(hostId, vmIds, + VirtualMachine.State.Running, VirtualMachine.State.Stopping, VirtualMachine.State.Starting); + // here we need to be wary of out of band migration as opposed to other, more unexpected state changes + if (vmsThatAreMissingReport.isEmpty()) { + return; + } + Date currentTime = DateUtil.currentGMTTime(); + logger.debug("Run missing VM report. current time: {}", currentTime.getTime()); + if (!force) { + vmsThatAreMissingReport = filterOutdatedFromMissingVmReport(vmsThatAreMissingReport); } - // here we need to be wary of out of band migration as opposed to other, more unexpected state changes - if (vmsThatAreMissingReport.size() > 0) { - Date currentTime = DateUtil.currentGMTTime(); - logger.debug("Run missing VM report for host {}. 
current time: {}", host, currentTime.getTime()); - - // 2 times of sync-update interval for graceful period - long milliSecondsGracefullPeriod = mgmtServiceConf.getPingInterval() * 2000L; - - for (VMInstanceVO instance : vmsThatAreMissingReport) { - - // Make sure powerState is up to date for missing VMs - try { - if (!force && !_instanceDao.isPowerStateUpToDate(instance.getId())) { - logger.warn("Detected missing VM but power state is outdated, wait for another process report run for VM: {}", instance); - _instanceDao.resetVmPowerStateTracking(instance.getId()); - continue; - } - } catch (CloudRuntimeException e) { - logger.warn("Checked for missing powerstate of a none existing vm {}", instance, e); - continue; - } - - Date vmStateUpdateTime = instance.getPowerStateUpdateTime(); + // 2 times of sync-update interval for graceful period + long milliSecondsGracefulPeriod = mgmtServiceConf.getPingInterval() * 2000L; + Map instancePowerStates = new HashMap<>(); + for (VMInstanceVO instance : vmsThatAreMissingReport) { + Date vmStateUpdateTime = instance.getPowerStateUpdateTime(); + if (vmStateUpdateTime == null) { + logger.warn("VM power state update time is null, falling back to update time for {}", instance); + vmStateUpdateTime = instance.getUpdateTime(); if (vmStateUpdateTime == null) { - logger.warn("VM power state update time is null, falling back to update time for vm: {}", instance); - vmStateUpdateTime = instance.getUpdateTime(); - if (vmStateUpdateTime == null) { - logger.warn("VM update time is null, falling back to creation time for vm: {}", instance); - vmStateUpdateTime = instance.getCreated(); - } - } - - String lastTime = new SimpleDateFormat("yyyy/MM/dd'T'HH:mm:ss.SSS'Z'").format(vmStateUpdateTime); - logger.debug("Detected missing VM. host: {}, vm: {}, power state: {}, last state update: {}", - host, instance, VirtualMachine.PowerState.PowerReportMissing, lastTime); - - long milliSecondsSinceLastStateUpdate = currentTime.getTime() - vmStateUpdateTime.getTime(); - - if (force || milliSecondsSinceLastStateUpdate > milliSecondsGracefullPeriod) { - logger.debug("vm: {} - time since last state update({}ms) has passed graceful period", instance, milliSecondsSinceLastStateUpdate); - - // this is were a race condition might have happened if we don't re-fetch the instance; - // between the startime of this job and the currentTime of this missing-branch - // an update might have occurred that we should not override in case of out of band migration - if (_instanceDao.updatePowerState(instance.getId(), host.getId(), VirtualMachine.PowerState.PowerReportMissing, startTime)) { - logger.debug("VM state report is updated. host: {}, vm: {}, power state: PowerReportMissing ", host, instance); - - _messageBus.publish(null, VirtualMachineManager.Topics.VM_POWER_STATE, PublishScope.GLOBAL, instance.getId()); - } else { - logger.debug("VM power state does not change, skip DB writing. vm: {}", instance); - } - } else { - logger.debug("vm: {} - time since last state update({} ms) has not passed graceful period yet", instance, milliSecondsSinceLastStateUpdate); + logger.warn("VM update time is null, falling back to creation time for {}", instance); + vmStateUpdateTime = instance.getCreated(); } } + logger.debug("Detected missing VM. 
host: {}, vm id: {}({}), power state: {}, last state update: {}", + hostId, + instance.getId(), + instance.getUuid(), + VirtualMachine.PowerState.PowerReportMissing, + DateUtil.getOutputString(vmStateUpdateTime)); + long milliSecondsSinceLastStateUpdate = currentTime.getTime() - vmStateUpdateTime.getTime(); + if (force || (milliSecondsSinceLastStateUpdate > milliSecondsGracefulPeriod)) { + logger.debug("vm id: {} - time since last state update({} ms) has passed graceful period", + instance.getId(), milliSecondsSinceLastStateUpdate); + // this is where a race condition might have happened if we don't re-fetch the instance; + // between the startime of this job and the currentTime of this missing-branch + // an update might have occurred that we should not override in case of out of band migration + instancePowerStates.put(instance.getId(), VirtualMachine.PowerState.PowerReportMissing); + } else { + logger.debug("vm id: {} - time since last state update({} ms) has not passed graceful period yet", + instance.getId(), milliSecondsSinceLastStateUpdate); + } } - - logger.debug("Done with process of VM state report. host: {}", host); + updateAndPublishVmPowerStates(hostId, instancePowerStates, startTime); } - public Map> convertVmStateReport(Map states) { - final HashMap> map = new HashMap<>(); - if (states == null) { + private void processReport(long hostId, Map translatedInfo, boolean force) { + logger.debug("Process VM state report. {}, number of records in report: {}. VMs: [{}]", + () -> hostCache.get(hostId), + translatedInfo::size, + () -> translatedInfo.entrySet().stream().map(entry -> entry.getKey() + ":" + entry.getValue()) + .collect(Collectors.joining(", ")) + "]"); + updateAndPublishVmPowerStates(hostId, translatedInfo, DateUtil.currentGMTTime()); + + processMissingVmReport(hostId, translatedInfo.keySet(), force); + + logger.debug("Done with process of VM state report. host: {}", () -> hostCache.get(hostId)); + } + + public Map convertVmStateReport(Map states) { + final HashMap map = new HashMap<>(); + if (MapUtils.isEmpty(states)) { return map; } - + Map nameIdMap = _instanceDao.getNameIdMapForVmInstanceNames(states.keySet()); for (Map.Entry entry : states.entrySet()) { - VMInstanceVO vm = findVM(entry.getKey()); - if (vm != null) { - map.put(vm.getId(), new Pair<>(entry.getValue().getState(), vm)); + Long id = nameIdMap.get(entry.getKey()); + if (id != null) { + map.put(id, entry.getValue().getState()); } else { logger.debug("Unable to find matched VM in CloudStack DB. 
name: {} powerstate: {}", entry.getKey(), entry.getValue()); } } - return map; } - private VMInstanceVO findVM(String vmName) { - return _instanceDao.findVMByInstanceName(vmName); + protected VMInstanceVO getVmFromId(long vmId) { + return _instanceDao.findById(vmId); + } + + protected HostVO getHostFromId(long hostId) { + return hostDao.findById(hostId); } } diff --git a/engine/schema/src/main/java/com/cloud/capacity/dao/CapacityDao.java b/engine/schema/src/main/java/com/cloud/capacity/dao/CapacityDao.java index 9616f31d0c5..1bb79ce417a 100644 --- a/engine/schema/src/main/java/com/cloud/capacity/dao/CapacityDao.java +++ b/engine/schema/src/main/java/com/cloud/capacity/dao/CapacityDao.java @@ -28,6 +28,8 @@ import com.cloud.utils.db.GenericDao; public interface CapacityDao extends GenericDao { CapacityVO findByHostIdType(Long hostId, short capacityType); + List listByHostIdTypes(Long hostId, List capacityTypes); + List listClustersInZoneOrPodByHostCapacities(long id, long vmId, int requiredCpu, long requiredRam, short capacityTypeForOrdering, boolean isZone); List listHostsWithEnoughCapacity(int requiredCpu, long requiredRam, Long clusterId, String hostType); diff --git a/engine/schema/src/main/java/com/cloud/capacity/dao/CapacityDaoImpl.java b/engine/schema/src/main/java/com/cloud/capacity/dao/CapacityDaoImpl.java index 3acae985af4..5e7eee4566c 100644 --- a/engine/schema/src/main/java/com/cloud/capacity/dao/CapacityDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/capacity/dao/CapacityDaoImpl.java @@ -671,6 +671,18 @@ public class CapacityDaoImpl extends GenericDaoBase implements return findOneBy(sc); } + @Override + public List listByHostIdTypes(Long hostId, List capacityTypes) { + SearchBuilder sb = createSearchBuilder(); + sb.and("hostId", sb.entity().getHostOrPoolId(), SearchCriteria.Op.EQ); + sb.and("type", sb.entity().getCapacityType(), SearchCriteria.Op.IN); + sb.done(); + SearchCriteria sc = sb.create(); + sc.setParameters("hostId", hostId); + sc.setParameters("type", capacityTypes.toArray()); + return listBy(sc); + } + @Override public List listClustersInZoneOrPodByHostCapacities(long id, long vmId, int requiredCpu, long requiredRam, short capacityTypeForOrdering, boolean isZone) { TransactionLegacy txn = TransactionLegacy.currentTxn(); diff --git a/engine/schema/src/main/java/com/cloud/dc/ClusterDetailsDao.java b/engine/schema/src/main/java/com/cloud/dc/ClusterDetailsDao.java index 06c9c525504..5daab544b21 100644 --- a/engine/schema/src/main/java/com/cloud/dc/ClusterDetailsDao.java +++ b/engine/schema/src/main/java/com/cloud/dc/ClusterDetailsDao.java @@ -16,6 +16,7 @@ // under the License. package com.cloud.dc; +import java.util.Collection; import java.util.Map; import com.cloud.utils.db.GenericDao; @@ -29,6 +30,8 @@ public interface ClusterDetailsDao extends GenericDao { ClusterDetailsVO findDetail(long clusterId, String name); + Map findDetails(long clusterId, Collection names); + void deleteDetails(long clusterId); String getVmwareDcName(Long clusterId); diff --git a/engine/schema/src/main/java/com/cloud/dc/ClusterDetailsDaoImpl.java b/engine/schema/src/main/java/com/cloud/dc/ClusterDetailsDaoImpl.java index 0e40f8475c1..a4f6acb9057 100644 --- a/engine/schema/src/main/java/com/cloud/dc/ClusterDetailsDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/dc/ClusterDetailsDaoImpl.java @@ -16,13 +16,16 @@ // under the License. 
package com.cloud.dc; +import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.stream.Collectors; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.ConfigKey.Scope; import org.apache.cloudstack.framework.config.ScopedConfigStorage; +import org.apache.commons.collections.CollectionUtils; import com.cloud.utils.crypt.DBEncryptionUtil; import com.cloud.utils.db.GenericDaoBase; @@ -82,6 +85,23 @@ public class ClusterDetailsDaoImpl extends GenericDaoBase findDetails(long clusterId, Collection names) { + if (CollectionUtils.isEmpty(names)) { + return new HashMap<>(); + } + SearchBuilder sb = createSearchBuilder(); + sb.and("clusterId", sb.entity().getClusterId(), SearchCriteria.Op.EQ); + sb.and("name", sb.entity().getName(), SearchCriteria.Op.IN); + sb.done(); + SearchCriteria sc = sb.create(); + sc.setParameters("clusterId", clusterId); + sc.setParameters("name", names.toArray()); + List results = search(sc, null); + return results.stream() + .collect(Collectors.toMap(ClusterDetailsVO::getName, ClusterDetailsVO::getValue)); + } + @Override public void deleteDetails(long clusterId) { SearchCriteria sc = ClusterSearch.create(); diff --git a/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDao.java b/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDao.java index 6ecfdaeb058..bf12abd5114 100644 --- a/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDao.java +++ b/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDao.java @@ -16,15 +16,15 @@ // under the License. package com.cloud.dc.dao; +import java.util.List; +import java.util.Map; +import java.util.Set; + import com.cloud.cpu.CPU; import com.cloud.dc.ClusterVO; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.utils.db.GenericDao; -import java.util.List; -import java.util.Map; -import java.util.Set; - public interface ClusterDao extends GenericDao { List listByPodId(long podId); @@ -36,7 +36,7 @@ public interface ClusterDao extends GenericDao { List getAvailableHypervisorInZone(Long zoneId); - Set getDistictAvailableHypervisorsAcrossClusters(); + Set getDistinctAvailableHypervisorsAcrossClusters(); List listByDcHyType(long dcId, String hyType); @@ -46,9 +46,13 @@ public interface ClusterDao extends GenericDao { List listClustersWithDisabledPods(long zoneId); + Integer countAllByDcId(long zoneId); + + Integer countAllManagedAndEnabledByDcId(long zoneId); + List listClustersByDcId(long zoneId); - List listAllClusters(Long zoneId); + List listAllClusterIds(Long zoneId); boolean getSupportsResigning(long clusterId); diff --git a/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDaoImpl.java b/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDaoImpl.java index 9a56f0f2d94..af6b8397643 100644 --- a/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDaoImpl.java @@ -16,25 +16,6 @@ // under the License. 
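// Illustrative usage sketch, not part of this patch: the new ClusterDao counters answer
// "does this zone have any usable clusters?" without materialising ClusterVO lists.
// The checker class and method names below are hypothetical; only the DAO calls come
// from this patch.
import com.cloud.dc.dao.ClusterDao;

public class ZoneClusterChecker {
    private final ClusterDao clusterDao;

    public ZoneClusterChecker(ClusterDao clusterDao) {
        this.clusterDao = clusterDao;
    }

    // True when the zone has at least one cluster that is both Managed and Enabled.
    public boolean hasUsableCluster(long zoneId) {
        Integer total = clusterDao.countAllByDcId(zoneId);
        if (total == null || total == 0) {
            return false; // zone has no clusters at all
        }
        Integer usable = clusterDao.countAllManagedAndEnabledByDcId(zoneId);
        return usable != null && usable > 0;
    }
}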
package com.cloud.dc.dao; -import com.cloud.cpu.CPU; -import com.cloud.dc.ClusterDetailsDao; -import com.cloud.dc.ClusterDetailsVO; -import com.cloud.dc.ClusterVO; -import com.cloud.dc.HostPodVO; -import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.org.Grouping; -import com.cloud.utils.db.GenericDaoBase; -import com.cloud.utils.db.GenericSearchBuilder; -import com.cloud.utils.db.JoinBuilder; -import com.cloud.utils.db.SearchBuilder; -import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.SearchCriteria.Func; -import com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.TransactionLegacy; -import com.cloud.utils.exception.CloudRuntimeException; -import org.springframework.stereotype.Component; - -import javax.inject.Inject; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; @@ -46,6 +27,28 @@ import java.util.Map; import java.util.Set; import java.util.stream.Collectors; +import javax.inject.Inject; + +import org.springframework.stereotype.Component; + +import com.cloud.cpu.CPU; +import com.cloud.dc.ClusterDetailsDao; +import com.cloud.dc.ClusterDetailsVO; +import com.cloud.dc.ClusterVO; +import com.cloud.dc.HostPodVO; +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.org.Grouping; +import com.cloud.org.Managed; +import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.GenericSearchBuilder; +import com.cloud.utils.db.JoinBuilder; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; +import com.cloud.utils.db.SearchCriteria.Func; +import com.cloud.utils.db.SearchCriteria.Op; +import com.cloud.utils.db.TransactionLegacy; +import com.cloud.utils.exception.CloudRuntimeException; + @Component public class ClusterDaoImpl extends GenericDaoBase implements ClusterDao { @@ -58,7 +61,6 @@ public class ClusterDaoImpl extends GenericDaoBase implements C protected final SearchBuilder ClusterSearch; protected final SearchBuilder ClusterDistinctArchSearch; protected final SearchBuilder ClusterArchSearch; - protected GenericSearchBuilder ClusterIdSearch; private static final String GET_POD_CLUSTER_MAP_PREFIX = "SELECT pod_id, id FROM cloud.cluster WHERE cluster.id IN( "; @@ -98,6 +100,8 @@ public class ClusterDaoImpl extends GenericDaoBase implements C ZoneClusterSearch = createSearchBuilder(); ZoneClusterSearch.and("dataCenterId", ZoneClusterSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); + ZoneClusterSearch.and("allocationState", ZoneClusterSearch.entity().getAllocationState(), Op.EQ); + ZoneClusterSearch.and("managedState", ZoneClusterSearch.entity().getManagedState(), Op.EQ); ZoneClusterSearch.done(); ClusterIdSearch = createSearchBuilder(Long.class); @@ -167,23 +171,15 @@ public class ClusterDaoImpl extends GenericDaoBase implements C sc.setParameters("zoneId", zoneId); } List clusters = listBy(sc); - List hypers = new ArrayList(4); - for (ClusterVO cluster : clusters) { - hypers.add(cluster.getHypervisorType()); - } - - return hypers; + return clusters.stream() + .map(ClusterVO::getHypervisorType) + .distinct() + .collect(Collectors.toList()); } @Override - public Set getDistictAvailableHypervisorsAcrossClusters() { - SearchCriteria sc = ClusterSearch.create(); - List clusters = listBy(sc); - Set hypers = new HashSet<>(); - for (ClusterVO cluster : clusters) { - hypers.add(cluster.getHypervisorType()); - } - return hypers; + public Set getDistinctAvailableHypervisorsAcrossClusters() { + return new 
HashSet<>(getAvailableHypervisorInZone(null)); } @Override @@ -266,6 +262,23 @@ public class ClusterDaoImpl extends GenericDaoBase implements C return customSearch(sc, null); } + @Override + public Integer countAllByDcId(long zoneId) { + SearchCriteria sc = ZoneClusterSearch.create(); + sc.setParameters("dataCenterId", zoneId); + return getCount(sc); + } + + @Override + public Integer countAllManagedAndEnabledByDcId(long zoneId) { + SearchCriteria sc = ZoneClusterSearch.create(); + sc.setParameters("dataCenterId", zoneId); + sc.setParameters("allocationState", Grouping.AllocationState.Enabled); + sc.setParameters("managedState", Managed.ManagedState.Managed); + + return getCount(sc); + } + @Override public List listClustersByDcId(long zoneId) { SearchCriteria sc = ZoneClusterSearch.create(); @@ -289,7 +302,7 @@ public class ClusterDaoImpl extends GenericDaoBase implements C } @Override - public List listAllClusters(Long zoneId) { + public List listAllClusterIds(Long zoneId) { SearchCriteria sc = ClusterIdSearch.create(); if (zoneId != null) { sc.setParameters("dataCenterId", zoneId); diff --git a/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterIpAddressDaoImpl.java b/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterIpAddressDaoImpl.java index 48b9c83c64c..ba01e31f80a 100644 --- a/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterIpAddressDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterIpAddressDaoImpl.java @@ -294,8 +294,7 @@ public class DataCenterIpAddressDaoImpl extends GenericDaoBase result = listBy(sc); - return result.size(); + return getCount(sc); } public DataCenterIpAddressDaoImpl() { diff --git a/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterVnetDaoImpl.java b/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterVnetDaoImpl.java index 1c29e6a944c..ff668249779 100644 --- a/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterVnetDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterVnetDaoImpl.java @@ -81,7 +81,7 @@ public class DataCenterVnetDaoImpl extends GenericDaoBase sc = DcSearchAllocated.create(); sc.setParameters("physicalNetworkId", physicalNetworkId); - return listBy(sc).size(); + return getCount(sc); } @Override diff --git a/engine/schema/src/main/java/com/cloud/host/dao/HostDao.java b/engine/schema/src/main/java/com/cloud/host/dao/HostDao.java index a2df6db44e5..003bf4a34a6 100644 --- a/engine/schema/src/main/java/com/cloud/host/dao/HostDao.java +++ b/engine/schema/src/main/java/com/cloud/host/dao/HostDao.java @@ -27,6 +27,7 @@ import com.cloud.hypervisor.Hypervisor; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.info.RunningHostCountInfo; import com.cloud.resource.ResourceState; +import com.cloud.utils.Pair; import com.cloud.utils.db.GenericDao; import com.cloud.utils.fsm.StateDao; @@ -39,8 +40,14 @@ public interface HostDao extends GenericDao, StateDao status); + Integer countAllByTypeInZone(long zoneId, final Host.Type type); + Integer countUpAndEnabledHostsInZone(long zoneId); + + Pair countAllHostsAndCPUSocketsByType(Type type); + /** * Mark all hosts associated with a certain management server * as disconnected. 
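// Illustrative usage sketch, not part of this patch: countAllHostsAndCPUSocketsByType
// aggregates COUNT(*) and SUM(cpu_sockets) in a single query, so capacity reporting no
// longer has to load every HostVO. Per the implementation later in this patch the returned
// pair carries (host count, CPU socket sum); the summary class itself is an assumption.
import com.cloud.host.Host;
import com.cloud.host.dao.HostDao;
import com.cloud.utils.Pair;

public class RoutingHostSummary {
    private final HostDao hostDao;

    public RoutingHostSummary(HostDao hostDao) {
        this.hostDao = hostDao;
    }

    public String describeRoutingHosts() {
        Pair<Integer, Integer> countAndSockets = hostDao.countAllHostsAndCPUSocketsByType(Host.Type.Routing);
        return String.format("%d routing hosts, %d CPU sockets in total",
                countAndSockets.first(), countAndSockets.second());
    }
}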
@@ -75,32 +82,41 @@ public interface HostDao extends GenericDao, StateDao findHypervisorHostInCluster(long clusterId); + HostVO findAnyStateHypervisorHostInCluster(long clusterId); + HostVO findOldestExistentHypervisorHostInCluster(long clusterId); List listAllUpAndEnabledNonHAHosts(Type type, Long clusterId, Long podId, long dcId, String haTag); List findByDataCenterId(Long zoneId); + List listIdsByDataCenterId(Long zoneId); + List findByPodId(Long podId); + List listIdsByPodId(Long podId); + List findByClusterId(Long clusterId); + List listIdsByClusterId(Long clusterId); + + List listIdsForUpRouting(Long zoneId, Long podId, Long clusterId); + + List listIdsByType(Type type); + + List listIdsForUpEnabledByZoneAndHypervisor(Long zoneId, HypervisorType hypervisorType); + List findByClusterIdAndEncryptionSupport(Long clusterId); /** - * Returns hosts that are 'Up' and 'Enabled' from the given Data Center/Zone + * Returns host Ids that are 'Up' and 'Enabled' from the given Data Center/Zone */ - List listByDataCenterId(long id); + List listEnabledIdsByDataCenterId(long id); /** - * Returns hosts that are from the given Data Center/Zone and at a given state (e.g. Creating, Enabled, Disabled, etc). + * Returns host Ids that are 'Up' and 'Disabled' from the given Data Center/Zone */ - List listByDataCenterIdAndState(long id, ResourceState state); - - /** - * Returns hosts that are 'Up' and 'Disabled' from the given Data Center/Zone - */ - List listDisabledByDataCenterId(long id); + List listDisabledIdsByDataCenterId(long id); List listByDataCenterIdAndHypervisorType(long zoneId, Hypervisor.HypervisorType hypervisorType); @@ -110,8 +126,6 @@ public interface HostDao extends GenericDao, StateDao listAllHostsThatHaveNoRuleTag(Host.Type type, Long clusterId, Long podId, Long dcId); - List listAllHostsByType(Host.Type type); - HostVO findByPublicIp(String publicIp); List listClustersByHostTag(String hostTagOnOffering); @@ -171,4 +185,14 @@ public interface HostDao extends GenericDao, StateDao findClustersThatMatchHostTagRule(String computeOfferingTags); List listSsvmHostsWithPendingMigrateJobsOrderedByJobCount(); + + boolean isHostUp(long hostId); + + List findHostIdsByZoneClusterResourceStateTypeAndHypervisorType(final Long zoneId, final Long clusterId, + final List resourceStates, final List types, + final List hypervisorTypes); + + List listDistinctHypervisorTypes(final Long zoneId); + + List listByIds(final List ids); } diff --git a/engine/schema/src/main/java/com/cloud/host/dao/HostDaoImpl.java b/engine/schema/src/main/java/com/cloud/host/dao/HostDaoImpl.java index 63950294654..e5584239a32 100644 --- a/engine/schema/src/main/java/com/cloud/host/dao/HostDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/host/dao/HostDaoImpl.java @@ -20,6 +20,7 @@ import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Date; import java.util.HashMap; import java.util.HashSet; @@ -45,8 +46,8 @@ import com.cloud.dc.ClusterVO; import com.cloud.dc.dao.ClusterDao; import com.cloud.gpu.dao.HostGpuGroupsDao; import com.cloud.gpu.dao.VGPUTypesDao; -import com.cloud.host.Host; import com.cloud.host.DetailVO; +import com.cloud.host.Host; import com.cloud.host.Host.Type; import com.cloud.host.HostTagVO; import com.cloud.host.HostVO; @@ -59,6 +60,7 @@ import com.cloud.org.Grouping; import com.cloud.org.Managed; import com.cloud.resource.ResourceState; import com.cloud.utils.DateUtil; +import com.cloud.utils.Pair; 
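// Illustrative usage sketch, not part of this patch: the interface above swaps several
// "list full HostVO rows" methods for id-only variants. A caller that used to iterate
// listByDataCenterId(zoneId) can now work on ids and hydrate only the hosts it actually
// needs via listByIds(). The scanner class below is an assumption.
import java.util.List;

import com.cloud.host.HostVO;
import com.cloud.host.dao.HostDao;

public class ZoneHostScanner {
    private final HostDao hostDao;

    public ZoneHostScanner(HostDao hostDao) {
        this.hostDao = hostDao;
    }

    public void scanEnabledHosts(long zoneId) {
        List<Long> hostIds = hostDao.listEnabledIdsByDataCenterId(zoneId); // id-only projection
        if (hostIds.isEmpty()) {
            return;
        }
        // hydrate full rows only for the hosts that need more than an id
        for (HostVO host : hostDao.listByIds(hostIds)) {
            // ... per-host work ...
        }
    }
}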
import com.cloud.utils.db.Attribute; import com.cloud.utils.db.DB; import com.cloud.utils.db.Filter; @@ -74,8 +76,6 @@ import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.db.UpdateBuilder; import com.cloud.utils.exception.CloudRuntimeException; -import java.util.Arrays; - @DB @TableGenerator(name = "host_req_sq", table = "op_host", pkColumnName = "id", valueColumnName = "sequence", allocationSize = 1) public class HostDaoImpl extends GenericDaoBase implements HostDao { //FIXME: , ExternalIdDao { @@ -98,6 +98,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao protected SearchBuilder TypePodDcStatusSearch; + protected SearchBuilder IdsSearch; protected SearchBuilder IdStatusSearch; protected SearchBuilder TypeDcSearch; protected SearchBuilder TypeDcStatusSearch; @@ -124,6 +125,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao protected SearchBuilder UnmanagedApplianceSearch; protected SearchBuilder MaintenanceCountSearch; protected SearchBuilder HostTypeCountSearch; + protected SearchBuilder HostTypeClusterCountSearch; protected SearchBuilder ResponsibleMsCountSearch; protected SearchBuilder HostTypeZoneCountSearch; protected SearchBuilder ClusterStatusSearch; @@ -136,8 +138,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao protected SearchBuilder ManagedRoutingServersSearch; protected SearchBuilder SecondaryStorageVMSearch; - protected GenericSearchBuilder HostIdSearch; - protected GenericSearchBuilder HostsInStatusSearch; + protected GenericSearchBuilder HostsInStatusesSearch; protected GenericSearchBuilder CountRoutingByDc; protected SearchBuilder HostTransferSearch; protected SearchBuilder ClusterManagedSearch; @@ -187,12 +188,21 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao HostTypeCountSearch = createSearchBuilder(); HostTypeCountSearch.and("type", HostTypeCountSearch.entity().getType(), SearchCriteria.Op.EQ); + HostTypeCountSearch.and("zoneId", HostTypeCountSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); + HostTypeCountSearch.and("resourceState", HostTypeCountSearch.entity().getResourceState(), SearchCriteria.Op.EQ); HostTypeCountSearch.done(); ResponsibleMsCountSearch = createSearchBuilder(); ResponsibleMsCountSearch.and("managementServerId", ResponsibleMsCountSearch.entity().getManagementServerId(), SearchCriteria.Op.EQ); ResponsibleMsCountSearch.done(); + HostTypeClusterCountSearch = createSearchBuilder(); + HostTypeClusterCountSearch.and("cluster", HostTypeClusterCountSearch.entity().getClusterId(), SearchCriteria.Op.EQ); + HostTypeClusterCountSearch.and("type", HostTypeClusterCountSearch.entity().getType(), SearchCriteria.Op.EQ); + HostTypeClusterCountSearch.and("status", HostTypeClusterCountSearch.entity().getStatus(), SearchCriteria.Op.IN); + HostTypeClusterCountSearch.and("removed", HostTypeClusterCountSearch.entity().getRemoved(), SearchCriteria.Op.NULL); + HostTypeClusterCountSearch.done(); + HostTypeZoneCountSearch = createSearchBuilder(); HostTypeZoneCountSearch.and("type", HostTypeZoneCountSearch.entity().getType(), SearchCriteria.Op.EQ); HostTypeZoneCountSearch.and("dc", HostTypeZoneCountSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); @@ -240,6 +250,10 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao TypeClusterStatusSearch.and("resourceState", TypeClusterStatusSearch.entity().getResourceState(), SearchCriteria.Op.EQ); TypeClusterStatusSearch.done(); + IdsSearch = createSearchBuilder(); + IdsSearch.and("id", 
IdsSearch.entity().getId(), SearchCriteria.Op.IN); + IdsSearch.done(); + IdStatusSearch = createSearchBuilder(); IdStatusSearch.and("id", IdStatusSearch.entity().getId(), SearchCriteria.Op.EQ); IdStatusSearch.and("states", IdStatusSearch.entity().getStatus(), SearchCriteria.Op.IN); @@ -386,14 +400,14 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao AvailHypevisorInZone.groupBy(AvailHypevisorInZone.entity().getHypervisorType()); AvailHypevisorInZone.done(); - HostsInStatusSearch = createSearchBuilder(Long.class); - HostsInStatusSearch.selectFields(HostsInStatusSearch.entity().getId()); - HostsInStatusSearch.and("dc", HostsInStatusSearch.entity().getDataCenterId(), Op.EQ); - HostsInStatusSearch.and("pod", HostsInStatusSearch.entity().getPodId(), Op.EQ); - HostsInStatusSearch.and("cluster", HostsInStatusSearch.entity().getClusterId(), Op.EQ); - HostsInStatusSearch.and("type", HostsInStatusSearch.entity().getType(), Op.EQ); - HostsInStatusSearch.and("statuses", HostsInStatusSearch.entity().getStatus(), Op.IN); - HostsInStatusSearch.done(); + HostsInStatusesSearch = createSearchBuilder(Long.class); + HostsInStatusesSearch.selectFields(HostsInStatusesSearch.entity().getId()); + HostsInStatusesSearch.and("dc", HostsInStatusesSearch.entity().getDataCenterId(), Op.EQ); + HostsInStatusesSearch.and("pod", HostsInStatusesSearch.entity().getPodId(), Op.EQ); + HostsInStatusesSearch.and("cluster", HostsInStatusesSearch.entity().getClusterId(), Op.EQ); + HostsInStatusesSearch.and("type", HostsInStatusesSearch.entity().getType(), Op.EQ); + HostsInStatusesSearch.and("statuses", HostsInStatusesSearch.entity().getStatus(), Op.IN); + HostsInStatusesSearch.done(); CountRoutingByDc = createSearchBuilder(Long.class); CountRoutingByDc.select(null, Func.COUNT, null); @@ -456,11 +470,6 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao HostsInClusterSearch.and("server", HostsInClusterSearch.entity().getManagementServerId(), SearchCriteria.Op.NNULL); HostsInClusterSearch.done(); - HostIdSearch = createSearchBuilder(Long.class); - HostIdSearch.selectFields(HostIdSearch.entity().getId()); - HostIdSearch.and("dataCenterId", HostIdSearch.entity().getDataCenterId(), Op.EQ); - HostIdSearch.done(); - searchBuilderFindByRuleTag = _hostTagsDao.createSearchBuilder(); searchBuilderFindByRuleTag.and("is_tag_a_rule", searchBuilderFindByRuleTag.entity().getIsTagARule(), Op.EQ); searchBuilderFindByRuleTag.or("tagDoesNotExist", searchBuilderFindByRuleTag.entity().getIsTagARule(), Op.NULL); @@ -492,8 +501,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao sc.setParameters("resourceState", (Object[])states); sc.setParameters("cluster", clusterId); - List hosts = listBy(sc); - return hosts.size(); + return getCount(sc); } @Override @@ -504,36 +512,62 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao } @Override - public Integer countAllByTypeInZone(long zoneId, Type type) { - SearchCriteria sc = HostTypeCountSearch.create(); - sc.setParameters("type", type); - sc.setParameters("dc", zoneId); + public Integer countAllInClusterByTypeAndStates(Long clusterId, final Host.Type type, List status) { + SearchCriteria sc = HostTypeClusterCountSearch.create(); + if (clusterId != null) { + sc.setParameters("cluster", clusterId); + } + if (type != null) { + sc.setParameters("type", type); + } + if (status != null) { + sc.setParameters("status", status.toArray()); + } return getCount(sc); } @Override - public List listByDataCenterId(long id) { - return 
listByDataCenterIdAndState(id, ResourceState.Enabled); + public Integer countAllByTypeInZone(long zoneId, Type type) { + SearchCriteria sc = HostTypeCountSearch.create(); + sc.setParameters("type", type); + sc.setParameters("zoneId", zoneId); + return getCount(sc); } @Override - public List listByDataCenterIdAndState(long id, ResourceState state) { - SearchCriteria sc = scHostsFromZoneUpRouting(id); - sc.setParameters("resourceState", state); - return listBy(sc); + public Integer countUpAndEnabledHostsInZone(long zoneId) { + SearchCriteria sc = HostTypeCountSearch.create(); + sc.setParameters("type", Type.Routing); + sc.setParameters("resourceState", ResourceState.Enabled); + sc.setParameters("zoneId", zoneId); + return getCount(sc); } @Override - public List listDisabledByDataCenterId(long id) { - return listByDataCenterIdAndState(id, ResourceState.Disabled); + public Pair countAllHostsAndCPUSocketsByType(Type type) { + GenericSearchBuilder sb = createSearchBuilder(SumCount.class); + sb.select("sum", Func.SUM, sb.entity().getCpuSockets()); + sb.select("count", Func.COUNT, null); + sb.and("type", sb.entity().getType(), SearchCriteria.Op.EQ); + sb.done(); + SearchCriteria sc = sb.create(); + sc.setParameters("type", type); + SumCount result = customSearch(sc, null).get(0); + return new Pair<>((int)result.count, (int)result.sum); } - private SearchCriteria scHostsFromZoneUpRouting(long id) { - SearchCriteria sc = DcSearch.create(); - sc.setParameters("dc", id); - sc.setParameters("status", Status.Up); - sc.setParameters("type", Host.Type.Routing); - return sc; + private List listIdsForRoutingByZoneIdAndResourceState(long zoneId, ResourceState state) { + return listIdsBy(Type.Routing, Status.Up, state, null, zoneId, null, null); + } + + @Override + public List listEnabledIdsByDataCenterId(long id) { + return listIdsForRoutingByZoneIdAndResourceState(id, ResourceState.Enabled); + } + + @Override + public List listDisabledIdsByDataCenterId(long id) { + return listIdsForRoutingByZoneIdAndResourceState(id, ResourceState.Disabled); } @Override @@ -1178,6 +1212,11 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao return listBy(sc); } + @Override + public List listIdsByDataCenterId(Long zoneId) { + return listIdsBy(Type.Routing, null, null, null, zoneId, null, null); + } + @Override public List findByPodId(Long podId) { SearchCriteria sc = PodSearch.create(); @@ -1185,6 +1224,11 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao return listBy(sc); } + @Override + public List listIdsByPodId(Long podId) { + return listIdsBy(null, null, null, null, null, podId, null); + } + @Override public List findByClusterId(Long clusterId) { SearchCriteria sc = ClusterSearch.create(); @@ -1192,6 +1236,63 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao return listBy(sc); } + protected List listIdsBy(Host.Type type, Status status, ResourceState resourceState, + HypervisorType hypervisorType, Long zoneId, Long podId, Long clusterId) { + GenericSearchBuilder sb = createSearchBuilder(Long.class); + sb.selectFields(sb.entity().getId()); + sb.and("type", sb.entity().getType(), SearchCriteria.Op.EQ); + sb.and("status", sb.entity().getStatus(), SearchCriteria.Op.EQ); + sb.and("resourceState", sb.entity().getResourceState(), SearchCriteria.Op.EQ); + sb.and("hypervisorType", sb.entity().getHypervisorType(), SearchCriteria.Op.EQ); + sb.and("zoneId", sb.entity().getDataCenterId(), SearchCriteria.Op.EQ); + sb.and("podId", sb.entity().getPodId(), SearchCriteria.Op.EQ); 
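// --- Illustrative aside, not part of this patch -------------------------------------------
// The new isHostUp(hostId) (declared in HostDao above, implemented further down in this
// class) projects only the status column, so "is this host Up?" checks no longer fetch a
// whole HostVO. A hypothetical guard in a caller could look like:
//
//     if (!hostDao.isHostUp(hostId)) {
//         throw new CloudRuntimeException("Host " + hostId + " is not Up, skipping command");
//     }
// -------------------------------------------------------------------------------------------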
+ sb.and("clusterId", sb.entity().getClusterId(), SearchCriteria.Op.EQ); + sb.done(); + SearchCriteria sc = sb.create(); + if (type != null) { + sc.setParameters("type", type); + } + if (status != null) { + sc.setParameters("status", status); + } + if (resourceState != null) { + sc.setParameters("resourceState", resourceState); + } + if (hypervisorType != null) { + sc.setParameters("hypervisorType", hypervisorType); + } + if (zoneId != null) { + sc.setParameters("zoneId", zoneId); + } + if (podId != null) { + sc.setParameters("podId", podId); + } + if (clusterId != null) { + sc.setParameters("clusterId", clusterId); + } + return customSearch(sc, null); + } + + @Override + public List listIdsByClusterId(Long clusterId) { + return listIdsBy(null, null, null, null, null, null, clusterId); + } + + @Override + public List listIdsForUpRouting(Long zoneId, Long podId, Long clusterId) { + return listIdsBy(Type.Routing, Status.Up, null, null, zoneId, podId, clusterId); + } + + @Override + public List listIdsByType(Type type) { + return listIdsBy(type, null, null, null, null, null, null); + } + + @Override + public List listIdsForUpEnabledByZoneAndHypervisor(Long zoneId, HypervisorType hypervisorType) { + return listIdsBy(null, Status.Up, ResourceState.Enabled, hypervisorType, zoneId, null, null); + } + @Override public List findByClusterIdAndEncryptionSupport(Long clusterId) { SearchBuilder hostCapabilitySearch = _detailsDao.createSearchBuilder(); @@ -1244,6 +1345,15 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao return listBy(sc); } + @Override + public HostVO findAnyStateHypervisorHostInCluster(long clusterId) { + SearchCriteria sc = TypeClusterStatusSearch.create(); + sc.setParameters("type", Host.Type.Routing); + sc.setParameters("cluster", clusterId); + List list = listBy(sc, new Filter(1)); + return list.isEmpty() ? 
null : list.get(0); + } + @Override public HostVO findOldestExistentHypervisorHostInCluster(long clusterId) { SearchCriteria sc = TypeClusterStatusSearch.create(); @@ -1263,9 +1373,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao @Override public List listAllHosts(long zoneId) { - SearchCriteria sc = HostIdSearch.create(); - sc.addAnd("dataCenterId", SearchCriteria.Op.EQ, zoneId); - return customSearch(sc, null); + return listIdsBy(null, null, null, null, zoneId, null, null); } @Override @@ -1449,15 +1557,6 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao return result; } - @Override - public List listAllHostsByType(Host.Type type) { - SearchCriteria sc = TypeSearch.create(); - sc.setParameters("type", type); - sc.setParameters("resourceState", ResourceState.Enabled); - - return listBy(sc); - } - @Override public List listByType(Host.Type type) { SearchCriteria sc = TypeSearch.create(); @@ -1602,4 +1701,71 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao } return String.format(sqlFindHostInZoneToExecuteCommand, hostResourceStatus); } + + @Override + public boolean isHostUp(long hostId) { + GenericSearchBuilder sb = createSearchBuilder(Status.class); + sb.and("id", sb.entity().getId(), Op.EQ); + sb.selectFields(sb.entity().getStatus()); + SearchCriteria sc = sb.create(); + sc.setParameters("id", hostId); + List statuses = customSearch(sc, null); + return CollectionUtils.isNotEmpty(statuses) && Status.Up.equals(statuses.get(0)); + } + + @Override + public List findHostIdsByZoneClusterResourceStateTypeAndHypervisorType(final Long zoneId, final Long clusterId, + final List resourceStates, final List types, + final List hypervisorTypes) { + GenericSearchBuilder sb = createSearchBuilder(Long.class); + sb.selectFields(sb.entity().getId()); + sb.and("zoneId", sb.entity().getDataCenterId(), SearchCriteria.Op.EQ); + sb.and("clusterId", sb.entity().getClusterId(), SearchCriteria.Op.EQ); + sb.and("resourceState", sb.entity().getResourceState(), SearchCriteria.Op.IN); + sb.and("type", sb.entity().getType(), SearchCriteria.Op.IN); + if (CollectionUtils.isNotEmpty(hypervisorTypes)) { + sb.and().op(sb.entity().getHypervisorType(), SearchCriteria.Op.NULL); + sb.or("hypervisorTypes", sb.entity().getHypervisorType(), SearchCriteria.Op.IN); + sb.cp(); + } + sb.done(); + SearchCriteria sc = sb.create(); + if (zoneId != null) { + sc.setParameters("zoneId", zoneId); + } + if (clusterId != null) { + sc.setParameters("clusterId", clusterId); + } + if (CollectionUtils.isNotEmpty(hypervisorTypes)) { + sc.setParameters("hypervisorTypes", hypervisorTypes.toArray()); + } + sc.setParameters("resourceState", resourceStates.toArray()); + sc.setParameters("type", types.toArray()); + return customSearch(sc, null); + } + + @Override + public List listDistinctHypervisorTypes(final Long zoneId) { + GenericSearchBuilder sb = createSearchBuilder(HypervisorType.class); + sb.and("zoneId", sb.entity().getDataCenterId(), SearchCriteria.Op.EQ); + sb.and("type", sb.entity().getType(), SearchCriteria.Op.EQ); + sb.select(null, Func.DISTINCT, sb.entity().getHypervisorType()); + sb.done(); + SearchCriteria sc = sb.create(); + if (zoneId != null) { + sc.setParameters("zoneId", zoneId); + } + sc.setParameters("type", Type.Routing); + return customSearch(sc, null); + } + + @Override + public List listByIds(List ids) { + if (CollectionUtils.isEmpty(ids)) { + return new ArrayList<>(); + } + SearchCriteria sc = IdsSearch.create(); + sc.setParameters("id", ids.toArray()); + return 
search(sc, null); + } } diff --git a/engine/schema/src/main/java/com/cloud/network/dao/IPAddressDaoImpl.java b/engine/schema/src/main/java/com/cloud/network/dao/IPAddressDaoImpl.java index aa143838c34..5499d04e3a1 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/IPAddressDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/IPAddressDaoImpl.java @@ -421,7 +421,7 @@ public class IPAddressDaoImpl extends GenericDaoBase implemen public long countFreeIpsInVlan(long vlanDbId) { SearchCriteria sc = VlanDbIdSearchUnallocated.create(); sc.setParameters("vlanDbId", vlanDbId); - return listBy(sc).size(); + return getCount(sc); } @Override diff --git a/engine/schema/src/main/java/com/cloud/network/dao/NetworkDaoImpl.java b/engine/schema/src/main/java/com/cloud/network/dao/NetworkDaoImpl.java index fa448b026e4..0aae532eac5 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/NetworkDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/NetworkDaoImpl.java @@ -415,8 +415,7 @@ public class NetworkDaoImpl extends GenericDaoBaseimplements Ne sc.setParameters("broadcastUri", broadcastURI); sc.setParameters("guestType", guestTypes); sc.setJoinParameters("persistent", "persistent", isPersistent); - List persistentNetworks = search(sc, null); - return persistentNetworks.size(); + return getCount(sc); } @Override diff --git a/engine/schema/src/main/java/com/cloud/secstorage/CommandExecLogDaoImpl.java b/engine/schema/src/main/java/com/cloud/secstorage/CommandExecLogDaoImpl.java index a37acdf6029..8229c3a62fc 100644 --- a/engine/schema/src/main/java/com/cloud/secstorage/CommandExecLogDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/secstorage/CommandExecLogDaoImpl.java @@ -55,8 +55,7 @@ public class CommandExecLogDaoImpl extends GenericDaoBase sc = CommandSearch.create(); sc.setParameters("host_id", id); sc.setParameters("command_name", "CopyCommand"); - List copyCmds = customSearch(sc, null); - return copyCmds.size(); + return getCount(sc); } @Override diff --git a/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDao.java b/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDao.java index 48e63d8e2b5..ceb5b0a4fc1 100644 --- a/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDao.java +++ b/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDao.java @@ -54,7 +54,7 @@ public interface ServiceOfferingDao extends GenericDao List listPublicByCpuAndMemory(Integer cpus, Integer memory); - List listByHostTag(String tag); - ServiceOfferingVO findServiceOfferingByComputeOnlyDiskOffering(long diskOfferingId, boolean includingRemoved); + + List listIdsByHostTag(String tag); } diff --git a/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDaoImpl.java b/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDaoImpl.java index 706dcdc1b7b..803522fa6aa 100644 --- a/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDaoImpl.java @@ -34,6 +34,7 @@ import com.cloud.service.ServiceOfferingVO; import com.cloud.storage.Storage.ProvisioningType; import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.GenericSearchBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.exception.CloudRuntimeException; @@ -293,8 +294,9 @@ public class ServiceOfferingDaoImpl extends GenericDaoBase 
listByHostTag(String tag) { - SearchBuilder sb = createSearchBuilder(); + public List listIdsByHostTag(String tag) { + GenericSearchBuilder sb = createSearchBuilder(Long.class); + sb.selectFields(sb.entity().getId()); sb.and("tagNotNull", sb.entity().getHostTag(), SearchCriteria.Op.NNULL); sb.and().op("tagEq", sb.entity().getHostTag(), SearchCriteria.Op.EQ); sb.or("tagStartLike", sb.entity().getHostTag(), SearchCriteria.Op.LIKE); @@ -302,11 +304,12 @@ public class ServiceOfferingDaoImpl extends GenericDaoBase sc = sb.create(); + SearchCriteria sc = sb.create(); + sc.setParameters("tagEq", tag); sc.setParameters("tagStartLike", tag + ",%"); sc.setParameters("tagMidLike", "%," + tag + ",%"); sc.setParameters("tagEndLike", "%," + tag); - return listBy(sc); + return customSearch(sc, null); } } diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDao.java b/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDao.java index 62ef5b7570d..639c2571541 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDao.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDao.java @@ -34,7 +34,7 @@ public interface StoragePoolHostDao extends GenericDao List findHostsConnectedToPools(List poolIds); - List> getDatacenterStoragePoolHostInfo(long dcId, boolean sharedOnly); + boolean hasDatacenterStoragePoolHostInfo(long dcId, boolean sharedOnly); public void deletePrimaryRecordsForHost(long hostId); diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDaoImpl.java b/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDaoImpl.java index 987a42f410e..5a466af348c 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDaoImpl.java @@ -55,11 +55,11 @@ public class StoragePoolHostDaoImpl extends GenericDaoBase> getDatacenterStoragePoolHostInfo(long dcId, boolean sharedOnly) { - ArrayList> l = new ArrayList>(); + public boolean hasDatacenterStoragePoolHostInfo(long dcId, boolean sharedOnly) { + Long poolCount = 0L; String sql = sharedOnly ? 
SHARED_STORAGE_POOL_HOST_INFO : STORAGE_POOL_HOST_INFO; TransactionLegacy txn = TransactionLegacy.currentTxn(); - PreparedStatement pstmt = null; - try { - pstmt = txn.prepareAutoCloseStatement(sql); + try (PreparedStatement pstmt = txn.prepareAutoCloseStatement(sql)) { pstmt.setLong(1, dcId); - ResultSet rs = pstmt.executeQuery(); while (rs.next()) { - l.add(new Pair(rs.getLong(1), rs.getInt(2))); + poolCount = rs.getLong(1); + if (poolCount > 0) { + return true; + } } } catch (SQLException e) { logger.debug("SQLException: ", e); } - return l; + return false; } /** diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDao.java b/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDao.java index 1c5a2cb4256..3ac514530ce 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDao.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDao.java @@ -67,6 +67,8 @@ public interface VMTemplateDao extends GenericDao, StateDao< public List userIsoSearch(boolean listRemoved); + List listAllReadySystemVMTemplates(Long zoneId); + VMTemplateVO findSystemVMTemplate(long zoneId); VMTemplateVO findSystemVMReadyTemplate(long zoneId, HypervisorType hypervisorType); @@ -91,6 +93,5 @@ public interface VMTemplateDao extends GenericDao, StateDao< List listByIds(List ids); - List listByTemplateTag(String tag); - + List listIdsByTemplateTag(String tag); } diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDaoImpl.java b/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDaoImpl.java index 4665f660251..7513848536b 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDaoImpl.java @@ -344,19 +344,12 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem readySystemTemplateSearch = createSearchBuilder(); readySystemTemplateSearch.and("state", readySystemTemplateSearch.entity().getState(), SearchCriteria.Op.EQ); readySystemTemplateSearch.and("templateType", readySystemTemplateSearch.entity().getTemplateType(), SearchCriteria.Op.EQ); + readySystemTemplateSearch.and("hypervisorType", readySystemTemplateSearch.entity().getHypervisorType(), SearchCriteria.Op.IN); SearchBuilder templateDownloadSearch = _templateDataStoreDao.createSearchBuilder(); templateDownloadSearch.and("downloadState", templateDownloadSearch.entity().getDownloadState(), SearchCriteria.Op.IN); readySystemTemplateSearch.join("vmTemplateJoinTemplateStoreRef", templateDownloadSearch, templateDownloadSearch.entity().getTemplateId(), readySystemTemplateSearch.entity().getId(), JoinBuilder.JoinType.INNER); - SearchBuilder hostHyperSearch2 = _hostDao.createSearchBuilder(); - hostHyperSearch2.and("type", hostHyperSearch2.entity().getType(), SearchCriteria.Op.EQ); - hostHyperSearch2.and("zoneId", hostHyperSearch2.entity().getDataCenterId(), SearchCriteria.Op.EQ); - hostHyperSearch2.and("removed", hostHyperSearch2.entity().getRemoved(), SearchCriteria.Op.NULL); - hostHyperSearch2.groupBy(hostHyperSearch2.entity().getHypervisorType()); - - readySystemTemplateSearch.join("tmplHyper", hostHyperSearch2, hostHyperSearch2.entity().getHypervisorType(), readySystemTemplateSearch.entity() - .getHypervisorType(), JoinBuilder.JoinType.INNER); - hostHyperSearch2.done(); + readySystemTemplateSearch.groupBy(readySystemTemplateSearch.entity().getId()); readySystemTemplateSearch.done(); tmpltTypeHyperSearch2 = createSearchBuilder(); @@ -556,29 +549,35 @@ public class 
VMTemplateDaoImpl extends GenericDaoBase implem } @Override - public VMTemplateVO findSystemVMReadyTemplate(long zoneId, HypervisorType hypervisorType) { + public List listAllReadySystemVMTemplates(Long zoneId) { + List availableHypervisors = _hostDao.listDistinctHypervisorTypes(zoneId); + if (CollectionUtils.isEmpty(availableHypervisors)) { + return Collections.emptyList(); + } SearchCriteria sc = readySystemTemplateSearch.create(); sc.setParameters("templateType", Storage.TemplateType.SYSTEM); sc.setParameters("state", VirtualMachineTemplate.State.Active); - sc.setJoinParameters("tmplHyper", "type", Host.Type.Routing); - sc.setJoinParameters("tmplHyper", "zoneId", zoneId); - sc.setJoinParameters("vmTemplateJoinTemplateStoreRef", "downloadState", new VMTemplateStorageResourceAssoc.Status[] {VMTemplateStorageResourceAssoc.Status.DOWNLOADED, VMTemplateStorageResourceAssoc.Status.BYPASSED}); - + sc.setParameters("hypervisorType", availableHypervisors.toArray()); + sc.setJoinParameters("vmTemplateJoinTemplateStoreRef", "downloadState", + List.of(VMTemplateStorageResourceAssoc.Status.DOWNLOADED, + VMTemplateStorageResourceAssoc.Status.BYPASSED).toArray()); // order by descending order of id - List tmplts = listBy(sc, new Filter(VMTemplateVO.class, "id", false, null, null)); - - if (tmplts.size() > 0) { - if (hypervisorType == HypervisorType.Any) { - return tmplts.get(0); - } - for (VMTemplateVO tmplt : tmplts) { - if (tmplt.getHypervisorType() == hypervisorType) { - return tmplt; - } - } + return listBy(sc, new Filter(VMTemplateVO.class, "id", false, null, null)); + } + @Override + public VMTemplateVO findSystemVMReadyTemplate(long zoneId, HypervisorType hypervisorType) { + List templates = listAllReadySystemVMTemplates(zoneId); + if (CollectionUtils.isEmpty(templates)) { + return null; } - return null; + if (hypervisorType == HypervisorType.Any) { + return templates.get(0); + } + return templates.stream() + .filter(t -> t.getHypervisorType() == hypervisorType) + .findFirst() + .orElse(null); } @Override @@ -687,13 +686,14 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem } @Override - public List listByTemplateTag(String tag) { - SearchBuilder sb = createSearchBuilder(); + public List listIdsByTemplateTag(String tag) { + GenericSearchBuilder sb = createSearchBuilder(Long.class); + sb.selectFields(sb.entity().getId()); sb.and("tag", sb.entity().getTemplateTag(), SearchCriteria.Op.EQ); sb.done(); - SearchCriteria sc = sb.create(); + SearchCriteria sc = sb.create(); sc.setParameters("tag", tag); - return listIncludingRemovedBy(sc); + return customSearchIncludingRemoved(sc, null); } @Override diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDaoImpl.java b/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDaoImpl.java index 0c4d707635a..750dbf2bee0 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDaoImpl.java @@ -571,14 +571,6 @@ public class VolumeDaoImpl extends GenericDaoBase implements Vol } } - public static class SumCount { - public long sum; - public long count; - - public SumCount() { - } - } - @Override public List listVolumesToBeDestroyed() { SearchCriteria sc = AllFieldsSearch.create(); diff --git a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java index b197fb7c030..12049b6f240 100644 --- 
a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java @@ -870,7 +870,7 @@ public class SystemVmTemplateRegistration { public void doInTransactionWithoutResult(final TransactionStatus status) { Set hypervisorsListInUse = new HashSet(); try { - hypervisorsListInUse = clusterDao.getDistictAvailableHypervisorsAcrossClusters(); + hypervisorsListInUse = clusterDao.getDistinctAvailableHypervisorsAcrossClusters(); } catch (final Exception e) { LOGGER.error("updateSystemVmTemplates: Exception caught while getting hypervisor types from clusters: " + e.getMessage()); diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/DatabaseAccessObject.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/DatabaseAccessObject.java index 1c2c4b3c7ce..0b973d195de 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/DatabaseAccessObject.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/DatabaseAccessObject.java @@ -114,6 +114,17 @@ public class DatabaseAccessObject { } } + public void renameIndex(Connection conn, String tableName, String oldName, String newName) { + String stmt = String.format("ALTER TABLE %s RENAME INDEX %s TO %s", tableName, oldName, newName); + logger.debug("Statement: {}", stmt); + try (PreparedStatement pstmt = conn.prepareStatement(stmt)) { + pstmt.execute(); + logger.debug("Renamed index {} to {}", oldName, newName); + } catch (SQLException e) { + logger.warn("Unable to rename index {} to {}", oldName, newName, e); + } + } + protected void closePreparedStatement(PreparedStatement pstmt, String errorMessage) { try { if (pstmt != null) { diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/DbUpgradeUtils.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/DbUpgradeUtils.java index 51e6ac7b9a1..2f90422adf8 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/DbUpgradeUtils.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/DbUpgradeUtils.java @@ -31,6 +31,12 @@ public class DbUpgradeUtils { } } + public static void renameIndexIfNeeded(Connection conn, String tableName, String oldName, String newName) { + if (!dao.indexExists(conn, tableName, newName)) { + dao.renameIndex(conn, tableName, oldName, newName); + } + } + public static void addForeignKey(Connection conn, String tableName, String tableColumn, String foreignTableName, String foreignColumnName) { dao.addForeignKey(conn, tableName, tableColumn, foreignTableName, foreignColumnName); } diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade42000to42010.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade42000to42010.java index 197ca1cb34c..6298e0e729a 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade42000to42010.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade42000to42010.java @@ -53,6 +53,7 @@ public class Upgrade42000to42010 extends DbUpgradeAbstractImpl implements DbUpgr @Override public void performDataMigration(Connection conn) { + addIndexes(conn); } @Override @@ -80,4 +81,42 @@ public class Upgrade42000to42010 extends DbUpgradeAbstractImpl implements DbUpgr throw new CloudRuntimeException("Failed to find / register SystemVM template(s)"); } } + + private void addIndexes(Connection conn) { + DbUpgradeUtils.addIndexIfNeeded(conn, "host", "mgmt_server_id"); + DbUpgradeUtils.addIndexIfNeeded(conn, "host", "resource"); + DbUpgradeUtils.addIndexIfNeeded(conn, "host", "resource_state"); +
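A hedged sketch of what each DbUpgradeUtils.addIndexIfNeeded(...) call in this migration is assumed to boil down to; the helper's body is not part of this diff, so the index-name convention and SQL below are illustrative only, modelled on the renameIndex() helper added to DatabaseAccessObject above.

    // Assumed shape of the underlying DatabaseAccessObject call behind addIndexIfNeeded();
    // DbUpgradeUtils is expected to guard it with dao.indexExists(...) so re-running the
    // upgrade is a no-op, the same way renameIndexIfNeeded() guards renameIndex().
    public void addIndex(Connection conn, String tableName, String... columns) {
        String indexName = String.format("i_%s__%s", tableName, String.join("_", columns));
        String stmt = String.format("ALTER TABLE %s ADD INDEX %s (%s)", tableName, indexName,
                String.join(", ", columns));
        logger.debug("Statement: {}", stmt);
        try (PreparedStatement pstmt = conn.prepareStatement(stmt)) {
            pstmt.execute();
            logger.debug("Added index {} to table {}", indexName, tableName);
        } catch (SQLException e) {
            logger.warn("Unable to add index {} to table {}", indexName, tableName, e);
        }
    }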
DbUpgradeUtils.addIndexIfNeeded(conn, "host", "type"); + + DbUpgradeUtils.renameIndexIfNeeded(conn, "user_ip_address", "public_ip_address", "uk_public_ip_address"); + DbUpgradeUtils.addIndexIfNeeded(conn, "user_ip_address", "public_ip_address"); + DbUpgradeUtils.addIndexIfNeeded(conn, "user_ip_address", "data_center_id"); + DbUpgradeUtils.addIndexIfNeeded(conn, "user_ip_address", "vlan_db_id"); + DbUpgradeUtils.addIndexIfNeeded(conn, "user_ip_address", "removed"); + + DbUpgradeUtils.addIndexIfNeeded(conn, "vlan", "vlan_type"); + DbUpgradeUtils.addIndexIfNeeded(conn, "vlan", "data_center_id"); + DbUpgradeUtils.addIndexIfNeeded(conn, "vlan", "removed"); + + DbUpgradeUtils.addIndexIfNeeded(conn, "network_offering_details", "name"); + + DbUpgradeUtils.addIndexIfNeeded(conn, "network_offering_details", "resource_id", "resource_type"); + + DbUpgradeUtils.addIndexIfNeeded(conn, "service_offering", "cpu"); + DbUpgradeUtils.addIndexIfNeeded(conn, "service_offering", "speed"); + DbUpgradeUtils.addIndexIfNeeded(conn, "service_offering", "ram_size"); + + DbUpgradeUtils.addIndexIfNeeded(conn, "op_host_planner_reservation", "resource_usage"); + + DbUpgradeUtils.addIndexIfNeeded(conn, "storage_pool", "pool_type"); + DbUpgradeUtils.addIndexIfNeeded(conn, "storage_pool", "data_center_id", "status", "scope", "hypervisor"); + + DbUpgradeUtils.addIndexIfNeeded(conn, "router_network_ref", "guest_type"); + + DbUpgradeUtils.addIndexIfNeeded(conn, "domain_router", "role"); + + DbUpgradeUtils.addIndexIfNeeded(conn, "async_job", "instance_type", "job_status"); + + DbUpgradeUtils.addIndexIfNeeded(conn, "cluster", "managed_state"); + } } diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/ConsoleProxyDao.java b/engine/schema/src/main/java/com/cloud/vm/dao/ConsoleProxyDao.java index cb19748fda4..af32163b4c7 100644 --- a/engine/schema/src/main/java/com/cloud/vm/dao/ConsoleProxyDao.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/ConsoleProxyDao.java @@ -45,7 +45,7 @@ public interface ConsoleProxyDao extends GenericDao { public List getDatacenterSessionLoadMatrix(); - public List> getDatacenterStoragePoolHostInfo(long dcId, boolean countAllPoolTypes); + public boolean hasDatacenterStoragePoolHostInfo(long dcId, boolean sharedOnly); public List> getProxyLoadMatrix(); diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/ConsoleProxyDaoImpl.java b/engine/schema/src/main/java/com/cloud/vm/dao/ConsoleProxyDaoImpl.java index ef94a4d9f72..bc79194a10f 100644 --- a/engine/schema/src/main/java/com/cloud/vm/dao/ConsoleProxyDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/ConsoleProxyDaoImpl.java @@ -23,7 +23,6 @@ import java.util.ArrayList; import java.util.Date; import java.util.List; - import org.springframework.stereotype.Component; import com.cloud.info.ConsoleProxyLoadInfo; @@ -76,11 +75,11 @@ public class ConsoleProxyDaoImpl extends GenericDaoBase im private static final String GET_PROXY_ACTIVE_LOAD = "SELECT active_session AS count" + " FROM console_proxy" + " WHERE id=?"; - private static final String STORAGE_POOL_HOST_INFO = "SELECT p.data_center_id, count(ph.host_id) " + " FROM storage_pool p, storage_pool_host_ref ph " - + " WHERE p.id = ph.pool_id AND p.data_center_id = ? 
" + " GROUP by p.data_center_id"; + protected static final String STORAGE_POOL_HOST_INFO = "SELECT (SELECT id FROM storage_pool_host_ref ph WHERE " + + "ph.pool_id=p.id limit 1) AS sphr FROM storage_pool p WHERE p.data_center_id = ?"; - private static final String SHARED_STORAGE_POOL_HOST_INFO = "SELECT p.data_center_id, count(ph.host_id) " + " FROM storage_pool p, storage_pool_host_ref ph " - + " WHERE p.pool_type <> 'LVM' AND p.id = ph.pool_id AND p.data_center_id = ? " + " GROUP by p.data_center_id"; + protected static final String SHARED_STORAGE_POOL_HOST_INFO = "SELECT (SELECT id FROM storage_pool_host_ref ph " + + "WHERE ph.pool_id=p.id limit 1) AS sphr FROM storage_pool p WHERE p.data_center_id = ? AND p.pool_type NOT IN ('LVM', 'Filesystem')"; protected SearchBuilder DataCenterStatusSearch; protected SearchBuilder StateSearch; @@ -219,28 +218,23 @@ public class ConsoleProxyDaoImpl extends GenericDaoBase im } @Override - public List> getDatacenterStoragePoolHostInfo(long dcId, boolean countAllPoolTypes) { - ArrayList> l = new ArrayList>(); - + public boolean hasDatacenterStoragePoolHostInfo(long dcId, boolean sharedOnly) { + Long poolCount = 0L; + String sql = sharedOnly ? SHARED_STORAGE_POOL_HOST_INFO : STORAGE_POOL_HOST_INFO; TransactionLegacy txn = TransactionLegacy.currentTxn(); - ; - PreparedStatement pstmt = null; - try { - if (countAllPoolTypes) { - pstmt = txn.prepareAutoCloseStatement(STORAGE_POOL_HOST_INFO); - } else { - pstmt = txn.prepareAutoCloseStatement(SHARED_STORAGE_POOL_HOST_INFO); - } + try (PreparedStatement pstmt = txn.prepareAutoCloseStatement(sql)) { pstmt.setLong(1, dcId); - ResultSet rs = pstmt.executeQuery(); while (rs.next()) { - l.add(new Pair(rs.getLong(1), rs.getInt(2))); + poolCount = rs.getLong(1); + if (poolCount > 0) { + return true; + } } } catch (SQLException e) { logger.debug("Caught SQLException: ", e); } - return l; + return false; } @Override diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/NicIpAliasDaoImpl.java b/engine/schema/src/main/java/com/cloud/vm/dao/NicIpAliasDaoImpl.java index 887b3d73087..44866c0a358 100644 --- a/engine/schema/src/main/java/com/cloud/vm/dao/NicIpAliasDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/NicIpAliasDaoImpl.java @@ -170,8 +170,7 @@ public class NicIpAliasDaoImpl extends GenericDaoBase implem public Integer countAliasIps(long id) { SearchCriteria sc = AllFieldsSearch.create(); sc.setParameters("instanceId", id); - List list = listBy(sc); - return list.size(); + return getCount(sc); } @Override diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDao.java b/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDao.java index 52bc5aac7e2..823642d8c3d 100755 --- a/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDao.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDao.java @@ -16,6 +16,7 @@ // under the License. package com.cloud.vm.dao; +import java.util.Collection; import java.util.Date; import java.util.HashMap; import java.util.List; @@ -81,7 +82,7 @@ public interface VMInstanceDao extends GenericDao, StateDao< List listByHostAndState(long hostId, State... states); - List listByTypes(VirtualMachine.Type... types); + int countByTypes(VirtualMachine.Type... types); VMInstanceVO findByIdTypes(long id, VirtualMachine.Type... types); @@ -144,21 +145,28 @@ public interface VMInstanceDao extends GenericDao, StateDao< */ List listDistinctHostNames(long networkId, VirtualMachine.Type... 
types); + List findByHostInStatesExcluding(Long hostId, Collection excludingIds, State... states); + List findByHostInStates(Long hostId, State... states); List listStartingWithNoHostId(); boolean updatePowerState(long instanceId, long powerHostId, VirtualMachine.PowerState powerState, Date wisdomEra); + Map updatePowerState(Map instancePowerStates, + long powerHostId, Date wisdomEra); + void resetVmPowerStateTracking(long instanceId); + void resetVmPowerStateTracking(List instanceId); + void resetHostPowerStateTracking(long hostId); HashMap countVgpuVMs(Long dcId, Long podId, Long clusterId); VMInstanceVO findVMByHostNameInZone(String hostName, long zoneId); - boolean isPowerStateUpToDate(long instanceId); + boolean isPowerStateUpToDate(VMInstanceVO instance); List listNonMigratingVmsByHostEqualsLastHost(long hostId); @@ -170,4 +178,13 @@ public interface VMInstanceDao extends GenericDao, StateDao< List skippedVmIds); Pair, Integer> listByVmsNotInClusterUsingPool(long clusterId, long poolId); + + List listIdServiceOfferingForUpVmsByHostId(Long hostId); + + List listIdServiceOfferingForVmsMigratingFromHost(Long hostId); + + Map getNameIdMapForVmInstanceNames(Collection names); + + Map getNameIdMapForVmIds(Collection ids); + } diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java b/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java index 0e87e6bcb7d..ef10af63bae 100755 --- a/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java @@ -20,6 +20,7 @@ import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; import java.util.ArrayList; +import java.util.Collection; import java.util.Date; import java.util.HashMap; import java.util.List; @@ -75,6 +76,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem protected SearchBuilder LHVMClusterSearch; protected SearchBuilder IdStatesSearch; protected SearchBuilder AllFieldsSearch; + protected SearchBuilder IdServiceOfferingIdSelectSearch; protected SearchBuilder ZoneTemplateNonExpungedSearch; protected SearchBuilder TemplateNonExpungedSearch; protected SearchBuilder NameLikeSearch; @@ -101,6 +103,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem protected SearchBuilder BackupSearch; protected SearchBuilder LastHostAndStatesSearch; protected SearchBuilder VmsNotInClusterUsingPool; + protected SearchBuilder IdsPowerStateSelectSearch; @Inject ResourceTagDao tagsDao; @@ -175,6 +178,14 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem AllFieldsSearch.and("account", AllFieldsSearch.entity().getAccountId(), Op.EQ); AllFieldsSearch.done(); + IdServiceOfferingIdSelectSearch = createSearchBuilder(); + IdServiceOfferingIdSelectSearch.and("host", IdServiceOfferingIdSelectSearch.entity().getHostId(), Op.EQ); + IdServiceOfferingIdSelectSearch.and("lastHost", IdServiceOfferingIdSelectSearch.entity().getLastHostId(), Op.EQ); + IdServiceOfferingIdSelectSearch.and("state", IdServiceOfferingIdSelectSearch.entity().getState(), Op.EQ); + IdServiceOfferingIdSelectSearch.and("states", IdServiceOfferingIdSelectSearch.entity().getState(), Op.IN); + IdServiceOfferingIdSelectSearch.selectFields(IdServiceOfferingIdSelectSearch.entity().getId(), IdServiceOfferingIdSelectSearch.entity().getServiceOfferingId()); + IdServiceOfferingIdSelectSearch.done(); + ZoneTemplateNonExpungedSearch = createSearchBuilder(); ZoneTemplateNonExpungedSearch.and("zone", 
ZoneTemplateNonExpungedSearch.entity().getDataCenterId(), Op.EQ); ZoneTemplateNonExpungedSearch.and("template", ZoneTemplateNonExpungedSearch.entity().getTemplateId(), Op.EQ); @@ -274,6 +285,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem HostAndStateSearch = createSearchBuilder(); HostAndStateSearch.and("host", HostAndStateSearch.entity().getHostId(), Op.EQ); HostAndStateSearch.and("states", HostAndStateSearch.entity().getState(), Op.IN); + HostAndStateSearch.and("idsNotIn", HostAndStateSearch.entity().getId(), Op.NIN); HostAndStateSearch.done(); StartingWithNoHostSearch = createSearchBuilder(); @@ -323,6 +335,15 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem VmsNotInClusterUsingPool.join("hostSearch2", hostSearch2, hostSearch2.entity().getId(), VmsNotInClusterUsingPool.entity().getHostId(), JoinType.INNER); VmsNotInClusterUsingPool.and("vmStates", VmsNotInClusterUsingPool.entity().getState(), Op.IN); VmsNotInClusterUsingPool.done(); + + IdsPowerStateSelectSearch = createSearchBuilder(); + IdsPowerStateSelectSearch.and("id", IdsPowerStateSelectSearch.entity().getId(), Op.IN); + IdsPowerStateSelectSearch.selectFields(IdsPowerStateSelectSearch.entity().getId(), + IdsPowerStateSelectSearch.entity().getPowerHostId(), + IdsPowerStateSelectSearch.entity().getPowerState(), + IdsPowerStateSelectSearch.entity().getPowerStateUpdateCount(), + IdsPowerStateSelectSearch.entity().getPowerStateUpdateTime()); + IdsPowerStateSelectSearch.done(); } @Override @@ -458,10 +479,10 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem } @Override - public List listByTypes(Type... types) { + public int countByTypes(Type... types) { SearchCriteria sc = TypesSearch.create(); sc.setParameters("types", (Object[])types); - return listBy(sc); + return getCount(sc); } @Override @@ -897,6 +918,17 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem return result; } + @Override + public List findByHostInStatesExcluding(Long hostId, Collection excludingIds, State... states) { + SearchCriteria sc = HostAndStateSearch.create(); + sc.setParameters("host", hostId); + if (excludingIds != null && !excludingIds.isEmpty()) { + sc.setParameters("idsNotIn", excludingIds.toArray()); + } + sc.setParameters("states", (Object[])states); + return listBy(sc); + } + @Override public List findByHostInStates(Long hostId, State... 
states) { SearchCriteria sc = HostAndStateSearch.create(); @@ -912,42 +944,109 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem return listBy(sc); } - @Override - public boolean updatePowerState(final long instanceId, final long powerHostId, final VirtualMachine.PowerState powerState, Date wisdomEra) { - return Transaction.execute(new TransactionCallback<>() { - @Override - public Boolean doInTransaction(TransactionStatus status) { - boolean needToUpdate = false; - VMInstanceVO instance = findById(instanceId); - if (instance != null - && (null == instance.getPowerStateUpdateTime() - || instance.getPowerStateUpdateTime().before(wisdomEra))) { - Long savedPowerHostId = instance.getPowerHostId(); - if (instance.getPowerState() != powerState - || savedPowerHostId == null - || savedPowerHostId != powerHostId - || !isPowerStateInSyncWithInstanceState(powerState, powerHostId, instance)) { - instance.setPowerState(powerState); - instance.setPowerHostId(powerHostId); - instance.setPowerStateUpdateCount(1); - instance.setPowerStateUpdateTime(DateUtil.currentGMTTime()); - needToUpdate = true; - update(instanceId, instance); - } else { - // to reduce DB updates, consecutive same state update for more than 3 times - if (instance.getPowerStateUpdateCount() < MAX_CONSECUTIVE_SAME_STATE_UPDATE_COUNT) { - instance.setPowerStateUpdateCount(instance.getPowerStateUpdateCount() + 1); - instance.setPowerStateUpdateTime(DateUtil.currentGMTTime()); - needToUpdate = true; - update(instanceId, instance); - } - } - } - return needToUpdate; + protected List listSelectPowerStateByIds(final List ids) { + if (CollectionUtils.isEmpty(ids)) { + return new ArrayList<>(); + } + SearchCriteria sc = IdsPowerStateSelectSearch.create(); + sc.setParameters("id", ids.toArray()); + return customSearch(sc, null); + } + + protected Integer getPowerUpdateCount(final VMInstanceVO instance, final long powerHostId, + final VirtualMachine.PowerState powerState, Date wisdomEra) { + if (instance.getPowerStateUpdateTime() == null || instance.getPowerStateUpdateTime().before(wisdomEra)) { + Long savedPowerHostId = instance.getPowerHostId(); + boolean isStateMismatch = instance.getPowerState() != powerState + || savedPowerHostId == null + || !savedPowerHostId.equals(powerHostId) + || !isPowerStateInSyncWithInstanceState(powerState, powerHostId, instance); + if (isStateMismatch) { + return 1; + } else if (instance.getPowerStateUpdateCount() < MAX_CONSECUTIVE_SAME_STATE_UPDATE_COUNT) { + return instance.getPowerStateUpdateCount() + 1; } + } + return null; + } + + @Override + public boolean updatePowerState(final long instanceId, final long powerHostId, + final VirtualMachine.PowerState powerState, Date wisdomEra) { + return Transaction.execute((TransactionCallback) status -> { + VMInstanceVO instance = findById(instanceId); + if (instance == null) { + return false; + } + // Check if we need to update based on powerStateUpdateTime + if (instance.getPowerStateUpdateTime() == null || instance.getPowerStateUpdateTime().before(wisdomEra)) { + Long savedPowerHostId = instance.getPowerHostId(); + boolean isStateMismatch = instance.getPowerState() != powerState + || savedPowerHostId == null + || !savedPowerHostId.equals(powerHostId) + || !isPowerStateInSyncWithInstanceState(powerState, powerHostId, instance); + + if (isStateMismatch) { + instance.setPowerState(powerState); + instance.setPowerHostId(powerHostId); + instance.setPowerStateUpdateCount(1); + } else if (instance.getPowerStateUpdateCount() < 
MAX_CONSECUTIVE_SAME_STATE_UPDATE_COUNT) { + instance.setPowerStateUpdateCount(instance.getPowerStateUpdateCount() + 1); + } else { + // No need to update if power state is already in sync and count exceeded + return false; + } + instance.setPowerStateUpdateTime(DateUtil.currentGMTTime()); + update(instanceId, instance); + return true; // Return true since an update occurred + } + return false; }); } + @Override + public Map updatePowerState( + final Map instancePowerStates, long powerHostId, Date wisdomEra) { + Map notUpdated = new HashMap<>(); + List instances = listSelectPowerStateByIds(new ArrayList<>(instancePowerStates.keySet())); + Map updateCounts = new HashMap<>(); + for (VMInstanceVO instance : instances) { + VirtualMachine.PowerState powerState = instancePowerStates.get(instance.getId()); + Integer count = getPowerUpdateCount(instance, powerHostId, powerState, wisdomEra); + if (count != null) { + updateCounts.put(instance.getId(), count); + } else { + notUpdated.put(instance.getId(), powerState); + } + } + if (updateCounts.isEmpty()) { + return notUpdated; + } + StringBuilder sql = new StringBuilder("UPDATE `cloud`.`vm_instance` SET " + + "`power_host` = ?, `power_state_update_time` = now(), `power_state` = CASE "); + updateCounts.keySet().forEach(key -> { + sql.append("WHEN id = ").append(key).append(" THEN '").append(instancePowerStates.get(key)).append("' "); + }); + sql.append("END, `power_state_update_count` = CASE "); + StringBuilder idList = new StringBuilder(); + updateCounts.forEach((key, value) -> { + sql.append("WHEN `id` = ").append(key).append(" THEN ").append(value).append(" "); + idList.append(key).append(","); + }); + idList.setLength(idList.length() - 1); + sql.append("END WHERE `id` IN (").append(idList).append(")"); + TransactionLegacy txn = TransactionLegacy.currentTxn(); + try (PreparedStatement pstmt = txn.prepareAutoCloseStatement(sql.toString())) { + pstmt.setLong(1, powerHostId); + pstmt.executeUpdate(); + } catch (SQLException e) { + logger.error("Unable to execute update power states SQL from VMs {} due to: {}", + idList, e.getMessage(), e); + return instancePowerStates; + } + return notUpdated; + } + private boolean isPowerStateInSyncWithInstanceState(final VirtualMachine.PowerState powerState, final long powerHostId, final VMInstanceVO instance) { State instanceState = instance.getState(); if ((powerState == VirtualMachine.PowerState.PowerOff && instanceState == State.Running) @@ -962,11 +1061,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem } @Override - public boolean isPowerStateUpToDate(final long instanceId) { - VMInstanceVO instance = findById(instanceId); - if(instance == null) { - throw new CloudRuntimeException("checking power state update count on non existing instance " + instanceId); - } + public boolean isPowerStateUpToDate(final VMInstanceVO instance) { return instance.getPowerStateUpdateCount() < MAX_CONSECUTIVE_SAME_STATE_UPDATE_COUNT; } @@ -985,6 +1080,25 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem }); } + @Override + public void resetVmPowerStateTracking(List instanceIds) { + if (CollectionUtils.isEmpty(instanceIds)) { + return; + } + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + SearchCriteria sc = IdsPowerStateSelectSearch.create(); + sc.setParameters("id", instanceIds.toArray()); + VMInstanceVO vm = createForUpdate(); + vm.setPowerStateUpdateCount(0); + 
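To make the batched updatePowerState(Map, long, Date) added above concrete, a small hypothetical call site follows; the DAO reference, host id, VM ids and cutoff are invented for illustration, and the commented statement only shows the shape of the single UPDATE the method builds for rows it decides to touch.

    // Host 42 reports the power state of two instances in one batch.
    Map<Long, VirtualMachine.PowerState> reported = new HashMap<>();
    reported.put(10L, VirtualMachine.PowerState.PowerOn);
    reported.put(11L, VirtualMachine.PowerState.PowerOff);
    Date cutoff = DateUtil.currentGMTTime(); // plays the role of wisdomEra: rows refreshed after it are skipped
    Map<Long, VirtualMachine.PowerState> skipped = vmInstanceDao.updatePowerState(reported, 42L, cutoff);
    // For the rows that need a write, one statement of roughly this shape is issued:
    //   UPDATE `cloud`.`vm_instance`
    //   SET `power_host` = 42, `power_state_update_time` = now(),
    //       `power_state` = CASE WHEN id = 10 THEN 'PowerOn' WHEN id = 11 THEN 'PowerOff' END,
    //       `power_state_update_count` = CASE WHEN `id` = 10 THEN 1 WHEN `id` = 11 THEN 2 END
    //   WHERE `id` IN (10,11)
    // Entries the method decides not to touch (already refreshed after the cutoff, or in sync
    // and at MAX_CONSECUTIVE_SAME_STATE_UPDATE_COUNT) are returned so callers can skip them.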
vm.setPowerStateUpdateTime(DateUtil.currentGMTTime()); + UpdateBuilder ub = getUpdateBuilder(vm); + update(ub, sc, null); + } + }); + } + @Override @DB public void resetHostPowerStateTracking(final long hostId) { Transaction.execute(new TransactionCallbackNoReturn() { @@ -1060,6 +1174,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem return searchIncludingRemoved(sc, filter, null, false); } + @Override public Pair, Integer> listByVmsNotInClusterUsingPool(long clusterId, long poolId) { SearchCriteria sc = VmsNotInClusterUsingPool.create(); sc.setParameters("vmStates", State.Starting, State.Running, State.Stopping, State.Migrating, State.Restoring); @@ -1069,4 +1184,44 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem List uniqueVms = vms.stream().distinct().collect(Collectors.toList()); return new Pair<>(uniqueVms, uniqueVms.size()); } + + @Override + public List listIdServiceOfferingForUpVmsByHostId(Long hostId) { + SearchCriteria sc = IdServiceOfferingIdSelectSearch.create(); + sc.setParameters("host", hostId); + sc.setParameters("states", new Object[] {State.Starting, State.Running, State.Stopping, State.Migrating}); + return customSearch(sc, null); + } + + @Override + public List listIdServiceOfferingForVmsMigratingFromHost(Long hostId) { + SearchCriteria sc = IdServiceOfferingIdSelectSearch.create(); + sc.setParameters("lastHost", hostId); + sc.setParameters("state", State.Migrating); + return customSearch(sc, null); + } + + @Override + public Map getNameIdMapForVmInstanceNames(Collection names) { + SearchBuilder sb = createSearchBuilder(); + sb.and("name", sb.entity().getInstanceName(), Op.IN); + sb.selectFields(sb.entity().getId(), sb.entity().getInstanceName()); + SearchCriteria sc = sb.create(); + sc.setParameters("name", names.toArray()); + List vms = customSearch(sc, null); + return vms.stream() + .collect(Collectors.toMap(VMInstanceVO::getInstanceName, VMInstanceVO::getId)); + } + + @Override + public Map getNameIdMapForVmIds(Collection ids) { + SearchBuilder sb = createSearchBuilder(); + sb.and("id", sb.entity().getId(), Op.IN); + sb.selectFields(sb.entity().getId(), sb.entity().getInstanceName()); + SearchCriteria sc = sb.create(); + sc.setParameters("id", ids.toArray()); + List vms = customSearch(sc, null); + return vms.stream() + .collect(Collectors.toMap(VMInstanceVO::getInstanceName, VMInstanceVO::getId)); + } } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDao.java b/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDao.java index 8f3d264da98..6d0d9378c7c 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDao.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDao.java @@ -88,6 +88,8 @@ public interface ResourceDetailsDao extends GenericDao public Map listDetailsKeyPairs(long resourceId); + Map listDetailsKeyPairs(long resourceId, List keys); + public Map listDetailsKeyPairs(long resourceId, boolean forDisplay); Map listDetailsVisibility(long resourceId); diff --git a/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDaoBase.java b/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDaoBase.java index 4205a7823e4..f2e156f225a 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDaoBase.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDaoBase.java @@ 
-19,6 +19,7 @@ package org.apache.cloudstack.resourcedetail; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.stream.Collectors; import org.apache.cloudstack.api.ResourceDetail; import org.apache.commons.collections.CollectionUtils; @@ -91,6 +92,20 @@ public abstract class ResourceDetailsDaoBase extends G return details; } + @Override + public Map listDetailsKeyPairs(long resourceId, List keys) { + SearchBuilder sb = createSearchBuilder(); + sb.and("resourceId", sb.entity().getResourceId(), SearchCriteria.Op.EQ); + sb.and("name", sb.entity().getName(), SearchCriteria.Op.IN); + sb.done(); + SearchCriteria sc = sb.create(); + sc.setParameters("resourceId", resourceId); + sc.setParameters("name", keys.toArray()); + + List results = search(sc, null); + return results.stream().collect(Collectors.toMap(R::getName, R::getValue)); + } + public Map listDetailsVisibility(long resourceId) { SearchCriteria sc = AllFieldsSearch.create(); sc.setParameters("resourceId", resourceId); diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java index 1658fe0a537..07b0b8b517c 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java @@ -28,20 +28,20 @@ import java.util.stream.Collectors; import javax.inject.Inject; import javax.naming.ConfigurationException; -import com.cloud.storage.Storage; -import com.cloud.utils.Pair; -import com.cloud.utils.db.Filter; import org.apache.commons.collections.CollectionUtils; import com.cloud.host.Status; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.storage.ScopeType; +import com.cloud.storage.Storage; import com.cloud.storage.StoragePoolHostVO; import com.cloud.storage.StoragePoolStatus; import com.cloud.storage.StoragePoolTagVO; import com.cloud.storage.dao.StoragePoolHostDao; import com.cloud.storage.dao.StoragePoolTagsDao; +import com.cloud.utils.Pair; import com.cloud.utils.db.DB; +import com.cloud.utils.db.Filter; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.GenericSearchBuilder; import com.cloud.utils.db.JoinBuilder; diff --git a/engine/schema/src/main/resources/META-INF/db/views/cloud.network_offering_view.sql b/engine/schema/src/main/resources/META-INF/db/views/cloud.network_offering_view.sql index b6abaabcd48..640b2397a46 100644 --- a/engine/schema/src/main/resources/META-INF/db/views/cloud.network_offering_view.sql +++ b/engine/schema/src/main/resources/META-INF/db/views/cloud.network_offering_view.sql @@ -76,13 +76,9 @@ SELECT FROM `cloud`.`network_offerings` LEFT JOIN - `cloud`.`network_offering_details` AS `domain_details` ON `domain_details`.`network_offering_id` = `network_offerings`.`id` AND `domain_details`.`name`='domainid' + `cloud`.`domain` AS `domain` ON `domain`.id IN (SELECT value from `network_offering_details` where `name` = 'domainid' and `network_offering_id` = `network_offerings`.`id`) LEFT JOIN - `cloud`.`domain` AS `domain` ON FIND_IN_SET(`domain`.`id`, `domain_details`.`value`) - LEFT JOIN - `cloud`.`network_offering_details` AS `zone_details` ON `zone_details`.`network_offering_id` = `network_offerings`.`id` AND `zone_details`.`name`='zoneid' - LEFT JOIN - `cloud`.`data_center` AS `zone` ON FIND_IN_SET(`zone`.`id`, `zone_details`.`value`) + 
`cloud`.`data_center` AS `zone` ON `zone`.`id` IN (SELECT value from `network_offering_details` where `name` = 'zoneid' and `network_offering_id` = `network_offerings`.`id`) LEFT JOIN `cloud`.`network_offering_details` AS `offering_details` ON `offering_details`.`network_offering_id` = `network_offerings`.`id` AND `offering_details`.`name`='internetProtocol' GROUP BY diff --git a/engine/schema/src/test/java/com/cloud/capacity/dao/CapacityDaoImplTest.java b/engine/schema/src/test/java/com/cloud/capacity/dao/CapacityDaoImplTest.java new file mode 100644 index 00000000000..76c1092546a --- /dev/null +++ b/engine/schema/src/test/java/com/cloud/capacity/dao/CapacityDaoImplTest.java @@ -0,0 +1,99 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.capacity.dao; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; +import org.mockito.Mockito; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; + +import com.cloud.capacity.CapacityVO; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; + +@RunWith(MockitoJUnitRunner.class) +public class CapacityDaoImplTest { + @Spy + @InjectMocks + CapacityDaoImpl capacityDao = new CapacityDaoImpl(); + + private SearchBuilder searchBuilder; + private SearchCriteria searchCriteria; + + @Before + public void setUp() { + searchBuilder = mock(SearchBuilder.class); + CapacityVO capacityVO = mock(CapacityVO.class); + when(searchBuilder.entity()).thenReturn(capacityVO); + searchCriteria = mock(SearchCriteria.class); + doReturn(searchBuilder).when(capacityDao).createSearchBuilder(); + when(searchBuilder.create()).thenReturn(searchCriteria); + } + + @Test + public void testListByHostIdTypes() { + // Prepare inputs + Long hostId = 1L; + List capacityTypes = Arrays.asList((short)1, (short)2); + CapacityVO capacity1 = new CapacityVO(); + CapacityVO capacity2 = new CapacityVO(); + List mockResult = Arrays.asList(capacity1, capacity2); + doReturn(mockResult).when(capacityDao).listBy(any(SearchCriteria.class)); + List result = capacityDao.listByHostIdTypes(hostId, capacityTypes); + verify(searchBuilder).and(eq("hostId"), any(), eq(SearchCriteria.Op.EQ)); + verify(searchBuilder).and(eq("type"), any(), eq(SearchCriteria.Op.IN)); + 
verify(searchBuilder).done(); + verify(searchCriteria).setParameters("hostId", hostId); + verify(searchCriteria).setParameters("type", capacityTypes.toArray()); + verify(capacityDao).listBy(searchCriteria); + assertEquals(2, result.size()); + assertSame(capacity1, result.get(0)); + assertSame(capacity2, result.get(1)); + } + + @Test + public void testListByHostIdTypesEmptyResult() { + Long hostId = 1L; + List capacityTypes = Arrays.asList((short)1, (short)2); + doReturn(Collections.emptyList()).when(capacityDao).listBy(any(SearchCriteria.class)); + List result = capacityDao.listByHostIdTypes(hostId, capacityTypes); + verify(searchBuilder).and(Mockito.eq("hostId"), any(), eq(SearchCriteria.Op.EQ)); + verify(searchBuilder).and(eq("type"), any(), eq(SearchCriteria.Op.IN)); + verify(searchBuilder).done(); + verify(searchCriteria).setParameters("hostId", hostId); + verify(searchCriteria).setParameters("type", capacityTypes.toArray()); + verify(capacityDao).listBy(searchCriteria); + assertTrue(result.isEmpty()); + } +} diff --git a/engine/schema/src/test/java/com/cloud/dc/dao/ClusterDaoImplTest.java b/engine/schema/src/test/java/com/cloud/dc/dao/ClusterDaoImplTest.java new file mode 100644 index 00000000000..a513809be05 --- /dev/null +++ b/engine/schema/src/test/java/com/cloud/dc/dao/ClusterDaoImplTest.java @@ -0,0 +1,78 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.dc.dao; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.isNull; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; + +import com.cloud.dc.ClusterVO; +import com.cloud.utils.db.GenericSearchBuilder; +import com.cloud.utils.db.SearchBuilder; + +@RunWith(MockitoJUnitRunner.class) +public class ClusterDaoImplTest { + @Spy + @InjectMocks + ClusterDaoImpl clusterDao = new ClusterDaoImpl(); + + private GenericSearchBuilder genericSearchBuilder; + + @Before + public void setUp() { + genericSearchBuilder = mock(SearchBuilder.class); + ClusterVO entityVO = mock(ClusterVO.class); + when(genericSearchBuilder.entity()).thenReturn(entityVO); + doReturn(genericSearchBuilder).when(clusterDao).createSearchBuilder(Long.class); + } + + @Test + public void testListAllIds() { + List mockIds = Arrays.asList(1L, 2L, 3L); + doReturn(mockIds).when(clusterDao).customSearch(any(), isNull()); + List result = clusterDao.listAllIds(); + verify(clusterDao).customSearch(genericSearchBuilder.create(), null); + assertEquals(3, result.size()); + assertEquals(Long.valueOf(1L), result.get(0)); + assertEquals(Long.valueOf(2L), result.get(1)); + assertEquals(Long.valueOf(3L), result.get(2)); + } + + @Test + public void testListAllIdsEmptyResult() { + doReturn(Collections.emptyList()).when(clusterDao).customSearch(any(), isNull()); + List result = clusterDao.listAllIds(); + verify(clusterDao).customSearch(genericSearchBuilder.create(), null); + assertTrue(result.isEmpty()); + } +} diff --git a/engine/schema/src/test/java/com/cloud/host/dao/HostDaoImplTest.java b/engine/schema/src/test/java/com/cloud/host/dao/HostDaoImplTest.java new file mode 100644 index 00000000000..81163321c6b --- /dev/null +++ b/engine/schema/src/test/java/com/cloud/host/dao/HostDaoImplTest.java @@ -0,0 +1,184 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.host.dao; + +import java.util.List; + +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; + +import com.cloud.host.Host; +import com.cloud.host.HostVO; +import com.cloud.host.Status; +import com.cloud.hypervisor.Hypervisor; +import com.cloud.resource.ResourceState; +import com.cloud.utils.Pair; +import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.GenericSearchBuilder; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; + +@RunWith(MockitoJUnitRunner.class) +public class HostDaoImplTest { + + @Spy + HostDaoImpl hostDao = new HostDaoImpl(); + + @Mock + private SearchBuilder mockSearchBuilder; + @Mock + private SearchCriteria mockSearchCriteria; + + @Test + public void testCountUpAndEnabledHostsInZone() { + long testZoneId = 100L; + hostDao.HostTypeCountSearch = mockSearchBuilder; + Mockito.when(mockSearchBuilder.create()).thenReturn(mockSearchCriteria); + Mockito.doNothing().when(mockSearchCriteria).setParameters(Mockito.anyString(), Mockito.any()); + int expected = 5; + Mockito.doReturn(expected).when(hostDao).getCount(mockSearchCriteria); + Integer count = hostDao.countUpAndEnabledHostsInZone(testZoneId); + Assert.assertSame(expected, count); + Mockito.verify(mockSearchCriteria).setParameters("type", Host.Type.Routing); + Mockito.verify(mockSearchCriteria).setParameters("resourceState", ResourceState.Enabled); + Mockito.verify(mockSearchCriteria).setParameters("zoneId", testZoneId); + Mockito.verify(hostDao).getCount(mockSearchCriteria); + } + + @Test + public void testCountAllHostsAndCPUSocketsByType() { + Host.Type type = Host.Type.Routing; + GenericDaoBase.SumCount mockSumCount = new GenericDaoBase.SumCount(); + mockSumCount.count = 10; + mockSumCount.sum = 20; + HostVO host = Mockito.mock(HostVO.class); + GenericSearchBuilder sb = Mockito.mock(GenericSearchBuilder.class); + Mockito.when(sb.entity()).thenReturn(host); + Mockito.doReturn(sb).when(hostDao).createSearchBuilder(GenericDaoBase.SumCount.class); + SearchCriteria sc = Mockito.mock(SearchCriteria.class); + Mockito.when(sb.create()).thenReturn(sc); + Mockito.doReturn(List.of(mockSumCount)).when(hostDao).customSearch(Mockito.any(SearchCriteria.class), Mockito.any()); + Pair result = hostDao.countAllHostsAndCPUSocketsByType(type); + Assert.assertEquals(10, result.first().intValue()); + Assert.assertEquals(20, result.second().intValue()); + Mockito.verify(sc).setParameters("type", type); + } + + @Test + public void testIsHostUp() { + long testHostId = 101L; + List statuses = List.of(Status.Up); + HostVO host = Mockito.mock(HostVO.class); + GenericSearchBuilder sb = Mockito.mock(GenericSearchBuilder.class); + Mockito.when(sb.entity()).thenReturn(host); + SearchCriteria sc = Mockito.mock(SearchCriteria.class); + Mockito.when(sb.create()).thenReturn(sc); + Mockito.doReturn(sb).when(hostDao).createSearchBuilder(Status.class); + Mockito.doReturn(statuses).when(hostDao).customSearch(Mockito.any(SearchCriteria.class), Mockito.any()); + boolean result = hostDao.isHostUp(testHostId); + Assert.assertTrue("Host should be up", result); + Mockito.verify(sc).setParameters("id", testHostId); + Mockito.verify(hostDao).customSearch(sc, null); + } + + @Test + public void testFindHostIdsByZoneClusterResourceStateTypeAndHypervisorType() { + Long zoneId = 1L; + Long clusterId = 2L; + List resourceStates = 
List.of(ResourceState.Enabled); + List types = List.of(Host.Type.Routing); + List hypervisorTypes = List.of(Hypervisor.HypervisorType.KVM); + List mockResults = List.of(1001L, 1002L); // Mocked result + HostVO host = Mockito.mock(HostVO.class); + GenericSearchBuilder sb = Mockito.mock(GenericSearchBuilder.class); + Mockito.when(sb.entity()).thenReturn(host); + SearchCriteria sc = Mockito.mock(SearchCriteria.class); + Mockito.when(sb.create()).thenReturn(sc); + Mockito.when(sb.and()).thenReturn(sb); + Mockito.doReturn(sb).when(hostDao).createSearchBuilder(Long.class); + Mockito.doReturn(mockResults).when(hostDao).customSearch(Mockito.any(SearchCriteria.class), Mockito.any()); + List hostIds = hostDao.findHostIdsByZoneClusterResourceStateTypeAndHypervisorType( + zoneId, clusterId, resourceStates, types, hypervisorTypes); + Assert.assertEquals(mockResults, hostIds); + Mockito.verify(sc).setParameters("zoneId", zoneId); + Mockito.verify(sc).setParameters("clusterId", clusterId); + Mockito.verify(sc).setParameters("resourceState", resourceStates.toArray()); + Mockito.verify(sc).setParameters("type", types.toArray()); + Mockito.verify(sc).setParameters("hypervisorTypes", hypervisorTypes.toArray()); + } + + @Test + public void testListDistinctHypervisorTypes() { + Long zoneId = 1L; + List mockResults = List.of(Hypervisor.HypervisorType.KVM, Hypervisor.HypervisorType.XenServer); + HostVO host = Mockito.mock(HostVO.class); + GenericSearchBuilder sb = Mockito.mock(GenericSearchBuilder.class); + Mockito.when(sb.entity()).thenReturn(host); + SearchCriteria sc = Mockito.mock(SearchCriteria.class); + Mockito.when(sb.create()).thenReturn(sc); + Mockito.doReturn(sb).when(hostDao).createSearchBuilder(Hypervisor.HypervisorType.class); + Mockito.doReturn(mockResults).when(hostDao).customSearch(Mockito.any(SearchCriteria.class), Mockito.any()); + List hypervisorTypes = hostDao.listDistinctHypervisorTypes(zoneId); + Assert.assertEquals(mockResults, hypervisorTypes); + Mockito.verify(sc).setParameters("zoneId", zoneId); + Mockito.verify(sc).setParameters("type", Host.Type.Routing); + } + + @Test + public void testListByIds() { + List ids = List.of(101L, 102L); + List mockResults = List.of(Mockito.mock(HostVO.class), Mockito.mock(HostVO.class)); + hostDao.IdsSearch = mockSearchBuilder; + Mockito.when(mockSearchBuilder.create()).thenReturn(mockSearchCriteria); + Mockito.doReturn(mockResults).when(hostDao).search(Mockito.any(SearchCriteria.class), Mockito.any()); + List hosts = hostDao.listByIds(ids); + Assert.assertEquals(mockResults, hosts); + Mockito.verify(mockSearchCriteria).setParameters("id", ids.toArray()); + Mockito.verify(hostDao).search(mockSearchCriteria, null); + } + + @Test + public void testListIdsBy() { + Host.Type type = Host.Type.Routing; + Status status = Status.Up; + ResourceState resourceState = ResourceState.Enabled; + Hypervisor.HypervisorType hypervisorType = Hypervisor.HypervisorType.KVM; + Long zoneId = 1L, podId = 2L, clusterId = 3L; + List mockResults = List.of(1001L, 1002L); + HostVO host = Mockito.mock(HostVO.class); + GenericSearchBuilder sb = Mockito.mock(GenericSearchBuilder.class); + Mockito.when(sb.entity()).thenReturn(host); + SearchCriteria sc = Mockito.mock(SearchCriteria.class); + Mockito.when(sb.create()).thenReturn(sc); + Mockito.doReturn(sb).when(hostDao).createSearchBuilder(Long.class); + Mockito.doReturn(mockResults).when(hostDao).customSearch(Mockito.any(SearchCriteria.class), Mockito.any()); + List hostIds = hostDao.listIdsBy(type, status, resourceState, 
hypervisorType, zoneId, podId, clusterId); + Assert.assertEquals(mockResults, hostIds); + Mockito.verify(sc).setParameters("type", type); + Mockito.verify(sc).setParameters("status", status); + Mockito.verify(sc).setParameters("resourceState", resourceState); + Mockito.verify(sc).setParameters("hypervisorType", hypervisorType); + Mockito.verify(sc).setParameters("zoneId", zoneId); + Mockito.verify(sc).setParameters("podId", podId); + Mockito.verify(sc).setParameters("clusterId", clusterId); + } +} diff --git a/engine/schema/src/test/java/com/cloud/usage/dao/UsageStorageDaoImplTest.java b/engine/schema/src/test/java/com/cloud/usage/dao/UsageStorageDaoImplTest.java index 05d9154b6a4..fa47d2cd90b 100644 --- a/engine/schema/src/test/java/com/cloud/usage/dao/UsageStorageDaoImplTest.java +++ b/engine/schema/src/test/java/com/cloud/usage/dao/UsageStorageDaoImplTest.java @@ -23,12 +23,9 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; import java.sql.PreparedStatement; -import com.cloud.utils.DateUtil; -import com.cloud.utils.db.TransactionLegacy; import java.util.Date; import java.util.TimeZone; -import com.cloud.usage.UsageStorageVO; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; @@ -36,6 +33,10 @@ import org.mockito.MockedStatic; import org.mockito.Mockito; import org.mockito.junit.MockitoJUnitRunner; +import com.cloud.usage.UsageStorageVO; +import com.cloud.utils.DateUtil; +import com.cloud.utils.db.TransactionLegacy; + @RunWith(MockitoJUnitRunner.class) public class UsageStorageDaoImplTest { diff --git a/engine/schema/src/test/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDaoBaseTest.java b/engine/schema/src/test/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDaoBaseTest.java new file mode 100644 index 00000000000..4c54599c396 --- /dev/null +++ b/engine/schema/src/test/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDaoBaseTest.java @@ -0,0 +1,181 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.resourcedetail; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.mockito.ArgumentMatchers.isNull; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.GeneratedValue; +import javax.persistence.GenerationType; +import javax.persistence.Id; +import javax.persistence.Table; + +import org.apache.cloudstack.api.ResourceDetail; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; + +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; + +@RunWith(MockitoJUnitRunner.class) +public class ResourceDetailsDaoBaseTest { + @Spy + @InjectMocks + TestDetailsDao testDetailsDao = new TestDetailsDao(); + + private SearchBuilder searchBuilder; + private SearchCriteria searchCriteria; + + @Before + public void setUp() { + searchBuilder = mock(SearchBuilder.class); + searchCriteria = mock(SearchCriteria.class); + TestDetailVO entityVO = mock(TestDetailVO.class); + when(searchBuilder.entity()).thenReturn(entityVO); + searchCriteria = mock(SearchCriteria.class); + doReturn(searchBuilder).when(testDetailsDao).createSearchBuilder(); + when(searchBuilder.create()).thenReturn(searchCriteria); + } + + @Test + public void testListDetailsKeyPairs() { + long resourceId = 1L; + List keys = Arrays.asList("key1", "key2"); + TestDetailVO result1 = mock(TestDetailVO.class); + when(result1.getName()).thenReturn("key1"); + when(result1.getValue()).thenReturn("value1"); + TestDetailVO result2 = mock(TestDetailVO.class); + when(result2.getName()).thenReturn("key2"); + when(result2.getValue()).thenReturn("value2"); + List mockResults = Arrays.asList(result1, result2); + doReturn(mockResults).when(testDetailsDao).search(any(SearchCriteria.class), isNull()); + Map result = testDetailsDao.listDetailsKeyPairs(resourceId, keys); + verify(searchBuilder).and(eq("resourceId"), any(), eq(SearchCriteria.Op.EQ)); + verify(searchBuilder).and(eq("name"), any(), eq(SearchCriteria.Op.IN)); + verify(searchBuilder).done(); + verify(searchCriteria).setParameters("resourceId", resourceId); + verify(searchCriteria).setParameters("name", keys.toArray()); + verify(testDetailsDao).search(searchCriteria, null); + assertEquals(2, result.size()); + assertEquals("value1", result.get("key1")); + assertEquals("value2", result.get("key2")); + } + + @Test + public void testListDetailsKeyPairsEmptyResult() { + long resourceId = 1L; + List keys = Arrays.asList("key1", "key2"); + doReturn(Collections.emptyList()).when(testDetailsDao).search(any(SearchCriteria.class), isNull()); + Map result = testDetailsDao.listDetailsKeyPairs(resourceId, keys); + verify(searchBuilder).and(eq("resourceId"), any(), eq(SearchCriteria.Op.EQ)); + verify(searchBuilder).and(eq("name"), any(), eq(SearchCriteria.Op.IN)); + verify(searchBuilder).done(); + verify(searchCriteria).setParameters("resourceId", resourceId); + verify(searchCriteria).setParameters("name", keys.toArray()); + verify(testDetailsDao).search(searchCriteria, null); + 
assertTrue(result.isEmpty()); + } + + protected static class TestDetailsDao extends ResourceDetailsDaoBase { + @Override + public void addDetail(long resourceId, String key, String value, boolean display) { + super.addDetail(new TestDetailVO(resourceId, key, value, display)); + } + } + + @Entity + @Table(name = "test_details") + protected static class TestDetailVO implements ResourceDetail { + @Id + @GeneratedValue(strategy = GenerationType.IDENTITY) + @Column(name = "id") + private long id; + + @Column(name = "resource_id") + private long resourceId; + + @Column(name = "name") + private String name; + + @Column(name = "value") + private String value; + + @Column(name = "display") + private boolean display = true; + + public TestDetailVO() { + } + + public TestDetailVO(long resourceId, String name, String value, boolean display) { + this.resourceId = resourceId; + this.name = name; + this.value = value; + this.display = display; + } + + @Override + public long getId() { + return id; + } + + @Override + public String getName() { + return name; + } + + @Override + public String getValue() { + return value; + } + + @Override + public long getResourceId() { + return resourceId; + } + + @Override + public boolean isDisplay() { + return display; + } + + public void setName(String name) { + this.name = name; + } + + public void setValue(String value) { + this.value = value; + } + } +} diff --git a/engine/schema/src/test/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImplTest.java b/engine/schema/src/test/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImplTest.java index bfcc38ba104..fc41a82e71d 100755 --- a/engine/schema/src/test/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImplTest.java +++ b/engine/schema/src/test/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImplTest.java @@ -17,12 +17,17 @@ package org.apache.cloudstack.storage.datastore.db; import static org.mockito.ArgumentMatchers.nullable; +import static org.mockito.Mockito.any; import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.isNull; +import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; import java.io.IOException; import java.sql.SQLException; import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -34,13 +39,15 @@ import org.junit.runner.RunWith; import org.mockito.InjectMocks; import org.mockito.Mock; import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; import com.cloud.storage.ScopeType; import com.cloud.storage.dao.StoragePoolHostDao; import com.cloud.storage.dao.StoragePoolTagsDao; +import com.cloud.utils.db.GenericSearchBuilder; +import com.cloud.utils.db.SearchBuilder; import junit.framework.TestCase; -import org.mockito.junit.MockitoJUnitRunner; @RunWith(MockitoJUnitRunner.class) public class PrimaryDataStoreDaoImplTest extends TestCase { @@ -59,6 +66,8 @@ public class PrimaryDataStoreDaoImplTest extends TestCase { @Mock StoragePoolVO storagePoolVO; + private GenericSearchBuilder genericSearchBuilder; + private static final String STORAGE_TAG_1 = "NFS-A"; private static final String STORAGE_TAG_2 = "NFS-B"; private static final String[] STORAGE_TAGS_ARRAY = {STORAGE_TAG_1, STORAGE_TAG_2}; @@ -155,4 +164,32 @@ public class PrimaryDataStoreDaoImplTest extends TestCase { String expectedSql = primaryDataStoreDao.DetailsSqlPrefix + SQL_VALUES + 
primaryDataStoreDao.DetailsSqlSuffix; verify(primaryDataStoreDao).searchStoragePoolsPreparedStatement(expectedSql, DATACENTER_ID, POD_ID, CLUSTER_ID, SCOPE, STORAGE_POOL_DETAILS.size()); } + + @Test + public void testListAllIds() { + GenericSearchBuilder genericSearchBuilder = mock(SearchBuilder.class); + StoragePoolVO entityVO = mock(StoragePoolVO.class); + when(genericSearchBuilder.entity()).thenReturn(entityVO); + doReturn(genericSearchBuilder).when(primaryDataStoreDao).createSearchBuilder(Long.class); + List mockIds = Arrays.asList(1L, 2L, 3L); + doReturn(mockIds).when(primaryDataStoreDao).customSearch(any(), isNull()); + List result = primaryDataStoreDao.listAllIds(); + verify(primaryDataStoreDao).customSearch(genericSearchBuilder.create(), null); + assertEquals(3, result.size()); + assertEquals(Long.valueOf(1L), result.get(0)); + assertEquals(Long.valueOf(2L), result.get(1)); + assertEquals(Long.valueOf(3L), result.get(2)); + } + + @Test + public void testListAllIdsEmptyResult() { + GenericSearchBuilder genericSearchBuilder = mock(SearchBuilder.class); + StoragePoolVO entityVO = mock(StoragePoolVO.class); + when(genericSearchBuilder.entity()).thenReturn(entityVO); + doReturn(genericSearchBuilder).when(primaryDataStoreDao).createSearchBuilder(Long.class); + doReturn(Collections.emptyList()).when(primaryDataStoreDao).customSearch(any(), isNull()); + List result = primaryDataStoreDao.listAllIds(); + verify(primaryDataStoreDao).customSearch(genericSearchBuilder.create(), null); + assertTrue(result.isEmpty()); + } } diff --git a/framework/agent-lb/src/main/java/org/apache/cloudstack/agent/lb/IndirectAgentLBAlgorithm.java b/framework/agent-lb/src/main/java/org/apache/cloudstack/agent/lb/IndirectAgentLBAlgorithm.java index c87a0996fcc..062d2226876 100644 --- a/framework/agent-lb/src/main/java/org/apache/cloudstack/agent/lb/IndirectAgentLBAlgorithm.java +++ b/framework/agent-lb/src/main/java/org/apache/cloudstack/agent/lb/IndirectAgentLBAlgorithm.java @@ -42,4 +42,8 @@ public interface IndirectAgentLBAlgorithm { * @return true if the lists are equal, false if not */ boolean compare(final List msList, final List receivedMsList); + + default boolean isHostListNeeded() { + return false; + } } diff --git a/framework/cluster/src/main/java/com/cloud/cluster/dao/ManagementServerHostPeerDaoImpl.java b/framework/cluster/src/main/java/com/cloud/cluster/dao/ManagementServerHostPeerDaoImpl.java index ec69f5817ac..92d56da6184 100644 --- a/framework/cluster/src/main/java/com/cloud/cluster/dao/ManagementServerHostPeerDaoImpl.java +++ b/framework/cluster/src/main/java/com/cloud/cluster/dao/ManagementServerHostPeerDaoImpl.java @@ -107,8 +107,7 @@ public class ManagementServerHostPeerDaoImpl extends GenericDaoBase l = listBy(sc); - return l.size(); + return getCount(sc); } @Override diff --git a/framework/config/src/main/java/org/apache/cloudstack/framework/config/impl/ConfigDepotImpl.java b/framework/config/src/main/java/org/apache/cloudstack/framework/config/impl/ConfigDepotImpl.java index b47370d9205..911a4ad3707 100644 --- a/framework/config/src/main/java/org/apache/cloudstack/framework/config/impl/ConfigDepotImpl.java +++ b/framework/config/src/main/java/org/apache/cloudstack/framework/config/impl/ConfigDepotImpl.java @@ -23,7 +23,6 @@ import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Set; -import java.util.concurrent.TimeUnit; import javax.annotation.PostConstruct; import javax.inject.Inject; @@ -36,6 +35,7 @@ import 
org.apache.cloudstack.framework.config.ScopedConfigStorage; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.framework.config.dao.ConfigurationGroupDao; import org.apache.cloudstack.framework.config.dao.ConfigurationSubGroupDao; +import org.apache.cloudstack.utils.cache.LazyCache; import org.apache.commons.lang.ObjectUtils; import org.apache.commons.lang3.StringUtils; import org.apache.logging.log4j.LogManager; @@ -44,8 +44,6 @@ import org.apache.logging.log4j.Logger; import com.cloud.utils.Pair; import com.cloud.utils.Ternary; import com.cloud.utils.exception.CloudRuntimeException; -import com.github.benmanes.caffeine.cache.Cache; -import com.github.benmanes.caffeine.cache.Caffeine; /** * ConfigDepotImpl implements the ConfigDepot and ConfigDepotAdmin interface. @@ -87,17 +85,15 @@ public class ConfigDepotImpl implements ConfigDepot, ConfigDepotAdmin { List _scopedStorages; Set _configured = Collections.synchronizedSet(new HashSet()); Set newConfigs = Collections.synchronizedSet(new HashSet<>()); - Cache configCache; + LazyCache configCache; private HashMap>> _allKeys = new HashMap>>(1007); HashMap>> _scopeLevelConfigsMap = new HashMap>>(); public ConfigDepotImpl() { - configCache = Caffeine.newBuilder() - .maximumSize(512) - .expireAfterWrite(CONFIG_CACHE_EXPIRE_SECONDS, TimeUnit.SECONDS) - .build(); + configCache = new LazyCache<>(512, + CONFIG_CACHE_EXPIRE_SECONDS, this::getConfigStringValueInternal); ConfigKey.init(this); createEmptyScopeLevelMappings(); } @@ -311,7 +307,7 @@ public class ConfigDepotImpl implements ConfigDepot, ConfigDepotAdmin { @Override public String getConfigStringValue(String key, ConfigKey.Scope scope, Long scopeId) { - return configCache.get(getConfigCacheKey(key, scope, scopeId), this::getConfigStringValueInternal); + return configCache.get(getConfigCacheKey(key, scope, scopeId)); } @Override diff --git a/framework/db/src/main/java/com/cloud/utils/db/GenericDao.java b/framework/db/src/main/java/com/cloud/utils/db/GenericDao.java index de8838b0999..44c312ea9d8 100644 --- a/framework/db/src/main/java/com/cloud/utils/db/GenericDao.java +++ b/framework/db/src/main/java/com/cloud/utils/db/GenericDao.java @@ -148,6 +148,11 @@ public interface GenericDao { */ List listAll(Filter filter); + /** + * Lists the IDs of all active rows.
+ */ + List listAllIds(); + /** * Search for the entity beans * @param sc diff --git a/framework/db/src/main/java/com/cloud/utils/db/GenericDaoBase.java b/framework/db/src/main/java/com/cloud/utils/db/GenericDaoBase.java index c7f2daadc51..bf6fb03563f 100644 --- a/framework/db/src/main/java/com/cloud/utils/db/GenericDaoBase.java +++ b/framework/db/src/main/java/com/cloud/utils/db/GenericDaoBase.java @@ -1218,6 +1218,35 @@ public abstract class GenericDaoBase extends Compone return executeList(sql.toString()); } + private Object getIdObject() { + T entity = (T)_searchEnhancer.create(); + try { + Method m = _entityBeanType.getMethod("getId"); + return m.invoke(entity); + } catch (NoSuchMethodException | InvocationTargetException | IllegalAccessException ignored) { + logger.warn("Unable to get ID object for entity: {}", _entityBeanType.getSimpleName()); + } + return null; + } + + @Override + public List listAllIds() { + Object idObj = getIdObject(); + if (idObj == null) { + return Collections.emptyList(); + } + Class clazz = (Class)idObj.getClass(); + GenericSearchBuilder sb = createSearchBuilder(clazz); + try { + Method m = sb.entity().getClass().getMethod("getId"); + sb.selectFields(m.invoke(sb.entity())); + } catch (NoSuchMethodException | InvocationTargetException | IllegalAccessException ignored) { + return Collections.emptyList(); + } + sb.done(); + return customSearch(sb.create(), null); + } + @Override public boolean expunge(final ID id) { final TransactionLegacy txn = TransactionLegacy.currentTxn(); @@ -2445,4 +2474,11 @@ public abstract class GenericDaoBase extends Compone } } + public static class SumCount { + public long sum; + public long count; + + public SumCount() { + } + } } diff --git a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDao.java b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDao.java index b3bfda0334c..79ec3f2b087 100644 --- a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDao.java +++ b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDao.java @@ -40,4 +40,5 @@ public interface VmWorkJobDao extends GenericDao { void expungeLeftoverWorkJobs(long msid); int expungeByVmList(List vmIds, Long batchSize); + List listVmIdsWithPendingJob(); } diff --git a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDaoImpl.java b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDaoImpl.java index 3b167498a37..a467b5fdf59 100644 --- a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDaoImpl.java +++ b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDaoImpl.java @@ -24,6 +24,7 @@ import java.util.List; import javax.annotation.PostConstruct; import javax.inject.Inject; +import org.apache.cloudstack.framework.jobs.impl.AsyncJobVO; import org.apache.cloudstack.framework.jobs.impl.VmWorkJobVO; import org.apache.cloudstack.framework.jobs.impl.VmWorkJobVO.Step; import org.apache.cloudstack.jobs.JobInfo; @@ -32,6 +33,8 @@ import org.apache.commons.collections.CollectionUtils; import com.cloud.utils.DateUtil; import com.cloud.utils.db.Filter; import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.GenericSearchBuilder; +import com.cloud.utils.db.JoinBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Op; @@ -224,4 +227,17 @@ public class VmWorkJobDaoImpl 
extends GenericDaoBase implemen sc.setParameters("vmIds", vmIds.toArray()); return batchExpunge(sc, batchSize); } + + @Override + public List listVmIdsWithPendingJob() { + GenericSearchBuilder sb = createSearchBuilder(Long.class); + SearchBuilder asyncJobSearch = _baseJobDao.createSearchBuilder(); + asyncJobSearch.and("status", asyncJobSearch.entity().getStatus(), SearchCriteria.Op.EQ); + sb.join("asyncJobSearch", asyncJobSearch, sb.entity().getId(), asyncJobSearch.entity().getId(), JoinBuilder.JoinType.INNER); + sb.and("removed", sb.entity().getRemoved(), Op.NULL); + sb.selectFields(sb.entity().getVmInstanceId()); + SearchCriteria sc = sb.create(); + sc.setJoinParameters("asyncJobSearch", "status", JobInfo.Status.IN_PROGRESS); + return customSearch(sc, null); + } } diff --git a/framework/jobs/src/test/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDaoImplTest.java b/framework/jobs/src/test/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDaoImplTest.java index 3e2bc15b1e0..a70a96b1a14 100644 --- a/framework/jobs/src/test/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDaoImplTest.java +++ b/framework/jobs/src/test/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDaoImplTest.java @@ -16,27 +16,69 @@ // under the License. package org.apache.cloudstack.framework.jobs.dao; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyLong; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.isNull; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; import java.util.List; +import org.apache.cloudstack.framework.jobs.impl.AsyncJobVO; import org.apache.cloudstack.framework.jobs.impl.VmWorkJobVO; +import org.apache.cloudstack.jobs.JobInfo; import org.junit.Assert; +import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; -import org.mockito.Mockito; +import org.mockito.InjectMocks; +import org.mockito.Mock; import org.mockito.Spy; import org.mockito.junit.MockitoJUnitRunner; import org.mockito.stubbing.Answer; +import com.cloud.utils.db.GenericSearchBuilder; +import com.cloud.utils.db.JoinBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; @RunWith(MockitoJUnitRunner.class) public class VmWorkJobDaoImplTest { + @Mock + AsyncJobDao asyncJobDao; @Spy + @InjectMocks VmWorkJobDaoImpl vmWorkJobDaoImpl; + private GenericSearchBuilder genericVmWorkJobSearchBuilder; + private SearchBuilder asyncJobSearchBuilder; + private SearchCriteria searchCriteria; + + @Before + public void setUp() { + genericVmWorkJobSearchBuilder = mock(GenericSearchBuilder.class); + VmWorkJobVO entityVO = mock(VmWorkJobVO.class); + when(genericVmWorkJobSearchBuilder.entity()).thenReturn(entityVO); + asyncJobSearchBuilder = mock(SearchBuilder.class); + AsyncJobVO asyncJobVO = mock(AsyncJobVO.class); + when(asyncJobSearchBuilder.entity()).thenReturn(asyncJobVO); + searchCriteria = mock(SearchCriteria.class); + when(vmWorkJobDaoImpl.createSearchBuilder(Long.class)).thenReturn(genericVmWorkJobSearchBuilder); + when(asyncJobDao.createSearchBuilder()).thenReturn(asyncJobSearchBuilder); + 
when(genericVmWorkJobSearchBuilder.create()).thenReturn(searchCriteria); + } + @Test public void testExpungeByVmListNoVms() { Assert.assertEquals(0, vmWorkJobDaoImpl.expungeByVmList( @@ -47,22 +89,52 @@ public class VmWorkJobDaoImplTest { @Test public void testExpungeByVmList() { - SearchBuilder sb = Mockito.mock(SearchBuilder.class); - SearchCriteria sc = Mockito.mock(SearchCriteria.class); - Mockito.when(sb.create()).thenReturn(sc); - Mockito.doAnswer((Answer) invocationOnMock -> { + SearchBuilder sb = mock(SearchBuilder.class); + SearchCriteria sc = mock(SearchCriteria.class); + when(sb.create()).thenReturn(sc); + doAnswer((Answer) invocationOnMock -> { Long batchSize = (Long)invocationOnMock.getArguments()[1]; return batchSize == null ? 0 : batchSize.intValue(); - }).when(vmWorkJobDaoImpl).batchExpunge(Mockito.any(SearchCriteria.class), Mockito.anyLong()); - Mockito.when(vmWorkJobDaoImpl.createSearchBuilder()).thenReturn(sb); - final VmWorkJobVO mockedVO = Mockito.mock(VmWorkJobVO.class); - Mockito.when(sb.entity()).thenReturn(mockedVO); + }).when(vmWorkJobDaoImpl).batchExpunge(any(SearchCriteria.class), anyLong()); + when(vmWorkJobDaoImpl.createSearchBuilder()).thenReturn(sb); + final VmWorkJobVO mockedVO = mock(VmWorkJobVO.class); + when(sb.entity()).thenReturn(mockedVO); List vmIds = List.of(1L, 2L); Object[] array = vmIds.toArray(); Long batchSize = 50L; Assert.assertEquals(batchSize.intValue(), vmWorkJobDaoImpl.expungeByVmList(List.of(1L, 2L), batchSize)); - Mockito.verify(sc).setParameters("vmIds", array); - Mockito.verify(vmWorkJobDaoImpl, Mockito.times(1)) + verify(sc).setParameters("vmIds", array); + verify(vmWorkJobDaoImpl, times(1)) .batchExpunge(sc, batchSize); } + + @Test + public void testListVmIdsWithPendingJob() { + List mockVmIds = Arrays.asList(101L, 102L, 103L); + doReturn(mockVmIds).when(vmWorkJobDaoImpl).customSearch(any(SearchCriteria.class), isNull()); + List result = vmWorkJobDaoImpl.listVmIdsWithPendingJob(); + verify(genericVmWorkJobSearchBuilder).join(eq("asyncJobSearch"), eq(asyncJobSearchBuilder), any(), any(), eq(JoinBuilder.JoinType.INNER)); + verify(genericVmWorkJobSearchBuilder).and(eq("removed"), any(), eq(SearchCriteria.Op.NULL)); + verify(genericVmWorkJobSearchBuilder).create(); + verify(asyncJobSearchBuilder).and(eq("status"), any(), eq(SearchCriteria.Op.EQ)); + verify(searchCriteria).setJoinParameters(eq("asyncJobSearch"), eq("status"), eq(JobInfo.Status.IN_PROGRESS)); + verify(vmWorkJobDaoImpl).customSearch(searchCriteria, null); + assertEquals(3, result.size()); + assertEquals(Long.valueOf(101L), result.get(0)); + assertEquals(Long.valueOf(102L), result.get(1)); + assertEquals(Long.valueOf(103L), result.get(2)); + } + + @Test + public void testListVmIdsWithPendingJobEmptyResult() { + doReturn(Collections.emptyList()).when(vmWorkJobDaoImpl).customSearch(any(SearchCriteria.class), isNull()); + List result = vmWorkJobDaoImpl.listVmIdsWithPendingJob(); + verify(genericVmWorkJobSearchBuilder).join(eq("asyncJobSearch"), eq(asyncJobSearchBuilder), any(), any(), eq(JoinBuilder.JoinType.INNER)); + verify(genericVmWorkJobSearchBuilder).and(eq("removed"), any(), eq(SearchCriteria.Op.NULL)); + verify(genericVmWorkJobSearchBuilder).create(); + verify(asyncJobSearchBuilder).and(eq("status"), any(), eq(SearchCriteria.Op.EQ)); + verify(searchCriteria).setJoinParameters(eq("asyncJobSearch"), eq("status"), eq(JobInfo.Status.IN_PROGRESS)); + verify(vmWorkJobDaoImpl).customSearch(searchCriteria, null); + assertTrue(result.isEmpty()); + } } diff --git 
a/plugins/acl/dynamic-role-based/src/main/java/org/apache/cloudstack/acl/DynamicRoleBasedAPIAccessChecker.java b/plugins/acl/dynamic-role-based/src/main/java/org/apache/cloudstack/acl/DynamicRoleBasedAPIAccessChecker.java index db40b6e68dd..030e0bcf014 100644 --- a/plugins/acl/dynamic-role-based/src/main/java/org/apache/cloudstack/acl/DynamicRoleBasedAPIAccessChecker.java +++ b/plugins/acl/dynamic-role-based/src/main/java/org/apache/cloudstack/acl/DynamicRoleBasedAPIAccessChecker.java @@ -26,20 +26,21 @@ import java.util.Set; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.acl.RolePermissionEntity.Permission; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.utils.cache.LazyCache; +import org.apache.commons.lang3.StringUtils; import com.cloud.exception.PermissionDeniedException; import com.cloud.exception.UnavailableCommandException; import com.cloud.user.Account; import com.cloud.user.AccountService; import com.cloud.user.User; +import com.cloud.utils.Pair; import com.cloud.utils.component.AdapterBase; import com.cloud.utils.component.PluggableService; -import org.apache.commons.lang3.StringUtils; public class DynamicRoleBasedAPIAccessChecker extends AdapterBase implements APIAclChecker { - @Inject private AccountService accountService; @Inject @@ -48,6 +49,9 @@ public class DynamicRoleBasedAPIAccessChecker extends AdapterBase implements API private List services; private Map> annotationRoleBasedApisMap = new HashMap>(); + private LazyCache accountCache; + private LazyCache>> rolePermissionsCache; + private int cachePeriod; protected DynamicRoleBasedAPIAccessChecker() { super(); @@ -99,23 +103,66 @@ public class DynamicRoleBasedAPIAccessChecker extends AdapterBase implements API annotationRoleBasedApisMap.get(role.getRoleType()).contains(apiName); } + protected Account getAccountFromId(long accountId) { + return accountService.getAccount(accountId); + } + + protected Pair> getRolePermissions(long roleId) { + final Role accountRole = roleService.findRole(roleId); + if (accountRole == null || accountRole.getId() < 1L) { + return new Pair<>(null, null); + } + + if (accountRole.getRoleType() == RoleType.Admin && accountRole.getId() == RoleType.Admin.getId()) { + return new Pair<>(accountRole, null); + } + + return new Pair<>(accountRole, roleService.findAllPermissionsBy(accountRole.getId())); + } + + protected Pair> getRolePermissionsUsingCache(long roleId) { + if (cachePeriod > 0) { + return rolePermissionsCache.get(roleId); + } + return getRolePermissions(roleId); + } + + protected Account getAccountFromIdUsingCache(long accountId) { + if (cachePeriod > 0) { + return accountCache.get(accountId); + } + return getAccountFromId(accountId); + } + @Override public boolean checkAccess(User user, String commandName) throws PermissionDeniedException { if (!isEnabled()) { return true; } - - Account account = accountService.getAccount(user.getAccountId()); + Account account = getAccountFromIdUsingCache(user.getAccountId()); if (account == null) { - throw new PermissionDeniedException(String.format("The account id [%s] for user id [%s] is null.", user.getAccountId(), user.getUuid())); + throw new PermissionDeniedException(String.format("Account for user id [%s] cannot be found", user.getUuid())); } - - return checkAccess(account, commandName); + Pair> roleAndPermissions = getRolePermissionsUsingCache(account.getRoleId()); + final Role accountRole = roleAndPermissions.first(); + 
if (accountRole == null) { + throw new PermissionDeniedException(String.format("Account role for user id [%s] cannot be found.", user.getUuid())); + } + if (accountRole.getRoleType() == RoleType.Admin && accountRole.getId() == RoleType.Admin.getId()) { + logger.info("Account for user id {} is Root Admin or Domain Admin, all APIs are allowed.", user.getUuid()); + return true; + } + List allPermissions = roleAndPermissions.second(); + if (checkApiPermissionByRole(accountRole, commandName, allPermissions)) { + return true; + } + throw new UnavailableCommandException(String.format("The API [%s] does not exist or is not available for the account for user id [%s].", commandName, user.getUuid())); } public boolean checkAccess(Account account, String commandName) { - final Role accountRole = roleService.findRole(account.getRoleId()); - if (accountRole == null || accountRole.getId() < 1L) { + Pair> roleAndPermissions = getRolePermissionsUsingCache(account.getRoleId()); + final Role accountRole = roleAndPermissions.first(); + if (accountRole == null) { throw new PermissionDeniedException(String.format("The account [%s] has role null or unknown.", account)); } @@ -160,6 +207,9 @@ public class DynamicRoleBasedAPIAccessChecker extends AdapterBase implements API @Override public boolean configure(String name, Map params) throws ConfigurationException { super.configure(name, params); + cachePeriod = Math.max(0, RoleService.DynamicApiCheckerCachePeriod.value()); + accountCache = new LazyCache<>(32, cachePeriod, this::getAccountFromId); + rolePermissionsCache = new LazyCache<>(32, cachePeriod, this::getRolePermissions); return true; } diff --git a/plugins/affinity-group-processors/explicit-dedication/src/main/java/org/apache/cloudstack/affinity/ExplicitDedicationProcessor.java b/plugins/affinity-group-processors/explicit-dedication/src/main/java/org/apache/cloudstack/affinity/ExplicitDedicationProcessor.java index 0ed658aa70d..667b475eada 100644 --- a/plugins/affinity-group-processors/explicit-dedication/src/main/java/org/apache/cloudstack/affinity/ExplicitDedicationProcessor.java +++ b/plugins/affinity-group-processors/explicit-dedication/src/main/java/org/apache/cloudstack/affinity/ExplicitDedicationProcessor.java @@ -321,13 +321,13 @@ public class ExplicitDedicationProcessor extends AffinityProcessorBase implement } } //add all hosts inside this in includeList - List hostList = _hostDao.listByDataCenterId(dr.getDataCenterId()); - for (HostVO host : hostList) { - DedicatedResourceVO dHost = _dedicatedDao.findByHostId(host.getId()); + List hostList = _hostDao.listEnabledIdsByDataCenterId(dr.getDataCenterId()); + for (Long hostId : hostList) { + DedicatedResourceVO dHost = _dedicatedDao.findByHostId(hostId); if (dHost != null && !dedicatedResources.contains(dHost)) { - avoidList.addHost(host.getId()); + avoidList.addHost(hostId); } else { - includeList.addHost(host.getId()); + includeList.addHost(hostId); } } } @@ -337,7 +337,7 @@ public class ExplicitDedicationProcessor extends AffinityProcessorBase implement List pods = _podDao.listByDataCenterId(dc.getId()); List clusters = _clusterDao.listClustersByDcId(dc.getId()); - List hosts = _hostDao.listByDataCenterId(dc.getId()); + List hostIds = _hostDao.listEnabledIdsByDataCenterId(dc.getId()); Set podsInIncludeList = includeList.getPodsToAvoid(); Set clustersInIncludeList = includeList.getClustersToAvoid(); Set hostsInIncludeList = includeList.getHostsToAvoid(); @@ -357,9 +357,9 @@ public class ExplicitDedicationProcessor extends AffinityProcessorBase 
implement } } - for (HostVO host : hosts) { - if (hostsInIncludeList != null && !hostsInIncludeList.contains(host.getId())) { - avoidList.addHost(host.getId()); + for (Long hostId : hostIds) { + if (hostsInIncludeList != null && !hostsInIncludeList.contains(hostId)) { + avoidList.addHost(hostId); } } return avoidList; diff --git a/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/dedicated/DedicatedResourceManagerImpl.java b/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/dedicated/DedicatedResourceManagerImpl.java index cd7dc2bbbad..1f020726793 100644 --- a/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/dedicated/DedicatedResourceManagerImpl.java +++ b/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/dedicated/DedicatedResourceManagerImpl.java @@ -23,7 +23,6 @@ import java.util.Map; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.commons.lang3.StringUtils; import org.apache.cloudstack.affinity.AffinityGroup; import org.apache.cloudstack.affinity.AffinityGroupService; import org.apache.cloudstack.affinity.dao.AffinityGroupDao; @@ -45,8 +44,9 @@ import org.apache.cloudstack.api.response.DedicatePodResponse; import org.apache.cloudstack.api.response.DedicateZoneResponse; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.logging.log4j.Logger; +import org.apache.commons.lang3.StringUtils; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.configuration.Config; @@ -126,7 +126,7 @@ public class DedicatedResourceManagerImpl implements DedicatedService { @ActionEvent(eventType = EventTypes.EVENT_DEDICATE_RESOURCE, eventDescription = "dedicating a Zone") public List dedicateZone(final Long zoneId, final Long domainId, final String accountName) { Long accountId = null; - List hosts = null; + List hostIds = null; if (accountName != null) { Account caller = CallContext.current().getCallingAccount(); Account owner = _accountMgr.finalizeOwner(caller, accountName, domainId, null); @@ -203,18 +203,20 @@ public class DedicatedResourceManagerImpl implements DedicatedService { releaseDedicatedResource(null, null, dr.getClusterId(), null); } - hosts = _hostDao.listByDataCenterId(dc.getId()); - for (HostVO host : hosts) { - DedicatedResourceVO dHost = _dedicatedDao.findByHostId(host.getId()); + hostIds = _hostDao.listEnabledIdsByDataCenterId(dc.getId()); + for (Long hostId : hostIds) { + DedicatedResourceVO dHost = _dedicatedDao.findByHostId(hostId); if (dHost != null) { if (!(childDomainIds.contains(dHost.getDomainId()))) { + HostVO host = _hostDao.findById(hostId); throw new CloudRuntimeException("Host " + host.getName() + " under this Zone " + dc.getName() + " is dedicated to different account/domain"); } if (accountId != null) { if (dHost.getAccountId().equals(accountId)) { hostsToRelease.add(dHost); } else { - logger.error(String.format("Host %s under this Zone %s is dedicated to different account/domain", host, dc)); + HostVO host = _hostDao.findById(hostId); + logger.error("{} under {} is dedicated to different account/domain", host, dc); throw new CloudRuntimeException("Host " + host.getName() + " under this Zone " + dc.getName() + " is dedicated to different account/domain"); } } else { @@ -230,7 +232,7 @@ public class DedicatedResourceManagerImpl implements DedicatedService { } } - 
checkHostsSuitabilityForExplicitDedication(accountId, childDomainIds, hosts); + checkHostsSuitabilityForExplicitDedication(accountId, childDomainIds, hostIds); final Long accountIdFinal = accountId; return Transaction.execute(new TransactionCallback>() { @@ -284,7 +286,7 @@ public class DedicatedResourceManagerImpl implements DedicatedService { childDomainIds.add(domainId); checkAccountAndDomain(accountId, domainId); HostPodVO pod = _podDao.findById(podId); - List hosts = null; + List hostIds = null; if (pod == null) { throw new InvalidParameterValueException("Unable to find pod by id " + podId); } else { @@ -339,18 +341,20 @@ public class DedicatedResourceManagerImpl implements DedicatedService { releaseDedicatedResource(null, null, dr.getClusterId(), null); } - hosts = _hostDao.findByPodId(pod.getId()); - for (HostVO host : hosts) { - DedicatedResourceVO dHost = _dedicatedDao.findByHostId(host.getId()); + hostIds = _hostDao.listIdsByPodId(pod.getId()); + for (Long hostId : hostIds) { + DedicatedResourceVO dHost = _dedicatedDao.findByHostId(hostId); if (dHost != null) { if (!(getDomainChildIds(domainId).contains(dHost.getDomainId()))) { + HostVO host = _hostDao.findById(hostId); throw new CloudRuntimeException("Host " + host.getName() + " under this Pod " + pod.getName() + " is dedicated to different account/domain"); } if (accountId != null) { if (dHost.getAccountId().equals(accountId)) { hostsToRelease.add(dHost); } else { - logger.error(String.format("Host %s under this Pod %s is dedicated to different account/domain", host, pod)); + HostVO host = _hostDao.findById(hostId); + logger.error("{} under this {} is dedicated to different account/domain", host, pod); throw new CloudRuntimeException("Host " + host.getName() + " under this Pod " + pod.getName() + " is dedicated to different account/domain"); } } else { @@ -366,7 +370,7 @@ public class DedicatedResourceManagerImpl implements DedicatedService { } } - checkHostsSuitabilityForExplicitDedication(accountId, childDomainIds, hosts); + checkHostsSuitabilityForExplicitDedication(accountId, childDomainIds, hostIds); final Long accountIdFinal = accountId; return Transaction.execute(new TransactionCallback>() { @@ -402,7 +406,7 @@ public class DedicatedResourceManagerImpl implements DedicatedService { @ActionEvent(eventType = EventTypes.EVENT_DEDICATE_RESOURCE, eventDescription = "dedicating a Cluster") public List dedicateCluster(final Long clusterId, final Long domainId, final String accountName) { Long accountId = null; - List hosts = null; + List hostIds = null; if (accountName != null) { Account caller = CallContext.current().getCallingAccount(); Account owner = _accountMgr.finalizeOwner(caller, accountName, domainId, null); @@ -448,12 +452,13 @@ public class DedicatedResourceManagerImpl implements DedicatedService { } //check if any resource under this cluster is dedicated to different account or sub-domain - hosts = _hostDao.findByClusterId(cluster.getId()); + hostIds = _hostDao.listIdsByClusterId(cluster.getId()); List hostsToRelease = new ArrayList(); - for (HostVO host : hosts) { - DedicatedResourceVO dHost = _dedicatedDao.findByHostId(host.getId()); + for (Long hostId : hostIds) { + DedicatedResourceVO dHost = _dedicatedDao.findByHostId(hostId); if (dHost != null) { if (!(childDomainIds.contains(dHost.getDomainId()))) { + HostVO host = _hostDao.findById(hostId); throw new CloudRuntimeException("Host " + host.getName() + " under this Cluster " + cluster.getName() + " is dedicated to different account/domain"); } @@ -479,7 +484,7 
@@ public class DedicatedResourceManagerImpl implements DedicatedService { } } - checkHostsSuitabilityForExplicitDedication(accountId, childDomainIds, hosts); + checkHostsSuitabilityForExplicitDedication(accountId, childDomainIds, hostIds); final Long accountIdFinal = accountId; return Transaction.execute(new TransactionCallback>() { @@ -576,7 +581,7 @@ public class DedicatedResourceManagerImpl implements DedicatedService { List childDomainIds = getDomainChildIds(domainId); childDomainIds.add(domainId); - checkHostSuitabilityForExplicitDedication(accountId, childDomainIds, host); + checkHostSuitabilityForExplicitDedication(accountId, childDomainIds, host.getId()); final Long accountIdFinal = accountId; return Transaction.execute(new TransactionCallback>() { @@ -662,13 +667,14 @@ public class DedicatedResourceManagerImpl implements DedicatedService { return vms; } - private boolean checkHostSuitabilityForExplicitDedication(Long accountId, List domainIds, Host host) { + private boolean checkHostSuitabilityForExplicitDedication(Long accountId, List domainIds, long hostId) { boolean suitable = true; - List allVmsOnHost = getVmsOnHost(host.getId()); + List allVmsOnHost = getVmsOnHost(hostId); if (accountId != null) { for (UserVmVO vm : allVmsOnHost) { if (vm.getAccountId() != accountId) { - logger.info(String.format("Host %s found to be unsuitable for explicit dedication as it is running instances of another account", host)); + Host host = _hostDao.findById(hostId); + logger.info("{} found to be unsuitable for explicit dedication as it is running instances of another account", host); throw new CloudRuntimeException("Host " + host.getUuid() + " found to be unsuitable for explicit dedication as it is " + "running instances of another account"); } @@ -676,7 +682,8 @@ public class DedicatedResourceManagerImpl implements DedicatedService { } else { for (UserVmVO vm : allVmsOnHost) { if (!domainIds.contains(vm.getDomainId())) { - logger.info(String.format("Host %s found to be unsuitable for explicit dedication as it is running instances of another domain", host)); + Host host = _hostDao.findById(hostId); + logger.info("{} found to be unsuitable for explicit dedication as it is running instances of another domain", host); throw new CloudRuntimeException("Host " + host.getUuid() + " found to be unsuitable for explicit dedication as it is " + "running instances of another domain"); } @@ -685,10 +692,10 @@ public class DedicatedResourceManagerImpl implements DedicatedService { return suitable; } - private boolean checkHostsSuitabilityForExplicitDedication(Long accountId, List domainIds, List hosts) { + private boolean checkHostsSuitabilityForExplicitDedication(Long accountId, List domainIds, List hostIds) { boolean suitable = true; - for (HostVO host : hosts) { - checkHostSuitabilityForExplicitDedication(accountId, domainIds, host); + for (Long hostId : hostIds) { + checkHostSuitabilityForExplicitDedication(accountId, domainIds, hostId); } return suitable; } diff --git a/plugins/deployment-planners/implicit-dedication/src/main/java/com/cloud/deploy/ImplicitDedicationPlanner.java b/plugins/deployment-planners/implicit-dedication/src/main/java/com/cloud/deploy/ImplicitDedicationPlanner.java index b971b3b8596..f9cde2ae441 100644 --- a/plugins/deployment-planners/implicit-dedication/src/main/java/com/cloud/deploy/ImplicitDedicationPlanner.java +++ b/plugins/deployment-planners/implicit-dedication/src/main/java/com/cloud/deploy/ImplicitDedicationPlanner.java @@ -21,14 +21,15 @@ import java.util.HashSet; 
import java.util.List; import java.util.Map; import java.util.Set; +import java.util.stream.Collectors; import javax.inject.Inject; import javax.naming.ConfigurationException; +import org.apache.commons.collections.CollectionUtils; import com.cloud.configuration.Config; import com.cloud.exception.InsufficientServerCapacityException; -import com.cloud.host.HostVO; import com.cloud.resource.ResourceManager; import com.cloud.service.ServiceOfferingVO; import com.cloud.service.dao.ServiceOfferingDao; @@ -38,7 +39,6 @@ import com.cloud.utils.DateUtil; import com.cloud.utils.NumbersUtil; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachineProfile; -import org.springframework.util.CollectionUtils; public class ImplicitDedicationPlanner extends FirstFitPlanner implements DeploymentClusterPlanner { @@ -73,12 +73,11 @@ public class ImplicitDedicationPlanner extends FirstFitPlanner implements Deploy boolean preferred = isServiceOfferingUsingPlannerInPreferredMode(vmProfile.getServiceOfferingId()); // Get the list of all the hosts in the given clusters - List allHosts = new ArrayList(); - for (Long cluster : clusterList) { - List hostsInCluster = resourceMgr.listAllHostsInCluster(cluster); - for (HostVO hostVO : hostsInCluster) { - allHosts.add(hostVO.getId()); - } + List allHosts = new ArrayList<>(); + if (CollectionUtils.isNotEmpty(clusterList)) { + allHosts = clusterList.stream() + .flatMap(cluster -> hostDao.listIdsByClusterId(cluster).stream()) + .collect(Collectors.toList()); } // Go over all the hosts in the cluster and get a list of @@ -224,20 +223,15 @@ public class ImplicitDedicationPlanner extends FirstFitPlanner implements Deploy } private List getUpdatedClusterList(List clusterList, Set hostsSet) { - List updatedClusterList = new ArrayList(); - for (Long cluster : clusterList) { - List hosts = resourceMgr.listAllHostsInCluster(cluster); - Set hostsInClusterSet = new HashSet(); - for (HostVO host : hosts) { - hostsInClusterSet.add(host.getId()); - } - - if (!hostsSet.containsAll(hostsInClusterSet)) { - updatedClusterList.add(cluster); - } + if (CollectionUtils.isEmpty(clusterList)) { + return new ArrayList<>(); } - - return updatedClusterList; + return clusterList.stream() + .filter(cluster -> { + Set hostsInClusterSet = new HashSet<>(hostDao.listIdsByClusterId(cluster)); + return !hostsSet.containsAll(hostsInClusterSet); + }) + .collect(Collectors.toList()); } @Override @@ -257,15 +251,11 @@ public class ImplicitDedicationPlanner extends FirstFitPlanner implements Deploy Account account = vmProfile.getOwner(); // Get the list of all the hosts in the given clusters - List allHosts = new ArrayList(); - if (!CollectionUtils.isEmpty(clusterList)) { - for (Long cluster : clusterList) { - List hostsInCluster = resourceMgr.listAllHostsInCluster(cluster); - for (HostVO hostVO : hostsInCluster) { - - allHosts.add(hostVO.getId()); - } - } + List allHosts = new ArrayList<>(); + if (CollectionUtils.isNotEmpty(clusterList)) { + allHosts = clusterList.stream() + .flatMap(cluster -> hostDao.listIdsByClusterId(cluster).stream()) + .collect(Collectors.toList()); } // Go over all the hosts in the cluster and get a list of // 1. All empty hosts, not running any vms. 
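The getUpdatedClusterList refactor above keeps only those clusters that still contain hosts outside the already-collected host set, working purely on host IDs instead of loaded HostVO rows. Below is a minimal, self-contained sketch of that filtering pattern; ClusterHostIdFilterSketch, ClusterHostIdLookup and the sample data are hypothetical stand-ins for HostDao.listIdsByClusterId, not CloudStack code.

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

public class ClusterHostIdFilterSketch {

    // Hypothetical stand-in for HostDao.listIdsByClusterId(clusterId).
    interface ClusterHostIdLookup {
        List<Long> listIdsByClusterId(Long clusterId);
    }

    // Keeps only clusters that still contain at least one host outside hostsToSkip,
    // mirroring the stream-based getUpdatedClusterList shown in the diff above.
    static List<Long> filterClusters(List<Long> clusterIds, Set<Long> hostsToSkip, ClusterHostIdLookup lookup) {
        if (clusterIds == null || clusterIds.isEmpty()) {
            return new ArrayList<>();
        }
        return clusterIds.stream()
                .filter(clusterId -> {
                    Set<Long> hostsInCluster = new HashSet<>(lookup.listIdsByClusterId(clusterId));
                    return !hostsToSkip.containsAll(hostsInCluster);
                })
                .collect(Collectors.toList());
    }

    public static void main(String[] args) {
        Map<Long, List<Long>> hostsByCluster = Map.of(1L, List.of(5L), 2L, List.of(6L), 3L, List.of(7L));
        ClusterHostIdLookup lookup = clusterId -> hostsByCluster.getOrDefault(clusterId, List.of());
        // Cluster 2's only host (6) is in the skip set, so clusters 1 and 3 remain.
        System.out.println(filterClusters(List.of(1L, 2L, 3L), Set.of(6L), lookup));
    }
}

The same ID-only lookup pattern is what the dedication and planner changes elsewhere in this patch rely on to avoid loading full HostVO rows.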
diff --git a/plugins/deployment-planners/implicit-dedication/src/test/java/org/apache/cloudstack/implicitplanner/ImplicitPlannerTest.java b/plugins/deployment-planners/implicit-dedication/src/test/java/org/apache/cloudstack/implicitplanner/ImplicitPlannerTest.java index e174824cfdd..2d2b4c78261 100644 --- a/plugins/deployment-planners/implicit-dedication/src/test/java/org/apache/cloudstack/implicitplanner/ImplicitPlannerTest.java +++ b/plugins/deployment-planners/implicit-dedication/src/test/java/org/apache/cloudstack/implicitplanner/ImplicitPlannerTest.java @@ -16,11 +16,11 @@ // under the License. package org.apache.cloudstack.implicitplanner; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.everyItem; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.hamcrest.Matchers.everyItem; -import static org.hamcrest.Matchers.equalTo; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -36,7 +36,11 @@ import java.util.UUID; import javax.inject.Inject; -import com.cloud.user.User; +import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.test.utils.SpringUtils; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -54,12 +58,6 @@ import org.springframework.test.context.ContextConfiguration; import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; import org.springframework.test.context.support.AnnotationConfigContextLoader; -import org.apache.cloudstack.context.CallContext; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; -import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; -import org.apache.cloudstack.test.utils.SpringUtils; - import com.cloud.capacity.Capacity; import com.cloud.capacity.CapacityManager; import com.cloud.capacity.dao.CapacityDao; @@ -73,7 +71,6 @@ import com.cloud.deploy.DeploymentPlanner.ExcludeList; import com.cloud.deploy.ImplicitDedicationPlanner; import com.cloud.exception.InsufficientServerCapacityException; import com.cloud.gpu.dao.HostGpuGroupsDao; -import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; import com.cloud.host.dao.HostDetailsDao; import com.cloud.host.dao.HostTagsDao; @@ -90,6 +87,7 @@ import com.cloud.storage.dao.VolumeDao; import com.cloud.user.Account; import com.cloud.user.AccountManager; import com.cloud.user.AccountVO; +import com.cloud.user.User; import com.cloud.user.UserVO; import com.cloud.utils.Pair; import com.cloud.utils.component.ComponentContext; @@ -387,21 +385,9 @@ public class ImplicitPlannerTest { when(serviceOfferingDetailsDao.listDetailsKeyPairs(offeringId)).thenReturn(details); // Initialize hosts in clusters - HostVO host1 = mock(HostVO.class); - when(host1.getId()).thenReturn(5L); - HostVO host2 = mock(HostVO.class); - when(host2.getId()).thenReturn(6L); - HostVO host3 = mock(HostVO.class); - when(host3.getId()).thenReturn(7L); - List hostsInCluster1 = new ArrayList(); - List hostsInCluster2 = new ArrayList(); - List hostsInCluster3 = new ArrayList(); - 
hostsInCluster1.add(host1); - hostsInCluster2.add(host2); - hostsInCluster3.add(host3); - when(resourceMgr.listAllHostsInCluster(1)).thenReturn(hostsInCluster1); - when(resourceMgr.listAllHostsInCluster(2)).thenReturn(hostsInCluster2); - when(resourceMgr.listAllHostsInCluster(3)).thenReturn(hostsInCluster3); + when(hostDao.listIdsByClusterId(1L)).thenReturn(List.of(5L)); + when(hostDao.listIdsByClusterId(2L)).thenReturn(List.of(6L)); + when(hostDao.listIdsByClusterId(3L)).thenReturn(List.of(7L)); // Mock vms on each host. long offeringIdForVmsOfThisAccount = 15L; diff --git a/plugins/hypervisors/simulator/src/main/java/com/cloud/resource/AgentRoutingResource.java b/plugins/hypervisors/simulator/src/main/java/com/cloud/resource/AgentRoutingResource.java index 80ced4c230d..e1f4d87fa7e 100644 --- a/plugins/hypervisors/simulator/src/main/java/com/cloud/resource/AgentRoutingResource.java +++ b/plugins/hypervisors/simulator/src/main/java/com/cloud/resource/AgentRoutingResource.java @@ -109,7 +109,7 @@ public class AgentRoutingResource extends AgentStorageResource { public PingCommand getCurrentStatus(long id) { TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); try { - MockConfigurationVO config = _simMgr.getMockConfigurationDao().findByNameBottomUP(agentHost.getDataCenterId(), agentHost.getPodId(), agentHost.getClusterId(), agentHost.getId(), "PingCommand"); + MockConfigurationVO config = null; if (config != null) { Map configParameters = config.getParameters(); for (Map.Entry entry : configParameters.entrySet()) { @@ -122,7 +122,7 @@ public class AgentRoutingResource extends AgentStorageResource { } } - config = _simMgr.getMockConfigurationDao().findByNameBottomUP(agentHost.getDataCenterId(), agentHost.getPodId(), agentHost.getClusterId(), agentHost.getId(), "PingRoutingWithNwGroupsCommand"); + config = null; if (config != null) { String message = config.getJsonResponse(); if (message != null) { diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/discoverer/XcpServerDiscoverer.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/discoverer/XcpServerDiscoverer.java index 8a5e59e4373..4fa7e788224 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/discoverer/XcpServerDiscoverer.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/discoverer/XcpServerDiscoverer.java @@ -31,6 +31,7 @@ import javax.naming.ConfigurationException; import javax.persistence.EntityExistsException; import org.apache.cloudstack.hypervisor.xenserver.XenserverConfigs; +import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; import org.apache.maven.artifact.versioning.ComparableVersion; import org.apache.xmlrpc.XmlRpcException; @@ -144,8 +145,8 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L sc.and(sc.entity().getGuid(), Op.EQ, guid); List clusters = sc.list(); ClusterVO clu = clusters.get(0); - List clusterHosts = _resourceMgr.listAllHostsInCluster(clu.getId()); - if (clusterHosts == null || clusterHosts.size() == 0) { + List clusterHostIds = _hostDao.listIdsByClusterId(clu.getId()); + if (CollectionUtils.isEmpty(clusterHostIds)) { clu.setGuid(null); _clusterDao.update(clu.getId(), clu); _clusterDao.update(cluster.getId(), cluster); @@ -245,8 +246,8 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L if (clu.getGuid() == null) { setClusterGuid(clu, 
poolUuid); } else { - List clusterHosts = _resourceMgr.listAllHostsInCluster(clusterId); - if (clusterHosts != null && clusterHosts.size() > 0) { + List clusterHostIds = _hostDao.listIdsByClusterId(clusterId); + if (CollectionUtils.isNotEmpty(clusterHostIds)) { if (!clu.getGuid().equals(poolUuid)) { String msg = "Please join the host " + hostIp + " to XS pool " + clu.getGuid() + " through XC/XS before adding it through CS UI"; diff --git a/plugins/integrations/prometheus/src/main/java/org/apache/cloudstack/metrics/PrometheusExporterImpl.java b/plugins/integrations/prometheus/src/main/java/org/apache/cloudstack/metrics/PrometheusExporterImpl.java index a84b1a6e2de..e5dc0b29171 100644 --- a/plugins/integrations/prometheus/src/main/java/org/apache/cloudstack/metrics/PrometheusExporterImpl.java +++ b/plugins/integrations/prometheus/src/main/java/org/apache/cloudstack/metrics/PrometheusExporterImpl.java @@ -298,8 +298,8 @@ public class PrometheusExporterImpl extends ManagerBase implements PrometheusExp metricsList.add(new ItemHostMemory(zoneName, zoneUuid, null, null, null, null, ALLOCATED, allocatedCapacityByTag.third(), 0, tag)); }); - List allHostTagVOS = hostDao.listAll().stream() - .flatMap( h -> _hostTagsDao.getHostTags(h.getId()).stream()) + List allHostTagVOS = hostDao.listAllIds().stream() + .flatMap( h -> _hostTagsDao.getHostTags(h).stream()) .distinct() .collect(Collectors.toList()); List allHostTags = new ArrayList<>(); diff --git a/plugins/metrics/src/main/java/org/apache/cloudstack/metrics/MetricsService.java b/plugins/metrics/src/main/java/org/apache/cloudstack/metrics/MetricsService.java index 48033dd7538..bb776368838 100644 --- a/plugins/metrics/src/main/java/org/apache/cloudstack/metrics/MetricsService.java +++ b/plugins/metrics/src/main/java/org/apache/cloudstack/metrics/MetricsService.java @@ -30,6 +30,7 @@ import org.apache.cloudstack.api.response.StoragePoolResponse; import org.apache.cloudstack.api.response.UserVmResponse; import org.apache.cloudstack.api.response.VolumeResponse; import org.apache.cloudstack.api.response.ZoneResponse; +import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.response.ClusterMetricsResponse; import org.apache.cloudstack.response.DbMetricsResponse; import org.apache.cloudstack.response.HostMetricsResponse; @@ -47,6 +48,11 @@ import com.cloud.utils.Pair; import com.cloud.utils.component.PluggableService; public interface MetricsService extends PluggableService { + + ConfigKey AllowListMetricsComputation = new ConfigKey<>("Advanced", Boolean.class, "allow.list.metrics.computation", "true", + "Whether the list zones and cluster metrics APIs are allowed to compute metrics. 
Large environments may disable this.", + true, ConfigKey.Scope.Global); + InfrastructureResponse listInfrastructure(); ListResponse searchForVmMetricsStats(ListVMsUsageHistoryCmd cmd); @@ -56,10 +62,10 @@ public interface MetricsService extends PluggableService { List listVmMetrics(List vmResponses); List listStoragePoolMetrics(List poolResponses); List listHostMetrics(List poolResponses); - List listManagementServerMetrics(List poolResponses); List listClusterMetrics(Pair, Integer> clusterResponses); List listZoneMetrics(List poolResponses); + List listManagementServerMetrics(List poolResponses); UsageServerMetricsResponse listUsageServerMetrics(); DbMetricsResponse listDbMetrics(); } diff --git a/plugins/metrics/src/main/java/org/apache/cloudstack/metrics/MetricsServiceImpl.java b/plugins/metrics/src/main/java/org/apache/cloudstack/metrics/MetricsServiceImpl.java index 51524c12912..4f96b490291 100644 --- a/plugins/metrics/src/main/java/org/apache/cloudstack/metrics/MetricsServiceImpl.java +++ b/plugins/metrics/src/main/java/org/apache/cloudstack/metrics/MetricsServiceImpl.java @@ -61,6 +61,8 @@ import org.apache.cloudstack.api.response.VolumeResponse; import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.cluster.ClusterDrsAlgorithm; import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.framework.config.ConfigKey; +import org.apache.cloudstack.framework.config.Configurable; import org.apache.cloudstack.management.ManagementServerHost.State; import org.apache.cloudstack.response.ClusterMetricsResponse; import org.apache.cloudstack.response.DbMetricsResponse; @@ -110,8 +112,6 @@ import com.cloud.host.Status; import com.cloud.host.dao.HostDao; import com.cloud.network.router.VirtualRouter; import com.cloud.org.Cluster; -import com.cloud.org.Grouping; -import com.cloud.org.Managed; import com.cloud.server.DbStatsCollection; import com.cloud.server.ManagementServerHostStats; import com.cloud.server.StatsCollector; @@ -141,8 +141,7 @@ import com.cloud.vm.dao.VMInstanceDao; import com.cloud.vm.dao.VmStatsDao; import com.google.gson.Gson; -public class MetricsServiceImpl extends MutualExclusiveIdsManagerBase implements MetricsService { - +public class MetricsServiceImpl extends MutualExclusiveIdsManagerBase implements MetricsService, Configurable { @Inject private DataCenterDao dataCenterDao; @Inject @@ -197,7 +196,6 @@ public class MetricsServiceImpl extends MutualExclusiveIdsManagerBase implements } private void updateHostMetrics(final HostMetrics hostMetrics, final HostJoinVO host) { - hostMetrics.incrTotalHosts(); hostMetrics.addCpuAllocated(host.getCpuReservedCapacity() + host.getCpuUsedCapacity()); hostMetrics.addMemoryAllocated(host.getMemReservedCapacity() + host.getMemUsedCapacity()); final HostStats hostStats = ApiDBUtils.getHostStatistics(host.getId()); @@ -561,22 +559,17 @@ public class MetricsServiceImpl extends MutualExclusiveIdsManagerBase implements response.setZones(dataCenterDao.countAll()); response.setPods(podDao.countAll()); response.setClusters(clusterDao.countAll()); - response.setHosts(hostDao.countAllByType(Host.Type.Routing)); + Pair hostCountAndCpuSockets = hostDao.countAllHostsAndCPUSocketsByType(Host.Type.Routing); + response.setHosts(hostCountAndCpuSockets.first()); response.setStoragePools(storagePoolDao.countAll()); response.setImageStores(imageStoreDao.countAllImageStores()); response.setObjectStores(objectStoreDao.countAllObjectStores()); -
response.setSystemvms(vmInstanceDao.listByTypes(VirtualMachine.Type.ConsoleProxy, VirtualMachine.Type.SecondaryStorageVm).size()); + response.setSystemvms(vmInstanceDao.countByTypes(VirtualMachine.Type.ConsoleProxy, VirtualMachine.Type.SecondaryStorageVm)); response.setRouters(domainRouterDao.countAllByRole(VirtualRouter.Role.VIRTUAL_ROUTER)); response.setInternalLbs(domainRouterDao.countAllByRole(VirtualRouter.Role.INTERNAL_LB_VM)); response.setAlerts(alertDao.countAll()); - int cpuSockets = 0; - for (final Host host : hostDao.listByType(Host.Type.Routing)) { - if (host.getCpuSockets() != null) { - cpuSockets += host.getCpuSockets(); - } - } - response.setCpuSockets(cpuSockets); - response.setManagementServers(managementServerHostDao.listAll().size()); + response.setCpuSockets(hostCountAndCpuSockets.second()); + response.setManagementServers(managementServerHostDao.countAll()); return response; } @@ -764,38 +757,44 @@ public class MetricsServiceImpl extends MutualExclusiveIdsManagerBase implements final CapacityDaoImpl.SummedCapacity cpuCapacity = getCapacity(Capacity.CAPACITY_TYPE_CPU, null, clusterId); final CapacityDaoImpl.SummedCapacity memoryCapacity = getCapacity(Capacity.CAPACITY_TYPE_MEMORY, null, clusterId); final HostMetrics hostMetrics = new HostMetrics(cpuCapacity, memoryCapacity); + hostMetrics.setUpResources(Long.valueOf(hostDao.countAllInClusterByTypeAndStates(clusterId, Host.Type.Routing, List.of(Status.Up)))); + hostMetrics.setTotalResources(Long.valueOf(hostDao.countAllInClusterByTypeAndStates(clusterId, Host.Type.Routing, null))); + hostMetrics.setTotalHosts(hostMetrics.getTotalResources()); - List> cpuList = new ArrayList<>(); - List> memoryList = new ArrayList<>(); - - for (final Host host: hostDao.findByClusterId(clusterId)) { - if (host == null || host.getType() != Host.Type.Routing) { - continue; + if (AllowListMetricsComputation.value()) { + List> cpuList = new ArrayList<>(); + List> memoryList = new ArrayList<>(); + for (final Host host : hostDao.findByClusterId(clusterId)) { + if (host == null || host.getType() != Host.Type.Routing) { + continue; + } + updateHostMetrics(hostMetrics, hostJoinDao.findById(host.getId())); + HostJoinVO hostJoin = hostJoinDao.findById(host.getId()); + cpuList.add(new Ternary<>(hostJoin.getCpuUsedCapacity(), hostJoin.getCpuReservedCapacity(), hostJoin.getCpus() * hostJoin.getSpeed())); + memoryList.add(new Ternary<>(hostJoin.getMemUsedCapacity(), hostJoin.getMemReservedCapacity(), hostJoin.getTotalMemory())); } - if (host.getStatus() == Status.Up) { - hostMetrics.incrUpResources(); + try { + Double imbalance = ClusterDrsAlgorithm.getClusterImbalance(clusterId, cpuList, memoryList, null); + metricsResponse.setDrsImbalance(imbalance.isNaN() ? 
null : 100.0 * imbalance); + } catch (ConfigurationException e) { + logger.warn("Failed to get cluster imbalance for cluster {}", clusterId, e); + } + } else { + if (cpuCapacity != null) { + hostMetrics.setCpuAllocated(cpuCapacity.getAllocatedCapacity()); + } + if (memoryCapacity != null) { + hostMetrics.setMemoryAllocated(memoryCapacity.getAllocatedCapacity()); } - hostMetrics.incrTotalResources(); - HostJoinVO hostJoin = hostJoinDao.findById(host.getId()); - updateHostMetrics(hostMetrics, hostJoin); - - cpuList.add(new Ternary<>(hostJoin.getCpuUsedCapacity(), hostJoin.getCpuReservedCapacity(), hostJoin.getCpus() * hostJoin.getSpeed())); - memoryList.add(new Ternary<>(hostJoin.getMemUsedCapacity(), hostJoin.getMemReservedCapacity(), hostJoin.getTotalMemory())); } - try { - Double imbalance = ClusterDrsAlgorithm.getClusterImbalance(clusterId, cpuList, memoryList, null); - metricsResponse.setDrsImbalance(imbalance.isNaN() ? null : 100.0 * imbalance); - } catch (ConfigurationException e) { - logger.warn("Failed to get cluster imbalance for cluster " + clusterId, e); - } - - metricsResponse.setState(clusterResponse.getAllocationState(), clusterResponse.getManagedState()); - metricsResponse.setResources(hostMetrics.getUpResources(), hostMetrics.getTotalResources()); addHostCpuMetricsToResponse(metricsResponse, clusterId, hostMetrics); addHostMemoryMetricsToResponse(metricsResponse, clusterId, hostMetrics); metricsResponse.setHasAnnotation(clusterResponse.hasAnnotation()); + metricsResponse.setState(clusterResponse.getAllocationState(), clusterResponse.getManagedState()); + metricsResponse.setResources(hostMetrics.getUpResources(), hostMetrics.getTotalResources()); + metricsResponses.add(metricsResponse); } return metricsResponses; @@ -942,35 +941,38 @@ public class MetricsServiceImpl extends MutualExclusiveIdsManagerBase implements final CapacityDaoImpl.SummedCapacity cpuCapacity = getCapacity((int) Capacity.CAPACITY_TYPE_CPU, zoneId, null); final CapacityDaoImpl.SummedCapacity memoryCapacity = getCapacity((int) Capacity.CAPACITY_TYPE_MEMORY, zoneId, null); final HostMetrics hostMetrics = new HostMetrics(cpuCapacity, memoryCapacity); + hostMetrics.setUpResources(Long.valueOf(clusterDao.countAllManagedAndEnabledByDcId(zoneId))); + hostMetrics.setTotalResources(Long.valueOf(clusterDao.countAllByDcId(zoneId))); + hostMetrics.setTotalHosts(Long.valueOf(hostDao.countAllByTypeInZone(zoneId, Host.Type.Routing))); - for (final Cluster cluster : clusterDao.listClustersByDcId(zoneId)) { - if (cluster == null) { - continue; - } - hostMetrics.incrTotalResources(); - if (cluster.getAllocationState() == Grouping.AllocationState.Enabled - && cluster.getManagedState() == Managed.ManagedState.Managed) { - hostMetrics.incrUpResources(); - } - - for (final Host host: hostDao.findByClusterId(cluster.getId())) { - if (host == null || host.getType() != Host.Type.Routing) { + if (AllowListMetricsComputation.value()) { + for (final Cluster cluster : clusterDao.listClustersByDcId(zoneId)) { + if (cluster == null) { continue; } - updateHostMetrics(hostMetrics, hostJoinDao.findById(host.getId())); + for (final Host host: hostDao.findByClusterId(cluster.getId())) { + if (host == null || host.getType() != Host.Type.Routing) { + continue; + } + updateHostMetrics(hostMetrics, hostJoinDao.findById(host.getId())); + } + } + } else { + if (cpuCapacity != null) { + hostMetrics.setCpuAllocated(cpuCapacity.getAllocatedCapacity()); + } + if (memoryCapacity != null) { + 
hostMetrics.setMemoryAllocated(memoryCapacity.getAllocatedCapacity()); } } + addHostCpuMetricsToResponse(metricsResponse, null, hostMetrics); + addHostMemoryMetricsToResponse(metricsResponse, null, hostMetrics); + metricsResponse.setHasAnnotation(zoneResponse.hasAnnotation()); metricsResponse.setState(zoneResponse.getAllocationState()); metricsResponse.setResource(hostMetrics.getUpResources(), hostMetrics.getTotalResources()); - final Long totalHosts = hostMetrics.getTotalHosts(); - // CPU - addHostCpuMetricsToResponse(metricsResponse, null, hostMetrics); - // Memory - addHostMemoryMetricsToResponse(metricsResponse, null, hostMetrics); - metricsResponses.add(metricsResponse); } return metricsResponses; @@ -1028,12 +1030,14 @@ public class MetricsServiceImpl extends MutualExclusiveIdsManagerBase implements private void getQueryHistory(DbMetricsResponse response) { Map dbStats = ApiDBUtils.getDbStatistics(); - if (dbStats != null) { - response.setQueries((Long)dbStats.get(DbStatsCollection.queries)); - response.setUptime((Long)dbStats.get(DbStatsCollection.uptime)); + if (dbStats == null) { + return; } - List loadHistory = (List) dbStats.get(DbStatsCollection.loadAvarages); + response.setQueries((Long)dbStats.getOrDefault(DbStatsCollection.queries, -1L)); + response.setUptime((Long)dbStats.getOrDefault(DbStatsCollection.uptime, -1L)); + + List loadHistory = (List) dbStats.getOrDefault(DbStatsCollection.loadAvarages, new ArrayList()); double[] loadAverages = new double[loadHistory.size()]; int index = 0; @@ -1108,6 +1112,16 @@ public class MetricsServiceImpl extends MutualExclusiveIdsManagerBase implements return cmdList; } + @Override + public String getConfigComponentName() { + return MetricsService.class.getSimpleName(); + } + + @Override + public ConfigKey[] getConfigKeys() { + return new ConfigKey[] {AllowListMetricsComputation}; + } + private class HostMetrics { // CPU metrics private Long totalCpu = 0L; @@ -1133,6 +1147,14 @@ public class MetricsServiceImpl extends MutualExclusiveIdsManagerBase implements } } + public void setCpuAllocated(Long cpuAllocated) { + this.cpuAllocated = cpuAllocated; + } + + public void setMemoryAllocated(Long memoryAllocated) { + this.memoryAllocated = memoryAllocated; + } + public void addCpuAllocated(Long cpuAllocated) { this.cpuAllocated += cpuAllocated; } @@ -1161,16 +1183,16 @@ public class MetricsServiceImpl extends MutualExclusiveIdsManagerBase implements } } - public void incrTotalHosts() { - this.totalHosts++; + public void setTotalHosts(Long totalHosts) { + this.totalHosts = totalHosts; } - public void incrTotalResources() { - this.totalResources++; + public void setTotalResources(Long totalResources) { + this.totalResources = totalResources; } - public void incrUpResources() { - this.upResources++; + public void setUpResources(Long upResources) { + this.upResources = upResources; } public Long getTotalCpu() { diff --git a/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java b/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java index 771fcf09255..351d59f6b03 100644 --- a/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java +++ b/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java @@ -18,6 +18,26 @@ */ 
package org.apache.cloudstack.storage.datastore.lifecycle; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.stream.Collectors; + +import javax.inject.Inject; + +import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.HostScope; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters; +import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper; + import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; import com.cloud.agent.api.CreateStoragePoolCommand; @@ -48,6 +68,7 @@ import com.cloud.storage.dao.StoragePoolWorkDao; import com.cloud.storage.dao.VolumeDao; import com.cloud.user.dao.UserDao; import com.cloud.utils.NumbersUtil; +import com.cloud.utils.Pair; import com.cloud.utils.db.DB; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.VirtualMachineManager; @@ -56,23 +77,6 @@ import com.cloud.vm.dao.DomainRouterDao; import com.cloud.vm.dao.SecondaryStorageVmDao; import com.cloud.vm.dao.UserVmDao; import com.cloud.vm.dao.VMInstanceDao; -import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; -import org.apache.cloudstack.engine.subsystem.api.storage.HostScope; -import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; -import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle; -import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters; -import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; -import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; -import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; -import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper; - -import javax.inject.Inject; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.UUID; public class CloudStackPrimaryDataStoreLifeCycleImpl extends BasePrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle { @Inject @@ -326,18 +330,14 @@ public class CloudStackPrimaryDataStoreLifeCycleImpl extends BasePrimaryDataStor } private void validateVcenterDetails(Long zoneId, Long podId, Long clusterId, String storageHost) { - - List allHosts = - _resourceMgr.listAllUpHosts(Host.Type.Routing, clusterId, podId, zoneId); - if (allHosts.isEmpty()) { + List allHostIds = _hostDao.listIdsForUpRouting(zoneId, podId, clusterId); + if (allHostIds.isEmpty()) { throw new CloudRuntimeException(String.format("No host up to associate a storage pool with in zone: %s pod: %s cluster: %s", zoneDao.findById(zoneId), podDao.findById(podId), clusterDao.findById(clusterId))); } - - boolean success = false; - for (HostVO h : allHosts) { + for (Long hId : allHostIds) { 
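+            // Probe each up routing host in turn: the first positive answer validates the vCenter details; a negative answer aborts with an error, while a null answer is logged and the next host is tried.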
ValidateVcenterDetailsCommand cmd = new ValidateVcenterDetailsCommand(storageHost); - final Answer answer = agentMgr.easySend(h.getId(), cmd); + final Answer answer = agentMgr.easySend(hId, cmd); if (answer != null && answer.getResult()) { logger.info("Successfully validated vCenter details provided"); return; @@ -346,7 +346,7 @@ public class CloudStackPrimaryDataStoreLifeCycleImpl extends BasePrimaryDataStor throw new InvalidParameterValueException(String.format("Provided vCenter server details does not match with the existing vCenter in zone: %s", zoneDao.findById(zoneId))); } else { - logger.warn("Can not validate vCenter through host {} due to ValidateVcenterDetailsCommand returns null", h); + logger.warn("Can not validate vCenter through host {} due to ValidateVcenterDetailsCommand returns null", hostDao.findById(hId)); } } } @@ -385,85 +385,57 @@ public class CloudStackPrimaryDataStoreLifeCycleImpl extends BasePrimaryDataStor } } + private Pair, Boolean> prepareOcfs2NodesIfNeeded(PrimaryDataStoreInfo primaryStore) { + if (!StoragePoolType.OCFS2.equals(primaryStore.getPoolType())) { + return new Pair<>(_hostDao.listIdsForUpRouting(primaryStore.getDataCenterId(), + primaryStore.getPodId(), primaryStore.getClusterId()), true); + } + List allHosts = _resourceMgr.listAllUpHosts(Host.Type.Routing, primaryStore.getClusterId(), + primaryStore.getPodId(), primaryStore.getDataCenterId()); + if (allHosts.isEmpty()) { + return new Pair<>(Collections.emptyList(), true); + } + List hostIds = allHosts.stream().map(HostVO::getId).collect(Collectors.toList()); + if (!_ocfs2Mgr.prepareNodes(allHosts, primaryStore)) { + return new Pair<>(hostIds, false); + } + return new Pair<>(hostIds, true); + } + @Override public boolean attachCluster(DataStore store, ClusterScope scope) { - PrimaryDataStoreInfo primarystore = (PrimaryDataStoreInfo)store; - // Check if there is host up in this cluster - List allHosts = - _resourceMgr.listAllUpHosts(Host.Type.Routing, primarystore.getClusterId(), primarystore.getPodId(), primarystore.getDataCenterId()); - if (allHosts.isEmpty()) { - primaryDataStoreDao.expunge(primarystore.getId()); - throw new CloudRuntimeException(String.format("No host up to associate a storage pool with in cluster %s", clusterDao.findById(primarystore.getClusterId()))); + PrimaryDataStoreInfo primaryStore = (PrimaryDataStoreInfo)store; + Pair, Boolean> result = prepareOcfs2NodesIfNeeded(primaryStore); + List hostIds = result.first(); + if (hostIds.isEmpty()) { + primaryDataStoreDao.expunge(primaryStore.getId()); + throw new CloudRuntimeException("No host up to associate a storage pool with in cluster: " + + clusterDao.findById(primaryStore.getClusterId())); } - - if (primarystore.getPoolType() == StoragePoolType.OCFS2 && !_ocfs2Mgr.prepareNodes(allHosts, primarystore)) { - logger.warn("Can not create storage pool {} on cluster {}", primarystore::toString, () -> clusterDao.findById(primarystore.getClusterId())); - primaryDataStoreDao.expunge(primarystore.getId()); + if (!result.second()) { + logger.warn("Can not create storage pool {} on {}", primaryStore, + clusterDao.findById(primaryStore.getClusterId())); + primaryDataStoreDao.expunge(primaryStore.getId()); return false; } - - boolean success = false; - for (HostVO h : allHosts) { - success = createStoragePool(h, primarystore); - if (success) { + for (Long hId : hostIds) { + HostVO host = _hostDao.findById(hId); + if (createStoragePool(host, primaryStore)) { break; } } - logger.debug("In createPool Adding the pool to each of the hosts"); - List 
poolHosts = new ArrayList(); - for (HostVO h : allHosts) { - try { - storageMgr.connectHostToSharedPool(h, primarystore.getId()); - poolHosts.add(h); - } catch (StorageConflictException se) { - primaryDataStoreDao.expunge(primarystore.getId()); - throw new CloudRuntimeException("Storage has already been added as local storage"); - } catch (Exception e) { - logger.warn("Unable to establish a connection between " + h + " and " + primarystore, e); - String reason = storageMgr.getStoragePoolMountFailureReason(e.getMessage()); - if (reason != null) { - throw new CloudRuntimeException(reason); - } - } - } - - if (poolHosts.isEmpty()) { - logger.warn("No host can access storage pool {} on cluster {}", primarystore::toString, () -> clusterDao.findById(primarystore.getClusterId())); - primaryDataStoreDao.expunge(primarystore.getId()); - throw new CloudRuntimeException("Failed to access storage pool"); - } - + storageMgr.connectHostsToPool(store, hostIds, scope, true, true); dataStoreHelper.attachCluster(store); return true; } @Override - public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType hypervisorType) { - List hosts = _resourceMgr.listAllUpHostsInOneZoneByHypervisor(hypervisorType, scope.getScopeId()); + public boolean attachZone(DataStore store, ZoneScope scope, HypervisorType hypervisorType) { + List hostIds = _hostDao.listIdsForUpEnabledByZoneAndHypervisor(scope.getScopeId(), hypervisorType); logger.debug("In createPool. Attaching the pool to each of the hosts."); - List poolHosts = new ArrayList(); - for (HostVO host : hosts) { - try { - storageMgr.connectHostToSharedPool(host, dataStore.getId()); - poolHosts.add(host); - } catch (StorageConflictException se) { - primaryDataStoreDao.expunge(dataStore.getId()); - throw new CloudRuntimeException(String.format("Storage has already been added as local storage to host: %s", host)); - } catch (Exception e) { - logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e); - String reason = storageMgr.getStoragePoolMountFailureReason(e.getMessage()); - if (reason != null) { - throw new CloudRuntimeException(reason); - } - } - } - if (poolHosts.isEmpty()) { - logger.warn("No host can access storage pool " + dataStore + " in this zone."); - primaryDataStoreDao.expunge(dataStore.getId()); - throw new CloudRuntimeException("Failed to create storage pool as it is not accessible to hosts."); - } - dataStoreHelper.attachZone(dataStore, hypervisorType); + storageMgr.connectHostsToPool(store, hostIds, scope, true, true); + dataStoreHelper.attachZone(store, hypervisorType); return true; } diff --git a/plugins/storage/volume/default/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImplTest.java b/plugins/storage/volume/default/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImplTest.java index 4bab2f83712..24c036d443d 100644 --- a/plugins/storage/volume/default/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImplTest.java +++ b/plugins/storage/volume/default/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImplTest.java @@ -19,23 +19,14 @@ package org.apache.cloudstack.storage.datastore.lifecycle; -import com.cloud.agent.AgentManager; -import com.cloud.agent.api.ModifyStoragePoolAnswer; -import com.cloud.agent.api.ModifyStoragePoolCommand; -import com.cloud.agent.api.StoragePoolInfo; -import 
com.cloud.exception.StorageConflictException; -import com.cloud.host.Host; -import com.cloud.host.HostVO; -import com.cloud.host.Status; -import com.cloud.resource.ResourceManager; -import com.cloud.resource.ResourceState; -import com.cloud.storage.DataStoreRole; -import com.cloud.storage.Storage; -import com.cloud.storage.StorageManager; -import com.cloud.storage.StorageManagerImpl; -import com.cloud.storage.dao.StoragePoolHostDao; -import com.cloud.utils.exception.CloudRuntimeException; -import junit.framework.TestCase; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import java.util.List; + import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; @@ -58,14 +49,23 @@ import org.mockito.Mockito; import org.mockito.MockitoAnnotations; import org.mockito.junit.MockitoJUnitRunner; import org.springframework.test.util.ReflectionTestUtils; -import java.util.ArrayList; -import java.util.List; -import java.util.UUID; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.when; +import com.cloud.agent.AgentManager; +import com.cloud.agent.api.ModifyStoragePoolAnswer; +import com.cloud.agent.api.ModifyStoragePoolCommand; +import com.cloud.agent.api.StoragePoolInfo; +import com.cloud.exception.StorageConflictException; +import com.cloud.host.HostVO; +import com.cloud.host.dao.HostDao; +import com.cloud.resource.ResourceManager; +import com.cloud.storage.DataStoreRole; +import com.cloud.storage.Storage; +import com.cloud.storage.StorageManager; +import com.cloud.storage.StorageManagerImpl; +import com.cloud.storage.dao.StoragePoolHostDao; +import com.cloud.utils.exception.CloudRuntimeException; + +import junit.framework.TestCase; /** * Created by ajna123 on 9/22/2015. 
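A rough sketch (illustrative only, not part of the patch) of how the reworked attachCluster path can be exercised in this test, assuming the setUp() stubs added in the hunks below and a mocked StorageManager swapped in the same way testAttachClusterException does:

    StorageManager mockedStorageMgr = Mockito.mock(StorageManager.class);
    ReflectionTestUtils.setField(_cloudStackPrimaryDataStoreLifeCycle, "storageMgr", mockedStorageMgr);
    ClusterScope scope = new ClusterScope(1L, 1L, 1L);
    // hostDao.listIdsForUpRouting(...) and hostDao.findById(...) are stubbed in setUp()
    assertTrue(_cloudStackPrimaryDataStoreLifeCycle.attachCluster(store, scope));
    Mockito.verify(mockedStorageMgr).connectHostsToPool(eq(store), Mockito.anyList(), eq(scope), eq(true), eq(true));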
@@ -118,6 +118,9 @@ public class CloudStackPrimaryDataStoreLifeCycleImplTest extends TestCase { @Mock PrimaryDataStoreHelper primaryDataStoreHelper; + @Mock + HostDao hostDao; + AutoCloseable closeable; @Before @@ -129,17 +132,6 @@ public class CloudStackPrimaryDataStoreLifeCycleImplTest extends TestCase { ReflectionTestUtils.setField(storageMgr, "_dataStoreMgr", _dataStoreMgr); ReflectionTestUtils.setField(_cloudStackPrimaryDataStoreLifeCycle, "storageMgr", storageMgr); - List hostList = new ArrayList(); - HostVO host1 = new HostVO(1L, "aa01", Host.Type.Routing, "192.168.1.1", "255.255.255.0", null, null, null, null, null, null, null, null, null, null, - UUID.randomUUID().toString(), Status.Up, "1.0", null, null, 1L, null, 0, 0, "aa", 0, Storage.StoragePoolType.NetworkFilesystem); - HostVO host2 = new HostVO(1L, "aa02", Host.Type.Routing, "192.168.1.1", "255.255.255.0", null, null, null, null, null, null, null, null, null, null, - UUID.randomUUID().toString(), Status.Up, "1.0", null, null, 1L, null, 0, 0, "aa", 0, Storage.StoragePoolType.NetworkFilesystem); - - host1.setResourceState(ResourceState.Enabled); - host2.setResourceState(ResourceState.Disabled); - hostList.add(host1); - hostList.add(host2); - when(_dataStoreMgr.getDataStore(anyLong(), eq(DataStoreRole.Primary))).thenReturn(store); when(store.getPoolType()).thenReturn(Storage.StoragePoolType.NetworkFilesystem); when(store.isShared()).thenReturn(true); @@ -152,7 +144,9 @@ public class CloudStackPrimaryDataStoreLifeCycleImplTest extends TestCase { storageMgr.registerHostListener("default", hostListener); - when(_resourceMgr.listAllUpHosts(eq(Host.Type.Routing), anyLong(), anyLong(), anyLong())).thenReturn(hostList); + when(hostDao.listIdsForUpRouting(anyLong(), anyLong(), anyLong())) + .thenReturn(List.of(1L, 2L)); + when(hostDao.findById(anyLong())).thenReturn(mock(HostVO.class)); when(agentMgr.easySend(anyLong(), Mockito.any(ModifyStoragePoolCommand.class))).thenReturn(answer); when(answer.getResult()).thenReturn(true); @@ -171,18 +165,17 @@ public class CloudStackPrimaryDataStoreLifeCycleImplTest extends TestCase { } @Test - public void testAttachClusterException() throws Exception { - String exceptionString = "Mount failed due to incorrect mount options."; + public void testAttachClusterException() { String mountFailureReason = "Incorrect mount option specified."; - CloudRuntimeException exception = new CloudRuntimeException(exceptionString); + ClusterScope scope = new ClusterScope(1L, 1L, 1L); + CloudRuntimeException exception = new CloudRuntimeException(mountFailureReason); StorageManager storageManager = Mockito.mock(StorageManager.class); - Mockito.when(storageManager.connectHostToSharedPool(Mockito.any(), Mockito.anyLong())).thenThrow(exception); - Mockito.when(storageManager.getStoragePoolMountFailureReason(exceptionString)).thenReturn(mountFailureReason); + Mockito.doThrow(exception).when(storageManager).connectHostsToPool(Mockito.eq(store), Mockito.anyList(), Mockito.eq(scope), Mockito.eq(true), Mockito.eq(true)); ReflectionTestUtils.setField(_cloudStackPrimaryDataStoreLifeCycle, "storageMgr", storageManager); try { - _cloudStackPrimaryDataStoreLifeCycle.attachCluster(store, new ClusterScope(1L, 1L, 1L)); + _cloudStackPrimaryDataStoreLifeCycle.attachCluster(store, scope); Assert.fail(); } catch (Exception e) { Assert.assertEquals(e.getMessage(), mountFailureReason); diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycle.java 
b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycle.java index 38f9dc20fbd..5104787fd9a 100644 --- a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycle.java +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycle.java @@ -24,17 +24,12 @@ import java.net.URISyntaxException; import java.net.URLDecoder; import java.security.KeyManagementException; import java.security.NoSuchAlgorithmException; -import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.UUID; import javax.inject.Inject; -import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClientConnectionPool; -import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; -import org.apache.cloudstack.storage.datastore.util.ScaleIOUtil; -import org.apache.commons.collections.CollectionUtils; import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.HostScope; @@ -44,9 +39,13 @@ import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParame import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; import org.apache.cloudstack.storage.datastore.api.StoragePoolStatistics; import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient; +import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClientConnectionPool; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.storage.datastore.util.ScaleIOUtil; import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper; +import org.apache.commons.collections.CollectionUtils; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; @@ -55,9 +54,9 @@ import com.cloud.agent.api.StoragePoolInfo; import com.cloud.capacity.CapacityManager; import com.cloud.dc.ClusterVO; import com.cloud.dc.dao.ClusterDao; +import com.cloud.dc.dao.DataCenterDao; import com.cloud.exception.InvalidParameterValueException; -import com.cloud.host.Host; -import com.cloud.host.HostVO; +import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor; import com.cloud.resource.ResourceManager; import com.cloud.storage.Storage; @@ -74,9 +73,13 @@ import com.cloud.utils.crypt.DBEncryptionUtil; import com.cloud.utils.exception.CloudRuntimeException; public class ScaleIOPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle { + @Inject + DataCenterDao dataCenterDao; @Inject private ClusterDao clusterDao; @Inject + private HostDao hostDao; + @Inject private PrimaryDataStoreDao primaryDataStoreDao; @Inject private StoragePoolDetailsDao storagePoolDetailsDao; @@ -258,28 +261,15 @@ public class ScaleIOPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCy } PrimaryDataStoreInfo primaryDataStoreInfo = (PrimaryDataStoreInfo) dataStore; - List hostsInCluster = resourceManager.listAllUpAndEnabledHosts(Host.Type.Routing, primaryDataStoreInfo.getClusterId(), - primaryDataStoreInfo.getPodId(), primaryDataStoreInfo.getDataCenterId()); - if (hostsInCluster.isEmpty()) { + List hostIds = 
hostDao.listIdsForUpRouting(primaryDataStoreInfo.getDataCenterId(), + primaryDataStoreInfo.getPodId(), primaryDataStoreInfo.getClusterId()); + if (hostIds.isEmpty()) { primaryDataStoreDao.expunge(primaryDataStoreInfo.getId()); throw new CloudRuntimeException("No hosts are Up to associate a storage pool with in cluster: " + cluster); } - logger.debug("Attaching the pool to each of the hosts in the cluster: {}", cluster); - List poolHosts = new ArrayList(); - for (HostVO host : hostsInCluster) { - try { - if (storageMgr.connectHostToSharedPool(host, primaryDataStoreInfo.getId())) { - poolHosts.add(host); - } - } catch (Exception e) { - logger.warn(String.format("Unable to establish a connection between host: %s and pool: %s on the cluster: %s", host, dataStore, cluster), e); - } - } - - if (poolHosts.isEmpty()) { - logger.warn("No host can access storage pool '{}' on cluster '{}'.", primaryDataStoreInfo, cluster); - } + logger.debug("Attaching the pool to each of the hosts in the {}", cluster); + storageMgr.connectHostsToPool(dataStore, hostIds, scope, false, false); dataStoreHelper.attachCluster(dataStore); return true; @@ -296,21 +286,10 @@ public class ScaleIOPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCy throw new CloudRuntimeException("Unsupported hypervisor type: " + hypervisorType.toString()); } - logger.debug("Attaching the pool to each of the hosts in the zone: " + scope.getScopeId()); - List hosts = resourceManager.listAllUpAndEnabledHostsInOneZoneByHypervisor(hypervisorType, scope.getScopeId()); - List poolHosts = new ArrayList(); - for (HostVO host : hosts) { - try { - if (storageMgr.connectHostToSharedPool(host, dataStore.getId())) { - poolHosts.add(host); - } - } catch (Exception e) { - logger.warn("Unable to establish a connection between host: " + host + " and pool: " + dataStore + "in the zone: " + scope.getScopeId(), e); - } - } - if (poolHosts.isEmpty()) { - logger.warn("No host can access storage pool " + dataStore + " in the zone: " + scope.getScopeId()); - } + logger.debug("Attaching the pool to each of the hosts in the {}", + dataCenterDao.findById(scope.getScopeId())); + List hostIds = hostDao.listIdsForUpEnabledByZoneAndHypervisor(scope.getScopeId(), hypervisorType); + storageMgr.connectHostsToPool(dataStore, hostIds, scope, false, false); dataStoreHelper.attachZone(dataStore); return true; diff --git a/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycleTest.java b/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycleTest.java index dbeba0e4bde..136d598ebeb 100644 --- a/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycleTest.java +++ b/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycleTest.java @@ -30,7 +30,6 @@ import static org.mockito.Mockito.when; import java.util.ArrayList; import java.util.List; -import java.util.UUID; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; @@ -56,15 +55,13 @@ import org.mockito.MockedStatic; import org.mockito.Mockito; import org.mockito.MockitoAnnotations; import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; -import com.cloud.host.Host; -import com.cloud.host.HostVO; -import 
com.cloud.host.Status; +import com.cloud.dc.DataCenterVO; +import com.cloud.dc.dao.DataCenterDao; +import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor; -import com.cloud.resource.ResourceManager; -import com.cloud.resource.ResourceState; import com.cloud.storage.DataStoreRole; -import com.cloud.storage.Storage; import com.cloud.storage.StorageManager; import com.cloud.storage.StorageManagerImpl; import com.cloud.storage.StoragePoolAutomation; @@ -73,7 +70,6 @@ import com.cloud.storage.VMTemplateStoragePoolVO; import com.cloud.storage.dao.StoragePoolHostDao; import com.cloud.template.TemplateManager; import com.cloud.utils.exception.CloudRuntimeException; -import org.springframework.test.util.ReflectionTestUtils; @RunWith(MockitoJUnitRunner.class) public class ScaleIOPrimaryDataStoreLifeCycleTest { @@ -85,8 +81,6 @@ public class ScaleIOPrimaryDataStoreLifeCycleTest { @Mock private PrimaryDataStoreHelper dataStoreHelper; @Mock - private ResourceManager resourceManager; - @Mock private StoragePoolAutomation storagePoolAutomation; @Mock private StoragePoolHostDao storagePoolHostDao; @@ -100,6 +94,10 @@ public class ScaleIOPrimaryDataStoreLifeCycleTest { private PrimaryDataStore store; @Mock private TemplateManager templateMgr; + @Mock + HostDao hostDao; + @Mock + DataCenterDao dataCenterDao; @InjectMocks private StorageManager storageMgr = new StorageManagerImpl(); @@ -115,6 +113,7 @@ public class ScaleIOPrimaryDataStoreLifeCycleTest { public void setUp() { closeable = MockitoAnnotations.openMocks(this); ReflectionTestUtils.setField(scaleIOPrimaryDataStoreLifeCycleTest, "storageMgr", storageMgr); + when(dataCenterDao.findById(anyLong())).thenReturn(mock(DataCenterVO.class)); } @After @@ -137,17 +136,8 @@ public class ScaleIOPrimaryDataStoreLifeCycleTest { final ZoneScope scope = new ZoneScope(1L); - List hostList = new ArrayList(); - HostVO host1 = new HostVO(1L, "host01", Host.Type.Routing, "192.168.1.1", "255.255.255.0", null, null, null, null, null, null, null, null, null, null, - UUID.randomUUID().toString(), Status.Up, "1.0", null, null, 1L, null, 0, 0, "aa", 0, Storage.StoragePoolType.PowerFlex); - HostVO host2 = new HostVO(2L, "host02", Host.Type.Routing, "192.168.1.2", "255.255.255.0", null, null, null, null, null, null, null, null, null, null, - UUID.randomUUID().toString(), Status.Up, "1.0", null, null, 1L, null, 0, 0, "aa", 0, Storage.StoragePoolType.PowerFlex); - - host1.setResourceState(ResourceState.Enabled); - host2.setResourceState(ResourceState.Enabled); - hostList.add(host1); - hostList.add(host2); - when(resourceManager.listAllUpAndEnabledHostsInOneZoneByHypervisor(Hypervisor.HypervisorType.KVM, 1L)).thenReturn(hostList); + when(hostDao.listIdsForUpEnabledByZoneAndHypervisor(scope.getScopeId(), Hypervisor.HypervisorType.KVM)) + .thenReturn(List.of(1L, 2L)); when(dataStoreMgr.getDataStore(anyLong(), eq(DataStoreRole.Primary))).thenReturn(store); when(store.isShared()).thenReturn(true); diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/util/StorPoolHelper.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/util/StorPoolHelper.java index 3113ae8fdaa..f13d296af3b 100644 --- a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/util/StorPoolHelper.java +++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/util/StorPoolHelper.java @@ -219,17 +219,17 @@ public class StorPoolHelper { } public static 
Long findClusterIdByGlobalId(String globalId, ClusterDao clusterDao) { - List clusterVo = clusterDao.listAll(); - if (clusterVo.size() == 1) { + List clusterIds = clusterDao.listAllIds(); + if (clusterIds.size() == 1) { StorPoolUtil.spLog("There is only one cluster, sending backup to secondary command"); return null; } - for (ClusterVO clusterVO2 : clusterVo) { - if (globalId != null && StorPoolConfigurationManager.StorPoolClusterId.valueIn(clusterVO2.getId()) != null - && globalId.contains(StorPoolConfigurationManager.StorPoolClusterId.valueIn(clusterVO2.getId()).toString())) { - StorPoolUtil.spLog("Found cluster with id=%s for object with globalId=%s", clusterVO2.getId(), + for (Long clusterId : clusterIds) { + if (globalId != null && StorPoolConfigurationManager.StorPoolClusterId.valueIn(clusterId) != null + && globalId.contains(StorPoolConfigurationManager.StorPoolClusterId.valueIn(clusterId))) { + StorPoolUtil.spLog("Found cluster with id=%s for object with globalId=%s", clusterId, globalId); - return clusterVO2.getId(); + return clusterId; } } throw new CloudRuntimeException( diff --git a/server/src/main/java/com/cloud/alert/AlertManagerImpl.java b/server/src/main/java/com/cloud/alert/AlertManagerImpl.java index a9e66c6aece..f4a8167c5a2 100644 --- a/server/src/main/java/com/cloud/alert/AlertManagerImpl.java +++ b/server/src/main/java/com/cloud/alert/AlertManagerImpl.java @@ -26,8 +26,11 @@ import java.util.List; import java.util.Map; import java.util.Set; import java.util.Timer; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; +import java.util.concurrent.Future; import javax.inject.Inject; import javax.mail.MessagingException; @@ -75,12 +78,11 @@ import com.cloud.event.AlertGenerator; import com.cloud.event.EventTypes; import com.cloud.host.Host; import com.cloud.host.HostVO; +import com.cloud.host.dao.HostDao; import com.cloud.network.Ipv6Service; import com.cloud.network.dao.IPAddressDao; import com.cloud.org.Grouping.AllocationState; import com.cloud.resource.ResourceManager; -import com.cloud.service.ServiceOfferingVO; -import com.cloud.service.dao.ServiceOfferingDao; import com.cloud.storage.StorageManager; import com.cloud.utils.Pair; import com.cloud.utils.component.ManagerBase; @@ -124,9 +126,9 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi @Inject protected ConfigDepot _configDepot; @Inject - ServiceOfferingDao _offeringsDao; - @Inject Ipv6Service ipv6Service; + @Inject + HostDao hostDao; private Timer _timer = null; private long _capacityCheckPeriod = 60L * 60L * 1000L; // One hour by default. @@ -260,6 +262,66 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi } } + /** + * Recalculates the capacities of hosts, including CPU and RAM. 
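+ * Host updates are fanned out over a thread pool bounded by CapacityCalculateWorkers (capped at the number of hosts), + * and every future is awaited so that a failure on one host is logged without aborting the whole recalculation.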
+ */ + protected void recalculateHostCapacities() { + List<Long> hostIds = hostDao.listIdsByType(Host.Type.Routing); + if (hostIds.isEmpty()) { + return; + } + ConcurrentHashMap<Long, Future<Void>> futures = new ConcurrentHashMap<>(); + ExecutorService executorService = Executors.newFixedThreadPool(Math.max(1, + Math.min(CapacityManager.CapacityCalculateWorkers.value(), hostIds.size()))); + for (Long hostId : hostIds) { + futures.put(hostId, executorService.submit(() -> { + final HostVO host = hostDao.findById(hostId); + _capacityMgr.updateCapacityForHost(host); + return null; + })); + } + for (Map.Entry<Long, Future<Void>> entry: futures.entrySet()) { + try { + entry.getValue().get(); + } catch (InterruptedException | ExecutionException e) { + logger.error(String.format("Error during capacity calculation for host: %d due to: %s", + entry.getKey(), e.getMessage()), e); + } + } + executorService.shutdown(); + } + + protected void recalculateStorageCapacities() { + List<Long> storagePoolIds = _storagePoolDao.listAllIds(); + if (storagePoolIds.isEmpty()) { + return; + } + ConcurrentHashMap<Long, Future<Void>> futures = new ConcurrentHashMap<>(); + ExecutorService executorService = Executors.newFixedThreadPool(Math.max(1, + Math.min(CapacityManager.CapacityCalculateWorkers.value(), storagePoolIds.size()))); + for (Long poolId: storagePoolIds) { + futures.put(poolId, executorService.submit(() -> { + final StoragePoolVO pool = _storagePoolDao.findById(poolId); + long disk = _capacityMgr.getAllocatedPoolCapacity(pool, null); + if (pool.isShared()) { + _storageMgr.createCapacityEntry(pool, Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED, disk); + } else { + _storageMgr.createCapacityEntry(pool, Capacity.CAPACITY_TYPE_LOCAL_STORAGE, disk); + } + return null; + })); + } + for (Map.Entry<Long, Future<Void>> entry: futures.entrySet()) { + try { + entry.getValue().get(); + } catch (InterruptedException | ExecutionException e) { + logger.error(String.format("Error during capacity calculation for storage pool: %d due to: %s", + entry.getKey(), e.getMessage()), e); + } + } + executorService.shutdown(); + } + @Override public void recalculateCapacity() { // FIXME: the right way to do this is to register a listener (see RouterStatsListener, VMSyncListener) @@ -275,36 +337,14 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi logger.debug("recalculating system capacity"); logger.debug("Executing cpu/ram capacity update"); } - // Calculate CPU and RAM capacities - // get all hosts...even if they are not in 'UP' state - List<HostVO> hosts = _resourceMgr.listAllNotInMaintenanceHostsInOneZone(Host.Type.Routing, null); - if (hosts != null) { - // prepare the service offerings - List<ServiceOfferingVO> offerings = _offeringsDao.listAllIncludingRemoved(); - Map<Long, ServiceOfferingVO> offeringsMap = new HashMap<Long, ServiceOfferingVO>(); - for (ServiceOfferingVO offering : offerings) { - offeringsMap.put(offering.getId(), offering); - } - for (HostVO host : hosts) { - _capacityMgr.updateCapacityForHost(host, offeringsMap); - } - } + recalculateHostCapacities(); if (logger.isDebugEnabled()) { logger.debug("Done executing cpu/ram capacity update"); logger.debug("Executing storage capacity update"); } // Calculate storage pool capacity - List<StoragePoolVO> storagePools = _storagePoolDao.listAll(); - for (StoragePoolVO pool : storagePools) { - long disk = _capacityMgr.getAllocatedPoolCapacity(pool, null); - if (pool.isShared()) { - _storageMgr.createCapacityEntry(pool, Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED, disk); - } else { - _storageMgr.createCapacityEntry(pool, Capacity.CAPACITY_TYPE_LOCAL_STORAGE, disk); - } - } - + recalculateStorageCapacities(); if 
(logger.isDebugEnabled()) { logger.debug("Done executing storage capacity update"); logger.debug("Executing capacity updates for public ip and Vlans"); diff --git a/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java b/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java index 0a9c7ba6311..94a87a0b31c 100644 --- a/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java +++ b/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java @@ -2355,7 +2355,7 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q // ids hostSearchBuilder.and("id", hostSearchBuilder.entity().getId(), SearchCriteria.Op.EQ); hostSearchBuilder.and("name", hostSearchBuilder.entity().getName(), SearchCriteria.Op.EQ); - hostSearchBuilder.and("type", hostSearchBuilder.entity().getType(), SearchCriteria.Op.LIKE); + hostSearchBuilder.and("type", hostSearchBuilder.entity().getType(), SearchCriteria.Op.EQ); hostSearchBuilder.and("status", hostSearchBuilder.entity().getStatus(), SearchCriteria.Op.EQ); hostSearchBuilder.and("dataCenterId", hostSearchBuilder.entity().getDataCenterId(), SearchCriteria.Op.EQ); hostSearchBuilder.and("podId", hostSearchBuilder.entity().getPodId(), SearchCriteria.Op.EQ); @@ -2407,7 +2407,7 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q sc.setParameters("name", name); } if (type != null) { - sc.setParameters("type", "%" + type); + sc.setParameters("type", type); } if (state != null) { sc.setParameters("status", state); @@ -4557,7 +4557,7 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q // check if zone is configured, if not, just return empty list List hypers = null; if (!isIso) { - hypers = _resourceMgr.listAvailHypervisorInZone(null, null); + hypers = _resourceMgr.listAvailHypervisorInZone(null); if (hypers == null || hypers.isEmpty()) { return new Pair, Integer>(new ArrayList(), 0); } diff --git a/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDao.java b/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDao.java index b4427a6315a..58b73096de8 100644 --- a/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDao.java +++ b/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDao.java @@ -44,6 +44,6 @@ public interface UserVmJoinDao extends GenericDao { List listActiveByIsoId(Long isoId); - List listByAccountServiceOfferingTemplateAndNotInState(long accountId, List states, - List offeringIds, List templateIds); + List listByAccountServiceOfferingTemplateAndNotInState(long accountId, + List states, List offeringIds, List templateIds); } diff --git a/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDaoImpl.java index af26a242db4..7e10df24e1b 100644 --- a/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDaoImpl.java @@ -693,6 +693,8 @@ public class UserVmJoinDaoImpl extends GenericDaoBaseWithTagInformation listByAccountServiceOfferingTemplateAndNotInState(long accountId, List states, List offeringIds, List templateIds) { SearchBuilder userVmSearch = createSearchBuilder(); + userVmSearch.selectFields(userVmSearch.entity().getId(), userVmSearch.entity().getCpu(), + userVmSearch.entity().getRamSize()); userVmSearch.and("accountId", userVmSearch.entity().getAccountId(), Op.EQ); userVmSearch.and("serviceOfferingId", userVmSearch.entity().getServiceOfferingId(), Op.IN); userVmSearch.and("templateId", 
userVmSearch.entity().getTemplateId(), Op.IN); @@ -713,6 +715,6 @@ public class UserVmJoinDaoImpl extends GenericDaoBaseWithTagInformation> clusterValuesCache; + private SingleCache> serviceOfferingsCache; + @Override public boolean configure(String name, Map params) throws ConfigurationException { _vmCapacityReleaseInterval = NumbersUtil.parseInt(_configDao.getValue(Config.CapacitySkipcountingHours.key()), 3600); @@ -156,6 +162,8 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, public boolean start() { _resourceMgr.registerResourceEvent(ResourceListener.EVENT_PREPARE_MAINTENANCE_AFTER, this); _resourceMgr.registerResourceEvent(ResourceListener.EVENT_CANCEL_MAINTENANCE_AFTER, this); + clusterValuesCache = new LazyCache<>(128, 60, this::getClusterValues); + serviceOfferingsCache = new SingleCache<>(60, this::getServiceOfferingsMap); return true; } @@ -209,8 +217,8 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, long reservedMem = capacityMemory.getReservedCapacity(); long reservedCpuCore = capacityCpuCore.getReservedCapacity(); long actualTotalCpu = capacityCpu.getTotalCapacity(); - float cpuOvercommitRatio = Float.parseFloat(_clusterDetailsDao.findDetail(clusterIdFinal, "cpuOvercommitRatio").getValue()); - float memoryOvercommitRatio = Float.parseFloat(_clusterDetailsDao.findDetail(clusterIdFinal, "memoryOvercommitRatio").getValue()); + float cpuOvercommitRatio = Float.parseFloat(_clusterDetailsDao.findDetail(clusterIdFinal, VmDetailConstants.CPU_OVER_COMMIT_RATIO).getValue()); + float memoryOvercommitRatio = Float.parseFloat(_clusterDetailsDao.findDetail(clusterIdFinal, VmDetailConstants.MEMORY_OVER_COMMIT_RATIO).getValue()); int vmCPU = svo.getCpu() * svo.getSpeed(); int vmCPUCore = svo.getCpu(); long vmMem = svo.getRamSize() * 1024L * 1024L; @@ -283,8 +291,8 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, final long hostId = vm.getHostId(); final HostVO host = _hostDao.findById(hostId); final long clusterId = host.getClusterId(); - final float cpuOvercommitRatio = Float.parseFloat(_clusterDetailsDao.findDetail(clusterId, "cpuOvercommitRatio").getValue()); - final float memoryOvercommitRatio = Float.parseFloat(_clusterDetailsDao.findDetail(clusterId, "memoryOvercommitRatio").getValue()); + final float cpuOvercommitRatio = Float.parseFloat(_clusterDetailsDao.findDetail(clusterId, VmDetailConstants.CPU_OVER_COMMIT_RATIO).getValue()); + final float memoryOvercommitRatio = Float.parseFloat(_clusterDetailsDao.findDetail(clusterId, VmDetailConstants.MEMORY_OVER_COMMIT_RATIO).getValue()); final ServiceOfferingVO svo = _offeringsDao.findById(vm.getId(), vm.getServiceOfferingId()); @@ -376,13 +384,13 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, toHumanReadableSize(capacityMem.getReservedCapacity()), toHumanReadableSize(ram), fromLastHost); long cluster_id = host.getClusterId(); - ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id, "cpuOvercommitRatio"); - ClusterDetailsVO cluster_detail_ram = _clusterDetailsDao.findDetail(cluster_id, "memoryOvercommitRatio"); + ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id, VmDetailConstants.CPU_OVER_COMMIT_RATIO); + ClusterDetailsVO cluster_detail_ram = _clusterDetailsDao.findDetail(cluster_id, VmDetailConstants.MEMORY_OVER_COMMIT_RATIO); Float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue()); Float memoryOvercommitRatio = 
Float.parseFloat(cluster_detail_ram.getValue()); boolean hostHasCpuCapability, hostHasCapacity = false; - hostHasCpuCapability = checkIfHostHasCpuCapability(host.getId(), cpucore, cpuspeed); + hostHasCpuCapability = checkIfHostHasCpuCapability(host, cpucore, cpuspeed); if (hostHasCpuCapability) { // first check from reserved capacity @@ -412,25 +420,16 @@ } @Override - public boolean checkIfHostHasCpuCapability(long hostId, Integer cpuNum, Integer cpuSpeed) { - + public boolean checkIfHostHasCpuCapability(Host host, Integer cpuNum, Integer cpuSpeed) { // Check host can support the Cpu Number and Speed. - Host host = _hostDao.findById(hostId); boolean isCpuNumGood = host.getCpus().intValue() >= cpuNum; boolean isCpuSpeedGood = host.getSpeed().intValue() >= cpuSpeed; - if (isCpuNumGood && isCpuSpeedGood) { - if (logger.isDebugEnabled()) { - logger.debug("Host: {} has cpu capability (cpu:{}, speed:{}) " + - "to support requested CPU: {} and requested speed: {}", host, host.getCpus(), host.getSpeed(), cpuNum, cpuSpeed); - } - return true; - } else { - if (logger.isDebugEnabled()) { - logger.debug("Host: {} doesn't have cpu capability (cpu:{}, speed:{})" + - " to support requested CPU: {} and requested speed: {}", host, host.getCpus(), host.getSpeed(), cpuNum, cpuSpeed); - } - return false; - } + boolean hasCpuCapability = isCpuNumGood && isCpuSpeedGood; + + logger.debug("{} {} cpu capability (cpu: {}, speed: {}) to support requested CPU: {} and requested speed: {}", + host, hasCpuCapability ? "has" : "doesn't have", host.getCpus(), host.getSpeed(), cpuNum, cpuSpeed); + + return hasCpuCapability; } @Override @@ -628,21 +627,50 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, return totalAllocatedSize; } - @DB - @Override - public void updateCapacityForHost(final Host host) { - // prepare the service offerings - List<ServiceOfferingVO> offerings = _offeringsDao.listAllIncludingRemoved(); - Map<Long, ServiceOfferingVO> offeringsMap = new HashMap<Long, ServiceOfferingVO>(); - for (ServiceOfferingVO offering : offerings) { - offeringsMap.put(offering.getId(), offering); + protected Pair<String, String> getClusterValues(long clusterId) { + Map<String, String> map = _clusterDetailsDao.findDetails(clusterId, + List.of(VmDetailConstants.CPU_OVER_COMMIT_RATIO, VmDetailConstants.MEMORY_OVER_COMMIT_RATIO)); + return new Pair<>(map.get(VmDetailConstants.CPU_OVER_COMMIT_RATIO), + map.get(VmDetailConstants.MEMORY_OVER_COMMIT_RATIO)); + } + + + protected Map<Long, ServiceOfferingVO> getServiceOfferingsMap() { + List<ServiceOfferingVO> serviceOfferings = _offeringsDao.listAllIncludingRemoved(); + if (CollectionUtils.isEmpty(serviceOfferings)) { + return new HashMap<>(); } - updateCapacityForHost(host, offeringsMap); + return serviceOfferings.stream() + .collect(Collectors.toMap( + ServiceOfferingVO::getId, + offering -> offering + )); + } + + protected ServiceOfferingVO getServiceOffering(long id) { + Map<Long, ServiceOfferingVO> map = serviceOfferingsCache.get(); + if (map.containsKey(id)) { + return map.get(id); + } + ServiceOfferingVO serviceOfferingVO = _offeringsDao.findByIdIncludingRemoved(id); + if (serviceOfferingVO != null) { + serviceOfferingsCache.invalidate(); + } + return serviceOfferingVO; + } + + protected Map<String, String> getVmDetailsForCapacityCalculation(long vmId) { + return _userVmDetailsDao.listDetailsKeyPairs(vmId, + List.of(VmDetailConstants.CPU_OVER_COMMIT_RATIO, + VmDetailConstants.MEMORY_OVER_COMMIT_RATIO, + UsageEventVO.DynamicParameters.memory.name(), + UsageEventVO.DynamicParameters.cpuNumber.name(), + 
UsageEventVO.DynamicParameters.cpuSpeed.name())); } @DB @Override - public void updateCapacityForHost(final Host host, final Map offeringsMap) { + public void updateCapacityForHost(final Host host) { long usedCpuCore = 0; long reservedCpuCore = 0; long usedCpu = 0; @@ -651,32 +679,27 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, long reservedCpu = 0; final CapacityState capacityState = (host.getResourceState() == ResourceState.Enabled) ? CapacityState.Enabled : CapacityState.Disabled; - List vms = _vmDao.listUpByHostId(host.getId()); - if (logger.isDebugEnabled()) { - logger.debug("Found {} VMs on host {}", vms.size(), host); - } + List vms = _vmDao.listIdServiceOfferingForUpVmsByHostId(host.getId()); + logger.debug("Found {} VMs on {}", vms.size(), host); - final List vosMigrating = _vmDao.listVmsMigratingFromHost(host.getId()); - if (logger.isDebugEnabled()) { - logger.debug("Found {} VMs are Migrating from host {}", vosMigrating.size(), host); - } + final List vosMigrating = _vmDao.listIdServiceOfferingForVmsMigratingFromHost(host.getId()); + logger.debug("Found {} VMs are Migrating from {}", vosMigrating.size(), host); vms.addAll(vosMigrating); - ClusterVO cluster = _clusterDao.findById(host.getClusterId()); - ClusterDetailsVO clusterDetailCpu = _clusterDetailsDao.findDetail(cluster.getId(), "cpuOvercommitRatio"); - ClusterDetailsVO clusterDetailRam = _clusterDetailsDao.findDetail(cluster.getId(), "memoryOvercommitRatio"); - Float clusterCpuOvercommitRatio = Float.parseFloat(clusterDetailCpu.getValue()); - Float clusterRamOvercommitRatio = Float.parseFloat(clusterDetailRam.getValue()); + Pair clusterValues = + clusterValuesCache.get(host.getClusterId()); + Float clusterCpuOvercommitRatio = Float.parseFloat(clusterValues.first()); + Float clusterRamOvercommitRatio = Float.parseFloat(clusterValues.second()); for (VMInstanceVO vm : vms) { Float cpuOvercommitRatio = 1.0f; Float ramOvercommitRatio = 1.0f; - Map vmDetails = _userVmDetailsDao.listDetailsKeyPairs(vm.getId()); - String vmDetailCpu = vmDetails.get("cpuOvercommitRatio"); - String vmDetailRam = vmDetails.get("memoryOvercommitRatio"); + Map vmDetails = getVmDetailsForCapacityCalculation(vm.getId()); + String vmDetailCpu = vmDetails.get(VmDetailConstants.CPU_OVER_COMMIT_RATIO); + String vmDetailRam = vmDetails.get(VmDetailConstants.MEMORY_OVER_COMMIT_RATIO); // if vmDetailCpu or vmDetailRam is not null it means it is running in a overcommitted cluster. cpuOvercommitRatio = (vmDetailCpu != null) ? Float.parseFloat(vmDetailCpu) : clusterCpuOvercommitRatio; ramOvercommitRatio = (vmDetailRam != null) ? 
Float.parseFloat(vmDetailRam) : clusterRamOvercommitRatio; - ServiceOffering so = offeringsMap.get(vm.getServiceOfferingId()); + ServiceOffering so = getServiceOffering(vm.getServiceOfferingId()); if (so == null) { so = _offeringsDao.findByIdIncludingRemoved(vm.getServiceOfferingId()); } @@ -702,26 +725,25 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, } List vmsByLastHostId = _vmDao.listByLastHostId(host.getId()); - if (logger.isDebugEnabled()) { - logger.debug("Found {} VM, not running on host {}", vmsByLastHostId.size(), host); - } + logger.debug("Found {} VM, not running on {}", vmsByLastHostId.size(), host); + for (VMInstanceVO vm : vmsByLastHostId) { Float cpuOvercommitRatio = 1.0f; Float ramOvercommitRatio = 1.0f; long lastModificationTime = Optional.ofNullable(vm.getUpdateTime()).orElse(vm.getCreated()).getTime(); long secondsSinceLastUpdate = (DateUtil.currentGMTTime().getTime() - lastModificationTime) / 1000; if (secondsSinceLastUpdate < _vmCapacityReleaseInterval) { - UserVmDetailVO vmDetailCpu = _userVmDetailsDao.findDetail(vm.getId(), VmDetailConstants.CPU_OVER_COMMIT_RATIO); - UserVmDetailVO vmDetailRam = _userVmDetailsDao.findDetail(vm.getId(), VmDetailConstants.MEMORY_OVER_COMMIT_RATIO); + Map vmDetails = getVmDetailsForCapacityCalculation(vm.getId()); + String vmDetailCpu = vmDetails.get(VmDetailConstants.CPU_OVER_COMMIT_RATIO); + String vmDetailRam = vmDetails.get(VmDetailConstants.MEMORY_OVER_COMMIT_RATIO); if (vmDetailCpu != null) { //if vmDetail_cpu is not null it means it is running in a overcommited cluster. - cpuOvercommitRatio = Float.parseFloat(vmDetailCpu.getValue()); + cpuOvercommitRatio = Float.parseFloat(vmDetailCpu); } if (vmDetailRam != null) { - ramOvercommitRatio = Float.parseFloat(vmDetailRam.getValue()); + ramOvercommitRatio = Float.parseFloat(vmDetailRam); } - ServiceOffering so = offeringsMap.get(vm.getServiceOfferingId()); - Map vmDetails = _userVmDetailsDao.listDetailsKeyPairs(vm.getId()); + ServiceOffering so = getServiceOffering(vm.getServiceOfferingId()); if (so == null) { so = _offeringsDao.findByIdIncludingRemoved(vm.getServiceOfferingId()); } @@ -761,9 +783,24 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, } } - CapacityVO cpuCap = _capacityDao.findByHostIdType(host.getId(), Capacity.CAPACITY_TYPE_CPU); - CapacityVO memCap = _capacityDao.findByHostIdType(host.getId(), Capacity.CAPACITY_TYPE_MEMORY); - CapacityVO cpuCoreCap = _capacityDao.findByHostIdType(host.getId(), CapacityVO.CAPACITY_TYPE_CPU_CORE); + List capacities = _capacityDao.listByHostIdTypes(host.getId(), List.of(Capacity.CAPACITY_TYPE_CPU, + Capacity.CAPACITY_TYPE_MEMORY, + CapacityVO.CAPACITY_TYPE_CPU_CORE)); + CapacityVO cpuCap = null; + CapacityVO memCap = null; + CapacityVO cpuCoreCap = null; + for (CapacityVO c : capacities) { + if (c.getCapacityType() == Capacity.CAPACITY_TYPE_CPU) { + cpuCap = c; + } else if (c.getCapacityType() == Capacity.CAPACITY_TYPE_MEMORY) { + memCap = c; + } else if (c.getCapacityType() == Capacity.CAPACITY_TYPE_CPU_CORE) { + cpuCoreCap = c; + } + if (ObjectUtils.allNotNull(cpuCap, memCap, cpuCoreCap)) { + break; + } + } if (cpuCoreCap != null) { long hostTotalCpuCore = host.getCpus().longValue(); @@ -995,8 +1032,8 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, capacityCPU.addAnd("podId", SearchCriteria.Op.EQ, server.getPodId()); capacityCPU.addAnd("capacityType", SearchCriteria.Op.EQ, Capacity.CAPACITY_TYPE_CPU); List capacityVOCpus = 
_capacityDao.search(capacitySC, null); - Float cpuovercommitratio = Float.parseFloat(_clusterDetailsDao.findDetail(server.getClusterId(), "cpuOvercommitRatio").getValue()); - Float memoryOvercommitRatio = Float.parseFloat(_clusterDetailsDao.findDetail(server.getClusterId(), "memoryOvercommitRatio").getValue()); + Float cpuovercommitratio = Float.parseFloat(_clusterDetailsDao.findDetail(server.getClusterId(), VmDetailConstants.CPU_OVER_COMMIT_RATIO).getValue()); + Float memoryOvercommitRatio = Float.parseFloat(_clusterDetailsDao.findDetail(server.getClusterId(), VmDetailConstants.MEMORY_OVER_COMMIT_RATIO).getValue()); if (capacityVOCpus != null && !capacityVOCpus.isEmpty()) { CapacityVO CapacityVOCpu = capacityVOCpus.get(0); @@ -1053,9 +1090,9 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, String capacityOverProvisioningName = ""; if (capacityType == Capacity.CAPACITY_TYPE_CPU) { - capacityOverProvisioningName = "cpuOvercommitRatio"; + capacityOverProvisioningName = VmDetailConstants.CPU_OVER_COMMIT_RATIO; } else if (capacityType == Capacity.CAPACITY_TYPE_MEMORY) { - capacityOverProvisioningName = "memoryOvercommitRatio"; + capacityOverProvisioningName = VmDetailConstants.MEMORY_OVER_COMMIT_RATIO; } else { throw new CloudRuntimeException("Invalid capacityType - " + capacityType); } @@ -1093,13 +1130,11 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, public Pair checkIfHostHasCpuCapabilityAndCapacity(Host host, ServiceOffering offering, boolean considerReservedCapacity) { int cpu_requested = offering.getCpu() * offering.getSpeed(); long ram_requested = offering.getRamSize() * 1024L * 1024L; - Cluster cluster = _clusterDao.findById(host.getClusterId()); - ClusterDetailsVO clusterDetailsCpuOvercommit = _clusterDetailsDao.findDetail(cluster.getId(), "cpuOvercommitRatio"); - ClusterDetailsVO clusterDetailsRamOvercommmt = _clusterDetailsDao.findDetail(cluster.getId(), "memoryOvercommitRatio"); - Float cpuOvercommitRatio = Float.parseFloat(clusterDetailsCpuOvercommit.getValue()); - Float memoryOvercommitRatio = Float.parseFloat(clusterDetailsRamOvercommmt.getValue()); + Pair clusterDetails = getClusterValues(host.getClusterId()); + Float cpuOvercommitRatio = Float.parseFloat(clusterDetails.first()); + Float memoryOvercommitRatio = Float.parseFloat(clusterDetails.second()); - boolean hostHasCpuCapability = checkIfHostHasCpuCapability(host.getId(), offering.getCpu(), offering.getSpeed()); + boolean hostHasCpuCapability = checkIfHostHasCpuCapability(host, offering.getCpu(), offering.getSpeed()); boolean hostHasCapacity = checkIfHostHasCapacity(host, cpu_requested, ram_requested, false, cpuOvercommitRatio, memoryOvercommitRatio, considerReservedCapacity); @@ -1241,6 +1276,6 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, public ConfigKey[] getConfigKeys() { return new ConfigKey[] {CpuOverprovisioningFactor, MemOverprovisioningFactor, StorageCapacityDisableThreshold, StorageOverprovisioningFactor, StorageAllocatedCapacityDisableThreshold, StorageOperationsExcludeCluster, ImageStoreNFSVersion, SecondaryStorageCapacityThreshold, - StorageAllocatedCapacityDisableThresholdForVolumeSize }; + StorageAllocatedCapacityDisableThresholdForVolumeSize, CapacityCalculateWorkers }; } } diff --git a/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java b/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java index d7e2160ef35..8eb5c5203ec 100644 --- 
a/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java +++ b/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java @@ -16,6 +16,11 @@ // under the License. package com.cloud.configuration; +import static com.cloud.configuration.Config.SecStorageAllowedInternalDownloadSites; +import static com.cloud.offering.NetworkOffering.RoutingMode.Dynamic; +import static com.cloud.offering.NetworkOffering.RoutingMode.Static; +import static org.apache.cloudstack.framework.config.ConfigKey.CATEGORY_SYSTEM; + import java.io.UnsupportedEncodingException; import java.net.URI; import java.net.URISyntaxException; @@ -308,11 +313,6 @@ import com.google.common.collect.Sets; import com.googlecode.ipv6.IPv6Address; import com.googlecode.ipv6.IPv6Network; -import static com.cloud.configuration.Config.SecStorageAllowedInternalDownloadSites; -import static com.cloud.offering.NetworkOffering.RoutingMode.Dynamic; -import static com.cloud.offering.NetworkOffering.RoutingMode.Static; -import static org.apache.cloudstack.framework.config.ConfigKey.CATEGORY_SYSTEM; - public class ConfigurationManagerImpl extends ManagerBase implements ConfigurationManager, ConfigurationService, Configurable { public static final String PERACCOUNT = "peraccount"; public static final String PERZONE = "perzone"; @@ -2521,7 +2521,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati // Check if there are any non-removed hosts in the zone. - if (!_hostDao.listByDataCenterId(zoneId).isEmpty()) { + if (!_hostDao.listEnabledIdsByDataCenterId(zoneId).isEmpty()) { throw new CloudRuntimeException(errorMsg + "there are servers in this zone."); } diff --git a/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java b/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java index 3db02f91775..6b0bbde856e 100644 --- a/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java +++ b/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java @@ -869,11 +869,9 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy } public boolean isZoneReady(Map zoneHostInfoMap, DataCenter dataCenter) { - List hosts = hostDao.listByDataCenterId(dataCenter.getId()); - if (CollectionUtils.isEmpty(hosts)) { - if (logger.isDebugEnabled()) { - logger.debug("Zone {} has no host available which is enabled and in Up state", dataCenter); - } + Integer totalUpAndEnabledHosts = hostDao.countUpAndEnabledHostsInZone(dataCenter.getId()); + if (totalUpAndEnabledHosts != null && totalUpAndEnabledHosts < 1) { + logger.debug("{} has no host available which is enabled and in Up state", dataCenter); return false; } ZoneHostInfo zoneHostInfo = zoneHostInfoMap.get(dataCenter.getId()); @@ -894,8 +892,8 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy if (templateHostRef != null) { Boolean useLocalStorage = BooleanUtils.toBoolean(ConfigurationManagerImpl.SystemVMUseLocalStorage.valueIn(dataCenter.getId())); - List> l = consoleProxyDao.getDatacenterStoragePoolHostInfo(dataCenter.getId(), useLocalStorage); - if (CollectionUtils.isNotEmpty(l) && l.get(0).second() > 0) { + boolean hasDatacenterStoragePoolHostInfo = consoleProxyDao.hasDatacenterStoragePoolHostInfo(dataCenter.getId(), !useLocalStorage); + if (hasDatacenterStoragePoolHostInfo) { return true; } else { if (logger.isDebugEnabled()) { diff --git a/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java 
b/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java index a3c889cd070..e7b926eb4e4 100644 --- a/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java +++ b/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java @@ -36,22 +36,7 @@ import java.util.stream.Collectors; import javax.inject.Inject; import javax.naming.ConfigurationException; -import com.cloud.cpu.CPU; -import com.cloud.vm.UserVmManager; import org.apache.cloudstack.affinity.AffinityGroupDomainMapVO; -import com.cloud.storage.VMTemplateVO; -import com.cloud.storage.dao.VMTemplateDao; -import com.cloud.user.AccountVO; -import com.cloud.user.dao.AccountDao; -import com.cloud.exception.StorageUnavailableException; -import com.cloud.utils.db.Filter; -import com.cloud.utils.fsm.StateMachine2; - -import org.apache.cloudstack.framework.config.ConfigKey; -import org.apache.cloudstack.framework.config.Configurable; -import org.apache.commons.collections.CollectionUtils; -import org.apache.commons.collections.MapUtils; -import org.apache.commons.lang3.StringUtils; import org.apache.cloudstack.affinity.AffinityGroupProcessor; import org.apache.cloudstack.affinity.AffinityGroupService; import org.apache.cloudstack.affinity.AffinityGroupVMMapVO; @@ -64,6 +49,8 @@ import org.apache.cloudstack.engine.cloud.entity.api.db.dao.VMReservationDao; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator; +import org.apache.cloudstack.framework.config.ConfigKey; +import org.apache.cloudstack.framework.config.Configurable; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.framework.messagebus.MessageBus; import org.apache.cloudstack.framework.messagebus.MessageSubscriber; @@ -71,6 +58,9 @@ import org.apache.cloudstack.managed.context.ManagedContextTimerTask; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.utils.identity.ManagementServerNode; +import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.collections.MapUtils; +import org.apache.commons.lang3.StringUtils; import com.cloud.agent.AgentManager; import com.cloud.agent.Listener; @@ -85,6 +75,7 @@ import com.cloud.capacity.CapacityManager; import com.cloud.capacity.dao.CapacityDao; import com.cloud.configuration.Config; import com.cloud.configuration.ConfigurationManagerImpl; +import com.cloud.cpu.CPU; import com.cloud.dc.ClusterDetailsDao; import com.cloud.dc.ClusterDetailsVO; import com.cloud.dc.ClusterVO; @@ -102,6 +93,7 @@ import com.cloud.deploy.dao.PlannerHostReservationDao; import com.cloud.exception.AffinityConflictException; import com.cloud.exception.ConnectionException; import com.cloud.exception.InsufficientServerCapacityException; +import com.cloud.exception.StorageUnavailableException; import com.cloud.gpu.GPU; import com.cloud.host.DetailVO; import com.cloud.host.Host; @@ -122,15 +114,19 @@ import com.cloud.storage.ScopeType; import com.cloud.storage.StorageManager; import com.cloud.storage.StoragePool; import com.cloud.storage.StoragePoolHostVO; +import com.cloud.storage.VMTemplateVO; import com.cloud.storage.Volume; import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.DiskOfferingDao; import com.cloud.storage.dao.GuestOSCategoryDao; import 
com.cloud.storage.dao.GuestOSDao; import com.cloud.storage.dao.StoragePoolHostDao; +import com.cloud.storage.dao.VMTemplateDao; import com.cloud.storage.dao.VolumeDao; import com.cloud.template.VirtualMachineTemplate; import com.cloud.user.AccountManager; +import com.cloud.user.AccountVO; +import com.cloud.user.dao.AccountDao; import com.cloud.utils.DateUtil; import com.cloud.utils.LogUtils; import com.cloud.utils.NumbersUtil; @@ -138,13 +134,16 @@ import com.cloud.utils.Pair; import com.cloud.utils.component.Manager; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.db.DB; +import com.cloud.utils.db.Filter; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.Transaction; import com.cloud.utils.db.TransactionCallback; import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.fsm.StateListener; +import com.cloud.utils.fsm.StateMachine2; import com.cloud.vm.DiskProfile; +import com.cloud.vm.UserVmManager; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachine.Event; @@ -295,8 +294,9 @@ StateListener, Configurable { return; } final Long lastHostClusterId = lastHost.getClusterId(); - logger.warn("VM last host ID: {} belongs to zone ID: {} for which config - {} is false and storage migration would be needed for inter-cluster migration, therefore, adding all other clusters except ID: {} from this zone to avoid list", lastHost, vm.getDataCenterId(), ConfigurationManagerImpl.MIGRATE_VM_ACROSS_CLUSTERS.key(), lastHostClusterId); - List clusterIds = _clusterDao.listAllClusters(lastHost.getDataCenterId()); + logger.warn(String.format("VM last host ID: %d belongs to zone ID: %s for which config - %s is false and storage migration would be needed for inter-cluster migration, therefore, adding all other clusters except ID: %d from this zone to avoid list", + lastHost.getId(), vm.getDataCenterId(), ConfigurationManagerImpl.MIGRATE_VM_ACROSS_CLUSTERS.key(), lastHostClusterId)); + List clusterIds = _clusterDao.listAllClusterIds(lastHost.getDataCenterId()); Set existingAvoidedClusters = avoids.getClustersToAvoid(); clusterIds = clusterIds.stream().filter(x -> !Objects.equals(x, lastHostClusterId) && (existingAvoidedClusters == null || !existingAvoidedClusters.contains(x))).collect(Collectors.toList()); avoids.addClusterList(clusterIds); @@ -492,7 +492,7 @@ StateListener, Configurable { float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue()); boolean hostHasCpuCapability, hostHasCapacity = false; - hostHasCpuCapability = _capacityMgr.checkIfHostHasCpuCapability(host.getId(), offering.getCpu(), offering.getSpeed()); + hostHasCpuCapability = _capacityMgr.checkIfHostHasCpuCapability(host, offering.getCpu(), offering.getSpeed()); if (hostHasCpuCapability) { // first check from reserved capacity @@ -736,12 +736,10 @@ StateListener, Configurable { * Adds disabled Hosts to the ExcludeList in order to avoid them at the deployment planner. 
*/ protected void avoidDisabledHosts(DataCenter dc, ExcludeList avoids) { - List disabledHosts = _hostDao.listDisabledByDataCenterId(dc.getId()); - logger.debug("Adding hosts [{}] of datacenter [{}] to the avoid set, because these hosts are in the Disabled state.", - disabledHosts.stream().map(HostVO::getUuid).collect(Collectors.joining(", ")), dc); - for (HostVO host : disabledHosts) { - avoids.addHost(host.getId()); - } + List disabledHostIds = _hostDao.listDisabledIdsByDataCenterId(dc.getId()); + logger.debug("Adding hosts {} of datacenter [{}] to the avoid set, because these hosts are in the Disabled state.", + StringUtils.join(disabledHostIds), dc.getUuid()); + disabledHostIds.forEach(avoids::addHost); } /** @@ -860,7 +858,7 @@ StateListener, Configurable { List allDedicatedPods = _dedicatedDao.listAllPods(); allPodsInDc.retainAll(allDedicatedPods); - List allClustersInDc = _clusterDao.listAllClusters(dc.getId()); + List allClustersInDc = _clusterDao.listAllClusterIds(dc.getId()); List allDedicatedClusters = _dedicatedDao.listAllClusters(); allClustersInDc.retainAll(allDedicatedClusters); @@ -1147,9 +1145,11 @@ StateListener, Configurable { private void checkHostReservations() { List reservedHosts = _plannerHostReserveDao.listAllReservedHosts(); - - for (PlannerHostReservationVO hostReservation : reservedHosts) { - HostVO host = _hostDao.findById(hostReservation.getHostId()); + List hosts = _hostDao.listByIds(reservedHosts + .stream() + .map(PlannerHostReservationVO::getHostId) + .collect(Collectors.toList())); + for (HostVO host : hosts) { if (host != null && host.getManagementServerId() != null && host.getManagementServerId() == _nodeId) { checkHostReservationRelease(host); } @@ -1338,7 +1338,7 @@ StateListener, Configurable { Pair> potentialResources = findPotentialDeploymentResources(suitableHosts, suitableVolumeStoragePools, avoid, resourceUsageRequired, readyAndReusedVolumes, plan.getPreferredHosts(), vmProfile.getVirtualMachine()); if (potentialResources != null) { - Host host = _hostDao.findById(potentialResources.first().getId()); + Host host = potentialResources.first(); Map storageVolMap = potentialResources.second(); // remove the reused vol<->pool from destination, since // we don't have to prepare this volume. diff --git a/server/src/main/java/com/cloud/hypervisor/kvm/discoverer/LibvirtServerDiscoverer.java b/server/src/main/java/com/cloud/hypervisor/kvm/discoverer/LibvirtServerDiscoverer.java index ffd482b711d..a83db42f598 100644 --- a/server/src/main/java/com/cloud/hypervisor/kvm/discoverer/LibvirtServerDiscoverer.java +++ b/server/src/main/java/com/cloud/hypervisor/kvm/discoverer/LibvirtServerDiscoverer.java @@ -16,6 +16,29 @@ // under the License. 
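The checkHostReservations change in the DeploymentPlanningManagerImpl hunk above collapses a per-reservation findById loop into a single listByIds call. A rough sketch of the batching idea, with hypothetical in-memory types standing in for the reservation and host DAOs:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;
    import java.util.stream.Collectors;

    // Hypothetical stand-ins; the point is one batched lookup instead of N single lookups.
    class BatchedLookupSketch {
        static class Reservation { final long hostId; Reservation(long hostId) { this.hostId = hostId; } }
        static class Host { final long id; Host(long id) { this.id = id; } }

        static class HostDao {
            private final Map<Long, Host> table;
            HostDao(Map<Long, Host> table) { this.table = table; }

            Host findById(long id) { return table.get(id); }          // one query per call

            List<Host> listByIds(List<Long> ids) {                    // one query for the whole batch
                List<Host> out = new ArrayList<>();
                for (Long id : ids) {
                    Host h = table.get(id);
                    if (h != null) out.add(h);
                }
                return out;
            }
        }

        static List<Host> hostsForReservations(HostDao dao, List<Reservation> reservations) {
            List<Long> ids = reservations.stream().map(r -> r.hostId).collect(Collectors.toList());
            return dao.listByIds(ids);                                // N round trips collapse into one
        }

        public static void main(String[] args) {
            HostDao dao = new HostDao(Map.of(1L, new Host(1), 2L, new Host(2)));
            List<Reservation> reservations = List.of(new Reservation(1), new Reservation(3));
            System.out.println(hostsForReservations(dao, reservations).size()); // 1, unknown ids drop out
        }
    }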
package com.cloud.hypervisor.kvm.discoverer; +import static com.cloud.configuration.ConfigurationManagerImpl.ADD_HOST_ON_SERVICE_RESTART_KVM; + +import java.net.InetAddress; +import java.net.URI; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.UUID; + +import javax.inject.Inject; +import javax.naming.ConfigurationException; + +import org.apache.cloudstack.agent.lb.IndirectAgentLB; +import org.apache.cloudstack.ca.CAManager; +import org.apache.cloudstack.ca.SetupCertificateCommand; +import org.apache.cloudstack.direct.download.DirectDownloadManager; +import org.apache.cloudstack.framework.ca.Certificate; +import org.apache.cloudstack.utils.cache.LazyCache; +import org.apache.cloudstack.utils.security.KeyStoreUtils; + import com.cloud.agent.AgentManager; import com.cloud.agent.Listener; import com.cloud.agent.api.AgentControlAnswer; @@ -32,6 +55,7 @@ import com.cloud.exception.DiscoveredWithErrorException; import com.cloud.exception.DiscoveryException; import com.cloud.exception.OperationTimedoutException; import com.cloud.host.Host; +import com.cloud.host.HostInfo; import com.cloud.host.HostVO; import com.cloud.host.Status; import com.cloud.host.dao.HostDao; @@ -48,26 +72,7 @@ import com.cloud.utils.StringUtils; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.ssh.SSHCmdHelper; import com.trilead.ssh2.Connection; -import org.apache.cloudstack.agent.lb.IndirectAgentLB; -import org.apache.cloudstack.ca.CAManager; -import org.apache.cloudstack.ca.SetupCertificateCommand; -import org.apache.cloudstack.direct.download.DirectDownloadManager; -import org.apache.cloudstack.framework.ca.Certificate; -import org.apache.cloudstack.utils.security.KeyStoreUtils; -import javax.inject.Inject; -import javax.naming.ConfigurationException; -import java.net.InetAddress; -import java.net.URI; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.UUID; - -import static com.cloud.configuration.ConfigurationManagerImpl.ADD_HOST_ON_SERVICE_RESTART_KVM; public abstract class LibvirtServerDiscoverer extends DiscovererBase implements Discoverer, Listener, ResourceStateAdapter { private final int _waitTime = 5; /* wait for 5 minutes */ @@ -89,6 +94,16 @@ public abstract class LibvirtServerDiscoverer extends DiscovererBase implements @Inject private HostDao hostDao; + private LazyCache clusterExistingHostCache; + + private HostVO getExistingHostForCluster(long clusterId) { + HostVO existingHostInCluster = _hostDao.findAnyStateHypervisorHostInCluster(clusterId); + if (existingHostInCluster != null) { + _hostDao.loadDetails(existingHostInCluster); + } + return existingHostInCluster; + } + @Override public abstract Hypervisor.HypervisorType getHypervisorType(); @@ -425,6 +440,9 @@ public abstract class LibvirtServerDiscoverer extends DiscovererBase implements _kvmGuestNic = _kvmPrivateNic; } + clusterExistingHostCache = new LazyCache<>(32, 30, + this::getExistingHostForCluster); + agentMgr.registerForHostEvents(this, true, false, false); _resourceMgr.registerResourceStateAdapter(this.getClass().getSimpleName(), this); return true; @@ -467,12 +485,10 @@ public abstract class LibvirtServerDiscoverer extends DiscovererBase implements throw new IllegalArgumentException("cannot add host, due to can't find cluster: " + host.getClusterId()); } - List 
hostsInCluster = _resourceMgr.listAllHostsInCluster(clusterVO.getId()); - if (!hostsInCluster.isEmpty()) { - HostVO oneHost = hostsInCluster.get(0); - _hostDao.loadDetails(oneHost); - String hostOsInCluster = oneHost.getDetail("Host.OS"); - String hostOs = ssCmd.getHostDetails().get("Host.OS"); + HostVO existingHostInCluster = clusterExistingHostCache.get(clusterVO.getId()); + if (existingHostInCluster != null) { + String hostOsInCluster = existingHostInCluster.getDetail(HostInfo.HOST_OS); + String hostOs = ssCmd.getHostDetails().get(HostInfo.HOST_OS); if (!isHostOsCompatibleWithOtherHost(hostOsInCluster, hostOs)) { String msg = String.format("host: %s with hostOS, \"%s\"into a cluster, in which there are \"%s\" hosts added", firstCmd.getPrivateIpAddress(), hostOs, hostOsInCluster); if (hostOs != null && hostOs.startsWith(hostOsInCluster)) { diff --git a/server/src/main/java/com/cloud/network/NetworkServiceImpl.java b/server/src/main/java/com/cloud/network/NetworkServiceImpl.java index c6628e457de..2b08bd25eba 100644 --- a/server/src/main/java/com/cloud/network/NetworkServiceImpl.java +++ b/server/src/main/java/com/cloud/network/NetworkServiceImpl.java @@ -40,16 +40,6 @@ import java.util.UUID; import javax.inject.Inject; import javax.naming.ConfigurationException; -import com.cloud.bgp.BGPService; -import com.cloud.dc.VlanDetailsVO; -import com.cloud.dc.dao.VlanDetailsDao; -import com.cloud.network.dao.NsxProviderDao; -import com.cloud.network.dao.PublicIpQuarantineDao; -import com.cloud.network.dao.VirtualRouterProviderDao; -import com.cloud.network.element.NsxProviderVO; -import com.cloud.network.element.VirtualRouterProviderVO; -import com.cloud.offering.ServiceOffering; -import com.cloud.service.dao.ServiceOfferingDao; import org.apache.cloudstack.acl.ControlledEntity.ACLType; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.alert.AlertService; @@ -104,6 +94,7 @@ import com.cloud.alert.AlertManager; import com.cloud.api.ApiDBUtils; import com.cloud.api.query.dao.DomainRouterJoinDao; import com.cloud.api.query.vo.DomainRouterJoinVO; +import com.cloud.bgp.BGPService; import com.cloud.configuration.Config; import com.cloud.configuration.ConfigurationManager; import com.cloud.configuration.Resource; @@ -114,12 +105,14 @@ import com.cloud.dc.DataCenterVO; import com.cloud.dc.DataCenterVnetVO; import com.cloud.dc.DomainVlanMapVO; import com.cloud.dc.Vlan.VlanType; +import com.cloud.dc.VlanDetailsVO; import com.cloud.dc.VlanVO; import com.cloud.dc.dao.AccountVlanMapDao; import com.cloud.dc.dao.DataCenterDao; import com.cloud.dc.dao.DataCenterVnetDao; import com.cloud.dc.dao.DomainVlanMapDao; import com.cloud.dc.dao.VlanDao; +import com.cloud.dc.dao.VlanDetailsDao; import com.cloud.deploy.DeployDestination; import com.cloud.domain.Domain; import com.cloud.domain.DomainVO; @@ -165,6 +158,7 @@ import com.cloud.network.dao.NetworkDomainDao; import com.cloud.network.dao.NetworkDomainVO; import com.cloud.network.dao.NetworkServiceMapDao; import com.cloud.network.dao.NetworkVO; +import com.cloud.network.dao.NsxProviderDao; import com.cloud.network.dao.OvsProviderDao; import com.cloud.network.dao.PhysicalNetworkDao; import com.cloud.network.dao.PhysicalNetworkServiceProviderDao; @@ -172,9 +166,13 @@ import com.cloud.network.dao.PhysicalNetworkServiceProviderVO; import com.cloud.network.dao.PhysicalNetworkTrafficTypeDao; import com.cloud.network.dao.PhysicalNetworkTrafficTypeVO; import com.cloud.network.dao.PhysicalNetworkVO; +import 
com.cloud.network.dao.PublicIpQuarantineDao; +import com.cloud.network.dao.VirtualRouterProviderDao; import com.cloud.network.element.NetworkElement; +import com.cloud.network.element.NsxProviderVO; import com.cloud.network.element.OvsProviderVO; import com.cloud.network.element.VirtualRouterElement; +import com.cloud.network.element.VirtualRouterProviderVO; import com.cloud.network.element.VpcVirtualRouterElement; import com.cloud.network.guru.GuestNetworkGuru; import com.cloud.network.guru.NetworkGuru; @@ -198,6 +196,7 @@ import com.cloud.network.vpc.dao.VpcDao; import com.cloud.network.vpc.dao.VpcGatewayDao; import com.cloud.network.vpc.dao.VpcOfferingDao; import com.cloud.offering.NetworkOffering; +import com.cloud.offering.ServiceOffering; import com.cloud.offerings.NetworkOfferingVO; import com.cloud.offerings.dao.NetworkOfferingDao; import com.cloud.offerings.dao.NetworkOfferingServiceMapDao; @@ -207,6 +206,7 @@ import com.cloud.projects.ProjectManager; import com.cloud.server.ResourceTag; import com.cloud.server.ResourceTag.ResourceObjectType; import com.cloud.service.ServiceOfferingVO; +import com.cloud.service.dao.ServiceOfferingDao; import com.cloud.tags.ResourceTagVO; import com.cloud.tags.dao.ResourceTagDao; import com.cloud.user.Account; diff --git a/server/src/main/java/com/cloud/network/as/AutoScaleManagerImpl.java b/server/src/main/java/com/cloud/network/as/AutoScaleManagerImpl.java index 3d3a28d1404..bda84f09fe6 100644 --- a/server/src/main/java/com/cloud/network/as/AutoScaleManagerImpl.java +++ b/server/src/main/java/com/cloud/network/as/AutoScaleManagerImpl.java @@ -2715,7 +2715,7 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage return vmStatsById; } try { - vmStatsById = virtualMachineManager.getVirtualMachineStatistics(host.getId(), host.getName(), vmIds); + vmStatsById = virtualMachineManager.getVirtualMachineStatistics(host, vmIds); if (MapUtils.isEmpty(vmStatsById)) { logger.warn("Got empty result for virtual machine statistics from host: " + host); } diff --git a/server/src/main/java/com/cloud/network/security/SecurityGroupListener.java b/server/src/main/java/com/cloud/network/security/SecurityGroupListener.java index 067f2fbdbb2..c79d466bb1b 100644 --- a/server/src/main/java/com/cloud/network/security/SecurityGroupListener.java +++ b/server/src/main/java/com/cloud/network/security/SecurityGroupListener.java @@ -22,8 +22,8 @@ import java.util.Map; import java.util.Random; import java.util.concurrent.ConcurrentHashMap; -import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.Listener; diff --git a/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java b/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java index 1349e03f205..c9ba51ce5a6 100755 --- a/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java +++ b/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java @@ -32,6 +32,7 @@ import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Random; +import java.util.Set; import java.util.stream.Collectors; import javax.inject.Inject; @@ -547,8 +548,8 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, details.put("ovm3pool", allParams.get("ovm3pool")); details.put("ovm3cluster", allParams.get("ovm3cluster")); } - details.put("cpuOvercommitRatio", 
CapacityManager.CpuOverprovisioningFactor.value().toString()); - details.put("memoryOvercommitRatio", CapacityManager.MemOverprovisioningFactor.value().toString()); + details.put(VmDetailConstants.CPU_OVER_COMMIT_RATIO, CapacityManager.CpuOverprovisioningFactor.value().toString()); + details.put(VmDetailConstants.MEMORY_OVER_COMMIT_RATIO, CapacityManager.MemOverprovisioningFactor.value().toString()); _clusterDetailsDao.persist(cluster.getId(), details); return result; } @@ -558,8 +559,8 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, details.put("url", url); details.put("username", StringUtils.defaultString(username)); details.put("password", StringUtils.defaultString(password)); - details.put("cpuOvercommitRatio", CapacityManager.CpuOverprovisioningFactor.value().toString()); - details.put("memoryOvercommitRatio", CapacityManager.MemOverprovisioningFactor.value().toString()); + details.put(VmDetailConstants.CPU_OVER_COMMIT_RATIO, CapacityManager.CpuOverprovisioningFactor.value().toString()); + details.put(VmDetailConstants.MEMORY_OVER_COMMIT_RATIO, CapacityManager.MemOverprovisioningFactor.value().toString()); _clusterDetailsDao.persist(cluster.getId(), details); boolean success = false; @@ -643,8 +644,8 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, throw ex; } else { if (cluster.getGuid() == null) { - final List hosts = listAllHostsInCluster(clusterId); - if (!hosts.isEmpty()) { + final List hostIds = _hostDao.listIdsByClusterId(clusterId); + if (!hostIds.isEmpty()) { final CloudRuntimeException ex = new CloudRuntimeException("Guid is not updated for cluster with specified cluster id; need to wait for hosts in this cluster to come up"); ex.addProxyObject(cluster.getUuid(), "clusterId"); @@ -780,9 +781,9 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, } } clusterId = cluster.getId(); - if (_clusterDetailsDao.findDetail(clusterId, "cpuOvercommitRatio") == null) { - final ClusterDetailsVO cluster_cpu_detail = new ClusterDetailsVO(clusterId, "cpuOvercommitRatio", "1"); - final ClusterDetailsVO cluster_memory_detail = new ClusterDetailsVO(clusterId, "memoryOvercommitRatio", "1"); + if (_clusterDetailsDao.findDetail(clusterId, VmDetailConstants.CPU_OVER_COMMIT_RATIO) == null) { + final ClusterDetailsVO cluster_cpu_detail = new ClusterDetailsVO(clusterId, VmDetailConstants.CPU_OVER_COMMIT_RATIO, "1"); + final ClusterDetailsVO cluster_memory_detail = new ClusterDetailsVO(clusterId, VmDetailConstants.MEMORY_OVER_COMMIT_RATIO, "1"); _clusterDetailsDao.persist(cluster_cpu_detail); _clusterDetailsDao.persist(cluster_memory_detail); } @@ -964,8 +965,8 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, Host hostRemoved = _hostDao.findById(hostId); _hostDao.remove(hostId); if (clusterId != null) { - final List hosts = listAllHostsInCluster(clusterId); - if (hosts.size() == 0) { + final List hostIds = _hostDao.listIdsByClusterId(clusterId); + if (CollectionUtils.isEmpty(hostIds)) { final ClusterVO cluster = _clusterDao.findById(clusterId); cluster.setGuid(null); _clusterDao.update(clusterId, cluster); @@ -1089,21 +1090,17 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, final Hypervisor.HypervisorType hypervisorType = cluster.getHypervisorType(); - final List hosts = listAllHostsInCluster(cmd.getId()); - if (hosts.size() > 0) { - if (logger.isDebugEnabled()) { - logger.debug("Cluster: {} still has hosts, can't remove", cluster); 
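Several hunks above and below replace the literal detail keys "cpuOvercommitRatio" and "memoryOvercommitRatio" with shared constants (VmDetailConstants in the diff). The small sketch below only illustrates the motivation, using a hypothetical constants holder: the code that persists a cluster detail and the code that later reads it back must agree on the exact key string, and a shared constant makes that agreement checkable by the compiler.

    import java.util.HashMap;
    import java.util.Map;

    // Minimal illustration; the constants class here mirrors the idea, not CloudStack's exact code.
    class DetailKeySketch {
        static final class ClusterDetailKeys {
            static final String CPU_OVER_COMMIT_RATIO = "cpuOvercommitRatio";
            static final String MEMORY_OVER_COMMIT_RATIO = "memoryOvercommitRatio";
            private ClusterDetailKeys() { }
        }

        public static void main(String[] args) {
            Map<String, String> clusterDetails = new HashMap<>();
            // Writer side: persist the ratios under the shared keys.
            clusterDetails.put(ClusterDetailKeys.CPU_OVER_COMMIT_RATIO, "1");
            clusterDetails.put(ClusterDetailKeys.MEMORY_OVER_COMMIT_RATIO, "1");
            // Reader side: the same constants guarantee the lookup key cannot drift or be mistyped.
            float cpuRatio = Float.parseFloat(clusterDetails.get(ClusterDetailKeys.CPU_OVER_COMMIT_RATIO));
            System.out.println(cpuRatio); // 1.0
        }
    }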
- } - throw new CloudRuntimeException(String.format("Cluster: %s cannot be removed. Cluster still has hosts", cluster)); + final List hostIds = _hostDao.listIdsByClusterId(cmd.getId()); + if (!hostIds.isEmpty()) { + logger.debug("{} still has hosts, can't remove", cluster); + throw new CloudRuntimeException("Cluster: " + cmd.getId() + " cannot be removed. Cluster still has hosts"); } // don't allow to remove the cluster if it has non-removed storage // pools final List storagePools = _storagePoolDao.listPoolsByCluster(cmd.getId()); if (storagePools.size() > 0) { - if (logger.isDebugEnabled()) { - logger.debug("Cluster: {} still has storage pools, can't remove", cluster); - } + logger.debug("{} still has storage pools, can't remove", cluster); throw new CloudRuntimeException(String.format("Cluster: %s cannot be removed. Cluster still has storage pools", cluster)); } @@ -2437,10 +2434,10 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, boolean clusterSupportsResigning = true; - List hostVOs = _hostDao.findByClusterId(host.getClusterId()); + List hostIds = _hostDao.listIdsByClusterId(host.getClusterId()); - for (HostVO hostVO : hostVOs) { - DetailVO hostDetailVO = _hostDetailsDao.findDetail(hostVO.getId(), name); + for (Long hostId : hostIds) { + DetailVO hostDetailVO = _hostDetailsDao.findDetail(hostId, name); if (hostDetailVO == null || Boolean.parseBoolean(hostDetailVO.getValue()) == false) { clusterSupportsResigning = false; @@ -3054,10 +3051,10 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, public boolean updateClusterPassword(final UpdateHostPasswordCmd command) { final boolean shouldUpdateHostPasswd = command.getUpdatePasswdOnHost(); // get agents for the cluster - final List hosts = listAllHostsInCluster(command.getClusterId()); - for (final HostVO host : hosts) { + final List hostIds = _hostDao.listIdsByClusterId(command.getClusterId()); + for (final Long hostId : hostIds) { try { - final Boolean result = propagateResourceEvent(host.getId(), ResourceState.Event.UpdatePassword); + final Boolean result = propagateResourceEvent(hostId, ResourceState.Event.UpdatePassword); if (result != null) { return result; } @@ -3066,8 +3063,9 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, } if (shouldUpdateHostPasswd) { - final boolean isUpdated = doUpdateHostPassword(host.getId()); + final boolean isUpdated = doUpdateHostPassword(hostId); if (!isUpdated) { + HostVO host = _hostDao.findById(hostId); throw new CloudRuntimeException( String.format("CloudStack failed to update the password of %s. 
Please make sure you are still able to connect to your hosts.", host)); } @@ -3281,26 +3279,13 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, } @Override - public List listAvailHypervisorInZone(final Long hostId, final Long zoneId) { - final SearchCriteria sc = _hypervisorsInDC.create(); - if (zoneId != null) { - sc.setParameters("dataCenter", zoneId); + public List listAvailHypervisorInZone(final Long zoneId) { + List systemVMTemplates = _templateDao.listAllReadySystemVMTemplates(zoneId); + final Set hypervisors = new HashSet<>(); + for (final VMTemplateVO systemVMTemplate : systemVMTemplates) { + hypervisors.add(systemVMTemplate.getHypervisorType()); } - if (hostId != null) { - // exclude the given host, since we want to check what hypervisor is already handled - // in adding this new host - sc.setParameters("id", hostId); - } - sc.setParameters("type", Host.Type.Routing); - - // The search is not able to return list of enums, so getting - // list of hypervisors as strings and then converting them to enum - final List hvs = _hostDao.customSearch(sc, null); - final List hypervisors = new ArrayList(); - for (final String hv : hvs) { - hypervisors.add(HypervisorType.getType(hv)); - } - return hypervisors; + return new ArrayList<>(hypervisors); } @Override @@ -3318,17 +3303,15 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, } @Override - public HostStats getHostStatistics(final long hostId) { - HostVO host = _hostDao.findById(hostId); - final Answer answer = _agentMgr.easySend(hostId, new GetHostStatsCommand(host.getGuid(), host.getName(), hostId)); + public HostStats getHostStatistics(final Host host) { + final Answer answer = _agentMgr.easySend(host.getId(), new GetHostStatsCommand(host.getGuid(), host.getName(), host.getId())); if (answer != null && answer instanceof UnsupportedAnswer) { return null; } if (answer == null || !answer.getResult()) { - final String msg = String.format("Unable to obtain host %s statistics. 
", host); - logger.warn(msg); + logger.warn("Unable to obtain {} statistics.", host); return null; } else { diff --git a/server/src/main/java/com/cloud/resource/RollingMaintenanceManagerImpl.java b/server/src/main/java/com/cloud/resource/RollingMaintenanceManagerImpl.java index 72c28953021..0838e835993 100644 --- a/server/src/main/java/com/cloud/resource/RollingMaintenanceManagerImpl.java +++ b/server/src/main/java/com/cloud/resource/RollingMaintenanceManagerImpl.java @@ -652,7 +652,7 @@ public class RollingMaintenanceManagerImpl extends ManagerBase implements Rollin continue; } boolean maxGuestLimit = capacityManager.checkIfHostReachMaxGuestLimit(host); - boolean hostHasCPUCapacity = capacityManager.checkIfHostHasCpuCapability(hostInCluster.getId(), cpu, speed); + boolean hostHasCPUCapacity = capacityManager.checkIfHostHasCpuCapability(hostInCluster, cpu, speed); int cpuRequested = cpu * speed; long ramRequested = ramSize * 1024L * 1024L; ClusterDetailsVO clusterDetailsCpuOvercommit = clusterDetailsDao.findDetail(cluster.getId(), "cpuOvercommitRatio"); diff --git a/server/src/main/java/com/cloud/resourcelimit/ResourceLimitManagerImpl.java b/server/src/main/java/com/cloud/resourcelimit/ResourceLimitManagerImpl.java index f37b661c22f..b8c6e29c278 100644 --- a/server/src/main/java/com/cloud/resourcelimit/ResourceLimitManagerImpl.java +++ b/server/src/main/java/com/cloud/resourcelimit/ResourceLimitManagerImpl.java @@ -93,7 +93,6 @@ import com.cloud.projects.Project; import com.cloud.projects.ProjectAccount.Role; import com.cloud.projects.dao.ProjectAccountDao; import com.cloud.projects.dao.ProjectDao; -import com.cloud.service.ServiceOfferingVO; import com.cloud.service.dao.ServiceOfferingDao; import com.cloud.storage.DataStoreRole; import com.cloud.storage.DiskOfferingVO; @@ -105,7 +104,6 @@ import com.cloud.storage.dao.DiskOfferingDao; import com.cloud.storage.dao.SnapshotDao; import com.cloud.storage.dao.VMTemplateDao; import com.cloud.storage.dao.VolumeDao; -import com.cloud.storage.dao.VolumeDaoImpl.SumCount; import com.cloud.template.VirtualMachineTemplate; import com.cloud.user.Account; import com.cloud.user.AccountManager; @@ -118,6 +116,7 @@ import com.cloud.utils.concurrency.NamedThreadFactory; import com.cloud.utils.db.DB; import com.cloud.utils.db.EntityManager; import com.cloud.utils.db.Filter; +import com.cloud.utils.db.GenericDaoBase.SumCount; import com.cloud.utils.db.GenericSearchBuilder; import com.cloud.utils.db.GlobalLock; import com.cloud.utils.db.JoinBuilder; @@ -1290,16 +1289,14 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim if (StringUtils.isEmpty(tag)) { return _userVmJoinDao.listByAccountServiceOfferingTemplateAndNotInState(accountId, states, null, null); } - List offerings = serviceOfferingDao.listByHostTag(tag); - List templates = _vmTemplateDao.listByTemplateTag(tag); + List offerings = serviceOfferingDao.listIdsByHostTag(tag); + List templates = _vmTemplateDao.listIdsByTemplateTag(tag); if (CollectionUtils.isEmpty(offerings) && CollectionUtils.isEmpty(templates)) { return new ArrayList<>(); } return _userVmJoinDao.listByAccountServiceOfferingTemplateAndNotInState(accountId, states, - offerings.stream().map(ServiceOfferingVO::getId).collect(Collectors.toList()), - templates.stream().map(VMTemplateVO::getId).collect(Collectors.toList()) - ); + offerings, templates); } protected List getVmsWithAccount(long accountId) { diff --git a/server/src/main/java/com/cloud/server/ManagementServerImpl.java 
b/server/src/main/java/com/cloud/server/ManagementServerImpl.java index 76d2943e18c..f54fd96bfdf 100644 --- a/server/src/main/java/com/cloud/server/ManagementServerImpl.java +++ b/server/src/main/java/com/cloud/server/ManagementServerImpl.java @@ -44,7 +44,6 @@ import javax.crypto.spec.SecretKeySpec; import javax.inject.Inject; import javax.naming.ConfigurationException; -import com.cloud.utils.security.CertificateHelper; import org.apache.cloudstack.acl.ControlledEntity; import org.apache.cloudstack.acl.SecurityChecker; import org.apache.cloudstack.affinity.AffinityGroupProcessor; @@ -292,6 +291,7 @@ import org.apache.cloudstack.api.command.admin.vm.DeployVMCmdByAdmin; import org.apache.cloudstack.api.command.admin.vm.DestroyVMCmdByAdmin; import org.apache.cloudstack.api.command.admin.vm.ExpungeVMCmd; import org.apache.cloudstack.api.command.admin.vm.GetVMUserDataCmd; +import org.apache.cloudstack.api.command.admin.vm.ListAffectedVmsForStorageScopeChangeCmd; import org.apache.cloudstack.api.command.admin.vm.ListVMsCmdByAdmin; import org.apache.cloudstack.api.command.admin.vm.MigrateVMCmd; import org.apache.cloudstack.api.command.admin.vm.MigrateVirtualMachineWithVolumeCmd; @@ -527,7 +527,6 @@ import org.apache.cloudstack.api.command.user.vm.AddIpToVmNicCmd; import org.apache.cloudstack.api.command.user.vm.AddNicToVMCmd; import org.apache.cloudstack.api.command.user.vm.DeployVMCmd; import org.apache.cloudstack.api.command.user.vm.DestroyVMCmd; -import org.apache.cloudstack.api.command.admin.vm.ListAffectedVmsForStorageScopeChangeCmd; import org.apache.cloudstack.api.command.user.vm.GetVMPasswordCmd; import org.apache.cloudstack.api.command.user.vm.ListNicsCmd; import org.apache.cloudstack.api.command.user.vm.ListVMsCmd; @@ -820,6 +819,7 @@ import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.fsm.StateMachine2; import com.cloud.utils.net.MacAddress; import com.cloud.utils.net.NetUtils; +import com.cloud.utils.security.CertificateHelper; import com.cloud.utils.ssh.SSHKeysHelper; import com.cloud.vm.ConsoleProxyVO; import com.cloud.vm.DiskProfile; @@ -5037,7 +5037,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe private boolean updateHostsInCluster(final UpdateHostPasswordCmd command) { // get all the hosts in this cluster - final List hosts = _resourceMgr.listAllHostsInCluster(command.getClusterId()); + final List hostIds = _hostDao.listIdsByClusterId(command.getClusterId()); String userNameWithoutSpaces = StringUtils.deleteWhitespace(command.getUsername()); if (StringUtils.isBlank(userNameWithoutSpaces)) { @@ -5047,19 +5047,17 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe Transaction.execute(new TransactionCallbackNoReturn() { @Override public void doInTransactionWithoutResult(final TransactionStatus status) { - for (final HostVO host : hosts) { - if (logger.isDebugEnabled()) { - logger.debug("Changing password for host {}", host); - } + for (final Long hostId : hostIds) { + logger.debug("Changing password for {}", () -> _hostDao.findById(hostId)); // update password for this host - final DetailVO nv = _detailsDao.findDetail(host.getId(), ApiConstants.USERNAME); + final DetailVO nv = _detailsDao.findDetail(hostId, ApiConstants.USERNAME); if (nv == null) { - final DetailVO nvu = new DetailVO(host.getId(), ApiConstants.USERNAME, userNameWithoutSpaces); + final DetailVO nvu = new DetailVO(hostId, ApiConstants.USERNAME, userNameWithoutSpaces); _detailsDao.persist(nvu); - final DetailVO nvp = new 
DetailVO(host.getId(), ApiConstants.PASSWORD, DBEncryptionUtil.encrypt(command.getPassword())); + final DetailVO nvp = new DetailVO(hostId, ApiConstants.PASSWORD, DBEncryptionUtil.encrypt(command.getPassword())); _detailsDao.persist(nvp); } else if (nv.getValue().equals(userNameWithoutSpaces)) { - final DetailVO nvp = _detailsDao.findDetail(host.getId(), ApiConstants.PASSWORD); + final DetailVO nvp = _detailsDao.findDetail(hostId, ApiConstants.PASSWORD); nvp.setValue(DBEncryptionUtil.encrypt(command.getPassword())); _detailsDao.persist(nvp); } else { diff --git a/server/src/main/java/com/cloud/server/StatsCollector.java b/server/src/main/java/com/cloud/server/StatsCollector.java index 2bdc008ca1a..5934716da66 100644 --- a/server/src/main/java/com/cloud/server/StatsCollector.java +++ b/server/src/main/java/com/cloud/server/StatsCollector.java @@ -42,6 +42,7 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; import javax.inject.Inject; @@ -132,6 +133,7 @@ import com.cloud.user.dao.UserStatisticsDao; import com.cloud.user.dao.VmDiskStatisticsDao; import com.cloud.utils.LogUtils; import com.cloud.utils.NumbersUtil; +import com.cloud.utils.Pair; import com.cloud.utils.component.ComponentMethodInterceptable; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.concurrency.NamedThreadFactory; @@ -148,7 +150,6 @@ import com.cloud.utils.net.MacAddress; import com.cloud.utils.script.Script; import com.cloud.vm.NicVO; import com.cloud.vm.UserVmManager; -import com.cloud.vm.UserVmVO; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachineManager; @@ -626,17 +627,21 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc externalStatsPrefix, externalStatsHost, externalStatsPort)); } - protected Map getVmMapForStatsForHost(Host host) { + protected Pair, Map> getVmMapForStatsForHost(Host host) { List vms = _vmInstance.listByHostAndState(host.getId(), VirtualMachine.State.Running); boolean collectUserVMStatsOnly = Boolean.TRUE.equals(vmStatsCollectUserVMOnly.value()); - Map vmMap = new HashMap<>(); - for (VMInstanceVO vm : vms) { - if (collectUserVMStatsOnly && !VirtualMachine.Type.User.equals(vm.getType())) { - continue; - } - vmMap.put(vm.getId(), vm); + if (collectUserVMStatsOnly) { + vms = vms.stream().filter(vm -> VirtualMachine.Type.User.equals(vm.getType())).collect(Collectors.toList()); } - return vmMap; + Map idInstanceMap = new HashMap<>(); + Map instanceNameIdMap = new HashMap<>(); + vms.forEach(vm -> { + if (!collectUserVMStatsOnly || VirtualMachine.Type.User.equals(vm.getType())) { + idInstanceMap.put(vm.getId(), vm); + instanceNameIdMap.put(vm.getInstanceName(), vm.getId()); + } + }); + return new Pair<>(idInstanceMap, instanceNameIdMap); } class HostCollector extends AbstractStatsCollector { @@ -650,7 +655,7 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc Map metrics = new HashMap<>(); for (HostVO host : hosts) { - HostStatsEntry hostStatsEntry = (HostStatsEntry) _resourceMgr.getHostStatistics(host.getId()); + HostStatsEntry hostStatsEntry = (HostStatsEntry) _resourceMgr.getHostStatistics(host); if (hostStatsEntry != null) { hostStatsEntry.setHostVo(host); metrics.put(hostStatsEntry.getHostId(), hostStatsEntry); @@ -1219,40 +1224,41 @@ public class StatsCollector extends ManagerBase implements 
ComponentMethodInterc Map metrics = new HashMap<>(); for (HostVO host : hosts) { Date timestamp = new Date(); - Map vmMap = getVmMapForStatsForHost(host); + Pair, Map> vmsAndMap = getVmMapForStatsForHost(host); + Map vmMap = vmsAndMap.first(); try { - Map vmStatsById = virtualMachineManager.getVirtualMachineStatistics(host.getId(), host.getName(), vmMap); - - if (vmStatsById != null) { - Set vmIdSet = vmStatsById.keySet(); - for (Long vmId : vmIdSet) { - VmStatsEntry statsForCurrentIteration = (VmStatsEntry)vmStatsById.get(vmId); - statsForCurrentIteration.setVmId(vmId); - VMInstanceVO vm = vmMap.get(vmId); - statsForCurrentIteration.setVmUuid(vm.getUuid()); - - persistVirtualMachineStats(statsForCurrentIteration, timestamp); - - if (externalStatsType == ExternalStatsProtocol.GRAPHITE) { - prepareVmMetricsForGraphite(metrics, statsForCurrentIteration); - } else { - metrics.put(statsForCurrentIteration.getVmId(), statsForCurrentIteration); - } - } - - if (!metrics.isEmpty()) { - if (externalStatsType == ExternalStatsProtocol.GRAPHITE) { - sendVmMetricsToGraphiteHost(metrics, host); - } else if (externalStatsType == ExternalStatsProtocol.INFLUXDB) { - sendMetricsToInfluxdb(metrics); - } - } - - metrics.clear(); + Map vmStatsById = virtualMachineManager.getVirtualMachineStatistics( + host, vmsAndMap.second()); + if (MapUtils.isEmpty(vmStatsById)) { + continue; } + Set vmIdSet = vmStatsById.keySet(); + for (Long vmId : vmIdSet) { + VmStatsEntry statsForCurrentIteration = (VmStatsEntry)vmStatsById.get(vmId); + statsForCurrentIteration.setVmId(vmId); + VMInstanceVO vm = vmMap.get(vmId); + statsForCurrentIteration.setVmUuid(vm.getUuid()); + + persistVirtualMachineStats(statsForCurrentIteration, timestamp); + + if (externalStatsType == ExternalStatsProtocol.GRAPHITE) { + prepareVmMetricsForGraphite(metrics, statsForCurrentIteration); + } else { + metrics.put(statsForCurrentIteration.getVmId(), statsForCurrentIteration); + } + } + + if (!metrics.isEmpty()) { + if (externalStatsType == ExternalStatsProtocol.GRAPHITE) { + sendVmMetricsToGraphiteHost(metrics, host); + } else if (externalStatsType == ExternalStatsProtocol.INFLUXDB) { + sendMetricsToInfluxdb(metrics); + } + } + + metrics.clear(); } catch (Exception e) { - logger.debug("Failed to get VM stats for host: {}", host); - continue; + logger.debug("Failed to get VM stats for : {}", host); } } @@ -1437,8 +1443,10 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc Transaction.execute(new TransactionCallbackNoReturn() { @Override public void doInTransactionWithoutResult(TransactionStatus status) { - Map vmMap = getVmMapForStatsForHost(host); - HashMap> vmDiskStatsById = virtualMachineManager.getVmDiskStatistics(host.getId(), host.getName(), vmMap); + Pair, Map> vmsAndMap = getVmMapForStatsForHost(host); + Map vmMap = vmsAndMap.first(); + HashMap> vmDiskStatsById = + virtualMachineManager.getVmDiskStatistics(host, vmsAndMap.second()); if (vmDiskStatsById == null) return; @@ -1544,8 +1552,10 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc Transaction.execute(new TransactionCallbackNoReturn() { @Override public void doInTransactionWithoutResult(TransactionStatus status) { - Map vmMap = getVmMapForStatsForHost(host); - HashMap> vmNetworkStatsById = virtualMachineManager.getVmNetworkStatistics(host.getId(), host.getName(), vmMap); + Pair, Map> vmsAndMap = getVmMapForStatsForHost(host); + Map vmMap = vmsAndMap.first(); + HashMap> vmNetworkStatsById = + 
virtualMachineManager.getVmNetworkStatistics(host, vmsAndMap.second()); if (vmNetworkStatsById == null) return; @@ -1554,9 +1564,9 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc List vmNetworkStats = vmNetworkStatsById.get(vmId); if (CollectionUtils.isEmpty(vmNetworkStats)) continue; - UserVmVO userVm = _userVmDao.findById(vmId); - if (userVm == null) { - logger.debug("Cannot find uservm with id: " + vmId + " , continue"); + VMInstanceVO userVm = vmMap.get(vmId); + if (!VirtualMachine.Type.User.equals(userVm.getType())) { + logger.debug("Cannot find uservm with id: {} , continue", vmId); continue; } logger.debug("Now we are updating the user_statistics table for VM: {} after collecting vm network statistics from host: {}", userVm, host); diff --git a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java index 8e99e3429ba..03f24dde27d 100644 --- a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java @@ -42,10 +42,14 @@ import java.util.Map; import java.util.Random; import java.util.Set; import java.util.UUID; +import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; +import java.util.concurrent.Future; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.stream.Collectors; import javax.inject.Inject; @@ -86,6 +90,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider; +import org.apache.cloudstack.engine.subsystem.api.storage.Scope; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotService; @@ -1557,6 +1562,82 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C return sb.toString(); } + protected void cleanupConnectedHostConnectionForFailedStorage(DataStore primaryStore, List poolHostIds) { + List hosts = _hostDao.listByIds(poolHostIds); + StoragePool pool = _storagePoolDao.findById(primaryStore.getId()); + for (HostVO host : hosts) { + try { + disconnectHostFromSharedPool(host, pool); + } catch (StorageUnavailableException | StorageConflictException e) { + logger.error("Error during cleaning up failed storage host connection", e); + } + } + } + + @Override + public void connectHostsToPool(DataStore primaryStore, List hostIds, Scope scope, + boolean handleExceptionsPartially, boolean errorOnNoUpHost) throws CloudRuntimeException { + if (CollectionUtils.isEmpty(hostIds)) { + return; + } + CopyOnWriteArrayList poolHostIds = new CopyOnWriteArrayList<>(); + ExecutorService executorService = Executors.newFixedThreadPool(Math.max(1, Math.min(hostIds.size(), + StoragePoolHostConnectWorkers.value()))); + List> futures = new ArrayList<>(); + AtomicBoolean exceptionOccurred = new AtomicBoolean(false); + for (Long hostId : hostIds) { + futures.add(executorService.submit(() -> { + if (exceptionOccurred.get()) { + 
return null; + } + HostVO host = _hostDao.findById(hostId); + try { + connectHostToSharedPool(host, primaryStore.getId()); + poolHostIds.add(hostId); + } catch (Exception e) { + if (handleExceptionsPartially && e.getCause() instanceof StorageConflictException) { + exceptionOccurred.set(true); + throw e; + } + logger.warn("Unable to establish a connection between {} and {}", host, primaryStore, e); + String reason = getStoragePoolMountFailureReason(e.getMessage()); + if (handleExceptionsPartially && reason != null) { + exceptionOccurred.set(true); + throw new CloudRuntimeException(reason); + } + } + return null; + })); + } + for (Future future : futures) { + try { + future.get(); + } catch (Exception e) { + Throwable cause = e.getCause(); + if (cause instanceof StorageConflictException || cause instanceof CloudRuntimeException) { + executorService.shutdown(); + cleanupConnectedHostConnectionForFailedStorage(primaryStore, poolHostIds); + primaryStoreDao.expunge(primaryStore.getId()); + if (cause instanceof CloudRuntimeException) { + throw (CloudRuntimeException)cause; + } + throw new CloudRuntimeException("Storage has already been added as local storage", e); + } else { + logger.error("Error occurred while connecting host to shared pool", e); + } + } + } + executorService.shutdown(); + if (poolHostIds.isEmpty()) { + logger.warn("No host can access storage pool {} in {}: {}.", + primaryStore, scope.getScopeType(), scope.getScopeId()); + if (errorOnNoUpHost) { + primaryStoreDao.expunge(primaryStore.getId()); + throw new CloudRuntimeException("Failed to access storage pool"); + } + } + } + @Override public boolean connectHostToSharedPool(Host host, long poolId) throws StorageUnavailableException, StorageConflictException { StoragePool pool = (StoragePool)_dataStoreMgr.getDataStore(poolId, DataStoreRole.Primary); @@ -4086,7 +4167,8 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C VmwareCreateCloneFull, VmwareAllowParallelExecution, DataStoreDownloadFollowRedirects, - AllowVolumeReSizeBeyondAllocation + AllowVolumeReSizeBeyondAllocation, + StoragePoolHostConnectWorkers }; } diff --git a/server/src/main/java/com/cloud/storage/download/DownloadListener.java b/server/src/main/java/com/cloud/storage/download/DownloadListener.java index 488e77ede29..e06a31d96b5 100644 --- a/server/src/main/java/com/cloud/storage/download/DownloadListener.java +++ b/server/src/main/java/com/cloud/storage/download/DownloadListener.java @@ -16,6 +16,7 @@ // under the License. 
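The connectHostsToPool method added to StorageManagerImpl above fans the per-host connections out over a bounded worker pool, records which hosts succeeded, and stops scheduling further work once a fatal error is flagged so the already-connected hosts can be cleaned up. A self-contained sketch of that pattern, with a simulated connect step standing in for connectHostToSharedPool and a hard-coded worker count in place of the StoragePoolHostConnectWorkers setting:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.CopyOnWriteArrayList;
    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicBoolean;

    // Pattern sketch only: the connect step is simulated and error handling is simplified.
    class ParallelPoolConnectSketch {
        static void connectHostToPool(long hostId) throws Exception {
            if (hostId == 3) {
                throw new IllegalStateException("storage already added as local storage"); // simulated fatal error
            }
            TimeUnit.MILLISECONDS.sleep(50);                                               // simulated mount work
        }

        public static void main(String[] args) throws InterruptedException {
            List<Long> hostIds = List.of(1L, 2L, 3L, 4L, 5L);
            int workers = Math.max(1, Math.min(hostIds.size(), 2));    // bounded pool, like a config-driven worker count
            ExecutorService pool = Executors.newFixedThreadPool(workers);
            CopyOnWriteArrayList<Long> connected = new CopyOnWriteArrayList<>();
            AtomicBoolean fatal = new AtomicBoolean(false);

            List<Future<?>> futures = new ArrayList<>();
            for (Long hostId : hostIds) {
                futures.add(pool.submit(() -> {
                    if (fatal.get()) {
                        return null;                                   // skip remaining hosts after a fatal error
                    }
                    try {
                        connectHostToPool(hostId);
                        connected.add(hostId);                         // remember successes for potential cleanup
                    } catch (Exception e) {
                        fatal.set(true);
                        throw e;                                       // surfaces through Future.get()
                    }
                    return null;
                }));
            }
            for (Future<?> future : futures) {
                try {
                    future.get();
                } catch (ExecutionException e) {
                    System.out.println("fatal: " + e.getCause().getMessage() + "; would disconnect " + connected);
                }
            }
            pool.shutdown();
            System.out.println("hosts connected: " + connected);
        }
    }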
package com.cloud.storage.download; +import java.util.Collections; import java.util.Date; import java.util.HashMap; import java.util.List; @@ -24,10 +25,6 @@ import java.util.Timer; import javax.inject.Inject; -import org.apache.logging.log4j.Level; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.LogManager; - import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; @@ -41,6 +38,10 @@ import org.apache.cloudstack.storage.command.DownloadCommand; import org.apache.cloudstack.storage.command.DownloadCommand.ResourceType; import org.apache.cloudstack.storage.command.DownloadProgressCommand; import org.apache.cloudstack.storage.command.DownloadProgressCommand.RequestType; +import org.apache.cloudstack.utils.cache.LazyCache; +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import com.cloud.agent.Listener; import com.cloud.agent.api.AgentControlAnswer; @@ -54,7 +55,7 @@ import com.cloud.agent.api.storage.DownloadAnswer; import com.cloud.agent.api.to.DataObjectType; import com.cloud.exception.ConnectionException; import com.cloud.host.Host; -import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.hypervisor.Hypervisor; import com.cloud.resource.ResourceManager; import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; import com.cloud.storage.download.DownloadState.DownloadEvent; @@ -136,6 +137,20 @@ public class DownloadListener implements Listener { @Inject private VolumeService _volumeSrv; + private LazyCache> zoneHypervisorsCache; + + private List listAvailHypervisorInZone(long zoneId) { + if (_resourceMgr == null) { + return Collections.emptyList(); + } + return _resourceMgr.listAvailHypervisorInZone(zoneId); + } + + protected void initZoneHypervisorsCache() { + zoneHypervisorsCache = + new LazyCache<>(32, 30, this::listAvailHypervisorInZone); + } + // TODO: this constructor should be the one used for template only, remove other template constructor later public DownloadListener(EndPoint ssAgent, DataStore store, DataObject object, Timer timer, DownloadMonitorImpl downloadMonitor, DownloadCommand cmd, AsyncCompletionCallback callback) { @@ -151,6 +166,12 @@ public class DownloadListener implements Listener { _callback = callback; DownloadAnswer answer = new DownloadAnswer("", Status.NOT_DOWNLOADED); callback(answer); + initZoneHypervisorsCache(); + } + + public DownloadListener(DownloadMonitorImpl monitor) { + _downloadMonitor = monitor; + initZoneHypervisorsCache(); } public AsyncCompletionCallback getCallback() { @@ -212,10 +233,6 @@ public class DownloadListener implements Listener { message, object.getType(), object.getUuid(), object, _ssAgent.getId(), _ssAgent.getUuid()); } - public DownloadListener(DownloadMonitorImpl monitor) { - _downloadMonitor = monitor; - } - @Override public boolean isRecurring() { return false; @@ -280,14 +297,15 @@ public class DownloadListener implements Listener { @Override public void processConnect(Host agent, StartupCommand cmd, boolean forRebalance) throws ConnectionException { if (cmd instanceof StartupRoutingCommand) { - List hypers = _resourceMgr.listAvailHypervisorInZone(agent.getId(), agent.getDataCenterId()); - HypervisorType hostHyper = agent.getHypervisorType(); - if (hypers.contains(hostHyper)) { + List hypervisors = zoneHypervisorsCache.get(agent.getDataCenterId()); 
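Both LibvirtServerDiscoverer (earlier in this diff) and DownloadListener here start answering repeated per-cluster or per-zone questions through a LazyCache built from a capacity, a TTL in seconds, and a loader callback, and read with get(key). CloudStack ships its own utility for this; the generic cache below is only a stand-in sketch of the shape of that idea, not the project's LazyCache.

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.function.Function;

    // Stand-in for a loader-backed cache with time-based expiry.
    class ExpiringLoaderCache<K, V> {
        private static final class Entry<T> {
            final T value; final long loadedAtNanos;
            Entry(T value, long loadedAtNanos) { this.value = value; this.loadedAtNanos = loadedAtNanos; }
        }

        private final Map<K, Entry<V>> entries = new ConcurrentHashMap<>();
        private final long ttlNanos;
        private final Function<K, V> loader;

        ExpiringLoaderCache(long ttlSeconds, Function<K, V> loader) {
            this.ttlNanos = ttlSeconds * 1_000_000_000L;
            this.loader = loader;
        }

        V get(K key) {
            long now = System.nanoTime();
            Entry<V> e = entries.compute(key, (k, old) ->
                    (old != null && now - old.loadedAtNanos < ttlNanos)
                            ? old
                            : new Entry<>(loader.apply(k), now));     // reload only when missing or stale
            return e.value;
        }

        public static void main(String[] args) {
            // Example: cache a per-zone lookup for 30 seconds per zone id.
            ExpiringLoaderCache<Long, String> cache =
                    new ExpiringLoaderCache<>(30, zoneId -> "loaded-for-zone-" + zoneId);
            System.out.println(cache.get(1L)); // loads via the callback
            System.out.println(cache.get(1L)); // served from the cache within the TTL
        }
    }

The benefit in processConnect is that a burst of agents connecting in the same zone triggers one lookup per TTL window instead of one per agent.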
+ Hypervisor.HypervisorType hostHyper = agent.getHypervisorType(); + if (hypervisors.contains(hostHyper)) { return; } _imageSrv.handleSysTemplateDownload(hostHyper, agent.getDataCenterId()); // update template_zone_ref for cross-zone templates _imageSrv.associateCrosszoneTemplatesToZone(agent.getDataCenterId()); + } /* This can be removed else if ( cmd instanceof StartupStorageCommand) { diff --git a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java index bdd2ce94f3e..f497c881015 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java @@ -2098,7 +2098,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir // #1 Check existing host has capacity & and the correct tags if (!excludes.shouldAvoid(ApiDBUtils.findHostById(vmInstance.getHostId()))) { - existingHostHasCapacity = _capacityMgr.checkIfHostHasCpuCapability(vmInstance.getHostId(), newCpu, newSpeed) + existingHostHasCapacity = _capacityMgr.checkIfHostHasCpuCapability(host, newCpu, newSpeed) && _capacityMgr.checkIfHostHasCapacity(host, cpuDiff, ByteScaleUtils.mebibytesToBytes(memoryDiff), false, _capacityMgr.getClusterOverProvisioningFactor(host.getClusterId(), Capacity.CAPACITY_TYPE_CPU), _capacityMgr.getClusterOverProvisioningFactor(host.getClusterId(), Capacity.CAPACITY_TYPE_MEMORY), false) diff --git a/server/src/main/java/org/apache/cloudstack/acl/RoleManagerImpl.java b/server/src/main/java/org/apache/cloudstack/acl/RoleManagerImpl.java index 87b119542c5..d1ae1b44a51 100644 --- a/server/src/main/java/org/apache/cloudstack/acl/RoleManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/acl/RoleManagerImpl.java @@ -594,7 +594,7 @@ public class RoleManagerImpl extends ManagerBase implements RoleService, Configu @Override public ConfigKey[] getConfigKeys() { - return new ConfigKey[] {RoleService.EnableDynamicApiChecker}; + return new ConfigKey[] {EnableDynamicApiChecker, DynamicApiCheckerCachePeriod}; } @Override diff --git a/server/src/main/java/org/apache/cloudstack/agent/lb/IndirectAgentLBServiceImpl.java b/server/src/main/java/org/apache/cloudstack/agent/lb/IndirectAgentLBServiceImpl.java index 97e503974cf..84c3081bfc1 100644 --- a/server/src/main/java/org/apache/cloudstack/agent/lb/IndirectAgentLBServiceImpl.java +++ b/server/src/main/java/org/apache/cloudstack/agent/lb/IndirectAgentLBServiceImpl.java @@ -18,9 +18,7 @@ package org.apache.cloudstack.agent.lb; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collections; import java.util.Comparator; -import java.util.EnumSet; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -34,17 +32,19 @@ import org.apache.cloudstack.agent.lb.algorithm.IndirectAgentLBStaticAlgorithm; import org.apache.cloudstack.config.ApiServiceConfiguration; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.Configurable; +import org.apache.commons.lang3.StringUtils; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; +import com.cloud.dc.DataCenterVO; +import com.cloud.dc.dao.ClusterDao; +import com.cloud.dc.dao.DataCenterDao; import com.cloud.host.Host; -import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor; import com.cloud.resource.ResourceState; import com.cloud.utils.component.ComponentLifecycleBase; import com.cloud.utils.exception.CloudRuntimeException; 
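The IndirectAgentLBServiceImpl rework below keeps the same outward behaviour for agents: each host receives the configured management-server list reordered against an ordered host-id list, and a single-entry list can be returned as-is without building the host list at all. The rotation below is a simplified illustration of the round-robin idea, not the exact algorithm in IndirectAgentLBRoundRobinAlgorithm.

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;

    // Simplified round-robin ordering of the management-server list per host.
    class RoundRobinMsListSketch {
        static List<String> orderedMsListForHost(List<String> msList, List<Long> orderedHostIds, long hostId) {
            if (msList.size() <= 1) {
                return msList;                                 // nothing to balance with a single server
            }
            int hostIndex = orderedHostIds.indexOf(hostId);
            int shift = (hostIndex < 0 ? 0 : hostIndex) % msList.size();
            List<String> rotated = new ArrayList<>(msList);
            Collections.rotate(rotated, -shift);               // host i starts at server i (mod server count)
            return rotated;
        }

        public static void main(String[] args) {
            List<String> ms = List.of("10.0.0.1", "10.0.0.2", "10.0.0.3");
            List<Long> hosts = List.of(101L, 102L, 103L, 104L);
            for (long h : hosts) {
                System.out.println(h + " -> " + orderedMsListForHost(ms, hosts, h));
            }
            // 101 -> [10.0.0.1, 10.0.0.2, 10.0.0.3], 102 -> [10.0.0.2, 10.0.0.3, 10.0.0.1], ...
        }
    }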
-import org.apache.commons.lang3.StringUtils; public class IndirectAgentLBServiceImpl extends ComponentLifecycleBase implements IndirectAgentLB, Configurable { @@ -60,11 +60,23 @@ public class IndirectAgentLBServiceImpl extends ComponentLifecycleBase implement private static Map algorithmMap = new HashMap<>(); + @Inject + private DataCenterDao dataCenterDao; + @Inject + private ClusterDao clusterDao; @Inject private HostDao hostDao; @Inject private AgentManager agentManager; + private static final List agentValidResourceStates = List.of( + ResourceState.Enabled, ResourceState.Maintenance, ResourceState.Disabled, + ResourceState.ErrorInMaintenance, ResourceState.PrepareForMaintenance); + private static final List agentValidHostTypes = List.of(Host.Type.Routing, Host.Type.ConsoleProxy, + Host.Type.SecondaryStorage, Host.Type.SecondaryStorageVM); + private static final List agentValidHypervisorTypes = List.of( + Hypervisor.HypervisorType.KVM, Hypervisor.HypervisorType.LXC); + ////////////////////////////////////////////////////// /////////////// Agent MSLB Methods /////////////////// ////////////////////////////////////////////////////// @@ -76,22 +88,22 @@ public class IndirectAgentLBServiceImpl extends ComponentLifecycleBase implement throw new CloudRuntimeException(String.format("No management server addresses are defined in '%s' setting", ApiServiceConfiguration.ManagementServerAddresses.key())); } + final List msList = Arrays.asList(msServerAddresses.replace(" ", "").split(",")); + if (msList.size() == 1) { + return msList; + } + final org.apache.cloudstack.agent.lb.IndirectAgentLBAlgorithm algorithm = getAgentMSLBAlgorithm(); List hostIdList = orderedHostIdList; if (hostIdList == null) { - hostIdList = getOrderedHostIdList(dcId); + hostIdList = algorithm.isHostListNeeded() ? getOrderedHostIdList(dcId) : new ArrayList<>(); } // just in case we have a host in creating state make sure it is in the list: if (null != hostId && ! 
hostIdList.contains(hostId)) { - if (logger.isTraceEnabled()) { - logger.trace("adding requested host to host list as it does not seem to be there; " + hostId); - } + logger.trace("adding requested host to host list as it does not seem to be there; {}", hostId); hostIdList.add(hostId); } - - final org.apache.cloudstack.agent.lb.IndirectAgentLBAlgorithm algorithm = getAgentMSLBAlgorithm(); - final List msList = Arrays.asList(msServerAddresses.replace(" ", "").split(",")); return algorithm.sort(msList, hostIdList, hostId); } @@ -119,76 +131,14 @@ public class IndirectAgentLBServiceImpl extends ComponentLifecycleBase implement } List getOrderedHostIdList(final Long dcId) { - final List hostIdList = new ArrayList<>(); - for (final Host host : getAllAgentBasedHosts()) { - if (host.getDataCenterId() == dcId) { - hostIdList.add(host.getId()); - } - } - Collections.sort(hostIdList, new Comparator() { - @Override - public int compare(Long x, Long y) { - return Long.compare(x,y); - } - }); + final List hostIdList = getAllAgentBasedHostsFromDB(dcId, null); + hostIdList.sort(Comparator.comparingLong(x -> x)); return hostIdList; } - private List getAllAgentBasedHosts() { - final List allHosts = hostDao.listAll(); - if (allHosts == null) { - return new ArrayList<>(); - } - final List agentBasedHosts = new ArrayList<>(); - for (final Host host : allHosts) { - conditionallyAddHost(agentBasedHosts, host); - } - return agentBasedHosts; - } - - private void conditionallyAddHost(List agentBasedHosts, Host host) { - if (host == null) { - if (logger.isTraceEnabled()) { - logger.trace("trying to add no host to a list"); - } - return; - } - - EnumSet allowedStates = EnumSet.of( - ResourceState.Enabled, - ResourceState.Maintenance, - ResourceState.Disabled, - ResourceState.ErrorInMaintenance, - ResourceState.PrepareForMaintenance); - // so the remaining EnumSet disallowedStates = EnumSet.complementOf(allowedStates) - // would be {ResourceState.Creating, ResourceState.Error}; - if (!allowedStates.contains(host.getResourceState())) { - if (logger.isTraceEnabled()) { - logger.trace("host ({}) is in '{}' state, not adding to the host list", host, host.getResourceState()); - } - return; - } - - if (host.getType() != Host.Type.Routing - && host.getType() != Host.Type.ConsoleProxy - && host.getType() != Host.Type.SecondaryStorage - && host.getType() != Host.Type.SecondaryStorageVM) { - if (logger.isTraceEnabled()) { - logger.trace(String.format("host (%s) is of wrong type, not adding to the host list, type = %s", host, host.getType())); - } - return; - } - - if (host.getHypervisorType() != null - && ! 
(host.getHypervisorType() == Hypervisor.HypervisorType.KVM || host.getHypervisorType() == Hypervisor.HypervisorType.LXC)) { - - if (logger.isTraceEnabled()) { - logger.trace(String.format("hypervisor is not the right type, not adding to the host list, (host: %s, hypervisortype: %s)", host, host.getHypervisorType())); - } - return; - } - - agentBasedHosts.add(host); + private List getAllAgentBasedHostsFromDB(final Long zoneId, final Long clusterId) { + return hostDao.findHostIdsByZoneClusterResourceStateTypeAndHypervisorType(zoneId, clusterId, + agentValidResourceStates, agentValidHostTypes, agentValidHypervisorTypes); } private org.apache.cloudstack.agent.lb.IndirectAgentLBAlgorithm getAgentMSLBAlgorithm() { @@ -208,18 +158,28 @@ public class IndirectAgentLBServiceImpl extends ComponentLifecycleBase implement public void propagateMSListToAgents() { logger.debug("Propagating management server list update to agents"); final String lbAlgorithm = getLBAlgorithmName(); - final Map> dcOrderedHostsMap = new HashMap<>(); - for (final Host host : getAllAgentBasedHosts()) { - final Long dcId = host.getDataCenterId(); - if (!dcOrderedHostsMap.containsKey(dcId)) { - dcOrderedHostsMap.put(dcId, getOrderedHostIdList(dcId)); + List zones = dataCenterDao.listAll(); + for (DataCenterVO zone : zones) { + List zoneHostIds = new ArrayList<>(); + Map> clusterHostIdsMap = new HashMap<>(); + List clusterIds = clusterDao.listAllClusterIds(zone.getId()); + for (Long clusterId : clusterIds) { + List hostIds = getAllAgentBasedHostsFromDB(zone.getId(), clusterId); + clusterHostIdsMap.put(clusterId, hostIds); + zoneHostIds.addAll(hostIds); } - final List msList = getManagementServerList(host.getId(), host.getDataCenterId(), dcOrderedHostsMap.get(dcId)); - final Long lbCheckInterval = getLBPreferredHostCheckInterval(host.getClusterId()); - final SetupMSListCommand cmd = new SetupMSListCommand(msList, lbAlgorithm, lbCheckInterval); - final Answer answer = agentManager.easySend(host.getId(), cmd); - if (answer == null || !answer.getResult()) { - logger.warn(String.format("Failed to setup management servers list to the agent of %s", host)); + zoneHostIds.sort(Comparator.comparingLong(x -> x)); + for (Long clusterId : clusterIds) { + final Long lbCheckInterval = getLBPreferredHostCheckInterval(clusterId); + List hostIds = clusterHostIdsMap.get(clusterId); + for (Long hostId : hostIds) { + final List msList = getManagementServerList(hostId, zone.getId(), zoneHostIds); + final SetupMSListCommand cmd = new SetupMSListCommand(msList, lbAlgorithm, lbCheckInterval); + final Answer answer = agentManager.easySend(hostId, cmd); + if (answer == null || !answer.getResult()) { + logger.warn("Failed to setup management servers list to the agent of ID: {}", hostId); + } + } } } } diff --git a/server/src/main/java/org/apache/cloudstack/agent/lb/algorithm/IndirectAgentLBRoundRobinAlgorithm.java b/server/src/main/java/org/apache/cloudstack/agent/lb/algorithm/IndirectAgentLBRoundRobinAlgorithm.java index 63fea5e74e6..49e6899aaeb 100644 --- a/server/src/main/java/org/apache/cloudstack/agent/lb/algorithm/IndirectAgentLBRoundRobinAlgorithm.java +++ b/server/src/main/java/org/apache/cloudstack/agent/lb/algorithm/IndirectAgentLBRoundRobinAlgorithm.java @@ -56,4 +56,9 @@ public class IndirectAgentLBRoundRobinAlgorithm implements IndirectAgentLBAlgori public boolean compare(final List msList, final List receivedMsList) { return msList != null && receivedMsList != null && msList.equals(receivedMsList); } + + @Override + public boolean 
isHostListNeeded() { + return true; + } } diff --git a/server/src/main/java/org/apache/cloudstack/outofbandmanagement/OutOfBandManagementServiceImpl.java b/server/src/main/java/org/apache/cloudstack/outofbandmanagement/OutOfBandManagementServiceImpl.java index e4481dab548..d5013f71cb5 100644 --- a/server/src/main/java/org/apache/cloudstack/outofbandmanagement/OutOfBandManagementServiceImpl.java +++ b/server/src/main/java/org/apache/cloudstack/outofbandmanagement/OutOfBandManagementServiceImpl.java @@ -289,11 +289,11 @@ public class OutOfBandManagementServiceImpl extends ManagerBase implements OutOf && isOutOfBandManagementEnabledForHost(host.getId()); } - public boolean transitionPowerStateToDisabled(List hosts) { + public boolean transitionPowerStateToDisabled(List hostIds) { boolean result = true; - for (Host host : hosts) { + for (Long hostId : hostIds) { result = result && transitionPowerState(OutOfBandManagement.PowerState.Event.Disabled, - outOfBandManagementDao.findByHost(host.getId())); + outOfBandManagementDao.findByHost(hostId)); } return result; } @@ -322,7 +322,7 @@ public class OutOfBandManagementServiceImpl extends ManagerBase implements OutOf @ActionEvent(eventType = EventTypes.EVENT_HOST_OUTOFBAND_MANAGEMENT_DISABLE, eventDescription = "disabling out-of-band management on a zone") public OutOfBandManagementResponse disableOutOfBandManagement(final DataCenter zone) { dataCenterDetailsDao.persist(zone.getId(), OOBM_ENABLED_DETAIL, String.valueOf(false)); - transitionPowerStateToDisabled(hostDao.findByDataCenterId(zone.getId())); + transitionPowerStateToDisabled(hostDao.listIdsByDataCenterId(zone.getId())); return buildEnableDisableResponse(false); } @@ -338,7 +338,7 @@ public class OutOfBandManagementServiceImpl extends ManagerBase implements OutOf @ActionEvent(eventType = EventTypes.EVENT_HOST_OUTOFBAND_MANAGEMENT_DISABLE, eventDescription = "disabling out-of-band management on a cluster") public OutOfBandManagementResponse disableOutOfBandManagement(final Cluster cluster) { clusterDetailsDao.persist(cluster.getId(), OOBM_ENABLED_DETAIL, String.valueOf(false)); - transitionPowerStateToDisabled(hostDao.findByClusterId(cluster.getId())); + transitionPowerStateToDisabled(hostDao.listIdsByClusterId(cluster.getId())); return buildEnableDisableResponse(false); } @@ -358,7 +358,7 @@ public class OutOfBandManagementServiceImpl extends ManagerBase implements OutOf outOfBandManagementConfig.setEnabled(true); boolean updateResult = outOfBandManagementDao.update(outOfBandManagementConfig.getId(), (OutOfBandManagementVO) outOfBandManagementConfig); if (updateResult) { - transitionPowerStateToDisabled(Collections.singletonList(host)); + transitionPowerStateToDisabled(Collections.singletonList(host.getId())); } return buildEnableDisableResponse(true); } @@ -371,7 +371,7 @@ public class OutOfBandManagementServiceImpl extends ManagerBase implements OutOf outOfBandManagementConfig.setEnabled(false); boolean updateResult = outOfBandManagementDao.update(outOfBandManagementConfig.getId(), (OutOfBandManagementVO) outOfBandManagementConfig); if (updateResult) { - transitionPowerStateToDisabled(Collections.singletonList(host)); + transitionPowerStateToDisabled(Collections.singletonList(host.getId())); } return buildEnableDisableResponse(false); } @@ -582,10 +582,8 @@ public class OutOfBandManagementServiceImpl extends ManagerBase implements OutOf if (isOutOfBandManagementEnabled(host)) { submitBackgroundPowerSyncTask(host); } else if (outOfBandManagementHost.getPowerState() != 
OutOfBandManagement.PowerState.Disabled) { - if (transitionPowerStateToDisabled(Collections.singletonList(host))) { - if (logger.isDebugEnabled()) { - logger.debug(String.format("Out-of-band management was disabled in zone/cluster/host, disabled power state for %s", host)); - } + if (transitionPowerStateToDisabled(Collections.singletonList(host.getId()))) { + logger.debug("Out-of-band management was disabled in zone/cluster/host, disabled power state for {}", host); } } } diff --git a/server/src/test/java/com/cloud/alert/AlertManagerImplTest.java b/server/src/test/java/com/cloud/alert/AlertManagerImplTest.java index e04c5e181e7..2959e73ae9e 100644 --- a/server/src/test/java/com/cloud/alert/AlertManagerImplTest.java +++ b/server/src/test/java/com/cloud/alert/AlertManagerImplTest.java @@ -16,13 +16,13 @@ // under the License. package com.cloud.alert; -import com.cloud.alert.dao.AlertDao; -import com.cloud.dc.ClusterVO; -import com.cloud.dc.DataCenterVO; -import com.cloud.dc.HostPodVO; -import com.cloud.dc.dao.ClusterDao; -import com.cloud.dc.dao.DataCenterDao; -import com.cloud.dc.dao.HostPodDao; +import java.io.UnsupportedEncodingException; +import java.util.List; + +import javax.mail.MessagingException; + +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.utils.mailing.SMTPMailSender; import org.apache.logging.log4j.Logger; import org.junit.Assert; @@ -34,8 +34,19 @@ import org.mockito.Mockito; import org.mockito.Spy; import org.mockito.junit.MockitoJUnitRunner; -import javax.mail.MessagingException; -import java.io.UnsupportedEncodingException; +import com.cloud.alert.dao.AlertDao; +import com.cloud.capacity.Capacity; +import com.cloud.capacity.CapacityManager; +import com.cloud.dc.ClusterVO; +import com.cloud.dc.DataCenterVO; +import com.cloud.dc.HostPodVO; +import com.cloud.dc.dao.ClusterDao; +import com.cloud.dc.dao.DataCenterDao; +import com.cloud.dc.dao.HostPodDao; +import com.cloud.host.Host; +import com.cloud.host.HostVO; +import com.cloud.host.dao.HostDao; +import com.cloud.storage.StorageManager; @RunWith(MockitoJUnitRunner.class) public class AlertManagerImplTest { @@ -59,6 +70,18 @@ public class AlertManagerImplTest { @Mock AlertVO alertVOMock; + @Mock + HostDao hostDao; + + @Mock + PrimaryDataStoreDao primaryDataStoreDao; + + @Mock + CapacityManager capacityManager; + + @Mock + StorageManager storageManager; + @Mock Logger loggerMock; @@ -119,4 +142,34 @@ public class AlertManagerImplTest { Mockito.verify(alertManagerImplMock.logger, Mockito.times(2)).warn(Mockito.anyString()); Mockito.verify(alertManagerImplMock, Mockito.never()).sendMessage(Mockito.any()); } + + @Test + public void testRecalculateHostCapacities() { + List mockHostIds = List.of(1L, 2L, 3L); + Mockito.when(hostDao.listIdsByType(Host.Type.Routing)).thenReturn(mockHostIds); + HostVO host = Mockito.mock(HostVO.class); + Mockito.when(hostDao.findById(Mockito.anyLong())).thenReturn(host); + Mockito.doNothing().when(capacityManager).updateCapacityForHost(host); + alertManagerImplMock.recalculateHostCapacities(); + Mockito.verify(hostDao, Mockito.times(3)).findById(Mockito.anyLong()); + Mockito.verify(capacityManager, Mockito.times(3)).updateCapacityForHost(host); + } + + @Test + public void testRecalculateStorageCapacities() { + List mockPoolIds = List.of(101L, 102L, 103L); + Mockito.when(primaryDataStoreDao.listAllIds()).thenReturn(mockPoolIds); + StoragePoolVO sharedPool = 
Mockito.mock(StoragePoolVO.class); + Mockito.when(sharedPool.isShared()).thenReturn(true); + Mockito.when(primaryDataStoreDao.findById(mockPoolIds.get(0))).thenReturn(sharedPool); + Mockito.when(primaryDataStoreDao.findById(mockPoolIds.get(1))).thenReturn(sharedPool); + StoragePoolVO nonSharedPool = Mockito.mock(StoragePoolVO.class); + Mockito.when(nonSharedPool.isShared()).thenReturn(false); + Mockito.when(primaryDataStoreDao.findById(mockPoolIds.get(2))).thenReturn(nonSharedPool); + Mockito.when(capacityManager.getAllocatedPoolCapacity(sharedPool, null)).thenReturn(10L); + Mockito.when(capacityManager.getAllocatedPoolCapacity(nonSharedPool, null)).thenReturn(20L); + alertManagerImplMock.recalculateStorageCapacities(); + Mockito.verify(storageManager, Mockito.times(2)).createCapacityEntry(sharedPool, Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED, 10L); + Mockito.verify(storageManager, Mockito.times(1)).createCapacityEntry(nonSharedPool, Capacity.CAPACITY_TYPE_LOCAL_STORAGE, 20L); + } } diff --git a/server/src/test/java/com/cloud/capacity/CapacityManagerImplTest.java b/server/src/test/java/com/cloud/capacity/CapacityManagerImplTest.java new file mode 100644 index 00000000000..a69830e2e0b --- /dev/null +++ b/server/src/test/java/com/cloud/capacity/CapacityManagerImplTest.java @@ -0,0 +1,182 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
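// Illustrative sketch (not part of this patch): the two AlertManagerImplTest cases above only pin
// down the expected interactions, so the outline below is an assumed reading of the recalculation
// loop in AlertManagerImpl after the switch to id-based DAO listings
// (primaryDataStoreDao.listAllIds() instead of a full StoragePoolVO list). Any name not visible in
// the hunk above is a placeholder.
void recalculateStorageCapacities() {
    for (final Long poolId : primaryDataStoreDao.listAllIds()) {
        final StoragePoolVO pool = primaryDataStoreDao.findById(poolId);
        if (pool == null) {
            continue; // pool removed between the id listing and the lookup
        }
        final long allocated = capacityManager.getAllocatedPoolCapacity(pool, null);
        // shared pools are accounted as allocated primary storage, local pools as local storage
        final short capacityType = pool.isShared()
                ? Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED
                : Capacity.CAPACITY_TYPE_LOCAL_STORAGE;
        storageManager.createCapacityEntry(pool, capacityType, allocated);
    }
}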
+package com.cloud.capacity; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.mockito.ArgumentMatchers.anyList; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.cloudstack.utils.bytescale.ByteScaleUtils; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; + +import com.cloud.dc.ClusterDetailsDao; +import com.cloud.host.Host; +import com.cloud.offering.ServiceOffering; +import com.cloud.service.ServiceOfferingVO; +import com.cloud.service.dao.ServiceOfferingDao; +import com.cloud.utils.Pair; +import com.cloud.vm.VmDetailConstants; + +@RunWith(MockitoJUnitRunner.class) +public class CapacityManagerImplTest { + @Mock + ClusterDetailsDao clusterDetailsDao; + @Mock + ServiceOfferingDao serviceOfferingDao; + + @Spy + @InjectMocks + CapacityManagerImpl capacityManager = new CapacityManagerImpl(); + + + private Host host; + private ServiceOffering offering; + private static final long CLUSTER_ID = 1L; + private static final int OFFERING_CPU = 4; + private static final int OFFERING_CPU_SPEED = 2000; + private static final int OFFERING_MEMORY = 4096; + + @Before + public void setUp() { + host = mock(Host.class); + offering = mock(ServiceOffering.class); + when(host.getClusterId()).thenReturn(CLUSTER_ID); + when(offering.getCpu()).thenReturn(OFFERING_CPU); + when(offering.getSpeed()).thenReturn(OFFERING_CPU_SPEED); + when(offering.getRamSize()).thenReturn(OFFERING_MEMORY); + } + + @Test + public void testGetClusterValues() { + long clusterId = 1L; + String cpuOvercommit = "2.0"; + String memoryOvercommit = "1.0"; + Map clusterDetails = new HashMap<>(); + clusterDetails.put(VmDetailConstants.CPU_OVER_COMMIT_RATIO, cpuOvercommit); + clusterDetails.put(VmDetailConstants.MEMORY_OVER_COMMIT_RATIO, memoryOvercommit); + + when(clusterDetailsDao.findDetails(eq(clusterId), anyList())).thenReturn(clusterDetails); + + Pair result = capacityManager.getClusterValues(clusterId); + + assertEquals(cpuOvercommit, result.first()); + assertEquals(memoryOvercommit, result.second()); + verify(clusterDetailsDao).findDetails(eq(clusterId), anyList()); + } + + @Test + public void testGetServiceOfferingsMap() { + Long offering1Id = 1L; + ServiceOfferingVO offering1 = mock(ServiceOfferingVO.class); + when(offering1.getId()).thenReturn(offering1Id); + Long offering2Id = 2L; + ServiceOfferingVO offering2 = mock(ServiceOfferingVO.class); + when(offering2.getId()).thenReturn(offering2Id); + when(serviceOfferingDao.listAllIncludingRemoved()).thenReturn(List.of(offering1, offering2)); + Map result = capacityManager.getServiceOfferingsMap(); + assertEquals(2, result.size()); + assertEquals(offering1, result.get(offering1Id)); + assertEquals(offering2, result.get(offering2Id)); + verify(serviceOfferingDao).listAllIncludingRemoved(); + } + + @Test + public void testGetServiceOfferingsMapEmptyList() { + when(serviceOfferingDao.listAllIncludingRemoved()).thenReturn(Collections.emptyList()); + Map result = 
capacityManager.getServiceOfferingsMap(); + assertTrue(result.isEmpty()); + verify(serviceOfferingDao).listAllIncludingRemoved(); + } + + @Test + public void testCheckIfHostHasCpuCapabilityAndCapacity() { + Float cpuOvercommit = 2.0f; + Float memoryOvercommit = 1.5f; + Pair clusterDetails = new Pair<>(String.valueOf(cpuOvercommit), String.valueOf(memoryOvercommit)); + doReturn(clusterDetails).when(capacityManager).getClusterValues(CLUSTER_ID); + doReturn(true).when(capacityManager).checkIfHostHasCpuCapability(any(Host.class), eq(OFFERING_CPU), + eq(OFFERING_CPU_SPEED)); + doReturn(true).when(capacityManager).checkIfHostHasCapacity(eq(host), eq(OFFERING_CPU * OFFERING_CPU_SPEED), + eq(ByteScaleUtils.mebibytesToBytes(OFFERING_MEMORY)), eq(false), eq(cpuOvercommit), eq(memoryOvercommit), eq(false)); + Pair result = capacityManager.checkIfHostHasCpuCapabilityAndCapacity(host, offering, false); + assertTrue(result.first()); + assertTrue(result.second()); + verify(capacityManager).getClusterValues(CLUSTER_ID); + verify(capacityManager).checkIfHostHasCpuCapability(any(Host.class), eq(OFFERING_CPU), eq(OFFERING_CPU_SPEED)); + verify(capacityManager).checkIfHostHasCapacity(eq(host), eq(OFFERING_CPU * OFFERING_CPU_SPEED), + eq(ByteScaleUtils.mebibytesToBytes(OFFERING_MEMORY)), + eq(false), eq(cpuOvercommit), eq(memoryOvercommit), eq(false)); + } + + @Test + public void testCheckIfHostHasNoCpuCapabilityButHasCapacity() { + Float cpuOvercommit = 1.5f; + Float memoryOvercommit = 1.2f; + Pair clusterDetails = new Pair<>(String.valueOf(cpuOvercommit), String.valueOf(memoryOvercommit)); + doReturn(clusterDetails).when(capacityManager).getClusterValues(CLUSTER_ID); + doReturn(false).when(capacityManager).checkIfHostHasCpuCapability(any(Host.class), eq(OFFERING_CPU), + eq(OFFERING_CPU_SPEED)); + doReturn(true).when(capacityManager).checkIfHostHasCapacity(eq(host), eq(OFFERING_CPU * OFFERING_CPU_SPEED), + eq(ByteScaleUtils.mebibytesToBytes(OFFERING_MEMORY)), eq(false), eq(cpuOvercommit), eq(memoryOvercommit), eq(false)); + Pair result = capacityManager.checkIfHostHasCpuCapabilityAndCapacity(host, offering, false); + assertFalse(result.first()); + assertTrue(result.second()); + verify(capacityManager).getClusterValues(CLUSTER_ID); + verify(capacityManager).checkIfHostHasCpuCapability(any(Host.class), eq(OFFERING_CPU), eq(OFFERING_CPU_SPEED)); + verify(capacityManager).checkIfHostHasCapacity(eq(host), eq(OFFERING_CPU * OFFERING_CPU_SPEED), + eq(ByteScaleUtils.mebibytesToBytes(OFFERING_MEMORY)), + eq(false), eq(cpuOvercommit), eq(memoryOvercommit), eq(false)); + } + + @Test + public void testCheckIfHostHasCpuCapabilityButNoCapacity() { + Float cpuOvercommit = 2.0f; + Float memoryOvercommit = 1.5f; + Pair clusterDetails = new Pair<>(String.valueOf(cpuOvercommit), String.valueOf(memoryOvercommit)); + doReturn(clusterDetails).when(capacityManager).getClusterValues(CLUSTER_ID); + doReturn(true).when(capacityManager).checkIfHostHasCpuCapability(any(Host.class), eq(OFFERING_CPU), + eq(OFFERING_CPU_SPEED)); + doReturn(false).when(capacityManager).checkIfHostHasCapacity(eq(host), eq(OFFERING_CPU * OFFERING_CPU_SPEED), + eq(ByteScaleUtils.mebibytesToBytes(OFFERING_MEMORY)), eq(false), eq(cpuOvercommit), eq(memoryOvercommit), eq(false)); + Pair result = capacityManager.checkIfHostHasCpuCapabilityAndCapacity(host, offering, false); + assertTrue(result.first()); + assertFalse(result.second()); + verify(capacityManager).getClusterValues(CLUSTER_ID); + verify(capacityManager).checkIfHostHasCpuCapability(any(Host.class), 
eq(OFFERING_CPU), eq(OFFERING_CPU_SPEED)); + verify(capacityManager).checkIfHostHasCapacity(eq(host), eq(OFFERING_CPU * OFFERING_CPU_SPEED), + eq(ByteScaleUtils.mebibytesToBytes(OFFERING_MEMORY)), + eq(false), eq(cpuOvercommit), eq(memoryOvercommit), eq(false)); + } +} diff --git a/server/src/test/java/com/cloud/configuration/ConfigurationManagerTest.java b/server/src/test/java/com/cloud/configuration/ConfigurationManagerTest.java index badf730a061..c2c78402aa1 100644 --- a/server/src/test/java/com/cloud/configuration/ConfigurationManagerTest.java +++ b/server/src/test/java/com/cloud/configuration/ConfigurationManagerTest.java @@ -831,7 +831,7 @@ public class ConfigurationManagerTest { @Test public void checkIfZoneIsDeletableSuccessTest() { - Mockito.when(_hostDao.listByDataCenterId(anyLong())).thenReturn(new ArrayList()); + Mockito.when(_hostDao.listEnabledIdsByDataCenterId(anyLong())).thenReturn(new ArrayList<>()); Mockito.when(_podDao.listByDataCenterId(anyLong())).thenReturn(new ArrayList()); Mockito.when(_privateIpAddressDao.countIPs(anyLong(), anyBoolean())).thenReturn(0); Mockito.when(_publicIpAddressDao.countIPs(anyLong(), anyBoolean())).thenReturn(0); @@ -845,11 +845,7 @@ public class ConfigurationManagerTest { @Test(expected = CloudRuntimeException.class) public void checkIfZoneIsDeletableFailureOnHostTest() { - HostVO hostVO = Mockito.mock(HostVO.class); - ArrayList arrayList = new ArrayList(); - arrayList.add(hostVO); - - Mockito.when(_hostDao.listByDataCenterId(anyLong())).thenReturn(arrayList); + Mockito.when(_hostDao.listEnabledIdsByDataCenterId(anyLong())).thenReturn(List.of(1L)); Mockito.when(_podDao.listByDataCenterId(anyLong())).thenReturn(new ArrayList()); Mockito.when(_privateIpAddressDao.countIPs(anyLong(), anyBoolean())).thenReturn(0); Mockito.when(_publicIpAddressDao.countIPs(anyLong(), anyBoolean())).thenReturn(0); @@ -867,7 +863,7 @@ public class ConfigurationManagerTest { ArrayList arrayList = new ArrayList(); arrayList.add(hostPodVO); - Mockito.when(_hostDao.listByDataCenterId(anyLong())).thenReturn(new ArrayList()); + Mockito.when(_hostDao.listEnabledIdsByDataCenterId(anyLong())).thenReturn(new ArrayList<>()); Mockito.when(_podDao.listByDataCenterId(anyLong())).thenReturn(arrayList); Mockito.when(_privateIpAddressDao.countIPs(anyLong(), anyBoolean())).thenReturn(0); Mockito.when(_publicIpAddressDao.countIPs(anyLong(), anyBoolean())).thenReturn(0); @@ -881,7 +877,7 @@ public class ConfigurationManagerTest { @Test(expected = CloudRuntimeException.class) public void checkIfZoneIsDeletableFailureOnPrivateIpAddressTest() { - Mockito.when(_hostDao.listByDataCenterId(anyLong())).thenReturn(new ArrayList()); + Mockito.when(_hostDao.listEnabledIdsByDataCenterId(anyLong())).thenReturn(new ArrayList<>()); Mockito.when(_podDao.listByDataCenterId(anyLong())).thenReturn(new ArrayList()); Mockito.when(_privateIpAddressDao.countIPs(anyLong(), anyBoolean())).thenReturn(1); Mockito.when(_publicIpAddressDao.countIPs(anyLong(), anyBoolean())).thenReturn(0); @@ -895,7 +891,7 @@ public class ConfigurationManagerTest { @Test(expected = CloudRuntimeException.class) public void checkIfZoneIsDeletableFailureOnPublicIpAddressTest() { - Mockito.when(_hostDao.listByDataCenterId(anyLong())).thenReturn(new ArrayList()); + Mockito.when(_hostDao.listEnabledIdsByDataCenterId(anyLong())).thenReturn(new ArrayList<>()); Mockito.when(_podDao.listByDataCenterId(anyLong())).thenReturn(new ArrayList()); Mockito.when(_privateIpAddressDao.countIPs(anyLong(), anyBoolean())).thenReturn(0); 
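// Illustrative sketch (not part of this patch): with listEnabledIdsByDataCenterId() returning
// plain ids, the "zone still has hosts" failure path no longer needs a mocked HostVO at all, as
// the rewritten test above shows. Condensed form of that pattern; the instance name and the zone
// id passed to checkIfZoneIsDeletable follow the surrounding test class, everything else is assumed.
@Test(expected = CloudRuntimeException.class)
public void checkIfZoneIsDeletableFailsWhileAnEnabledHostRemains() {
    Mockito.when(_hostDao.listEnabledIdsByDataCenterId(anyLong())).thenReturn(List.of(1L));
    configurationMgr.checkIfZoneIsDeletable(1L);
}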
Mockito.when(_publicIpAddressDao.countIPs(anyLong(), anyBoolean())).thenReturn(1); @@ -913,7 +909,7 @@ public class ConfigurationManagerTest { ArrayList arrayList = new ArrayList(); arrayList.add(vMInstanceVO); - Mockito.when(_hostDao.listByDataCenterId(anyLong())).thenReturn(new ArrayList()); + Mockito.when(_hostDao.listEnabledIdsByDataCenterId(anyLong())).thenReturn(new ArrayList<>()); Mockito.when(_podDao.listByDataCenterId(anyLong())).thenReturn(new ArrayList()); Mockito.when(_privateIpAddressDao.countIPs(anyLong(), anyBoolean())).thenReturn(0); Mockito.when(_publicIpAddressDao.countIPs(anyLong(), anyBoolean())).thenReturn(0); @@ -931,7 +927,7 @@ public class ConfigurationManagerTest { ArrayList arrayList = new ArrayList(); arrayList.add(volumeVO); - Mockito.when(_hostDao.listByDataCenterId(anyLong())).thenReturn(new ArrayList()); + Mockito.when(_hostDao.listEnabledIdsByDataCenterId(anyLong())).thenReturn(new ArrayList<>()); Mockito.when(_podDao.listByDataCenterId(anyLong())).thenReturn(new ArrayList()); Mockito.when(_privateIpAddressDao.countIPs(anyLong(), anyBoolean())).thenReturn(0); Mockito.when(_publicIpAddressDao.countIPs(anyLong(), anyBoolean())).thenReturn(0); @@ -949,7 +945,7 @@ public class ConfigurationManagerTest { ArrayList arrayList = new ArrayList(); arrayList.add(physicalNetworkVO); - Mockito.when(_hostDao.listByDataCenterId(anyLong())).thenReturn(new ArrayList()); + Mockito.when(_hostDao.listEnabledIdsByDataCenterId(anyLong())).thenReturn(new ArrayList<>()); Mockito.when(_podDao.listByDataCenterId(anyLong())).thenReturn(new ArrayList()); Mockito.when(_privateIpAddressDao.countIPs(anyLong(), anyBoolean())).thenReturn(0); Mockito.when(_publicIpAddressDao.countIPs(anyLong(), anyBoolean())).thenReturn(0); diff --git a/server/src/test/java/com/cloud/deploy/DeploymentPlanningManagerImplTest.java b/server/src/test/java/com/cloud/deploy/DeploymentPlanningManagerImplTest.java index 58bc8509768..e7fe559994d 100644 --- a/server/src/test/java/com/cloud/deploy/DeploymentPlanningManagerImplTest.java +++ b/server/src/test/java/com/cloud/deploy/DeploymentPlanningManagerImplTest.java @@ -889,7 +889,7 @@ public class DeploymentPlanningManagerImplTest { Pair> potentialResources = new Pair<>(host, suitableVolumeStoragePoolMap); Mockito.when(capacityMgr.checkIfHostReachMaxGuestLimit(host)).thenReturn(false); - Mockito.when(capacityMgr.checkIfHostHasCpuCapability(ArgumentMatchers.anyLong(), ArgumentMatchers.anyInt(), ArgumentMatchers.anyInt())).thenReturn(true); + Mockito.when(capacityMgr.checkIfHostHasCpuCapability(ArgumentMatchers.any(Host.class), ArgumentMatchers.anyInt(), ArgumentMatchers.anyInt())).thenReturn(true); Mockito.when(capacityMgr.checkIfHostHasCapacity( ArgumentMatchers.any(), ArgumentMatchers.anyInt(), @@ -1218,7 +1218,7 @@ public class DeploymentPlanningManagerImplTest { throw new RuntimeException(e); } List allClusters = List.of(101L, 102L, 103L, 104L); - Mockito.when(_clusterDao.listAllClusters(Mockito.anyLong())).thenReturn(allClusters); + Mockito.when(_clusterDao.listAllClusterIds(Mockito.anyLong())).thenReturn(allClusters); if (mockVolumes) { VolumeVO vol1 = Mockito.mock(VolumeVO.class); Mockito.when(vol1.getPoolId()).thenReturn(1L); diff --git a/server/src/test/java/com/cloud/network/Ipv6ServiceImplTest.java b/server/src/test/java/com/cloud/network/Ipv6ServiceImplTest.java index b59eeaa4624..a3bd58d82c5 100644 --- a/server/src/test/java/com/cloud/network/Ipv6ServiceImplTest.java +++ b/server/src/test/java/com/cloud/network/Ipv6ServiceImplTest.java @@ -16,6 +16,38 @@ 
// under the License. package com.cloud.network; +import java.net.URI; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Iterator; +import java.util.List; +import java.util.UUID; + +import javax.management.InstanceAlreadyExistsException; +import javax.management.MBeanRegistrationException; +import javax.management.MalformedObjectNameException; +import javax.management.NotCompliantMBeanException; + +import org.apache.cloudstack.api.command.user.ipv6.CreateIpv6FirewallRuleCmd; +import org.apache.cloudstack.api.command.user.ipv6.UpdateIpv6FirewallRuleCmd; +import org.apache.cloudstack.api.response.Ipv6RouteResponse; +import org.apache.cloudstack.api.response.VpcResponse; +import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; +import org.apache.commons.collections.CollectionUtils; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.MockedStatic; +import org.mockito.Mockito; +import org.mockito.MockitoAnnotations; +import org.mockito.junit.MockitoJUnitRunner; +import org.mockito.stubbing.Answer; + import com.cloud.api.ApiDBUtils; import com.cloud.dc.DataCenter; import com.cloud.dc.DataCenterGuestIpv6PrefixVO; @@ -63,36 +95,6 @@ import com.cloud.vm.dao.DomainRouterDao; import com.cloud.vm.dao.NicDao; import com.googlecode.ipv6.IPv6Network; import com.googlecode.ipv6.IPv6NetworkMask; -import org.apache.cloudstack.api.command.user.ipv6.CreateIpv6FirewallRuleCmd; -import org.apache.cloudstack.api.command.user.ipv6.UpdateIpv6FirewallRuleCmd; -import org.apache.cloudstack.api.response.Ipv6RouteResponse; -import org.apache.cloudstack.api.response.VpcResponse; -import org.apache.cloudstack.context.CallContext; -import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; -import org.apache.commons.collections.CollectionUtils; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.InjectMocks; -import org.mockito.Mock; -import org.mockito.MockedStatic; -import org.mockito.Mockito; -import org.mockito.MockitoAnnotations; -import org.mockito.junit.MockitoJUnitRunner; -import org.mockito.stubbing.Answer; - -import javax.management.InstanceAlreadyExistsException; -import javax.management.MBeanRegistrationException; -import javax.management.MalformedObjectNameException; -import javax.management.NotCompliantMBeanException; -import java.net.URI; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Iterator; -import java.util.List; -import java.util.UUID; @RunWith(MockitoJUnitRunner.class) public class Ipv6ServiceImplTest { diff --git a/server/src/test/java/com/cloud/network/NetworkServiceImplTest.java b/server/src/test/java/com/cloud/network/NetworkServiceImplTest.java index aea29d59356..c14ec2e9335 100644 --- a/server/src/test/java/com/cloud/network/NetworkServiceImplTest.java +++ b/server/src/test/java/com/cloud/network/NetworkServiceImplTest.java @@ -22,11 +22,11 @@ import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.ArgumentMatchers.nullable; import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static 
org.mockito.Mockito.times; import static org.mockito.Mockito.when; -import static org.mockito.Mockito.doReturn; import java.lang.reflect.Field; import java.util.ArrayList; @@ -38,15 +38,6 @@ import java.util.List; import java.util.Map; import java.util.UUID; -import com.cloud.domain.Domain; -import com.cloud.domain.DomainVO; -import com.cloud.domain.dao.DomainDao; -import com.cloud.network.dao.NsxProviderDao; -import com.cloud.network.dao.PublicIpQuarantineDao; -import com.cloud.network.vo.PublicIpQuarantineVO; -import com.cloud.user.dao.AccountDao; -import com.cloud.utils.net.Ip; -import com.cloud.exception.InsufficientAddressCapacityException; import org.apache.cloudstack.alert.AlertService; import org.apache.cloudstack.api.command.admin.network.CreateNetworkCmdByAdmin; import org.apache.cloudstack.api.command.user.address.UpdateQuarantinedIpCmd; @@ -56,6 +47,7 @@ import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.network.RoutedIpv4Manager; +import org.junit.After; import org.junit.Assert; import org.junit.Before; import org.junit.BeforeClass; @@ -64,8 +56,10 @@ import org.junit.runner.RunWith; import org.mockito.ArgumentMatchers; import org.mockito.InjectMocks; import org.mockito.Mock; +import org.mockito.MockedStatic; import org.mockito.Mockito; import org.mockito.MockitoAnnotations; +import org.mockito.junit.MockitoJUnitRunner; import org.springframework.test.util.ReflectionTestUtils; import com.cloud.agent.api.to.IpAddressTO; @@ -75,6 +69,10 @@ import com.cloud.configuration.ConfigurationManager; import com.cloud.dc.DataCenter; import com.cloud.dc.DataCenterVO; import com.cloud.dc.dao.DataCenterDao; +import com.cloud.domain.Domain; +import com.cloud.domain.DomainVO; +import com.cloud.domain.dao.DomainDao; +import com.cloud.exception.InsufficientAddressCapacityException; import com.cloud.exception.InsufficientCapacityException; import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.ResourceAllocationException; @@ -82,10 +80,13 @@ import com.cloud.network.dao.IPAddressDao; import com.cloud.network.dao.IPAddressVO; import com.cloud.network.dao.NetworkDao; import com.cloud.network.dao.NetworkVO; +import com.cloud.network.dao.NsxProviderDao; import com.cloud.network.dao.PhysicalNetworkDao; import com.cloud.network.dao.PhysicalNetworkVO; +import com.cloud.network.dao.PublicIpQuarantineDao; import com.cloud.network.router.CommandSetupHelper; import com.cloud.network.router.NetworkHelper; +import com.cloud.network.vo.PublicIpQuarantineVO; import com.cloud.network.vpc.VpcManager; import com.cloud.network.vpc.VpcVO; import com.cloud.network.vpc.dao.VpcDao; @@ -103,18 +104,17 @@ import com.cloud.user.AccountService; import com.cloud.user.AccountVO; import com.cloud.user.User; import com.cloud.user.UserVO; +import com.cloud.user.dao.AccountDao; import com.cloud.user.dao.UserDao; import com.cloud.utils.Pair; import com.cloud.utils.db.EntityManager; import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.net.Ip; import com.cloud.vm.DomainRouterVO; import com.cloud.vm.NicVO; import com.cloud.vm.VirtualMachine; import com.cloud.vm.dao.DomainRouterDao; import com.cloud.vm.dao.NicDao; -import org.junit.After; -import org.mockito.MockedStatic; -import org.mockito.junit.MockitoJUnitRunner; @RunWith(MockitoJUnitRunner.class) public class NetworkServiceImplTest { diff --git 
a/server/src/test/java/com/cloud/network/as/AutoScaleManagerImplTest.java b/server/src/test/java/com/cloud/network/as/AutoScaleManagerImplTest.java index 4c5531277fe..7d2b35361bc 100644 --- a/server/src/test/java/com/cloud/network/as/AutoScaleManagerImplTest.java +++ b/server/src/test/java/com/cloud/network/as/AutoScaleManagerImplTest.java @@ -16,6 +16,66 @@ // under the License. package com.cloud.network.as; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyList; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.ArgumentMatchers.matches; +import static org.mockito.ArgumentMatchers.nullable; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.when; + +import java.lang.reflect.Field; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Date; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.CompletionService; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.ScheduledExecutorService; + +import org.apache.cloudstack.affinity.AffinityGroupVO; +import org.apache.cloudstack.affinity.dao.AffinityGroupDao; +import org.apache.cloudstack.annotation.AnnotationService; +import org.apache.cloudstack.annotation.dao.AnnotationDao; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.command.admin.autoscale.CreateCounterCmd; +import org.apache.cloudstack.api.command.user.autoscale.CreateAutoScalePolicyCmd; +import org.apache.cloudstack.api.command.user.autoscale.CreateAutoScaleVmGroupCmd; +import org.apache.cloudstack.api.command.user.autoscale.CreateAutoScaleVmProfileCmd; +import org.apache.cloudstack.api.command.user.autoscale.CreateConditionCmd; +import org.apache.cloudstack.api.command.user.autoscale.ListCountersCmd; +import org.apache.cloudstack.api.command.user.autoscale.UpdateAutoScaleVmGroupCmd; +import org.apache.cloudstack.api.command.user.autoscale.UpdateAutoScaleVmProfileCmd; +import org.apache.cloudstack.api.command.user.autoscale.UpdateConditionCmd; +import org.apache.cloudstack.api.command.user.vm.DeployVMCmd; +import org.apache.cloudstack.config.ApiServiceConfiguration; +import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.framework.config.ConfigKey; +import org.apache.cloudstack.userdata.UserDataManager; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.MockedStatic; +import org.mockito.Mockito; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; + import com.cloud.agent.AgentManager; import com.cloud.agent.api.PerformanceMonitorAnswer; import com.cloud.agent.api.PerformanceMonitorCommand; @@ -39,6 +99,7 @@ import com.cloud.exception.OperationTimedoutException; import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.ResourceInUseException; import com.cloud.exception.ResourceUnavailableException; +import com.cloud.host.Host; 
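// Illustrative sketch (not part of this patch): the AutoScaleManagerImplTest changes below stub
// getVirtualMachineStatistics(Host, vmIds) instead of the old (hostId, hostName, vmIds) overload.
// The production method is not shown in this hunk, so the outline below is an assumed reading of
// getVmStatsByIdFromHost(), matching only what the tests verify (null host -> empty result without
// a stats call; otherwise a single Host-based call). Generics and local names are assumptions.
Map<Long, VmStatsEntry> getVmStatsByIdFromHost(final long hostId, final List<Long> vmIds) {
    final HostVO host = hostDao.findById(hostId);
    if (host == null) {
        return new HashMap<>(); // nothing to collect; getVirtualMachineStatistics is never invoked
    }
    final Map<Long, VmStatsEntry> stats = virtualMachineManager.getVirtualMachineStatistics(host, vmIds);
    return stats != null ? stats : new HashMap<>();
}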
import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; import com.cloud.network.Network; @@ -106,65 +167,6 @@ import com.cloud.vm.VmStats; import com.cloud.vm.dao.DomainRouterDao; import com.cloud.vm.dao.UserVmDao; import com.cloud.vm.dao.VMInstanceDao; -import org.apache.cloudstack.affinity.AffinityGroupVO; -import org.apache.cloudstack.affinity.dao.AffinityGroupDao; -import org.apache.cloudstack.annotation.AnnotationService; -import org.apache.cloudstack.annotation.dao.AnnotationDao; -import org.apache.cloudstack.api.ApiConstants; -import org.apache.cloudstack.api.BaseCmd; -import org.apache.cloudstack.api.command.admin.autoscale.CreateCounterCmd; -import org.apache.cloudstack.api.command.user.autoscale.CreateAutoScalePolicyCmd; -import org.apache.cloudstack.api.command.user.autoscale.CreateAutoScaleVmGroupCmd; -import org.apache.cloudstack.api.command.user.autoscale.CreateAutoScaleVmProfileCmd; -import org.apache.cloudstack.api.command.user.autoscale.CreateConditionCmd; -import org.apache.cloudstack.api.command.user.autoscale.ListCountersCmd; -import org.apache.cloudstack.api.command.user.autoscale.UpdateAutoScaleVmGroupCmd; -import org.apache.cloudstack.api.command.user.autoscale.UpdateAutoScaleVmProfileCmd; -import org.apache.cloudstack.api.command.user.autoscale.UpdateConditionCmd; -import org.apache.cloudstack.api.command.user.vm.DeployVMCmd; -import org.apache.cloudstack.config.ApiServiceConfiguration; -import org.apache.cloudstack.context.CallContext; -import org.apache.cloudstack.framework.config.ConfigKey; -import org.apache.cloudstack.userdata.UserDataManager; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.InjectMocks; -import org.mockito.Mock; -import org.mockito.MockedStatic; -import org.mockito.Mockito; -import org.mockito.Spy; -import org.mockito.junit.MockitoJUnitRunner; -import org.springframework.test.util.ReflectionTestUtils; - -import java.lang.reflect.Field; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.Date; -import java.util.HashMap; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import java.util.concurrent.CompletionService; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Executors; -import java.util.concurrent.Future; -import java.util.concurrent.ScheduledExecutorService; - -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyList; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.ArgumentMatchers.matches; -import static org.mockito.ArgumentMatchers.nullable; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.when; @RunWith(MockitoJUnitRunner.class) public class AutoScaleManagerImplTest { @@ -2256,32 +2258,31 @@ public class AutoScaleManagerImplTest { } @Test - public void getVmStatsByIdFromHost() { - List vmIds = Mockito.mock(ArrayList.class); - - Map result = autoScaleManagerImplSpy.getVmStatsByIdFromHost(-1L, vmIds); - + public void getVmStatsByIdFromHostNullHost() { + Map result = autoScaleManagerImplSpy.getVmStatsByIdFromHost(-1L, List.of(1L, 2L)); Assert.assertEquals(0, result.size()); + Mockito.verify(virtualMachineManager, 
never()).getVirtualMachineStatistics(Mockito.any(Host.class), anyList()); + } - Mockito.verify(virtualMachineManager, never()).getVirtualMachineStatistics(anyLong(), anyString(), anyList()); + @Test + public void getVmStatsByIdFromHostEmptyVmList() { + Mockito.when(hostDao.findById(1L)).thenReturn(hostMock); + Map result = autoScaleManagerImplSpy.getVmStatsByIdFromHost(1L, Collections.emptyList()); + Mockito.verify(virtualMachineManager).getVirtualMachineStatistics(hostMock, Collections.emptyList()); + Assert.assertEquals(0, result.size()); } @Test public void getVmStatsByIdFromHost2() { - List vmIds = Mockito.mock(ArrayList.class); - VmStatsEntry vmStats = new VmStatsEntry(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, "vm"); + List vmIds = List.of(virtualMachineId); + VmStatsEntry vmStats = new VmStatsEntry(virtualMachineId, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, "vm"); HashMap vmStatsById = new HashMap<>(); vmStatsById.put(virtualMachineId, vmStats); when(hostDao.findById(hostId)).thenReturn(hostMock); - when(hostMock.getId()).thenReturn(hostId); - when(hostMock.getName()).thenReturn(hostName); - Mockito.doReturn(vmStatsById).when(virtualMachineManager).getVirtualMachineStatistics(anyLong(), anyString(), anyList()); - + Mockito.doReturn(vmStatsById).when(virtualMachineManager).getVirtualMachineStatistics(hostMock, vmIds); Map result = autoScaleManagerImplSpy.getVmStatsByIdFromHost(hostId, vmIds); - Assert.assertEquals(vmStatsById, result); - - Mockito.verify(virtualMachineManager).getVirtualMachineStatistics(anyLong(), anyString(), anyList()); + Mockito.verify(virtualMachineManager).getVirtualMachineStatistics(Mockito.any(Host.class), anyList()); } @Test diff --git a/server/src/test/java/com/cloud/resource/MockResourceManagerImpl.java b/server/src/test/java/com/cloud/resource/MockResourceManagerImpl.java index e8b297ff188..b7bb2238334 100755 --- a/server/src/test/java/com/cloud/resource/MockResourceManagerImpl.java +++ b/server/src/test/java/com/cloud/resource/MockResourceManagerImpl.java @@ -446,7 +446,7 @@ public class MockResourceManagerImpl extends ManagerBase implements ResourceMana * @see com.cloud.resource.ResourceManager#listAvailHypervisorInZone(java.lang.Long, java.lang.Long) */ @Override - public List listAvailHypervisorInZone(final Long hostId, final Long zoneId) { + public List listAvailHypervisorInZone(final Long zoneId) { // TODO Auto-generated method stub return null; } @@ -473,7 +473,7 @@ public class MockResourceManagerImpl extends ManagerBase implements ResourceMana * @see com.cloud.resource.ResourceManager#getHostStatistics(long) */ @Override - public HostStats getHostStatistics(final long hostId) { + public HostStats getHostStatistics(final Host host) { // TODO Auto-generated method stub return null; } diff --git a/server/src/test/java/com/cloud/resourcelimit/ResourceLimitManagerImplTest.java b/server/src/test/java/com/cloud/resourcelimit/ResourceLimitManagerImplTest.java index defcd09b174..34030626d22 100644 --- a/server/src/test/java/com/cloud/resourcelimit/ResourceLimitManagerImplTest.java +++ b/server/src/test/java/com/cloud/resourcelimit/ResourceLimitManagerImplTest.java @@ -59,10 +59,8 @@ import com.cloud.offering.DiskOffering; import com.cloud.offering.ServiceOffering; import com.cloud.projects.ProjectVO; import com.cloud.projects.dao.ProjectDao; -import com.cloud.service.ServiceOfferingVO; import com.cloud.service.dao.ServiceOfferingDao; import com.cloud.storage.DiskOfferingVO; -import com.cloud.storage.VMTemplateVO; import com.cloud.storage.VolumeVO; import 
com.cloud.storage.dao.DiskOfferingDao; import com.cloud.storage.dao.VMTemplateDao; @@ -715,8 +713,8 @@ public class ResourceLimitManagerImplTest extends TestCase { @Test public void testGetVmsWithAccountAndTagNegative() { String tag = hostTags.get(0); - Mockito.when(serviceOfferingDao.listByHostTag(tag)).thenReturn(null); - Mockito.when(vmTemplateDao.listByTemplateTag(tag)).thenReturn(null); + Mockito.when(serviceOfferingDao.listIdsByHostTag(tag)).thenReturn(null); + Mockito.when(vmTemplateDao.listIdsByTemplateTag(tag)).thenReturn(null); List result = resourceLimitManager.getVmsWithAccountAndTag(1L, hostTags.get(0)); Assert.assertTrue(CollectionUtils.isEmpty(result)); } @@ -725,12 +723,8 @@ public class ResourceLimitManagerImplTest extends TestCase { public void testGetVmsWithAccountAndTag() throws NoSuchFieldException, IllegalAccessException { overrideDefaultConfigValue(VirtualMachineManager.ResourceCountRunningVMsonly, "_defaultValue", "true"); String tag = hostTags.get(0); - ServiceOfferingVO serviceOfferingVO = Mockito.mock(ServiceOfferingVO.class); - Mockito.when(serviceOfferingVO.getId()).thenReturn(1L); - VMTemplateVO templateVO = Mockito.mock(VMTemplateVO.class); - Mockito.when(templateVO.getId()).thenReturn(1L); - Mockito.when(serviceOfferingDao.listByHostTag(tag)).thenReturn(List.of(serviceOfferingVO)); - Mockito.when(vmTemplateDao.listByTemplateTag(tag)).thenReturn(List.of(templateVO)); + Mockito.when(serviceOfferingDao.listIdsByHostTag(tag)).thenReturn(List.of(1L)); + Mockito.when(vmTemplateDao.listIdsByTemplateTag(tag)).thenReturn(List.of(1L)); List vmList = List.of(Mockito.mock(UserVmJoinVO.class)); Mockito.when(userVmJoinDao.listByAccountServiceOfferingTemplateAndNotInState(Mockito.anyLong(), Mockito.anyList(), Mockito.anyList(), Mockito.anyList())).thenReturn(vmList); List result = resourceLimitManager.getVmsWithAccountAndTag(1L, tag); diff --git a/server/src/test/java/com/cloud/user/DomainManagerImplTest.java b/server/src/test/java/com/cloud/user/DomainManagerImplTest.java index 2d52f0aa52e..5a1dba215ae 100644 --- a/server/src/test/java/com/cloud/user/DomainManagerImplTest.java +++ b/server/src/test/java/com/cloud/user/DomainManagerImplTest.java @@ -17,6 +17,30 @@ package com.cloud.user; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.UUID; + +import org.apache.cloudstack.annotation.dao.AnnotationDao; +import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; +import org.apache.cloudstack.framework.messagebus.MessageBus; +import org.apache.cloudstack.framework.messagebus.PublishScope; +import org.apache.cloudstack.network.RoutedIpv4Manager; +import org.apache.cloudstack.region.RegionManager; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.ArgumentMatchers; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.MockedStatic; +import org.mockito.Mockito; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; + import com.cloud.api.query.dao.DiskOfferingJoinDao; import com.cloud.api.query.dao.NetworkOfferingJoinDao; import com.cloud.api.query.dao.ServiceOfferingJoinDao; @@ -44,29 +68,6 @@ import com.cloud.utils.db.GlobalLock; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.NetUtils; -import org.apache.cloudstack.annotation.dao.AnnotationDao; -import 
org.apache.cloudstack.context.CallContext; -import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; -import org.apache.cloudstack.framework.messagebus.MessageBus; -import org.apache.cloudstack.framework.messagebus.PublishScope; -import org.apache.cloudstack.network.RoutedIpv4Manager; -import org.apache.cloudstack.region.RegionManager; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.InjectMocks; -import org.mockito.ArgumentMatchers; -import org.mockito.Mock; -import org.mockito.MockedStatic; -import org.mockito.Mockito; -import org.mockito.Spy; -import org.mockito.junit.MockitoJUnitRunner; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.UUID; @RunWith(MockitoJUnitRunner.class) diff --git a/server/src/test/java/com/cloud/user/MockUsageEventDao.java b/server/src/test/java/com/cloud/user/MockUsageEventDao.java index 7e15a4f0aaf..f73203ce564 100644 --- a/server/src/test/java/com/cloud/user/MockUsageEventDao.java +++ b/server/src/test/java/com/cloud/user/MockUsageEventDao.java @@ -27,6 +27,7 @@ import com.cloud.utils.db.SearchCriteria; import javax.naming.ConfigurationException; import java.util.ArrayList; +import java.util.Collections; import java.util.Date; import java.util.List; import java.util.Map; @@ -142,6 +143,11 @@ public class MockUsageEventDao implements UsageEventDao{ return null; } + @Override + public List listAllIds() { + return Collections.emptyList(); + } + @Override public List search(SearchCriteria sc, Filter filter) { diff --git a/server/src/test/java/org/apache/cloudstack/agent/lb/IndirectAgentLBServiceImplTest.java b/server/src/test/java/org/apache/cloudstack/agent/lb/IndirectAgentLBServiceImplTest.java index 2f0c1c3185c..0c0097393ca 100644 --- a/server/src/test/java/org/apache/cloudstack/agent/lb/IndirectAgentLBServiceImplTest.java +++ b/server/src/test/java/org/apache/cloudstack/agent/lb/IndirectAgentLBServiceImplTest.java @@ -16,13 +16,17 @@ // under the License. 
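// Illustrative sketch (not part of this patch): the test rewrite below replaces hostDao.listAll()
// stubs with a single stub of the new DAO query, so the ordered-list assertion exercises only the
// sort in getOrderedHostIdList(). Condensed version of that stubbing, shown out of context with
// the host mocks and DC_1_ID constant from the test class:
doReturn(Arrays.asList(host4.getId(), host2.getId(), host1.getId(), host3.getId()))
        .when(hostDao).findHostIdsByZoneClusterResourceStateTypeAndHypervisorType(
                Mockito.eq(DC_1_ID), Mockito.eq(null),
                Mockito.anyList(), Mockito.anyList(), Mockito.anyList());
Assert.assertEquals(Arrays.asList(host1.getId(), host2.getId(), host3.getId(), host4.getId()),
        agentMSLB.getOrderedHostIdList(DC_1_ID));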
package org.apache.cloudstack.agent.lb; -import com.cloud.agent.AgentManager; -import com.cloud.host.Host; -import com.cloud.host.HostVO; -import com.cloud.host.dao.HostDao; -import com.cloud.hypervisor.Hypervisor; -import com.cloud.resource.ResourceState; -import com.cloud.utils.exception.CloudRuntimeException; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.when; + +import java.lang.reflect.Field; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.stream.Collectors; + import org.apache.cloudstack.config.ApiServiceConfiguration; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.messagebus.MessageBus; @@ -32,16 +36,17 @@ import org.junit.Before; import org.junit.Test; import org.mockito.InjectMocks; import org.mockito.Mock; +import org.mockito.Mockito; import org.mockito.MockitoAnnotations; import org.mockito.Spy; -import java.lang.reflect.Field; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashSet; -import java.util.List; - -import static org.mockito.Mockito.when; +import com.cloud.agent.AgentManager; +import com.cloud.host.Host; +import com.cloud.host.HostVO; +import com.cloud.host.dao.HostDao; +import com.cloud.hypervisor.Hypervisor; +import com.cloud.resource.ResourceState; +import com.cloud.utils.exception.CloudRuntimeException; public class IndirectAgentLBServiceImplTest { @@ -85,6 +90,7 @@ public class IndirectAgentLBServiceImplTest { } private void configureMocks() throws NoSuchFieldException, IllegalAccessException { + List hosts = Arrays.asList(host1, host2, host3, host4); long id = 1; for (HostVO h : Arrays.asList(host1, host2, host3, host4)) { when(h.getId()).thenReturn(id); @@ -98,7 +104,9 @@ public class IndirectAgentLBServiceImplTest { addField(agentMSLB, "hostDao", hostDao); addField(agentMSLB, "agentManager", agentManager); - when(hostDao.listAll()).thenReturn(Arrays.asList(host4, host2, host1, host3)); + List hostIds = hosts.stream().map(HostVO::getId).collect(Collectors.toList()); + doReturn(hostIds).when(hostDao).findHostIdsByZoneClusterResourceStateTypeAndHypervisorType(Mockito.anyLong(), + Mockito.eq(null), Mockito.anyList(), Mockito.anyList(), Mockito.anyList()); } @Before @@ -193,21 +201,18 @@ public class IndirectAgentLBServiceImplTest { } @Test - public void testGetOrderedRunningHostIdsNullList() { - when(hostDao.listAll()).thenReturn(null); - Assert.assertTrue(agentMSLB.getOrderedHostIdList(DC_1_ID).size() == 0); + public void testGetOrderedRunningHostIdsEmptyList() { + doReturn(Collections.emptyList()).when(hostDao).findHostIdsByZoneClusterResourceStateTypeAndHypervisorType( + Mockito.eq(DC_1_ID), Mockito.eq(null), Mockito.anyList(), Mockito.anyList(), Mockito.anyList()); + Assert.assertTrue(agentMSLB.getOrderedHostIdList(DC_1_ID).isEmpty()); } @Test public void testGetOrderedRunningHostIdsOrderList() { - when(hostDao.listAll()).thenReturn(Arrays.asList(host4, host2, host1, host3)); + doReturn(Arrays.asList(host4.getId(), host2.getId(), host1.getId(), host3.getId())).when(hostDao) + .findHostIdsByZoneClusterResourceStateTypeAndHypervisorType(Mockito.eq(DC_1_ID), Mockito.eq(null), + Mockito.anyList(), Mockito.anyList(), Mockito.anyList()); Assert.assertEquals(Arrays.asList(host1.getId(), host2.getId(), host3.getId(), host4.getId()), agentMSLB.getOrderedHostIdList(DC_1_ID)); } - - @Test - public void testGetHostsPerZoneNullHosts() { - 
when(hostDao.listAll()).thenReturn(null); - Assert.assertTrue(agentMSLB.getOrderedHostIdList(DC_2_ID).size() == 0); - } } diff --git a/server/src/test/java/org/apache/cloudstack/networkoffering/CreateNetworkOfferingTest.java b/server/src/test/java/org/apache/cloudstack/networkoffering/CreateNetworkOfferingTest.java index 25b4bdda45f..403b14965b1 100644 --- a/server/src/test/java/org/apache/cloudstack/networkoffering/CreateNetworkOfferingTest.java +++ b/server/src/test/java/org/apache/cloudstack/networkoffering/CreateNetworkOfferingTest.java @@ -18,7 +18,6 @@ package org.apache.cloudstack.networkoffering; import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.ArgumentMatchers.nullable; import java.util.HashMap; @@ -28,21 +27,21 @@ import java.util.Set; import javax.inject.Inject; -import com.cloud.network.dao.PublicIpQuarantineDao; import org.apache.cloudstack.annotation.dao.AnnotationDao; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.cloudstack.framework.config.impl.ConfigurationVO; import org.apache.cloudstack.resourcedetail.dao.UserIpAddressDetailsDao; import org.junit.After; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; import org.mockito.Mockito; -import org.springframework.test.context.ContextConfiguration; -import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; +import org.mockito.junit.MockitoJUnitRunner; import com.cloud.configuration.ConfigurationManager; +import com.cloud.configuration.ConfigurationManagerImpl; import com.cloud.dc.dao.VlanDetailsDao; import com.cloud.event.dao.UsageEventDao; import com.cloud.event.dao.UsageEventDetailsDao; @@ -52,6 +51,7 @@ import com.cloud.network.Network.Provider; import com.cloud.network.Network.Service; import com.cloud.network.Networks.TrafficType; import com.cloud.network.dao.LoadBalancerVMMapDao; +import com.cloud.network.dao.PublicIpQuarantineDao; import com.cloud.network.vpc.VpcManager; import com.cloud.offering.NetworkOffering.Availability; import com.cloud.offerings.NetworkOfferingServiceMapVO; @@ -61,48 +61,44 @@ import com.cloud.offerings.dao.NetworkOfferingServiceMapDao; import com.cloud.user.AccountManager; import com.cloud.user.AccountVO; import com.cloud.user.UserVO; -import com.cloud.utils.component.ComponentContext; import com.cloud.vm.dao.UserVmDetailsDao; + import junit.framework.TestCase; -@RunWith(SpringJUnit4ClassRunner.class) -@ContextConfiguration(locations = "classpath:/createNetworkOffering.xml") +@RunWith(MockitoJUnitRunner.class) public class CreateNetworkOfferingTest extends TestCase { - @Inject - ConfigurationManager configMgr; - - @Inject + @Mock ConfigurationDao configDao; - @Inject + @Mock NetworkOfferingDao offDao; - @Inject + @Mock NetworkOfferingServiceMapDao mapDao; - @Inject + @Mock AccountManager accountMgr; - @Inject + @Mock VpcManager vpcMgr; - @Inject + @Mock UserVmDetailsDao userVmDetailsDao; - @Inject + @Mock UsageEventDao UsageEventDao; - @Inject + @Mock UsageEventDetailsDao usageEventDetailsDao; - @Inject + @Mock UserIpAddressDetailsDao userIpAddressDetailsDao; - @Inject + @Mock LoadBalancerVMMapDao _loadBalancerVMMapDao; - @Inject + @Mock AnnotationDao annotationDao; @Inject VlanDetailsDao vlanDetailsDao; @@ -110,15 +106,12 @@ public class CreateNetworkOfferingTest extends TestCase { @Inject PublicIpQuarantineDao 
     PublicIpQuarantineDao publicIpQuarantineDao;
 
+    @InjectMocks
+    ConfigurationManager configMgr = new ConfigurationManagerImpl();
+
     @Override
     @Before
     public void setUp() {
-        ComponentContext.initComponentsLifeCycle();
-
-        ConfigurationVO configVO = new ConfigurationVO("200", "200", "200", "200", "200", "200");
-        Mockito.when(configDao.findByName(anyString())).thenReturn(configVO);
-
-        Mockito.when(offDao.persist(any(NetworkOfferingVO.class))).thenReturn(new NetworkOfferingVO());
         Mockito.when(offDao.persist(any(NetworkOfferingVO.class), nullable(Map.class))).thenReturn(new NetworkOfferingVO());
         Mockito.when(mapDao.persist(any(NetworkOfferingServiceMapVO.class))).thenReturn(new NetworkOfferingServiceMapVO());
         Mockito.when(accountMgr.getSystemUser()).thenReturn(new UserVO(1));
diff --git a/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java b/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java
index dc6b042fc3a..d8e44203187 100644
--- a/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java
+++ b/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java
@@ -16,6 +16,8 @@
 // under the License.
 package org.apache.cloudstack.secondarystorage;
 
+import static com.cloud.configuration.Config.SecStorageAllowedInternalDownloadSites;
+
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.util.ArrayList;
@@ -157,8 +159,6 @@ import com.cloud.vm.dao.SecondaryStorageVmDao;
 import com.cloud.vm.dao.UserVmDetailsDao;
 import com.cloud.vm.dao.VMInstanceDao;
 
-import static com.cloud.configuration.Config.SecStorageAllowedInternalDownloadSites;
-
 /**
  * Class to manage secondary storages.
  * Possible secondary storage VM state transition cases:
@@ -813,11 +813,9 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar
     }
 
     public boolean isZoneReady(Map<Long, ZoneHostInfo> zoneHostInfoMap, long dataCenterId) {
-        List<HostVO> hosts = _hostDao.listByDataCenterId(dataCenterId);
-        if (CollectionUtils.isEmpty(hosts)) {
-            if (logger.isDebugEnabled()) {
-                logger.debug("Zone " + dataCenterId + " has no host available which is enabled and in Up state");
-            }
+        Integer totalUpAndEnabledHosts = _hostDao.countUpAndEnabledHostsInZone(dataCenterId);
+        if (totalUpAndEnabledHosts != null && totalUpAndEnabledHosts < 1) {
+            logger.debug("Zone {} has no host available which is enabled and in Up state", dataCenterId);
             return false;
         }
         ZoneHostInfo zoneHostInfo = zoneHostInfoMap.get(dataCenterId);
@@ -844,8 +842,8 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar
         }
 
         boolean useLocalStorage = BooleanUtils.toBoolean(ConfigurationManagerImpl.SystemVMUseLocalStorage.valueIn(dataCenterId));
-        List<Pair<Long, Integer>> storagePoolHostInfos = _storagePoolHostDao.getDatacenterStoragePoolHostInfo(dataCenterId, !useLocalStorage);
-        if (CollectionUtils.isNotEmpty(storagePoolHostInfos) && storagePoolHostInfos.get(0).second() > 0) {
+        boolean hasStoragePoolHostInfo = _storagePoolHostDao.hasDatacenterStoragePoolHostInfo(dataCenterId, !useLocalStorage);
+        if (hasStoragePoolHostInfo) {
             return true;
         } else {
             if (logger.isDebugEnabled()) {
diff --git a/setup/db/create-schema-simulator.sql b/setup/db/create-schema-simulator.sql
index 73896af1f12..f52faa043d8 100644
--- a/setup/db/create-schema-simulator.sql
+++ b/setup/db/create-schema-simulator.sql
@@ -64,7 +64,8 @@ CREATE TABLE `simulator`.`mockstoragepool` (
   `capacity` bigint,
   `pool_type` varchar(40),
   `hostguid` varchar(255) UNIQUE,
-  PRIMARY KEY (`id`)
+  PRIMARY KEY (`id`),
+  INDEX `i_mockstoragepool__guid`(`guid`)
 ) ENGINE=InnoDB DEFAULT CHARSET=utf8;
diff --git a/test/integration/smoke/test_dynamicroles.py b/test/integration/smoke/test_dynamicroles.py
index b91ba9c2eba..e404835fbb8 100644
--- a/test/integration/smoke/test_dynamicroles.py
+++ b/test/integration/smoke/test_dynamicroles.py
@@ -18,7 +18,7 @@
 from marvin.cloudstackAPI import *
 from marvin.cloudstackTestCase import cloudstackTestCase
 from marvin.cloudstackException import CloudstackAPIException
-from marvin.lib.base import Account, Role, RolePermission
+from marvin.lib.base import Account, Role, RolePermission, Configurations
 from marvin.lib.utils import cleanup_resources
 from nose.plugins.attrib import attr
 from random import shuffle
@@ -26,6 +26,7 @@ from random import shuffle
 
 import copy
 import random
 import re
+import time
 
 class TestData(object):
@@ -109,6 +110,14 @@ class TestDynamicRoles(cloudstackTestCase):
             self.testdata["account"],
             roleid=self.role.id
         )
+
+        cache_period_config = Configurations.list(
+            self.apiclient,
+            name='dynamic.apichecker.cache.period'
+        )[0]
+
+        self.cache_period = int(cache_period_config.value)
+
         self.cleanup = [
             self.account,
             self.rolepermission,
@@ -623,6 +632,8 @@ class TestDynamicRoles(cloudstackTestCase):
             testdata
         )
 
+        time.sleep(self.cache_period + 5)
+
         userApiClient = self.getUserApiClient(self.account.name, domain=self.account.domain, role_type=self.account.roletype)
 
         # Perform listApis check
@@ -645,6 +656,8 @@ class TestDynamicRoles(cloudstackTestCase):
             self.dbclient.execute("insert into role_permissions (uuid, role_id, rule, permission, sort_order) values (UUID(), %d, '%s', '%s', %d)" % (roleId, rule, perm.upper(), sortOrder))
             sortOrder += 1
 
+        time.sleep(self.cache_period + 5)
+
         userApiClient = self.getUserApiClient(self.account.name, domain=self.account.domain, role_type=self.account.roletype)
 
         # Perform listApis check
diff --git a/ui/src/config/section/infra/hosts.js b/ui/src/config/section/infra/hosts.js
index 727da7242d7..5def7f3b7fc 100644
--- a/ui/src/config/section/infra/hosts.js
+++ b/ui/src/config/section/infra/hosts.js
@@ -27,7 +27,7 @@ export default {
   searchFilters: ['name', 'zoneid', 'podid', 'clusterid', 'hypervisor'],
   resourceType: 'Host',
   filters: () => {
-    const filters = ['enabled', 'disabled', 'maintenance', 'up', 'down', 'alert']
+    const filters = ['enabled', 'disabled', 'maintenance', 'up', 'down', 'disconnected', 'alert']
     return filters
   },
   params: { type: 'routing' },
diff --git a/ui/src/views/AutogenView.vue b/ui/src/views/AutogenView.vue
index 9b94e1d7686..27445ddeb29 100644
--- a/ui/src/views/AutogenView.vue
+++ b/ui/src/views/AutogenView.vue
@@ -1755,7 +1755,7 @@ export default {
       if (filter === 'all') {
         delete query.resourcestate
         delete query.state
-      } else if (['up', 'down', 'alert'].includes(filter)) {
+      } else if (['up', 'down', 'disconnected', 'alert'].includes(filter)) {
         delete query.resourcestate
         query.state = filter
       } else {
diff --git a/ui/src/views/dashboard/CapacityDashboard.vue b/ui/src/views/dashboard/CapacityDashboard.vue
index c2461b5adce..53a3d87aa23 100644
--- a/ui/src/views/dashboard/CapacityDashboard.vue
+++ b/ui/src/views/dashboard/CapacityDashboard.vue
@@ -41,7 +41,7 @@
+          @click="() => { updateData(zoneSelected, true); listAlerts(); listEvents(); }">
           {{ $t('label.fetch.latest') }}
@@ -170,7 +170,7 @@
-
+