Made changes to configuration. Eliminated ConfigValue and only use ConfigKey

commit 8f556e6d88, parent b8e79c30a8
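For orientation before the hunks: the pattern this commit converges on declares each setting once as a static ConfigKey constant and reads it directly with value() (or valueIn(scopeId) for scoped settings), instead of holding a per-instance ConfigValue field resolved from the ConfigDepot in configure(). Below is a minimal sketch of that pattern, using only the constructor and accessors that appear in the diff itself; the ExampleManager class and its "example.wait" key are illustrative, not part of the commit.

    import org.apache.cloudstack.framework.config.ConfigKey;

    public class ExampleManager {
        // Old style, removed by this commit:
        //   protected ConfigValue<Integer> _wait;        // one field per setting
        //   _wait = _configDepot.get(Wait);              // resolved in configure()
        //   long timeout = _wait.value();                // read through the wrapper
        //
        // New style: the ConfigKey constant itself is the handle; no field, no depot lookup.
        // Key name "example.wait" is hypothetical; the argument order mirrors the Wait key in the diff.
        static final ConfigKey<Integer> Wait = new ConfigKey<Integer>("Advanced", Integer.class, "example.wait", "1800",
                "Time in seconds to wait for control commands to return", true);

        long timeoutSeconds() {
            // Global read; cluster- or zone-scoped keys are read with valueIn(id),
            // as the reConfigureVm and guest-domain-suffix hunks below do.
            return Wait.value();
        }
    }

This is why most hunks below do little more than delete a ConfigValue field and its _configDepot.get(...) line and switch the call site to Key.value().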
@@ -16,6 +16,8 @@
 // under the License.
 package com.cloud.agent;
 
+import org.apache.cloudstack.framework.config.ConfigKey;
+
 import com.cloud.agent.api.Answer;
 import com.cloud.agent.api.Command;
 import com.cloud.agent.manager.Commands;
@@ -30,6 +32,7 @@ import com.cloud.hypervisor.Hypervisor.HypervisorType;
 */
 public interface AgentManager {
 final static String WaitCK = "wait";
+static final ConfigKey<Integer> Wait = new ConfigKey<Integer>("Advanced", Integer.class, WaitCK, "1800", "Time in seconds to wait for control commands to return", true);
 
 public enum TapAgentsAction {
 Add, Del, Contains,
@@ -43,7 +43,6 @@ import org.apache.log4j.Logger;
 import org.apache.cloudstack.context.ServerContexts;
 import org.apache.cloudstack.framework.config.ConfigDepot;
 import org.apache.cloudstack.framework.config.ConfigKey;
-import org.apache.cloudstack.framework.config.ConfigValue;
 import org.apache.cloudstack.framework.config.Configurable;
 import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
 import org.apache.cloudstack.utils.identity.ManagementServerNode;
@@ -155,13 +154,8 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
 
 protected int _retry = 2;
 
-protected ConfigValue<Integer> _wait;
-protected ConfigValue<Integer> _alertWait;
 protected long _nodeId = -1;
 
-protected ConfigValue<Integer> _pingInterval;
-protected ConfigValue<Float> _pingTimeout;
-
 protected ExecutorService _executor;
 protected ThreadPoolExecutor _connectExecutor;
 protected ScheduledExecutorService _directAgentExecutor;
@@ -188,47 +182,34 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
 "The number of direct agents to load each time", false);
 protected final ConfigKey<Integer> DirectAgentPoolSize = new ConfigKey<Integer>(Integer.class, "direct.agent.pool.size", "Advance", "500",
 "Default size for DirectAgentPool", false);
-static final ConfigKey<Integer> Wait = new ConfigKey<Integer>("Advanced", Integer.class, WaitCK, "1800", "Time in seconds to wait for control commands to return", true);
-
-protected ConfigValue<Integer> _port;
 
 @Override
 public boolean configure(final String name, final Map<String, Object> params) throws ConfigurationException {
 
-_port = _configDepot.get(Port);
-ConfigValue<Integer> workers = _configDepot.get(Workers);
-
-_pingInterval = _configDepot.get(PingInterval);
-
-_wait = _configDepot.get(Wait);
-_alertWait = _configDepot.get(AlertWait);
-_pingTimeout = _configDepot.get(PingTimeout);
-
-s_logger.info("Ping Timeout is " + _pingTimeout);
-
-ConfigValue<Integer> threads = _configDepot.get(DirectAgentLoadSize);
+s_logger.info("Ping Timeout is " + PingTimeout.value());
+
+int threads = DirectAgentLoadSize.value();
 
 _nodeId = ManagementServerNode.getManagementServerId();
 s_logger.info("Configuring AgentManagerImpl. management server node id(msid): " + _nodeId);
 
-long lastPing = (System.currentTimeMillis() >> 10) - (long)(_pingTimeout.value() * _pingInterval.value());
+long lastPing = (System.currentTimeMillis() >> 10) - (long)(PingTimeout.value() * PingInterval.value());
 _hostDao.markHostsAsDisconnected(_nodeId, lastPing);
 
 registerForHostEvents(new BehindOnPingListener(), true, true, false);
 
-_executor = new ThreadPoolExecutor(threads.value(), threads.value(), 60l, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(), new NamedThreadFactory("AgentTaskPool"));
+_executor = new ThreadPoolExecutor(threads, threads, 60l, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(), new NamedThreadFactory("AgentTaskPool"));
 
 _connectExecutor = new ThreadPoolExecutor(100, 500, 60l, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(), new NamedThreadFactory("AgentConnectTaskPool"));
 //allow core threads to time out even when there are no items in the queue
 _connectExecutor.allowCoreThreadTimeOut(true);
 
-_connection = new NioServer("AgentManager", _port.value(), workers.value() + 10, this);
-s_logger.info("Listening on " + _port.value() + " with " + workers.value() + " workers");
+_connection = new NioServer("AgentManager", Port.value(), Workers.value() + 10, this);
+s_logger.info("Listening on " + Port.value() + " with " + Workers.value() + " workers");
 
 
-ConfigValue<Integer> size = _configDepot.get(DirectAgentPoolSize);
-_directAgentExecutor = new ScheduledThreadPoolExecutor(size.value(), new NamedThreadFactory("DirectAgent"));
-s_logger.debug("Created DirectAgentAttache pool with size: " + size.value());
+_directAgentExecutor = new ScheduledThreadPoolExecutor(DirectAgentPoolSize.value(), new NamedThreadFactory("DirectAgent"));
+s_logger.debug("Created DirectAgentAttache pool with size: " + DirectAgentPoolSize.value());
 
 _monitorExecutor = new ScheduledThreadPoolExecutor(1, new NamedThreadFactory("AgentMonitor"));
 
@@ -236,7 +217,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
 }
 
 protected long getTimeout() {
-return (long)(_pingTimeout.value() * _pingInterval.value());
+return (long)(PingTimeout.value() * PingInterval.value());
 }
 
 @Override
@@ -357,7 +338,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
 }
 
 protected int getPingInterval() {
-return _pingInterval.value();
+return PingInterval.value();
 }
 
 @Override
@@ -392,7 +373,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
 }
 
 if (timeout <= 0) {
-timeout = _wait.value();
+timeout = Wait.value();
 }
 assert noDbTxn() : "I know, I know. Why are we so strict as to not allow txn across an agent call? ... Why are we so cruel ... Why are we such a dictator .... Too bad... Sorry...but NO AGENT COMMANDS WRAPPED WITHIN DB TRANSACTIONS!";
 
@@ -576,7 +557,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
 _connection.start();
 }
 
-_monitorExecutor.scheduleWithFixedDelay(new MonitorTask(), _pingInterval.value(), _pingInterval.value(), TimeUnit.SECONDS);
+_monitorExecutor.scheduleWithFixedDelay(new MonitorTask(), PingInterval.value(), PingInterval.value(), TimeUnit.SECONDS);
 
 return true;
 }
@@ -828,7 +809,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
 } else if (determinedState == Status.Disconnected) {
 s_logger.warn("Agent is disconnected but the host is still up: " + host.getId() + "-" + host.getName());
 if (currentStatus == Status.Disconnected) {
-if (((System.currentTimeMillis() >> 10) - host.getLastPinged()) > _alertWait.value()) {
+if (((System.currentTimeMillis() >> 10) - host.getLastPinged()) > AlertWait.value()) {
 s_logger.warn("Host " + host.getId() + " has been disconnected pass the time it should be disconnected.");
 event = Status.Event.WaitedTooLong;
 } else {
@@ -840,8 +821,8 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
 HostPodVO podVO = _podDao.findById(host.getPodId());
 String hostDesc = "name: " + host.getName() + " (id:" + host.getId() + "), availability zone: " + dcVO.getName() + ", pod: " + podVO.getName();
 if ((host.getType() != Host.Type.SecondaryStorage) && (host.getType() != Host.Type.ConsoleProxy)) {
-_alertMgr.sendAlert(AlertManager.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), "Host disconnected, " + hostDesc, "If the agent for host [" + hostDesc
-+ "] is not restarted within " + _alertWait + " seconds, HA will begin on the VMs");
+_alertMgr.sendAlert(AlertManager.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), "Host disconnected, " + hostDesc,
+"If the agent for host [" + hostDesc + "] is not restarted within " + AlertWait + " seconds, HA will begin on the VMs");
 }
 event = Status.Event.AgentDisconnected;
 }
@@ -1408,7 +1389,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
 attache = createAttacheForDirectConnect(host, resource);
 StartupAnswer[] answers = new StartupAnswer[cmds.length];
 for (int i = 0; i < answers.length; i++) {
-answers[i] = new StartupAnswer(cmds[i], attache.getId(), _pingInterval.value());
+answers[i] = new StartupAnswer(cmds[i], attache.getId(), PingInterval.value());
 }
 attache.process(answers);
 attache = notifyMonitorsOfConnection(attache, cmds, forRebalance);
@@ -50,7 +50,6 @@ import com.google.gson.Gson;
 
 import org.apache.cloudstack.framework.config.ConfigDepot;
 import org.apache.cloudstack.framework.config.ConfigKey;
-import org.apache.cloudstack.framework.config.ConfigValue;
 import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
 import org.apache.cloudstack.utils.identity.ManagementServerNode;
 
@@ -142,11 +141,6 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
 "Interval between scans to load agents", false, ConfigKey.Scope.Global, 1000);
 
 
-protected ConfigValue<Boolean> _agentLBEnabled;
-protected ConfigValue<Double> _connectedAgentsThreshold;
-protected ConfigValue<Integer> _loadSize;
-protected ConfigValue<Integer> _directAgentScanInterval;
-
 @Override
 public boolean configure(String name, Map<String, Object> xmlParams) throws ConfigurationException {
 _peers = new HashMap<String, SocketChannel>(7);
@@ -155,11 +149,6 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
 
 s_logger.info("Configuring ClusterAgentManagerImpl. management server node id(msid): " + _nodeId);
 
-_loadSize = _configDepot.get(LoadSize);
-_directAgentScanInterval = _configDepot.get(ScanInterval);
-_agentLBEnabled = _configDepot.get(EnableLB);
-_connectedAgentsThreshold = _configDepot.get(ConnectedAgentThreshold);
-
 ClusteredAgentAttache.initialize(this);
 
 _clusterMgr.registerListener(this);
@@ -175,9 +164,9 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
 if (!super.start()) {
 return false;
 }
-_timer.schedule(new DirectAgentScanTimerTask(), STARTUP_DELAY, _directAgentScanInterval.value());
+_timer.schedule(new DirectAgentScanTimerTask(), STARTUP_DELAY, ScanInterval.value());
 if (s_logger.isDebugEnabled()) {
-s_logger.debug("Scheduled direct agent scan task to run at an interval of " + _directAgentScanInterval.value() + " seconds");
+s_logger.debug("Scheduled direct agent scan task to run at an interval of " + ScanInterval.value() + " seconds");
 }
 
 // schedule transfer scan executor - if agent LB is enabled
@@ -207,7 +196,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
 
 // for agents that are self-managed, threshold to be considered as disconnected after pingtimeout
 long cutSeconds = (System.currentTimeMillis() >> 10) - getTimeout();
-List<HostVO> hosts = _hostDao.findAndUpdateDirectAgentToLoad(cutSeconds, _loadSize.value().longValue(), _nodeId);
+List<HostVO> hosts = _hostDao.findAndUpdateDirectAgentToLoad(cutSeconds, LoadSize.value().longValue(), _nodeId);
 List<HostVO> appliances = _hostDao.findAndUpdateApplianceToLoad(cutSeconds, _nodeId);
 hosts.addAll(appliances);
 
@@ -506,13 +495,13 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
 throw new CloudRuntimeException("Unable to resolve " + ip);
 }
 try {
-ch = SocketChannel.open(new InetSocketAddress(addr, _port.value()));
+ch = SocketChannel.open(new InetSocketAddress(addr, Port.value()));
 ch.configureBlocking(true); // make sure we are working at blocking mode
 ch.socket().setKeepAlive(true);
 ch.socket().setSoTimeout(60 * 1000);
 try {
 SSLContext sslContext = Link.initSSLContext(true);
-sslEngine = sslContext.createSSLEngine(ip, _port.value());
+sslEngine = sslContext.createSSLEngine(ip, Port.value());
 sslEngine.setUseClientMode(true);
 
 Link.doHandshake(ch, sslEngine, true);
@@ -1369,7 +1358,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
 }
 
 public boolean isAgentRebalanceEnabled() {
-return _agentLBEnabled.value();
+return EnableLB.value();
 }
 
 private ClusteredAgentRebalanceService _rebalanceService;
@@ -1379,7 +1368,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
 Profiler profilerAgentLB = new Profiler();
 profilerAgentLB.start();
 //initiate agent lb task will be scheduled and executed only once, and only when number of agents loaded exceeds _connectedAgentsThreshold
-if (_agentLBEnabled.value() && !_agentLbHappened) {
+if (EnableLB.value() && !_agentLbHappened) {
 SearchCriteriaService<HostVO, HostVO> sc = SearchCriteria2.create(HostVO.class);
 sc.addAnd(sc.getEntity().getManagementServerId(), Op.NNULL);
 sc.addAnd(sc.getEntity().getType(), Op.EQ, Host.Type.Routing);
@@ -1392,12 +1381,12 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
 double managedHostsCount = allManagedRoutingAgents.size();
 if (allHostsCount > 0.0) {
 double load = managedHostsCount / allHostsCount;
-if (load >= _connectedAgentsThreshold.value()) {
-s_logger.debug("Scheduling agent rebalancing task as the average agent load " + load + " is more than the threshold " + _connectedAgentsThreshold);
+if (load >= ConnectedAgentThreshold.value()) {
+s_logger.debug("Scheduling agent rebalancing task as the average agent load " + load + " is more than the threshold " + ConnectedAgentThreshold.value());
 _rebalanceService.scheduleRebalanceAgents();
 _agentLbHappened = true;
 } else {
-s_logger.trace("Not scheduling agent rebalancing task as the averages load " + load + " is less than the threshold " + _connectedAgentsThreshold);
+s_logger.trace("Not scheduling agent rebalancing task as the averages load " + load + " is less than the threshold " + ConnectedAgentThreshold.value());
 }
 }
 }
@@ -45,7 +45,6 @@ import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
 import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
 import org.apache.cloudstack.framework.config.ConfigDepot;
 import org.apache.cloudstack.framework.config.ConfigKey;
-import org.apache.cloudstack.framework.config.ConfigValue;
 import org.apache.cloudstack.framework.config.Configurable;
 import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
 import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
@@ -159,7 +158,6 @@ import com.cloud.utils.Journal;
 import com.cloud.utils.Pair;
 import com.cloud.utils.StringUtils;
 import com.cloud.utils.Ternary;
-import com.cloud.utils.component.InjectConfig;
 import com.cloud.utils.component.ManagerBase;
 import com.cloud.utils.concurrency.NamedThreadFactory;
 import com.cloud.utils.db.DB;
@@ -302,28 +300,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
 
 ScheduledExecutorService _executor = null;
 
-@InjectConfig(key = AgentManager.WaitCK)
-protected ConfigValue<Integer> _operationTimeout;
-
-@InjectConfig(key = "start.retry")
-protected ConfigValue<Integer> _retry;
 protected long _nodeId;
 
-@InjectConfig(key = "vm.op.cleanup.wait")
-protected ConfigValue<Long> _cleanupWait;
-@InjectConfig(key = "vm.op.cleanup.interval")
-protected ConfigValue<Long> _cleanupInterval;
-@InjectConfig(key = "vm.op.cancel.interval")
-protected ConfigValue<Long> _cancelWait;
-@InjectConfig(key = "vm.op.wait.interval")
-protected ConfigValue<Integer> _opWaitInterval;
-@InjectConfig(key = "vm.op.lock.state.retry")
-protected ConfigValue<Integer> _lockStateRetry;
-@InjectConfig(key = "vm.destroy.forcestop")
-protected ConfigValue<Boolean> _forceStop;
-@InjectConfig(key = "sync.interval")
-protected ConfigValue<Integer> _syncInterval;
-
 @Override
 public void registerGuru(VirtualMachine.Type type, VirtualMachineGuru guru) {
 synchronized (_vmGurus) {
@@ -494,7 +472,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
 
 @Override
 public boolean start() {
-_executor.scheduleAtFixedRate(new CleanupTask(), _cleanupInterval.value(), _cleanupInterval.value(), TimeUnit.SECONDS);
+_executor.scheduleAtFixedRate(new CleanupTask(), VmOpCleanupInterval.value(), VmOpCleanupInterval.value(), TimeUnit.SECONDS);
 cancelWorkItems(_nodeId);
 return true;
 }
@@ -556,13 +534,13 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
 return true;
 }
 
-if (vo.getSecondsTaskIsInactive() > _cancelWait.value()) {
+if (vo.getSecondsTaskIsInactive() > VmOpCancelInterval.value()) {
 s_logger.warn("The task item for vm " + vm + " has been inactive for " + vo.getSecondsTaskIsInactive());
 return false;
 }
 
 try {
-Thread.sleep(_opWaitInterval.value());
+Thread.sleep(VmOpWaitInterval.value());
 } catch (InterruptedException e) {
 s_logger.info("Waiting for " + vm + " but is interrupted");
 throw new ConcurrentOperationException("Waiting for " + vm + " but is interrupted");
@@ -578,7 +556,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
 long vmId = vm.getId();
 
 ItWorkVO work = new ItWorkVO(UUID.randomUUID().toString(), _nodeId, State.Starting, vm.getType(), vm.getId());
-int retry = _lockStateRetry.value();
+int retry = VmOpLockStateRetry.value();
 while (retry-- != 0) {
 Transaction txn = Transaction.currentTxn();
 Ternary<VMInstanceVO, ReservationContext, ItWorkVO> result = null;
@@ -729,7 +707,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
 boolean reuseVolume = true;
 DataCenterDeployment originalPlan = plan;
 
-int retry = _retry.value();
+int retry = StartRetry.value();
 while (retry-- != 0) { // It's != so that it can match -1.
 
 if (reuseVolume) {
@@ -1329,7 +1307,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
 s_logger.debug("Destroying vm " + vm);
 }
 
-advanceStop(vm, _forceStop.value());
+advanceStop(vm, VmDestroyForcestop.value());
 
 if (!_vmSnapshotMgr.deleteAllVMSnapshots(vm.getId(), null)) {
 s_logger.debug("Unable to delete all snapshots for " + vm);
@@ -1865,7 +1843,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
 public void run() {
 s_logger.trace("VM Operation Thread Running");
 try {
-_workDao.cleanup(_cleanupWait.value());
+_workDao.cleanup(VmOpCleanupWait.value());
 } catch (Exception e) {
 s_logger.error("VM Operations failed due to ", e);
 }
@@ -2575,7 +2553,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
 }
 
 // initiate the cron job
-ClusterSyncCommand syncCmd = new ClusterSyncCommand(_syncInterval.value(), clusterId);
+ClusterSyncCommand syncCmd = new ClusterSyncCommand(ClusterDeltaSyncInterval.value(), clusterId);
 try {
 long seq_no = _agentMgr.send(agentId, new Commands(syncCmd), this);
 s_logger.debug("Cluster VM sync started with jobid " + seq_no);
@@ -2628,7 +2606,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
 }
 try {
 lock.addRef();
-List<VMInstanceVO> instances = _vmDao.findVMInTransition(new Date(new Date().getTime() - (_operationTimeout.value() * 1000)), State.Starting, State.Stopping);
+List<VMInstanceVO> instances = _vmDao.findVMInTransition(new Date(new Date().getTime() - (AgentManager.Wait.value() * 1000)), State.Starting, State.Stopping);
 for (VMInstanceVO instance : instances) {
 State state = instance.getState();
 if (state == State.Stopping) {
@@ -3229,11 +3207,6 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
 return result;
 }
 
-@InjectConfig(key = CapacityManager.CpuOverprovisioningFactorCK)
-ConfigValue<Float> _cpuOverprovisioningFactor;
-@InjectConfig(key = CapacityManager.MemOverprovisioningFactorCK)
-ConfigValue<Float> _memOverprovisioningFactor;
-
 @Override
 public VMInstanceVO reConfigureVm(String vmUuid, ServiceOffering oldServiceOffering, boolean reconfiguringOnExistingHost) throws ResourceUnavailableException,
 ConcurrentOperationException {
@@ -3243,8 +3216,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
 ServiceOffering newServiceOffering = _entityMgr.findById(ServiceOffering.class, newServiceofferingId);
 HostVO hostVo = _hostDao.findById(vm.getHostId());
 
-Float memoryOvercommitRatio = _memOverprovisioningFactor.valueIn(hostVo.getClusterId());
-Float cpuOvercommitRatio = _cpuOverprovisioningFactor.valueIn(hostVo.getClusterId());
+Float memoryOvercommitRatio = CapacityManager.MemOverprovisioningFactor.valueIn(hostVo.getClusterId());
+Float cpuOvercommitRatio = CapacityManager.CpuOverprovisioningFactor.valueIn(hostVo.getClusterId());
 long minMemory = (long)(newServiceOffering.getRamSize() / memoryOvercommitRatio);
 ScaleVmCommand reconfigureCmd = new ScaleVmCommand(vm.getInstanceName(), newServiceOffering.getCpu(), (int)(newServiceOffering.getSpeed() / cpuOvercommitRatio),
 newServiceOffering.getSpeed(), minMemory * 1024L * 1024L, newServiceOffering.getRamSize() * 1024L * 1024L, newServiceOffering.getLimitCpuUse());
@@ -44,7 +44,6 @@ import org.apache.cloudstack.context.ServerContexts;
 import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService;
 import org.apache.cloudstack.framework.config.ConfigDepot;
 import org.apache.cloudstack.framework.config.ConfigKey;
-import org.apache.cloudstack.framework.config.ConfigValue;
 import org.apache.cloudstack.framework.config.Configurable;
 import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
 import org.apache.cloudstack.region.PortableIpDao;
@@ -342,20 +341,11 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
 SearchBuilder<IPAddressVO> AssignIpAddressSearch;
 SearchBuilder<IPAddressVO> AssignIpAddressFromPodVlanSearch;
 
-ConfigValue<Integer> _networkGcWait;
-ConfigValue<Integer> _networkGcInterval;
-ConfigValue<Integer> _networkLockTimeout;
-ConfigValue<String> _domainSuffix;
-
 HashMap<Long, Long> _lastNetworkIdsToFree = new HashMap<Long, Long>();
 
 @Override
 @DB
 public boolean configure(final String name, final Map<String, Object> params) throws ConfigurationException {
-_networkGcWait = _configDepot.get(NetworkGcWait);
-_networkGcInterval = _configDepot.get(NetworkGcInterval);
-_networkLockTimeout = _configDepot.get(NetworkLockTimeout);
-
 // populate providers
 Map<Network.Service, Set<Network.Provider>> defaultSharedNetworkOfferingProviders = new HashMap<Network.Service, Set<Network.Provider>>();
 Set<Network.Provider> defaultProviders = new HashSet<Network.Provider>();
@@ -580,7 +570,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
 
 @Override
 public boolean start() {
-_executor.scheduleWithFixedDelay(new NetworkGarbageCollector(), _networkGcInterval.value(), _networkGcInterval.value(), TimeUnit.SECONDS);
+_executor.scheduleWithFixedDelay(new NetworkGarbageCollector(), NetworkGcInterval.value(), NetworkGcInterval.value(), TimeUnit.SECONDS);
 return true;
 }
 
@@ -920,7 +910,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
 }
 
 // Acquire lock only when network needs to be implemented
-network = _networksDao.acquireInLockTable(networkId, _networkLockTimeout.value());
+network = _networksDao.acquireInLockTable(networkId, NetworkLockTimeout.value());
 if (network == null) {
 // see NetworkVO.java
 ConcurrentOperationException ex = new ConcurrentOperationException("Unable to acquire network configuration");
@@ -1783,7 +1773,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
 
 // 2) If null, generate networkDomain using domain suffix from the global config variables
 if (networkDomain == null) {
-networkDomain = "cs" + Long.toHexString(owner.getId()) + _domainSuffix.valueIn(zoneId);
+networkDomain = "cs" + Long.toHexString(owner.getId()) + GuestDomainSuffix.valueIn(zoneId);
 }
 
 } else {
@@ -1908,7 +1898,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
 
 try {
 //do global lock for the network
-network = _networksDao.acquireInLockTable(networkId, _networkLockTimeout.value());
+network = _networksDao.acquireInLockTable(networkId, NetworkLockTimeout.value());
 if (network == null) {
 s_logger.warn("Unable to acquire lock for the network " + network + " as a part of network shutdown");
 return false;
@@ -2235,7 +2225,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
 s_logger.debug("We found network " + networkId + " to be free for the first time. Adding it to the list: " + currentTime);
 }
 stillFree.put(networkId, currentTime);
-} else if (time > (currentTime - _networkGcWait.value())) {
+} else if (time > (currentTime - NetworkGcWait.value())) {
 if (s_logger.isDebugEnabled()) {
 s_logger.debug("Network " + networkId + " is still free but it's not time to shutdown yet: " + time);
 }
@@ -47,7 +47,6 @@ import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService.VolumeAp
 import org.apache.cloudstack.framework.async.AsyncCallFuture;
 import org.apache.cloudstack.framework.config.ConfigDepot;
 import org.apache.cloudstack.framework.config.ConfigKey;
-import org.apache.cloudstack.framework.config.ConfigValue;
 import org.apache.cloudstack.framework.config.Configurable;
 import org.apache.cloudstack.storage.command.CommandResult;
 import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
@@ -140,8 +139,6 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
 ConfigDepot _configDepot;
 
 private final StateMachine2<Volume.State, Volume.Event, Volume> _volStateMachine;
-private ConfigValue<Long> _maxVolumeSizeInGb;
-private ConfigValue<Boolean> _recreateSystemVmEnabled;
 protected List<StoragePoolAllocator> _storagePoolAllocators;
 
 public List<StoragePoolAllocator> getStoragePoolAllocators() {
@@ -489,8 +486,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
 public boolean validateVolumeSizeRange(long size) {
 if (size < 0 || (size > 0 && size < (1024 * 1024 * 1024))) {
 throw new InvalidParameterValueException("Please specify a size of at least 1 Gb.");
-} else if (size > (_maxVolumeSizeInGb.value() * 1024 * 1024 * 1024)) {
-throw new InvalidParameterValueException("volume size " + size + ", but the maximum size allowed is " + _maxVolumeSizeInGb + " Gb.");
+} else if (size > (MaxVolumeSize.value() * 1024 * 1024 * 1024)) {
+throw new InvalidParameterValueException("volume size " + size + ", but the maximum size allowed is " + MaxVolumeSize + " Gb.");
 }
 
 return true;
@@ -909,7 +906,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
 }
 
 private List<VolumeTask> getTasks(List<VolumeVO> vols, Map<Volume, StoragePool> destVols) throws StorageUnavailableException {
-boolean recreate = _recreateSystemVmEnabled.value();
+boolean recreate = RecreatableSystemVmEnabled.value();
 List<VolumeTask> tasks = new ArrayList<VolumeTask>();
 for (VolumeVO vol : vols) {
 StoragePoolVO assignedPool = null;
@@ -981,7 +978,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
 
 private Pair<VolumeVO, DataStore> recreateVolume(VolumeVO vol, VirtualMachineProfile vm, DeployDestination dest) throws StorageUnavailableException {
 VolumeVO newVol;
-boolean recreate = _recreateSystemVmEnabled.value();
+boolean recreate = RecreatableSystemVmEnabled.value();
 DataStore destPool = null;
 if (recreate && (dest.getStorageForDisks() == null || dest.getStorageForDisks().get(vol) == null)) {
 destPool = dataStoreMgr.getDataStore(vol.getPoolId(), DataStoreRole.Primary);
@@ -1120,10 +1117,6 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
 
 @Override
 public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
-_maxVolumeSizeInGb = _configDepot.get(MaxVolumeSize);
-
-_recreateSystemVmEnabled = _configDepot.get(RecreatableSystemVmEnabled);
-
 return true;
 }
 
@@ -41,7 +41,6 @@ import org.apache.cloudstack.api.command.user.vm.RestoreVMCmd;
 import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService;
 import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService;
 import org.apache.cloudstack.framework.config.ConfigDepot;
-import org.apache.cloudstack.framework.config.ConfigValue;
 import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
 import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
 import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
@@ -268,14 +267,7 @@ public class VirtualMachineManagerImplTest {
 doReturn(hostVO).when(_hostDao).findById(1L);
 doReturn(1L).when(_vmInstance).getDataCenterId();
 doReturn(1L).when(hostVO).getClusterId();
-@SuppressWarnings("unchecked")
-ConfigValue<Float> memOverprovisioningFactor = mock(ConfigValue.class);
-@SuppressWarnings("unchecked")
-ConfigValue<Float> cpuOverprovisioningFactor = mock(ConfigValue.class);
-when(_configDepot.get(CapacityManager.MemOverprovisioningFactor)).thenReturn(memOverprovisioningFactor);
-when(memOverprovisioningFactor.valueIn(1L)).thenReturn(1.0f);
-when(_configDepot.get(CapacityManager.CpuOverprovisioningFactor)).thenReturn(cpuOverprovisioningFactor);
-when(cpuOverprovisioningFactor.valueIn(1L)).thenReturn(1.0f);
+when(CapacityManager.CpuOverprovisioningFactor.valueIn(1L)).thenReturn(1.0f);
 ScaleVmCommand reconfigureCmd = new ScaleVmCommand("myVmName", newServiceOffering.getCpu(),
 newServiceOffering.getSpeed(), newServiceOffering.getSpeed(), newServiceOffering.getRamSize(), newServiceOffering.getRamSize(),
 newServiceOffering.getLimitCpuUse());
@@ -18,8 +18,50 @@
 */
 package org.apache.cloudstack.storage.test;
 
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.UUID;
+
+import javax.inject.Inject;
+
+import junit.framework.Assert;
+
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.Mockito;
+import org.springframework.test.context.ContextConfiguration;
+import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
+
+import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider;
+import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint;
+import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider;
+import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory;
+import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotService;
+import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory;
+import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
+import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService;
+import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
+import org.apache.cloudstack.storage.datastore.db.ImageStoreDao;
+import org.apache.cloudstack.storage.datastore.db.ImageStoreVO;
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+
 import com.cloud.agent.AgentManager;
+<<<<<<< HEAD
 import com.cloud.server.LockMasterListener;
+=======
+>>>>>>> Made changes to configuration. Eliminated ConfigValue and only use ConfigKey
 import com.cloud.dc.ClusterVO;
 import com.cloud.dc.DataCenter;
 import com.cloud.dc.DataCenterVO;
@@ -49,43 +91,6 @@ import com.cloud.user.AccountManager;
 import com.cloud.user.User;
 import com.cloud.utils.component.ComponentContext;
 import com.cloud.utils.db.Merovingian2;
-import junit.framework.Assert;
-import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
-import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
-import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
-import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
-import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider;
-import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint;
-import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector;
-import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider;
-import org.apache.cloudstack.engine.subsystem.api.storage.Scope;
-import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory;
-import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotService;
-import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory;
-import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
-import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService;
-import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
-import org.apache.cloudstack.storage.datastore.db.ImageStoreDao;
-import org.apache.cloudstack.storage.datastore.db.ImageStoreVO;
-import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
-import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao;
-import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.mockito.Mockito;
-import org.springframework.test.context.ContextConfiguration;
-import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
-
-import javax.inject.Inject;
-import java.net.URI;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-import java.util.UUID;
-
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
 
 @RunWith(SpringJUnit4ClassRunner.class)
 @ContextConfiguration(locations = { "classpath:/fakeDriverTestContext.xml" })
@@ -120,7 +125,6 @@ public class EndpointSelectorTest {
 ImageStoreVO imageStore;
 @Inject
 AccountManager accountManager;
-LockMasterListener lockMasterListener;
 VolumeInfo vol = null;
 FakePrimaryDataStoreDriver driver = new FakePrimaryDataStoreDriver();
 @Inject
@@ -16,11 +16,17 @@
 // under the License.
 package com.cloud.cluster;
 
+import org.apache.cloudstack.framework.config.ConfigKey;
+
 import com.cloud.utils.component.Manager;
 
 public interface ClusterManager extends Manager {
 static final String ALERT_SUBJECT = "cluster-alert";
+final ConfigKey<Integer> HeartbeatInterval = new ConfigKey<Integer>(Integer.class, "cluster.heartbeat.interval", "management-server", "1500",
+"Interval to check for the heart beat between management server nodes", false);
+final ConfigKey<Integer> HeartbeatThreshold = new ConfigKey<Integer>(Integer.class, "cluster.heartbeat.threshold", "management-server", "150000",
+"Threshold before self-fence the management server", true);
 
 void OnReceiveClusterServicePdu(ClusterServicePdu pdu);
 
 /**
@@ -40,8 +46,6 @@ public interface ClusterManager extends Manager {
 */
 void broadcast(long agentId, String cmds);
 
-int getHeartbeatThreshold();
-
 void registerListener(ClusterManagerListener listener);
 void unregisterListener(ClusterManagerListener listener);
 
@@ -47,7 +47,6 @@ import org.apache.log4j.Logger;
 
 import org.apache.cloudstack.framework.config.ConfigDepot;
 import org.apache.cloudstack.framework.config.ConfigKey;
-import org.apache.cloudstack.framework.config.ConfigValue;
 import org.apache.cloudstack.framework.config.Configurable;
 import org.apache.cloudstack.utils.identity.ManagementServerNode;
 
@@ -77,8 +76,6 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C
 
 private final List<ClusterManagerListener> _listeners = new ArrayList<ClusterManagerListener>();
 private final Map<Long, ManagementServerHostVO> _activePeers = new HashMap<Long, ManagementServerHostVO>();
-private ConfigValue<Integer> _heartbeatInterval;
-private ConfigValue<Integer> _heartbeatThreshold;
 
 private final Map<String, ClusterService> _clusterPeers;
 
@@ -354,7 +351,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C
 public void broadcast(long agentId, String cmds) {
 Date cutTime = DateUtil.currentGMTTime();
 
-List<ManagementServerHostVO> peers = _mshostDao.getActiveList(new Date(cutTime.getTime() - _heartbeatThreshold.value()));
+List<ManagementServerHostVO> peers = _mshostDao.getActiveList(new Date(cutTime.getTime() - HeartbeatThreshold.value()));
 for (ManagementServerHostVO peer : peers) {
 String peerName = Long.toString(peer.getMsid());
 if (getSelfPeerName().equals(peerName)) {
@@ -534,7 +531,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C
 return new Runnable() {
 @Override
 public void run() {
-Transaction txn = Transaction.open("ClusterHeartBeat");
+Transaction txn = Transaction.open("ClusterHeartbeat");
 try {
 Profiler profiler = new Profiler();
 Profiler profilerHeartbeatUpdate = new Profiler();
@@ -568,7 +565,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C
 } finally {
 profiler.stop();
 
-if (profiler.getDuration() >= _heartbeatInterval.value()) {
+if (profiler.getDuration() >= HeartbeatInterval.value()) {
 if(s_logger.isDebugEnabled())
 s_logger.debug("Management server heartbeat takes too long to finish. profiler: " + profiler.toString() +
 ", profilerHeartbeatUpdate: " + profilerHeartbeatUpdate.toString() +
@@ -602,7 +599,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C
 invalidHeartbeatConnection();
 } finally {
 txn.transitToAutoManagedConnection(Transaction.CLOUD_DB);
-txn.close("ClusterHeartBeat");
+txn.close("ClusterHeartbeat");
 }
 }
 };
@@ -623,7 +620,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C
 private Connection getHeartbeatConnection() throws SQLException {
 if(_heartbeatConnection == null) {
 Connection conn = Transaction.getStandaloneConnectionWithException();
-_heartbeatConnection = new ConnectionConcierge("ClusterManagerHeartBeat", conn, false);
+_heartbeatConnection = new ConnectionConcierge("ClusterManagerHeartbeat", conn, false);
 }
 
 return _heartbeatConnection.conn();
@@ -759,7 +756,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C
|
|||||||
// upon startup, we also send notifications for all inactive management server nodes seen at startup time, to help the upper layer perform
|
// upon startup, we also send notifications for all inactive management server nodes seen at startup time, to help the upper layer perform
|
||||||
// missed cleanup
|
// missed cleanup
|
||||||
Date cutTime = DateUtil.currentGMTTime();
|
Date cutTime = DateUtil.currentGMTTime();
|
||||||
List<ManagementServerHostVO> inactiveList = _mshostDao.getInactiveList(new Date(cutTime.getTime() - _heartbeatThreshold.value()));
|
List<ManagementServerHostVO> inactiveList = _mshostDao.getInactiveList(new Date(cutTime.getTime() - HeartbeatThreshold.value()));
|
||||||
|
|
||||||
// We don't have foreign key constraints to enforce the mgmt_server_id integrity in host table, when user manually
|
// We don't have foreign key constraints to enforce the mgmt_server_id integrity in host table, when user manually
|
||||||
// remove records from mshost table, this will leave orphaned mgmt_server_id references in the host table.
|
// remove records from mshost table, this will leave orphaned mgmt_server_id references in the host table.
|
||||||
@ -804,7 +801,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C
|
|||||||
|
|
||||||
Profiler profilerQueryActiveList = new Profiler();
|
Profiler profilerQueryActiveList = new Profiler();
|
||||||
profilerQueryActiveList.start();
|
profilerQueryActiveList.start();
|
||||||
List<ManagementServerHostVO> currentList = _mshostDao.getActiveList(new Date(cutTime.getTime() - _heartbeatThreshold.value()));
|
List<ManagementServerHostVO> currentList = _mshostDao.getActiveList(new Date(cutTime.getTime() - HeartbeatThreshold.value()));
|
||||||
profilerQueryActiveList.stop();
|
profilerQueryActiveList.stop();
|
||||||
|
|
||||||
Profiler profilerSyncClusterInfo = new Profiler();
|
Profiler profilerSyncClusterInfo = new Profiler();
|
||||||
@ -919,7 +916,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C
|
|||||||
|
|
||||||
profiler.stop();
|
profiler.stop();
|
||||||
|
|
||||||
if (profiler.getDuration() >= _heartbeatInterval.value()) {
|
if (profiler.getDuration() >= HeartbeatInterval.value()) {
|
||||||
if(s_logger.isDebugEnabled())
|
if(s_logger.isDebugEnabled())
|
||||||
s_logger.debug("Peer scan takes too long to finish. profiler: " + profiler.toString()
|
s_logger.debug("Peer scan takes too long to finish. profiler: " + profiler.toString()
|
||||||
+ ", profilerQueryActiveList: " + profilerQueryActiveList.toString()
|
+ ", profilerQueryActiveList: " + profilerQueryActiveList.toString()
|
||||||
@ -987,7 +984,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C
|
|||||||
_mshostPeerDao.clearPeerInfo(_mshostId);
|
_mshostPeerDao.clearPeerInfo(_mshostId);
|
||||||
|
|
||||||
// use a separate thread for heartbeat updates
|
// use a separate thread for heartbeat updates
|
||||||
_heartbeatScheduler.scheduleAtFixedRate(getHeartbeatTask(), _heartbeatInterval.value(), _heartbeatInterval.value(), TimeUnit.MILLISECONDS);
|
_heartbeatScheduler.scheduleAtFixedRate(getHeartbeatTask(), HeartbeatInterval.value(), HeartbeatInterval.value(), TimeUnit.MILLISECONDS);
|
||||||
_notificationExecutor.submit(getNotificationTask());
|
_notificationExecutor.submit(getNotificationTask());
|
||||||
|
|
||||||
} catch (Throwable e) {
|
} catch (Throwable e) {
|
||||||
@ -1028,20 +1025,12 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C
|
|||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
protected final ConfigKey<Integer> HeartBeatInterval = new ConfigKey<Integer>(Integer.class, "cluster.heartbeat.interval", "management-server",
|
|
||||||
"1500", "Interval to check for the heart beat between management server nodes", false);
|
|
||||||
protected final ConfigKey<Integer> HeartBeatThreshold = new ConfigKey<Integer>(Integer.class, "cluster.heartbeat.threshold", "management-server",
|
|
||||||
"150000", "Threshold before self-fence the management server", true);
|
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
|
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
|
||||||
if(s_logger.isInfoEnabled()) {
|
if(s_logger.isInfoEnabled()) {
|
||||||
s_logger.info("Start configuring cluster manager : " + name);
|
s_logger.info("Start configuring cluster manager : " + name);
|
||||||
}
|
}
|
||||||
|
|
||||||
_heartbeatInterval = _configDepot.get(HeartBeatInterval);
|
|
||||||
_heartbeatThreshold = _configDepot.get(HeartBeatThreshold);
|
|
||||||
|
|
||||||
File dbPropsFile = PropertiesUtil.findConfigFile("db.properties");
|
File dbPropsFile = PropertiesUtil.findConfigFile("db.properties");
|
||||||
Properties dbProps = new Properties();
|
Properties dbProps = new Properties();
|
||||||
try {
|
try {
|
||||||
@ -1095,7 +1084,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C
|
|||||||
public boolean isManagementNodeAlive(long msid) {
|
public boolean isManagementNodeAlive(long msid) {
|
||||||
ManagementServerHostVO mshost = _mshostDao.findByMsid(msid);
|
ManagementServerHostVO mshost = _mshostDao.findByMsid(msid);
|
||||||
if(mshost != null) {
|
if(mshost != null) {
|
||||||
if (mshost.getLastUpdateTime().getTime() >= DateUtil.currentGMTTime().getTime() - _heartbeatThreshold.value()) {
|
if (mshost.getLastUpdateTime().getTime() >= DateUtil.currentGMTTime().getTime() - HeartbeatThreshold.value()) {
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -1119,7 +1108,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C
|
|||||||
|
|
||||||
@Override
|
@Override
|
||||||
public ConfigKey<?>[] getConfigKeys() {
|
public ConfigKey<?>[] getConfigKeys() {
|
||||||
return new ConfigKey<?>[] {HeartBeatInterval, HeartBeatThreshold};
|
return new ConfigKey<?>[] {HeartbeatInterval, HeartbeatThreshold};
|
||||||
}
|
}
|
||||||
|
|
||||||
private boolean pingManagementNode(ManagementServerHostVO mshost) {
|
private boolean pingManagementNode(ManagementServerHostVO mshost) {
|
||||||
@ -1167,18 +1156,13 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public int getHeartbeatThreshold() {
|
|
||||||
return _heartbeatThreshold.value();
|
|
||||||
}
|
|
||||||
|
|
||||||
public int getHeartbeatInterval() {
|
public int getHeartbeatInterval() {
|
||||||
return _heartbeatInterval.value();
|
return HeartbeatInterval.value();
|
||||||
}
|
}
|
||||||
|
|
||||||
private void checkConflicts() throws ConfigurationException {
|
private void checkConflicts() throws ConfigurationException {
|
||||||
Date cutTime = DateUtil.currentGMTTime();
|
Date cutTime = DateUtil.currentGMTTime();
|
||||||
List<ManagementServerHostVO> peers = _mshostDao.getActiveList(new Date(cutTime.getTime() - _heartbeatThreshold.value()));
|
List<ManagementServerHostVO> peers = _mshostDao.getActiveList(new Date(cutTime.getTime() - HeartbeatThreshold.value()));
|
||||||
for(ManagementServerHostVO peer : peers) {
|
for(ManagementServerHostVO peer : peers) {
|
||||||
String peerIP = peer.getServiceIP().trim();
|
String peerIP = peer.getServiceIP().trim();
|
||||||
if(_clusterNodeIP.equals(peerIP)) {
|
if(_clusterNodeIP.equals(peerIP)) {
|
||||||
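Throughout ClusterManagerImpl the injected ConfigValue fields are gone and every call site reads HeartbeatInterval / HeartbeatThreshold straight off the interface constants. A sketch of how the heartbeat wiring looks under that pattern; the class, scheduler and task below are placeholders, not the real fields:

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

import com.cloud.cluster.ClusterManager;

public class HeartbeatWiringSketch {
    private final ScheduledExecutorService heartbeatScheduler = Executors.newScheduledThreadPool(1);

    void start() {
        Runnable heartbeatTask = new Runnable() {
            @Override
            public void run() {
                // placeholder: update this node's row in the mshost table
            }
        };
        // HeartbeatInterval is the ConfigKey<Integer> declared on the ClusterManager interface;
        // value() returns the database override when present, otherwise the declared "1500" default.
        heartbeatScheduler.scheduleAtFixedRate(heartbeatTask,
                ClusterManager.HeartbeatInterval.value(),
                ClusterManager.HeartbeatInterval.value(),
                TimeUnit.MILLISECONDS);
    }
}
```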
|
|||||||
@ -24,44 +24,44 @@ import javax.management.StandardMBean;
|
|||||||
import com.cloud.utils.DateUtil;
|
import com.cloud.utils.DateUtil;
|
||||||
|
|
||||||
public class ClusterManagerMBeanImpl extends StandardMBean implements ClusterManagerMBean {
|
public class ClusterManagerMBeanImpl extends StandardMBean implements ClusterManagerMBean {
|
||||||
private final ClusterManagerImpl _clusterMgr;
|
private final ClusterManagerImpl _clusterMgr;
|
||||||
private final ManagementServerHostVO _mshostVo;
|
private final ManagementServerHostVO _mshostVo;
|
||||||
|
|
||||||
public ClusterManagerMBeanImpl(ClusterManagerImpl clusterMgr, ManagementServerHostVO mshostVo) {
|
public ClusterManagerMBeanImpl(ClusterManagerImpl clusterMgr, ManagementServerHostVO mshostVo) {
|
||||||
super(ClusterManagerMBean.class, false);
|
super(ClusterManagerMBean.class, false);
|
||||||
|
|
||||||
_clusterMgr = clusterMgr;
|
_clusterMgr = clusterMgr;
|
||||||
_mshostVo = mshostVo;
|
_mshostVo = mshostVo;
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public long getMsid() {
|
public long getMsid() {
|
||||||
return _mshostVo.getMsid();
|
return _mshostVo.getMsid();
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public String getLastUpdateTime() {
|
public String getLastUpdateTime() {
|
||||||
Date date = _mshostVo.getLastUpdateTime();
|
Date date = _mshostVo.getLastUpdateTime();
|
||||||
return DateUtil.getDateDisplayString(TimeZone.getDefault(), date);
|
return DateUtil.getDateDisplayString(TimeZone.getDefault(), date);
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public String getClusterNodeIP() {
|
public String getClusterNodeIP() {
|
||||||
return _mshostVo.getServiceIP();
|
return _mshostVo.getServiceIP();
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public String getVersion() {
|
public String getVersion() {
|
||||||
return _mshostVo.getVersion();
|
return _mshostVo.getVersion();
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public int getHeartbeatInterval() {
|
public int getHeartbeatInterval() {
|
||||||
return _clusterMgr.getHeartbeatInterval();
|
return _clusterMgr.getHeartbeatInterval();
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public int getHeartbeatThreshold() {
|
public int getHeartbeatThreshold() {
|
||||||
return _clusterMgr.getHeartbeatThreshold();
|
return ClusterManager.HeartbeatThreshold.value();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
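ClusterManagerMBeanImpl now reports the threshold straight from the interface constant rather than through the removed ClusterManagerImpl getter. For context, a hedged sketch of registering such an MBean with the platform MBean server using only standard javax.management APIs; the ObjectName string and the package of the imported classes are illustrative assumptions, not necessarily what CloudStack uses:

```java
import java.lang.management.ManagementFactory;

import javax.management.MBeanServer;
import javax.management.ObjectName;

import com.cloud.cluster.ClusterManagerImpl;
import com.cloud.cluster.ClusterManagerMBeanImpl;
import com.cloud.cluster.ManagementServerHostVO;

public class ClusterMBeanRegistrationSketch {
    static void register(ClusterManagerImpl clusterMgr, ManagementServerHostVO mshostVo) throws Exception {
        MBeanServer server = ManagementFactory.getPlatformMBeanServer();
        // Illustrative ObjectName; the attributes exposed by the MBean (heartbeat interval,
        // threshold, msid, version) then become visible over JMX.
        ObjectName name = new ObjectName("com.cloud.cluster:type=ClusterManager,msid=" + mshostVo.getMsid());
        server.registerMBean(new ClusterManagerMBeanImpl(clusterMgr, mshostVo), name);
    }
}
```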
|
|||||||
@ -18,10 +18,14 @@ package com.cloud.cluster;
|
|||||||
|
|
||||||
import java.rmi.RemoteException;
|
import java.rmi.RemoteException;
|
||||||
|
|
||||||
import com.cloud.cluster.ClusterService;
|
import org.apache.cloudstack.framework.config.ConfigKey;
|
||||||
|
|
||||||
import com.cloud.utils.component.Adapter;
|
import com.cloud.utils.component.Adapter;
|
||||||
|
|
||||||
public interface ClusterServiceAdapter extends Adapter {
|
public interface ClusterServiceAdapter extends Adapter {
|
||||||
|
final ConfigKey<Integer> ClusterMessageTimeOut = new ConfigKey<Integer>(Integer.class, "cluster.message.timeout.seconds", "Advance", "300",
|
||||||
|
"Time (in seconds) to wait before a inter-management server message post times out.", true);
|
||||||
|
|
||||||
public ClusterService getPeerService(String strPeer) throws RemoteException;
|
public ClusterService getPeerService(String strPeer) throws RemoteException;
|
||||||
public String getServiceEndpointName(String strPeer);
|
public String getServiceEndpointName(String strPeer);
|
||||||
public int getServicePort();
|
public int getServicePort();
|
||||||
|
|||||||
@ -24,129 +24,117 @@ import java.rmi.RemoteException;
|
|||||||
import java.util.Map;
|
import java.util.Map;
|
||||||
import java.util.Properties;
|
import java.util.Properties;
|
||||||
|
|
||||||
import javax.ejb.Local;
|
|
||||||
import javax.inject.Inject;
|
import javax.inject.Inject;
|
||||||
import javax.naming.ConfigurationException;
|
import javax.naming.ConfigurationException;
|
||||||
|
|
||||||
import org.apache.log4j.Logger;
|
import org.apache.log4j.Logger;
|
||||||
import org.springframework.stereotype.Component;
|
|
||||||
|
|
||||||
import org.apache.cloudstack.framework.config.ConfigDepot;
|
import org.apache.cloudstack.framework.config.ConfigDepot;
|
||||||
import org.apache.cloudstack.framework.config.ConfigKey;
|
|
||||||
import org.apache.cloudstack.framework.config.ConfigValue;
|
|
||||||
|
|
||||||
import com.cloud.cluster.dao.ManagementServerHostDao;
|
import com.cloud.cluster.dao.ManagementServerHostDao;
|
||||||
import com.cloud.utils.NumbersUtil;
|
import com.cloud.utils.NumbersUtil;
|
||||||
import com.cloud.utils.PropertiesUtil;
|
import com.cloud.utils.PropertiesUtil;
|
||||||
import com.cloud.utils.component.AdapterBase;
|
import com.cloud.utils.component.AdapterBase;
|
||||||
|
|
||||||
@Component
|
|
||||||
@Local(value={ClusterServiceAdapter.class})
|
|
||||||
public class ClusterServiceServletAdapter extends AdapterBase implements ClusterServiceAdapter {
|
public class ClusterServiceServletAdapter extends AdapterBase implements ClusterServiceAdapter {
|
||||||
|
|
||||||
private static final Logger s_logger = Logger.getLogger(ClusterServiceServletAdapter.class);
|
private static final Logger s_logger = Logger.getLogger(ClusterServiceServletAdapter.class);
|
||||||
private static final int DEFAULT_SERVICE_PORT = 9090;
|
private static final int DEFAULT_SERVICE_PORT = 9090;
|
||||||
private static final int DEFAULT_REQUEST_TIMEOUT = 300; // 300 seconds
|
private static final int DEFAULT_REQUEST_TIMEOUT = 300; // 300 seconds
|
||||||
|
|
||||||
@Inject private ClusterManager _manager;
|
@Inject
|
||||||
|
private ClusterManager _manager;
|
||||||
@Inject private ManagementServerHostDao _mshostDao;
|
|
||||||
|
@Inject
|
||||||
|
private ManagementServerHostDao _mshostDao;
|
||||||
@Inject
|
@Inject
|
||||||
protected ConfigDepot _configDepot;
|
protected ConfigDepot _configDepot;
|
||||||
|
|
||||||
private ClusterServiceServletContainer _servletContainer;
|
private ClusterServiceServletContainer _servletContainer;
|
||||||
|
|
||||||
private int _clusterServicePort = DEFAULT_SERVICE_PORT;
|
private int _clusterServicePort = DEFAULT_SERVICE_PORT;
|
||||||
|
|
||||||
private ConfigValue<Integer> _clusterRequestTimeoutSeconds;
|
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public ClusterService getPeerService(String strPeer) throws RemoteException {
|
public ClusterService getPeerService(String strPeer) throws RemoteException {
|
||||||
try {
|
try {
|
||||||
init();
|
init();
|
||||||
} catch (ConfigurationException e) {
|
} catch (ConfigurationException e) {
|
||||||
s_logger.error("Unable to init ClusterServiceServletAdapter");
|
s_logger.error("Unable to init ClusterServiceServletAdapter");
|
||||||
throw new RemoteException("Unable to init ClusterServiceServletAdapter");
|
throw new RemoteException("Unable to init ClusterServiceServletAdapter");
|
||||||
}
|
}
|
||||||
|
|
||||||
String serviceUrl = getServiceEndpointName(strPeer);
|
String serviceUrl = getServiceEndpointName(strPeer);
|
||||||
if(serviceUrl == null)
|
if (serviceUrl == null)
|
||||||
return null;
|
return null;
|
||||||
|
|
||||||
return new ClusterServiceServletImpl(serviceUrl, _clusterRequestTimeoutSeconds);
|
return new ClusterServiceServletImpl(serviceUrl);
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public String getServiceEndpointName(String strPeer) {
|
|
||||||
try {
|
|
||||||
init();
|
|
||||||
} catch (ConfigurationException e) {
|
|
||||||
s_logger.error("Unable to init ClusterServiceServletAdapter");
|
|
||||||
return null;
|
|
||||||
}
|
|
||||||
|
|
||||||
long msid = Long.parseLong(strPeer);
|
|
||||||
|
|
||||||
ManagementServerHostVO mshost = _mshostDao.findByMsid(msid);
|
|
||||||
if(mshost == null)
|
|
||||||
return null;
|
|
||||||
|
|
||||||
return composeEndpointName(mshost.getServiceIP(), mshost.getServicePort());
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public int getServicePort() {
|
public String getServiceEndpointName(String strPeer) {
|
||||||
return _clusterServicePort;
|
try {
|
||||||
|
init();
|
||||||
|
} catch (ConfigurationException e) {
|
||||||
|
s_logger.error("Unable to init ClusterServiceServletAdapter");
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
long msid = Long.parseLong(strPeer);
|
||||||
|
|
||||||
|
ManagementServerHostVO mshost = _mshostDao.findByMsid(msid);
|
||||||
|
if (mshost == null)
|
||||||
|
return null;
|
||||||
|
|
||||||
|
return composeEndpointName(mshost.getServiceIP(), mshost.getServicePort());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public int getServicePort() {
|
||||||
|
return _clusterServicePort;
|
||||||
|
}
|
||||||
|
|
||||||
private String composeEndpointName(String nodeIP, int port) {
|
private String composeEndpointName(String nodeIP, int port) {
|
||||||
StringBuffer sb = new StringBuffer();
|
StringBuffer sb = new StringBuffer();
|
||||||
sb.append("http://").append(nodeIP).append(":").append(port).append("/clusterservice");
|
sb.append("http://").append(nodeIP).append(":").append(port).append("/clusterservice");
|
||||||
return sb.toString();
|
return sb.toString();
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
|
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
|
||||||
init();
|
init();
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public boolean start() {
|
public boolean start() {
|
||||||
_servletContainer = new ClusterServiceServletContainer();
|
_servletContainer = new ClusterServiceServletContainer();
|
||||||
_servletContainer.start(new ClusterServiceServletHttpHandler(_manager), _clusterServicePort);
|
_servletContainer.start(new ClusterServiceServletHttpHandler(_manager), _clusterServicePort);
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public boolean stop() {
|
public boolean stop() {
|
||||||
if(_servletContainer != null)
|
if (_servletContainer != null)
|
||||||
_servletContainer.stop();
|
_servletContainer.stop();
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
private final ConfigKey<Integer> ClusterMessageTimeOut = new ConfigKey<Integer>(Integer.class, "cluster.message.timeout.seconds", "Advance", "300",
|
|
||||||
"Time (in seconds) to wait before a inter-management server message post times out.", true);
|
|
||||||
|
|
||||||
private void init() throws ConfigurationException {
|
private void init() throws ConfigurationException {
|
||||||
if(_mshostDao != null)
|
if (_mshostDao != null)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
_clusterRequestTimeoutSeconds = _configDepot.get(ClusterMessageTimeOut);
|
|
||||||
s_logger.info("Configure cluster request time out. timeout: " + _clusterRequestTimeoutSeconds + " seconds");
|
|
||||||
|
|
||||||
File dbPropsFile = PropertiesUtil.findConfigFile("db.properties");
|
File dbPropsFile = PropertiesUtil.findConfigFile("db.properties");
|
||||||
Properties dbProps = new Properties();
|
Properties dbProps = new Properties();
|
||||||
try {
|
try {
|
||||||
dbProps.load(new FileInputStream(dbPropsFile));
|
dbProps.load(new FileInputStream(dbPropsFile));
|
||||||
} catch (FileNotFoundException e) {
|
} catch (FileNotFoundException e) {
|
||||||
throw new ConfigurationException("Unable to find db.properties");
|
throw new ConfigurationException("Unable to find db.properties");
|
||||||
} catch (IOException e) {
|
} catch (IOException e) {
|
||||||
throw new ConfigurationException("Unable to load db.properties content");
|
throw new ConfigurationException("Unable to load db.properties content");
|
||||||
}
|
}
|
||||||
|
|
||||||
_clusterServicePort = NumbersUtil.parseInt(dbProps.getProperty("cluster.servlet.port"), DEFAULT_SERVICE_PORT);
|
_clusterServicePort = NumbersUtil.parseInt(dbProps.getProperty("cluster.servlet.port"), DEFAULT_SERVICE_PORT);
|
||||||
if(s_logger.isInfoEnabled())
|
if (s_logger.isInfoEnabled())
|
||||||
s_logger.info("Cluster servlet port : " + _clusterServicePort);
|
s_logger.info("Cluster servlet port : " + _clusterServicePort);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
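With the timeout moved onto the ClusterServiceAdapter interface, building a peer proxy now needs only the endpoint URL. A small hedged sketch of that call path; the helper is hypothetical, and the URL shape mirrors what composeEndpointName() builds:

```java
import com.cloud.cluster.ClusterService;
import com.cloud.cluster.ClusterServiceServletImpl;

public class PeerProxySketch {
    static ClusterService connectToPeer(String serviceIp, int servicePort) {
        String endpoint = "http://" + serviceIp + ":" + servicePort + "/clusterservice";
        // The single-argument constructor now takes its request timeout from
        // ClusterServiceAdapter.ClusterMessageTimeOut (default 300 seconds)
        // instead of a ConfigValue passed in by the caller.
        return new ClusterServiceServletImpl(endpoint);
    }
}
```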
|
|||||||
@ -27,27 +27,23 @@ import org.apache.commons.httpclient.methods.PostMethod;
|
|||||||
import org.apache.commons.httpclient.params.HttpClientParams;
|
import org.apache.commons.httpclient.params.HttpClientParams;
|
||||||
import org.apache.log4j.Logger;
|
import org.apache.log4j.Logger;
|
||||||
|
|
||||||
import org.apache.cloudstack.framework.config.ConfigValue;
|
|
||||||
|
|
||||||
public class ClusterServiceServletImpl implements ClusterService {
|
public class ClusterServiceServletImpl implements ClusterService {
|
||||||
private static final long serialVersionUID = 4574025200012566153L;
|
private static final long serialVersionUID = 4574025200012566153L;
|
||||||
private static final Logger s_logger = Logger.getLogger(ClusterServiceServletImpl.class);
|
private static final Logger s_logger = Logger.getLogger(ClusterServiceServletImpl.class);
|
||||||
|
|
||||||
private String _serviceUrl;
|
private String _serviceUrl;
|
||||||
|
|
||||||
private ConfigValue<Integer> _requestTimeoutSeconds;
|
|
||||||
protected static HttpClient s_client = null;
|
protected static HttpClient s_client = null;
|
||||||
|
|
||||||
public ClusterServiceServletImpl() {
|
public ClusterServiceServletImpl() {
|
||||||
}
|
}
|
||||||
|
|
||||||
public ClusterServiceServletImpl(String serviceUrl, ConfigValue<Integer> requestTimeoutSeconds) {
|
public ClusterServiceServletImpl(String serviceUrl) {
|
||||||
s_logger.info("Setup cluster service servlet. service url: " + serviceUrl + ", request timeout: " + requestTimeoutSeconds.value() + " seconds");
|
s_logger.info("Setup cluster service servlet. service url: " + serviceUrl + ", request timeout: " + ClusterServiceAdapter.ClusterMessageTimeOut.value() + " seconds");
|
||||||
|
|
||||||
_serviceUrl = serviceUrl;
|
_serviceUrl = serviceUrl;
|
||||||
_requestTimeoutSeconds = requestTimeoutSeconds;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public String execute(ClusterServicePdu pdu) throws RemoteException {
|
public String execute(ClusterServicePdu pdu) throws RemoteException {
|
||||||
|
|
||||||
@ -69,7 +65,7 @@ public class ClusterServiceServletImpl implements ClusterService {
|
|||||||
|
|
||||||
@Override
|
@Override
|
||||||
public boolean ping(String callingPeer) throws RemoteException {
|
public boolean ping(String callingPeer) throws RemoteException {
|
||||||
if(s_logger.isDebugEnabled()) {
|
if (s_logger.isDebugEnabled()) {
|
||||||
s_logger.debug("Ping at " + _serviceUrl);
|
s_logger.debug("Ping at " + _serviceUrl);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -78,9 +74,9 @@ public class ClusterServiceServletImpl implements ClusterService {
|
|||||||
|
|
||||||
method.addParameter("method", Integer.toString(RemoteMethodConstants.METHOD_PING));
|
method.addParameter("method", Integer.toString(RemoteMethodConstants.METHOD_PING));
|
||||||
method.addParameter("callingPeer", callingPeer);
|
method.addParameter("callingPeer", callingPeer);
|
||||||
|
|
||||||
String returnVal = executePostMethod(client, method);
|
String returnVal = executePostMethod(client, method);
|
||||||
if("true".equalsIgnoreCase(returnVal)) {
|
if ("true".equalsIgnoreCase(returnVal)) {
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
return false;
|
return false;
|
||||||
@ -92,22 +88,20 @@ public class ClusterServiceServletImpl implements ClusterService {
|
|||||||
try {
|
try {
|
||||||
long startTick = System.currentTimeMillis();
|
long startTick = System.currentTimeMillis();
|
||||||
response = client.executeMethod(method);
|
response = client.executeMethod(method);
|
||||||
if(response == HttpStatus.SC_OK) {
|
if (response == HttpStatus.SC_OK) {
|
||||||
result = method.getResponseBodyAsString();
|
result = method.getResponseBodyAsString();
|
||||||
if(s_logger.isDebugEnabled()) {
|
if (s_logger.isDebugEnabled()) {
|
||||||
s_logger.debug("POST " + _serviceUrl + " response :" + result + ", responding time: "
|
s_logger.debug("POST " + _serviceUrl + " response :" + result + ", responding time: " + (System.currentTimeMillis() - startTick) + " ms");
|
||||||
+ (System.currentTimeMillis() - startTick) + " ms");
|
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
s_logger.error("Invalid response code : " + response + ", from : "
|
s_logger.error("Invalid response code : " + response + ", from : " + _serviceUrl + ", method : " + method.getParameter("method") + " responding time: " +
|
||||||
+ _serviceUrl + ", method : " + method.getParameter("method")
|
(System.currentTimeMillis() - startTick));
|
||||||
+ " responding time: " + (System.currentTimeMillis() - startTick));
|
|
||||||
}
|
}
|
||||||
} catch (HttpException e) {
|
} catch (HttpException e) {
|
||||||
s_logger.error("HttpException from : " + _serviceUrl + ", method : " + method.getParameter("method"));
|
s_logger.error("HttpException from : " + _serviceUrl + ", method : " + method.getParameter("method"));
|
||||||
} catch (IOException e) {
|
} catch (IOException e) {
|
||||||
s_logger.error("IOException from : " + _serviceUrl + ", method : " + method.getParameter("method"));
|
s_logger.error("IOException from : " + _serviceUrl + ", method : " + method.getParameter("method"));
|
||||||
} catch(Throwable e) {
|
} catch (Throwable e) {
|
||||||
s_logger.error("Exception from : " + _serviceUrl + ", method : " + method.getParameter("method") + ", exception :", e);
|
s_logger.error("Exception from : " + _serviceUrl + ", method : " + method.getParameter("method") + ", exception :", e);
|
||||||
} finally {
|
} finally {
|
||||||
method.releaseConnection();
|
method.releaseConnection();
|
||||||
@ -115,34 +109,34 @@ public class ClusterServiceServletImpl implements ClusterService {
|
|||||||
|
|
||||||
return result;
|
return result;
|
||||||
}
|
}
|
||||||
|
|
||||||
private HttpClient getHttpClient() {
|
private HttpClient getHttpClient() {
|
||||||
|
|
||||||
if(s_client == null) {
|
if (s_client == null) {
|
||||||
MultiThreadedHttpConnectionManager mgr = new MultiThreadedHttpConnectionManager();
|
MultiThreadedHttpConnectionManager mgr = new MultiThreadedHttpConnectionManager();
|
||||||
mgr.getParams().setDefaultMaxConnectionsPerHost(4);
|
mgr.getParams().setDefaultMaxConnectionsPerHost(4);
|
||||||
|
|
||||||
// TODO make it configurable
|
// TODO make it configurable
|
||||||
mgr.getParams().setMaxTotalConnections(1000);
|
mgr.getParams().setMaxTotalConnections(1000);
|
||||||
|
|
||||||
s_client = new HttpClient(mgr);
|
s_client = new HttpClient(mgr);
|
||||||
HttpClientParams clientParams = new HttpClientParams();
|
HttpClientParams clientParams = new HttpClientParams();
|
||||||
clientParams.setSoTimeout(_requestTimeoutSeconds.value() * 1000);
|
clientParams.setSoTimeout(ClusterServiceAdapter.ClusterMessageTimeOut.value() * 1000);
|
||||||
|
|
||||||
s_client.setParams(clientParams);
|
s_client.setParams(clientParams);
|
||||||
}
|
}
|
||||||
return s_client;
|
return s_client;
|
||||||
}
|
}
|
||||||
|
|
||||||
// for test purpose only
|
// for test purpose only
|
||||||
public static void main(String[] args) {
|
public static void main(String[] args) {
|
||||||
/*
|
/*
|
||||||
ClusterServiceServletImpl service = new ClusterServiceServletImpl("http://localhost:9090/clusterservice", 300);
|
ClusterServiceServletImpl service = new ClusterServiceServletImpl("http://localhost:9090/clusterservice", 300);
|
||||||
try {
|
try {
|
||||||
String result = service.execute("test", 1, "{ p1:v1, p2:v2 }", true);
|
String result = service.execute("test", 1, "{ p1:v1, p2:v2 }", true);
|
||||||
System.out.println(result);
|
System.out.println(result);
|
||||||
} catch (RemoteException e) {
|
} catch (RemoteException e) {
|
||||||
}
|
}
|
||||||
*/
|
*/
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
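The seconds-to-milliseconds conversion in getHttpClient() above is the one place the timeout key is translated for the HTTP layer. A self-contained sketch of the same commons-httpclient 3.x setup, with the key's value passed in as a plain int so the conversion stays explicit (300 stands in for ClusterServiceAdapter.ClusterMessageTimeOut.value()):

```java
import org.apache.commons.httpclient.HttpClient;
import org.apache.commons.httpclient.MultiThreadedHttpConnectionManager;
import org.apache.commons.httpclient.params.HttpClientParams;

public class ClusterHttpClientSketch {
    static HttpClient newClient(int messageTimeoutSeconds) {
        MultiThreadedHttpConnectionManager mgr = new MultiThreadedHttpConnectionManager();
        mgr.getParams().setDefaultMaxConnectionsPerHost(4);
        mgr.getParams().setMaxTotalConnections(1000);

        HttpClient client = new HttpClient(mgr);
        HttpClientParams params = new HttpClientParams();
        params.setSoTimeout(messageTimeoutSeconds * 1000); // key is in seconds, socket timeout in ms
        client.setParams(params);
        return client;
    }
}
```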
|
|||||||
@ -21,18 +21,6 @@ package org.apache.cloudstack.framework.config;
|
|||||||
*
|
*
|
||||||
*/
|
*/
|
||||||
public interface ConfigDepot {
|
public interface ConfigDepot {
|
||||||
/**
|
|
||||||
* Retrieves the global configuration value for key.
|
|
||||||
* @param key name of the key to retrieve.
|
|
||||||
* @return global configuration value even if the key is scoped
|
|
||||||
*/
|
|
||||||
<T> ConfigValue<T> get(ConfigKey<T> key);
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Retrieves a ConfigValue by its name
|
|
||||||
* @param name name of the config
|
|
||||||
* @return ConfigValue
|
|
||||||
*/
|
|
||||||
ConfigValue<?> get(String name);
|
|
||||||
|
|
||||||
|
ConfigKey<?> get(String paramName);
|
||||||
}
|
}
|
||||||
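ConfigDepot now hands back the ConfigKey itself for a name lookup instead of a ConfigValue wrapper. A hedged sketch of a by-name read under the new interface; the helper is hypothetical and the depot would normally be injected:

```java
import org.apache.cloudstack.framework.config.ConfigDepot;
import org.apache.cloudstack.framework.config.ConfigKey;

public class ConfigLookupSketch {
    static Object readByName(ConfigDepot depot, String name) {
        ConfigKey<?> key = depot.get(name);       // null if no Configurable registered this name
        return key != null ? key.value() : null;  // value() parses the stored string to the key's type
    }
}
```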
|
|||||||
@ -16,6 +16,11 @@
|
|||||||
// under the License.
|
// under the License.
|
||||||
package org.apache.cloudstack.framework.config;
|
package org.apache.cloudstack.framework.config;
|
||||||
|
|
||||||
|
import java.sql.Date;
|
||||||
|
|
||||||
|
import org.apache.cloudstack.framework.config.impl.ConfigDepotImpl;
|
||||||
|
import org.apache.cloudstack.framework.config.impl.ConfigurationVO;
|
||||||
|
|
||||||
import com.cloud.utils.exception.CloudRuntimeException;
|
import com.cloud.utils.exception.CloudRuntimeException;
|
||||||
|
|
||||||
|
|
||||||
@ -72,6 +77,13 @@ public class ConfigKey<T> {
|
|||||||
private final Scope _scope; // Parameter can be at different levels (Zone/cluster/pool/account), by default every parameter is at global
|
private final Scope _scope; // Parameter can be at different levels (Zone/cluster/pool/account), by default every parameter is at global
|
||||||
private final boolean _isDynamic;
|
private final boolean _isDynamic;
|
||||||
private final T _multiplier;
|
private final T _multiplier;
|
||||||
|
T _value = null;
|
||||||
|
|
||||||
|
static ConfigDepotImpl s_depot = null;
|
||||||
|
|
||||||
|
static void init(ConfigDepotImpl depot) {
|
||||||
|
s_depot = depot;
|
||||||
|
}
|
||||||
|
|
||||||
public ConfigKey(String category, Class<T> type, String name, String defaultValue, String description, boolean isDynamic, Scope scope) {
|
public ConfigKey(String category, Class<T> type, String name, String defaultValue, String description, boolean isDynamic, Scope scope) {
|
||||||
this(type, name, category, defaultValue, description, isDynamic, scope, null);
|
this(type, name, category, defaultValue, description, isDynamic, scope, null);
|
||||||
@ -117,4 +129,55 @@ public class ConfigKey<T> {
|
|||||||
|
|
||||||
throw new CloudRuntimeException("Comparing ConfigKey to " + obj.toString());
|
throw new CloudRuntimeException("Comparing ConfigKey to " + obj.toString());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
public T value() {
|
||||||
|
if (_value == null || isDynamic()) {
|
||||||
|
ConfigurationVO vo = s_depot != null ? s_depot.global().findById(key()) : null;
|
||||||
|
_value = valueOf(vo != null ? vo.getValue() : defaultValue());
|
||||||
|
}
|
||||||
|
|
||||||
|
return _value;
|
||||||
|
}
|
||||||
|
|
||||||
|
public T valueIn(long id) {
|
||||||
|
String value = s_depot != null ? s_depot.scoped(this).getConfigValue(id, this) : null;
|
||||||
|
if (value == null) {
|
||||||
|
return value();
|
||||||
|
} else {
|
||||||
|
return valueOf(value);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@SuppressWarnings("unchecked")
|
||||||
|
protected T valueOf(String value) {
|
||||||
|
Number multiplier = 1;
|
||||||
|
if (multiplier() != null) {
|
||||||
|
multiplier = (Number)multiplier();
|
||||||
|
}
|
||||||
|
Class<T> type = type();
|
||||||
|
if (type.isAssignableFrom(Boolean.class)) {
|
||||||
|
return (T)Boolean.valueOf(value);
|
||||||
|
} else if (type.isAssignableFrom(Integer.class)) {
|
||||||
|
return (T)new Integer(Integer.parseInt(value) * multiplier.intValue());
|
||||||
|
} else if (type.isAssignableFrom(Long.class)) {
|
||||||
|
return (T)new Long(Long.parseLong(value) * multiplier.longValue());
|
||||||
|
} else if (type.isAssignableFrom(Short.class)) {
|
||||||
|
return (T)new Short(Short.parseShort(value));
|
||||||
|
} else if (type.isAssignableFrom(String.class)) {
|
||||||
|
return (T)value;
|
||||||
|
} else if (type.isAssignableFrom(Float.class)) {
|
||||||
|
return (T)new Float(Float.parseFloat(value) * multiplier.floatValue());
|
||||||
|
} else if (type.isAssignableFrom(Double.class)) {
|
||||||
|
return (T)new Double(Double.parseDouble(value) * multiplier.doubleValue());
|
||||||
|
} else if (type.isAssignableFrom(String.class)) {
|
||||||
|
return (T)value;
|
||||||
|
} else if (type.isAssignableFrom(Date.class)) {
|
||||||
|
return (T)Date.valueOf(value);
|
||||||
|
} else if (type.isAssignableFrom(Character.class)) {
|
||||||
|
return (T)new Character(value.charAt(0));
|
||||||
|
} else {
|
||||||
|
throw new CloudRuntimeException("Unsupported data type for config values: " + type);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
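value() and valueIn() above are what replace ConfigValue: value() consults the global configuration table through the static depot reference set by init() (falling back to the declared default, and re-reading on every call for dynamic keys), while valueIn(id) asks the scope-matching storage first and only then falls back to value(). A sketch of typical call sites; the key, its name and the Zone scope constant are assumptions for illustration:

```java
import org.apache.cloudstack.framework.config.ConfigKey;

public class ConfigKeyUsageSketch {
    // Hypothetical zone-scoped key; a Zone constant on ConfigKey.Scope is assumed here.
    static final ConfigKey<Integer> ExampleZoneTimeout = new ConfigKey<Integer>("Advanced",
            Integer.class, "example.zone.timeout", "60", "Per-zone timeout in seconds",
            true, ConfigKey.Scope.Zone);

    static void demo() {
        // Global value: the configuration row if ConfigKey.init() has wired in the depot,
        // otherwise the parsed default "60". Dynamic keys are re-read on every call.
        int globalValue = ExampleZoneTimeout.value();

        // Per-zone value: asks the Zone-scoped storage for an override for id 7 first.
        int zone7Value = ExampleZoneTimeout.valueIn(7L);

        System.out.println(globalValue + " / " + zone7Value);
    }
}
```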
|
|||||||
@ -1,100 +0,0 @@
|
|||||||
// Licensed to the Apache Software Foundation (ASF) under one
|
|
||||||
// or more contributor license agreements. See the NOTICE file
|
|
||||||
// distributed with this work for additional information
|
|
||||||
// regarding copyright ownership. The ASF licenses this file
|
|
||||||
// to you under the Apache License, Version 2.0 (the
|
|
||||||
// "License"); you may not use this file except in compliance
|
|
||||||
// with the License. You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing,
|
|
||||||
// software distributed under the License is distributed on an
|
|
||||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
|
||||||
// KIND, either express or implied. See the License for the
|
|
||||||
// specific language governing permissions and limitations
|
|
||||||
// under the License.
|
|
||||||
package org.apache.cloudstack.framework.config;
|
|
||||||
|
|
||||||
import java.sql.Date;
|
|
||||||
|
|
||||||
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
|
|
||||||
import org.apache.cloudstack.framework.config.impl.ConfigurationVO;
|
|
||||||
|
|
||||||
import com.cloud.utils.exception.CloudRuntimeException;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* This is a match set to ConfigKey.
|
|
||||||
*
|
|
||||||
*/
|
|
||||||
public class ConfigValue<T> {
|
|
||||||
|
|
||||||
ConfigKey<T> _config;
|
|
||||||
ConfigurationDao _dao;
|
|
||||||
T _value;
|
|
||||||
ScopedConfigStorage _storage;
|
|
||||||
|
|
||||||
public ConfigValue(ConfigurationDao entityMgr, ConfigKey<T> config) {
|
|
||||||
_dao = entityMgr;
|
|
||||||
_config = config;
|
|
||||||
}
|
|
||||||
|
|
||||||
public ConfigValue(ConfigurationDao entityMgr, ConfigKey<T> key, ScopedConfigStorage storage) {
|
|
||||||
this(entityMgr, key);
|
|
||||||
_storage = storage;
|
|
||||||
}
|
|
||||||
|
|
||||||
public ConfigKey<T> getConfigKey() {
|
|
||||||
return _config;
|
|
||||||
}
|
|
||||||
|
|
||||||
public T value() {
|
|
||||||
if (_value == null || _config.isDynamic()) {
|
|
||||||
ConfigurationVO vo = _dao.findById(_config.key());
|
|
||||||
_value = valueOf(vo != null ? vo.getValue() : _config.defaultValue());
|
|
||||||
}
|
|
||||||
|
|
||||||
return _value;
|
|
||||||
}
|
|
||||||
|
|
||||||
public T valueIn(long id) {
|
|
||||||
String value = _storage.getConfigValue(id, _config);
|
|
||||||
if (value == null) {
|
|
||||||
return value();
|
|
||||||
} else {
|
|
||||||
return valueOf(value);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
@SuppressWarnings("unchecked")
|
|
||||||
protected T valueOf(String value) {
|
|
||||||
Number multiplier = 1;
|
|
||||||
if (_config.multiplier() != null) {
|
|
||||||
multiplier = (Number)_config.multiplier();
|
|
||||||
}
|
|
||||||
Class<T> type = _config.type();
|
|
||||||
if (type.isAssignableFrom(Boolean.class)) {
|
|
||||||
return (T)Boolean.valueOf(value);
|
|
||||||
} else if (type.isAssignableFrom(Integer.class)) {
|
|
||||||
return (T)new Integer(Integer.parseInt(value) * multiplier.intValue());
|
|
||||||
} else if (type.isAssignableFrom(Long.class)) {
|
|
||||||
return (T)new Long(Long.parseLong(value) * multiplier.longValue());
|
|
||||||
} else if (type.isAssignableFrom(Short.class)) {
|
|
||||||
return (T)new Short(Short.parseShort(value));
|
|
||||||
} else if (type.isAssignableFrom(String.class)) {
|
|
||||||
return (T)value;
|
|
||||||
} else if (type.isAssignableFrom(Float.class)) {
|
|
||||||
return (T)new Float(Float.parseFloat(value) * multiplier.floatValue());
|
|
||||||
} else if (type.isAssignableFrom(Double.class)) {
|
|
||||||
return (T)new Double(Double.parseDouble(value) * multiplier.doubleValue());
|
|
||||||
} else if (type.isAssignableFrom(String.class)) {
|
|
||||||
return (T)value;
|
|
||||||
} else if (type.isAssignableFrom(Date.class)) {
|
|
||||||
return (T)Date.valueOf(value);
|
|
||||||
} else if (type.isAssignableFrom(Character.class)) {
|
|
||||||
return (T)new Character(value.charAt(0));
|
|
||||||
} else {
|
|
||||||
throw new CloudRuntimeException("Unsupported data type for config values: " + type);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@ -16,12 +16,12 @@
|
|||||||
// under the License.
|
// under the License.
|
||||||
package org.apache.cloudstack.framework.config.impl;
|
package org.apache.cloudstack.framework.config.impl;
|
||||||
|
|
||||||
import java.lang.reflect.Field;
|
|
||||||
import java.util.ArrayList;
|
import java.util.ArrayList;
|
||||||
import java.util.Date;
|
import java.util.Date;
|
||||||
import java.util.HashMap;
|
import java.util.HashMap;
|
||||||
import java.util.List;
|
import java.util.List;
|
||||||
|
|
||||||
|
import javax.annotation.PostConstruct;
|
||||||
import javax.inject.Inject;
|
import javax.inject.Inject;
|
||||||
|
|
||||||
import org.apache.log4j.Logger;
|
import org.apache.log4j.Logger;
|
||||||
@ -29,14 +29,11 @@ import org.apache.log4j.Logger;
|
|||||||
import org.apache.cloudstack.framework.config.ConfigDepot;
|
import org.apache.cloudstack.framework.config.ConfigDepot;
|
||||||
import org.apache.cloudstack.framework.config.ConfigDepotAdmin;
|
import org.apache.cloudstack.framework.config.ConfigDepotAdmin;
|
||||||
import org.apache.cloudstack.framework.config.ConfigKey;
|
import org.apache.cloudstack.framework.config.ConfigKey;
|
||||||
import org.apache.cloudstack.framework.config.ConfigKey.Scope;
|
|
||||||
import org.apache.cloudstack.framework.config.ConfigValue;
|
|
||||||
import org.apache.cloudstack.framework.config.Configurable;
|
import org.apache.cloudstack.framework.config.Configurable;
|
||||||
import org.apache.cloudstack.framework.config.ScopedConfigStorage;
|
import org.apache.cloudstack.framework.config.ScopedConfigStorage;
|
||||||
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
|
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
|
||||||
|
|
||||||
import com.cloud.utils.Pair;
|
import com.cloud.utils.Pair;
|
||||||
import com.cloud.utils.component.ConfigInjector;
|
|
||||||
import com.cloud.utils.component.SystemIntegrityChecker;
|
import com.cloud.utils.component.SystemIntegrityChecker;
|
||||||
import com.cloud.utils.exception.CloudRuntimeException;
|
import com.cloud.utils.exception.CloudRuntimeException;
|
||||||
|
|
||||||
@ -60,14 +57,14 @@ import com.cloud.utils.exception.CloudRuntimeException;
|
|||||||
* - Figure out the correct categories.
|
* - Figure out the correct categories.
|
||||||
*
|
*
|
||||||
*/
|
*/
|
||||||
public class ConfigDepotImpl implements ConfigDepot, ConfigDepotAdmin, SystemIntegrityChecker, ConfigInjector {
|
public class ConfigDepotImpl implements ConfigDepot, ConfigDepotAdmin, SystemIntegrityChecker {
|
||||||
private final static Logger s_logger = Logger.getLogger(ConfigDepotImpl.class);
|
private final static Logger s_logger = Logger.getLogger(ConfigDepotImpl.class);
|
||||||
@Inject
|
@Inject
|
||||||
ConfigurationDao _configDao;
|
ConfigurationDao _configDao;
|
||||||
@Inject
|
@Inject
|
||||||
List<Configurable> _configurables;
|
List<Configurable> _configurables;
|
||||||
@Inject
|
@Inject
|
||||||
List<ScopedConfigStorage> _scopedStorage;
|
List<ScopedConfigStorage> _scopedStorages;
|
||||||
|
|
||||||
HashMap<String, Pair<String, ConfigKey<?>>> _allKeys = new HashMap<String, Pair<String, ConfigKey<?>>>(1007);
|
HashMap<String, Pair<String, ConfigKey<?>>> _allKeys = new HashMap<String, Pair<String, ConfigKey<?>>>(1007);
|
||||||
|
|
||||||
@ -75,17 +72,9 @@ public class ConfigDepotImpl implements ConfigDepot, ConfigDepotAdmin, SystemInt
|
|||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public <T> ConfigValue<T> get(ConfigKey<T> config) {
|
public ConfigKey<?> get(String key) {
|
||||||
if (config.scope() == Scope.Global) {
|
Pair<String, ConfigKey<?>> value = _allKeys.get(key);
|
||||||
return new ConfigValue<T>(_configDao, config);
|
return value != null ? value.second() : null;
|
||||||
} else {
|
|
||||||
for (ScopedConfigStorage storage : _scopedStorage) {
|
|
||||||
if (storage.getScope() == config.scope()) {
|
|
||||||
return new ConfigValue<T>(_configDao, config, storage);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
throw new CloudRuntimeException("Unable to find config storage for this scope: " + config.scope());
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
@ -121,6 +110,7 @@ public class ConfigDepotImpl implements ConfigDepot, ConfigDepotAdmin, SystemInt
|
|||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
|
@PostConstruct
|
||||||
public void check() {
|
public void check() {
|
||||||
for (Configurable configurable : _configurables) {
|
for (Configurable configurable : _configurables) {
|
||||||
s_logger.debug("Retrieving keys from " + configurable.getClass().getSimpleName());
|
s_logger.debug("Retrieving keys from " + configurable.getClass().getSimpleName());
|
||||||
@ -135,24 +125,17 @@ public class ConfigDepotImpl implements ConfigDepot, ConfigDepotAdmin, SystemInt
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
public ConfigurationDao global() {
|
||||||
public void inject(Field field, Object obj, String key) {
|
return _configDao;
|
||||||
Pair<String, ConfigKey<?>> configKey = _allKeys.get(key);
|
|
||||||
try {
|
|
||||||
field.set(obj, get(configKey.second()));
|
|
||||||
} catch (IllegalArgumentException e) {
|
|
||||||
throw new CloudRuntimeException("Unable to inject configuration due to ", e);
|
|
||||||
} catch (IllegalAccessException e) {
|
|
||||||
throw new CloudRuntimeException("Unable to inject configuration due to ", e);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
public ScopedConfigStorage scoped(ConfigKey<?> config) {
|
||||||
public ConfigValue<?> get(String name) {
|
for (ScopedConfigStorage storage : _scopedStorages) {
|
||||||
Pair<String, ConfigKey<?>> configKey = _allKeys.get(name);
|
if (storage.getScope() == config.scope()) {
|
||||||
if (configKey == null) {
|
return storage;
|
||||||
throw new CloudRuntimeException("Unable to find a registered config key for " + name);
|
}
|
||||||
}
|
}
|
||||||
return get(configKey.second());
|
|
||||||
|
throw new CloudRuntimeException("Unable to find config storage for this scope: " + config.scope() + " for " + config.key());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
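scoped() picks the injected ScopedConfigStorage whose getScope() matches the key's scope, which is what ConfigKey.valueIn() relies on. A hedged sketch of what such a storage could look like; the class is hypothetical and the real interface may declare more than the two methods exercised in this diff:

```java
import org.apache.cloudstack.framework.config.ConfigKey;
import org.apache.cloudstack.framework.config.ScopedConfigStorage;

public class ZoneConfigStorageSketch implements ScopedConfigStorage {
    @Override
    public ConfigKey.Scope getScope() {
        return ConfigKey.Scope.Zone;   // assumption: the Scope enum has a Zone constant
    }

    @Override
    public String getConfigValue(long id, ConfigKey<?> key) {
        // Return a per-zone override, or null so ConfigKey.valueIn() falls back to value().
        return lookupZoneDetail(id, key.key());
    }

    private String lookupZoneDetail(long zoneId, String name) {
        return null; // placeholder for a zone-details DAO query
    }
}
```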
|
|||||||
@ -72,8 +72,8 @@ public class ConfigDepotAdminTest extends TestCase {
|
|||||||
_depotAdmin._configDao = _configDao;
|
_depotAdmin._configDao = _configDao;
|
||||||
_depotAdmin._configurables = new ArrayList<Configurable>();
|
_depotAdmin._configurables = new ArrayList<Configurable>();
|
||||||
_depotAdmin._configurables.add(_configurable);
|
_depotAdmin._configurables.add(_configurable);
|
||||||
_depotAdmin._scopedStorage = new ArrayList<ScopedConfigStorage>();
|
_depotAdmin._scopedStorages = new ArrayList<ScopedConfigStorage>();
|
||||||
_depotAdmin._scopedStorage.add(_scopedStorage);
|
_depotAdmin._scopedStorages.add(_scopedStorage);
|
||||||
}
|
}
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
|
|||||||
@ -40,7 +40,6 @@ import org.apache.log4j.Logger;
|
|||||||
import org.apache.cloudstack.api.ApiErrorCode;
|
import org.apache.cloudstack.api.ApiErrorCode;
|
||||||
import org.apache.cloudstack.framework.config.ConfigDepot;
|
import org.apache.cloudstack.framework.config.ConfigDepot;
|
||||||
import org.apache.cloudstack.framework.config.ConfigKey;
|
import org.apache.cloudstack.framework.config.ConfigKey;
|
||||||
import org.apache.cloudstack.framework.config.ConfigValue;
|
|
||||||
import org.apache.cloudstack.framework.config.Configurable;
|
import org.apache.cloudstack.framework.config.Configurable;
|
||||||
import org.apache.cloudstack.framework.jobs.AsyncJob;
|
import org.apache.cloudstack.framework.jobs.AsyncJob;
|
||||||
import org.apache.cloudstack.framework.jobs.AsyncJobDispatcher;
|
import org.apache.cloudstack.framework.jobs.AsyncJobDispatcher;
|
||||||
@ -62,7 +61,6 @@ import com.cloud.cluster.ManagementServerHost;
|
|||||||
import com.cloud.utils.DateUtil;
|
import com.cloud.utils.DateUtil;
|
||||||
import com.cloud.utils.Predicate;
|
import com.cloud.utils.Predicate;
|
||||||
import com.cloud.utils.PropertiesUtil;
|
import com.cloud.utils.PropertiesUtil;
|
||||||
import com.cloud.utils.component.InjectConfig;
|
|
||||||
import com.cloud.utils.component.ManagerBase;
|
import com.cloud.utils.component.ManagerBase;
|
||||||
import com.cloud.utils.concurrency.NamedThreadFactory;
|
import com.cloud.utils.concurrency.NamedThreadFactory;
|
||||||
import com.cloud.utils.db.DB;
|
import com.cloud.utils.db.DB;
|
||||||
@ -88,32 +86,34 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager,
|
|||||||
private static final Logger s_logger = Logger.getLogger(AsyncJobManagerImpl.class);
|
private static final Logger s_logger = Logger.getLogger(AsyncJobManagerImpl.class);
|
||||||
|
|
||||||
private static final int ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_COOPERATION = 3; // 3 seconds
|
private static final int ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_COOPERATION = 3; // 3 seconds
|
||||||
private static final int ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_SYNC = 60; // 60 seconds
|
private static final int ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_SYNC = 60; // 60 seconds
|
||||||
|
|
||||||
private static final int MAX_ONETIME_SCHEDULE_SIZE = 50;
|
private static final int MAX_ONETIME_SCHEDULE_SIZE = 50;
|
||||||
private static final int HEARTBEAT_INTERVAL = 2000;
|
private static final int HEARTBEAT_INTERVAL = 2000;
|
||||||
private static final int GC_INTERVAL = 10000; // 10 seconds
|
private static final int GC_INTERVAL = 10000; // 10 seconds
|
||||||
|
|
||||||
@Inject
|
@Inject
|
||||||
private SyncQueueItemDao _queueItemDao;
|
private SyncQueueItemDao _queueItemDao;
|
||||||
@Inject private SyncQueueManager _queueMgr;
|
@Inject
|
||||||
@Inject private AsyncJobDao _jobDao;
|
private SyncQueueManager _queueMgr;
|
||||||
@Inject private AsyncJobJournalDao _journalDao;
|
@Inject
|
||||||
@Inject private AsyncJobJoinMapDao _joinMapDao;
|
private AsyncJobDao _jobDao;
|
||||||
@Inject private List<AsyncJobDispatcher> _jobDispatchers;
|
@Inject
|
||||||
@Inject private MessageBus _messageBus;
|
private AsyncJobJournalDao _journalDao;
|
||||||
@Inject private AsyncJobMonitor _jobMonitor;
|
@Inject
|
||||||
|
private AsyncJobJoinMapDao _joinMapDao;
|
||||||
|
@Inject
|
||||||
|
private List<AsyncJobDispatcher> _jobDispatchers;
|
||||||
|
@Inject
|
||||||
|
private MessageBus _messageBus;
|
||||||
|
@Inject
|
||||||
|
private AsyncJobMonitor _jobMonitor;
|
||||||
@Inject
|
@Inject
|
||||||
private ConfigDepot _configDepot;
|
private ConfigDepot _configDepot;
|
||||||
|
|
||||||
@InjectConfig(key = "job.expire.minutes")
|
|
||||||
private ConfigValue<Long> _jobExpireSeconds; // 1 day
|
|
||||||
@InjectConfig(key = "job.cancel.threshold.minutes")
|
|
||||||
private ConfigValue<Long> _jobCancelThresholdSeconds; // 1 hour (for cancelling the jobs blocking other jobs)
|
|
||||||
|
|
||||||
private volatile long _executionRunNumber = 1;
|
private volatile long _executionRunNumber = 1;
|
||||||
|
|
||||||
private final ScheduledExecutorService _heartbeatScheduler = Executors.newScheduledThreadPool(1, new NamedThreadFactory("AsyncJobMgr-Heartbeat"));
|
private final ScheduledExecutorService _heartbeatScheduler = Executors.newScheduledThreadPool(1, new NamedThreadFactory("AsyncJobMgr-Heartbeat"));
|
||||||
private ExecutorService _executor;
|
private ExecutorService _executor;
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
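The hunk above drops the @InjectConfig/ConfigValue fields from AsyncJobManagerImpl, so the job-expiry settings are expected to be read through ConfigKey constants (JobExpireMinutes and JobCancelThresholdMinutes, returned from getConfigKeys() in the next hunk; their declarations sit outside this diff). A sketch of that shape with illustrative category and default values:

```java
import org.apache.cloudstack.framework.config.ConfigKey;

public class ExampleJobManagerSketch {
    // Illustrative only; the real key is declared elsewhere in AsyncJobManagerImpl.
    static final ConfigKey<Long> JobExpireMinutes = new ConfigKey<Long>(Long.class,
            "job.expire.minutes", "Advanced", "1440", "Minutes to keep completed async jobs", false);

    public ConfigKey<?>[] getConfigKeys() {
        return new ConfigKey<?>[] {JobExpireMinutes};
    }

    long jobExpireMillis() {
        // Read on demand instead of through an injected ConfigValue field.
        return JobExpireMinutes.value() * 60L * 1000L;
    }
}
```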
@ -124,32 +124,33 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager,
|
|||||||
@Override
|
@Override
|
||||||
public ConfigKey<?>[] getConfigKeys() {
|
public ConfigKey<?>[] getConfigKeys() {
|
||||||
return new ConfigKey<?>[] {JobExpireMinutes, JobCancelThresholdMinutes};
|
return new ConfigKey<?>[] {JobExpireMinutes, JobCancelThresholdMinutes};
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public AsyncJobVO getAsyncJob(long jobId) {
|
public AsyncJobVO getAsyncJob(long jobId) {
|
||||||
return _jobDao.findById(jobId);
|
return _jobDao.findById(jobId);
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public List<AsyncJobVO> findInstancePendingAsyncJobs(String instanceType, Long accountId) {
|
public List<AsyncJobVO> findInstancePendingAsyncJobs(String instanceType, Long accountId) {
|
||||||
return _jobDao.findInstancePendingAsyncJobs(instanceType, accountId);
|
return _jobDao.findInstancePendingAsyncJobs(instanceType, accountId);
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override @DB
|
@Override
|
||||||
|
@DB
|
||||||
public AsyncJob getPseudoJob(long accountId, long userId) {
|
public AsyncJob getPseudoJob(long accountId, long userId) {
|
||||||
AsyncJobVO job = _jobDao.findPseudoJob(Thread.currentThread().getId(), getMsid());
|
AsyncJobVO job = _jobDao.findPseudoJob(Thread.currentThread().getId(), getMsid());
|
||||||
if(job == null) {
|
if (job == null) {
|
||||||
job = new AsyncJobVO();
|
job = new AsyncJobVO();
|
||||||
job.setAccountId(accountId);
|
job.setAccountId(accountId);
|
||||||
job.setUserId(userId);
|
job.setUserId(userId);
|
||||||
job.setInitMsid(getMsid());
|
job.setInitMsid(getMsid());
|
||||||
job.setDispatcher(AsyncJobVO.JOB_DISPATCHER_PSEUDO);
|
job.setDispatcher(AsyncJobVO.JOB_DISPATCHER_PSEUDO);
|
||||||
job.setInstanceType(AsyncJobVO.PSEUDO_JOB_INSTANCE_TYPE);
|
job.setInstanceType(AsyncJobVO.PSEUDO_JOB_INSTANCE_TYPE);
|
||||||
job.setInstanceId(Thread.currentThread().getId());
|
job.setInstanceId(Thread.currentThread().getId());
|
||||||
_jobDao.persist(job);
|
_jobDao.persist(job);
|
||||||
}
|
}
|
||||||
return job;
|
return job;
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
@ -174,13 +175,14 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager,
|
|||||||
}
|
}
|
||||||
|
|
||||||
@SuppressWarnings("unchecked")
|
@SuppressWarnings("unchecked")
|
||||||
@Override @DB
|
@Override
|
||||||
public long submitAsyncJob(AsyncJob job, String syncObjType, long syncObjId) {
|
@DB
|
||||||
|
public long submitAsyncJob(AsyncJob job, String syncObjType, long syncObjId) {
|
||||||
Transaction txt = Transaction.currentTxn();
|
Transaction txt = Transaction.currentTxn();
|
||||||
try {
|
try {
|
||||||
@SuppressWarnings("rawtypes")
|
@SuppressWarnings("rawtypes")
|
||||||
GenericDao dao = GenericDaoBase.getDao(job.getClass());
|
GenericDao dao = GenericDaoBase.getDao(job.getClass());
|
||||||
|
|
||||||
txt.start();
|
txt.start();
|
||||||
job.setInitMsid(getMsid());
|
job.setInitMsid(getMsid());
|
||||||
dao.persist(job);
|
dao.persist(job);
|
||||||
@@ -188,41 +190,41 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager,
syncAsyncJobExecution(job, syncObjType, syncObjId, 1);
txt.commit();
return job.getId();
-} catch(Exception e) {
+} catch (Exception e) {
String errMsg = "Unable to schedule async job for command " + job.getCmd() + ", unexpected exception.";
s_logger.warn(errMsg, e);
throw new CloudRuntimeException(errMsg);
}
}

-@Override @DB
+@Override
+@DB
public void completeAsyncJob(long jobId, Status jobStatus, int resultCode, String resultObject) {
-if(s_logger.isDebugEnabled()) {
-s_logger.debug("Complete async job-" + jobId + ", jobStatus: " + jobStatus +
-", resultCode: " + resultCode + ", result: " + resultObject);
+if (s_logger.isDebugEnabled()) {
+s_logger.debug("Complete async job-" + jobId + ", jobStatus: " + jobStatus + ", resultCode: " + resultCode + ", result: " + resultObject);
}

Transaction txn = Transaction.currentTxn();
try {
txn.start();
AsyncJobVO job = _jobDao.findById(jobId);
-if(job == null) {
-if(s_logger.isDebugEnabled()) {
-s_logger.debug("job-" + jobId + " no longer exists, we just log completion info here. " + jobStatus +
-", resultCode: " + resultCode + ", result: " + resultObject);
+if (job == null) {
+if (s_logger.isDebugEnabled()) {
+s_logger.debug("job-" + jobId + " no longer exists, we just log completion info here. " + jobStatus + ", resultCode: " + resultCode + ", result: " +
+resultObject);
}

txn.rollback();
return;
}

-if(job.getStatus() != JobInfo.Status.IN_PROGRESS) {
-if(s_logger.isDebugEnabled()) {
+if (job.getStatus() != JobInfo.Status.IN_PROGRESS) {
+if (s_logger.isDebugEnabled()) {
s_logger.debug("job-" + jobId + " is already completed.");
}

txn.rollback();
return;
}

job.setCompleteMsid(getMsid());
@@ -239,39 +241,39 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager,

job.setLastUpdated(DateUtil.currentGMTTime());
_jobDao.update(jobId, job);

List<Long> wakeupList = wakeupByJoinedJobCompletion(jobId);
_joinMapDao.disjoinAllJobs(jobId);

txn.commit();

-for(Long id : wakeupList) {
+for (Long id : wakeupList) {
// TODO, we assume that all jobs in this category is API job only
AsyncJobVO jobToWakeup = _jobDao.findById(id);
if (jobToWakeup != null && (jobToWakeup.getPendingSignals() & AsyncJob.Constants.SIGNAL_MASK_WAKEUP) != 0)
scheduleExecution(jobToWakeup, false);
}

_messageBus.publish(null, AsyncJob.Topics.JOB_STATE, PublishScope.GLOBAL, jobId);
-} catch(Exception e) {
+} catch (Exception e) {
s_logger.error("Unexpected exception while completing async job-" + jobId, e);
txn.rollback();
}
}

-@Override @DB
+@Override
+@DB
public void updateAsyncJobStatus(long jobId, int processStatus, String resultObject) {
-if(s_logger.isDebugEnabled()) {
-s_logger.debug("Update async-job progress, job-" + jobId + ", processStatus: " + processStatus +
-", result: " + resultObject);
+if (s_logger.isDebugEnabled()) {
+s_logger.debug("Update async-job progress, job-" + jobId + ", processStatus: " + processStatus + ", result: " + resultObject);
}

Transaction txt = Transaction.currentTxn();
try {
txt.start();
AsyncJobVO job = _jobDao.findById(jobId);
-if(job == null) {
-if(s_logger.isDebugEnabled()) {
+if (job == null) {
+if (s_logger.isDebugEnabled()) {
s_logger.debug("job-" + jobId + " no longer exists, we just log progress info here. progress status: " + processStatus);
}

@@ -280,23 +282,23 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager,
}

job.setProcessStatus(processStatus);
-if(resultObject != null) {
+if (resultObject != null) {
job.setResult(resultObject);
}
job.setLastUpdated(DateUtil.currentGMTTime());
_jobDao.update(jobId, job);
txt.commit();
-} catch(Exception e) {
+} catch (Exception e) {
s_logger.error("Unexpected exception while updating async job-" + jobId + " status: ", e);
txt.rollback();
}
}

-@Override @DB
+@Override
+@DB
public void updateAsyncJobAttachment(long jobId, String instanceType, Long instanceId) {
-if(s_logger.isDebugEnabled()) {
-s_logger.debug("Update async-job attachment, job-" + jobId + ", instanceType: " + instanceType +
-", instanceId: " + instanceId);
+if (s_logger.isDebugEnabled()) {
+s_logger.debug("Update async-job attachment, job-" + jobId + ", instanceType: " + instanceType + ", instanceId: " + instanceId);
}

Transaction txt = Transaction.currentTxn();
@@ -310,99 +312,101 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager,
_jobDao.update(jobId, job);

txt.commit();
-} catch(Exception e) {
+} catch (Exception e) {
s_logger.error("Unexpected exception while updating async job-" + jobId + " attachment: ", e);
txt.rollback();
}
}

-@Override @DB
-public void logJobJournal(long jobId, AsyncJob.JournalType journalType, String
-journalText, String journalObjJson) {
+@Override
+@DB
+public void logJobJournal(long jobId, AsyncJob.JournalType journalType, String journalText, String journalObjJson) {
AsyncJobJournalVO journal = new AsyncJobJournalVO();
journal.setJobId(jobId);
journal.setJournalType(journalType);
journal.setJournalText(journalText);
journal.setJournalObjJsonString(journalObjJson);

_journalDao.persist(journal);
}

-@Override @DB
-public void joinJob(long jobId, long joinJobId) {
-_joinMapDao.joinJob(jobId, joinJobId, getMsid(), 0, 0, null, null, null);
+@Override
+@DB
+public void joinJob(long jobId, long joinJobId) {
+_joinMapDao.joinJob(jobId, joinJobId, getMsid(), 0, 0, null, null, null);
}

-@Override @DB
-public void joinJob(long jobId, long joinJobId, String wakeupHandler, String wakeupDispatcher,
-String[] wakeupTopcisOnMessageBus, long wakeupIntervalInMilliSeconds, long timeoutInMilliSeconds) {
-Long syncSourceId = null;
-AsyncJobExecutionContext context = AsyncJobExecutionContext.getCurrentExecutionContext();
-assert(context.getJob() != null);
-if(context.getJob().getSyncSource() != null) {
-syncSourceId = context.getJob().getSyncSource().getQueueId();
-}
-
-_joinMapDao.joinJob(jobId, joinJobId, getMsid(),
-wakeupIntervalInMilliSeconds, timeoutInMilliSeconds,
-syncSourceId, wakeupHandler, wakeupDispatcher);
+@Override
+@DB
+public void joinJob(long jobId, long joinJobId, String wakeupHandler, String wakeupDispatcher, String[] wakeupTopcisOnMessageBus, long wakeupIntervalInMilliSeconds,
+long timeoutInMilliSeconds) {
+Long syncSourceId = null;
+AsyncJobExecutionContext context = AsyncJobExecutionContext.getCurrentExecutionContext();
+assert (context.getJob() != null);
+if (context.getJob().getSyncSource() != null) {
+syncSourceId = context.getJob().getSyncSource().getQueueId();
+}
+
+_joinMapDao.joinJob(jobId, joinJobId, getMsid(), wakeupIntervalInMilliSeconds, timeoutInMilliSeconds, syncSourceId, wakeupHandler, wakeupDispatcher);
}

-@Override @DB
+@Override
+@DB
public void disjoinJob(long jobId, long joinedJobId) {
_joinMapDao.disjoinJob(jobId, joinedJobId);
}

-@Override @DB
+@Override
+@DB
public void completeJoin(long joinJobId, JobInfo.Status joinStatus, String joinResult) {
//
// TODO
// this is a temporary solution to solve strange MySQL deadlock issue,
// completeJoin() causes deadlock happens at async_job table
// I removed the temporary solution already. I think my changes should fix the deadlock.

/*
------------------------
LATEST DETECTED DEADLOCK
------------------------
130625 20:03:10
*** (1) TRANSACTION:
TRANSACTION 0 98087127, ACTIVE 0 sec, process no 1489, OS thread id 139837829175040 fetching rows, thread declared inside InnoDB 494
mysql tables in use 2, locked 1
LOCK WAIT 3 lock struct(s), heap size 368, 2 row lock(s), undo log entries 1
MySQL thread id 28408, query id 368571321 localhost 127.0.0.1 cloud preparing
UPDATE async_job SET job_pending_signals=1 WHERE id IN (SELECT job_id FROM async_job_join_map WHERE join_job_id = 9)
*** (1) WAITING FOR THIS LOCK TO BE GRANTED:
RECORD LOCKS space id 0 page no 1275 n bits 80 index `PRIMARY` of table `cloud`.`async_job` trx id 0 98087127 lock_mode X locks rec but not gap waiting
Record lock, heap no 9 PHYSICAL RECORD: n_fields 26; compact format; info bits 0
0: len 8; hex 0000000000000008; asc ;; 1: len 6; hex 000005d8b0d8; asc ;; 2: len 7; hex 00000009270110; asc ' ;; 3: len 8; hex 0000000000000002; asc ;; 4: len 8; hex 0000000000000002; asc ;; 5: SQL NULL; 6: SQL NULL; 7: len 30; hex 6f72672e6170616368652e636c6f7564737461636b2e6170692e636f6d6d; asc org.apache.cloudstack.api.comm;...(truncated); 8: len 30; hex 7b226964223a2232222c22706879736963616c6e6574776f726b6964223a; asc {"id":"2","physicalnetworkid":;...(truncated); 9: len 4; hex 80000000; asc ;; 10: len 4; hex 80000001; asc ;; 11: len 4; hex 80000000; asc ;; 12: len 4; hex 80000000; asc ;; 13: len 30; hex 6f72672e6170616368652e636c6f7564737461636b2e6170692e72657370; asc org.apache.cloudstack.api.resp;...(truncated); 14: len 8; hex 80001a6f7bb0d0a8; asc o{ ;; 15: len 8; hex 80001a6f7bb0d0a8; asc o{ ;; 16: len 8; hex 8000124f06cfd5b6; asc O ;; 17: len 8; hex 8000124f06cfd5b6; asc O ;; 18: SQL NULL; 19: SQL NULL; 20: len 30; hex 66376466396532362d323139622d346338652d393231332d393766653636; asc f7df9e26-219b-4c8e-9213-97fe66;...(truncated); 21: len 30; hex 36623238306364362d663436652d343563322d383833642d333863616439; asc 6b280cd6-f46e-45c2-883d-38cad9;...(truncated); 22: SQL NULL; 23: len 21; hex 4170694173796e634a6f6244697370617463686572; asc ApiAsyncJobDispatcher;; 24: SQL NULL; 25: len 4; hex 80000000; asc ;;

*** (2) TRANSACTION:
TRANSACTION 0 98087128, ACTIVE 0 sec, process no 1489, OS thread id 139837671909120 fetching rows, thread declared inside InnoDB 492
mysql tables in use 2, locked 1
3 lock struct(s), heap size 368, 2 row lock(s), undo log entries 1
MySQL thread id 28406, query id 368571323 localhost 127.0.0.1 cloud preparing
UPDATE async_job SET job_pending_signals=1 WHERE id IN (SELECT job_id FROM async_job_join_map WHERE join_job_id = 8)
*** (2) HOLDS THE LOCK(S):
RECORD LOCKS space id 0 page no 1275 n bits 80 index `PRIMARY` of table `cloud`.`async_job` trx id 0 98087128 lock_mode X locks rec but not gap
Record lock, heap no 9 PHYSICAL RECORD: n_fields 26; compact format; info bits 0
0: len 8; hex 0000000000000008; asc ;; 1: len 6; hex 000005d8b0d8; asc ;; 2: len 7; hex 00000009270110; asc ' ;; 3: len 8; hex 0000000000000002; asc ;; 4: len 8; hex 0000000000000002; asc ;; 5: SQL NULL; 6: SQL NULL; 7: len 30; hex 6f72672e6170616368652e636c6f7564737461636b2e6170692e636f6d6d; asc org.apache.cloudstack.api.comm;...(truncated); 8: len 30; hex 7b226964223a2232222c22706879736963616c6e6574776f726b6964223a; asc {"id":"2","physicalnetworkid":;...(truncated); 9: len 4; hex 80000000; asc ;; 10: len 4; hex 80000001; asc ;; 11: len 4; hex 80000000; asc ;; 12: len 4; hex 80000000; asc ;; 13: len 30; hex 6f72672e6170616368652e636c6f7564737461636b2e6170692e72657370; asc org.apache.cloudstack.api.resp;...(truncated); 14: len 8; hex 80001a6f7bb0d0a8; asc o{ ;; 15: len 8; hex 80001a6f7bb0d0a8; asc o{ ;; 16: len 8; hex 8000124f06cfd5b6; asc O ;; 17: len 8; hex 8000124f06cfd5b6; asc O ;; 18: SQL NULL; 19: SQL NULL; 20: len 30; hex 66376466396532362d323139622d346338652d393231332d393766653636; asc f7df9e26-219b-4c8e-9213-97fe66;...(truncated); 21: len 30; hex 36623238306364362d663436652d343563322d383833642d333863616439; asc 6b280cd6-f46e-45c2-883d-38cad9;...(truncated); 22: SQL NULL; 23: len 21; hex 4170694173796e634a6f6244697370617463686572; asc ApiAsyncJobDispatcher;; 24: SQL NULL; 25: len 4; hex 80000000; asc ;;

*** (2) WAITING FOR THIS LOCK TO BE GRANTED:
RECORD LOCKS space id 0 page no 1275 n bits 80 index `PRIMARY` of table `cloud`.`async_job` trx id 0 98087128 lock_mode X locks rec but not gap waiting
Record lock, heap no 10 PHYSICAL RECORD: n_fields 26; compact format; info bits 0
0: len 8; hex 0000000000000009; asc ;; 1: len 6; hex 000005d8b0d7; asc ;; 2: len 7; hex 00000009280110; asc ( ;; 3: len 8; hex 0000000000000002; asc ;; 4: len 8; hex 0000000000000002; asc ;; 5: SQL NULL; 6: SQL NULL; 7: len 30; hex 6f72672e6170616368652e636c6f7564737461636b2e6170692e636f6d6d; asc org.apache.cloudstack.api.comm;...(truncated); 8: len 30; hex 7b226964223a2233222c22706879736963616c6e6574776f726b6964223a; asc {"id":"3","physicalnetworkid":;...(truncated); 9: len 4; hex 80000000; asc ;; 10: len 4; hex 80000001; asc ;; 11: len 4; hex 80000000; asc ;; 12: len 4; hex 80000000; asc ;; 13: len 30; hex 6f72672e6170616368652e636c6f7564737461636b2e6170692e72657370; asc org.apache.cloudstack.api.resp;...(truncated); 14: len 8; hex 80001a6f7bb0d0a8; asc o{ ;; 15: len 8; hex 80001a6f7bb0d0a8; asc o{ ;; 16: len 8; hex 8000124f06cfd5b6; asc O ;; 17: len 8; hex 8000124f06cfd5b6; asc O ;; 18: SQL NULL; 19: SQL NULL; 20: len 30; hex 62313065306432342d336233352d343663622d386361622d623933623562; asc b10e0d24-3b35-46cb-8cab-b93b5b;...(truncated); 21: len 30; hex 39353664383563632d383336622d346663612d623738622d646238343739; asc 956d85cc-836b-4fca-b78b-db8479;...(truncated); 22: SQL NULL; 23: len 21; hex 4170694173796e634a6f6244697370617463686572; asc ApiAsyncJobDispatcher;; 24: SQL NULL; 25: len 4; hex 80000000; asc ;;

*** WE ROLL BACK TRANSACTION (2)
*/

_joinMapDao.completeJoin(joinJobId, joinStatus, joinResult, getMsid());
}

@Override
public void syncAsyncJobExecution(AsyncJob job, String syncObjType, long syncObjId, long queueSizeLimit) {
-if(s_logger.isDebugEnabled()) {
+if (s_logger.isDebugEnabled()) {
s_logger.debug("Sync job-" + job.getId() + " execution on object " + syncObjType + "." + syncObjId);
}

@@ -412,9 +416,9 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager,
// we retry five times until we throw an exception
Random random = new Random();

-for(int i = 0; i < 5; i++) {
+for (int i = 0; i < 5; i++) {
queue = _queueMgr.queue(syncObjType, syncObjId, SyncQueueItem.AsyncJobContentType, job.getId(), queueSizeLimit);
-if(queue != null) {
+if (queue != null) {
break;
}

@@ -431,7 +435,7 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager,
@Override
public AsyncJob queryJob(long jobId, boolean updatePollTime) {
AsyncJobVO job = _jobDao.findById(jobId);

if (updatePollTime) {
job.setLastPolled(DateUtil.currentGMTTime());
_jobDao.update(jobId, job);
@@ -439,7 +443,6 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager,
return job;
}


private void scheduleExecution(final AsyncJobVO job) {
scheduleExecution(job, false);
}
@@ -452,10 +455,10 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager,
_executor.submit(runnable);
}
}

private AsyncJobDispatcher getDispatcher(String dispatcherName) {
assert (dispatcherName != null && !dispatcherName.isEmpty()) : "Who's not setting the dispatcher when submitting a job? Who am I suppose to call if you do that!";

for (AsyncJobDispatcher dispatcher : _jobDispatchers) {
if (dispatcherName.equals(dispatcher.getName()))
return dispatcher;
@@ -463,155 +466,155 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager,

throw new CloudRuntimeException("Unable to find dispatcher name: " + dispatcherName);
}

private AsyncJobDispatcher getWakeupDispatcher(AsyncJob job) {
-if(_jobDispatchers != null) {
+if (_jobDispatchers != null) {
List<AsyncJobJoinMapVO> joinRecords = _joinMapDao.listJoinRecords(job.getId());
-if(joinRecords.size() > 0) {
+if (joinRecords.size() > 0) {
AsyncJobJoinMapVO joinRecord = joinRecords.get(0);
-for(AsyncJobDispatcher dispatcher : _jobDispatchers) {
-if(dispatcher.getName().equals(joinRecord.getWakeupDispatcher()))
+for (AsyncJobDispatcher dispatcher : _jobDispatchers) {
+if (dispatcher.getName().equals(joinRecord.getWakeupDispatcher()))
return dispatcher;
}
} else {
s_logger.warn("job-" + job.getId() + " is scheduled for wakeup run, but there is no joining info anymore");
}
}
return null;
}

private long getJobRunNumber() {
-synchronized(this) {
+synchronized (this) {
return _executionRunNumber++;
}
}

private Runnable getExecutorRunnable(final AsyncJob job) {
return new Runnable() {
@Override
public void run() {
Transaction txn = null;
long runNumber = getJobRunNumber();

try {
//
// setup execution environment
//
txn = Transaction.open(Transaction.CLOUD_DB);

try {
JmxUtil.registerMBean("AsyncJobManager", "Active Job " + job.getId(), new AsyncJobMBeanImpl(job));
-} catch(Exception e) {
+} catch (Exception e) {
// Due to co-existence of normal-dispatched-job/wakeup-dispatched-job, MBean register() call
// is expected to fail under situations
-if(s_logger.isTraceEnabled())
+if (s_logger.isTraceEnabled())
s_logger.trace("Unable to register active job " + job.getId() + " to JMX monitoring due to exception " + ExceptionUtil.toString(e));
}

_jobMonitor.registerActiveTask(runNumber, job.getId());
AsyncJobExecutionContext.setCurrentExecutionContext(new AsyncJobExecutionContext(job));

// execute the job
-if(s_logger.isDebugEnabled()) {
+if (s_logger.isDebugEnabled()) {
s_logger.debug("Executing " + job);
}

if ((getAndResetPendingSignals(job) & AsyncJob.Constants.SIGNAL_MASK_WAKEUP) != 0) {
AsyncJobDispatcher jobDispatcher = getWakeupDispatcher(job);
-if(jobDispatcher != null) {
+if (jobDispatcher != null) {
jobDispatcher.runJob(job);
} else {
s_logger.error("Unable to find a wakeup dispatcher from the joined job: " + job);
}
} else {
AsyncJobDispatcher jobDispatcher = getDispatcher(job.getDispatcher());
-if(jobDispatcher != null) {
+if (jobDispatcher != null) {
jobDispatcher.runJob(job);
} else {
s_logger.error("Unable to find job dispatcher, job will be cancelled");
completeAsyncJob(job.getId(), JobInfo.Status.FAILED, ApiErrorCode.INTERNAL_ERROR.getHttpCode(), null);
}
}

if (s_logger.isDebugEnabled()) {
s_logger.debug("Done executing " + job.getCmd() + " for job-" + job.getId());
}

} catch (Throwable e) {
s_logger.error("Unexpected exception", e);
completeAsyncJob(job.getId(), JobInfo.Status.FAILED, ApiErrorCode.INTERNAL_ERROR.getHttpCode(), null);
} finally {
// guard final clause as well
try {
AsyncJobVO jobToUpdate = _jobDao.findById(job.getId());
jobToUpdate.setExecutingMsid(null);
_jobDao.update(job.getId(), jobToUpdate);

if (job.getSyncSource() != null) {
_queueMgr.purgeItem(job.getSyncSource().getId());
checkQueue(job.getSyncSource().getQueueId());
}

try {
JmxUtil.unregisterMBean("AsyncJobManager", "Active Job " + job.getId());
-} catch(Exception e) {
+} catch (Exception e) {
// Due to co-existence of normal-dispatched-job/wakeup-dispatched-job, MBean unregister() call
// is expected to fail under situations
-if(s_logger.isTraceEnabled())
+if (s_logger.isTraceEnabled())
s_logger.trace("Unable to unregister job " + job.getId() + " to JMX monitoring due to exception " + ExceptionUtil.toString(e));
}

-if(txn != null)
+if (txn != null)
txn.close();

//
// clean execution environment
//
AsyncJobExecutionContext.unregister();
_jobMonitor.unregisterActiveTask(runNumber);

-} catch(Throwable e) {
+} catch (Throwable e) {
s_logger.error("Double exception", e);
}
}
}
};
}

private int getAndResetPendingSignals(AsyncJob job) {
int signals = job.getPendingSignals();
-if(signals != 0) {
+if (signals != 0) {
AsyncJobVO jobRecord = _jobDao.findById(job.getId());
jobRecord.setPendingSignals(0);
_jobDao.update(job.getId(), jobRecord);
}
return signals;
}

private void executeQueueItem(SyncQueueItemVO item, boolean fromPreviousSession) {
AsyncJobVO job = _jobDao.findById(item.getContentId());
if (job != null) {
-if(s_logger.isDebugEnabled()) {
+if (s_logger.isDebugEnabled()) {
s_logger.debug("Schedule queued job-" + job.getId());
}

job.setSyncSource(item);

job.setExecutingMsid(getMsid());
_jobDao.update(job.getId(), job);

try {
scheduleExecution(job);
-} catch(RejectedExecutionException e) {
+} catch (RejectedExecutionException e) {
s_logger.warn("Execution for job-" + job.getId() + " is rejected, return it to the queue for next turn");
_queueMgr.returnItem(item.getId());

job.setExecutingMsid(null);
_jobDao.update(job.getId(), job);
}

} else {
-if(s_logger.isDebugEnabled()) {
+if (s_logger.isDebugEnabled()) {
s_logger.debug("Unable to find related job for queue item: " + item.toString());
}

@@ -619,58 +622,56 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager,
}
}

@Override
public void releaseSyncSource() {
AsyncJobExecutionContext executionContext = AsyncJobExecutionContext.getCurrentExecutionContext();
-assert(executionContext != null);
+assert (executionContext != null);

-if(executionContext.getSyncSource() != null) {
-if(s_logger.isDebugEnabled()) {
-s_logger.debug("Release sync source for job-" + executionContext.getJob().getId() + " sync source: "
-+ executionContext.getSyncSource().getContentType() + "-"
-+ executionContext.getSyncSource().getContentId());
+if (executionContext.getSyncSource() != null) {
+if (s_logger.isDebugEnabled()) {
+s_logger.debug("Release sync source for job-" + executionContext.getJob().getId() + " sync source: " + executionContext.getSyncSource().getContentType() + "-" +
+executionContext.getSyncSource().getContentId());
}

_queueMgr.purgeItem(executionContext.getSyncSource().getId());
checkQueue(executionContext.getSyncSource().getQueueId());
}
}

@Override
-public boolean waitAndCheck(AsyncJob job, String[] wakeupTopicsOnMessageBus, long checkIntervalInMilliSeconds,
-long timeoutInMiliseconds, Predicate predicate) {
+public boolean waitAndCheck(AsyncJob job, String[] wakeupTopicsOnMessageBus, long checkIntervalInMilliSeconds, long timeoutInMiliseconds, Predicate predicate) {
MessageDetector msgDetector = new MessageDetector();
String[] topics = Arrays.copyOf(wakeupTopicsOnMessageBus, wakeupTopicsOnMessageBus.length + 1);
topics[topics.length - 1] = AsyncJob.Topics.JOB_STATE;

msgDetector.open(_messageBus, topics);
try {
long startTick = System.currentTimeMillis();
-while(System.currentTimeMillis() - startTick < timeoutInMiliseconds) {
+while (System.currentTimeMillis() - startTick < timeoutInMiliseconds) {
msgDetector.waitAny(checkIntervalInMilliSeconds);
job = _jobDao.findById(job.getId());
if (job.getStatus().done()) {
return true;
}

if (predicate.checkCondition()) {
return true;
}
}
} finally {
msgDetector.close();
}

return false;
}
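
waitAndCheck() above blocks on the message bus and re-checks the job state plus a caller-supplied Predicate until the timeout expires. A caller-side sketch of its intended use follows; the _jobMgr and _jobDao fields and the interval/timeout values are illustrative assumptions, not taken from this commit:

    // Hypothetical caller: poll every 3 seconds, give up after 2 minutes.
    boolean waitForJob(final AsyncJob job) {
        return _jobMgr.waitAndCheck(job, new String[] {AsyncJob.Topics.JOB_STATE}, 3000L, 120000L,
                new Predicate() {
                    @Override
                    public boolean checkCondition() {
                        // Consulted on every wakeup; returning true ends the wait early.
                        return _jobDao.findById(job.getId()).getStatus().done();
                    }
                });
    }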

private void checkQueue(long queueId) {
-while(true) {
+while (true) {
try {
SyncQueueItemVO item = _queueMgr.dequeueFromOne(queueId, getMsid());
-if(item != null) {
-if(s_logger.isDebugEnabled()) {
+if (item != null) {
+if (s_logger.isDebugEnabled()) {
s_logger.debug("Executing sync queue item: " + item.toString());
}

@@ -678,7 +679,7 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager,
} else {
break;
}
-} catch(Throwable e) {
+} catch (Throwable e) {
s_logger.error("Unexpected exception when kicking sync queue-" + queueId, e);
break;
}
@@ -689,33 +690,33 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager,
return new Runnable() {
@Override
public void run() {
Transaction txn = Transaction.open("AsyncJobManagerImpl.getHeartbeatTask");
try {
List<SyncQueueItemVO> l = _queueMgr.dequeueFromAny(getMsid(), MAX_ONETIME_SCHEDULE_SIZE);
-if(l != null && l.size() > 0) {
-for(SyncQueueItemVO item: l) {
-if(s_logger.isDebugEnabled()) {
+if (l != null && l.size() > 0) {
+for (SyncQueueItemVO item : l) {
+if (s_logger.isDebugEnabled()) {
s_logger.debug("Execute sync-queue item: " + item.toString());
}
executeQueueItem(item, false);
}
}

List<Long> standaloneWakeupJobs = wakeupScan();
-for(Long jobId : standaloneWakeupJobs) {
+for (Long jobId : standaloneWakeupJobs) {
// TODO, we assume that all jobs in this category is API job only
AsyncJobVO job = _jobDao.findById(jobId);
if (job != null && (job.getPendingSignals() & AsyncJob.Constants.SIGNAL_MASK_WAKEUP) != 0)
scheduleExecution(job, false);
}
-} catch(Throwable e) {
+} catch (Throwable e) {
s_logger.error("Unexpected exception when trying to execute queue item, ", e);
} finally {
try {
txn.close();
-} catch(Throwable e) {
+} catch (Throwable e) {
s_logger.error("Unexpected exception", e);
}
}
}
};
@@ -728,7 +729,7 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager,
public void run() {
GlobalLock scanLock = GlobalLock.getInternLock("AsyncJobManagerGC");
try {
-if(scanLock.lock(ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_COOPERATION)) {
+if (scanLock.lock(ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_COOPERATION)) {
try {
reallyRun();
} finally {
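
The hunk is cut off right after the inner finally. For orientation, the usual GlobalLock guard around reallyRun() looks roughly like the sketch below; it is a reconstruction under that assumption, not the file's exact code:

    // Sketch of the GlobalLock pattern; only getInternLock(), lock() and the timeout
    // constant above are taken from this commit, the rest is assumed.
    GlobalLock scanLock = GlobalLock.getInternLock("AsyncJobManagerGC");
    try {
        if (scanLock.lock(ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_COOPERATION)) {
            try {
                reallyRun();
            } finally {
                scanLock.unlock();     // release the lock once the GC pass is done
            }
        }
    } finally {
        scanLock.releaseRef();         // drop the interned reference
    }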
@@ -744,30 +745,29 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager,
try {
s_logger.trace("Begin cleanup expired async-jobs");

-Date cutTime = new Date(DateUtil.currentGMTTime().getTime() - _jobExpireSeconds.value() * 1000);
+Date cutTime = new Date(DateUtil.currentGMTTime().getTime() - JobExpireMinutes.value() * 1000);

// limit to 100 jobs per turn, this gives cleanup throughput as 600 jobs per minute
// hopefully this will be fast enough to balance potential growth of job table
//1) Expire unfinished jobs that weren't processed yet
List<AsyncJobVO> l = _jobDao.getExpiredUnfinishedJobs(cutTime, 100);
-for(AsyncJobVO job : l) {
+for (AsyncJobVO job : l) {
s_logger.trace("Expunging unfinished job " + job);
expungeAsyncJob(job);
}

//2) Expunge finished jobs
List<AsyncJobVO> completedJobs = _jobDao.getExpiredCompletedJobs(cutTime, 100);
-for(AsyncJobVO job : completedJobs) {
+for (AsyncJobVO job : completedJobs) {
s_logger.trace("Expunging completed job " + job);
expungeAsyncJob(job);
}

// forcefully cancel blocking queue items if they've been staying there for too long
-List<SyncQueueItemVO> blockItems = _queueMgr.getBlockedQueueItems(_jobCancelThresholdSeconds.value()
-* 1000, false);
-if(blockItems != null && blockItems.size() > 0) {
-for(SyncQueueItemVO item : blockItems) {
-if(item.getContentType().equalsIgnoreCase(SyncQueueItem.AsyncJobContentType)) {
+List<SyncQueueItemVO> blockItems = _queueMgr.getBlockedQueueItems(JobCancelThresholdMinutes.value() * 1000, false);
+if (blockItems != null && blockItems.size() > 0) {
+for (SyncQueueItemVO item : blockItems) {
+if (item.getContentType().equalsIgnoreCase(SyncQueueItem.AsyncJobContentType)) {
completeAsyncJob(item.getContentId(), JobInfo.Status.FAILED, 0, "Job is cancelled as it has been blocking others for too long");
}

@@ -777,7 +777,7 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager,
}

s_logger.trace("End cleanup expired async-jobs");
-} catch(Throwable e) {
+} catch (Throwable e) {
s_logger.error("Unexpected exception when trying to execute queue item, ", e);
}
}
@@ -881,9 +881,6 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager,

@Override
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
-_jobExpireSeconds = _configDepot.get(JobExpireMinutes);
-_jobCancelThresholdSeconds = _configDepot.get(JobCancelThresholdMinutes);

try {
final File dbPropsFile = PropertiesUtil.findConfigFile("db.properties");
final Properties dbProps = new Properties();
@@ -944,7 +941,7 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager,
cleanupPendingJobs(items);
_jobDao.resetJobProcess(msHost.getId(), ApiErrorCode.INTERNAL_ERROR.getHttpCode(), "job cancelled because of management server restart");
txn.commit();
-} catch(Throwable e) {
+} catch (Throwable e) {
s_logger.warn("Unexpected exception ", e);
} finally {
txn.close();
@@ -959,12 +956,12 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager,
@Override
public boolean start() {
try {
_jobDao.cleanupPseduoJobs(getMsid());

List<SyncQueueItemVO> l = _queueMgr.getActiveQueueItems(getMsid(), false);
cleanupPendingJobs(l);
_jobDao.resetJobProcess(getMsid(), ApiErrorCode.INTERNAL_ERROR.getHttpCode(), "job cancelled because of management server restart");
-} catch(Throwable e) {
+} catch (Throwable e) {
s_logger.error("Unexpected exception " + e.getMessage(), e);
}

@@ -160,7 +160,7 @@ public class BigSwitchVnsGuestNetworkGuru extends GuestNetworkGuru {
implemented.setCidr(network.getCidr());
}

-String vnet = _dcDao.allocateVnet(dcId, physicalNetworkId, network.getAccountId(), context.getReservationId(), _useSystemGuestVlans.valueIn(network.getAccountId()));
+String vnet = _dcDao.allocateVnet(dcId, physicalNetworkId, network.getAccountId(), context.getReservationId(), UseSystemGuestVlans.valueIn(network.getAccountId()));
if (vnet == null) {
throw new InsufficientVirtualNetworkCapcityException("Unable to allocate vnet as a " +
"part of network " + network + " implement ", DataCenter.class, dcId);
@@ -30,7 +30,6 @@ import org.apache.log4j.Logger;

import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService;
import org.apache.cloudstack.framework.config.ConfigDepot;
-import org.apache.cloudstack.framework.config.ConfigValue;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
import org.apache.cloudstack.lb.ApplicationLoadBalancerRuleVO;
import org.apache.cloudstack.lb.dao.ApplicationLoadBalancerRuleDao;
@@ -344,13 +343,6 @@ public class InternalLoadBalancerVMManagerImpl extends ManagerBase implements In
public void prepareStop(VirtualMachineProfile profile) {
}

-static ConfigValue<Integer> _networkLockTimeout;
-static ConfigValue<String> _routerTemplateXen;
-static ConfigValue<String> _routerTemplateKvm;
-static ConfigValue<String> _routerTemplateVmware;
-static ConfigValue<String> _routerTemplateHyperV;
-static ConfigValue<String> _routerTemplateLxc;

@Override
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
final Map<String, String> configs = _configDao.getConfiguration("AgentManager", params);
@@ -359,13 +351,6 @@ public class InternalLoadBalancerVMManagerImpl extends ManagerBase implements In
_instance = "DEFAULT";
}

-_networkLockTimeout = _configDepot.get(NetworkOrchestrationService.NetworkLockTimeout);
-_routerTemplateXen = _configDepot.get(VirtualNetworkApplianceManager.RouterTemplateXen);
-_routerTemplateKvm = _configDepot.get(VirtualNetworkApplianceManager.RouterTemplateKvm);
-_routerTemplateVmware = _configDepot.get(VirtualNetworkApplianceManager.RouterTemplateVmware);
-_routerTemplateHyperV = _configDepot.get(VirtualNetworkApplianceManager.RouterTemplateHyperV);
-_routerTemplateLxc = _configDepot.get(VirtualNetworkApplianceManager.RouterTemplateLxc);

_mgmtHost = configs.get("host");
_mgmtCidr = _configDao.getValue(Config.ManagementNetwork.key());

@@ -602,7 +587,7 @@ public class InternalLoadBalancerVMManagerImpl extends ManagerBase implements In
InsufficientCapacityException, ResourceUnavailableException {

List<DomainRouterVO> internalLbVms = new ArrayList<DomainRouterVO>();
-Network lock = _networkDao.acquireInLockTable(guestNetwork.getId(), _networkLockTimeout.value());
+Network lock = _networkDao.acquireInLockTable(guestNetwork.getId(), NetworkOrchestrationService.NetworkLockTimeout.value());
if (lock == null) {
throw new ConcurrentOperationException("Unable to lock network " + guestNetwork.getId());
}
@@ -755,19 +740,19 @@ public class InternalLoadBalancerVMManagerImpl extends ManagerBase implements In
} String templateName = null;
switch (hType) {
case XenServer:
-templateName = _routerTemplateXen.valueIn(dest.getDataCenter().getId());
+templateName = VirtualNetworkApplianceManager.RouterTemplateXen.valueIn(dest.getDataCenter().getId());
break;
case KVM:
-templateName = _routerTemplateKvm.valueIn(dest.getDataCenter().getId());
+templateName = VirtualNetworkApplianceManager.RouterTemplateKvm.valueIn(dest.getDataCenter().getId());
break;
case VMware:
-templateName = _routerTemplateVmware.valueIn(dest.getDataCenter().getId());
+templateName = VirtualNetworkApplianceManager.RouterTemplateVmware.valueIn(dest.getDataCenter().getId());
break;
case Hyperv:
-templateName = _routerTemplateHyperV.valueIn(dest.getDataCenter().getId());
+templateName = VirtualNetworkApplianceManager.RouterTemplateHyperV.valueIn(dest.getDataCenter().getId());
break;
case LXC:
-templateName = _routerTemplateLxc.valueIn(dest.getDataCenter().getId());
+templateName = VirtualNetworkApplianceManager.RouterTemplateLxc.valueIn(dest.getDataCenter().getId());
break;
default: break;
}
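
The hunks above all follow the same pattern: the cached ConfigValue fields and the _configDepot.get(...) lookups in configure() are removed, and call sites read the static ConfigKey directly — value() for a global setting, valueIn(scopeId) for a scoped one, as with the router templates and UseSystemGuestVlans. A rough illustration of the resulting pattern; the key, class and field names below are made up, and the exact ConfigKey constructor overload may differ from the one used in the codebase:

    import org.apache.cloudstack.framework.config.ConfigKey;

    public class ConfigKeyUsageExample {
        // Hypothetical key for illustration only.
        static final ConfigKey<Boolean> ExampleFeatureEnabled = new ConfigKey<Boolean>(Boolean.class,
                "example.feature.enabled", "Advanced", "false", "Whether the example feature is enabled", true);

        void doWork(long accountId) {
            if (ExampleFeatureEnabled.value()) {
                // global value, read directly from the key; no ConfigValue cache needed
            }
            Boolean forAccount = ExampleFeatureEnabled.valueIn(accountId);  // per-scope value
        }
    }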
@@ -93,7 +93,7 @@ public class OvsGuestNetworkGuru extends GuestNetworkGuru {
protected void allocateVnet(Network network, NetworkVO implemented, long dcId,
long physicalNetworkId, String reservationId) throws InsufficientVirtualNetworkCapcityException {
if (network.getBroadcastUri() == null) {
-String vnet = _dcDao.allocateVnet(dcId, physicalNetworkId, network.getAccountId(), reservationId, _useSystemGuestVlans.valueIn(network.getAccountId()));
+String vnet = _dcDao.allocateVnet(dcId, physicalNetworkId, network.getAccountId(), reservationId, UseSystemGuestVlans.valueIn(network.getAccountId()));
if (vnet == null) {
throw new InsufficientVirtualNetworkCapcityException("Unable to allocate vnet as a part of network " + network + " implement ", DataCenter.class, dcId);
}
|
|||||||
@@ -46,7 +46,6 @@ import com.sun.mail.smtp.SMTPTransport;

 import org.apache.cloudstack.framework.config.ConfigDepot;
 import org.apache.cloudstack.framework.config.ConfigKey;
-import org.apache.cloudstack.framework.config.ConfigValue;
 import org.apache.cloudstack.framework.config.Configurable;
 import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
 import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
@@ -112,10 +111,6 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi

     private Timer _timer = null;
     private long _capacityCheckPeriod = 60L * 60L * 1000L; // one hour by default
-    private ConfigValue<Double> _memoryCapacityThreshold;
-    private ConfigValue<Double> _cpuCapacityThreshold;
-    private ConfigValue<Double> _storageCapacityThreshold;
-    private ConfigValue<Double> _storageAllocCapacityThreshold;
     private double _publicIPCapacityThreshold = 0.75;
     private double _privateIPCapacityThreshold = 0.75;
     private double _secondaryStorageCapacityThreshold = 0.75;
@@ -157,11 +152,6 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi
         String directNetworkPublicIpCapacityThreshold = _configDao.getValue(Config.DirectNetworkPublicIpCapacityThreshold.key());
         String localStorageCapacityThreshold = _configDao.getValue(Config.LocalStorageCapacityThreshold.key());

-        _storageCapacityThreshold = _configDepot.get(StorageCapacityThreshold);
-        _cpuCapacityThreshold = _configDepot.get(CPUCapacityThreshold);
-        _memoryCapacityThreshold = _configDepot.get(MemoryCapacityThreshold);
-        _storageAllocCapacityThreshold = _configDepot.get(StorageAllocatedCapacityThreshold);
-
         if (publicIPCapacityThreshold != null) {
             _publicIPCapacityThreshold = Double.parseDouble(publicIPCapacityThreshold);
         }
@@ -543,16 +533,16 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi
         switch (capacityType) {
         case Capacity.CAPACITY_TYPE_STORAGE:
             capacity.add(getUsedStats(capacityType, cluster.getDataCenterId(), cluster.getPodId(), cluster.getId()));
-            threshold = _storageCapacityThreshold.valueIn(cluster.getId());
+            threshold = StorageCapacityThreshold.valueIn(cluster.getId());
             break;
         case Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED:
-            threshold = _storageAllocCapacityThreshold.valueIn(cluster.getId());
+            threshold = StorageAllocatedCapacityThreshold.valueIn(cluster.getId());
             break;
         case Capacity.CAPACITY_TYPE_CPU:
-            threshold = _cpuCapacityThreshold.valueIn(cluster.getId());
+            threshold = CPUCapacityThreshold.valueIn(cluster.getId());
             break;
         case Capacity.CAPACITY_TYPE_MEMORY:
-            threshold = _memoryCapacityThreshold.valueIn(cluster.getId());
+            threshold = MemoryCapacityThreshold.valueIn(cluster.getId());
             break;
         default:
             threshold = _capacityTypeThresholdMap.get(capacityType);
@@ -29,8 +29,8 @@ import javax.naming.ConfigurationException;

 import org.apache.log4j.Logger;

+import org.apache.cloudstack.framework.config.ConfigDepot;
 import org.apache.cloudstack.framework.config.ConfigKey;
-import org.apache.cloudstack.framework.config.ConfigValue;
 import org.apache.cloudstack.framework.config.Configurable;
 import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
 import org.apache.cloudstack.framework.messagebus.MessageBus;
@@ -76,7 +76,6 @@ import com.cloud.storage.dao.VolumeDao;
 import com.cloud.utils.DateUtil;
 import com.cloud.utils.NumbersUtil;
 import com.cloud.utils.Pair;
-import com.cloud.utils.component.InjectConfig;
 import com.cloud.utils.component.ManagerBase;
 import com.cloud.utils.concurrency.NamedThreadFactory;
 import com.cloud.utils.db.DB;
@@ -130,14 +129,14 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager,
     protected UserVmDetailsDao _userVmDetailsDao;
     @Inject
     ClusterDao _clusterDao;
+    @Inject
+    ConfigDepot _configDepot;

     @Inject
     ClusterDetailsDao _clusterDetailsDao;
     private int _vmCapacityReleaseInterval;
     private ScheduledExecutorService _executor;
     long _extraBytesPerVolume = 0;
-    @InjectConfig(key = StorageOverprovisioningFactorCK)
-    private ConfigValue<Double> _storageOverProvisioningFactor;

     @Inject
     MessageBus _messageBus;
@@ -40,6 +40,8 @@ import javax.naming.NamingException;
 import javax.naming.directory.DirContext;
 import javax.naming.directory.InitialDirContext;

+import org.apache.log4j.Logger;
+
 import org.apache.cloudstack.acl.SecurityChecker;
 import org.apache.cloudstack.affinity.AffinityGroup;
 import org.apache.cloudstack.affinity.AffinityGroupService;
@@ -72,7 +74,6 @@ import org.apache.cloudstack.context.CallContext;
 import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
 import org.apache.cloudstack.framework.config.ConfigKey;
-import org.apache.cloudstack.framework.config.ConfigValue;
 import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
 import org.apache.cloudstack.framework.config.impl.ConfigurationVO;
 import org.apache.cloudstack.region.PortableIp;
@@ -88,7 +89,6 @@ import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
 import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO;
 import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
 import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
-import org.apache.log4j.Logger;

 import com.cloud.alert.AlertManager;
 import com.cloud.api.ApiDBUtils;
@@ -190,7 +190,6 @@ import com.cloud.user.dao.AccountDao;
 import com.cloud.utils.NumbersUtil;
 import com.cloud.utils.Pair;
 import com.cloud.utils.StringUtils;
-import com.cloud.utils.component.InjectConfig;
 import com.cloud.utils.component.ManagerBase;
 import com.cloud.utils.crypt.DBEncryptionUtil;
 import com.cloud.utils.db.DB;
@@ -4628,9 +4627,6 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
         }
     }

-    @InjectConfig(key = NetworkOrchestrationService.NetworkThrottlingRateCK)
-    ConfigValue<Integer> _networkThrottlingRate;
-
     @Override
     public Integer getNetworkOfferingNetworkRate(long networkOfferingId, Long dataCenterId) {

@@ -4644,7 +4640,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
         if (no.getRateMbps() != null) {
             networkRate = no.getRateMbps();
         } else {
-            networkRate = _networkThrottlingRate.valueIn(dataCenterId);
+            networkRate = NetworkOrchestrationService.NetworkThrottlingRate.valueIn(dataCenterId);
         }

         // networkRate is unsigned int in netowrkOfferings table, and can't be
@@ -4767,7 +4763,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
         // for domain router service offering, get network rate from
         if (offering.getSystemVmType() != null
                 && offering.getSystemVmType().equalsIgnoreCase(VirtualMachine.Type.DomainRouter.toString())) {
-            networkRate = _networkThrottlingRate.valueIn(dataCenterId);
+            networkRate = NetworkOrchestrationService.NetworkThrottlingRate.valueIn(dataCenterId);
         } else {
             networkRate = Integer.parseInt(_configDao.getValue(Config.VmNetworkThrottlingRate.key()));
         }
@@ -995,7 +995,7 @@ VirtualMachineGuru, SystemVmLoadScanHandler<Long>, ResourceStateAdapter {

     private synchronized Map<Long, ZoneHostInfo> getZoneHostInfo() {
         Date cutTime = DateUtil.currentGMTTime();
-        List<RunningHostCountInfo> l = _hostDao.getRunningHostCounts(new Date(cutTime.getTime() - _clusterMgr.getHeartbeatThreshold()));
+        List<RunningHostCountInfo> l = _hostDao.getRunningHostCounts(new Date(cutTime.getTime() - ClusterManager.HeartbeatThreshold.value()));

         RunningHostInfoAgregator aggregator = new RunningHostInfoAgregator();
         if (l.size() > 0) {
@@ -21,8 +21,6 @@ import java.util.Map;

 import javax.inject.Inject;

-import org.apache.cloudstack.framework.config.ConfigValue;
-
 import com.cloud.agent.api.Command;
 import com.cloud.agent.api.to.DiskTO;
 import com.cloud.agent.api.to.NicTO;
@@ -32,7 +30,6 @@ import com.cloud.server.ConfigurationServer;
 import com.cloud.storage.dao.VMTemplateDetailsDao;
 import com.cloud.utils.Pair;
 import com.cloud.utils.component.AdapterBase;
-import com.cloud.utils.component.InjectConfig;
 import com.cloud.vm.NicProfile;
 import com.cloud.vm.NicVO;
 import com.cloud.vm.UserVmManager;
@@ -51,9 +48,6 @@ public abstract class HypervisorGuruBase extends AdapterBase implements Hypervis
     @Inject NicSecondaryIpDao _nicSecIpDao;
     @Inject ConfigurationServer _configServer;

-    @InjectConfig(key = UserVmManager.EnableDynamicallyScaleVmCK)
-    ConfigValue<Boolean> _enableDynamicallyScaleVm;
-
     protected HypervisorGuruBase() {
         super();
     }
@@ -130,7 +124,7 @@ public abstract class HypervisorGuruBase extends AdapterBase implements Hypervis
         // Workaround to make sure the TO has the UUID we need for Niciri integration
         VMInstanceVO vmInstance = _virtualMachineDao.findById(to.getId());
         // check if XStools/VMWare tools are present in the VM and dynamic scaling feature is enabled (per zone/global)
-        Boolean isDynamicallyScalable = vmInstance.isDynamicallyScalable() && _enableDynamicallyScaleVm.valueIn(vm.getDataCenterId());
+        Boolean isDynamicallyScalable = vmInstance.isDynamicallyScalable() && UserVmManager.EnableDynamicallyScaleVm.valueIn(vm.getDataCenterId());
         to.setEnableDynamicallyScaleVm(isDynamicallyScalable);
         to.setUuid(vmInstance.getUuid());

@@ -35,7 +35,6 @@ import org.apache.cloudstack.acl.SecurityChecker.AccessType;
 import org.apache.cloudstack.context.CallContext;
 import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService;
 import org.apache.cloudstack.framework.config.ConfigKey;
-import org.apache.cloudstack.framework.config.ConfigValue;
 import org.apache.cloudstack.framework.config.Configurable;
 import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
 import org.apache.cloudstack.region.PortableIp;
@@ -134,7 +133,6 @@ import com.cloud.user.dao.AccountDao;
 import com.cloud.user.dao.UserDao;
 import com.cloud.utils.Journal;
 import com.cloud.utils.Pair;
-import com.cloud.utils.component.InjectConfig;
 import com.cloud.utils.component.ManagerBase;
 import com.cloud.utils.db.DB;
 import com.cloud.utils.db.EntityManager;
@@ -163,9 +161,6 @@ import com.cloud.vm.dao.VMInstanceDao;
 public class IpAddressManagerImpl extends ManagerBase implements IpAddressManager, Configurable {
     private static final Logger s_logger = Logger.getLogger(IpAddressManagerImpl.class);

-    @InjectConfig(key = UseSystemPublicIpsCK)
-    ConfigValue<Boolean> _useSystemPublicIps;
-
     @Inject
     NetworkOrchestrationService _networkMgr = null;
     @Inject
@@ -727,7 +722,7 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage
         // If all the dedicated IPs of the owner are in use fetch an IP from the system pool
         if (addrs.size() == 0 && fetchFromDedicatedRange) {
             // Verify if account is allowed to acquire IPs from the system
-            boolean useSystemIps = _useSystemPublicIps.valueIn(owner.getId());
+            boolean useSystemIps = UseSystemPublicIps.valueIn(owner.getId());
             if (useSystemIps && nonDedicatedVlanDbIds != null && !nonDedicatedVlanDbIds.isEmpty()) {
                 fetchFromDedicatedRange = false;
                 sc.setParameters("vlanId", nonDedicatedVlanDbIds.toArray());
@@ -1098,7 +1093,6 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage
         ipaddr.setAllocatedToAccountId(ipOwner.getId());
         ipaddr = _ipAddressDao.persist(ipaddr);

-        String guestType = vlan.getVlanType().toString();
         UsageEventUtils.publishUsageEvent(EventTypes.EVENT_PORTABLE_IP_ASSIGN,
                 ipaddr.getId(),
                 ipaddr.getDataCenterId(),
@@ -140,7 +140,7 @@ public class ExternalGuestNetworkGuru extends GuestNetworkGuru {
         int vlanTag;
         if (config.getBroadcastUri() == null) {
             String vnet = _dcDao.allocateVnet(zone.getId(), config.getPhysicalNetworkId(), config.getAccountId(), context.getReservationId(),
-                _useSystemGuestVlans.valueIn(config.getAccountId()));
+                UseSystemGuestVlans.valueIn(config.getAccountId()));

             try {
                 vlanTag = Integer.parseInt(vnet);
@@ -28,7 +28,6 @@ import org.apache.log4j.Logger;
 import org.apache.cloudstack.context.CallContext;
 import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService;
 import org.apache.cloudstack.framework.config.ConfigKey;
-import org.apache.cloudstack.framework.config.ConfigValue;
 import org.apache.cloudstack.framework.config.Configurable;
 import org.apache.cloudstack.framework.config.dao.ConfigurationDao;

@@ -69,7 +68,6 @@ import com.cloud.server.ConfigurationServer;
 import com.cloud.user.Account;
 import com.cloud.utils.Pair;
 import com.cloud.utils.component.AdapterBase;
-import com.cloud.utils.component.InjectConfig;
 import com.cloud.utils.db.DB;
 import com.cloud.utils.db.Transaction;
 import com.cloud.utils.exception.CloudRuntimeException;
@@ -112,9 +110,6 @@ public abstract class GuestNetworkGuru extends AdapterBase implements NetworkGur
             "If true, when account has dedicated guest vlan range(s), once the vlans dedicated to the account have been consumed vlans will be allocated from the system pool", false,
             ConfigKey.Scope.Account);

-    @InjectConfig(key = "use.system.guest.vlans")
-    ConfigValue<Boolean> _useSystemGuestVlans;
-
     private static final TrafficType[] _trafficTypes = {TrafficType.Guest};

     // Currently set to anything except STT for the Nicira integration.
@@ -275,7 +270,7 @@ public abstract class GuestNetworkGuru extends AdapterBase implements NetworkGur
     protected void allocateVnet(Network network, NetworkVO implemented, long dcId,
             long physicalNetworkId, String reservationId) throws InsufficientVirtualNetworkCapcityException {
         if (network.getBroadcastUri() == null) {
-            String vnet = _dcDao.allocateVnet(dcId, physicalNetworkId, network.getAccountId(), reservationId, _useSystemGuestVlans.valueIn(network.getAccountId()));
+            String vnet = _dcDao.allocateVnet(dcId, physicalNetworkId, network.getAccountId(), reservationId, UseSystemGuestVlans.valueIn(network.getAccountId()));
             if (vnet == null) {
                 throw new InsufficientVirtualNetworkCapcityException("Unable to allocate vnet as a " +
                     "part of network " + network + " implement ", DataCenter.class, dcId);
@@ -49,7 +49,6 @@ import org.apache.cloudstack.context.ServerContexts;
 import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService;
 import org.apache.cloudstack.framework.config.ConfigDepot;
 import org.apache.cloudstack.framework.config.ConfigKey;
-import org.apache.cloudstack.framework.config.ConfigValue;
 import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
 import org.apache.cloudstack.utils.identity.ManagementServerNode;

@@ -221,7 +220,6 @@ import com.cloud.utils.NumbersUtil;
 import com.cloud.utils.Pair;
 import com.cloud.utils.PasswordGenerator;
 import com.cloud.utils.StringUtils;
-import com.cloud.utils.component.InjectConfig;
 import com.cloud.utils.component.ManagerBase;
 import com.cloud.utils.concurrency.NamedThreadFactory;
 import com.cloud.utils.db.DB;
@@ -640,29 +638,11 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V
             throw new CloudRuntimeException("Failed to reboot router " + router);
         }
     }

-    @InjectConfig(key = RouterTemplateXenCK)
-    ConfigValue<String> _routerTemplateXen;
-    @InjectConfig(key = RouterTemplateKvmCK)
-    ConfigValue<String> _routerTemplateKvm;
-    @InjectConfig(key = RouterTemplateVmwareCK)
-    ConfigValue<String> _routerTemplateVmware;
-    @InjectConfig(key = RouterTemplateHyperVCK)
-    ConfigValue<String> _routerTemplateHyperV;
-    @InjectConfig(key = RouterTemplateLxcCK)
-    ConfigValue<String> _routerTemplateLxc;
-    @InjectConfig(key = NetworkOrchestrationService.NetworkLockTimeoutCK)
-    ConfigValue<Integer> _networkLockTimeout;
-    @InjectConfig(key = "use.external.dns")
-    ConfigValue<Boolean> _useExternalDnsServers;
-
     static final ConfigKey<Boolean> UseExternalDnsServers = new ConfigKey<Boolean>(Boolean.class, "use.external.dns", "Advanced", "false",
             "Bypass internal dns, use external dns1 and dns2", true, ConfigKey.Scope.Zone, null);
     @Override
     public boolean configure(final String name, final Map<String, Object> params) throws ConfigurationException {
-
-        _useExternalDnsServers = _configDepot.get(UseExternalDnsServers);
-
         _executor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("RouterMonitor"));
         _checkExecutor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("RouterStatusMonitor"));
         _networkStatsUpdateExecutor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("NetworkStatsUpdater"));
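The hunk above shows the pattern this commit applies throughout: the @InjectConfig/ConfigValue field pair is removed, the setting is declared once as a static ConfigKey on the owning component, and call sites read it directly. The sketch below illustrates that shape only. The class name ExampleComponent and the key example.feature.enabled are hypothetical; the ConfigKey constructor arguments, value(), valueIn() and the two Configurable methods mirror what is visible elsewhere in this diff.

import org.apache.cloudstack.framework.config.ConfigKey;
import org.apache.cloudstack.framework.config.Configurable;

// Hypothetical component, for illustration only.
public class ExampleComponent implements Configurable {

    // Declared once, statically: category, type, key name, default, description, isDynamic, scope.
    static final ConfigKey<Boolean> ExampleFeatureEnabled = new ConfigKey<Boolean>("Advanced", Boolean.class,
            "example.feature.enabled", "false", "Illustrative toggle used only in this sketch", true, ConfigKey.Scope.Zone);

    // Effective global value.
    boolean featureEnabledGlobally() {
        return ExampleFeatureEnabled.value();
    }

    // Value resolved for a particular scope id (here a zone), falling back to the global setting.
    boolean featureEnabledIn(long zoneId) {
        return ExampleFeatureEnabled.valueIn(zoneId);
    }

    // The key is published to the config framework through the Configurable contract.
    @Override
    public ConfigKey<?>[] getConfigKeys() {
        return new ConfigKey<?>[] {ExampleFeatureEnabled};
    }

    @Override
    public String getConfigComponentName() {
        return ExampleComponent.class.getSimpleName();
    }
}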
@@ -1451,7 +1431,7 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V
         InsufficientCapacityException, ResourceUnavailableException {

         List<DomainRouterVO> routers = new ArrayList<DomainRouterVO>();
-        Network lock = _networkDao.acquireInLockTable(guestNetwork.getId(), _networkLockTimeout.value());
+        Network lock = _networkDao.acquireInLockTable(guestNetwork.getId(), NetworkOrchestrationService.NetworkLockTimeout.value());
         if (lock == null) {
             throw new ConcurrentOperationException("Unable to lock network " + guestNetwork.getId());
         }
@@ -1646,19 +1626,19 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V
         String templateName = null;
         switch (hType) {
         case XenServer:
-            templateName = _routerTemplateXen.valueIn(dest.getDataCenter().getId());
+            templateName = RouterTemplateXen.valueIn(dest.getDataCenter().getId());
             break;
         case KVM:
-            templateName = _routerTemplateKvm.valueIn(dest.getDataCenter().getId());
+            templateName = RouterTemplateKvm.valueIn(dest.getDataCenter().getId());
             break;
         case VMware:
-            templateName = _routerTemplateVmware.valueIn(dest.getDataCenter().getId());
+            templateName = RouterTemplateVmware.valueIn(dest.getDataCenter().getId());
             break;
         case Hyperv:
-            templateName = _routerTemplateHyperV.valueIn(dest.getDataCenter().getId());
+            templateName = RouterTemplateHyperV.valueIn(dest.getDataCenter().getId());
             break;
         case LXC:
-            templateName = _routerTemplateLxc.valueIn(dest.getDataCenter().getId());
+            templateName = RouterTemplateLxc.valueIn(dest.getDataCenter().getId());
             break;
         default: break;
         }
@@ -2176,7 +2156,7 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V

         boolean useExtDns = !dnsProvided;
         /* For backward compatibility */
-        useExtDns = _useExternalDnsServers.valueIn(dc.getId());
+        useExtDns = UseExternalDnsServers.valueIn(dc.getId());

         if (useExtDns) {
             buf.append(" useextdns=true");
@@ -39,7 +39,6 @@ import org.apache.cloudstack.api.command.user.vpc.ListStaticRoutesCmd;
 import org.apache.cloudstack.context.CallContext;
 import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService;
 import org.apache.cloudstack.framework.config.ConfigDepot;
-import org.apache.cloudstack.framework.config.ConfigValue;
 import org.apache.cloudstack.framework.config.dao.ConfigurationDao;

 import com.cloud.configuration.Config;
@@ -198,8 +197,6 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis
     private final List<Service> nonSupportedServices = Arrays.asList(Service.SecurityGroup, Service.Firewall);
     private final List<Provider> supportedProviders = Arrays.asList(Provider.VPCVirtualRouter, Provider.NiciraNvp, Provider.InternalLbVm, Provider.Netscaler);

-    ConfigValue<String> _networkDomain;
-
     int _cleanupInterval;
     int _maxNetworks;
     SearchBuilder<IPAddressVO> IpAddressSearch;
@@ -260,8 +257,6 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis
         String maxNtwks = configs.get(Config.VpcMaxNetworks.key());
         _maxNetworks = NumbersUtil.parseInt(maxNtwks, 3); // max=3 is default

-        _networkDomain = _configDepot.get(NetworkOrchestrationService.GuestDomainSuffix);
-
         IpAddressSearch = _ipAddressDao.createSearchBuilder();
         IpAddressSearch.and("accountId", IpAddressSearch.entity().getAllocatedToAccountId(), Op.EQ);
         IpAddressSearch.and("dataCenterId", IpAddressSearch.entity().getDataCenterId(), Op.EQ);
@@ -620,7 +615,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis

             // 2) If null, generate networkDomain using domain suffix from the global config variables
             if (networkDomain == null) {
-                networkDomain = "cs" + Long.toHexString(owner.getId()) + _networkDomain.valueIn(zoneId);
+                networkDomain = "cs" + Long.toHexString(owner.getId()) + NetworkOrchestrationService.GuestDomainSuffix.valueIn(zoneId);
             }
         }

@@ -1772,7 +1767,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis
     }

     protected boolean isCidrBlacklisted(String cidr, long zoneId) {
-        String routesStr = _networkDomain.valueIn(zoneId);
+        String routesStr = NetworkOrchestrationService.GuestDomainSuffix.valueIn(zoneId);
         if (routesStr != null && !routesStr.isEmpty()) {
             String[] cidrBlackList = routesStr.split(",");

@@ -31,7 +31,6 @@ import org.apache.cloudstack.api.command.user.vpn.ListRemoteAccessVpnsCmd;
 import org.apache.cloudstack.api.command.user.vpn.ListVpnUsersCmd;
 import org.apache.cloudstack.context.CallContext;
 import org.apache.cloudstack.framework.config.ConfigKey;
-import org.apache.cloudstack.framework.config.ConfigValue;
 import org.apache.cloudstack.framework.config.Configurable;
 import org.apache.cloudstack.framework.config.dao.ConfigurationDao;

@@ -75,7 +74,6 @@ import com.cloud.utils.NumbersUtil;
 import com.cloud.utils.Pair;
 import com.cloud.utils.PasswordGenerator;
 import com.cloud.utils.Ternary;
-import com.cloud.utils.component.InjectConfig;
 import com.cloud.utils.component.ManagerBase;
 import com.cloud.utils.db.DB;
 import com.cloud.utils.db.Filter;
@@ -93,9 +91,6 @@ public class RemoteAccessVpnManagerImpl extends ManagerBase implements RemoteAcc
     static final ConfigKey<String> RemoteAccessVpnClientIpRange = new ConfigKey<String>("Network", String.class, RemoteAccessVpnClientIpRangeCK, "10.1.2.1-10.1.2.8",
             "The range of ips to be allocated to remote access vpn clients. The first ip in the range is used by the VPN server", false, ConfigKey.Scope.Account);

-    @InjectConfig(key = RemoteAccessVpnClientIpRangeCK)
-    ConfigValue<String> _remoteAccessVpnClientIpRange;
-
     @Inject AccountDao _accountDao;
     @Inject VpnUserDao _vpnUsersDao;
     @Inject RemoteAccessVpnDao _remoteAccessVpnDao;
@@ -166,7 +161,7 @@ public class RemoteAccessVpnManagerImpl extends ManagerBase implements RemoteAcc
         }

         if (ipRange == null) {
-            ipRange = _remoteAccessVpnClientIpRange.valueIn(ipAddr.getAccountId());
+            ipRange = RemoteAccessVpnClientIpRange.valueIn(ipAddr.getAccountId());
         }
         String[] range = ipRange.split("-");
         if (range.length != 2) {
@@ -208,7 +203,7 @@ public class RemoteAccessVpnManagerImpl extends ManagerBase implements RemoteAcc
     }

     private void validateRemoteAccessVpnConfiguration() throws ConfigurationException {
-        String ipRange = _remoteAccessVpnClientIpRange.value();
+        String ipRange = RemoteAccessVpnClientIpRange.value();
         if (ipRange == null) {
             s_logger.warn("Remote Access VPN global configuration missing client ip range -- ignoring");
             return;
@@ -46,7 +46,6 @@ import org.apache.cloudstack.api.command.admin.host.ReconnectHostCmd;
 import org.apache.cloudstack.api.command.admin.host.UpdateHostCmd;
 import org.apache.cloudstack.api.command.admin.host.UpdateHostPasswordCmd;
 import org.apache.cloudstack.context.CallContext;
-import org.apache.cloudstack.framework.config.ConfigValue;
 import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
 import org.apache.cloudstack.region.dao.RegionDao;
 import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
@@ -136,7 +135,6 @@ import com.cloud.user.AccountManager;
 import com.cloud.user.User;
 import com.cloud.utils.StringUtils;
 import com.cloud.utils.UriUtils;
-import com.cloud.utils.component.InjectConfig;
 import com.cloud.utils.component.Manager;
 import com.cloud.utils.component.ManagerBase;
 import com.cloud.utils.db.DB;
@@ -167,11 +165,6 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager,

     Gson _gson;

-    @InjectConfig(key = CapacityManager.CpuOverprovisioningFactorCK)
-    ConfigValue<Float> _cpuOverprovisioningFactor;
-    @InjectConfig(key = CapacityManager.MemOverprovisioningFactorCK)
-    ConfigValue<Float> _memOverprovisioningFactor;
-
     @Inject
     AccountManager _accountMgr;
     @Inject
@@ -478,8 +471,8 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager,

         if (clusterType == Cluster.ClusterType.CloudManaged) {
             Map<String, String> details = new HashMap<String, String>();
-            details.put("cpuOvercommitRatio", _cpuOverprovisioningFactor.value().toString());
-            details.put("memoryOvercommitRatio", _memOverprovisioningFactor.value().toString());
+            details.put("cpuOvercommitRatio", CapacityManager.CpuOverprovisioningFactor.value().toString());
+            details.put("memoryOvercommitRatio", CapacityManager.MemOverprovisioningFactor.value().toString());
             _clusterDetailsDao.persist(cluster.getId(), details);
             return result;
         }
@@ -489,8 +482,8 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager,
         details.put("url", url);
         details.put("username", username);
         details.put("password", password);
-        details.put("cpuOvercommitRatio", _cpuOverprovisioningFactor.value().toString());
-        details.put("memoryOvercommitRatio", _memOverprovisioningFactor.value().toString());
+        details.put("cpuOvercommitRatio", CapacityManager.CpuOverprovisioningFactor.value().toString());
+        details.put("memoryOvercommitRatio", CapacityManager.MemOverprovisioningFactor.value().toString());
         _clusterDetailsDao.persist(cluster.getId(), details);

         boolean success = false;
@@ -430,7 +430,6 @@ import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
 import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
 import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory;
 import org.apache.cloudstack.framework.config.ConfigKey;
-import org.apache.cloudstack.framework.config.ConfigValue;
 import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
 import org.apache.cloudstack.framework.config.impl.ConfigurationVO;
 import org.apache.cloudstack.storage.datastore.db.ImageStoreDao;
@@ -558,7 +557,6 @@ import com.cloud.utils.Pair;
 import com.cloud.utils.PasswordGenerator;
 import com.cloud.utils.Ternary;
 import com.cloud.utils.component.ComponentLifecycle;
-import com.cloud.utils.component.InjectConfig;
 import com.cloud.utils.component.ManagerBase;
 import com.cloud.utils.concurrency.NamedThreadFactory;
 import com.cloud.utils.crypt.DBEncryptionUtil;
@@ -3238,9 +3236,6 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe

         return cloudParams;
     }

-    @InjectConfig(key = TemplateManager.AllowPublicUserTemplatesCK)
-    ConfigValue<Boolean> _allowPublicUserTemplates;
-
     @Override
     public Map<String, Object> listCapabilities(ListCapabilitiesCmd cmd) {
@@ -3265,7 +3260,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe

         long diskOffMaxSize = Long.valueOf(_configDao.getValue(Config.CustomDiskOfferingMaxSize.key()));

-        boolean userPublicTemplateEnabled = _allowPublicUserTemplates.valueIn(caller.getId());
+        boolean userPublicTemplateEnabled = TemplateManager.AllowPublicUserTemplates.valueIn(caller.getId());

         // add some parameters UI needs to handle API throttling
         boolean apiLimitEnabled = Boolean.parseBoolean(_configDao.getValue(Config.ApiLimitEnabled.key()));
@@ -73,7 +73,6 @@ import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService;
 import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService.VolumeApiResult;
 import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
 import org.apache.cloudstack.framework.async.AsyncCallFuture;
-import org.apache.cloudstack.framework.config.ConfigValue;
 import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
 import org.apache.cloudstack.storage.datastore.db.ImageStoreDao;
 import org.apache.cloudstack.storage.datastore.db.ImageStoreDetailsDao;
@@ -153,7 +152,6 @@ import com.cloud.utils.Pair;
 import com.cloud.utils.StringUtils;
 import com.cloud.utils.UriUtils;
 import com.cloud.utils.component.ComponentContext;
-import com.cloud.utils.component.InjectConfig;
 import com.cloud.utils.component.ManagerBase;
 import com.cloud.utils.concurrency.NamedThreadFactory;
 import com.cloud.utils.db.DB;
@@ -858,12 +856,9 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
             listener.hostConnect(hostId, pool.getId());
         }

-    @InjectConfig(key = CapacityManager.StorageOverprovisioningFactorCK)
-    ConfigValue<Double> _storageOverprovisioningFactor;
-
     @Override
     public BigDecimal getStorageOverProvisioningFactor(Long dcId) {
-        return new BigDecimal(_storageOverprovisioningFactor.valueIn(dcId));
+        return new BigDecimal(CapacityManager.StorageOverprovisioningFactor.valueIn(dcId));
     }

     @Override
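At call sites the injected ConfigValue disappears as well: code now references the ConfigKey published by the interface that owns it, as getStorageOverProvisioningFactor() above does with CapacityManager.StorageOverprovisioningFactor. A minimal sketch of such a consumer follows; the class name is hypothetical and the CapacityManager import path is assumed to match the repository layout, while the two key reads are exactly the ones this diff switches to.

import java.math.BigDecimal;

import com.cloud.capacity.CapacityManager; // assumed package path for CapacityManager

// Hypothetical consumer, for illustration only.
class OvercommitReader {

    // Zone-scoped read: valueIn(dcId) returns the per-zone override when one is set.
    BigDecimal storageOverprovisioningFactor(long dcId) {
        return new BigDecimal(CapacityManager.StorageOverprovisioningFactor.valueIn(dcId));
    }

    // Global read: value() returns the effective global setting.
    String cpuOvercommitRatio() {
        return CapacityManager.CpuOverprovisioningFactor.value().toString();
    }
}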
@@ -1462,12 +1457,9 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
             }
         }

-    @InjectConfig(key = CapacityManager.StorageCapacityDisableThresholdCK)
-    ConfigValue<Float> _storageCapacityDisableThreshold;
-
     private boolean checkUsagedSpace(StoragePool pool) {
         StatsCollector sc = StatsCollector.getInstance();
-        double storageUsedThreshold = _storageCapacityDisableThreshold.valueIn(pool.getDataCenterId());
+        double storageUsedThreshold = CapacityManager.StorageCapacityDisableThreshold.valueIn(pool.getDataCenterId());
         if (sc != null) {
             long totalSize = pool.getCapacityBytes();
             StorageStats stats = sc.getStoragePoolStats(pool.getId());
@@ -1537,9 +1529,6 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
         return futureIops <= pool.getCapacityIops();
     }

-    @InjectConfig(key = CapacityManager.StorageAllocatedCapacityDisableThresholdCK)
-    ConfigValue<Double> _storageAllocatedCapacityDisableThreshold;
-
     @Override
     public boolean storagePoolHasEnoughSpace(List<Volume> volumes,
             StoragePool pool) {
@@ -1575,7 +1564,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
             totalOverProvCapacity = pool.getCapacityBytes();
         }

-        double storageAllocatedThreshold = _storageAllocatedCapacityDisableThreshold.valueIn(pool.getDataCenterId());
+        double storageAllocatedThreshold = CapacityManager.StorageAllocatedCapacityDisableThreshold.valueIn(pool.getDataCenterId());
         if (s_logger.isDebugEnabled()) {
             s_logger.debug("Checking pool: " + pool.getId() + " for volume allocation " + volumes.toString() + ", maxSize : " + totalOverProvCapacity
                     + ", totalAllocatedSize : " + allocatedSizeWithtemplate + ", askingSize : " + totalAskingSize + ", allocated disable threshold: "
@@ -763,7 +763,7 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar

     private synchronized Map<Long, ZoneHostInfo> getZoneHostInfo() {
         Date cutTime = DateUtil.currentGMTTime();
-        List<RunningHostCountInfo> l = _hostDao.getRunningHostCounts(new Date(cutTime.getTime() - _clusterMgr.getHeartbeatThreshold()));
+        List<RunningHostCountInfo> l = _hostDao.getRunningHostCounts(new Date(cutTime.getTime() - ClusterManager.HeartbeatThreshold.value()));

         RunningHostInfoAgregator aggregator = new RunningHostInfoAgregator();
         if (l.size() > 0) {
@@ -30,7 +30,6 @@ import org.apache.cloudstack.api.command.user.template.DeleteTemplateCmd;
 import org.apache.cloudstack.api.command.user.template.ExtractTemplateCmd;
 import org.apache.cloudstack.api.command.user.template.RegisterTemplateCmd;
 import org.apache.cloudstack.context.CallContext;
-import org.apache.cloudstack.framework.config.ConfigValue;
 import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
 import org.apache.cloudstack.storage.datastore.db.ImageStoreDao;
 import org.apache.cloudstack.storage.datastore.db.ImageStoreVO;
@@ -66,7 +65,6 @@ import com.cloud.user.dao.AccountDao;
 import com.cloud.user.dao.UserDao;
 import com.cloud.utils.EnumUtils;
 import com.cloud.utils.component.AdapterBase;
-import com.cloud.utils.component.InjectConfig;
 import com.cloud.utils.exception.CloudRuntimeException;
 import com.cloud.vm.UserVmVO;
 import com.cloud.vm.dao.UserVmDao;
@@ -92,10 +90,6 @@ public abstract class TemplateAdapterBase extends AdapterBase implements Templat
     @Inject ConfigurationServer _configServer;
     @Inject ProjectManager _projectMgr;

-    @InjectConfig(key = TemplateManager.AllowPublicUserTemplatesCK)
-    ConfigValue<Boolean> _allowPublicUserTemplates;
-
-
     @Override
     public boolean stop() {
         return true;
@@ -175,7 +169,7 @@ public abstract class TemplateAdapterBase extends AdapterBase implements Templat
     }

         // check whether owner can create public templates
-        boolean allowPublicUserTemplates = _allowPublicUserTemplates.valueIn(templateOwner.getId());
+        boolean allowPublicUserTemplates = TemplateManager.AllowPublicUserTemplates.valueIn(templateOwner.getId());
         if (!isAdmin && !allowPublicUserTemplates && isPublic) {
             throw new InvalidParameterValueException("Only private templates/ISO can be created.");
         }
@@ -70,7 +70,6 @@ import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
 import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
 import org.apache.cloudstack.framework.async.AsyncCallFuture;
 import org.apache.cloudstack.framework.config.ConfigKey;
-import org.apache.cloudstack.framework.config.ConfigValue;
 import org.apache.cloudstack.framework.config.Configurable;
 import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
 import org.apache.cloudstack.framework.jobs.AsyncJobManager;

@@ -173,7 +172,6 @@ import com.cloud.utils.EnumUtils;
 import com.cloud.utils.NumbersUtil;
 import com.cloud.utils.Pair;
 import com.cloud.utils.component.AdapterBase;
-import com.cloud.utils.component.InjectConfig;
 import com.cloud.utils.component.ManagerBase;
 import com.cloud.utils.concurrency.NamedThreadFactory;
 import com.cloud.utils.db.DB;

@@ -189,8 +187,6 @@ import com.cloud.vm.dao.VMInstanceDao;
 @Local(value = { TemplateManager.class, TemplateApiService.class })
 public class TemplateManagerImpl extends ManagerBase implements TemplateManager, TemplateApiService, Configurable {
 private final static Logger s_logger = Logger.getLogger(TemplateManagerImpl.class);
-@InjectConfig(key = TemplateManager.AllowPublicUserTemplatesCK)
-ConfigValue<Boolean> _allowPublicUserTemplates;
 
 @Inject
 VMTemplateDao _tmpltDao;

@@ -1220,7 +1216,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager,
 boolean isAdmin = _accountMgr.isAdmin(caller.getType());
 // check configuration parameter(allow.public.user.templates) value for
 // the template owner
-boolean allowPublicUserTemplates = _allowPublicUserTemplates.valueIn(template.getAccountId());
+boolean allowPublicUserTemplates = AllowPublicUserTemplates.valueIn(template.getAccountId());
 if (!isAdmin && !allowPublicUserTemplates && isPublic != null && isPublic) {
 throw new InvalidParameterValueException("Only private " + mediaType + "s can be created.");
 }

@@ -1482,7 +1478,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager,
 }
 boolean isDynamicScalingEnabled = cmd.isDynamicallyScalable();
 // check whether template owner can create public templates
-boolean allowPublicUserTemplates = _allowPublicUserTemplates.valueIn(templateOwner.getId());
+boolean allowPublicUserTemplates = AllowPublicUserTemplates.valueIn(templateOwner.getId());
 if (!isAdmin && !allowPublicUserTemplates && isPublic) {
 throw new PermissionDeniedException("Failed to create template " + name + ", only private templates can be created.");
 }
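TemplateManagerImpl gets the same substitution twice, and the import context above shows the class implementing Configurable. The Configurable contract itself is not part of these hunks; assuming its usual two methods (getConfigComponentName and getConfigKeys, an assumption, not something shown here), a component that consumes AllowPublicUserTemplates would advertise the key roughly like this:

import org.apache.cloudstack.framework.config.ConfigKey;
import org.apache.cloudstack.framework.config.Configurable;

import com.cloud.template.TemplateManager;

// Sketch only: the two Configurable methods below are assumed, not shown in the diff.
public class TemplateConfigDescriptor implements Configurable {

    @Override
    public String getConfigComponentName() {
        // Groups the keys under one component name in the configuration listing.
        return TemplateConfigDescriptor.class.getSimpleName();
    }

    @Override
    public ConfigKey<?>[] getConfigKeys() {
        // Only keys declared elsewhere are returned; constructing a ConfigKey is
        // avoided here because its constructor signature is not in these hunks.
        return new ConfigKey<?>[] {TemplateManager.AllowPublicUserTemplates};
    }
}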
@@ -69,7 +69,6 @@ import org.apache.cloudstack.engine.service.api.OrchestrationService;
 import org.apache.cloudstack.engine.subsystem.api.storage.TemplateDataFactory;
 import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
 import org.apache.cloudstack.framework.config.ConfigKey;
-import org.apache.cloudstack.framework.config.ConfigValue;
 import org.apache.cloudstack.framework.config.Configurable;
 import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
 import org.apache.cloudstack.framework.jobs.AsyncJobManager;

@@ -272,9 +271,6 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
 linked
 }
 
-@InjectConfig(key = EnableDynamicallyScaleVmCK)
-ConfigValue<Boolean> _enableDynamicallyScaleVm;
-
 @Inject
 EntityManager _entityMgr;
 @Inject

@@ -1270,7 +1266,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
 if (vmInstance.getState().equals(State.Running)) {
 int retry = _scaleRetry;
 ExcludeList excludes = new ExcludeList();
-boolean enableDynamicallyScaleVm = _enableDynamicallyScaleVm.valueIn(vmInstance.getDataCenterId());
+boolean enableDynamicallyScaleVm = EnableDynamicallyScaleVm.valueIn(vmInstance.getDataCenterId());
 if(!enableDynamicallyScaleVm){
 throw new PermissionDeniedException("Dynamically scaling virtual machines is disabled for this zone, please contact your admin");
 }
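UserVmManagerImpl receives the identical treatment for dynamic scaling, except the scope is the zone: EnableDynamicallyScaleVm.valueIn(vmInstance.getDataCenterId()). A small stand-alone guard in the same spirit follows; the class, the method name, and the idea of passing the key in as a parameter are illustrative, since the diff does not show where EnableDynamicallyScaleVm is declared.

import org.apache.cloudstack.framework.config.ConfigKey;

import com.cloud.exception.PermissionDeniedException;

// Illustrative guard mirroring the UserVmManagerImpl hunk: a zone-scoped
// boolean ConfigKey is consulted per data center before a VM may be scaled up.
public final class DynamicScaleGuard {

    private DynamicScaleGuard() {
    }

    // The key is handed in because its declaring class is not shown in the diff.
    public static void checkZoneAllowsDynamicScaling(ConfigKey<Boolean> enableDynamicallyScaleVm, long dataCenterId)
            throws PermissionDeniedException {
        if (!enableDynamicallyScaleVm.valueIn(dataCenterId)) {
            throw new PermissionDeniedException("Dynamically scaling virtual machines is disabled for this zone, please contact your admin");
        }
    }
}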
@@ -16,42 +16,25 @@
 // under the License.
 package com.cloud.utils.component;
 
-import java.lang.reflect.Field;
 import java.util.HashMap;
 import java.util.Map;
 
-import javax.annotation.PostConstruct;
-import javax.inject.Inject;
 import javax.naming.ConfigurationException;
 
-import com.cloud.utils.ReflectUtil;
+import org.apache.log4j.Logger;
 
 public class ComponentLifecycleBase implements ComponentLifecycle {
+private static final Logger s_logger = Logger.getLogger(ComponentLifecycleBase.class);
 
 protected String _name;
 protected int _runLevel;
 protected Map<String, Object> _configParams = new HashMap<String, Object>();
-@Inject
-protected ConfigInjector _configInjector;
 
 public ComponentLifecycleBase() {
 _name = this.getClass().getSimpleName();
 _runLevel = RUN_LEVEL_COMPONENT;
 }
 
-@PostConstruct
-protected void injectConfigs() {
-if (_configInjector != null) {
-for (Field field : ReflectUtil.getAllFieldsForClass(this.getClass(), Object.class)) {
-InjectConfig config = field.getAnnotation(InjectConfig.class);
-if (config != null) {
-field.setAccessible(true);
-_configInjector.inject(field, this, config.key());
-}
-}
-}
-}
-
 @Override
 public String getName() {
 return _name;
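With configuration values resolved through ConfigKey at read time, ComponentLifecycleBase no longer needs the @PostConstruct reflection pass that populated ConfigValue fields; the hunk above removes it and only adds a logger. A hedged sketch of a lifecycle component after the change follows; start() returning boolean is assumed to be part of the ComponentLifecycle contract (only stop() and getName() appear in this section), and the scope id used is a placeholder.

import org.apache.log4j.Logger;

import org.apache.cloudstack.framework.config.ConfigKey;

import com.cloud.utils.component.ComponentLifecycleBase;

// Illustrative subclass: no injection pass is awaited; the component simply
// reads whichever ConfigKey it needs when it needs the value.
public class ExampleLifecycleComponent extends ComponentLifecycleBase {
    private static final Logger s_logger = Logger.getLogger(ExampleLifecycleComponent.class);

    // Declared by whatever interface owns the setting; handed in here for the sketch.
    private final ConfigKey<Boolean> featureFlag;

    public ExampleLifecycleComponent(ConfigKey<Boolean> featureFlag) {
        this.featureFlag = featureFlag;
    }

    @Override
    public boolean start() {
        // valueIn() resolves the current value for the given scope at call time,
        // so there is no per-instance ConfigValue field left to populate.
        s_logger.info("feature enabled in scope 1: " + featureFlag.valueIn(1L));
        return true;
    }
}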
@@ -1,27 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-package com.cloud.utils.component;
-
-import java.lang.reflect.Field;
-
-/**
- * This interface can be implemented by someone who knows how to inject configurations.
- *
- */
-public interface ConfigInjector {
-void inject(Field field, Object obj, String key);
-}
@@ -1,29 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-package com.cloud.utils.component;
-
-import static java.lang.annotation.ElementType.FIELD;
-import static java.lang.annotation.RetentionPolicy.RUNTIME;
-
-import java.lang.annotation.Retention;
-import java.lang.annotation.Target;
-
-@Target({FIELD})
-@Retention(RUNTIME)
-public @interface InjectConfig {
-String key();
-}
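The last two hunks delete ConfigInjector.java and InjectConfig.java outright: once no ConfigValue fields remain to fill, the marker annotation and the injector hook have no callers. For reference, the retired mechanism condensed into one illustrative file (collapsed and simplified; getDeclaredFields() is used here instead of the original ReflectUtil walk over superclasses):

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import java.lang.reflect.Field;

// Condensed restatement of the pattern this commit retires: a marker
// annotation on fields plus a reflective pass that fills those fields
// from a key/value source. Collapsed into one file purely for illustration.
public class RetiredInjectionPattern {

    @Target(ElementType.FIELD)
    @Retention(RetentionPolicy.RUNTIME)
    @interface InjectConfig {
        String key();
    }

    interface ConfigInjector {
        void inject(Field field, Object obj, String key);
    }

    // Roughly what ComponentLifecycleBase.injectConfigs() did before the commit:
    // walk the fields, find the annotation, delegate to the injector.
    static void injectConfigs(Object target, ConfigInjector injector) {
        for (Field field : target.getClass().getDeclaredFields()) {
            InjectConfig config = field.getAnnotation(InjectConfig.class);
            if (config != null) {
                field.setAccessible(true);
                injector.inject(field, target, config.key());
            }
        }
    }
}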