Log4j2 refactor cloud engine orchestration module (#8742)

Co-authored-by: klaus.freitas.scclouds <klaus.freitas@scclouds.com.br>
Klaus de Freitas Dornsbach 2024-04-24 04:31:02 -03:00 committed by GitHub
parent 4fe2b66e9d
commit e330d76a72
15 changed files with 745 additions and 1174 deletions

View File

@ -105,6 +105,8 @@ public abstract class AgentAttache {
}
};
protected static String LOG_SEQ_FORMATTED_STRING;
protected final long _id;
protected String _name = null;
protected final ConcurrentHashMap<Long, Listener> _waitForList;
@ -137,6 +139,7 @@ public abstract class AgentAttache {
_requests = new LinkedList<Request>();
_agentMgr = agentMgr;
_nextSequence = new Long(s_rand.nextInt(Short.MAX_VALUE)).longValue() << 48;
LOG_SEQ_FORMATTED_STRING = String.format("Seq %d-{}: {}", _id);
}
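
A note on the hybrid format string built in this constructor: String.format resolves the %d with the attache id once, leaving the Log4j2 {} placeholders to be substituted per call. A minimal sketch of the resulting behavior, with illustrative values:

static String sequencePattern(long id) {
    // resolves %d now; the {} placeholders survive untouched for Log4j2 to fill per call
    return String.format("Seq %d-{}: {}", id); // id 42 yields "Seq 42-{}: {}"
}
// At a call site, logger.debug(sequencePattern(42), 1001L, "Cancelling.")
// renders "Seq 42-1001: Cancelling." only if DEBUG is enabled.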
public synchronized long getNextSequence() {
@ -197,9 +200,7 @@ public abstract class AgentAttache {
}
protected synchronized void cancel(final long seq) {
if (logger.isDebugEnabled()) {
logger.debug(log(seq, "Cancelling."));
}
logger.debug(LOG_SEQ_FORMATTED_STRING, seq, "Cancelling.");
final Listener listener = _waitForList.remove(seq);
if (listener != null) {
listener.processDisconnect(_id, Status.Disconnected);
@ -218,14 +219,8 @@ public abstract class AgentAttache {
return Collections.binarySearch(_requests, seq, s_seqComparator);
}
protected String log(final long seq, final String msg) {
return "Seq " + _id + "-" + seq + ": " + msg;
}
protected void registerListener(final long seq, final Listener listener) {
if (logger.isTraceEnabled()) {
logger.trace(log(seq, "Registering listener"));
}
logger.trace(LOG_SEQ_FORMATTED_STRING, seq, "Registering listener");
if (listener.getTimeout() != -1) {
s_listenerExecutor.schedule(new Alarm(seq), listener.getTimeout(), TimeUnit.SECONDS);
}
@ -233,9 +228,7 @@ public abstract class AgentAttache {
}
protected Listener unregisterListener(final long sequence) {
if (logger.isTraceEnabled()) {
logger.trace(log(sequence, "Unregistering listener"));
}
logger.trace(LOG_SEQ_FORMATTED_STRING, sequence, "Unregistering listener");
return _waitForList.remove(sequence);
}
@ -267,7 +260,7 @@ public abstract class AgentAttache {
final Listener monitor = entry.getValue();
if (!monitor.isRecurring()) {
//TODO - remove this debug statement later
logger.debug("Listener is " + entry.getValue() + " waiting on " + entry.getKey());
logger.debug("Listener is {} waiting on {}", entry.getValue(), entry.getKey());
nonRecurringListenersList.add(monitor);
}
}
@ -290,15 +283,10 @@ public abstract class AgentAttache {
if (answers[0] != null && answers[0].getResult()) {
processed = true;
}
if (logger.isDebugEnabled()) {
logger.debug(log(seq, "Unable to find listener."));
}
logger.debug(LOG_SEQ_FORMATTED_STRING, seq, "Unable to find listener.");
} else {
processed = monitor.processAnswers(_id, seq, answers);
if (logger.isTraceEnabled()) {
logger.trace(log(seq, (processed ? "" : " did not ") + " processed "));
}
logger.trace(LOG_SEQ_FORMATTED_STRING, seq, processed ? "processed" : "not processed");
if (!monitor.isRecurring()) {
unregisterListener(seq);
}
@ -324,9 +312,7 @@ public abstract class AgentAttache {
final Map.Entry<Long, Listener> entry = it.next();
it.remove();
final Listener monitor = entry.getValue();
if (logger.isDebugEnabled()) {
logger.debug(log(entry.getKey(), "Sending disconnect to " + monitor.getClass()));
}
logger.debug(LOG_SEQ_FORMATTED_STRING, entry.getKey(), "Sending disconnect to " + monitor.getClass());
monitor.processDisconnect(_id, state);
}
}
@ -357,9 +343,8 @@ public abstract class AgentAttache {
long seq = req.getSequence();
if (listener != null) {
registerListener(seq, listener);
} else if (logger.isDebugEnabled()) {
logger.debug(log(seq, "Routed from " + req.getManagementServerId()));
}
logger.debug(LOG_SEQ_FORMATTED_STRING, seq, "Routed from " + req.getManagementServerId());
synchronized (this) {
try {
@ -381,16 +366,14 @@ public abstract class AgentAttache {
if (req.executeInSequence() && _currentSequence == null) {
_currentSequence = seq;
if (logger.isTraceEnabled()) {
logger.trace(log(seq, " is current sequence"));
}
logger.trace(LOG_SEQ_FORMATTED_STRING, seq, " is current sequence");
}
} catch (AgentUnavailableException e) {
logger.info(log(seq, "Unable to send due to " + e.getMessage()));
logger.info(LOG_SEQ_FORMATTED_STRING, seq, "Unable to send due to " + e.getMessage());
cancel(seq);
throw e;
} catch (Exception e) {
logger.warn(log(seq, "Unable to send due to "), e);
logger.warn(LOG_SEQ_FORMATTED_STRING, seq, "Unable to send due to " + e.getMessage(), e);
cancel(seq);
throw new AgentUnavailableException("Problem due to other exception " + e.getMessage(), _id);
}
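
The two catch blocks above rely on the Log4j2 convention that a trailing Throwable with no matching placeholder is logged as the exception, stack trace included. A minimal self-contained sketch, assuming only the log4j-api dependency:

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

class ThrowableLastSketch {
    private static final Logger logger = LogManager.getLogger(ThrowableLastSketch.class);

    void demo(long seq) {
        try {
            throw new IllegalStateException("boom");
        } catch (Exception e) {
            // the single {} consumes e.getMessage(); the unmatched trailing e prints the full stack trace
            logger.warn("Seq {}: Unable to send due to " + e.getMessage(), seq, e);
        }
    }
}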
@ -409,20 +392,16 @@ public abstract class AgentAttache {
try {
answers = sl.waitFor(wait);
} catch (final InterruptedException e) {
logger.debug(log(seq, "Interrupted"));
logger.debug(LOG_SEQ_FORMATTED_STRING, seq, "Interrupted");
}
if (answers != null) {
if (logger.isDebugEnabled()) {
new Response(req, answers).logD("Received: ", false);
}
new Response(req, answers).logD("Received: ", false);
return answers;
}
answers = sl.getAnswers(); // Try it again.
if (answers != null) {
if (logger.isDebugEnabled()) {
new Response(req, answers).logD("Received after timeout: ", true);
}
new Response(req, answers).logD("Received after timeout: ", true);
_agentMgr.notifyAnswersToMonitors(_id, seq, answers);
return answers;
@ -430,21 +409,16 @@ public abstract class AgentAttache {
final Long current = _currentSequence;
if (current != null && seq != current) {
if (logger.isDebugEnabled()) {
logger.debug(log(seq, "Waited too long."));
}
logger.debug(LOG_SEQ_FORMATTED_STRING, seq, "Waited too long.");
throw new OperationTimedoutException(req.getCommands(), _id, seq, wait, false);
}
if (logger.isDebugEnabled()) {
logger.debug(log(seq, "Waiting some more time because this is the current command"));
}
logger.debug(LOG_SEQ_FORMATTED_STRING, seq, "Waiting some more time because this is the current command");
}
throw new OperationTimedoutException(req.getCommands(), _id, seq, wait * 2, true);
} catch (OperationTimedoutException e) {
logger.warn(log(seq, "Timed out on " + req.toString()));
logger.warn(LOG_SEQ_FORMATTED_STRING, seq, "Timed out on " + req.toString());
cancel(seq);
final Long current = _currentSequence;
if (req.executeInSequence() && (current != null && current == seq)) {
@ -452,7 +426,7 @@ public abstract class AgentAttache {
}
throw e;
} catch (Exception e) {
logger.warn(log(seq, "Exception while waiting for answer"), e);
logger.warn(LOG_SEQ_FORMATTED_STRING, seq, "Exception while waiting for answer", e);
cancel(seq);
final Long current = _currentSequence;
if (req.executeInSequence() && (current != null && current == seq)) {
@ -467,22 +441,16 @@ public abstract class AgentAttache {
protected synchronized void sendNext(final long seq) {
_currentSequence = null;
if (_requests.isEmpty()) {
if (logger.isDebugEnabled()) {
logger.debug(log(seq, "No more commands found"));
}
logger.debug(LOG_SEQ_FORMATTED_STRING, seq, "No more commands found");
return;
}
Request req = _requests.pop();
if (logger.isDebugEnabled()) {
logger.debug(log(req.getSequence(), "Sending now. is current sequence."));
}
logger.debug(LOG_SEQ_FORMATTED_STRING, req.getSequence(), "Sending now. is current sequence.");
try {
send(req);
} catch (AgentUnavailableException e) {
if (logger.isDebugEnabled()) {
logger.debug(log(req.getSequence(), "Unable to send the next sequence"));
}
logger.debug(LOG_SEQ_FORMATTED_STRING, req.getSequence(), "Unable to send the next sequence");
cancel(req.getSequence());
}
_currentSequence = req.getSequence();

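The recurring pattern in this file, dropping isDebugEnabled()/isTraceEnabled() guards in favor of parameterized calls, works because Log4j2 defers message formatting until the level check passes internally. A minimal sketch of the before/after shape, assuming only the log4j-api dependency (class name and message are illustrative):

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

class GuardExample {
    private static final Logger logger = LogManager.getLogger(GuardExample.class);

    void before(long seq) {
        if (logger.isDebugEnabled()) { // guard needed: concatenation runs eagerly
            logger.debug("Seq " + seq + ": Cancelling.");
        }
    }

    void after(long seq) {
        logger.debug("Seq {}: Cancelling.", seq); // no guard: formatting is deferred
    }
}

A guard still pays off when computing an argument is itself expensive, which is why some guarded blocks survive later in this commit.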
View File

@ -208,12 +208,12 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
@Override
public boolean configure(final String name, final Map<String, Object> params) throws ConfigurationException {
logger.info("Ping Timeout is " + mgmtServiceConf.getPingTimeout());
logger.info("Ping Timeout is {}.", mgmtServiceConf.getPingTimeout());
final int threads = DirectAgentLoadSize.value();
_nodeId = ManagementServerNode.getManagementServerId();
logger.info("Configuring AgentManagerImpl. management server node id(msid): " + _nodeId);
logger.info("Configuring AgentManagerImpl. management server node id(msid): {}.", _nodeId);
final long lastPing = (System.currentTimeMillis() >> 10) - mgmtServiceConf.getTimeout();
_hostDao.markHostsAsDisconnected(_nodeId, lastPing);
@ -229,13 +229,13 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
_connectExecutor.allowCoreThreadTimeOut(true);
_connection = new NioServer("AgentManager", Port.value(), Workers.value() + 10, this, caService);
logger.info("Listening on " + Port.value() + " with " + Workers.value() + " workers");
logger.info("Listening on {} with {} workers.", Port.value(), Workers.value());
// executes all agent commands other than cron and ping
_directAgentExecutor = new ScheduledThreadPoolExecutor(DirectAgentPoolSize.value(), new NamedThreadFactory("DirectAgent"));
// executes cron and ping agent commands
_cronJobExecutor = new ScheduledThreadPoolExecutor(DirectAgentPoolSize.value(), new NamedThreadFactory("DirectAgentCronJob"));
logger.debug("Created DirectAgentAttache pool with size: " + DirectAgentPoolSize.value());
logger.debug("Created DirectAgentAttache pool with size: {}.", DirectAgentPoolSize.value());
_directAgentThreadCap = Math.round(DirectAgentPoolSize.value() * DirectAgentThreadCap.value()) + 1; // add 1 to always make the value > 0
_monitorExecutor = new ScheduledThreadPoolExecutor(1, new NamedThreadFactory("AgentMonitor"));
@ -266,9 +266,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
_cmdMonitors.add(new Pair<Integer, Listener>(_monitorId, listener));
}
}
if (logger.isDebugEnabled()) {
logger.debug("Registering listener " + listener.getClass().getSimpleName() + " with id " + _monitorId);
}
logger.debug("Registering listener {} with id {}", listener.getClass().getSimpleName(), _monitorId);
return _monitorId;
}
}
@ -288,7 +286,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
@Override
public void unregisterForHostEvents(final int id) {
logger.debug("Deregistering " + id);
logger.debug("Deregistering {}", id);
_hostMonitors.remove(id);
}
@ -303,16 +301,14 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
}
}
logger.warn("No handling of agent control command: " + cmd + " sent from " + attache.getId());
logger.warn("No handling of agent control command: {} sent from {}", cmd, attache.getId());
return new AgentControlAnswer(cmd);
}
public void handleCommands(final AgentAttache attache, final long sequence, final Command[] cmds) {
for (final Pair<Integer, Listener> listener : _cmdMonitors) {
final boolean processed = listener.second().processCommands(attache.getId(), sequence, cmds);
if (logger.isTraceEnabled()) {
logger.trace("SeqA " + attache.getId() + "-" + sequence + ": " + (processed ? "processed" : "not processed") + " by " + listener.getClass());
}
logger.trace("SeqA {}-{}: {} by {}", attache.getId(), sequence, (processed ? "processed" : "not processed"), listener.getClass());
}
}
@ -349,9 +345,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
String errorMsg = String.format("Error sending command %s to host %s, due to %s", cmd.getClass().getName(),
host.getUuid(), e.getLocalizedMessage());
logger.error(errorMsg);
if (logger.isDebugEnabled()) {
logger.debug(errorMsg, e);
}
logger.debug(errorMsg, e);
}
if (answer != null) {
return answer;
@ -372,7 +366,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
}
if (answers != null && answers[0] instanceof UnsupportedAnswer) {
logger.warn("Unsupported Command: " + answers[0].getDetails());
logger.warn("Unsupported Command: {}", answers[0].getDetails());
return answers[0];
}
@ -469,15 +463,11 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
final Long hostId = agent.getId();
final HostVO host = _hostDao.findById(hostId);
if (host != null && host.getType() != null && !host.getType().isVirtual()) {
if (logger.isDebugEnabled()) {
logger.debug("checking if agent (" + hostId + ") is alive");
}
logger.debug("Checking if agent ({}) is alive", hostId);
final Answer answer = easySend(hostId, new CheckHealthCommand());
if (answer != null && answer.getResult()) {
final Status status = Status.Up;
if (logger.isDebugEnabled()) {
logger.debug("agent (" + hostId + ") responded to checkHeathCommand, reporting that agent is " + status);
}
logger.debug("Agent ({}) responded to checkHealthCommand, reporting that agent is {}", hostId, status);
return status;
}
return _haMgr.investigate(hostId);
@ -491,7 +481,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
}
final AgentAttache agent = findAttache(hostId);
if (agent == null) {
logger.debug("Unable to find agent for " + hostId);
logger.debug("Unable to find agent for {}", hostId);
throw new AgentUnavailableException("Unable to find agent ", hostId);
}
@ -519,9 +509,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
return;
}
final long hostId = attache.getId();
if (logger.isDebugEnabled()) {
logger.debug("Remove Agent : " + hostId);
}
logger.debug("Remove Agent : {}", hostId);
AgentAttache removed = null;
boolean conflict = false;
synchronized (_agents) {
@ -533,16 +521,14 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
}
}
if (conflict) {
logger.debug("Agent for host " + hostId + " is created when it is being disconnected");
logger.debug("Agent for host {} is created when it is being disconnected", hostId);
}
if (removed != null) {
removed.disconnect(nextState);
}
for (final Pair<Integer, Listener> monitor : _hostMonitors) {
if (logger.isDebugEnabled()) {
logger.debug("Sending Disconnect to listener: " + monitor.second().getClass().getName());
}
logger.debug("Sending Disconnect to listener: {}", monitor.second().getClass().getName());
monitor.second().processDisconnect(hostId, nextState);
}
}
@ -550,9 +536,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
@Override
public void notifyMonitorsOfNewlyAddedHost(long hostId) {
for (final Pair<Integer, Listener> monitor : _hostMonitors) {
if (logger.isDebugEnabled()) {
logger.debug("Sending host added to listener: " + monitor.second().getClass().getSimpleName());
}
logger.debug("Sending host added to listener: {}", monitor.second().getClass().getSimpleName());
monitor.second().processHostAdded(hostId);
}
@ -562,9 +546,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
final long hostId = attache.getId();
final HostVO host = _hostDao.findById(hostId);
for (final Pair<Integer, Listener> monitor : _hostMonitors) {
if (logger.isDebugEnabled()) {
logger.debug("Sending Connect to listener: " + monitor.second().getClass().getSimpleName());
}
logger.debug("Sending Connect to listener: {}", monitor.second().getClass().getSimpleName());
for (int i = 0; i < cmd.length; i++) {
try {
monitor.second().processConnect(host, cmd[i], forRebalance);
@ -584,7 +566,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
handleDisconnectWithoutInvestigation(attache, Event.ShutdownRequested, true, true);
throw new CloudRuntimeException("Unable to connect " + attache.getId(), e);
} else {
logger.error("Monitor " + monitor.second().getClass().getSimpleName() + " says there is an error in the connect process for " + hostId + " due to " + e.getMessage(), e);
logger.error("Monitor {} says there is an error in the connect process for {} due to {}", monitor.second().getClass().getSimpleName(), hostId, e.getMessage(), e);
handleDisconnectWithoutInvestigation(attache, Event.AgentDisconnected, true, true);
throw new CloudRuntimeException("Unable to connect " + attache.getId(), e);
}
@ -607,7 +589,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
Map<String, String> detailsMap = readyAnswer.getDetailsMap();
if (detailsMap != null) {
String uefiEnabled = detailsMap.get(Host.HOST_UEFI_ENABLE);
logger.debug(String.format("Got HOST_UEFI_ENABLE [%s] for hostId [%s]:", uefiEnabled, host.getUuid()));
logger.debug("Got HOST_UEFI_ENABLE [{}] for hostId [{}]:", uefiEnabled, host.getUuid());
if (uefiEnabled != null) {
_hostDao.loadDetails(host);
if (!uefiEnabled.equals(host.getDetails().get(Host.HOST_UEFI_ENABLE))) {
@ -701,7 +683,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
try {
resource.configure(host.getName(), params);
} catch (final ConfigurationException e) {
logger.warn("Unable to configure resource due to " + e.getMessage());
logger.warn("Unable to configure resource due to {}", e.getMessage());
return null;
}
@ -724,14 +706,14 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
// load the respective discoverer
final Discoverer discoverer = _resourceMgr.getMatchingDiscover(host.getHypervisorType());
if (discoverer == null) {
logger.info("Could not to find a Discoverer to load the resource: " + host.getId() + " for hypervisor type: " + host.getHypervisorType());
logger.info("Could not to find a Discoverer to load the resource: {} for hypervisor type: {}", host.getId(), host.getHypervisorType());
resource = loadResourcesWithoutHypervisor(host);
} else {
resource = discoverer.reloadResource(host);
}
if (resource == null) {
logger.warn("Unable to load the resource: " + host.getId());
logger.warn("Unable to load the resource: {}", host.getId());
return false;
}
@ -757,7 +739,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
}
protected AgentAttache createAttacheForDirectConnect(final Host host, final ServerResource resource) throws ConnectionException {
logger.debug("create DirectAgentAttache for " + host.getId());
logger.debug("create DirectAgentAttache for {}", host.getId());
final DirectAgentAttache attache = new DirectAgentAttache(this, host.getId(), host.getName(), resource, host.isInMaintenanceStates());
AgentAttache old = null;
@ -778,14 +760,12 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
_connection.stop();
}
logger.info("Disconnecting agents: " + _agents.size());
logger.info("Disconnecting agents: {}", _agents.size());
synchronized (_agents) {
for (final AgentAttache agent : _agents.values()) {
final HostVO host = _hostDao.findById(agent.getId());
if (host == null) {
if (logger.isDebugEnabled()) {
logger.debug("Cant not find host " + agent.getId());
}
logger.debug("Cant not find host {}", agent.getId());
} else {
if (!agent.forForward()) {
agentStatusTransitTo(host, Event.ManagementServerDown, _nodeId);
@ -803,9 +783,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
final Status currentStatus = host.getStatus();
Status nextStatus;
if (currentStatus == Status.Down || currentStatus == Status.Alert || currentStatus == Status.Removed) {
if (logger.isDebugEnabled()) {
logger.debug(String.format("Host %s is already %s", host.getUuid(), currentStatus));
}
logger.debug("Host {} is already {}", host.getUuid(), currentStatus);
nextStatus = currentStatus;
} else {
try {
@ -815,10 +793,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
logger.debug(err);
throw new CloudRuntimeException(err);
}
if (logger.isDebugEnabled()) {
logger.debug(String.format("The next status of agent %s is %s, current status is %s", host.getUuid(), nextStatus, currentStatus));
}
logger.debug("The next status of agent {} is {}, current status is {}", host.getUuid(), nextStatus, currentStatus);
}
return nextStatus;
}
@ -830,20 +805,17 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
GlobalLock joinLock = getHostJoinLock(hostId);
if (joinLock.lock(60)) {
try {
logger.info(String.format("Host %d is disconnecting with event %s", hostId, event));
logger.info("Host {} is disconnecting with event {}", hostId, event);
Status nextStatus = null;
final HostVO host = _hostDao.findById(hostId);
if (host == null) {
logger.warn(String.format("Can't find host with %d", hostId));
logger.warn("Can't find host with {}", hostId);
nextStatus = Status.Removed;
} else {
nextStatus = getNextStatusOnDisconnection(host, event);
caService.purgeHostCertificate(host);
}
if (logger.isDebugEnabled()) {
logger.debug(String.format("Deregistering link for %d with state %s", hostId, nextStatus));
}
logger.debug("Deregistering link for {} with state {}", hostId, nextStatus);
removeAgent(attache, nextStatus);
@ -878,22 +850,22 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
if (nextStatus == Status.Alert) {
/* OK, we are going to the bad status, let's see what happened */
logger.info("Investigating why host " + hostId + " has disconnected with event " + event);
logger.info("Investigating why host {} has disconnected with event", hostId, event);
Status determinedState = investigate(attache);
// if state cannot be determined do nothing and bail out
if (determinedState == null) {
if ((System.currentTimeMillis() >> 10) - host.getLastPinged() > AlertWait.value()) {
logger.warn("Agent " + hostId + " state cannot be determined for more than " + AlertWait + "(" + AlertWait.value() + ") seconds, will go to Alert state");
logger.warn("Agent {} state cannot be determined for more than {}({}) seconds, will go to Alert state", hostId, AlertWait, AlertWait.value());
determinedState = Status.Alert;
} else {
logger.warn("Agent " + hostId + " state cannot be determined, do nothing");
logger.warn("Agent {} state cannot be determined, do nothing", hostId);
return false;
}
}
final Status currentStatus = host.getStatus();
logger.info("The agent from host " + hostId + " state determined is " + determinedState);
logger.info("The agent from host {} state determined is {}", hostId, determinedState);
if (determinedState == Status.Down) {
final String message = "Host is down: " + host.getId() + "-" + host.getName() + ". Starting HA on the VMs";
@ -908,15 +880,15 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
agentStatusTransitTo(host, Status.Event.Ping, _nodeId);
return false;
} else if (determinedState == Status.Disconnected) {
logger.warn("Agent is disconnected but the host is still up: " + host.getId() + "-" + host.getName() +
logger.warn("Agent is disconnected but the host is still up: {}-{}", host.getId(), host.getName() +
'-' + host.getResourceState());
if (currentStatus == Status.Disconnected ||
(currentStatus == Status.Up && host.getResourceState() == ResourceState.PrepareForMaintenance)) {
if ((System.currentTimeMillis() >> 10) - host.getLastPinged() > AlertWait.value()) {
logger.warn("Host " + host.getId() + " has been disconnected past the wait time it should be disconnected.");
logger.warn("Host {} has been disconnected past the wait time it should be disconnected.", host.getId());
event = Status.Event.WaitedTooLong;
} else {
logger.debug("Host " + host.getId() + " has been determined to be disconnected but it hasn't passed the wait time yet.");
logger.debug("Host {} has been determined to be disconnected but it hasn't passed the wait time yet.", host.getId());
return false;
}
} else if (currentStatus == Status.Up) {
@ -939,7 +911,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
"In availability zone " + host.getDataCenterId() + ", host is in alert state: " + host.getId() + "-" + host.getName());
}
} else {
logger.debug("The next status of agent " + host.getId() + " is not Alert, no need to investigate what happened");
logger.debug("The next status of agent {} is not Alert, no need to investigate what happened", host.getId());
}
}
handleDisconnectWithoutInvestigation(attache, event, true, true);
@ -980,12 +952,12 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
try {
final Host h = _hostDao.findById(hostId);
if (h == null || h.getRemoved() != null) {
logger.debug("Host with id " + hostId + " doesn't exist");
logger.debug("Host with id {} doesn't exist", hostId);
return null;
}
final Status status = h.getStatus();
if (!status.equals(Status.Up) && !status.equals(Status.Connecting)) {
logger.debug("Can not send command " + cmd + " due to Host " + hostId + " is not up");
logger.debug("Can not send command {} due to Host {} not being up", cmd, hostId);
return null;
}
final Answer answer = send(hostId, cmd);
@ -995,7 +967,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
}
if (logger.isDebugEnabled() && answer.getDetails() != null) {
logger.debug("Details from executing " + cmd.getClass() + ": " + answer.getDetails());
logger.debug("Details from executing {}: {}", cmd.getClass(), answer.getDetails());
}
return answer;
@ -1004,7 +976,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
logger.warn(e.getMessage());
return null;
} catch (final OperationTimedoutException e) {
logger.warn("Operation timed out: " + e.getMessage());
logger.warn("Operation timed out: {}", e.getMessage());
return null;
} catch (final Exception e) {
logger.warn("Exception while sending", e);
@ -1035,7 +1007,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
}
if (host.getStatus() == Status.Disconnected) {
logger.debug("Host is already disconnected, no work to be done: " + hostId);
logger.debug("Host is already disconnected, no work to be done: {}", hostId);
return;
}
@ -1053,9 +1025,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
@Override
public void notifyMonitorsOfHostAboutToBeRemoved(long hostId) {
for (final Pair<Integer, Listener> monitor : _hostMonitors) {
if (logger.isDebugEnabled()) {
logger.debug("Sending host about to be removed to listener: " + monitor.second().getClass().getSimpleName());
}
logger.debug("Sending host about to be removed to listener: {}", monitor.second().getClass().getSimpleName());
monitor.second().processHostAboutToBeRemoved(hostId);
}
@ -1064,9 +1034,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
@Override
public void notifyMonitorsOfRemovedHost(long hostId, long clusterId) {
for (final Pair<Integer, Listener> monitor : _hostMonitors) {
if (logger.isDebugEnabled()) {
logger.debug("Sending host removed to listener: " + monitor.second().getClass().getSimpleName());
}
logger.debug("Sending host removed to listener: {}", monitor.second().getClass().getSimpleName());
monitor.second().processHostRemoved(hostId, clusterId);
}
@ -1074,9 +1042,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
public boolean executeUserRequest(final long hostId, final Event event) throws AgentUnavailableException {
if (event == Event.AgentDisconnected) {
if (logger.isDebugEnabled()) {
logger.debug("Received agent disconnect event for host " + hostId);
}
logger.debug("Received agent disconnect event for host {}", hostId);
AgentAttache attache = null;
attache = findAttache(hostId);
if (attache != null) {
@ -1088,7 +1054,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
try {
reconnect(hostId);
} catch (CloudRuntimeException e) {
logger.debug("Error on shutdown request for hostID: " + hostId, e);
logger.debug("Error on shutdown request for hostID: {}", hostId, e);
return false;
}
return true;
@ -1103,7 +1069,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
}
protected AgentAttache createAttacheForConnect(final HostVO host, final Link link) throws ConnectionException {
logger.debug("create ConnectedAgentAttache for " + host.getId());
logger.debug("create ConnectedAgentAttache for {}", host.getId());
final AgentAttache attache = new ConnectedAgentAttache(this, host.getId(), host.getName(), link, host.isInMaintenanceStates());
link.attach(attache);
@ -1141,7 +1107,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
ready.setMsHostList(newMSList);
ready.setLbAlgorithm(indirectAgentLB.getLBAlgorithmName());
ready.setLbCheckInterval(indirectAgentLB.getLBPreferredHostCheckInterval(host.getClusterId()));
logger.debug("Agent's management server host list is not up to date, sending list update:" + newMSList);
logger.debug("Agent's management server host list is not up to date, sending list update: {}", newMSList);
}
attache = createAttacheForConnect(host, link);
@ -1202,28 +1168,26 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
@Override
protected void runInContext() {
try {
if (logger.isDebugEnabled()) {
logger.debug("Simulating start for resource " + resource.getName() + " id " + id);
}
logger.debug("Simulating start for resource {} id {}", resource.getName(), id);
if (tapLoadingAgents(id, TapAgentsAction.Add)) {
try {
final AgentAttache agentattache = findAttache(id);
if (agentattache == null) {
logger.debug("Creating agent for host " + id);
logger.debug("Creating agent for host {}", id);
_resourceMgr.createHostAndAgent(id, resource, details, false, null, false);
logger.debug("Completed creating agent for host " + id);
logger.debug("Completed creating agent for host {}", id);
} else {
logger.debug("Agent already created in another thread for host " + id + ", ignore this");
logger.debug("Agent already created in another thread for host {}, ignore this", id);
}
} finally {
tapLoadingAgents(id, TapAgentsAction.Del);
}
} else {
logger.debug("Agent creation already getting processed in another thread for host " + id + ", ignore this");
logger.debug("Agent creation already getting processed in another thread for host {}, ignore this", id);
}
} catch (final Exception e) {
logger.warn("Unable to simulate start on resource " + id + " name " + resource.getName(), e);
logger.warn("Unable to simulate start on resource {} name {}", id, resource.getName(), e);
}
}
}
@ -1249,7 +1213,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
final AgentAttache attache = handleConnectedAgent(_link, startups, _request);
if (attache == null) {
logger.warn("Unable to create attache for agent: " + _request);
logger.warn("Unable to create attache for agent: {}", _request);
}
}
}
@ -1272,7 +1236,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
try {
link.send(response.toBytes());
} catch (final ClosedChannelException e) {
logger.debug("Failed to send startupanswer: " + e.toString());
logger.debug("Failed to send startupanswer: {}", e.toString());
}
_connectExecutor.execute(new HandleAgentConnectTask(link, cmds, request));
}
@ -1288,25 +1252,25 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
}
HostVO host = _hostDao.findById(hostId);
if (host == null) {
logger.error(String.format("Unable to find host with ID: %s", hostId));
logger.error("Unable to find host with ID: {}", hostId);
return;
}
if (!BooleanUtils.toBoolean(EnableKVMAutoEnableDisable.valueIn(host.getClusterId()))) {
logger.debug(String.format("%s is disabled for the cluster %s, cannot process the health check result " +
"received for the host %s", EnableKVMAutoEnableDisable.key(), host.getClusterId(), host.getName()));
logger.debug("{} is disabled for the cluster {}, cannot process the health check result " +
"received for the host {}", EnableKVMAutoEnableDisable.key(), host.getClusterId(), host.getName());
return;
}
ResourceState.Event resourceEvent = hostHealthCheckResult ? ResourceState.Event.Enable : ResourceState.Event.Disable;
try {
logger.info(String.format("Host health check %s, auto %s KVM host: %s",
logger.info("Host health check {}, auto {} KVM host: {}",
hostHealthCheckResult ? "succeeds" : "fails",
hostHealthCheckResult ? "enabling" : "disabling",
host.getName()));
host.getName());
_resourceMgr.autoUpdateHostAllocationState(hostId, resourceEvent);
} catch (NoTransitionException e) {
logger.error(String.format("Cannot Auto %s host: %s", resourceEvent, host.getName()), e);
logger.error("Cannot Auto {} host: {}", resourceEvent, host.getName(), e);
}
}
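
Worth noting for the conversion just above: replacing String.format with placeholders defers only the string assembly. The ternary arguments themselves are still evaluated eagerly at the call site, which is cheap here. A sketch with illustrative parameters (Logger is org.apache.logging.log4j.Logger):

void healthCheckSketch(Logger logger, boolean ok, String hostName) {
    // the ternaries run regardless of level; only assembling the final string is deferred
    logger.info("Host health check {}, auto {} KVM host: {}",
            ok ? "succeeds" : "fails", ok ? "enabling" : "disabling", hostName);
}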
@ -1336,7 +1300,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
if (attache == null) {
if (!(cmd instanceof StartupCommand)) {
logger.warn("Throwing away a request because it came through as the first command on a connect: " + request);
logger.warn("Throwing away a request because it came through as the first command on a connect: {}", request);
} else {
// submit the task for execution
request.logD("Scheduling the first command ");
@ -1353,14 +1317,14 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
if (logger.isDebugEnabled()) {
if (cmd instanceof PingRoutingCommand) {
logD = false;
logger.debug("Ping from Routing host " + hostId + "(" + hostName + ")");
logger.trace("SeqA " + hostId + "-" + request.getSequence() + ": Processing " + request);
logger.debug("Ping from Routing host {}({})", hostId, hostName);
logger.trace("SeqA {}-{}: Processing {}", hostId, request.getSequence(), request);
} else if (cmd instanceof PingCommand) {
logD = false;
logger.debug("Ping from " + hostId + "(" + hostName + ")");
logger.trace("SeqA " + hostId + "-" + request.getSequence() + ": Processing " + request);
logger.debug("Ping from {}({})", hostId, hostName);
logger.trace("SeqA {}-{}: Processing {}", hostId, request.getSequence(), request);
} else {
logger.debug("SeqA " + hostId + "-" + request.getSequence() + ": Processing " + request);
logger.debug("SeqA {}-{}: {}", hostId, request.getSequence(), request);
}
}
@ -1385,7 +1349,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
} else if (cmd instanceof ShutdownCommand) {
final ShutdownCommand shutdown = (ShutdownCommand)cmd;
final String reason = shutdown.getReason();
logger.info("Host " + attache.getId() + " has informed us that it is shutting down with reason " + reason + " and detail " + shutdown.getDetail());
logger.info("Host {} has informed us that it is shutting down with reason {} and detail {}", attache.getId(), reason, shutdown.getDetail());
if (reason.equals(ShutdownCommand.Update)) {
// disconnectWithoutInvestigation(attache, Event.UpdateNeeded);
throw new CloudRuntimeException("Agent update not implemented");
@ -1423,7 +1387,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
_alertMgr.clearAlert(AlertManager.AlertType.ALERT_TYPE_ROUTING, host.getDataCenterId(), host.getPodId());
}
} else {
logger.debug("Not processing " + PingRoutingCommand.class.getSimpleName() + " for agent id=" + cmdHostId + "; can't find the host in the DB");
logger.debug("Not processing {} for agent id={}; can't find the host in the DB", PingRoutingCommand.class.getSimpleName(), cmdHostId);
}
}
if (host!= null && host.getStatus() != Status.Up && gatewayAccessible) {
@ -1433,9 +1397,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
} else if (cmd instanceof ReadyAnswer) {
final HostVO host = _hostDao.findById(attache.getId());
if (host == null) {
if (logger.isDebugEnabled()) {
logger.debug("Cant not find host " + attache.getId());
}
logger.debug("Cant not find host {}", attache.getId());
}
answer = new Answer(cmd);
} else {
@ -1450,26 +1412,24 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
}
final Response response = new Response(request, answers, _nodeId, attache.getId());
if (logger.isDebugEnabled()) {
if (logD) {
logger.debug("SeqA " + attache.getId() + "-" + response.getSequence() + ": Sending " + response);
} else {
logger.trace("SeqA " + attache.getId() + "-" + response.getSequence() + ": Sending " + response);
}
if (logD) {
logger.debug("SeqA {}-: Sending {}", attache.getId(), response.getSequence(), response);
} else {
logger.trace("SeqA {}-: Sending {}" + attache.getId(), response.getSequence(), response);
}
try {
link.send(response.toBytes());
} catch (final ClosedChannelException e) {
logger.warn("Unable to send response because connection is closed: " + response);
logger.warn("Unable to send response because connection is closed: {}", response);
}
}
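
One pitfall fixed above: the placeholder count must match the argument list. Log4j2 silently drops surplus arguments (unless the trailing one is a Throwable), so a missing {} loses data rather than failing fast. A sketch with illustrative parameters (Logger is org.apache.logging.log4j.Logger):

void aritySketch(Logger logger, long id, long seq, Object response) {
    logger.debug("SeqA {}-{}: Sending {}", id, seq, response); // three {}, three args: all rendered
    logger.debug("SeqA {}-: Sending {}", id, seq, response);   // two {}: 'response' is silently dropped
}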
protected void processResponse(final Link link, final Response response) {
final AgentAttache attache = (AgentAttache)link.attachment();
if (attache == null) {
logger.warn("Unable to process: " + response);
logger.warn("Unable to process: {}", response);
} else if (!attache.processAnswers(response.getSequence(), response)) {
logger.info("Host " + attache.getId() + " - Seq " + response.getSequence() + ": Response is not processed: " + response);
logger.info("Host {} - Seq {}: Response is not processed: {}", attache.getId(), response.getSequence(), response);
}
}
@ -1502,7 +1462,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
if (attache != null) {
disconnectWithInvestigation(attache, Event.AgentDisconnected);
} else {
logger.info("Connection from " + link.getIpAddress() + " closed but no cleanup was done.");
logger.info("Connection from {} closed but no cleanup was done.", link.getIpAddress());
link.close();
link.terminated();
}
@ -1539,20 +1499,13 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
public boolean agentStatusTransitTo(final HostVO host, final Status.Event e, final long msId) {
try {
_agentStatusLock.lock();
if (logger.isDebugEnabled()) {
final ResourceState state = host.getResourceState();
final StringBuilder msg = new StringBuilder("Transition:");
msg.append("[Resource state = ").append(state);
msg.append(", Agent event = ").append(e.toString());
msg.append(", Host id = ").append(host.getId()).append(", name = " + host.getName()).append("]");
logger.debug(msg);
}
logger.debug("[Resource state = {}, Agent event = , Host id = {}, name = {}]", host.getResourceState(), e.toString(), host.getId(), host.getName());
host.setManagementServerId(msId);
try {
return _statusStateMachine.transitTo(host, e, host.getId(), _hostDao);
} catch (final NoTransitionException e1) {
logger.debug("Cannot transit agent status with event " + e + " for host " + host.getId() + ", name=" + host.getName() + ", management server id is " + msId);
logger.debug("Cannot transit agent status with event {} for host {}, name={}, management server id is {}", e, host.getId(), host.getName(), msId);
throw new CloudRuntimeException("Cannot transit agent status with event " + e + " for host " + host.getId() + ", management server id is " + msId + "," + e1.getMessage());
}
} finally {
@ -1581,7 +1534,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
protected boolean isHostOwnerSwitched(final long hostId) {
final HostVO host = _hostDao.findById(hostId);
if (host == null) {
logger.warn("Can't find the host " + hostId);
logger.warn("Can't find the host {}", hostId);
return false;
}
return isHostOwnerSwitched(host);
@ -1606,7 +1559,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
} else {
/* Agent is still in connecting process, don't allow to disconnect right away */
if (tapLoadingAgents(hostId, TapAgentsAction.Contains)) {
logger.info("Host " + hostId + " is being loaded so no disconnects needed.");
logger.info("Host {} is being loaded no disconnects needed.", hostId);
return;
}
@ -1705,17 +1658,17 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
/*
* Host is in non-operation state, so no investigation and direct put agent to Disconnected
*/
logger.debug("Ping timeout but agent " + agentId + " is in resource state of " + resourceState + ", so no investigation");
logger.debug("Ping timeout but agent {} is in resource state of {}, so no investigation", agentId, resourceState);
disconnectWithoutInvestigation(agentId, Event.ShutdownRequested);
} else {
final HostVO host = _hostDao.findById(agentId);
if (host != null
&& (host.getType() == Host.Type.ConsoleProxy || host.getType() == Host.Type.SecondaryStorageVM || host.getType() == Host.Type.SecondaryStorageCmdExecutor)) {
logger.warn("Disconnect agent for CPVM/SSVM due to physical connection close. host: " + host.getId());
logger.warn("Disconnect agent for CPVM/SSVM due to physical connection close. host: {}", host.getId());
disconnectWithoutInvestigation(agentId, Event.ShutdownRequested);
} else {
logger.debug("Ping timeout for agent " + agentId + ", do invstigation");
logger.debug("Ping timeout for agent {}, do investigation", agentId);
disconnectWithInvestigation(agentId, Event.PingTimeout);
}
}
@ -1754,7 +1707,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
}
if (agentsBehind.size() > 0) {
logger.info("Found the following agents behind on ping: " + agentsBehind);
logger.info("Found the following agents behind on ping: {}", agentsBehind);
}
return agentsBehind;
@ -1937,7 +1890,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
for (Long hostId : hostIds) {
Answer answer = easySend(hostId, cmds);
if (answer == null || !answer.getResult()) {
logger.error("Error sending parameters to agent " + hostId);
logger.error("Error sending parameters to agent {}", hostId);
}
}
}

View File

@ -90,7 +90,7 @@ public class ClusteredAgentAttache extends ConnectedAgentAttache implements Rout
String peerName = synchronous.getPeer();
if (peerName != null) {
if (s_clusteredAgentMgr != null) {
logger.debug(log(seq, "Forwarding to peer to cancel due to timeout"));
logger.debug(LOG_SEQ_FORMATTED_STRING, seq, "Forwarding to peer to cancel due to timeout");
s_clusteredAgentMgr.cancel(peerName, _id, seq, "Timed Out");
} else {
logger.error("Unable to forward cancel, ClusteredAgentAttache is not properly initialized");
@ -105,29 +105,21 @@ public class ClusteredAgentAttache extends ConnectedAgentAttache implements Rout
@Override
public void routeToAgent(final byte[] data) throws AgentUnavailableException {
if (logger.isDebugEnabled()) {
logger.debug(log(Request.getSequence(data), "Routing from " + Request.getManagementServerId(data)));
}
logger.debug(LOG_SEQ_FORMATTED_STRING, Request.getSequence(data), "Routing from " + Request.getManagementServerId(data));
if (_link == null) {
if (logger.isDebugEnabled()) {
logger.debug(log(Request.getSequence(data), "Link is closed"));
}
logger.debug(LOG_SEQ_FORMATTED_STRING, Request.getSequence(data), "Link is closed");
throw new AgentUnavailableException("Link is closed", _id);
}
try {
_link.send(data);
} catch (ClosedChannelException e) {
if (logger.isDebugEnabled()) {
logger.debug(log(Request.getSequence(data), "Channel is closed"));
}
logger.debug(LOG_SEQ_FORMATTED_STRING, Request.getSequence(data), "Channel is closed");
throw new AgentUnavailableException("Channel to agent is closed", _id);
} catch (NullPointerException e) {
if (logger.isDebugEnabled()) {
logger.debug(log(Request.getSequence(data), "Link is closed"));
}
logger.debug(LOG_SEQ_FORMATTED_STRING, Request.getSequence(data), "Link is closed");
// Note: since this block is not in synchronized. It is possible for _link to become null.
throw new AgentUnavailableException("Channel to agent is null", _id);
}
@ -148,9 +140,7 @@ public class ClusteredAgentAttache extends ConnectedAgentAttache implements Rout
if (_transferMode) {
if (logger.isDebugEnabled()) {
logger.debug(log(seq, "Holding request as the corresponding agent is in transfer mode: "));
}
logger.debug(LOG_SEQ_FORMATTED_STRING, seq, "Holding request as the corresponding agent is in transfer mode: ");
synchronized (this) {
addRequestToTransfer(req);
@ -174,9 +164,7 @@ public class ClusteredAgentAttache extends ConnectedAgentAttache implements Rout
ch = s_clusteredAgentMgr.connectToPeer(peerName, ch);
if (ch == null) {
if (logger.isDebugEnabled()) {
logger.debug(log(seq, "Unable to forward " + req.toString()));
}
logger.debug(LOG_SEQ_FORMATTED_STRING, seq, "Unable to forward " + req.toString());
continue;
}
@ -187,7 +175,7 @@ public class ClusteredAgentAttache extends ConnectedAgentAttache implements Rout
try {
if (logger.isDebugEnabled()) {
logger.debug(log(seq, "Forwarding " + req.toString() + " to " + peerName));
logger.debug(LOG_SEQ_FORMATTED_STRING, seq, "Forwarding " + req.toString() + " to " + peerName);
}
if (req.executeInSequence() && listener != null && listener instanceof SynchronousListener) {
SynchronousListener synchronous = (SynchronousListener)listener;
@ -198,7 +186,7 @@ public class ClusteredAgentAttache extends ConnectedAgentAttache implements Rout
return;
} catch (IOException e) {
if (logger.isDebugEnabled()) {
logger.debug(log(seq, "Error on connecting to management node: " + req.toString() + " try = " + i));
logger.debug(LOG_SEQ_FORMATTED_STRING, seq, "Error on connecting to management node: " + req.toString() + " try = " + i);
}
if (logger.isInfoEnabled()) {

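Note that this file keeps some isDebugEnabled() guards where the message argument is still built by eager concatenation (req.toString() plus the peer name). A sketch of how Log4j2's Supplier overloads could defer even that work, assuming log4j-api 2.4 or later and illustrative parameters:

void deferredSketch(Logger logger, Object req, String peerName) {
    // the lambda is invoked only if DEBUG is enabled, so the concatenation is fully deferred
    logger.debug("{}", () -> "Forwarding " + req.toString() + " to " + peerName);
}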
View File

@ -152,7 +152,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
_sslEngines = new HashMap<String, SSLEngine>(7);
_nodeId = ManagementServerNode.getManagementServerId();
logger.info("Configuring ClusterAgentManagerImpl. management server node id(msid): " + _nodeId);
logger.info("Configuring ClusterAgentManagerImpl. management server node id(msid): {}", _nodeId);
ClusteredAgentAttache.initialize(this);
@ -170,9 +170,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
return false;
}
_timer.schedule(new DirectAgentScanTimerTask(), STARTUP_DELAY, ScanInterval.value());
if (logger.isDebugEnabled()) {
logger.debug("Scheduled direct agent scan task to run at an interval of " + ScanInterval.value() + " seconds");
}
logger.debug("Scheduled direct agent scan task to run at an interval of {} seconds", ScanInterval.value());
// Schedule tasks for agent rebalancing
if (isAgentRebalanceEnabled()) {
@ -186,9 +184,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
public void scheduleHostScanTask() {
_timer.schedule(new DirectAgentScanTimerTask(), 0);
if (logger.isDebugEnabled()) {
logger.debug("Scheduled a direct agent scan task");
}
logger.debug("Scheduled a direct agent scan task");
}
private void runDirectAgentScanTimerTask() {
@ -196,9 +192,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
}
private void scanDirectAgentToLoad() {
if (logger.isTraceEnabled()) {
logger.trace("Begin scanning directly connected hosts");
}
logger.trace("Begin scanning directly connected hosts");
// for agents that are self-managed, threshold to be considered as disconnected after pingtimeout
final long cutSeconds = (System.currentTimeMillis() >> 10) - mgmtServiceConf.getTimeout();
@ -208,35 +202,29 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
if (hosts != null) {
hosts.addAll(appliances);
if (hosts.size() > 0) {
logger.debug("Found " + hosts.size() + " unmanaged direct hosts, processing connect for them...");
logger.debug("Found {} unmanaged direct hosts, processing connect for them...", hosts.size());
for (final HostVO host : hosts) {
try {
final AgentAttache agentattache = findAttache(host.getId());
if (agentattache != null) {
// already loaded, skip
if (agentattache.forForward()) {
if (logger.isInfoEnabled()) {
logger.info(host + " is detected down, but we have a forward attache running, disconnect this one before launching the host");
}
logger.info("{} is detected down, but we have a forward attache running, disconnect this one before launching the host", host);
removeAgent(agentattache, Status.Disconnected);
} else {
continue;
}
}
if (logger.isDebugEnabled()) {
logger.debug("Loading directly connected host " + host.getId() + "(" + host.getName() + ")");
}
logger.debug("Loading directly connected host {}({})", host.getId(), host.getName());
loadDirectlyConnectedHost(host, false);
} catch (final Throwable e) {
logger.warn(" can not load directly connected host " + host.getId() + "(" + host.getName() + ") due to ", e);
logger.warn(" can not load directly connected host {}({}) due to ", host.getId(), host.getName(), e);
}
}
}
}
if (logger.isTraceEnabled()) {
logger.trace("End scanning directly connected hosts");
}
logger.trace("End scanning directly connected hosts");
}
private class DirectAgentScanTimerTask extends ManagedContextTimerTask {
@ -245,7 +233,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
try {
runDirectAgentScanTimerTask();
} catch (final Throwable e) {
logger.error("Unexpected exception " + e.getMessage(), e);
logger.error("Unexpected exception {}", e.getMessage(), e);
}
}
}
@ -256,7 +244,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
}
protected AgentAttache createAttache(final long id) {
logger.debug("create forwarding ClusteredAgentAttache for " + id);
logger.debug("create forwarding ClusteredAgentAttache for {}", id);
final HostVO host = _hostDao.findById(id);
final AgentAttache attache = new ClusteredAgentAttache(this, id, host.getName());
AgentAttache old = null;
@ -265,9 +253,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
_agents.put(id, attache);
}
if (old != null) {
if (logger.isDebugEnabled()) {
logger.debug("Remove stale agent attache from current management server");
}
logger.debug("Remove stale agent attache from current management server");
removeAgent(old, Status.Removed);
}
return attache;
@ -275,7 +261,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
@Override
protected AgentAttache createAttacheForConnect(final HostVO host, final Link link) {
logger.debug("create ClusteredAgentAttache for " + host.getId());
logger.debug("create ClusteredAgentAttache for {}", host.getId());
final AgentAttache attache = new ClusteredAgentAttache(this, host.getId(), host.getName(), link, host.isInMaintenanceStates());
link.attach(attache);
AgentAttache old = null;
@ -291,7 +277,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
@Override
protected AgentAttache createAttacheForDirectConnect(final Host host, final ServerResource resource) {
logger.debug(String.format("Create ClusteredDirectAgentAttache for %s.", host));
logger.debug("Create ClusteredDirectAgentAttache for {}.", host);
final DirectAgentAttache attache = new ClusteredDirectAgentAttache(this, host.getId(), host.getName(), _nodeId, resource, host.isInMaintenanceStates());
AgentAttache old = null;
synchronized (_agents) {
@ -335,9 +321,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
@Override
public boolean executeUserRequest(final long hostId, final Event event) throws AgentUnavailableException {
if (event == Event.AgentDisconnected) {
if (logger.isDebugEnabled()) {
logger.debug("Received agent disconnect event for host " + hostId);
}
logger.debug("Received agent disconnect event for host {}", hostId);
final AgentAttache attache = findAttache(hostId);
if (attache != null) {
// don't process disconnect if the host is being rebalanced
@ -345,7 +329,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
final HostTransferMapVO transferVO = _hostTransferDao.findById(hostId);
if (transferVO != null) {
if (transferVO.getFutureOwner() == _nodeId && transferVO.getState() == HostTransferState.TransferStarted) {
logger.debug("Not processing " + Event.AgentDisconnected + " event for the host id=" + hostId + " as the host is being connected to " + _nodeId);
logger.debug("Not processing {} event for the host id={} as the host is being connected to {}",Event.AgentDisconnected, hostId, _nodeId);
return true;
}
}
@ -354,7 +338,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
// don't process disconnect if the disconnect came for the host via delayed cluster notification,
// but the host has already reconnected to the current management server
if (!attache.forForward()) {
logger.debug("Not processing " + Event.AgentDisconnected + " event for the host id=" + hostId + " as the host is directly connected to the current management server " + _nodeId);
logger.debug("Not processing {} event for the host id={} as the host is directly connected to the current management server {}", Event.AgentDisconnected, hostId, _nodeId);
return true;
}
@ -387,26 +371,24 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
// notifies MS peers to schedule a host scan task immediately, triggered during addHost operation
public void notifyNodesInClusterToScheduleHostScanTask() {
if (logger.isDebugEnabled()) {
logger.debug("Notifying other MS nodes to run host scan task");
}
logger.debug("Notifying other MS nodes to run host scan task");
final Command[] cmds = new Command[] {new ScheduleHostScanTaskCommand()};
_clusterMgr.broadcast(0, _gson.toJson(cmds));
}
protected void logT(final byte[] bytes, final String msg) {
logger.trace("Seq " + Request.getAgentId(bytes) + "-" + Request.getSequence(bytes) + ": MgmtId " + Request.getManagementServerId(bytes) + ": "
+ (Request.isRequest(bytes) ? "Req: " : "Resp: ") + msg);
logger.trace("Seq {}-{}: MgmtId {} : {}", Request.getAgentId(bytes), Request.getSequence(bytes),
Request.getManagementServerId(bytes), (Request.isRequest(bytes) ? "Req: " : "Resp: ") + msg);
}
protected void logD(final byte[] bytes, final String msg) {
logger.debug("Seq " + Request.getAgentId(bytes) + "-" + Request.getSequence(bytes) + ": MgmtId " + Request.getManagementServerId(bytes) + ": "
+ (Request.isRequest(bytes) ? "Req: " : "Resp: ") + msg);
logger.debug("Seq {}-{}: MgmtId {} : {}", Request.getAgentId(bytes), Request.getSequence(bytes),
Request.getManagementServerId(bytes), (Request.isRequest(bytes) ? "Req: " : "Resp: ") + msg);
}
protected void logI(final byte[] bytes, final String msg) {
logger.info("Seq " + Request.getAgentId(bytes) + "-" + Request.getSequence(bytes) + ": MgmtId " + Request.getManagementServerId(bytes) + ": "
+ (Request.isRequest(bytes) ? "Req: " : "Resp: ") + msg);
logger.info("Seq {}-{}: MgmtId {} : {}", Request.getAgentId(bytes), Request.getSequence(bytes),
Request.getManagementServerId(bytes), (Request.isRequest(bytes) ? "Req: " : "Resp: ") + msg);
}
public boolean routeToPeer(final String peer, final byte[] bytes) {
@ -430,9 +412,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
return false;
}
try {
if (logger.isDebugEnabled()) {
logD(bytes, "Routing to peer");
}
logD(bytes, "Routing to peer");
Link.write(ch, new ByteBuffer[] {ByteBuffer.wrap(bytes)}, sslEngine);
return true;
} catch (final IOException e) {
@ -469,7 +449,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
try {
ch.close();
} catch (final IOException e) {
logger.warn("Unable to close peer socket connection to " + peerName);
logger.warn("Unable to close peer socket connection to {}", peerName);
}
}
_peers.remove(peerName);
@ -485,13 +465,13 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
try {
prevCh.close();
} catch (final Exception e) {
logger.info("[ignored]" + "failed to get close resource for previous channel Socket: " + e.getLocalizedMessage());
logger.info("[ignored] failed to get close resource for previous channel Socket: {}", e.getLocalizedMessage());
}
}
if (ch == null || ch == prevCh) {
final ManagementServerHost ms = _clusterMgr.getPeer(peerName);
if (ms == null) {
logger.info("Unable to find peer: " + peerName);
logger.info("Unable to find peer: {}", peerName);
return null;
}
final String ip = ms.getServiceIP();
@ -518,14 +498,12 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
ch1.close();
throw new IOException(String.format("SSL: Handshake failed with peer management server '%s' on %s:%d ", peerName, ip, port));
}
logger.info(String.format("SSL: Handshake done with peer management server '%s' on %s:%d ", peerName, ip, port));
logger.info("SSL: Handshake done with peer management server '{}' on {}:{} ", peerName, ip, port);
} catch (final Exception e) {
ch1.close();
throw new IOException("SSL: Fail to init SSL! " + e);
}
if (logger.isDebugEnabled()) {
logger.debug("Connection to peer opened: " + peerName + ", ip: " + ip);
}
logger.debug("Connection to peer opened: {}, ip: {}", peerName, ip);
_peers.put(peerName, ch1);
_sslEngines.put(peerName, sslEngine);
return ch1;
@ -534,17 +512,15 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
try {
ch1.close();
} catch (final IOException ex) {
logger.error("failed to close failed peer socket: " + ex);
logger.error("failed to close failed peer socket: {}", ex);
}
}
logger.warn("Unable to connect to peer management server: " + peerName + ", ip: " + ip + " due to " + e.getMessage(), e);
logger.warn("Unable to connect to peer management server: {}, ip {} due to {}", peerName, ip, e.getMessage(), e);
return null;
}
}
if (logger.isTraceEnabled()) {
logger.trace("Found open channel for peer: " + peerName);
}
logger.trace("Found open channel for peer: {}", peerName);
return ch;
}
}
@ -569,9 +545,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
AgentAttache agent = findAttache(hostId);
if (agent == null || !agent.forForward()) {
if (isHostOwnerSwitched(host)) {
if (logger.isDebugEnabled()) {
logger.debug("Host " + hostId + " has switched to another management server, need to update agent map with a forwarding agent attache");
}
logger.debug("Host {} has switched to another management server, need to update agent map with a forwarding agent attache", hostId);
agent = createAttache(hostId);
}
}
@ -589,10 +563,10 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
if (_peers != null) {
for (final SocketChannel ch : _peers.values()) {
try {
logger.info("Closing: " + ch.toString());
logger.info("Closing: {}", ch.toString());
ch.close();
} catch (final IOException e) {
logger.info("[ignored] error on closing channel: " + ch.toString(), e);
logger.info("[ignored] error on closing channel: {}", ch.toString(), e);
}
}
}
@ -649,9 +623,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
final Request req = Request.parse(data);
final Command[] cmds = req.getCommands();
final CancelCommand cancel = (CancelCommand)cmds[0];
if (logger.isDebugEnabled()) {
logD(data, "Cancel request received");
}
logD(data, "Cancel request received");
agent.cancel(cancel.getSequence());
final Long current = agent._currentSequence;
// if the request is the current request, always have to trigger sending next request in
@ -697,9 +669,8 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
final AgentAttache attache = (AgentAttache)link.attachment();
if (attache != null) {
attache.sendNext(Request.getSequence(data));
} else if (logger.isDebugEnabled()) {
logD(data, "No attache to process " + Request.parse(data).toString());
}
logD(data, "No attache to process " + Request.parse(data).toString());
}
return;
} else {
@ -710,11 +681,11 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
final Response response = Response.parse(data);
final AgentAttache attache = findAttache(response.getAgentId());
if (attache == null) {
logger.info("SeqA " + response.getAgentId() + "-" + response.getSequence() + "Unable to find attache to forward " + response.toString());
logger.info("SeqA {}-{} Unable to find attache to forward {}", response.getAgentId(), response.getSequence(), response.toString());
return;
}
if (!attache.processAnswers(response.getSequence(), response)) {
logger.info("SeqA " + attache.getId() + "-" + response.getSequence() + ": Response is not processed: " + response.toString());
logger.info("SeqA {}-{}: Response is not processed: {}", attache.getId(), response.getSequence(), response.toString());
}
}
return;
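Note that the hunk above still builds Request.parse(data).toString() eagerly; parameterized logging defers only the final formatting, not the evaluation of arguments. Where that cost matters, Log4j2's Supplier overloads push the computation behind the level check. A hedged sketch, with describePayload standing in as a hypothetical expensive rendering step:

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;

    class LazyArgs {
        private static final Logger logger = LogManager.getLogger(LazyArgs.class);

        // Hypothetical stand-in for something costly such as parsing a request
        // and rendering it to a string.
        private String describePayload(byte[] data) {
            return new String(data).trim();
        }

        void log(byte[] data) {
            // Eager: describePayload runs even when DEBUG is off.
            logger.debug("No attache to process {}", describePayload(data));
            // Deferred: the lambda is invoked only when DEBUG is enabled.
            logger.debug("No attache to process {}", () -> describePayload(data));
        }
    }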
@ -741,12 +712,12 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
@Override
public void onManagementNodeLeft(final List<? extends ManagementServerHost> nodeList, final long selfNodeId) {
for (final ManagementServerHost vo : nodeList) {
logger.info("Marking hosts as disconnected on Management server" + vo.getMsid());
logger.info("Marking hosts as disconnected on Management server {}", vo.getMsid());
final long lastPing = (System.currentTimeMillis() >> 10) - mgmtServiceConf.getTimeout();
_hostDao.markHostsAsDisconnected(vo.getMsid(), lastPing);
outOfBandManagementDao.expireServerOwnership(vo.getMsid());
haConfigDao.expireServerOwnership(vo.getMsid());
logger.info("Deleting entries from op_host_transfer table for Management server " + vo.getMsid());
logger.info("Deleting entries from op_host_transfer table for Management server {}", vo.getMsid());
cleanupTransferMap(vo.getMsid());
}
}
@ -773,7 +744,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
try {
result = rebalanceHost(agentId, currentOwnerId, futureOwnerId);
} catch (final Exception e) {
logger.warn("Unable to rebalance host id=" + agentId, e);
logger.warn("Unable to rebalance host id={}", agentId, e);
}
}
return result;
@ -806,19 +777,17 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
try {
if (!cancelled) {
startRebalanceAgents();
if (logger.isInfoEnabled()) {
logger.info("The agent load balancer task is now being cancelled");
}
logger.info("The agent load balancer task is now being cancelled");
cancelled = true;
}
} catch (final Throwable e) {
logger.error("Unexpected exception " + e.toString(), e);
logger.error("Unexpected exception {}", e.toString(), e);
}
}
}
public void startRebalanceAgents() {
logger.debug("Management server " + _nodeId + " is asking other peers to rebalance their agents");
logger.debug("Management server {} is asking other peers to rebalance their agents", _nodeId);
final List<ManagementServerHostVO> allMS = _mshostDao.listBy(ManagementServerHost.State.Up);
final QueryBuilder<HostVO> sc = QueryBuilder.create(HostVO.class);
sc.and(sc.entity().getManagementServerId(), Op.NNULL);
@ -830,17 +799,13 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
if (!allManagedAgents.isEmpty() && !allMS.isEmpty()) {
avLoad = allManagedAgents.size() / allMS.size();
} else {
if (logger.isDebugEnabled()) {
logger.debug("There are no hosts to rebalance in the system. Current number of active management server nodes in the system is " + allMS.size() + "; number of managed agents is "
+ allManagedAgents.size());
}
logger.debug("There are no hosts to rebalance in the system. Current number of active management server nodes in the system is {};" +
"number of managed agents is {}", allMS.size(), allManagedAgents.size());
return;
}
if (avLoad == 0L) {
if (logger.isDebugEnabled()) {
logger.debug("As calculated average load is less than 1, rounding it to 1");
}
logger.debug("As calculated average load is less than 1, rounding it to 1");
avLoad = 1;
}
@ -852,20 +817,19 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
hostsToRebalance = lbPlanner.getHostsToRebalance(node.getMsid(), avLoad);
if (hostsToRebalance != null && !hostsToRebalance.isEmpty()) {
break;
} else {
logger.debug("Agent load balancer planner " + lbPlanner.getName() + " found no hosts to be rebalanced from management server " + node.getMsid());
}
logger.debug("Agent load balancer planner " + lbPlanner.getName() + " found no hosts to be rebalanced from management server " + node.getMsid());
}
if (hostsToRebalance != null && !hostsToRebalance.isEmpty()) {
logger.debug("Found " + hostsToRebalance.size() + " hosts to rebalance from management server " + node.getMsid());
logger.debug("Found {} hosts to rebalance from management server {}", hostsToRebalance.size(), node.getMsid());
for (final HostVO host : hostsToRebalance) {
final long hostId = host.getId();
logger.debug("Asking management server " + node.getMsid() + " to give away host id=" + hostId);
logger.debug("Asking management server {} to give away host id={}", node.getMsid(), hostId);
boolean result = true;
if (_hostTransferDao.findById(hostId) != null) {
logger.warn("Somebody else is already rebalancing host id: " + hostId);
logger.warn("Somebody else is already rebalancing host id: {}", hostId);
continue;
}
@ -874,19 +838,17 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
transfer = _hostTransferDao.startAgentTransfering(hostId, node.getMsid(), _nodeId);
final Answer[] answer = sendRebalanceCommand(node.getMsid(), hostId, node.getMsid(), _nodeId, Event.RequestAgentRebalance);
if (answer == null) {
logger.warn("Failed to get host id=" + hostId + " from management server " + node.getMsid());
logger.warn("Failed to get host id={} from management server {}", hostId, node.getMsid());
result = false;
}
} catch (final Exception ex) {
logger.warn("Failed to get host id=" + hostId + " from management server " + node.getMsid(), ex);
logger.warn("Failed to get host id={} from management server {}", hostId, node.getMsid(), ex);
result = false;
} finally {
if (transfer != null) {
final HostTransferMapVO transferState = _hostTransferDao.findByIdAndFutureOwnerId(transfer.getId(), _nodeId);
if (!result && transferState != null && transferState.getState() == HostTransferState.TransferRequested) {
if (logger.isDebugEnabled()) {
logger.debug("Removing mapping from op_host_transfer as it failed to be set to transfer mode");
}
logger.debug("Removing mapping from op_host_transfer as it failed to be set to transfer mode");
// just remove the mapping (if exists) as nothing was done on the peer management
// server yet
_hostTransferDao.remove(transfer.getId());
@ -895,7 +857,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
}
}
} else {
logger.debug("Found no hosts to rebalance from the management server " + node.getMsid());
logger.debug("Found no hosts to rebalance from the management server {}", node.getMsid());
}
}
}
@ -909,16 +871,14 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
final Command[] cmds = commands.toCommands();
try {
if (logger.isDebugEnabled()) {
logger.debug("Forwarding " + cmds[0].toString() + " to " + peer);
}
logger.debug("Forwarding {} to {}", cmds[0].toString(), peer);
final String peerName = Long.toString(peer);
final String cmdStr = _gson.toJson(cmds);
final String ansStr = _clusterMgr.execute(peerName, agentId, cmdStr, true);
final Answer[] answers = _gson.fromJson(ansStr, Answer[].class);
return answers;
} catch (final Exception e) {
logger.warn("Caught exception while talking to " + currentOwnerId, e);
logger.warn("Caught exception while talking to {}", currentOwnerId, e);
return null;
}
}
@ -942,9 +902,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
return null;
}
if (logger.isDebugEnabled()) {
logger.debug("Propagating agent change request event:" + event.toString() + " to agent:" + agentId);
}
logger.debug("Propagating agent change request event: {} to agent: {}", event.toString(), agentId);
final Command[] cmds = new Command[1];
cmds[0] = new ChangeAgentCommand(agentId, event);
@ -955,9 +913,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
final Answer[] answers = _gson.fromJson(ansStr, Answer[].class);
if (logger.isDebugEnabled()) {
logger.debug("Result for agent change is " + answers[0].getResult());
}
logger.debug("Result for agent change is {}", answers[0].getResult());
return answers[0].getResult();
}
@ -967,12 +923,10 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
@Override
protected void runInContext() {
try {
if (logger.isTraceEnabled()) {
logger.trace("Clustered agent transfer scan check, management server id:" + _nodeId);
}
logger.trace("Clustered agent transfer scan check, management server id: {}", _nodeId);
synchronized (_agentToTransferIds) {
if (_agentToTransferIds.size() > 0) {
logger.debug("Found " + _agentToTransferIds.size() + " agents to transfer");
logger.debug("Found {} agents to transfer", _agentToTransferIds.size());
// for (Long hostId : _agentToTransferIds) {
for (final Iterator<Long> iterator = _agentToTransferIds.iterator(); iterator.hasNext();) {
final Long hostId = iterator.next();
@ -988,14 +942,14 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
final HostTransferMapVO transferMap = _hostTransferDao.findActiveHostTransferMapByHostId(hostId, new Date(cutTime.getTime() - rebalanceTimeOut));
if (transferMap == null) {
logger.debug("Timed out waiting for the host id=" + hostId + " to be ready to transfer, skipping rebalance for the host");
logger.debug("Timed out waiting for the host id={} to be ready to transfer, skipping rebalance for the host" + hostId);
iterator.remove();
_hostTransferDao.completeAgentTransfer(hostId);
continue;
}
if (transferMap.getInitialOwner() != _nodeId || attache == null || attache.forForward()) {
logger.debug("Management server " + _nodeId + " doesn't own host id=" + hostId + " any more, skipping rebalance for the host");
logger.debug("Management server {} doesn't own host id={} any more, skipping rebalance for the host", _nodeId, hostId);
iterator.remove();
_hostTransferDao.completeAgentTransfer(hostId);
continue;
@ -1003,7 +957,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
final ManagementServerHostVO ms = _mshostDao.findByMsid(transferMap.getFutureOwner());
if (ms != null && ms.getState() != ManagementServerHost.State.Up) {
logger.debug("Can't transfer host " + hostId + " as it's future owner is not in UP state: " + ms + ", skipping rebalance for the host");
logger.debug("Can't transfer host {} as it's future owner is not in UP state: {}, skipping rebalance for the host", hostId, ms);
iterator.remove();
_hostTransferDao.completeAgentTransfer(hostId);
continue;
@ -1014,19 +968,17 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
try {
_executor.execute(new RebalanceTask(hostId, transferMap.getInitialOwner(), transferMap.getFutureOwner()));
} catch (final RejectedExecutionException ex) {
logger.warn("Failed to submit rebalance task for host id=" + hostId + "; postponing the execution");
logger.warn("Failed to submit rebalance task for host id={}; postponing the execution", hostId);
continue;
}
} else {
logger.debug("Agent " + hostId + " can't be transferred yet as its request queue size is " + attache.getQueueSize() + " and listener queue size is "
+ attache.getNonRecurringListenersSize());
logger.debug("Agent {} can't be transferred yet as its request queue size is {} and listener queue size is {}",
hostId, attache.getQueueSize(), attache.getNonRecurringListenersSize());
}
}
} else {
if (logger.isTraceEnabled()) {
logger.trace("Found no agents to be transferred by the management server " + _nodeId);
}
logger.trace("Found no agents to be transferred by the management server {}", _nodeId);
}
}
@ -1038,7 +990,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
}
private boolean setToWaitForRebalance(final long hostId, final long currentOwnerId, final long futureOwnerId) {
logger.debug("Adding agent " + hostId + " to the list of agents to transfer");
logger.debug("Adding agent {} to the list of agents to transfer", hostId);
synchronized (_agentToTransferIds) {
return _agentToTransferIds.add(hostId);
}
@ -1060,24 +1012,22 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
}
} catch (final Exception ex) {
logger.warn("Host " + hostId + " failed to connect to the management server " + futureOwnerId + " as a part of rebalance process", ex);
logger.warn("Host {} failed to connect to the management server {} as a part of rebalance process", hostId, futureOwnerId, ex);
result = false;
}
if (result) {
logger.debug("Successfully transferred host id=" + hostId + " to management server " + futureOwnerId);
logger.debug("Successfully transferred host id={} to management server {}", hostId, futureOwnerId);
finishRebalance(hostId, futureOwnerId, Event.RebalanceCompleted);
} else {
logger.warn("Failed to transfer host id=" + hostId + " to management server " + futureOwnerId);
logger.warn("Failed to transfer host id={} to management server {}", hostId, futureOwnerId);
finishRebalance(hostId, futureOwnerId, Event.RebalanceFailed);
}
} else if (futureOwnerId == _nodeId) {
final HostVO host = _hostDao.findById(hostId);
try {
if (logger.isDebugEnabled()) {
logger.debug("Disconnecting host " + host.getId() + "(" + host.getName() + " as a part of rebalance process without notification");
}
logger.debug("Disconnecting host {}({}) as a part of rebalance process without notification", host.getId(), host.getName());
final AgentAttache attache = findAttache(hostId);
if (attache != null) {
@ -1085,24 +1035,21 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
}
if (result) {
if (logger.isDebugEnabled()) {
logger.debug("Loading directly connected host " + host.getId() + "(" + host.getName() + ") to the management server " + _nodeId + " as a part of rebalance process");
}
logger.debug("Loading directly connected host {}({}) to the management server {} as a part of rebalance process", host.getId(), host.getName(), _nodeId);
result = loadDirectlyConnectedHost(host, true);
} else {
logger.warn("Failed to disconnect " + host.getId() + "(" + host.getName() + " as a part of rebalance process without notification");
logger.warn("Failed to disconnect {}({}) as a part of rebalance process without notification" + host.getId(), host.getName());
}
} catch (final Exception ex) {
logger.warn("Failed to load directly connected host " + host.getId() + "(" + host.getName() + ") to the management server " + _nodeId + " as a part of rebalance process due to:",
ex);
logger.warn("Failed to load directly connected host {}({}) to the management server {} a part of rebalance process without notification", host.getId(), host.getName(), _nodeId, ex);
result = false;
}
if (result) {
logger.debug("Successfully loaded directly connected host " + host.getId() + "(" + host.getName() + ") to the management server " + _nodeId + " as a part of rebalance process");
logger.debug("Successfully loaded directly connected host {}({}) to the management server {} a part of rebalance process without notification", host.getId(), host.getName(), _nodeId);
} else {
logger.warn("Failed to load directly connected host " + host.getId() + "(" + host.getName() + ") to the management server " + _nodeId + " as a part of rebalance process");
logger.warn("Failed to load directly connected host {}({}) to the management server {} a part of rebalance process without notification", host.getId(), host.getName(), _nodeId);
}
}
@ -1112,13 +1059,11 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
protected void finishRebalance(final long hostId, final long futureOwnerId, final Event event) {
final boolean success = event == Event.RebalanceCompleted ? true : false;
if (logger.isDebugEnabled()) {
logger.debug("Finishing rebalancing for the agent " + hostId + " with event " + event);
}
logger.debug("Finishing rebalancing for the agent {} with event {}", hostId, event);
final AgentAttache attache = findAttache(hostId);
if (attache == null || !(attache instanceof ClusteredAgentAttache)) {
logger.debug("Unable to find forward attache for the host id=" + hostId + ", assuming that the agent disconnected already");
logger.debug("Unable to find forward attache for the host id={} assuming that the agent disconnected already", hostId);
_hostTransferDao.completeAgentTransfer(hostId);
return;
}
@ -1133,7 +1078,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
// 2) Get all transfer requests and route them to peer
Request requestToTransfer = forwardAttache.getRequestToTransfer();
while (requestToTransfer != null) {
logger.debug("Forwarding request " + requestToTransfer.getSequence() + " held in transfer attache " + hostId + " from the management server " + _nodeId + " to " + futureOwnerId);
logger.debug("Forwarding request {} held in transfer attache {} from the management server {} to {}", requestToTransfer.getSequence(), hostId, _nodeId, futureOwnerId);
final boolean routeResult = routeToPeer(Long.toString(futureOwnerId), requestToTransfer.getBytes());
if (!routeResult) {
logD(requestToTransfer.getBytes(), "Failed to route request to peer");
@ -1142,23 +1087,23 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
requestToTransfer = forwardAttache.getRequestToTransfer();
}
logger.debug("Management server " + _nodeId + " completed agent " + hostId + " rebalance to " + futureOwnerId);
logger.debug("Management server {} completed agent {} rebalance to {}", _nodeId, hostId, futureOwnerId);
} else {
failRebalance(hostId);
}
logger.debug("Management server " + _nodeId + " completed agent " + hostId + " rebalance");
logger.debug("Management server {} completed agent {} rebalance", _nodeId, hostId);
_hostTransferDao.completeAgentTransfer(hostId);
}
protected void failRebalance(final long hostId) {
try {
logger.debug("Management server " + _nodeId + " failed to rebalance agent " + hostId);
logger.debug("Management server {} failed to rebalance agent {}", _nodeId, hostId);
_hostTransferDao.completeAgentTransfer(hostId);
handleDisconnectWithoutInvestigation(findAttache(hostId), Event.RebalanceFailed, true, true);
} catch (final Exception ex) {
logger.warn("Failed to reconnect host id=" + hostId + " as a part of failed rebalance task cleanup");
logger.warn("Failed to reconnect host id={} as a part of failed rebalance task cleanup", hostId);
}
}
@ -1176,18 +1121,18 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
handleDisconnectWithoutInvestigation(attache, Event.StartAgentRebalance, true, true);
final ClusteredAgentAttache forwardAttache = (ClusteredAgentAttache)createAttache(hostId);
if (forwardAttache == null) {
logger.warn("Unable to create a forward attache for the host " + hostId + " as a part of rebalance process");
logger.warn("Unable to create a forward attache for the host {} as a part of rebalance process", hostId);
return false;
}
logger.debug("Putting agent id=" + hostId + " to transfer mode");
logger.debug("Putting agent id={} to transfer mode", hostId);
forwardAttache.setTransferMode(true);
_agents.put(hostId, forwardAttache);
} else {
if (attache == null) {
logger.warn("Attache for the agent " + hostId + " no longer exists on management server " + _nodeId + ", can't start host rebalancing");
logger.warn("Attache for the agent {} no longer exists on management server, can't start host rebalancing", hostId, _nodeId);
} else {
logger.warn("Attache for the agent " + hostId + " has request queue size= " + attache.getQueueSize() + " and listener queue size " + attache.getNonRecurringListenersSize()
+ ", can't start host rebalancing");
logger.warn("Attache for the agent {} has request queue size= {} and listener queue size {}, can't start host rebalancing",
hostId, attache.getQueueSize(), attache.getNonRecurringListenersSize());
}
return false;
}
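One hazard of this mechanical conversion is that Log4j2 pairs "{}" placeholders and arguments purely by position, with no compile-time check: a surplus argument is silently dropped, and a missing one leaves a literal "{}" in the output. An illustrative pair, using made-up variables:

    logger.warn("agent {} on server, can't rebalance", hostId, nodeId);    // nodeId is silently dropped
    logger.warn("agent {} on server {}, can't rebalance", hostId, nodeId); // both values land in the message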
@ -1223,29 +1168,24 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
@Override
protected void runInContext() {
try {
if (logger.isDebugEnabled()) {
logger.debug("Rebalancing host id=" + hostId);
}
logger.debug("Rebalancing host id={}", hostId);
rebalanceHost(hostId, currentOwnerId, futureOwnerId);
} catch (final Exception e) {
logger.warn("Unable to rebalance host id=" + hostId, e);
logger.warn("Unable to rebalance host id={}", hostId, e);
}
}
}
private String handleScheduleHostScanTaskCommand(final ScheduleHostScanTaskCommand cmd) {
if (logger.isDebugEnabled()) {
logger.debug("Intercepting resource manager command: " + _gson.toJson(cmd));
}
logger.debug("Intercepting resource manager command: {}", _gson.toJson(cmd));
try {
scheduleHostScanTask();
} catch (final Exception e) {
// Scheduling host scan task in peer MS is a best effort operation during host add, regular host scan
// happens at fixed intervals anyways. So handling any exceptions that may be thrown
logger.warn(
"Exception happened while trying to schedule host scan task on mgmt server " + _clusterMgr.getSelfPeerName() + ", ignoring as regular host scan happens at fixed interval anyways",
e);
logger.warn("Exception happened while trying to schedule host scan task on mgmt server {}, ignoring as regular host scan happens at fixed " +
"interval anyways", _clusterMgr.getSelfPeerName(), e);
return null;
}
@ -1271,9 +1211,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
@Override
public String dispatch(final ClusterServicePdu pdu) {
if (logger.isDebugEnabled()) {
logger.debug("Dispatch ->" + pdu.getAgentId() + ", json: " + pdu.getJsonPackage());
}
logger.debug("Dispatch ->{}, json: {}", pdu.getAgentId(), pdu.getJsonPackage());
Command[] cmds = null;
try {
@ -1286,15 +1224,11 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
if (cmds.length == 1 && cmds[0] instanceof ChangeAgentCommand) { // intercepted
final ChangeAgentCommand cmd = (ChangeAgentCommand)cmds[0];
if (logger.isDebugEnabled()) {
logger.debug("Intercepting command for agent change: agent " + cmd.getAgentId() + " event: " + cmd.getEvent());
}
logger.debug("Intercepting command for agent change: agent {} event: {}", cmd.getAgentId(), cmd.getEvent());
boolean result = false;
try {
result = executeAgentUserRequest(cmd.getAgentId(), cmd.getEvent());
if (logger.isDebugEnabled()) {
logger.debug("Result is " + result);
}
logger.debug("Result is {}", result);
} catch (final AgentUnavailableException e) {
logger.warn("Agent is unavailable", e);
@ -1307,15 +1241,11 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
} else if (cmds.length == 1 && cmds[0] instanceof TransferAgentCommand) {
final TransferAgentCommand cmd = (TransferAgentCommand)cmds[0];
if (logger.isDebugEnabled()) {
logger.debug("Intercepting command for agent rebalancing: agent " + cmd.getAgentId() + " event: " + cmd.getEvent());
}
logger.debug("Intercepting command for agent rebalancing: agent {} event: {}", cmd.getAgentId(), cmd.getEvent());
boolean result = false;
try {
result = rebalanceAgent(cmd.getAgentId(), cmd.getEvent(), cmd.getCurrentOwner(), cmd.getFutureOwner());
if (logger.isDebugEnabled()) {
logger.debug("Result is " + result);
}
logger.debug("Result is {}", result);
} catch (final AgentUnavailableException e) {
logger.warn("Agent is unavailable", e);
@ -1330,12 +1260,12 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
} else if (cmds.length == 1 && cmds[0] instanceof PropagateResourceEventCommand) {
final PropagateResourceEventCommand cmd = (PropagateResourceEventCommand)cmds[0];
logger.debug("Intercepting command to propagate event " + cmd.getEvent().name() + " for host " + cmd.getHostId());
logger.debug("Intercepting command to propagate event {} for host {}", cmd.getEvent().name(), cmd.getHostId());
boolean result = false;
try {
result = _resourceMgr.executeUserRequest(cmd.getHostId(), cmd.getEvent());
logger.debug("Result is " + result);
logger.debug("Result is {}", result);
} catch (final AgentUnavailableException ex) {
logger.warn("Agent is unavailable", ex);
return null;
@ -1354,25 +1284,19 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
try {
final long startTick = System.currentTimeMillis();
if (logger.isDebugEnabled()) {
logger.debug("Dispatch -> " + pdu.getAgentId() + ", json: " + pdu.getJsonPackage());
}
logger.debug("Dispatch -> {}, json: {}", pdu.getAgentId(), pdu.getJsonPackage());
final Answer[] answers = sendToAgent(pdu.getAgentId(), cmds, pdu.isStopOnError());
if (answers != null) {
final String jsonReturn = _gson.toJson(answers);
if (logger.isDebugEnabled()) {
logger.debug("Completed dispatching -> " + pdu.getAgentId() + ", json: " + pdu.getJsonPackage() + " in " + (System.currentTimeMillis() - startTick) + " ms, return result: "
+ jsonReturn);
}
logger.debug("Completed dispatching -> {}, json: {} in {} ms, return result: {}", pdu.getAgentId(),
pdu.getJsonPackage(), (System.currentTimeMillis() - startTick), jsonReturn);
return jsonReturn;
} else {
if (logger.isDebugEnabled()) {
logger.debug(
"Completed dispatching -> " + pdu.getAgentId() + ", json: " + pdu.getJsonPackage() + " in " + (System.currentTimeMillis() - startTick) + " ms, return null result");
}
logger.debug("Completed dispatching -> {}, json: {} in {} ms, return null result", pdu.getAgentId(),
pdu.getJsonPackage(), (System.currentTimeMillis() - startTick));
}
} catch (final AgentUnavailableException e) {
logger.warn("Agent is unavailable", e);
@ -1432,9 +1356,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
@Override
protected void runInContext() {
try {
if (logger.isTraceEnabled()) {
logger.trace("Agent rebalance task check, management server id:" + _nodeId);
}
logger.trace("Agent rebalance task check, management server id:{}", _nodeId);
// initiate agent lb task will be scheduled and executed only once, and only when number of agents
// loaded exceeds _connectedAgentsThreshold
if (!_agentLbHappened) {
@ -1451,11 +1373,11 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
if (allHostsCount > 0.0) {
final double load = managedHostsCount / allHostsCount;
if (load > ConnectedAgentThreshold.value()) {
logger.debug("Scheduling agent rebalancing task as the average agent load " + load + " is more than the threshold " + ConnectedAgentThreshold.value());
logger.debug("Scheduling agent rebalancing task as the average agent load {} is more than the threshold {}", load, ConnectedAgentThreshold.value());
scheduleRebalanceAgents();
_agentLbHappened = true;
} else {
logger.debug("Not scheduling agent rebalancing task as the average load " + load + " has not crossed the threshold " + ConnectedAgentThreshold.value());
logger.debug("Not scheduling agent rebalancing task as the average load {} has not crossed the threshold", load, ConnectedAgentThreshold.value());
}
}
}
@ -1469,14 +1391,10 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
@Override
public void rescan() {
// schedule a scan task immediately
if (logger.isDebugEnabled()) {
logger.debug("Scheduling a host scan task");
}
logger.debug("Scheduling a host scan task");
// schedule host scan task on current MS
scheduleHostScanTask();
if (logger.isDebugEnabled()) {
logger.debug("Notifying all peer MS to schedule host scan task");
}
logger.debug("Notifying all peer MS to schedule host scan task");
}
@Override

View File

@ -98,7 +98,7 @@ public class ConnectedAgentAttache extends AgentAttache {
assert _link == null : "Duh...Says you....Forgot to call disconnect()!";
synchronized (this) {
if (_link != null) {
logger.warn("Lost attache " + _id + "(" + _name + ")");
logger.warn("Lost attache {} ({})", _id, _name);
disconnect(Status.Alert);
}
}

View File

@ -60,9 +60,7 @@ public class DirectAgentAttache extends AgentAttache {
@Override
public void disconnect(Status state) {
if (logger.isDebugEnabled()) {
logger.debug("Processing disconnect " + _id + "(" + _name + ")");
}
logger.debug("Processing disconnect {}({})", _id, _name);
for (ScheduledFuture<?> future : _futures) {
future.cancel(false);
@ -117,7 +115,7 @@ public class DirectAgentAttache extends AgentAttache {
if (answers != null && answers[0] instanceof StartupAnswer) {
StartupAnswer startup = (StartupAnswer)answers[0];
int interval = startup.getPingInterval();
logger.info("StartupAnswer received " + startup.getHostId() + " Interval = " + interval);
logger.info("StartupAnswer received {} Interval = {}", startup.getHostId(), interval);
_futures.add(_agentMgr.getCronJobPool().scheduleAtFixedRate(new PingTask(), interval, interval, TimeUnit.SECONDS));
}
}
@ -128,7 +126,7 @@ public class DirectAgentAttache extends AgentAttache {
assert _resource == null : "Come on now....If you're going to dabble in agent code, you better know how to close out our resources. Ever considered why there's a method called disconnect()?";
synchronized (this) {
if (_resource != null) {
logger.warn("Lost attache for " + _id + "(" + _name + ")");
logger.warn("Lost attache for {}({})", _id, _name);
disconnect(Status.Alert);
}
}
@ -142,9 +140,7 @@ public class DirectAgentAttache extends AgentAttache {
}
private synchronized void scheduleFromQueue() {
if (logger.isTraceEnabled()) {
logger.trace("Agent attache=" + _id + ", task queue size=" + tasks.size() + ", outstanding tasks=" + _outstandingTaskCount.get());
}
logger.trace("Agent attache={}, task queue size={}, outstanding tasks={}", _id, tasks.size(), _outstandingTaskCount.get());
while (!tasks.isEmpty() && _outstandingTaskCount.get() < _agentMgr.getDirectAgentThreadCap()) {
_outstandingTaskCount.incrementAndGet();
_agentMgr.getDirectAgentPool().execute(tasks.remove());
@ -156,7 +152,7 @@ public class DirectAgentAttache extends AgentAttache {
protected synchronized void runInContext() {
try {
if (_outstandingCronTaskCount.incrementAndGet() >= _agentMgr.getDirectAgentThreadCap()) {
logger.warn("PingTask execution for direct attache(" + _id + ") has reached maximum outstanding limit(" + _agentMgr.getDirectAgentThreadCap() + "), bailing out");
logger.warn("PingTask execution for direct attache({}) has reached maximum outstanding limit({}), bailing out", _id, _agentMgr.getDirectAgentThreadCap());
return;
}
@ -171,25 +167,21 @@ public class DirectAgentAttache extends AgentAttache {
}
if (cmd == null) {
logger.warn("Unable to get current status on " + _id + "(" + _name + ")");
logger.warn("Unable to get current status on {}({})", _id, _name);
return;
}
if (cmd.getContextParam("logid") != null) {
ThreadContext.put("logcontextid", cmd.getContextParam("logid"));
}
if (logger.isDebugEnabled()) {
logger.debug("Ping from " + _id + "(" + _name + ")");
}
logger.debug("Ping from {}({})", _id, _name);
long seq = _seq++;
if (logger.isTraceEnabled()) {
logger.trace("SeqA " + _id + "-" + seq + ": " + new Request(_id, -1, cmd, false).toString());
}
logger.trace("SeqA {}-{}: {}", _id, seq, new Request(_id, -1, cmd, false).toString());
_agentMgr.handleCommands(DirectAgentAttache.this, seq, new Command[] {cmd});
} else {
logger.debug("Unable to send ping because agent is disconnected " + _id + "(" + _name + ")");
logger.debug("Unable to send ping because agent is disconnected {}", _id, _name);
}
} catch (Exception e) {
logger.warn("Unable to complete the ping task", e);
@ -218,7 +210,7 @@ public class DirectAgentAttache extends AgentAttache {
Response resp = new Response(_req, answers.toArray(new Answer[answers.size()]));
processAnswers(seq, resp);
} catch (Exception e) {
logger.warn(log(seq, "Exception caught in bailout "), e);
logger.warn(LOG_SEQ_FORMATTED_STRING, seq, "Exception caught in bailout ", e);
}
}
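The LOG_SEQ_FORMATTED_STRING calls here appear to rely on a two-stage template, assuming the field is pre-baked elsewhere along the lines of String.format("Seq %d-{}: {}", id): the %d is resolved once with the attache id, while the remaining {} markers are left for Log4j2 to fill per call. A sketch of the mechanics under that assumption:

    String logSeqFormat = String.format("Seq %d-{}: {}", 42L); // -> "Seq 42-{}: {}"
    // The two {} take seq and the message; the trailing Throwable has no
    // placeholder, so Log4j2 treats it as the exception and prints its stack trace.
    logger.warn(logSeqFormat, seq, "Exception caught in bailout ", e);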
@ -227,7 +219,7 @@ public class DirectAgentAttache extends AgentAttache {
long seq = _req.getSequence();
try {
if (_outstandingCronTaskCount.incrementAndGet() >= _agentMgr.getDirectAgentThreadCap()) {
logger.warn("CronTask execution for direct attache(" + _id + ") has reached maximum outstanding limit(" + _agentMgr.getDirectAgentThreadCap() + "), bailing out");
logger.warn("CronTask execution for direct attache({}) has reached maximum outstanding limit({}), bailing out", _id, _agentMgr.getDirectAgentThreadCap());
bailout();
return;
}
@ -236,9 +228,7 @@ public class DirectAgentAttache extends AgentAttache {
Command[] cmds = _req.getCommands();
boolean stopOnError = _req.stopOnError();
if (logger.isDebugEnabled()) {
logger.debug(log(seq, "Executing request"));
}
logger.debug(LOG_SEQ_FORMATTED_STRING, seq, "Executing request");
ArrayList<Answer> answers = new ArrayList<Answer>(cmds.length);
for (int i = 0; i < cmds.length; i++) {
Answer answer = null;
@ -257,26 +247,24 @@ public class DirectAgentAttache extends AgentAttache {
answer = new Answer(cmds[i], false, "Agent is disconnected");
}
} catch (Exception e) {
logger.warn(log(seq, "Exception Caught while executing command"), e);
logger.warn(LOG_SEQ_FORMATTED_STRING, seq, "Exception Caught while executing command", e);
answer = new Answer(cmds[i], false, e.toString());
}
answers.add(answer);
if (!answer.getResult() && stopOnError) {
if (i < cmds.length - 1 && logger.isDebugEnabled()) {
logger.debug(log(seq, "Cancelling because one of the answers is false and it is stop on error."));
logger.debug(LOG_SEQ_FORMATTED_STRING, seq, "Cancelling because one of the answers is false and it is stop on error.");
}
break;
}
}
Response resp = new Response(_req, answers.toArray(new Answer[answers.size()]));
if (logger.isDebugEnabled()) {
logger.debug(log(seq, "Response Received: "));
}
logger.debug(LOG_SEQ_FORMATTED_STRING, seq, "Response Received: ");
processAnswers(seq, resp);
} catch (Exception e) {
logger.warn(log(seq, "Exception caught "), e);
logger.warn(LOG_SEQ_FORMATTED_STRING, seq, "Exception caught ", e);
} finally {
_outstandingCronTaskCount.decrementAndGet();
}
@ -298,9 +286,7 @@ public class DirectAgentAttache extends AgentAttache {
Command[] cmds = _req.getCommands();
boolean stopOnError = _req.stopOnError();
if (logger.isDebugEnabled()) {
logger.debug(log(seq, "Executing request"));
}
logger.debug(LOG_SEQ_FORMATTED_STRING, seq, "Executing request");
ArrayList<Answer> answers = new ArrayList<Answer>(cmds.length);
for (int i = 0; i < cmds.length; i++) {
Answer answer = null;
@ -320,27 +306,25 @@ public class DirectAgentAttache extends AgentAttache {
}
} catch (Throwable t) {
// Catch Throwable as all exceptions will otherwise be eaten by the executor framework
logger.warn(log(seq, "Throwable caught while executing command"), t);
logger.warn(LOG_SEQ_FORMATTED_STRING, seq, "Throwable caught while executing command", t);
answer = new Answer(cmds[i], false, t.toString());
}
answers.add(answer);
if (!answer.getResult() && stopOnError) {
if (i < cmds.length - 1 && logger.isDebugEnabled()) {
logger.debug(log(seq, "Cancelling because one of the answers is false and it is stop on error."));
logger.debug(LOG_SEQ_FORMATTED_STRING, seq, "Cancelling because one of the answers is false and it is stop on error.");
}
break;
}
}
Response resp = new Response(_req, answers.toArray(new Answer[answers.size()]));
if (logger.isDebugEnabled()) {
logger.debug(log(seq, "Response Received: "));
}
logger.debug(LOG_SEQ_FORMATTED_STRING, seq, "Response Received: ");
processAnswers(seq, resp);
} catch (Throwable t) {
// This is pretty serious as processAnswers might not be called and the calling process is stuck waiting for the full timeout
logger.error(log(seq, "Throwable caught in runInContext, this will cause the management to become unpredictable"), t);
logger.error(LOG_SEQ_FORMATTED_STRING, seq, "Throwable caught in runInContext, this will cause the management to become unpredictable", t);
} finally {
_outstandingTaskCount.decrementAndGet();
scheduleFromQueue();

View File

@ -71,8 +71,7 @@ public class SynchronousListener implements Listener {
@Override
public synchronized boolean processDisconnect(long agentId, Status state) {
if (logger.isTraceEnabled())
logger.trace("Agent disconnected, agent id: " + agentId + ", state: " + state + ". Will notify waiters");
logger.trace("Agent disconnected, agent id: {}, state: {}. Will notify waiters", agentId, state);
_disconnected = true;
notifyAll();
@ -128,10 +127,8 @@ public class SynchronousListener implements Listener {
}
profiler.stop();
if (logger.isTraceEnabled()) {
logger.trace("Synchronized command - sending completed, time: " + profiler.getDurationInMillis() + ", answer: " +
(_answers != null ? _answers[0].toString() : "null"));
}
logger.trace("Synchronized command - sending completed, time: {}, answer: {}", profiler.getDurationInMillis(),
(_answers != null ? _answers[0].toString() : "null"));
return _answers;
}

View File

@ -47,13 +47,13 @@ public class VirtualMachinePowerStateSyncImpl implements VirtualMachinePowerStat
@Override
public void resetHostSyncState(long hostId) {
logger.info("Reset VM power state sync for host: " + hostId);
logger.info("Reset VM power state sync for host: {}.", hostId);
_instanceDao.resetHostPowerStateTracking(hostId);
}
@Override
public void processHostVmStateReport(long hostId, Map<String, HostVmStateReportEntry> report) {
logger.debug("Process host VM state report. host: " + hostId);
logger.debug("Process host VM state report. host: {}.", hostId);
Map<Long, VirtualMachine.PowerState> translatedInfo = convertVmStateReport(report);
processReport(hostId, translatedInfo, false);
@ -61,8 +61,7 @@ public class VirtualMachinePowerStateSyncImpl implements VirtualMachinePowerStat
@Override
public void processHostVmStatePingReport(long hostId, Map<String, HostVmStateReportEntry> report, boolean force) {
if (logger.isDebugEnabled())
logger.debug("Process host VM state report from ping process. host: " + hostId);
logger.debug("Process host VM state report from ping process. host: {}.", hostId);
Map<Long, VirtualMachine.PowerState> translatedInfo = convertVmStateReport(report);
processReport(hostId, translatedInfo, force);
@ -70,25 +69,18 @@ public class VirtualMachinePowerStateSyncImpl implements VirtualMachinePowerStat
private void processReport(long hostId, Map<Long, VirtualMachine.PowerState> translatedInfo, boolean force) {
if (logger.isDebugEnabled()) {
logger.debug("Process VM state report. host: " + hostId + ", number of records in report: " + translatedInfo.size());
}
logger.debug("Process VM state report. host: {}, number of records in report: {}.", hostId, translatedInfo.size());
for (Map.Entry<Long, VirtualMachine.PowerState> entry : translatedInfo.entrySet()) {
if (logger.isDebugEnabled())
logger.debug("VM state report. host: " + hostId + ", vm id: " + entry.getKey() + ", power state: " + entry.getValue());
logger.debug("VM state report. host: {}, vm id: {}, power state: {}.", hostId, entry.getKey(), entry.getValue());
if (_instanceDao.updatePowerState(entry.getKey(), hostId, entry.getValue(), DateUtil.currentGMTTime())) {
if (logger.isInfoEnabled()) {
logger.debug("VM state report is updated. host: " + hostId + ", vm id: " + entry.getKey() + ", power state: " + entry.getValue());
}
logger.debug("VM state report is updated. host: {}, vm id: {}, power state: {}.", hostId, entry.getKey(), entry.getValue());
_messageBus.publish(null, VirtualMachineManager.Topics.VM_POWER_STATE, PublishScope.GLOBAL, entry.getKey());
} else {
if (logger.isTraceEnabled()) {
logger.trace("VM power state does not change, skip DB writing. vm id: " + entry.getKey());
}
logger.trace("VM power state does not change, skip DB writing. vm id: {}.", entry.getKey());
}
}
@ -107,9 +99,7 @@ public class VirtualMachinePowerStateSyncImpl implements VirtualMachinePowerStat
// here we need to be wary of out of band migration as opposed to other, more unexpected state changes
if (vmsThatAreMissingReport.size() > 0) {
Date currentTime = DateUtil.currentGMTTime();
if (logger.isDebugEnabled()) {
logger.debug("Run missing VM report. current time: " + currentTime.getTime());
}
logger.debug("Run missing VM report. current time: {}", currentTime.getTime());
// 2 times of sync-update interval for graceful period
long milliSecondsGracefullPeriod = mgmtServiceConf.getPingInterval() * 2000L;
@ -119,7 +109,7 @@ public class VirtualMachinePowerStateSyncImpl implements VirtualMachinePowerStat
// Make sure powerState is up to date for missing VMs
try {
if (!force && !_instanceDao.isPowerStateUpToDate(instance.getId())) {
logger.warn("Detected missing VM but power state is outdated, wait for another process report run for VM id: " + instance.getId());
logger.warn("Detected missing VM but power state is outdated, wait for another process report run for VM id: {}.", instance.getId());
_instanceDao.resetVmPowerStateTracking(instance.getId());
continue;
}
@ -130,52 +120,44 @@ public class VirtualMachinePowerStateSyncImpl implements VirtualMachinePowerStat
Date vmStateUpdateTime = instance.getPowerStateUpdateTime();
if (vmStateUpdateTime == null) {
logger.warn("VM power state update time is null, falling back to update time for vm id: " + instance.getId());
logger.warn("VM power state update time is null, falling back to update time for vm id: {}.", instance.getId());
vmStateUpdateTime = instance.getUpdateTime();
if (vmStateUpdateTime == null) {
logger.warn("VM update time is null, falling back to creation time for vm id: " + instance.getId());
logger.warn("VM update time is null, falling back to creation time for vm id: {}", instance.getId());
vmStateUpdateTime = instance.getCreated();
}
}
if (logger.isInfoEnabled()) {
String lastTime = new SimpleDateFormat("yyyy/MM/dd'T'HH:mm:ss.SSS'Z'").format(vmStateUpdateTime);
logger.debug(
String.format("Detected missing VM. host: %d, vm id: %d(%s), power state: %s, last state update: %s"
, hostId
, instance.getId()
, instance.getUuid()
, VirtualMachine.PowerState.PowerReportMissing
, lastTime));
}
String lastTime = new SimpleDateFormat("yyyy/MM/dd'T'HH:mm:ss.SSS'Z'").format(vmStateUpdateTime);
logger.debug("Detected missing VM. host: {}, vm id: {}({}), power state: {}, last state update: {}"
, hostId
, instance.getId()
, instance.getUuid()
, VirtualMachine.PowerState.PowerReportMissing
, lastTime);
long milliSecondsSinceLastStateUpdate = currentTime.getTime() - vmStateUpdateTime.getTime();
if (force || milliSecondsSinceLastStateUpdate > milliSecondsGracefullPeriod) {
logger.debug("vm id: " + instance.getId() + " - time since last state update(" + milliSecondsSinceLastStateUpdate + "ms) has passed graceful period");
logger.debug("vm id: {} - time since last state update({}ms) has passed graceful period.", instance.getId(), milliSecondsSinceLastStateUpdate);
// this is were a race condition might have happened if we don't re-fetch the instance;
// between the startime of this job and the currentTime of this missing-branch
// an update might have occurred that we should not override in case of out of band migration
if (_instanceDao.updatePowerState(instance.getId(), hostId, VirtualMachine.PowerState.PowerReportMissing, startTime)) {
if (logger.isDebugEnabled()) {
logger.debug("VM state report is updated. host: " + hostId + ", vm id: " + instance.getId() + ", power state: PowerReportMissing ");
}
logger.debug("VM state report is updated. host: {}, vm id: {}, power state: PowerReportMissing.", hostId, instance.getId());
_messageBus.publish(null, VirtualMachineManager.Topics.VM_POWER_STATE, PublishScope.GLOBAL, instance.getId());
} else {
if (logger.isDebugEnabled()) {
logger.debug("VM power state does not change, skip DB writing. vm id: " + instance.getId());
}
logger.debug("VM power state does not change, skip DB writing. vm id: {}", instance.getId());
}
} else {
logger.debug("vm id: " + instance.getId() + " - time since last state update(" + milliSecondsSinceLastStateUpdate + "ms) has not passed graceful period yet");
logger.debug("vm id: {} - time since last state update({}ms) has not passed graceful period yet.", instance.getId(), milliSecondsSinceLastStateUpdate);
}
}
}
if (logger.isDebugEnabled())
logger.debug("Done with process of VM state report. host: " + hostId);
logger.debug("Done with process of VM state report. host: {}", hostId);
}
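With the guards gone, the per-entry debug statements in this method evaluate their arguments on every report entry, and the SimpleDateFormat line above allocates and applies a formatter before the logger can test the level; parameterized calls defer message assembly but not argument evaluation. A hedged sketch of when an explicit guard still pays off, reusing the method's vmStateUpdateTime:

    // Argument prep is the expensive part here (formatter allocation plus
    // formatting), so the guard skips the work entirely when DEBUG is off.
    if (logger.isDebugEnabled()) {
        String lastTime = new SimpleDateFormat("yyyy/MM/dd'T'HH:mm:ss.SSS'Z'").format(vmStateUpdateTime);
        logger.debug("Detected missing VM. last state update: {}", lastTime);
    }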
@Override
@ -190,7 +172,7 @@ public class VirtualMachinePowerStateSyncImpl implements VirtualMachinePowerStat
if (vm != null) {
map.put(vm.getId(), entry.getValue().getState());
} else {
logger.debug("Unable to find matched VM in CloudStack DB. name: " + entry.getKey());
logger.debug("Unable to find matched VM in CloudStack DB. name: {}", entry.getKey());
}
}

View File

@ -63,24 +63,23 @@ public class VmWorkJobDispatcher extends AdapterBase implements AsyncJobDispatch
try {
workClz = Class.forName(job.getCmd());
} catch (ClassNotFoundException e) {
logger.error("VM work class " + cmd + " is not found" + ", job origin: " + job.getRelated(), e);
logger.error("VM work class {} is not found, job origin: {}", cmd, job.getRelated(), e);
_asyncJobMgr.completeAsyncJob(job.getId(), JobInfo.Status.FAILED, 0, e.getMessage());
return;
}
work = VmWorkSerializer.deserialize(workClz, job.getCmdInfo());
if(work == null) {
logger.error("Unable to deserialize VM work " + job.getCmd() + ", job info: " + job.getCmdInfo() + ", job origin: " + job.getRelated());
logger.error("Unable to deserialize VM work {}, job info: {}, job origin: {}", job.getCmd(), job.getCmdInfo(), job.getRelated());
_asyncJobMgr.completeAsyncJob(job.getId(), JobInfo.Status.FAILED, 0, "Unable to deserialize VM work");
return;
}
if (logger.isDebugEnabled())
logger.debug("Run VM work job: " + cmd + " for VM " + work.getVmId() + ", job origin: " + job.getRelated());
logger.debug("Run VM work job: {} for VM {}, job origin: {}", cmd, work.getVmId(), job.getRelated());
try {
if (_handlers == null || _handlers.isEmpty()) {
logger.error("Invalid startup configuration, no work job handler is found. cmd: " + job.getCmd() + ", job info: " + job.getCmdInfo()
+ ", job origin: " + job.getRelated());
logger.error("Invalid startup configuration, no work job handler is found. cmd: {}, job info: {}, job origin: {}", job.getCmd(), job.getCmdInfo(),
job.getRelated());
_asyncJobMgr.completeAsyncJob(job.getId(), JobInfo.Status.FAILED, 0, "Invalid startup configuration. no job handler is found");
return;
}
@ -88,8 +87,8 @@ public class VmWorkJobDispatcher extends AdapterBase implements AsyncJobDispatch
VmWorkJobHandler handler = _handlers.get(work.getHandlerName());
if (handler == null) {
logger.error("Unable to find work job handler. handler name: " + work.getHandlerName() + ", job cmd: " + job.getCmd()
+ ", job info: " + job.getCmdInfo() + ", job origin: " + job.getRelated());
logger.error("Unable to find work job handler. handler name: {}, job cmd: {}, job info: {}, job origin: {}", work.getHandlerName(), job.getCmd(),
job.getCmdInfo(), job.getRelated());
_asyncJobMgr.completeAsyncJob(job.getId(), JobInfo.Status.FAILED, 0, "Unable to find work job handler");
return;
}
@ -103,14 +102,13 @@ public class VmWorkJobDispatcher extends AdapterBase implements AsyncJobDispatch
CallContext.unregister();
}
} finally {
if (logger.isDebugEnabled())
logger.debug("Done with run of VM work job: " + cmd + " for VM " + work.getVmId() + ", job origin: " + job.getRelated());
logger.debug("Done with run of VM work job: {} for VM {}, job origin: {}", cmd, work.getVmId(), job.getRelated());
}
} catch(InvalidParameterValueException e) {
logger.error("Unable to complete " + job + ", job origin:" + job.getRelated());
logger.error("Unable to complete {}, job origin: {}", job, job.getRelated());
_asyncJobMgr.completeAsyncJob(job.getId(), JobInfo.Status.FAILED, 0, _asyncJobMgr.marshallResultObject(e));
} catch(Throwable e) {
logger.error("Unable to complete " + job + ", job origin:" + job.getRelated(), e);
logger.error("Unable to complete {}, job origin: {}", job, job.getRelated(), e);
//RuntimeException ex = new RuntimeException("Job failed due to exception " + e.getMessage());
_asyncJobMgr.completeAsyncJob(job.getId(), JobInfo.Status.FAILED, 0, _asyncJobMgr.marshallResultObject(e));

View File

@ -214,9 +214,7 @@ public class VMEntityManagerImpl implements VMEntityManager {
if (reservationId != null) {
return reservationId;
} else {
if (logger.isDebugEnabled()) {
logger.debug("Cannot finalize the VM reservation for this destination found, retrying");
}
logger.debug("Cannot finalize the VM reservation for this destination found, retrying");
exclude.addHost(dest.getHost().getId());
continue;
}

View File

@ -97,15 +97,15 @@ public class DataMigrationUtility {
boolean isReady = true;
for (TemplateDataStoreVO template : templates) {
isReady &= (Arrays.asList(validStates).contains(template.getState()));
logger.trace(String.format("template state: %s", template.getState()));
logger.trace("template state: {}", template.getState());
}
for (SnapshotDataStoreVO snapshot : snapshots) {
isReady &= (Arrays.asList(validStates).contains(snapshot.getState()));
logger.trace(String.format("snapshot state: %s", snapshot.getState()));
logger.trace("snapshot state: {}", snapshot.getState());
}
for (VolumeDataStoreVO volume : volumes) {
isReady &= (Arrays.asList(validStates).contains(volume.getState()));
logger.trace(String.format("volume state: %s", volume.getState()));
logger.trace("volume state: {}", volume.getState());
}
return isReady;
}
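These trace rewrites swap String.format, which builds the full message before logger.trace() can test the level, for placeholders that format only when TRACE is enabled; since the remaining arguments are plain getters, no guard is needed. A compact before/after, illustrative only:

    logger.trace(String.format("template state: %s", template.getState())); // formats even when TRACE is off
    logger.trace("template state: {}", template.getState());                // formats only when TRACE is on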

View File

@ -159,7 +159,7 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra
}
storageCapacities.put(srcDataStoreId, new Pair<>(null, null));
if (migrationPolicy == MigrationPolicy.COMPLETE) {
logger.debug(String.format("Setting source image store: %s to read-only", srcDatastore.getId()));
logger.debug("Setting source image store: {} to read-only", srcDatastore.getId());
storageService.updateImageStoreStatus(srcDataStoreId, true);
}
@ -200,7 +200,7 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra
}
if (chosenFileForMigration.getPhysicalSize() > storageCapacities.get(destDatastoreId).first()) {
logger.debug(String.format("%s: %s too large to be migrated to %s", chosenFileForMigration.getType().name() , chosenFileForMigration.getUuid(), destDatastoreId));
logger.debug("{}: {} too large to be migrated to {}", chosenFileForMigration.getType().name() , chosenFileForMigration.getUuid(), destDatastoreId);
skipped += 1;
continue;
}
@ -267,7 +267,7 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra
}
if (chosenFileForMigration.getPhysicalSize() > storageCapacities.get(destImgStoreId).first()) {
logger.debug(String.format("%s: %s too large to be migrated to %s", chosenFileForMigration.getType().name(), chosenFileForMigration.getUuid(), destImgStoreId));
logger.debug("{}: {} too large to be migrated to {}", chosenFileForMigration.getType().name(), chosenFileForMigration.getUuid(), destImgStoreId);
continue;
}
@ -302,7 +302,7 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra
boolean success = true;
if (destDatastoreId == srcDatastore.getId() && !files.isEmpty()) {
if (migrationPolicy == MigrationPolicy.BALANCE) {
logger.debug("Migration completed : data stores have been balanced ");
logger.debug("Migration completed : data stores have been balanced");
if (destDatastoreId == srcDatastore.getId()) {
message = "Seems like source datastore has more free capacity than the destination(s)";
}
@ -353,7 +353,7 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra
task.setTemplateChain(templateChains);
}
futures.add((executor.submit(task)));
logger.debug(String.format("Migration of %s: %s is initiated. ", chosenFileForMigration.getType().name(), chosenFileForMigration.getUuid()));
logger.debug(String.format("Migration of {}: {} is initiated.", chosenFileForMigration.getType().name(), chosenFileForMigration.getUuid()));
return storageCapacities;
}
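The initiation message above is easy to get wrong because two formatting conventions meet on one line: %s belongs to String.format and {} belongs to the logger. Mixing them, as in String.format with {} markers, emits the braces literally and ignores the arguments without raising any error. Illustrative only, with made-up variables:

    logger.debug(String.format("Migration of %s: %s is initiated.", type, uuid)); // works, but formats eagerly
    logger.debug("Migration of {}: {} is initiated.", type, uuid);                // preferred: formats lazily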
@ -510,10 +510,10 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra
Pair<Long, Long> imageStoreCapacity = storageCapacities.get(destStoreId);
long usedCapacity = imageStoreCapacity.second() - imageStoreCapacity.first();
if (imageStoreCapacity != null && (usedCapacity / (imageStoreCapacity.second() * 1.0)) <= CapacityManager.SecondaryStorageCapacityThreshold.value()) {
logger.debug("image store: " + destStoreId + " has sufficient capacity to proceed with migration of file");
logger.debug("image store: {} has sufficient capacity to proceed with migration of file.", destStoreId);
return true;
}
logger.debug("Image store capacity threshold exceeded, migration not possible");
logger.debug("Image store capacity threshold exceeded, migration not possible.");
return false;
}

View File

@ -349,13 +349,13 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
if (storagePool.isPresent()) {
String storagePoolToString = getReflectOnlySelectedFields(storagePool.get());
logger.debug(String.format("The storage pool [%s] was specified for this account [%s] and will be used for allocation.", storagePoolToString, vm.getAccountId()));
logger.debug("The storage pool [{}] was specified for this account [{}] and will be used for allocation.", storagePoolToString, vm.getAccountId());
} else {
String globalStoragePoolUuid = StorageManager.PreferredStoragePool.value();
storagePool = getMatchingStoragePool(globalStoragePoolUuid, poolList);
storagePool.ifPresent(pool -> logger.debug(String.format("The storage pool [%s] was specified in the Global Settings and will be used for allocation.",
getReflectOnlySelectedFields(pool))));
storagePool.ifPresent(pool -> logger.debug("The storage pool [{}] was specified in the Global Settings and will be used for allocation.",
getReflectOnlySelectedFields(pool)));
}
return storagePool;
}
@ -378,23 +378,21 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
StorageUtil.traceLogStoragePools(poolList, logger, "pools to choose from: ");
// Check if the preferred storage pool can be used. If yes, use it.
Optional<StoragePool> storagePool = getPreferredStoragePool(poolList, vm);
if (logger.isTraceEnabled()) {
logger.trace(String.format("we have a preferred pool: %b", storagePool.isPresent()));
}
logger.trace("we have a preferred pool: {}", storagePool.isPresent());
StoragePool storage;
if (storagePool.isPresent()) {
storage = (StoragePool)this.dataStoreMgr.getDataStore(storagePool.get().getId(), DataStoreRole.Primary);
logger.debug(String.format("VM [%s] has a preferred storage pool [%s]. Volume Orchestrator found this storage using Storage Pool Allocator [%s] and will"
+ " use it.", vm, storage, allocator.getClass().getSimpleName()));
logger.debug("VM [{}] has a preferred storage pool [{}]. Volume Orchestrator found this storage using Storage Pool Allocator [{}] and will"
+ " use it.", vm, storage, allocator.getClass().getSimpleName());
} else {
storage = (StoragePool)dataStoreMgr.getDataStore(poolList.get(0).getId(), DataStoreRole.Primary);
logger.debug(String.format("VM [%s] does not have a preferred storage pool or it cannot be used. Volume Orchestrator will use the available Storage Pool"
+ " [%s], which was discovered using Storage Pool Allocator [%s].", vm, storage, allocator.getClass().getSimpleName()));
logger.debug("VM [{}] does not have a preferred storage pool or it cannot be used. Volume Orchestrator will use the available Storage Pool"
+ " [{}], which was discovered using Storage Pool Allocator [{}].", vm, storage, allocator.getClass().getSimpleName());
}
return storage;
}
logger.debug(String.format("Could not find any available Storage Pool using Storage Pool Allocator [%s].", allocator.getClass().getSimpleName()));
logger.debug("Could not find any available Storage Pool using Storage Pool Allocator [{}].", allocator.getClass().getSimpleName());
}
logger.info("Volume Orchestrator could not find any available Storage Pool.");
return null;
@ -543,9 +541,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
if (pool != null) {
String poolToString = getReflectOnlySelectedFields(pool);
if (logger.isDebugEnabled()) {
logger.debug(String.format("Found a suitable pool [%s] to create the volume [%s] in.", poolToString, volumeToString));
}
logger.debug("Found a suitable pool [{}] to create the volume [{}] in.", poolToString, volumeToString);
break;
}
}
@ -706,10 +702,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
String poolToString = getReflectOnlySelectedFields(pool);
if (logger.isDebugEnabled()) {
logger.debug(String.format("Trying to create volume [%s] on storage pool [%s].",
volumeToString, poolToString));
}
logger.debug("Trying to create volume [{}] on storage pool [{}].",
volumeToString, poolToString);
DataStore store = dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary);
for (int i = 0; i < 2; i++) {
// retry one more time in case of template reload is required for Vmware case
@ -725,7 +719,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
VolumeApiResult result = future.get();
if (result.isFailed()) {
if (result.getResult().contains(REQUEST_TEMPLATE_RELOAD) && (i == 0)) {
logger.debug(String.format("Retrying to deploy template [%s] for VMware, attempt 2/2. ", templateToString));
logger.debug("Retrying to deploy template [{}] for VMware, attempt 2/2. ", templateToString);
continue;
} else {
String msg = String.format("Failed to create volume [%s] due to [%s].", volumeToString, result.getResult());
@ -891,11 +885,11 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
} else {
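// convert the requested root disk size from GiB to bytes before comparing it with the template size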
rootDisksize = rootDisksize * 1024 * 1024 * 1024;
if (rootDisksize > size) {
logger.debug(String.format("Using root disk size of [%s] bytes for the volume [%s].", toHumanReadableSize(rootDisksize), name));
logger.debug("Using root disk size of [{}] bytes for the volume [{}].", toHumanReadableSize(rootDisksize), name);
size = rootDisksize;
} else {
logger.debug(String.format("The specified root disk size of [%s] bytes is smaller than the template. Using root disk size of [%s] bytes for the volume [%s].",
toHumanReadableSize(rootDisksize), size, name));
logger.debug("The specified root disk size of [{}] bytes is smaller than the template. Using root disk size of [{}] bytes for the volume [{}].",
toHumanReadableSize(rootDisksize), size, name);
}
}
}
@ -985,8 +979,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
if (template.isDeployAsIs() && vm.getType() != VirtualMachine.Type.SecondaryStorageVm) {
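// deploying a template "as is" relies on a running secondary storage VM in the zone, so check for one first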
List<SecondaryStorageVmVO> runningSSVMs = secondaryStorageVmDao.getSecStorageVmListInStates(null, vm.getDataCenterId(), State.Running);
if (CollectionUtils.isEmpty(runningSSVMs)) {
logger.info(String.format("Could not find a running SSVM in datacenter [%s] for deploying VM as is. Not deploying VM [%s] as is.",
vm.getDataCenterId(), vm));
logger.info("Could not find a running SSVM in datacenter [{}] for deploying VM as is. Not deploying VM [{}] as is.",
vm.getDataCenterId(), vm);
} else {
UserVmDetailVO configurationDetail = userVmDetailsDao.findDetail(vm.getId(), VmDetailConstants.DEPLOY_AS_IS_CONFIGURATION);
if (configurationDetail != null) {
@ -1020,7 +1014,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
volumeSize = templateAsIsDisks.get(number).getVirtualSize();
deviceId = templateAsIsDisks.get(number).getDiskNumber();
}
logger.info(String.format("Adding disk object [%s] to VM [%s]", volumeName, vm));
logger.info("Adding disk object [{}] to VM [{}]", volumeName, vm);
DiskProfile diskProfile = allocateTemplatedVolume(type, volumeName, offering, volumeSize, minIops, maxIops,
template, vm, owner, deviceId, configurationId);
profiles.add(diskProfile);
@ -1117,18 +1111,14 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
VirtualMachineTemplate rootDiskTmplt = _entityMgr.findById(VirtualMachineTemplate.class, vm.getTemplateId());
DataCenter dcVO = _entityMgr.findById(DataCenter.class, vm.getDataCenterId());
if (logger.isTraceEnabled()) {
logger.trace(String.format("storage-pool %s/%s is associated with pod %d",storagePool.getName(), storagePool.getUuid(), storagePool.getPodId()));
}
logger.trace("storage-pool {}/{} is associated with pod {}",storagePool.getName(), storagePool.getUuid(), storagePool.getPodId());
Long podId = storagePool.getPodId() != null ? storagePool.getPodId() : vm.getPodIdToDeployIn();
Pod pod = _entityMgr.findById(Pod.class, podId);
ServiceOffering svo = _entityMgr.findById(ServiceOffering.class, vm.getServiceOfferingId());
DiskOffering diskVO = _entityMgr.findById(DiskOffering.class, volumeInfo.getDiskOfferingId());
Long clusterId = storagePool.getClusterId();
if (logger.isTraceEnabled()) {
logger.trace(String.format("storage-pool %s/%s is associated with cluster %d",storagePool.getName(), storagePool.getUuid(), clusterId));
}
logger.trace("storage-pool {}/{} is associated with cluster {}",storagePool.getName(), storagePool.getUuid(), clusterId);
Long hostId = vm.getHostId();
if (hostId == null && storagePool.isLocal()) {
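// no host selected yet: for local storage, a candidate host can be derived from the pool-host mapping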
List<StoragePoolHostVO> poolHosts = storagePoolHostDao.listByPoolId(storagePool.getId());
@ -1172,9 +1162,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
Long volTemplateId = existingVolume.getTemplateId();
long vmTemplateId = vm.getTemplateId();
if (volTemplateId != null && volTemplateId.longValue() != vmTemplateId) {
if (logger.isDebugEnabled()) {
logger.debug(String.format("switchVolume: Old volume's templateId [%s] does not match the VM's templateId [%s]. Updating templateId in the new volume.", volTemplateId, vmTemplateId));
}
logger.debug("switchVolume: Old volume's templateId [{}] does not match the VM's templateId [{}]. Updating templateId in the new volume.", volTemplateId, vmTemplateId);
templateIdToUse = vmTemplateId;
}
@ -1186,16 +1174,16 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
try {
stateTransitTo(existingVolume, Volume.Event.DestroyRequested);
} catch (NoTransitionException e) {
logger.error(String.format("Unable to destroy existing volume [%s] due to [%s].", volumeToString, e.getMessage()));
logger.error("Unable to destroy existing volume [{}] due to [{}].", volumeToString, e.getMessage());
}
// In case of VMware VM will continue to use the old root disk until expunged, so force expunge old root disk
if (vm.getHypervisorType() == HypervisorType.VMware) {
logger.info(String.format("Trying to expunge volume [%s] from primary data storage.", volumeToString));
logger.info("Trying to expunge volume [{}] from primary data storage.", volumeToString);
AsyncCallFuture<VolumeApiResult> future = volService.expungeVolumeAsync(volFactory.getVolume(existingVolume.getId()));
try {
future.get();
} catch (Exception e) {
logger.error(String.format("Failed to expunge volume [%s] from primary data storage due to [%s].", volumeToString, e.getMessage()));
logger.error("Failed to expunge volume [{}] from primary data storage due to [{}].", volumeToString, e.getMessage());
logger.debug("Exception: ", e);
}
}
@ -1222,9 +1210,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
HostVO host = _hostDao.findById(hostId);
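// keep the isDebugEnabled() guard here: _userVmDao.findById(vmId) is a database lookup that should not run when debug logging is disabled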
if (logger.isDebugEnabled()) {
logger.debug(String.format("Releasing [%s] volumes for VM [%s] from host [%s].", volumesForVm.size(), _userVmDao.findById(vmId), host));
logger.debug("Releasing [{}] volumes for VM [{}] from host [{}].", volumesForVm.size(), _userVmDao.findById(vmId), host);
}
for (VolumeVO volumeForVm : volumesForVm) {
VolumeInfo volumeInfo = volFactory.getVolume(volumeForVm.getId());
@ -1247,9 +1233,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
public void cleanupVolumes(long vmId) throws ConcurrentOperationException {
VMInstanceVO vm = _userVmDao.findById(vmId);
if (logger.isDebugEnabled()) {
logger.debug(String.format("Cleaning storage for VM [%s].", vm));
}
logger.debug("Cleaning storage for VM [{}].", vm);
final List<VolumeVO> volumesForVm = _volsDao.findByInstance(vmId);
final List<VolumeVO> toBeExpunged = new ArrayList<VolumeVO>();
@ -1265,13 +1249,11 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
if (!volumeAlreadyDestroyed) {
destroyVolumeInContext(vol);
} else {
logger.debug(String.format("Skipping destroy for the volume [%s] as it is in [%s] state.", volumeToString, vol.getState().toString()));
logger.debug("Skipping destroy for the volume [{}] as it is in [{}] state.", volumeToString, vol.getState().toString());
}
toBeExpunged.add(vol);
} else {
if (logger.isDebugEnabled()) {
logger.debug(String.format("Detaching volume [%s].", volumeToString));
}
logger.debug("Detaching volume [{}].", volumeToString);
if (vm.getHypervisorType().equals(HypervisorType.VMware)) {
_volumeApiService.detachVolumeViaDestroyVM(vmId, vol.getId());
}
@ -1289,7 +1271,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
try {
future.get();
} catch (InterruptedException | ExecutionException e) {
logger.error(String.format("Failed to expunge volume [%s] due to [%s].", expungeToString, e.getMessage()));
logger.error("Failed to expunge volume [{}] due to [{}].", expungeToString, e.getMessage());
logger.debug("Exception: ", e);
}
}
@ -1407,12 +1389,12 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
try {
VolumeApiResult result = future.get();
if (result.isFailed()) {
logger.error(String.format("Volume [%s] migration failed due to [%s].", volToString, result.getResult()));
logger.error("Volume [{}] migration failed due to [{}].", volToString, result.getResult());
return null;
}
return result.getVolume();
} catch (InterruptedException | ExecutionException e) {
logger.error(String.format("Volume [%s] migration failed due to [%s].", volToString, e.getMessage()));
logger.error("Volume [{}] migration failed due to [{}].", volToString, e.getMessage());
logger.debug("Exception: ", e);
return null;
}
@ -1453,7 +1435,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
throw new CloudRuntimeException(msg);
}
} catch (InterruptedException | ExecutionException e) {
logger.error(String.format("Failed to migrate VM [%s] along with its volumes due to [%s].", vm, e.getMessage()));
logger.error("Failed to migrate VM [{}] along with its volumes due to [{}].", vm, e.getMessage());
logger.debug("Exception: ", e);
}
}
@ -1475,7 +1457,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
}
if (volume.getPoolId() == pool.getId()) {
logger.debug(String.format("Volume [%s] already is on the elected storage pool [%s].", volumeToString, poolToString));
logger.debug("Volume [{}] already is on the elected storage pool [{}].", volumeToString, poolToString);
continue;
}
volumeStoragePoolMap.put(volume, volumeToPool.get(volume));
@ -1485,9 +1467,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
logger.debug("No volume needs to be migrated.");
return true;
}
if (logger.isDebugEnabled()) {
logger.debug("Offline VM migration was not done up the stack in VirtualMachineManager. Trying to migrate the VM here.");
}
logger.debug("Offline VM migration was not done up the stack in VirtualMachineManager. Trying to migrate the VM here.");
for (Map.Entry<Volume, StoragePool> entry : volumeStoragePoolMap.entrySet()) {
Volume result = migrateVolume(entry.getKey(), entry.getValue());
if (result == null) {
@ -1500,9 +1480,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
@Override
public void prepareForMigration(VirtualMachineProfile vm, DeployDestination dest) {
List<VolumeVO> vols = _volsDao.findUsableVolumesForInstance(vm.getId());
if (logger.isDebugEnabled()) {
logger.debug(String.format("Preparing to migrate [%s] volumes for VM [%s].", vols.size(), vm.getVirtualMachine()));
}
logger.debug("Preparing to migrate [{}] volumes for VM [{}].", vols.size(), vm.getVirtualMachine());
for (VolumeVO vol : vols) {
VolumeInfo volumeInfo = volFactory.getVolume(vol.getId());
@ -1624,17 +1602,13 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
tasks.add(task);
} else {
if (vol.isRecreatable()) {
if (logger.isDebugEnabled()) {
logger.debug(String.format("Volume [%s] will be recreated on storage pool [%s], assigned by deploymentPlanner.", volToString, assignedPoolToString));
}
logger.debug("Volume [{}] will be recreated on storage pool [{}], assigned by deploymentPlanner.", volToString, assignedPoolToString);
VolumeTask task = new VolumeTask(VolumeTaskType.RECREATE, vol, null);
tasks.add(task);
} else {
if (assignedPool.getId() != vol.getPoolId()) {
if (logger.isDebugEnabled()) {
logger.debug(String.format("Mismatch with the storage pool [%s] assigned by deploymentPlanner and the one associated with the volume [%s].",
assignedPoolToString, volToString));
}
logger.debug("Mismatch with the storage pool [{}] assigned by deploymentPlanner and the one associated with the volume [{}].",
assignedPoolToString, volToString);
DiskOffering diskOffering = _entityMgr.findById(DiskOffering.class, vol.getDiskOfferingId());
if (diskOffering.isUseLocalStorage()) {
// Currently migration of local volume is not supported so bail out
@ -1653,10 +1627,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
storageMigrationEnabled = StorageMigrationEnabled.value();
}
if (storageMigrationEnabled) {
if (logger.isDebugEnabled()) {
logger.debug(String.format("Shared volume [%s] will be migrated to the storage pool [%s], assigned by deploymentPlanner.",
volToString, assignedPoolToString));
}
logger.debug("Shared volume [{}] will be migrated to the storage pool [{}], assigned by deploymentPlanner.",
volToString, assignedPoolToString);
VolumeTask task = new VolumeTask(VolumeTaskType.MIGRATE, vol, assignedPool);
tasks.add(task);
} else {
@ -1679,10 +1651,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
StoragePoolVO pool = _storagePoolDao.findById(vol.getPoolId());
if (logger.isDebugEnabled()) {
logger.debug(String.format("No need to recreate the volume [%s] since it already has an assigned pool: [%s]. Adding disk to the VM.",
volToString, pool.getUuid()));
}
logger.debug("No need to recreate the volume [{}] since it already has an assigned pool: [{}]. Adding disk to the VM.",
volToString, pool.getUuid());
VolumeTask task = new VolumeTask(VolumeTaskType.NOP, vol, pool);
tasks.add(task);
}
@ -1695,7 +1665,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
if (Objects.equals(originalEntry.getSize(), updateEntry.getSize())) {
return;
}
logger.debug(String.format("Size mismatch found for %s after creation, old size: %d, new size: %d. Updating resource count", updateEntry, originalEntry.getSize(), updateEntry.getSize()));
logger.debug("Size mismatch found for {} after creation, old size: {}, new size: {}. Updating resource count", updateEntry, originalEntry.getSize(), updateEntry.getSize());
if (ObjectUtils.anyNull(originalEntry.getSize(), updateEntry.getSize())) {
_resourceLimitMgr.recalculateResourceCount(updateEntry.getAccountId(), updateEntry.getDomainId(),
ResourceType.primary_storage.getOrdinal());
@ -1717,7 +1687,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
if (recreate && (dest.getStorageForDisks() == null || dest.getStorageForDisks().get(vol) == null)) {
destPool = dataStoreMgr.getDataStore(vol.getPoolId(), DataStoreRole.Primary);
String destPoolToString = getReflectOnlySelectedFields(destPool);
logger.debug(String.format("Existing pool: [%s].", destPoolToString));
logger.debug("Existing pool: [{}].", destPoolToString);
} else {
StoragePool pool = dest.getStorageForDisks().get(vol);
destPool = dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary);
@ -1739,9 +1709,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
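// re-key the disk-to-pool map so the new volume inherits the pool that was chosen for the old volume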
dest.getStorageForDisks().put(newVol, poolWithOldVol);
dest.getStorageForDisks().remove(vol);
}
if (logger.isDebugEnabled()) {
logger.debug(String.format("Created new volume [%s] from old volume [%s].", newVolToString, volToString));
}
logger.debug("Created new volume [{}] from old volume [{}].", newVolToString, volToString);
}
VolumeInfo volume = volFactory.getVolume(newVol.getId(), destPool);
Long templateId = newVol.getTemplateId();
@ -1762,7 +1730,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
} else {
final VirtualMachineTemplate template = _entityMgr.findById(VirtualMachineTemplate.class, templateId);
if (template == null) {
logger.error(String.format("Failed to find template: %d for %s", templateId, volume));
logger.error("Failed to find template: {} for {}", templateId, volume);
throw new CloudRuntimeException(String.format("Failed to find template for volume ID: %s", volume.getUuid()));
}
TemplateInfo templ = tmplFactory.getReadyTemplateOnImageStore(templateId, dest.getDataCenter().getId());
@ -1774,8 +1742,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
if (!primaryDataStore.isManaged()) {
templ = tmplFactory.getReadyBypassedTemplateOnPrimaryStore(templateId, destPool.getId(), dest.getHost().getId());
} else {
logger.debug(String.format("Directly downloading template [%s] on host [%s] and copying it to the managed storage pool [%s].",
templateId, dest.getHost().getUuid(), destPool.getUuid()));
logger.debug("Directly downloading template [{}] on host [{}] and copying it to the managed storage pool [{}].",
templateId, dest.getHost().getUuid(), destPool.getUuid());
templ = volService.createManagedStorageTemplate(templateId, destPool.getId(), dest.getHost().getId());
}
@ -2029,12 +1997,12 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
String volumeToString = getReflectOnlySelectedFields(volume);
if (volume.getState().equals(Volume.State.Creating)) {
logger.debug(String.format("Removing volume [%s], as it was leftover from the last management server stop.", volumeToString));
logger.debug("Removing volume [{}], as it was leftover from the last management server stop.", volumeToString);
_volsDao.remove(volume.getId());
}
if (volume.getState().equals(Volume.State.Attaching)) {
logger.warn(String.format("Volume [%s] failed to attach to the VM [%s] on the last management server stop, changing state back to Ready.", volumeToString, _userVmDao.findById(vmId)));
logger.warn("Volume [{}] failed to attach to the VM [{}] on the last management server stop, changing state back to Ready.", volumeToString, _userVmDao.findById(vmId));
volume.setState(Volume.State.Ready);
_volsDao.update(volumeId, volume);
}
@ -2057,11 +2025,11 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
if (duplicateVol != null) {
String duplicateVolToString = getReflectOnlySelectedFields(duplicateVol);
logger.debug(String.format("Removing volume [%s] from storage pool [%s] because it's duplicated.", duplicateVolToString, destPoolToString));
logger.debug("Removing volume [{}] from storage pool [{}] because it's duplicated.", duplicateVolToString, destPoolToString);
_volsDao.remove(duplicateVol.getId());
}
logger.debug(String.format("Changing volume [%s] state from Migrating to Ready in case of migration failure.", volumeToString));
logger.debug("Changing volume [{}] state from Migrating to Ready in case of migration failure.", volumeToString);
volume.setState(Volume.State.Ready);
_volsDao.update(volumeId, volume);
}
@ -2075,7 +2043,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
String volumeToString = getReflectOnlySelectedFields(volume);
if (volume.getState() == Volume.State.Snapshotting) {
logger.debug(String.format("Changing volume [%s] state back to Ready.", volumeToString));
logger.debug("Changing volume [{}] state back to Ready.", volumeToString);
volume.setState(Volume.State.Ready);
_volsDao.update(volume.getId(), volume);
}
@ -2099,7 +2067,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
cleanupVolumeDuringSnapshotFailure(work.getVolumeId(), work.getSnapshotId());
}
} catch (Exception e) {
logger.error(String.format("Clean up job failed due to [%s]. Will continue with other clean up jobs.", e.getMessage()));
logger.error("Clean up job failed due to [{}]. Will continue with other clean up jobs.", e.getMessage());
logger.debug("Exception: ", e);
}
}
@ -2176,8 +2144,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
}
if (needUpdate) {
logger.info(String.format("Updating volume's disk chain info. Volume: [%s]. Path: [%s] -> [%s], Disk Chain Info: [%s] -> [%s].",
volToString, vol.getPath(), path, vol.getChainInfo(), chainInfo));
logger.info("Updating volume's disk chain info. Volume: [{}]. Path: [{}] -> [{}], Disk Chain Info: [{}] -> [{}].",
volToString, vol.getPath(), path, vol.getChainInfo(), chainInfo);
vol.setPath(path);
vol.setChainInfo(chainInfo);
if (updatedDataStoreUUID != null) {
@ -2289,7 +2257,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
@Override
public void unmanageVolumes(long vmId) {
if (logger.isDebugEnabled()) {
logger.debug(String.format("Unmanaging storage for VM [%s].", _userVmDao.findById(vmId)));
logger.debug("Unmanaging storage for VM [{}].", _userVmDao.findById(vmId));
}
final List<VolumeVO> volumesForVm = _volsDao.findByInstance(vmId);
@ -2302,7 +2270,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
boolean volumeAlreadyDestroyed = (vol.getState() == Volume.State.Destroy || vol.getState() == Volume.State.Expunged
|| vol.getState() == Volume.State.Expunging);
if (volumeAlreadyDestroyed) {
logger.debug(String.format("Skipping Destroy for the volume [%s] as it is in [%s] state.", volToString, vol.getState().toString()));
logger.debug("Skipping Destroy for the volume [{}] as it is in [{}] state.", volToString, vol.getState().toString());
} else {
volService.unmanageVolume(vol.getId());
}