Solved jira ticket: CLOUDSTACK-8750

Rafael Weingartner 2015-08-28 22:35:08 -03:00
parent f5a7395cc2
commit 3818257a68
361 changed files with 5859 additions and 6550 deletions

View File (IsoProcessor.java)

@@ -25,7 +25,6 @@ import java.util.Map;
import javax.ejb.Local;
import javax.naming.ConfigurationException;
import org.apache.log4j.Logger;
import com.cloud.storage.Storage.ImageFormat;
import com.cloud.storage.StorageLayer;
@@ -33,21 +32,20 @@ import com.cloud.utils.component.AdapterBase;
@Local(value = Processor.class)
public class IsoProcessor extends AdapterBase implements Processor {
private static final Logger s_logger = Logger.getLogger(IsoProcessor.class);
StorageLayer _storage;
@Override
public FormatInfo process(String templatePath, ImageFormat format, String templateName) {
if (format != null) {
s_logger.debug("We don't handle conversion from " + format + " to ISO.");
logger.debug("We don't handle conversion from " + format + " to ISO.");
return null;
}
String isoPath = templatePath + File.separator + templateName + "." + ImageFormat.ISO.getFileExtension();
if (!_storage.exists(isoPath)) {
s_logger.debug("Unable to find the iso file: " + isoPath);
logger.debug("Unable to find the iso file: " + isoPath);
return null;
}
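The pattern repeated in every file of this commit: each class used to declare its own static s_logger bound to its own class, and the change deletes that declaration (along with the now-unused org.apache.log4j.Logger import) in favor of a logger field inherited from a shared parent, presumably declared in AdapterBase/ManagerBase or one of their ancestors. A minimal sketch of the idiom, using hypothetical names (LoggingBase, IsoCheckBefore, IsoCheckAfter) rather than the actual CloudStack classes:

import org.apache.log4j.Logger;

// Hypothetical base class: the logger is bound to getClass(), so every
// subclass logs under its own runtime category without declaring a field.
abstract class LoggingBase {
    protected Logger logger = Logger.getLogger(getClass());
}

// Before: a per-class static logger declared in every subclass.
class IsoCheckBefore extends LoggingBase {
    private static final Logger s_logger = Logger.getLogger(IsoCheckBefore.class);

    void check(String isoPath) {
        s_logger.debug("Unable to find the iso file: " + isoPath);
    }
}

// After: the single inherited instance field serves the whole hierarchy.
class IsoCheckAfter extends LoggingBase {
    void check(String isoPath) {
        logger.debug("Unable to find the iso file: " + isoPath);
    }
}

The trade-off is per-instance rather than per-class logger storage: one reference per object in exchange for removing the boilerplate declaration from 361 files.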

View File (OVAProcessor.java)

@@ -26,7 +26,6 @@ import javax.ejb.Local;
import javax.naming.ConfigurationException;
import javax.xml.parsers.DocumentBuilderFactory;
import org.apache.log4j.Logger;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
@@ -38,39 +37,38 @@ import com.cloud.utils.script.Script;
@Local(value = Processor.class)
public class OVAProcessor extends AdapterBase implements Processor {
private static final Logger s_logger = Logger.getLogger(OVAProcessor.class);
StorageLayer _storage;
@Override
public FormatInfo process(String templatePath, ImageFormat format, String templateName) throws InternalErrorException {
if (format != null) {
if (s_logger.isInfoEnabled()) {
s_logger.info("We currently don't handle conversion from " + format + " to OVA.");
if (logger.isInfoEnabled()) {
logger.info("We currently don't handle conversion from " + format + " to OVA.");
}
return null;
}
s_logger.info("Template processing. templatePath: " + templatePath + ", templateName: " + templateName);
logger.info("Template processing. templatePath: " + templatePath + ", templateName: " + templateName);
String templateFilePath = templatePath + File.separator + templateName + "." + ImageFormat.OVA.getFileExtension();
if (!_storage.exists(templateFilePath)) {
if (s_logger.isInfoEnabled()) {
s_logger.info("Unable to find the vmware template file: " + templateFilePath);
if (logger.isInfoEnabled()) {
logger.info("Unable to find the vmware template file: " + templateFilePath);
}
return null;
}
s_logger.info("Template processing - untar OVA package. templatePath: " + templatePath + ", templateName: " + templateName);
logger.info("Template processing - untar OVA package. templatePath: " + templatePath + ", templateName: " + templateName);
String templateFileFullPath = templatePath + File.separator + templateName + "." + ImageFormat.OVA.getFileExtension();
File templateFile = new File(templateFileFullPath);
Script command = new Script("tar", 0, s_logger);
Script command = new Script("tar", 0, logger);
command.add("--no-same-owner");
command.add("-xf", templateFileFullPath);
command.setWorkDir(templateFile.getParent());
String result = command.execute();
if (result != null) {
s_logger.info("failed to untar OVA package due to " + result + ". templatePath: " + templatePath + ", templateName: " + templateName);
logger.info("failed to untar OVA package due to " + result + ". templatePath: " + templatePath + ", templateName: " + templateName);
return null;
}
@@ -91,7 +89,7 @@ public class OVAProcessor extends AdapterBase implements Processor {
long size = getTemplateVirtualSize(file.getParent(), file.getName());
return size;
} catch (Exception e) {
s_logger.info("[ignored]"
logger.info("[ignored]"
+ "failed to get virtual template size for ova: " + e.getLocalizedMessage());
}
return file.length();
@@ -105,7 +103,7 @@ public class OVAProcessor extends AdapterBase implements Processor {
String ovfFileName = getOVFFilePath(templateFileFullPath);
if (ovfFileName == null) {
String msg = "Unable to locate OVF file in template package directory: " + templatePath;
s_logger.error(msg);
logger.error(msg);
throw new InternalErrorException(msg);
}
try {
@@ -130,7 +128,7 @@ public class OVAProcessor extends AdapterBase implements Processor {
return virtualSize;
} catch (Exception e) {
String msg = "Unable to parse OVF XML document to get the virtual disk size due to" + e;
s_logger.error(msg);
logger.error(msg);
throw new InternalErrorException(msg);
}
}
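Note that the guards around the renamed calls (isInfoEnabled(), isDebugEnabled()) are kept. In log4j 1.x the guard matters because the message string is concatenated before info() is even invoked; checking the level first skips that work when the level is disabled. A small illustration, assuming stock log4j 1.x and nothing else:

import org.apache.log4j.Logger;

class GuardDemo {
    private final Logger logger = Logger.getLogger(GuardDemo.class);

    void process(String templatePath, String templateName) {
        // Unguarded: the concatenation runs even when INFO is disabled.
        logger.info("Template processing. templatePath: " + templatePath + ", templateName: " + templateName);

        // Guarded: the string is only built when INFO is actually enabled.
        if (logger.isInfoEnabled()) {
            logger.info("Template processing. templatePath: " + templatePath + ", templateName: " + templateName);
        }
    }
}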

View File (QCOW2Processor.java)

@@ -27,7 +27,6 @@ import java.util.Map;
import javax.ejb.Local;
import javax.naming.ConfigurationException;
import org.apache.log4j.Logger;
import com.cloud.storage.Storage.ImageFormat;
import com.cloud.storage.StorageLayer;
@@ -36,7 +35,6 @@ import com.cloud.utils.component.AdapterBase;
@Local(value = Processor.class)
public class QCOW2Processor extends AdapterBase implements Processor {
private static final Logger s_logger = Logger.getLogger(QCOW2Processor.class);
private static final int VIRTUALSIZE_HEADER_LOCATION = 24;
private StorageLayer _storage;
@@ -44,14 +42,14 @@ public class QCOW2Processor extends AdapterBase implements Processor {
@Override
public FormatInfo process(String templatePath, ImageFormat format, String templateName) {
if (format != null) {
s_logger.debug("We currently don't handle conversion from " + format + " to QCOW2.");
logger.debug("We currently don't handle conversion from " + format + " to QCOW2.");
return null;
}
String qcow2Path = templatePath + File.separator + templateName + "." + ImageFormat.QCOW2.getFileExtension();
if (!_storage.exists(qcow2Path)) {
s_logger.debug("Unable to find the qcow2 file: " + qcow2Path);
logger.debug("Unable to find the qcow2 file: " + qcow2Path);
return null;
}
@@ -66,7 +64,7 @@ public class QCOW2Processor extends AdapterBase implements Processor {
try {
info.virtualSize = getVirtualSize(qcow2File);
} catch (IOException e) {
s_logger.error("Unable to get virtual size from " + qcow2File.getName());
logger.error("Unable to get virtual size from " + qcow2File.getName());
return null;
}

View File (RawImageProcessor.java)

@@ -25,7 +25,6 @@ import java.util.Map;
import javax.ejb.Local;
import javax.naming.ConfigurationException;
import org.apache.log4j.Logger;
import com.cloud.exception.InternalErrorException;
import com.cloud.storage.Storage.ImageFormat;
@@ -34,7 +33,6 @@ import com.cloud.utils.component.AdapterBase;
@Local(value = Processor.class)
public class RawImageProcessor extends AdapterBase implements Processor {
private static final Logger s_logger = Logger.getLogger(RawImageProcessor.class);
StorageLayer _storage;
@Override
@@ -50,13 +48,13 @@ public class RawImageProcessor extends AdapterBase implements Processor {
@Override
public FormatInfo process(String templatePath, ImageFormat format, String templateName) throws InternalErrorException {
if (format != null) {
s_logger.debug("We currently don't handle conversion from " + format + " to raw image.");
logger.debug("We currently don't handle conversion from " + format + " to raw image.");
return null;
}
String imgPath = templatePath + File.separator + templateName + "." + ImageFormat.RAW.getFileExtension();
if (!_storage.exists(imgPath)) {
s_logger.debug("Unable to find raw image:" + imgPath);
logger.debug("Unable to find raw image:" + imgPath);
return null;
}
FormatInfo info = new FormatInfo();
@@ -64,7 +62,7 @@ public class RawImageProcessor extends AdapterBase implements Processor {
info.filename = templateName + "." + ImageFormat.RAW.getFileExtension();
info.size = _storage.getSize(imgPath);
info.virtualSize = info.size;
s_logger.debug("Process raw image " + info.filename + " successfully");
logger.debug("Process raw image " + info.filename + " successfully");
return info;
}

View File (TARProcessor.java)

@@ -22,7 +22,6 @@ package com.cloud.storage.template;
import com.cloud.storage.Storage.ImageFormat;
import com.cloud.storage.StorageLayer;
import com.cloud.utils.component.AdapterBase;
import org.apache.log4j.Logger;
import javax.ejb.Local;
import javax.naming.ConfigurationException;
@@ -31,21 +30,20 @@ import java.util.Map;
@Local(value = Processor.class)
public class TARProcessor extends AdapterBase implements Processor {
private static final Logger s_logger = Logger.getLogger(TARProcessor.class);
private StorageLayer _storage;
@Override
public FormatInfo process(String templatePath, ImageFormat format, String templateName) {
if (format != null) {
s_logger.debug("We currently don't handle conversion from " + format + " to TAR.");
logger.debug("We currently don't handle conversion from " + format + " to TAR.");
return null;
}
String tarPath = templatePath + File.separator + templateName + "." + ImageFormat.TAR.getFileExtension();
if (!_storage.exists(tarPath)) {
s_logger.debug("Unable to find the tar file: " + tarPath);
logger.debug("Unable to find the tar file: " + tarPath);
return null;
}

View File (VhdProcessor.java)

@@ -27,7 +27,6 @@ import java.util.Map;
import javax.ejb.Local;
import javax.naming.ConfigurationException;
import org.apache.log4j.Logger;
import com.cloud.storage.Storage.ImageFormat;
import com.cloud.storage.StorageLayer;
@@ -43,7 +42,6 @@ import com.cloud.utils.component.AdapterBase;
@Local(value = Processor.class)
public class VhdProcessor extends AdapterBase implements Processor {
private static final Logger s_logger = Logger.getLogger(VhdProcessor.class);
StorageLayer _storage;
private int vhdFooterSize = 512;
private int vhdFooterCreatorAppOffset = 28;
@@ -54,13 +52,13 @@ public class VhdProcessor extends AdapterBase implements Processor {
@Override
public FormatInfo process(String templatePath, ImageFormat format, String templateName) {
if (format != null) {
s_logger.debug("We currently don't handle conversion from " + format + " to VHD.");
logger.debug("We currently don't handle conversion from " + format + " to VHD.");
return null;
}
String vhdPath = templatePath + File.separator + templateName + "." + ImageFormat.VHD.getFileExtension();
if (!_storage.exists(vhdPath)) {
s_logger.debug("Unable to find the vhd file: " + vhdPath);
logger.debug("Unable to find the vhd file: " + vhdPath);
return null;
}
@@ -74,7 +72,7 @@ public class VhdProcessor extends AdapterBase implements Processor {
try {
info.virtualSize = getVirtualSize(vhdFile);
} catch (IOException e) {
s_logger.error("Unable to get the virtual size for " + vhdPath);
logger.error("Unable to get the virtual size for " + vhdPath);
return null;
}

View File (VmdkProcessor.java)

@@ -31,7 +31,6 @@ import java.util.regex.Pattern;
import javax.ejb.Local;
import javax.naming.ConfigurationException;
import org.apache.log4j.Logger;
import com.cloud.exception.InternalErrorException;
import com.cloud.storage.Storage.ImageFormat;
@@ -40,24 +39,23 @@ import com.cloud.utils.component.AdapterBase;
@Local(value = Processor.class)
public class VmdkProcessor extends AdapterBase implements Processor {
private static final Logger s_logger = Logger.getLogger(VmdkProcessor.class);
StorageLayer _storage;
@Override
public FormatInfo process(String templatePath, ImageFormat format, String templateName) throws InternalErrorException {
if (format != null) {
if (s_logger.isInfoEnabled()) {
s_logger.info("We currently don't handle conversion from " + format + " to VMDK.");
if (logger.isInfoEnabled()) {
logger.info("We currently don't handle conversion from " + format + " to VMDK.");
}
return null;
}
s_logger.info("Template processing. templatePath: " + templatePath + ", templateName: " + templateName);
logger.info("Template processing. templatePath: " + templatePath + ", templateName: " + templateName);
String templateFilePath = templatePath + File.separator + templateName + "." + ImageFormat.VMDK.getFileExtension();
if (!_storage.exists(templateFilePath)) {
if (s_logger.isInfoEnabled()) {
s_logger.info("Unable to find the vmware template file: " + templateFilePath);
if (logger.isInfoEnabled()) {
logger.info("Unable to find the vmware template file: " + templateFilePath);
}
return null;
}
@@ -77,7 +75,7 @@ public class VmdkProcessor extends AdapterBase implements Processor {
long size = getTemplateVirtualSize(file.getParent(), file.getName());
return size;
} catch (Exception e) {
s_logger.info("[ignored]"
logger.info("[ignored]"
+ "failed to get template virtual size for vmdk: " + e.getLocalizedMessage());
}
return file.length();
@@ -103,15 +101,15 @@ public class VmdkProcessor extends AdapterBase implements Processor {
}
} catch(FileNotFoundException ex) {
String msg = "Unable to open file '" + templateFileFullPath + "' " + ex.toString();
s_logger.error(msg);
logger.error(msg);
throw new InternalErrorException(msg);
} catch(IOException ex) {
String msg = "Unable to read file '" + templateFileFullPath + "' " + ex.toString();
s_logger.error(msg);
logger.error(msg);
throw new InternalErrorException(msg);
}
s_logger.debug("vmdk file had size="+virtualSize);
logger.debug("vmdk file had size="+virtualSize);
return virtualSize;
}

View File (AgentManagerImpl.java)

@@ -45,7 +45,6 @@ import org.apache.cloudstack.framework.jobs.AsyncJob;
import org.apache.cloudstack.framework.jobs.AsyncJobExecutionContext;
import org.apache.cloudstack.managed.context.ManagedContextRunnable;
import org.apache.cloudstack.utils.identity.ManagementServerNode;
import org.apache.log4j.Logger;
import com.cloud.agent.AgentManager;
import com.cloud.agent.Listener;
@@ -116,8 +115,6 @@ import com.cloud.utils.time.InaccurateClock;
**/
@Local(value = {AgentManager.class})
public class AgentManagerImpl extends ManagerBase implements AgentManager, HandlerFactory, Configurable {
protected static final Logger s_logger = Logger.getLogger(AgentManagerImpl.class);
protected static final Logger status_logger = Logger.getLogger(Status.class);
/**
* _agents is a ConcurrentHashMap, but it is used from within a synchronized block.
@@ -200,12 +197,12 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
@Override
public boolean configure(final String name, final Map<String, Object> params) throws ConfigurationException {
s_logger.info("Ping Timeout is " + PingTimeout.value());
logger.info("Ping Timeout is " + PingTimeout.value());
final int threads = DirectAgentLoadSize.value();
_nodeId = ManagementServerNode.getManagementServerId();
s_logger.info("Configuring AgentManagerImpl. management server node id(msid): " + _nodeId);
logger.info("Configuring AgentManagerImpl. management server node id(msid): " + _nodeId);
final long lastPing = (System.currentTimeMillis() >> 10) - (long)(PingTimeout.value() * PingInterval.value());
_hostDao.markHostsAsDisconnected(_nodeId, lastPing);
@@ -219,13 +216,13 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
_connectExecutor.allowCoreThreadTimeOut(true);
_connection = new NioServer("AgentManager", Port.value(), Workers.value() + 10, this);
s_logger.info("Listening on " + Port.value() + " with " + Workers.value() + " workers");
logger.info("Listening on " + Port.value() + " with " + Workers.value() + " workers");
// executes all agent commands other than cron and ping
_directAgentExecutor = new ScheduledThreadPoolExecutor(DirectAgentPoolSize.value(), new NamedThreadFactory("DirectAgent"));
// executes cron and ping agent commands
_cronJobExecutor = new ScheduledThreadPoolExecutor(DirectAgentPoolSize.value(), new NamedThreadFactory("DirectAgentCronJob"));
s_logger.debug("Created DirectAgentAttache pool with size: " + DirectAgentPoolSize.value());
logger.debug("Created DirectAgentAttache pool with size: " + DirectAgentPoolSize.value());
_directAgentThreadCap = Math.round(DirectAgentPoolSize.value() * DirectAgentThreadCap.value()) + 1; // add 1 to always make the value > 0
_monitorExecutor = new ScheduledThreadPoolExecutor(1, new NamedThreadFactory("AgentMonitor"));
@@ -260,8 +257,8 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
_cmdMonitors.add(new Pair<Integer, Listener>(_monitorId, listener));
}
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("Registering listener " + listener.getClass().getSimpleName() + " with id " + _monitorId);
if (logger.isDebugEnabled()) {
logger.debug("Registering listener " + listener.getClass().getSimpleName() + " with id " + _monitorId);
}
return _monitorId;
}
@@ -282,7 +279,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
@Override
public void unregisterForHostEvents(final int id) {
s_logger.debug("Deregistering " + id);
logger.debug("Deregistering " + id);
_hostMonitors.remove(id);
}
@@ -297,15 +294,15 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
}
}
s_logger.warn("No handling of agent control command: " + cmd + " sent from " + attache.getId());
logger.warn("No handling of agent control command: " + cmd + " sent from " + attache.getId());
return new AgentControlAnswer(cmd);
}
public void handleCommands(final AgentAttache attache, final long sequence, final Command[] cmds) {
for (final Pair<Integer, Listener> listener : _cmdMonitors) {
final boolean processed = listener.second().processCommands(attache.getId(), sequence, cmds);
if (s_logger.isTraceEnabled()) {
s_logger.trace("SeqA " + attache.getId() + "-" + sequence + ": " + (processed ? "processed" : "not processed") + " by " + listener.getClass());
if (logger.isTraceEnabled()) {
logger.trace("SeqA " + attache.getId() + "-" + sequence + ": " + (processed ? "processed" : "not processed") + " by " + listener.getClass());
}
}
}
@@ -365,7 +362,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
}
if (answers != null && answers[0] instanceof UnsupportedAnswer) {
s_logger.warn("Unsupported Command: " + answers[0].getDetails());
logger.warn("Unsupported Command: " + answers[0].getDetails());
return answers[0];
}
@@ -458,14 +455,14 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
final Long hostId = agent.getId();
final HostVO host = _hostDao.findById(hostId);
if (host != null && host.getType() != null && !host.getType().isVirtual()) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("checking if agent (" + hostId + ") is alive");
if (logger.isDebugEnabled()) {
logger.debug("checking if agent (" + hostId + ") is alive");
}
final Answer answer = easySend(hostId, new CheckHealthCommand());
if (answer != null && answer.getResult()) {
final Status status = Status.Up;
if (s_logger.isDebugEnabled()) {
s_logger.debug("agent (" + hostId + ") responded to checkHealthCommand, reporting that agent is " + status);
if (logger.isDebugEnabled()) {
logger.debug("agent (" + hostId + ") responded to checkHealthCommand, reporting that agent is " + status);
}
return status;
}
@@ -480,7 +477,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
}
final AgentAttache agent = findAttache(hostId);
if (agent == null) {
s_logger.debug("Unable to find agent for " + hostId);
logger.debug("Unable to find agent for " + hostId);
throw new AgentUnavailableException("Unable to find agent ", hostId);
}
@@ -508,8 +505,8 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
return;
}
final long hostId = attache.getId();
if (s_logger.isDebugEnabled()) {
s_logger.debug("Remove Agent : " + hostId);
if (logger.isDebugEnabled()) {
logger.debug("Remove Agent : " + hostId);
}
AgentAttache removed = null;
boolean conflict = false;
@@ -522,15 +519,15 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
}
}
if (conflict) {
s_logger.debug("Agent for host " + hostId + " was created while it was being disconnected");
logger.debug("Agent for host " + hostId + " was created while it was being disconnected");
}
if (removed != null) {
removed.disconnect(nextState);
}
for (final Pair<Integer, Listener> monitor : _hostMonitors) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Sending Disconnect to listener: " + monitor.second().getClass().getName());
if (logger.isDebugEnabled()) {
logger.debug("Sending Disconnect to listener: " + monitor.second().getClass().getName());
}
monitor.second().processDisconnect(hostId, nextState);
}
@@ -540,8 +537,8 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
final long hostId = attache.getId();
final HostVO host = _hostDao.findById(hostId);
for (final Pair<Integer, Listener> monitor : _hostMonitors) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Sending Connect to listener: " + monitor.second().getClass().getSimpleName());
if (logger.isDebugEnabled()) {
logger.debug("Sending Connect to listener: " + monitor.second().getClass().getSimpleName());
}
for (int i = 0; i < cmd.length; i++) {
try {
@@ -550,12 +547,12 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
if (e instanceof ConnectionException) {
final ConnectionException ce = (ConnectionException)e;
if (ce.isSetupError()) {
s_logger.warn("Monitor " + monitor.second().getClass().getSimpleName() + " says there is an error in the connect process for " + hostId +
logger.warn("Monitor " + monitor.second().getClass().getSimpleName() + " says there is an error in the connect process for " + hostId +
" due to " + e.getMessage());
handleDisconnectWithoutInvestigation(attache, Event.AgentDisconnected, true, true);
throw ce;
} else {
s_logger.info("Monitor " + monitor.second().getClass().getSimpleName() + " says not to continue the connect process for " + hostId +
logger.info("Monitor " + monitor.second().getClass().getSimpleName() + " says not to continue the connect process for " + hostId +
" due to " + e.getMessage());
handleDisconnectWithoutInvestigation(attache, Event.ShutdownRequested, true, true);
return attache;
@@ -564,7 +561,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
handleDisconnectWithoutInvestigation(attache, Event.ShutdownRequested, true, true);
throw new CloudRuntimeException("Unable to connect " + attache.getId(), e);
} else {
s_logger.error("Monitor " + monitor.second().getClass().getSimpleName() + " says there is an error in the connect process for " + hostId +
logger.error("Monitor " + monitor.second().getClass().getSimpleName() + " says there is an error in the connect process for " + hostId +
" due to " + e.getMessage(), e);
handleDisconnectWithoutInvestigation(attache, Event.AgentDisconnected, true, true);
throw new CloudRuntimeException("Unable to connect " + attache.getId(), e);
@@ -616,19 +613,19 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
final Constructor<?> constructor = clazz.getConstructor();
resource = (ServerResource)constructor.newInstance();
} catch (final ClassNotFoundException e) {
s_logger.warn("Unable to find class " + host.getResource(), e);
logger.warn("Unable to find class " + host.getResource(), e);
} catch (final InstantiationException e) {
s_logger.warn("Unable to instantiate class " + host.getResource(), e);
logger.warn("Unable to instantiate class " + host.getResource(), e);
} catch (final IllegalAccessException e) {
s_logger.warn("Illegal access " + host.getResource(), e);
logger.warn("Illegal access " + host.getResource(), e);
} catch (final SecurityException e) {
s_logger.warn("Security error on " + host.getResource(), e);
logger.warn("Security error on " + host.getResource(), e);
} catch (final NoSuchMethodException e) {
s_logger.warn("NoSuchMethodException error on " + host.getResource(), e);
logger.warn("NoSuchMethodException error on " + host.getResource(), e);
} catch (final IllegalArgumentException e) {
s_logger.warn("IllegalArgumentException error on " + host.getResource(), e);
logger.warn("IllegalArgumentException error on " + host.getResource(), e);
} catch (final InvocationTargetException e) {
s_logger.warn("InvocationTargetException error on " + host.getResource(), e);
logger.warn("InvocationTargetException error on " + host.getResource(), e);
}
if (resource != null) {
@@ -662,12 +659,12 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
try {
resource.configure(host.getName(), params);
} catch (final ConfigurationException e) {
s_logger.warn("Unable to configure resource due to " + e.getMessage());
logger.warn("Unable to configure resource due to " + e.getMessage());
return null;
}
if (!resource.start()) {
s_logger.warn("Unable to start the resource");
logger.warn("Unable to start the resource");
return null;
}
}
@@ -685,14 +682,14 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
//load the respective discoverer
final Discoverer discoverer = _resourceMgr.getMatchingDiscover(host.getHypervisorType());
if (discoverer == null) {
s_logger.info("Could not find a Discoverer to load the resource: " + host.getId() + " for hypervisor type: " + host.getHypervisorType());
logger.info("Could not find a Discoverer to load the resource: " + host.getId() + " for hypervisor type: " + host.getHypervisorType());
resource = loadResourcesWithoutHypervisor(host);
} else {
resource = discoverer.reloadResource(host);
}
if (resource == null) {
s_logger.warn("Unable to load the resource: " + host.getId());
logger.warn("Unable to load the resource: " + host.getId());
return false;
}
@@ -718,7 +715,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
}
protected AgentAttache createAttacheForDirectConnect(final Host host, final ServerResource resource) throws ConnectionException {
s_logger.debug("create DirectAgentAttache for " + host.getId());
logger.debug("create DirectAgentAttache for " + host.getId());
final DirectAgentAttache attache = new DirectAgentAttache(this, host.getId(), host.getName(), resource, host.isInMaintenanceStates());
AgentAttache old = null;
@@ -739,13 +736,13 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
_connection.stop();
}
s_logger.info("Disconnecting agents: " + _agents.size());
logger.info("Disconnecting agents: " + _agents.size());
synchronized (_agents) {
for (final AgentAttache agent : _agents.values()) {
final HostVO host = _hostDao.findById(agent.getId());
if (host == null) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Cannot find host " + agent.getId());
if (logger.isDebugEnabled()) {
logger.debug("Cannot find host " + agent.getId());
}
} else {
if (!agent.forForward()) {
@@ -763,17 +760,17 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
protected boolean handleDisconnectWithoutInvestigation(final AgentAttache attache, final Status.Event event, final boolean transitState, final boolean removeAgent) {
final long hostId = attache.getId();
s_logger.info("Host " + hostId + " is disconnecting with event " + event);
logger.info("Host " + hostId + " is disconnecting with event " + event);
Status nextStatus = null;
final HostVO host = _hostDao.findById(hostId);
if (host == null) {
s_logger.warn("Can't find host with " + hostId);
logger.warn("Can't find host with " + hostId);
nextStatus = Status.Removed;
} else {
final Status currentStatus = host.getStatus();
if (currentStatus == Status.Down || currentStatus == Status.Alert || currentStatus == Status.Removed) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Host " + hostId + " is already " + currentStatus);
if (logger.isDebugEnabled()) {
logger.debug("Host " + hostId + " is already " + currentStatus);
}
nextStatus = currentStatus;
} else {
@@ -781,18 +778,18 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
nextStatus = currentStatus.getNextStatus(event);
} catch (final NoTransitionException e) {
final String err = "Cannot find next status for " + event + " as current status is " + currentStatus + " for agent " + hostId;
s_logger.debug(err);
logger.debug(err);
throw new CloudRuntimeException(err);
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("The next status of agent " + hostId + " is " + nextStatus + ", current status is " + currentStatus);
if (logger.isDebugEnabled()) {
logger.debug("The next status of agent " + hostId + " is " + nextStatus + ", current status is " + currentStatus);
}
}
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("Deregistering link for " + hostId + " with state " + nextStatus);
if (logger.isDebugEnabled()) {
logger.debug("Deregistering link for " + hostId + " with state " + nextStatus);
}
removeAgent(attache, nextStatus);
@@ -817,48 +814,48 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
* Why this can happen? Ask God not me. I hate there was no piece of comment for code handling race condition.
* God knew what race condition the code dealt with!
*/
s_logger.debug("Caught exception while getting agent's next status", ne);
logger.debug("Caught exception while getting agent's next status", ne);
}
if (nextStatus == Status.Alert) {
/* OK, we are going to the bad status, let's see what happened */
s_logger.info("Investigating why host " + hostId + " has disconnected with event " + event);
logger.info("Investigating why host " + hostId + " has disconnected with event " + event);
Status determinedState = investigate(attache);
// if state cannot be determined do nothing and bail out
if (determinedState == null) {
if (((System.currentTimeMillis() >> 10) - host.getLastPinged()) > AlertWait.value()) {
s_logger.warn("Agent " + hostId + " state cannot be determined for more than " + AlertWait + "(" + AlertWait.value() + ") seconds, will go to Alert state");
logger.warn("Agent " + hostId + " state cannot be determined for more than " + AlertWait + "(" + AlertWait.value() + ") seconds, will go to Alert state");
determinedState = Status.Alert;
} else {
s_logger.warn("Agent " + hostId + " state cannot be determined, do nothing");
logger.warn("Agent " + hostId + " state cannot be determined, do nothing");
return false;
}
}
final Status currentStatus = host.getStatus();
s_logger.info("The agent " + hostId + " state determined is " + determinedState);
logger.info("The agent " + hostId + " state determined is " + determinedState);
if (determinedState == Status.Down) {
String message = "Host is down: " + host.getId() + "-" + host.getName() + ". Starting HA on the VMs";
s_logger.error(message);
logger.error(message);
if (host.getType() != Host.Type.SecondaryStorage && host.getType() != Host.Type.ConsoleProxy) {
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), "Host down, " + host.getId(), message);
}
event = Status.Event.HostDown;
} else if (determinedState == Status.Up) {
/* Got ping response from host, bring it back */
s_logger.info("Agent is determined to be up and running");
logger.info("Agent is determined to be up and running");
agentStatusTransitTo(host, Status.Event.Ping, _nodeId);
return false;
} else if (determinedState == Status.Disconnected) {
s_logger.warn("Agent is disconnected but the host is still up: " + host.getId() + "-" + host.getName());
logger.warn("Agent is disconnected but the host is still up: " + host.getId() + "-" + host.getName());
if (currentStatus == Status.Disconnected) {
if ((System.currentTimeMillis() >> 10) - host.getLastPinged() > AlertWait.value()) {
s_logger.warn("Host " + host.getId() + " has been disconnected longer than the allowed wait time.");
logger.warn("Host " + host.getId() + " has been disconnected longer than the allowed wait time.");
event = Status.Event.WaitedTooLong;
} else {
s_logger.debug("Host " + host.getId() + " has been determined to be disconnected but it hasn't passed the wait time yet.");
logger.debug("Host " + host.getId() + " has been determined to be disconnected but it hasn't passed the wait time yet.");
return false;
}
} else if (currentStatus == Status.Up) {
@@ -880,7 +877,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
"In availability zone " + host.getDataCenterId() + ", host is in alert state: " + host.getId() + "-" + host.getName());
}
} else {
s_logger.debug("The next status of agent " + host.getId() + " is not Alert, no need to investigate what happened");
logger.debug("The next status of agent " + host.getId() + " is not Alert, no need to investigate what happened");
}
}
handleDisconnectWithoutInvestigation(attache, event, true, true);
@@ -911,7 +908,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
handleDisconnectWithoutInvestigation(_attache, _event, true, false);
}
} catch (final Exception e) {
s_logger.error("Exception caught while handling disconnect: ", e);
logger.error("Exception caught while handling disconnect: ", e);
}
}
}
@@ -921,34 +918,34 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
try {
final Host h = _hostDao.findById(hostId);
if (h == null || h.getRemoved() != null) {
s_logger.debug("Host with id " + hostId + " doesn't exist");
logger.debug("Host with id " + hostId + " doesn't exist");
return null;
}
final Status status = h.getStatus();
if (!status.equals(Status.Up) && !status.equals(Status.Connecting)) {
s_logger.debug("Cannot send command " + cmd + " because host " + hostId + " is not up");
logger.debug("Cannot send command " + cmd + " because host " + hostId + " is not up");
return null;
}
final Answer answer = send(hostId, cmd);
if (answer == null) {
s_logger.warn("send returns null answer");
logger.warn("send returns null answer");
return null;
}
if (s_logger.isDebugEnabled() && answer.getDetails() != null) {
s_logger.debug("Details from executing " + cmd.getClass() + ": " + answer.getDetails());
if (logger.isDebugEnabled() && answer.getDetails() != null) {
logger.debug("Details from executing " + cmd.getClass() + ": " + answer.getDetails());
}
return answer;
} catch (final AgentUnavailableException e) {
s_logger.warn(e.getMessage());
logger.warn(e.getMessage());
return null;
} catch (final OperationTimedoutException e) {
s_logger.warn("Operation timed out: " + e.getMessage());
logger.warn("Operation timed out: " + e.getMessage());
return null;
} catch (final Exception e) {
s_logger.warn("Exception while sending", e);
logger.warn("Exception while sending", e);
return null;
}
}
@@ -970,23 +967,23 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
host = _hostDao.findById(hostId);
if (host == null || host.getRemoved() != null) {
s_logger.warn("Unable to find host " + hostId);
logger.warn("Unable to find host " + hostId);
return false;
}
if (host.getStatus() == Status.Disconnected) {
s_logger.info("Host is already disconnected, no work to be done");
logger.info("Host is already disconnected, no work to be done");
return true;
}
if (host.getStatus() != Status.Up && host.getStatus() != Status.Alert && host.getStatus() != Status.Rebalancing) {
s_logger.info("Unable to disconnect host because it is not in the correct state: host=" + hostId + "; Status=" + host.getStatus());
logger.info("Unable to disconnect host because it is not in the correct state: host=" + hostId + "; Status=" + host.getStatus());
return false;
}
final AgentAttache attache = findAttache(hostId);
if (attache == null) {
s_logger.info("Unable to disconnect host because it is not connected to this server: " + hostId);
logger.info("Unable to disconnect host because it is not connected to this server: " + hostId);
return false;
}
@@ -996,8 +993,8 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
public boolean executeUserRequest(final long hostId, final Event event) throws AgentUnavailableException {
if (event == Event.AgentDisconnected) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Received agent disconnect event for host " + hostId);
if (logger.isDebugEnabled()) {
logger.debug("Received agent disconnect event for host " + hostId);
}
AgentAttache attache = null;
attache = findAttache(hostId);
@@ -1018,7 +1015,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
}
protected AgentAttache createAttacheForConnect(final HostVO host, final Link link) throws ConnectionException {
s_logger.debug("create ConnectedAgentAttache for " + host.getId());
logger.debug("create ConnectedAgentAttache for " + host.getId());
final AgentAttache attache = new ConnectedAgentAttache(this, host.getId(), host.getName(), link, host.isInMaintenanceStates());
link.attach(attache);
@@ -1044,7 +1041,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
attache = notifyMonitorsOfConnection(attache, startup, false);
}
} catch (final Exception e) {
s_logger.debug("Failed to handle host connection: " + e.toString());
logger.debug("Failed to handle host connection: " + e.toString());
ready = new ReadyCommand(null);
ready.setDetails(e.toString());
} finally {
@@ -1061,7 +1058,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
easySend(attache.getId(), ready);
}
} catch (final Exception e) {
s_logger.debug("Failed to send ready command:" + e.toString());
logger.debug("Failed to send ready command:" + e.toString());
}
return attache;
}
@@ -1080,28 +1077,28 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
@Override
protected void runInContext() {
try {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Simulating start for resource " + resource.getName() + " id " + id);
if (logger.isDebugEnabled()) {
logger.debug("Simulating start for resource " + resource.getName() + " id " + id);
}
if (tapLoadingAgents(id, TapAgentsAction.Add)) {
try {
final AgentAttache agentattache = findAttache(id);
if (agentattache == null) {
s_logger.debug("Creating agent for host " + id);
logger.debug("Creating agent for host " + id);
_resourceMgr.createHostAndAgent(id, resource, details, false, null, false);
s_logger.debug("Completed creating agent for host " + id);
logger.debug("Completed creating agent for host " + id);
} else {
s_logger.debug("Agent already created in another thread for host " + id + ", ignore this");
logger.debug("Agent already created in another thread for host " + id + ", ignore this");
}
} finally {
tapLoadingAgents(id, TapAgentsAction.Del);
}
} else {
s_logger.debug("Agent creation already getting processed in another thread for host " + id + ", ignore this");
logger.debug("Agent creation already getting processed in another thread for host " + id + ", ignore this");
}
} catch (final Exception e) {
s_logger.warn("Unable to simulate start on resource " + id + " name " + resource.getName(), e);
logger.warn("Unable to simulate start on resource " + id + " name " + resource.getName(), e);
}
}
}
@@ -1127,7 +1124,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
final AgentAttache attache = handleConnectedAgent(_link, startups, _request);
if (attache == null) {
s_logger.warn("Unable to create attache for agent: " + _request);
logger.warn("Unable to create attache for agent: " + _request);
}
}
}
@@ -1149,7 +1146,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
try {
link.send(response.toBytes());
} catch (final ClosedChannelException e) {
s_logger.debug("Failed to send startupanswer: " + e.toString());
logger.debug("Failed to send startupanswer: " + e.toString());
}
_connectExecutor.execute(new HandleAgentConnectTask(link, cmds, request));
}
@@ -1167,7 +1164,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
if (attache == null) {
if (!(cmd instanceof StartupCommand)) {
s_logger.warn("Throwing away a request because it came through as the first command on a connect: " + request);
logger.warn("Throwing away a request because it came through as the first command on a connect: " + request);
} else {
//submit the task for execution
request.logD("Scheduling the first command ");
@@ -1178,17 +1175,17 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
final long hostId = attache.getId();
if (s_logger.isDebugEnabled()) {
if (logger.isDebugEnabled()) {
if (cmd instanceof PingRoutingCommand) {
logD = false;
s_logger.debug("Ping from " + hostId);
s_logger.trace("SeqA " + hostId + "-" + request.getSequence() + ": Processing " + request);
logger.debug("Ping from " + hostId);
logger.trace("SeqA " + hostId + "-" + request.getSequence() + ": Processing " + request);
} else if (cmd instanceof PingCommand) {
logD = false;
s_logger.debug("Ping from " + hostId);
s_logger.trace("SeqA " + attache.getId() + "-" + request.getSequence() + ": Processing " + request);
logger.debug("Ping from " + hostId);
logger.trace("SeqA " + attache.getId() + "-" + request.getSequence() + ": Processing " + request);
} else {
s_logger.debug("SeqA " + attache.getId() + "-" + request.getSequence() + ": Processing " + request);
logger.debug("SeqA " + attache.getId() + "-" + request.getSequence() + ": Processing " + request);
}
}
@@ -1212,7 +1209,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
} else if (cmd instanceof ShutdownCommand) {
final ShutdownCommand shutdown = (ShutdownCommand)cmd;
final String reason = shutdown.getReason();
s_logger.info("Host " + attache.getId() + " has informed us that it is shutting down with reason " + reason + " and detail " +
logger.info("Host " + attache.getId() + " has informed us that it is shutting down with reason " + reason + " and detail " +
shutdown.getDetail());
if (reason.equals(ShutdownCommand.Update)) {
//disconnectWithoutInvestigation(attache, Event.UpdateNeeded);
@@ -1250,7 +1247,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
_alertMgr.clearAlert(AlertManager.AlertType.ALERT_TYPE_ROUTING, host.getDataCenterId(), host.getPodId());
}
} else {
s_logger.debug("Not processing " + PingRoutingCommand.class.getSimpleName() + " for agent id=" + cmdHostId +
logger.debug("Not processing " + PingRoutingCommand.class.getSimpleName() + " for agent id=" + cmdHostId +
"; can't find the host in the DB");
}
}
@@ -1258,8 +1255,8 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
} else if (cmd instanceof ReadyAnswer) {
final HostVO host = _hostDao.findById(attache.getId());
if (host == null) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Cannot find host " + attache.getId());
if (logger.isDebugEnabled()) {
logger.debug("Cannot find host " + attache.getId());
}
}
answer = new Answer(cmd);
@@ -1268,33 +1265,33 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
}
}
} catch (final Throwable th) {
s_logger.warn("Caught: ", th);
logger.warn("Caught: ", th);
answer = new Answer(cmd, false, th.getMessage());
}
answers[i] = answer;
}
final Response response = new Response(request, answers, _nodeId, attache.getId());
if (s_logger.isDebugEnabled()) {
if (logger.isDebugEnabled()) {
if (logD) {
s_logger.debug("SeqA " + attache.getId() + "-" + response.getSequence() + ": Sending " + response);
logger.debug("SeqA " + attache.getId() + "-" + response.getSequence() + ": Sending " + response);
} else {
s_logger.trace("SeqA " + attache.getId() + "-" + response.getSequence() + ": Sending " + response);
logger.trace("SeqA " + attache.getId() + "-" + response.getSequence() + ": Sending " + response);
}
}
try {
link.send(response.toBytes());
} catch (final ClosedChannelException e) {
s_logger.warn("Unable to send response because connection is closed: " + response);
logger.warn("Unable to send response because connection is closed: " + response);
}
}
protected void processResponse(final Link link, final Response response) {
final AgentAttache attache = (AgentAttache)link.attachment();
if (attache == null) {
s_logger.warn("Unable to process: " + response);
logger.warn("Unable to process: " + response);
} else if (!attache.processAnswers(response.getSequence(), response)) {
s_logger.info("Host " + attache.getId() + " - Seq " + response.getSequence() + ": Response is not processed: " + response);
logger.info("Host " + attache.getId() + " - Seq " + response.getSequence() + ": Response is not processed: " + response);
}
}
@@ -1313,7 +1310,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
processRequest(task.getLink(), event);
}
} catch (final UnsupportedVersionException e) {
s_logger.warn(e.getMessage());
logger.warn(e.getMessage());
// upgradeAgent(task.getLink(), data, e.getReason());
}
} else if (type == Task.Type.CONNECT) {
@@ -1323,7 +1320,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
if (attache != null) {
disconnectWithInvestigation(attache, Event.AgentDisconnected);
} else {
s_logger.info("Connection from " + link.getIpAddress() + " closed but no cleanup was done.");
logger.info("Connection from " + link.getIpAddress() + " closed but no cleanup was done.");
link.close();
link.terminated();
}
@@ -1360,20 +1357,20 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
public boolean agentStatusTransitTo(final HostVO host, final Status.Event e, final long msId) {
try {
_agentStatusLock.lock();
if (status_logger.isDebugEnabled()) {
if (logger.isDebugEnabled()) {
final ResourceState state = host.getResourceState();
final StringBuilder msg = new StringBuilder("Transition:");
msg.append("[Resource state = ").append(state);
msg.append(", Agent event = ").append(e.toString());
msg.append(", Host id = ").append(host.getId()).append(", name = " + host.getName()).append("]");
status_logger.debug(msg);
logger.debug(msg);
}
host.setManagementServerId(msId);
try {
return _statusStateMachine.transitTo(host, e, host.getId(), _hostDao);
} catch (final NoTransitionException e1) {
status_logger.debug("Cannot transit agent status with event " + e + " for host " + host.getId() + ", name=" + host.getName() +
logger.debug("Cannot transit agent status with event " + e + " for host " + host.getId() + ", name=" + host.getName() +
", management server id is " + msId);
throw new CloudRuntimeException("Cannot transit agent status with event " + e + " for host " + host.getId() + ", management server id is " + msId + "," +
e1.getMessage());
@@ -1404,7 +1401,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
protected boolean isHostOwnerSwitched(final long hostId) {
final HostVO host = _hostDao.findById(hostId);
if (host == null) {
s_logger.warn("Can't find the host " + hostId);
logger.warn("Can't find the host " + hostId);
return false;
}
return isHostOwnerSwitched(host);
@@ -1429,7 +1426,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
} else {
/* Agent is still in connecting process, don't allow to disconnect right away */
if (tapLoadingAgents(hostId, TapAgentsAction.Contains)) {
s_logger.info("Host " + hostId + " is being loaded so no disconnects needed.");
logger.info("Host " + hostId + " is being loaded so no disconnects needed.");
return;
}
@@ -1501,14 +1498,14 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
public void pingBy(final long agentId) {
// Update PingMap with the latest time if agent entry exists in the PingMap
if (_pingMap.replace(agentId, InaccurateClock.getTimeInSeconds()) == null) {
s_logger.info("PingMap for agent: " + agentId + " will not be updated because agent is no longer in the PingMap");
logger.info("PingMap for agent: " + agentId + " will not be updated because agent is no longer in the PingMap");
}
}
protected class MonitorTask extends ManagedContextRunnable {
@Override
protected void runInContext() {
s_logger.trace("Agent Monitor is started.");
logger.trace("Agent Monitor is started.");
try {
final List<Long> behindAgents = findAgentsBehindOnPing();
@@ -1524,17 +1521,17 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
* investigation and direct put agent to
* Disconnected
*/
status_logger.debug("Ping timeout but host " + agentId + " is in resource state of " + resourceState + ", so no investigation");
logger.debug("Ping timeout but host " + agentId + " is in resource state of " + resourceState + ", so no investigation");
disconnectWithoutInvestigation(agentId, Event.ShutdownRequested);
} else {
final HostVO host = _hostDao.findById(agentId);
if (host != null && (host.getType() == Host.Type.ConsoleProxy || host.getType() == Host.Type.SecondaryStorageVM
|| host.getType() == Host.Type.SecondaryStorageCmdExecutor)) {
s_logger.warn("Disconnect agent for CPVM/SSVM due to physical connection close. host: " + host.getId());
logger.warn("Disconnect agent for CPVM/SSVM due to physical connection close. host: " + host.getId());
disconnectWithoutInvestigation(agentId, Event.ShutdownRequested);
} else {
status_logger.debug("Ping timeout for host " + agentId + ", investigating");
logger.debug("Ping timeout for host " + agentId + ", investigating");
disconnectWithInvestigation(agentId, Event.PingTimeout);
}
}
@@ -1555,10 +1552,10 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
}
}
} catch (final Throwable th) {
s_logger.error("Caught the following exception: ", th);
logger.error("Caught the following exception: ", th);
}
s_logger.trace("Agent Monitor is leaving the building!");
logger.trace("Agent Monitor is leaving the building!");
}
protected List<Long> findAgentsBehindOnPing() {
@@ -1571,7 +1568,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
}
if (agentsBehind.size() > 0) {
s_logger.info("Found the following agents behind on ping: " + agentsBehind);
logger.info("Found the following agents behind on ping: " + agentsBehind);
}
return agentsBehind;
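A side effect specific to AgentManagerImpl: the removed declarations included a second logger, status_logger = Logger.getLogger(Status.class), dedicated to host-status transitions. Folding those calls into the inherited logger moves the transition messages from the Status category to the concrete manager's category, so a log4j configuration that filtered on the Status logger name no longer isolates them. A hypothetical sketch of the routing difference (the com.example names are illustrative, not from this codebase):

import org.apache.log4j.Logger;

class CategoryDemo {
    void transition(String msg) {
        // A logger's name is its category; levels and appenders are configured
        // per category, e.g. "log4j.logger.com.example.Status=DEBUG, STATUSFILE".
        Logger statusLogger = Logger.getLogger("com.example.Status"); // pre-commit routing
        Logger classLogger = Logger.getLogger(CategoryDemo.class);    // post-commit routing

        statusLogger.debug(msg); // matched by a Status-category rule
        classLogger.debug(msg);  // matched only by CategoryDemo or root rules
    }
}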

View File (ClusteredAgentManagerImpl.java)

@@ -43,10 +43,6 @@ import javax.naming.ConfigurationException;
import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLEngine;
import org.apache.log4j.Logger;
import com.google.gson.Gson;
import org.apache.cloudstack.framework.config.ConfigDepot;
import org.apache.cloudstack.framework.config.ConfigKey;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
@@ -95,10 +91,10 @@ import com.cloud.utils.db.TransactionLegacy;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.nio.Link;
import com.cloud.utils.nio.Task;
import com.google.gson.Gson;
@Local(value = {AgentManager.class, ClusteredAgentRebalanceService.class})
public class ClusteredAgentManagerImpl extends AgentManagerImpl implements ClusterManagerListener, ClusteredAgentRebalanceService {
final static Logger s_logger = Logger.getLogger(ClusteredAgentManagerImpl.class);
private static final ScheduledExecutorService s_transferExecutor = Executors.newScheduledThreadPool(2, new NamedThreadFactory("Cluster-AgentRebalancingExecutor"));
private final long rebalanceTimeOut = 300000; // 5 mins - after this time remove the agent from the transfer list
@@ -144,7 +140,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
_sslEngines = new HashMap<String, SSLEngine>(7);
_nodeId = ManagementServerNode.getManagementServerId();
s_logger.info("Configuring ClusterAgentManagerImpl. management server node id(msid): " + _nodeId);
logger.info("Configuring ClusterAgentManagerImpl. management server node id(msid): " + _nodeId);
ClusteredAgentAttache.initialize(this);
@@ -162,8 +158,8 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
return false;
}
_timer.schedule(new DirectAgentScanTimerTask(), STARTUP_DELAY, ScanInterval.value());
if (s_logger.isDebugEnabled()) {
s_logger.debug("Scheduled direct agent scan task to run at an interval of " + ScanInterval.value() + " seconds");
if (logger.isDebugEnabled()) {
logger.debug("Scheduled direct agent scan task to run at an interval of " + ScanInterval.value() + " seconds");
}
// Schedule tasks for agent rebalancing
@@ -177,8 +173,8 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
public void scheduleHostScanTask() {
_timer.schedule(new DirectAgentScanTimerTask(), 0);
if (s_logger.isDebugEnabled()) {
s_logger.debug("Scheduled a direct agent scan task");
if (logger.isDebugEnabled()) {
logger.debug("Scheduled a direct agent scan task");
}
}
@@ -187,8 +183,8 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
}
private void scanDirectAgentToLoad() {
if (s_logger.isTraceEnabled()) {
s_logger.trace("Begin scanning directly connected hosts");
if (logger.isTraceEnabled()) {
logger.trace("Begin scanning directly connected hosts");
}
// for agents that are self-managed, threshold to be considered as disconnected after pingtimeout
@@ -199,15 +195,15 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
if (hosts != null) {
hosts.addAll(appliances);
if (hosts.size() > 0) {
s_logger.debug("Found " + hosts.size() + " unmanaged direct hosts, processing connect for them...");
logger.debug("Found " + hosts.size() + " unmanaged direct hosts, processing connect for them...");
for (HostVO host : hosts) {
try {
AgentAttache agentattache = findAttache(host.getId());
if (agentattache != null) {
// already loaded, skip
if (agentattache.forForward()) {
if (s_logger.isInfoEnabled()) {
s_logger.info(host + " is detected down, but we have a forward attache running, disconnect this one before launching the host");
if (logger.isInfoEnabled()) {
logger.info(host + " is detected down, but we have a forward attache running, disconnect this one before launching the host");
}
removeAgent(agentattache, Status.Disconnected);
} else {
@@ -215,18 +211,18 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
}
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("Loading directly connected host " + host.getId() + "(" + host.getName() + ")");
if (logger.isDebugEnabled()) {
logger.debug("Loading directly connected host " + host.getId() + "(" + host.getName() + ")");
}
loadDirectlyConnectedHost(host, false);
} catch (Throwable e) {
s_logger.warn("Cannot load directly connected host " + host.getId() + "(" + host.getName() + ") due to ", e);
logger.warn("Cannot load directly connected host " + host.getId() + "(" + host.getName() + ") due to ", e);
}
}
}
}
if (s_logger.isTraceEnabled()) {
s_logger.trace("End scanning directly connected hosts");
if (logger.isTraceEnabled()) {
logger.trace("End scanning directly connected hosts");
}
}
@@ -236,7 +232,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
try {
runDirectAgentScanTimerTask();
} catch (Throwable e) {
s_logger.error("Unexpected exception " + e.getMessage(), e);
logger.error("Unexpected exception " + e.getMessage(), e);
}
}
}
@@ -247,7 +243,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
}
protected AgentAttache createAttache(long id) {
s_logger.debug("create forwarding ClusteredAgentAttache for " + id);
logger.debug("create forwarding ClusteredAgentAttache for " + id);
HostVO host = _hostDao.findById(id);
final AgentAttache attache = new ClusteredAgentAttache(this, id, host.getName());
AgentAttache old = null;
@@ -256,8 +252,8 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
_agents.put(id, attache);
}
if (old != null) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Remove stale agent attache from current management server");
if (logger.isDebugEnabled()) {
logger.debug("Remove stale agent attache from current management server");
}
removeAgent(old, Status.Removed);
}
@@ -266,7 +262,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
@Override
protected AgentAttache createAttacheForConnect(HostVO host, Link link) {
s_logger.debug("create ClusteredAgentAttache for " + host.getId());
logger.debug("create ClusteredAgentAttache for " + host.getId());
final AgentAttache attache = new ClusteredAgentAttache(this, host.getId(), host.getName(), link, host.isInMaintenanceStates());
link.attach(attache);
AgentAttache old = null;
@@ -282,7 +278,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
@Override
protected AgentAttache createAttacheForDirectConnect(Host host, ServerResource resource) {
s_logger.debug("create ClusteredDirectAgentAttache for " + host.getId());
logger.debug("create ClusteredDirectAgentAttache for " + host.getId());
final DirectAgentAttache attache = new ClusteredDirectAgentAttache(this, host.getId(), host.getName(), _nodeId, resource, host.isInMaintenanceStates());
AgentAttache old = null;
synchronized (_agents) {
@@ -326,8 +322,8 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
@Override
public boolean executeUserRequest(long hostId, Event event) throws AgentUnavailableException {
if (event == Event.AgentDisconnected) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Received agent disconnect event for host " + hostId);
if (logger.isDebugEnabled()) {
logger.debug("Received agent disconnect event for host " + hostId);
}
AgentAttache attache = findAttache(hostId);
if (attache != null) {
@ -336,7 +332,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
HostTransferMapVO transferVO = _hostTransferDao.findById(hostId);
if (transferVO != null) {
if (transferVO.getFutureOwner() == _nodeId && transferVO.getState() == HostTransferState.TransferStarted) {
s_logger.debug("Not processing " + Event.AgentDisconnected + " event for the host id=" + hostId + " as the host is being connected to " +
logger.debug("Not processing " + Event.AgentDisconnected + " event for the host id=" + hostId + " as the host is being connected to " +
_nodeId);
return true;
}
@ -346,7 +342,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
// don't process disconnect if the disconnect came for the host via delayed cluster notification,
// but the host has already reconnected to the current management server
if (!attache.forForward()) {
s_logger.debug("Not processing " + Event.AgentDisconnected + " event for the host id=" + hostId +
logger.debug("Not processing " + Event.AgentDisconnected + " event for the host id=" + hostId +
" as the host is directly connected to the current management server " + _nodeId);
return true;
}
@ -369,7 +365,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
return result;
}
} catch (AgentUnavailableException e) {
s_logger.debug("cannot propagate agent reconnect because agent is not available", e);
logger.debug("cannot propagate agent reconnect because agent is not available", e);
return false;
}
@ -377,32 +373,27 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
}
public void notifyNodesInCluster(AgentAttache attache) {
s_logger.debug("Notifying other nodes of to disconnect");
logger.debug("Notifying other nodes of to disconnect");
Command[] cmds = new Command[] {new ChangeAgentCommand(attache.getId(), Event.AgentDisconnected)};
_clusterMgr.broadcast(attache.getId(), _gson.toJson(cmds));
}
// notifies MS peers to schedule a host scan task immediately, triggered during addHost operation
public void notifyNodesInClusterToScheduleHostScanTask() {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Notifying other MS nodes to run host scan task");
if (logger.isDebugEnabled()) {
logger.debug("Notifying other MS nodes to run host scan task");
}
Command[] cmds = new Command[] {new ScheduleHostScanTaskCommand()};
_clusterMgr.broadcast(0, _gson.toJson(cmds));
}
protected static void logT(byte[] bytes, final String msg) {
s_logger.trace("Seq " + Request.getAgentId(bytes) + "-" + Request.getSequence(bytes) + ": MgmtId " + Request.getManagementServerId(bytes) + ": " +
protected void logD(byte[] bytes, final String msg) {
logger.debug("Seq " + Request.getAgentId(bytes) + "-" + Request.getSequence(bytes) + ": MgmtId " + Request.getManagementServerId(bytes) + ": " +
(Request.isRequest(bytes) ? "Req: " : "Resp: ") + msg);
}
protected static void logD(byte[] bytes, final String msg) {
s_logger.debug("Seq " + Request.getAgentId(bytes) + "-" + Request.getSequence(bytes) + ": MgmtId " + Request.getManagementServerId(bytes) + ": " +
(Request.isRequest(bytes) ? "Req: " : "Resp: ") + msg);
}
protected static void logI(byte[] bytes, final String msg) {
s_logger.info("Seq " + Request.getAgentId(bytes) + "-" + Request.getSequence(bytes) + ": MgmtId " + Request.getManagementServerId(bytes) + ": " +
protected void logI(byte[] bytes, final String msg) {
logger.info("Seq " + Request.getAgentId(bytes) + "-" + Request.getSequence(bytes) + ": MgmtId " + Request.getManagementServerId(bytes) + ": " +
(Request.isRequest(bytes) ? "Req: " : "Resp: ") + msg);
}
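
The logD/logI rewrite above is the commit's core pattern in miniature: the helpers drop static so they can use an instance logger, which the class presumably inherits from a common base instead of declaring its own s_logger. A sketch of that refactor with invented class names, not the real CloudStack hierarchy:

```java
import org.apache.log4j.Logger;

abstract class BaseComponent {
    // getClass() resolves to the concrete subclass at runtime, so every
    // subclass logs under its own name without declaring a logger field.
    protected Logger logger = Logger.getLogger(getClass());
}

class SampleAgentManager extends BaseComponent {
    void connect(long hostId) {
        logger.debug("create attache for " + hostId); // logged as SampleAgentManager
    }
}
```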
@ -427,7 +418,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
return false;
}
try {
if (s_logger.isDebugEnabled()) {
if (logger.isDebugEnabled()) {
logD(bytes, "Routing to peer");
}
Link.write(ch, new ByteBuffer[] {ByteBuffer.wrap(bytes)}, sslEngine);
@ -467,7 +458,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
try {
ch.close();
} catch (IOException e) {
s_logger.warn("Unable to close peer socket connection to " + peerName);
logger.warn("Unable to close peer socket connection to " + peerName);
}
}
_peers.remove(peerName);
@ -483,14 +474,14 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
try {
prevCh.close();
} catch (Exception e) {
s_logger.info("[ignored]"
logger.info("[ignored]"
+ "failed to get close resource for previous channel Socket: " + e.getLocalizedMessage());
}
}
if (ch == null || ch == prevCh) {
ManagementServerHost ms = _clusterMgr.getPeer(peerName);
if (ms == null) {
s_logger.info("Unable to find peer: " + peerName);
logger.info("Unable to find peer: " + peerName);
return null;
}
String ip = ms.getServiceIP();
@ -513,13 +504,13 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
sslEngine.setEnabledProtocols(SSLUtils.getSupportedProtocols(sslEngine.getEnabledProtocols()));
Link.doHandshake(ch1, sslEngine, true);
s_logger.info("SSL: Handshake done");
logger.info("SSL: Handshake done");
} catch (Exception e) {
ch1.close();
throw new IOException("SSL: Fail to init SSL! " + e);
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("Connection to peer opened: " + peerName + ", ip: " + ip);
if (logger.isDebugEnabled()) {
logger.debug("Connection to peer opened: " + peerName + ", ip: " + ip);
}
_peers.put(peerName, ch1);
_sslEngines.put(peerName, sslEngine);
@ -528,15 +519,15 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
try {
ch1.close();
} catch (IOException ex) {
s_logger.error("failed to close failed peer socket: " + ex);
logger.error("failed to close failed peer socket: " + ex);
}
s_logger.warn("Unable to connect to peer management server: " + peerName + ", ip: " + ip + " due to " + e.getMessage(), e);
logger.warn("Unable to connect to peer management server: " + peerName + ", ip: " + ip + " due to " + e.getMessage(), e);
return null;
}
}
if (s_logger.isTraceEnabled()) {
s_logger.trace("Found open channel for peer: " + peerName);
if (logger.isTraceEnabled()) {
logger.trace("Found open channel for peer: " + peerName);
}
return ch;
}
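
connectToPeer above keeps one open SocketChannel per peer and re-dials only when the cached channel is missing or stale. A stripped-down sketch of that cache, leaving out the SSL handshake; the names are illustrative:

```java
import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.channels.SocketChannel;
import java.util.HashMap;
import java.util.Map;

public class PeerChannelCache {
    private final Map<String, SocketChannel> peers = new HashMap<>();

    public synchronized SocketChannel connectToPeer(String peerName, String ip, int port) throws IOException {
        SocketChannel ch = peers.get(peerName);
        if (ch != null && ch.isConnected()) {
            return ch; // reuse the open channel
        }
        ch = SocketChannel.open(new InetSocketAddress(ip, port));
        peers.put(peerName, ch); // remember it for the next caller
        return ch;
    }
}
```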
@ -562,8 +553,8 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
AgentAttache agent = findAttache(hostId);
if (agent == null || !agent.forForward()) {
if (isHostOwnerSwitched(host)) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Host " + hostId + " has switched to another management server, need to update agent map with a forwarding agent attache");
if (logger.isDebugEnabled()) {
logger.debug("Host " + hostId + " has switched to another management server, need to update agent map with a forwarding agent attache");
}
agent = createAttache(hostId);
}
@ -582,10 +573,10 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
if (_peers != null) {
for (SocketChannel ch : _peers.values()) {
try {
s_logger.info("Closing: " + ch.toString());
logger.info("Closing: " + ch.toString());
ch.close();
} catch (IOException e) {
s_logger.info("[ignored] error on closing channel: " +ch.toString(), e);
logger.info("[ignored] error on closing channel: " +ch.toString(), e);
}
}
}
@ -622,7 +613,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
final byte[] data = task.getData();
Version ver = Request.getVersion(data);
if (ver.ordinal() != Version.v1.ordinal() && ver.ordinal() != Version.v3.ordinal()) {
s_logger.warn("Wrong version for clustered agent request");
logger.warn("Wrong version for clustered agent request");
super.doTask(task);
return;
}
@ -642,7 +633,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
Request req = Request.parse(data);
Command[] cmds = req.getCommands();
CancelCommand cancel = (CancelCommand)cmds[0];
if (s_logger.isDebugEnabled()) {
if (logger.isDebugEnabled()) {
logD(data, "Cancel request received");
}
agent.cancel(cancel.getSequence());
@ -690,7 +681,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
AgentAttache attache = (AgentAttache)link.attachment();
if (attache != null) {
attache.sendNext(Request.getSequence(data));
} else if (s_logger.isDebugEnabled()) {
} else if (logger.isDebugEnabled()) {
logD(data, "No attache to process " + Request.parse(data).toString());
}
}
@ -703,11 +694,11 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
final Response response = Response.parse(data);
AgentAttache attache = findAttache(response.getAgentId());
if (attache == null) {
s_logger.info("SeqA " + response.getAgentId() + "-" + response.getSequence() + "Unable to find attache to forward " + response.toString());
logger.info("SeqA " + response.getAgentId() + "-" + response.getSequence() + "Unable to find attache to forward " + response.toString());
return;
}
if (!attache.processAnswers(response.getSequence(), response)) {
s_logger.info("SeqA " + attache.getId() + "-" + response.getSequence() + ": Response is not processed: " + response.toString());
logger.info("SeqA " + attache.getId() + "-" + response.getSequence() + ": Response is not processed: " + response.toString());
}
}
return;
@ -726,10 +717,10 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
@Override
public void onManagementNodeLeft(List<? extends ManagementServerHost> nodeList, long selfNodeId) {
for (ManagementServerHost vo : nodeList) {
s_logger.info("Marking hosts as disconnected on Management server" + vo.getMsid());
logger.info("Marking hosts as disconnected on Management server" + vo.getMsid());
long lastPing = (System.currentTimeMillis() >> 10) - getTimeout();
_hostDao.markHostsAsDisconnected(vo.getMsid(), lastPing);
s_logger.info("Deleting entries from op_host_transfer table for Management server " + vo.getMsid());
logger.info("Deleting entries from op_host_transfer table for Management server " + vo.getMsid());
cleanupTransferMap(vo.getMsid());
}
}
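
The lastPing cutoff above shifts milliseconds right by 10 bits, dividing by 1024 as a cheap stand-in for dividing by 1000, so the value is in roughly one-second ticks. A worked example of the arithmetic; the timeout value is hypothetical:

```java
public class PingCutoffExample {
    public static void main(String[] args) {
        long nowMs = System.currentTimeMillis();
        long approxTicks = nowMs >> 10;  // divide by 1024, ~2.3% smaller than nowMs / 1000
        long exactSeconds = nowMs / 1000;
        System.out.println(approxTicks + " vs " + exactSeconds);

        long timeout = 120;              // hypothetical getTimeout() value, in the same ticks
        long lastPingCutoff = approxTicks - timeout;
        System.out.println("hosts not seen since tick " + lastPingCutoff + " are marked disconnected");
    }
}
```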
@ -757,7 +748,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
try {
result = rebalanceHost(agentId, currentOwnerId, futureOwnerId);
} catch (Exception e) {
s_logger.warn("Unable to rebalance host id=" + agentId, e);
logger.warn("Unable to rebalance host id=" + agentId, e);
}
}
return result;
@ -772,14 +763,14 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
protected volatile boolean cancelled = false;
public AgentLoadBalancerTask() {
s_logger.debug("Agent load balancer task created");
logger.debug("Agent load balancer task created");
}
@Override
public synchronized boolean cancel() {
if (!cancelled) {
cancelled = true;
s_logger.debug("Agent load balancer task cancelled");
logger.debug("Agent load balancer task cancelled");
return super.cancel();
}
return true;
@ -790,19 +781,19 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
try {
if (!cancelled) {
startRebalanceAgents();
if (s_logger.isInfoEnabled()) {
s_logger.info("The agent load balancer task is now being cancelled");
if (logger.isInfoEnabled()) {
logger.info("The agent load balancer task is now being cancelled");
}
cancelled = true;
}
} catch (Throwable e) {
s_logger.error("Unexpected exception " + e.toString(), e);
logger.error("Unexpected exception " + e.toString(), e);
}
}
}
public void startRebalanceAgents() {
s_logger.debug("Management server " + _nodeId + " is asking other peers to rebalance their agents");
logger.debug("Management server " + _nodeId + " is asking other peers to rebalance their agents");
List<ManagementServerHostVO> allMS = _mshostDao.listBy(ManagementServerHost.State.Up);
QueryBuilder<HostVO> sc = QueryBuilder.create(HostVO.class);
sc.and(sc.entity().getManagementServerId(), Op.NNULL);
@ -814,16 +805,16 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
if (!allManagedAgents.isEmpty() && !allMS.isEmpty()) {
avLoad = allManagedAgents.size() / allMS.size();
} else {
if (s_logger.isDebugEnabled()) {
s_logger.debug("There are no hosts to rebalance in the system. Current number of active management server nodes in the system is " + allMS.size() +
if (logger.isDebugEnabled()) {
logger.debug("There are no hosts to rebalance in the system. Current number of active management server nodes in the system is " + allMS.size() +
"; number of managed agents is " + allManagedAgents.size());
}
return;
}
if (avLoad == 0L) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("As calculated average load is less than 1, rounding it to 1");
if (logger.isDebugEnabled()) {
logger.debug("As calculated average load is less than 1, rounding it to 1");
}
avLoad = 1;
}
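
The avLoad computation above uses integer division, so it floors to 0 whenever there are fewer managed agents than management servers, and the follow-up branch bumps it to 1. A small worked example:

```java
public class AverageLoadExample {
    public static void main(String[] args) {
        int managedAgents = 2;
        int managementServers = 3;
        long avLoad = managedAgents / managementServers; // integer division: 0
        if (avLoad == 0L) {
            avLoad = 1; // treat "less than one host per server" as one
        }
        System.out.println("average load per management server: " + avLoad);
    }
}
```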
@ -837,19 +828,19 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
if (hostsToRebalance != null && !hostsToRebalance.isEmpty()) {
break;
} else {
s_logger.debug("Agent load balancer planner " + lbPlanner.getName() + " found no hosts to be rebalanced from management server " + node.getMsid());
logger.debug("Agent load balancer planner " + lbPlanner.getName() + " found no hosts to be rebalanced from management server " + node.getMsid());
}
}
if (hostsToRebalance != null && !hostsToRebalance.isEmpty()) {
s_logger.debug("Found " + hostsToRebalance.size() + " hosts to rebalance from management server " + node.getMsid());
logger.debug("Found " + hostsToRebalance.size() + " hosts to rebalance from management server " + node.getMsid());
for (HostVO host : hostsToRebalance) {
long hostId = host.getId();
s_logger.debug("Asking management server " + node.getMsid() + " to give away host id=" + hostId);
logger.debug("Asking management server " + node.getMsid() + " to give away host id=" + hostId);
boolean result = true;
if (_hostTransferDao.findById(hostId) != null) {
s_logger.warn("Somebody else is already rebalancing host id: " + hostId);
logger.warn("Somebody else is already rebalancing host id: " + hostId);
continue;
}
@ -858,18 +849,18 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
transfer = _hostTransferDao.startAgentTransfering(hostId, node.getMsid(), _nodeId);
Answer[] answer = sendRebalanceCommand(node.getMsid(), hostId, node.getMsid(), _nodeId, Event.RequestAgentRebalance);
if (answer == null) {
s_logger.warn("Failed to get host id=" + hostId + " from management server " + node.getMsid());
logger.warn("Failed to get host id=" + hostId + " from management server " + node.getMsid());
result = false;
}
} catch (Exception ex) {
s_logger.warn("Failed to get host id=" + hostId + " from management server " + node.getMsid(), ex);
logger.warn("Failed to get host id=" + hostId + " from management server " + node.getMsid(), ex);
result = false;
} finally {
if (transfer != null) {
HostTransferMapVO transferState = _hostTransferDao.findByIdAndFutureOwnerId(transfer.getId(), _nodeId);
if (!result && transferState != null && transferState.getState() == HostTransferState.TransferRequested) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Removing mapping from op_host_transfer as it failed to be set to transfer mode");
if (logger.isDebugEnabled()) {
logger.debug("Removing mapping from op_host_transfer as it failed to be set to transfer mode");
}
// just remove the mapping (if exists) as nothing was done on the peer management
// server yet
@ -879,7 +870,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
}
}
} else {
s_logger.debug("Found no hosts to rebalance from the management server " + node.getMsid());
logger.debug("Found no hosts to rebalance from the management server " + node.getMsid());
}
}
}
@ -893,8 +884,8 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
Command[] cmds = commands.toCommands();
try {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Forwarding " + cmds[0].toString() + " to " + peer);
if (logger.isDebugEnabled()) {
logger.debug("Forwarding " + cmds[0].toString() + " to " + peer);
}
String peerName = Long.toString(peer);
String cmdStr = _gson.toJson(cmds);
@ -902,7 +893,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
Answer[] answers = _gson.fromJson(ansStr, Answer[].class);
return answers;
} catch (Exception e) {
s_logger.warn("Caught exception while talking to " + currentOwnerId, e);
logger.warn("Caught exception while talking to " + currentOwnerId, e);
return null;
}
}
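
sendRebalanceCommand above ships the Command array to the peer as JSON and decodes the reply into an Answer array. A self-contained sketch of that gson round-trip; the Command type here is a stand-in, not the CloudStack class:

```java
import com.google.gson.Gson;

public class PeerForwardingExample {
    static class Command { long agentId; String event; }

    public static void main(String[] args) {
        Gson gson = new Gson();
        Command cmd = new Command();
        cmd.agentId = 7L;
        cmd.event = "RequestAgentRebalance";

        String wire = gson.toJson(new Command[] {cmd});           // what goes to the peer
        Command[] decoded = gson.fromJson(wire, Command[].class); // what the peer parses
        System.out.println(wire + " -> agent " + decoded[0].agentId);
    }
}
```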
@ -926,8 +917,8 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
return null;
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("Propagating agent change request event:" + event.toString() + " to agent:" + agentId);
if (logger.isDebugEnabled()) {
logger.debug("Propagating agent change request event:" + event.toString() + " to agent:" + agentId);
}
Command[] cmds = new Command[1];
cmds[0] = new ChangeAgentCommand(agentId, event);
@ -939,8 +930,8 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
Answer[] answers = _gson.fromJson(ansStr, Answer[].class);
if (s_logger.isDebugEnabled()) {
s_logger.debug("Result for agent change is " + answers[0].getResult());
if (logger.isDebugEnabled()) {
logger.debug("Result for agent change is " + answers[0].getResult());
}
return answers[0].getResult();
@ -951,12 +942,12 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
@Override
protected void runInContext() {
try {
if (s_logger.isTraceEnabled()) {
s_logger.trace("Clustered agent transfer scan check, management server id:" + _nodeId);
if (logger.isTraceEnabled()) {
logger.trace("Clustered agent transfer scan check, management server id:" + _nodeId);
}
synchronized (_agentToTransferIds) {
if (_agentToTransferIds.size() > 0) {
s_logger.debug("Found " + _agentToTransferIds.size() + " agents to transfer");
logger.debug("Found " + _agentToTransferIds.size() + " agents to transfer");
// for (Long hostId : _agentToTransferIds) {
for (Iterator<Long> iterator = _agentToTransferIds.iterator(); iterator.hasNext();) {
Long hostId = iterator.next();
@ -973,14 +964,14 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
_hostTransferDao.findActiveHostTransferMapByHostId(hostId, new Date(cutTime.getTime() - rebalanceTimeOut));
if (transferMap == null) {
s_logger.debug("Timed out waiting for the host id=" + hostId + " to be ready to transfer, skipping rebalance for the host");
logger.debug("Timed out waiting for the host id=" + hostId + " to be ready to transfer, skipping rebalance for the host");
iterator.remove();
_hostTransferDao.completeAgentTransfer(hostId);
continue;
}
if (transferMap.getInitialOwner() != _nodeId || attache == null || attache.forForward()) {
s_logger.debug("Management server " + _nodeId + " doesn't own host id=" + hostId + " any more, skipping rebalance for the host");
logger.debug("Management server " + _nodeId + " doesn't own host id=" + hostId + " any more, skipping rebalance for the host");
iterator.remove();
_hostTransferDao.completeAgentTransfer(hostId);
continue;
@ -988,7 +979,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
ManagementServerHostVO ms = _mshostDao.findByMsid(transferMap.getFutureOwner());
if (ms != null && ms.getState() != ManagementServerHost.State.Up) {
s_logger.debug("Can't transfer host " + hostId + " as it's future owner is not in UP state: " + ms +
logger.debug("Can't transfer host " + hostId + " as it's future owner is not in UP state: " + ms +
", skipping rebalance for the host");
iterator.remove();
_hostTransferDao.completeAgentTransfer(hostId);
@ -1000,31 +991,31 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
try {
_executor.execute(new RebalanceTask(hostId, transferMap.getInitialOwner(), transferMap.getFutureOwner()));
} catch (RejectedExecutionException ex) {
s_logger.warn("Failed to submit rebalance task for host id=" + hostId + "; postponing the execution");
logger.warn("Failed to submit rebalance task for host id=" + hostId + "; postponing the execution");
continue;
}
} else {
s_logger.debug("Agent " + hostId + " can't be transfered yet as its request queue size is " + attache.getQueueSize() +
logger.debug("Agent " + hostId + " can't be transfered yet as its request queue size is " + attache.getQueueSize() +
" and listener queue size is " + attache.getNonRecurringListenersSize());
}
}
} else {
if (s_logger.isTraceEnabled()) {
s_logger.trace("Found no agents to be transfered by the management server " + _nodeId);
if (logger.isTraceEnabled()) {
logger.trace("Found no agents to be transfered by the management server " + _nodeId);
}
}
}
} catch (Throwable e) {
s_logger.error("Problem with the clustered agent transfer scan check!", e);
logger.error("Problem with the clustered agent transfer scan check!", e);
}
}
};
}
private boolean setToWaitForRebalance(final long hostId, long currentOwnerId, long futureOwnerId) {
s_logger.debug("Adding agent " + hostId + " to the list of agents to transfer");
logger.debug("Adding agent " + hostId + " to the list of agents to transfer");
synchronized (_agentToTransferIds) {
return _agentToTransferIds.add(hostId);
}
@ -1035,7 +1026,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
boolean result = true;
if (currentOwnerId == _nodeId) {
if (!startRebalance(hostId)) {
s_logger.debug("Failed to start agent rebalancing");
logger.debug("Failed to start agent rebalancing");
finishRebalance(hostId, futureOwnerId, Event.RebalanceFailed);
return false;
}
@ -1046,23 +1037,23 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
}
} catch (Exception ex) {
s_logger.warn("Host " + hostId + " failed to connect to the management server " + futureOwnerId + " as a part of rebalance process", ex);
logger.warn("Host " + hostId + " failed to connect to the management server " + futureOwnerId + " as a part of rebalance process", ex);
result = false;
}
if (result) {
s_logger.debug("Successfully transfered host id=" + hostId + " to management server " + futureOwnerId);
logger.debug("Successfully transfered host id=" + hostId + " to management server " + futureOwnerId);
finishRebalance(hostId, futureOwnerId, Event.RebalanceCompleted);
} else {
s_logger.warn("Failed to transfer host id=" + hostId + " to management server " + futureOwnerId);
logger.warn("Failed to transfer host id=" + hostId + " to management server " + futureOwnerId);
finishRebalance(hostId, futureOwnerId, Event.RebalanceFailed);
}
} else if (futureOwnerId == _nodeId) {
HostVO host = _hostDao.findById(hostId);
try {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Disconnecting host " + host.getId() + "(" + host.getName() + " as a part of rebalance process without notification");
if (logger.isDebugEnabled()) {
logger.debug("Disconnecting host " + host.getId() + "(" + host.getName() + " as a part of rebalance process without notification");
}
AgentAttache attache = findAttache(hostId);
@ -1071,26 +1062,26 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
}
if (result) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Loading directly connected host " + host.getId() + "(" + host.getName() + ") to the management server " + _nodeId +
if (logger.isDebugEnabled()) {
logger.debug("Loading directly connected host " + host.getId() + "(" + host.getName() + ") to the management server " + _nodeId +
" as a part of rebalance process");
}
result = loadDirectlyConnectedHost(host, true);
} else {
s_logger.warn("Failed to disconnect " + host.getId() + "(" + host.getName() + " as a part of rebalance process without notification");
logger.warn("Failed to disconnect " + host.getId() + "(" + host.getName() + " as a part of rebalance process without notification");
}
} catch (Exception ex) {
s_logger.warn("Failed to load directly connected host " + host.getId() + "(" + host.getName() + ") to the management server " + _nodeId +
logger.warn("Failed to load directly connected host " + host.getId() + "(" + host.getName() + ") to the management server " + _nodeId +
" as a part of rebalance process due to:", ex);
result = false;
}
if (result) {
s_logger.debug("Successfully loaded directly connected host " + host.getId() + "(" + host.getName() + ") to the management server " + _nodeId +
logger.debug("Successfully loaded directly connected host " + host.getId() + "(" + host.getName() + ") to the management server " + _nodeId +
" as a part of rebalance process");
} else {
s_logger.warn("Failed to load directly connected host " + host.getId() + "(" + host.getName() + ") to the management server " + _nodeId +
logger.warn("Failed to load directly connected host " + host.getId() + "(" + host.getName() + ") to the management server " + _nodeId +
" as a part of rebalance process");
}
}
@ -1101,13 +1092,13 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
protected void finishRebalance(final long hostId, long futureOwnerId, Event event) {
boolean success = (event == Event.RebalanceCompleted) ? true : false;
if (s_logger.isDebugEnabled()) {
s_logger.debug("Finishing rebalancing for the agent " + hostId + " with event " + event);
if (logger.isDebugEnabled()) {
logger.debug("Finishing rebalancing for the agent " + hostId + " with event " + event);
}
AgentAttache attache = findAttache(hostId);
if (attache == null || !(attache instanceof ClusteredAgentAttache)) {
s_logger.debug("Unable to find forward attache for the host id=" + hostId + ", assuming that the agent disconnected already");
logger.debug("Unable to find forward attache for the host id=" + hostId + ", assuming that the agent disconnected already");
_hostTransferDao.completeAgentTransfer(hostId);
return;
}
@ -1122,7 +1113,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
// 2) Get all transfer requests and route them to peer
Request requestToTransfer = forwardAttache.getRequestToTransfer();
while (requestToTransfer != null) {
s_logger.debug("Forwarding request " + requestToTransfer.getSequence() + " held in transfer attache " + hostId + " from the management server " +
logger.debug("Forwarding request " + requestToTransfer.getSequence() + " held in transfer attache " + hostId + " from the management server " +
_nodeId + " to " + futureOwnerId);
boolean routeResult = routeToPeer(Long.toString(futureOwnerId), requestToTransfer.getBytes());
if (!routeResult) {
@ -1132,23 +1123,23 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
requestToTransfer = forwardAttache.getRequestToTransfer();
}
s_logger.debug("Management server " + _nodeId + " completed agent " + hostId + " rebalance to " + futureOwnerId);
logger.debug("Management server " + _nodeId + " completed agent " + hostId + " rebalance to " + futureOwnerId);
} else {
failRebalance(hostId);
}
s_logger.debug("Management server " + _nodeId + " completed agent " + hostId + " rebalance");
logger.debug("Management server " + _nodeId + " completed agent " + hostId + " rebalance");
_hostTransferDao.completeAgentTransfer(hostId);
}
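
finishRebalance above replays, in order, the requests the forward attache queued while the host was in transfer mode, routing each to the future owner. A condensed sketch of that drain loop with stand-in types:

```java
import java.util.ArrayDeque;
import java.util.Deque;

public class TransferDrainExample {
    interface PeerRouter { boolean routeToPeer(String peer, byte[] bytes); }

    private final Deque<byte[]> heldRequests = new ArrayDeque<>();

    void finishTransfer(String futureOwner, PeerRouter router) {
        byte[] request = heldRequests.poll();
        while (request != null) {
            if (!router.routeToPeer(futureOwner, request)) {
                break; // the real code logs the failure and stops forwarding
            }
            request = heldRequests.poll();
        }
    }
}
```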
protected void failRebalance(final long hostId) {
try {
s_logger.debug("Management server " + _nodeId + " failed to rebalance agent " + hostId);
logger.debug("Management server " + _nodeId + " failed to rebalance agent " + hostId);
_hostTransferDao.completeAgentTransfer(hostId);
handleDisconnectWithoutInvestigation(findAttache(hostId), Event.RebalanceFailed, true, true);
} catch (Exception ex) {
s_logger.warn("Failed to reconnect host id=" + hostId + " as a part of failed rebalance task cleanup");
logger.warn("Failed to reconnect host id=" + hostId + " as a part of failed rebalance task cleanup");
}
}
@ -1156,7 +1147,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
HostVO host = _hostDao.findById(hostId);
if (host == null || host.getRemoved() != null) {
s_logger.warn("Unable to find host record, fail start rebalancing process");
logger.warn("Unable to find host record, fail start rebalancing process");
return false;
}
@ -1166,17 +1157,17 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
handleDisconnectWithoutInvestigation(attache, Event.StartAgentRebalance, true, true);
ClusteredAgentAttache forwardAttache = (ClusteredAgentAttache)createAttache(hostId);
if (forwardAttache == null) {
s_logger.warn("Unable to create a forward attache for the host " + hostId + " as a part of rebalance process");
logger.warn("Unable to create a forward attache for the host " + hostId + " as a part of rebalance process");
return false;
}
s_logger.debug("Putting agent id=" + hostId + " to transfer mode");
logger.debug("Putting agent id=" + hostId + " to transfer mode");
forwardAttache.setTransferMode(true);
_agents.put(hostId, forwardAttache);
} else {
if (attache == null) {
s_logger.warn("Attache for the agent " + hostId + " no longer exists on management server " + _nodeId + ", can't start host rebalancing");
logger.warn("Attache for the agent " + hostId + " no longer exists on management server " + _nodeId + ", can't start host rebalancing");
} else {
s_logger.warn("Attache for the agent " + hostId + " has request queue size= " + attache.getQueueSize() + " and listener queue size " +
logger.warn("Attache for the agent " + hostId + " has request queue size= " + attache.getQueueSize() + " and listener queue size " +
attache.getNonRecurringListenersSize() + ", can't start host rebalancing");
}
return false;
@ -1213,19 +1204,19 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
@Override
protected void runInContext() {
try {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Rebalancing host id=" + hostId);
if (logger.isDebugEnabled()) {
logger.debug("Rebalancing host id=" + hostId);
}
rebalanceHost(hostId, currentOwnerId, futureOwnerId);
} catch (Exception e) {
s_logger.warn("Unable to rebalance host id=" + hostId, e);
logger.warn("Unable to rebalance host id=" + hostId, e);
}
}
}
private String handleScheduleHostScanTaskCommand(ScheduleHostScanTaskCommand cmd) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Intercepting resource manager command: " + _gson.toJson(cmd));
if (logger.isDebugEnabled()) {
logger.debug("Intercepting resource manager command: " + _gson.toJson(cmd));
}
try {
@ -1233,7 +1224,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
} catch (Exception e) {
// Scheduling host scan task in peer MS is a best effort operation during host add, regular host scan
// happens at fixed intervals anyways. So handling any exceptions that may be thrown
s_logger.warn("Exception happened while trying to schedule host scan task on mgmt server " + _clusterMgr.getSelfPeerName() +
logger.warn("Exception happened while trying to schedule host scan task on mgmt server " + _clusterMgr.getSelfPeerName() +
", ignoring as regular host scan happens at fixed interval anyways", e);
return null;
}
@ -1260,8 +1251,8 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
@Override
public String dispatch(ClusterServicePdu pdu) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Dispatch ->" + pdu.getAgentId() + ", json: " + pdu.getJsonPackage());
if (logger.isDebugEnabled()) {
logger.debug("Dispatch ->" + pdu.getAgentId() + ", json: " + pdu.getJsonPackage());
}
Command[] cmds = null;
@ -1269,24 +1260,24 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
cmds = _gson.fromJson(pdu.getJsonPackage(), Command[].class);
} catch (Throwable e) {
assert (false);
s_logger.error("Excection in gson decoding : ", e);
logger.error("Excection in gson decoding : ", e);
}
if (cmds.length == 1 && cmds[0] instanceof ChangeAgentCommand) { // intercepted
ChangeAgentCommand cmd = (ChangeAgentCommand)cmds[0];
if (s_logger.isDebugEnabled()) {
s_logger.debug("Intercepting command for agent change: agent " + cmd.getAgentId() + " event: " + cmd.getEvent());
if (logger.isDebugEnabled()) {
logger.debug("Intercepting command for agent change: agent " + cmd.getAgentId() + " event: " + cmd.getEvent());
}
boolean result = false;
try {
result = executeAgentUserRequest(cmd.getAgentId(), cmd.getEvent());
if (s_logger.isDebugEnabled()) {
s_logger.debug("Result is " + result);
if (logger.isDebugEnabled()) {
logger.debug("Result is " + result);
}
} catch (AgentUnavailableException e) {
s_logger.warn("Agent is unavailable", e);
logger.warn("Agent is unavailable", e);
return null;
}
@ -1296,21 +1287,21 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
} else if (cmds.length == 1 && cmds[0] instanceof TransferAgentCommand) {
TransferAgentCommand cmd = (TransferAgentCommand)cmds[0];
if (s_logger.isDebugEnabled()) {
s_logger.debug("Intercepting command for agent rebalancing: agent " + cmd.getAgentId() + " event: " + cmd.getEvent());
if (logger.isDebugEnabled()) {
logger.debug("Intercepting command for agent rebalancing: agent " + cmd.getAgentId() + " event: " + cmd.getEvent());
}
boolean result = false;
try {
result = rebalanceAgent(cmd.getAgentId(), cmd.getEvent(), cmd.getCurrentOwner(), cmd.getFutureOwner());
if (s_logger.isDebugEnabled()) {
s_logger.debug("Result is " + result);
if (logger.isDebugEnabled()) {
logger.debug("Result is " + result);
}
} catch (AgentUnavailableException e) {
s_logger.warn("Agent is unavailable", e);
logger.warn("Agent is unavailable", e);
return null;
} catch (OperationTimedoutException e) {
s_logger.warn("Operation timed out", e);
logger.warn("Operation timed out", e);
return null;
}
Answer[] answers = new Answer[1];
@ -1319,14 +1310,14 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
} else if (cmds.length == 1 && cmds[0] instanceof PropagateResourceEventCommand) {
PropagateResourceEventCommand cmd = (PropagateResourceEventCommand)cmds[0];
s_logger.debug("Intercepting command to propagate event " + cmd.getEvent().name() + " for host " + cmd.getHostId());
logger.debug("Intercepting command to propagate event " + cmd.getEvent().name() + " for host " + cmd.getHostId());
boolean result = false;
try {
result = _resourceMgr.executeUserRequest(cmd.getHostId(), cmd.getEvent());
s_logger.debug("Result is " + result);
logger.debug("Result is " + result);
} catch (AgentUnavailableException ex) {
s_logger.warn("Agent is unavailable", ex);
logger.warn("Agent is unavailable", ex);
return null;
}
@ -1341,30 +1332,30 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
try {
long startTick = System.currentTimeMillis();
if (s_logger.isDebugEnabled()) {
s_logger.debug("Dispatch -> " + pdu.getAgentId() + ", json: " + pdu.getJsonPackage());
if (logger.isDebugEnabled()) {
logger.debug("Dispatch -> " + pdu.getAgentId() + ", json: " + pdu.getJsonPackage());
}
Answer[] answers = sendToAgent(pdu.getAgentId(), cmds, pdu.isStopOnError());
if (answers != null) {
String jsonReturn = _gson.toJson(answers);
if (s_logger.isDebugEnabled()) {
s_logger.debug("Completed dispatching -> " + pdu.getAgentId() + ", json: " + pdu.getJsonPackage() + " in " +
if (logger.isDebugEnabled()) {
logger.debug("Completed dispatching -> " + pdu.getAgentId() + ", json: " + pdu.getJsonPackage() + " in " +
(System.currentTimeMillis() - startTick) + " ms, return result: " + jsonReturn);
}
return jsonReturn;
} else {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Completed dispatching -> " + pdu.getAgentId() + ", json: " + pdu.getJsonPackage() + " in " +
if (logger.isDebugEnabled()) {
logger.debug("Completed dispatching -> " + pdu.getAgentId() + ", json: " + pdu.getJsonPackage() + " in " +
(System.currentTimeMillis() - startTick) + " ms, return null result");
}
}
} catch (AgentUnavailableException e) {
s_logger.warn("Agent is unavailable", e);
logger.warn("Agent is unavailable", e);
} catch (OperationTimedoutException e) {
s_logger.warn("Timed Out", e);
logger.warn("Timed Out", e);
}
return null;
@ -1389,8 +1380,8 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
@Override
protected void runInContext() {
try {
if (s_logger.isTraceEnabled()) {
s_logger.trace("Agent rebalance task check, management server id:" + _nodeId);
if (logger.isTraceEnabled()) {
logger.trace("Agent rebalance task check, management server id:" + _nodeId);
}
// initiate agent lb task will be scheduled and executed only once, and only when number of agents
// loaded exceeds _connectedAgentsThreshold
@ -1408,18 +1399,18 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
if (allHostsCount > 0.0) {
double load = managedHostsCount / allHostsCount;
if (load >= ConnectedAgentThreshold.value()) {
s_logger.debug("Scheduling agent rebalancing task as the average agent load " + load + " is more than the threshold " +
logger.debug("Scheduling agent rebalancing task as the average agent load " + load + " is more than the threshold " +
ConnectedAgentThreshold.value());
scheduleRebalanceAgents();
_agentLbHappened = true;
} else {
s_logger.debug("Not scheduling agent rebalancing task as the averages load " + load + " is less than the threshold " +
logger.debug("Not scheduling agent rebalancing task as the averages load " + load + " is less than the threshold " +
ConnectedAgentThreshold.value());
}
}
}
} catch (Throwable e) {
s_logger.error("Problem with the clustered agent transfer scan check!", e);
logger.error("Problem with the clustered agent transfer scan check!", e);
}
}
};
@ -1428,13 +1419,13 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
@Override
public void rescan() {
// schedule a scan task immediately
if (s_logger.isDebugEnabled()) {
s_logger.debug("Scheduling a host scan task");
if (logger.isDebugEnabled()) {
logger.debug("Scheduling a host scan task");
}
// schedule host scan task on current MS
scheduleHostScanTask();
if (s_logger.isDebugEnabled()) {
s_logger.debug("Notifying all peer MS to schedule host scan task");
if (logger.isDebugEnabled()) {
logger.debug("Notifying all peer MS to schedule host scan task");
}
}

View File

@ -27,7 +27,6 @@ import java.util.Map;
import javax.ejb.Local;
import javax.inject.Inject;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.host.Host;
@ -41,7 +40,6 @@ import com.cloud.utils.db.SearchCriteria.Op;
@Component
@Local(value = AgentLoadBalancerPlanner.class)
public class ClusterBasedAgentLoadBalancerPlanner extends AdapterBase implements AgentLoadBalancerPlanner {
private static final Logger s_logger = Logger.getLogger(AgentLoadBalancerPlanner.class);
@Inject
HostDao _hostDao = null;
@ -54,7 +52,7 @@ public class ClusterBasedAgentLoadBalancerPlanner extends AdapterBase implements
List<HostVO> allHosts = sc.list();
if (allHosts.size() <= avLoad) {
s_logger.debug("Agent load = " + allHosts.size() + " for management server " + msId + " doesn't exceed average system agent load = " + avLoad +
logger.debug("Agent load = " + allHosts.size() + " for management server " + msId + " doesn't exceed average system agent load = " + avLoad +
"; so it doesn't participate in agent rebalancing process");
return null;
}
@ -66,7 +64,7 @@ public class ClusterBasedAgentLoadBalancerPlanner extends AdapterBase implements
List<HostVO> directHosts = sc.list();
if (directHosts.isEmpty()) {
s_logger.debug("No direct agents in status " + Status.Up + " exist for the management server " + msId +
logger.debug("No direct agents in status " + Status.Up + " exist for the management server " + msId +
"; so it doesn't participate in agent rebalancing process");
return null;
}
@ -92,23 +90,23 @@ public class ClusterBasedAgentLoadBalancerPlanner extends AdapterBase implements
int hostsLeft = directHosts.size();
List<HostVO> hostsToReturn = new ArrayList<HostVO>();
s_logger.debug("Management server " + msId + " can give away " + hostsToGive + " as it currently owns " + allHosts.size() +
logger.debug("Management server " + msId + " can give away " + hostsToGive + " as it currently owns " + allHosts.size() +
" and the average agent load in the system is " + avLoad + "; finalyzing list of hosts to give away...");
for (Long cluster : hostToClusterMap.keySet()) {
List<HostVO> hostsInCluster = hostToClusterMap.get(cluster);
hostsLeft = hostsLeft - hostsInCluster.size();
if (hostsToReturn.size() < hostsToGive) {
s_logger.debug("Trying cluster id=" + cluster);
logger.debug("Trying cluster id=" + cluster);
if (hostsInCluster.size() > hostsLeftToGive) {
s_logger.debug("Skipping cluster id=" + cluster + " as it has more hosts than we need: " + hostsInCluster.size() + " vs " + hostsLeftToGive);
logger.debug("Skipping cluster id=" + cluster + " as it has more hosts than we need: " + hostsInCluster.size() + " vs " + hostsLeftToGive);
if (hostsLeft >= hostsLeftToGive) {
continue;
} else {
break;
}
} else {
s_logger.debug("Taking all " + hostsInCluster.size() + " hosts: " + hostsInCluster + " from cluster id=" + cluster);
logger.debug("Taking all " + hostsInCluster.size() + " hosts: " + hostsInCluster + " from cluster id=" + cluster);
hostsToReturn.addAll(hostsInCluster);
hostsLeftToGive = hostsLeftToGive - hostsInCluster.size();
}
@ -117,7 +115,7 @@ public class ClusterBasedAgentLoadBalancerPlanner extends AdapterBase implements
}
}
s_logger.debug("Management server " + msId + " is ready to give away " + hostsToReturn.size() + " hosts");
logger.debug("Management server " + msId + " is ready to give away " + hostsToReturn.size() + " hosts");
return hostsToReturn;
}
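
The planner above sheds load a whole cluster at a time: it computes how many hosts this management server is over the average and takes only clusters that fit in the remainder. A simplified sketch of the selection; the real method also tracks hostsLeft to decide between continuing and breaking early:

```java
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class GiveAwayPlannerExample {
    static List<Long> pickHosts(Map<Long, List<Long>> hostsByCluster, int owned, int avLoad) {
        int toGive = owned - avLoad;
        List<Long> result = new ArrayList<>();
        for (List<Long> cluster : hostsByCluster.values()) {
            if (result.size() >= toGive) {
                break;
            }
            if (cluster.size() <= toGive - result.size()) {
                result.addAll(cluster); // take the whole cluster or none of it
            }
        }
        return result;
    }

    public static void main(String[] args) {
        Map<Long, List<Long>> clusters = new LinkedHashMap<>();
        clusters.put(1L, List.of(101L, 102L));
        clusters.put(2L, List.of(201L));
        System.out.println(pickHosts(clusters, 3, 1)); // [101, 102]
    }
}
```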

View File

@ -20,7 +20,6 @@ import java.util.Map;
import javax.inject.Inject;
import org.apache.log4j.Logger;
import org.apache.cloudstack.context.CallContext;
import org.apache.cloudstack.framework.jobs.AsyncJob;
@ -34,7 +33,6 @@ import com.cloud.utils.component.AdapterBase;
import com.cloud.vm.dao.VMInstanceDao;
public class VmWorkJobDispatcher extends AdapterBase implements AsyncJobDispatcher {
private static final Logger s_logger = Logger.getLogger(VmWorkJobDispatcher.class);
@Inject private VirtualMachineManagerImpl _vmMgr;
@Inject
@ -65,23 +63,23 @@ public class VmWorkJobDispatcher extends AdapterBase implements AsyncJobDispatch
try {
workClz = Class.forName(job.getCmd());
} catch (ClassNotFoundException e) {
s_logger.error("VM work class " + cmd + " is not found" + ", job origin: " + job.getRelated(), e);
logger.error("VM work class " + cmd + " is not found" + ", job origin: " + job.getRelated(), e);
_asyncJobMgr.completeAsyncJob(job.getId(), JobInfo.Status.FAILED, 0, e.getMessage());
return;
}
work = VmWorkSerializer.deserialize(workClz, job.getCmdInfo());
if(work == null) {
s_logger.error("Unable to deserialize VM work " + job.getCmd() + ", job info: " + job.getCmdInfo() + ", job origin: " + job.getRelated());
logger.error("Unable to deserialize VM work " + job.getCmd() + ", job info: " + job.getCmdInfo() + ", job origin: " + job.getRelated());
_asyncJobMgr.completeAsyncJob(job.getId(), JobInfo.Status.FAILED, 0, "Unable to deserialize VM work");
return;
}
if (s_logger.isDebugEnabled())
s_logger.debug("Run VM work job: " + cmd + " for VM " + work.getVmId() + ", job origin: " + job.getRelated());
if (logger.isDebugEnabled())
logger.debug("Run VM work job: " + cmd + " for VM " + work.getVmId() + ", job origin: " + job.getRelated());
try {
if (_handlers == null || _handlers.isEmpty()) {
s_logger.error("Invalid startup configuration, no work job handler is found. cmd: " + job.getCmd() + ", job info: " + job.getCmdInfo()
logger.error("Invalid startup configuration, no work job handler is found. cmd: " + job.getCmd() + ", job info: " + job.getCmdInfo()
+ ", job origin: " + job.getRelated());
_asyncJobMgr.completeAsyncJob(job.getId(), JobInfo.Status.FAILED, 0, "Invalid startup configuration. no job handler is found");
return;
@ -90,7 +88,7 @@ public class VmWorkJobDispatcher extends AdapterBase implements AsyncJobDispatch
VmWorkJobHandler handler = _handlers.get(work.getHandlerName());
if (handler == null) {
s_logger.error("Unable to find work job handler. handler name: " + work.getHandlerName() + ", job cmd: " + job.getCmd()
logger.error("Unable to find work job handler. handler name: " + work.getHandlerName() + ", job cmd: " + job.getCmd()
+ ", job info: " + job.getCmdInfo() + ", job origin: " + job.getRelated());
_asyncJobMgr.completeAsyncJob(job.getId(), JobInfo.Status.FAILED, 0, "Unable to find work job handler");
return;
@ -105,14 +103,14 @@ public class VmWorkJobDispatcher extends AdapterBase implements AsyncJobDispatch
CallContext.unregister();
}
} finally {
if (s_logger.isDebugEnabled())
s_logger.debug("Done with run of VM work job: " + cmd + " for VM " + work.getVmId() + ", job origin: " + job.getRelated());
if (logger.isDebugEnabled())
logger.debug("Done with run of VM work job: " + cmd + " for VM " + work.getVmId() + ", job origin: " + job.getRelated());
}
} catch(InvalidParameterValueException e) {
s_logger.error("Unable to complete " + job + ", job origin:" + job.getRelated());
logger.error("Unable to complete " + job + ", job origin:" + job.getRelated());
_asyncJobMgr.completeAsyncJob(job.getId(), JobInfo.Status.FAILED, 0, _asyncJobMgr.marshallResultObject(e));
} catch(Throwable e) {
s_logger.error("Unable to complete " + job + ", job origin:" + job.getRelated(), e);
logger.error("Unable to complete " + job + ", job origin:" + job.getRelated(), e);
//RuntimeException ex = new RuntimeException("Job failed due to exception " + e.getMessage());
_asyncJobMgr.completeAsyncJob(job.getId(), JobInfo.Status.FAILED, 0, _asyncJobMgr.marshallResultObject(e));
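
The dispatcher above resolves a handler by the name carried in the deserialized work object and fails the async job when none is registered. A minimal sketch of that lookup-and-dispatch pattern with invented names:

```java
import java.util.HashMap;
import java.util.Map;

public class WorkJobDispatchExample {
    interface VmWorkJobHandler { void handle(long vmId); }

    private final Map<String, VmWorkJobHandler> handlers = new HashMap<>();

    void dispatch(String handlerName, long vmId) {
        VmWorkJobHandler handler = handlers.get(handlerName);
        if (handler == null) {
            System.err.println("Unable to find work job handler: " + handlerName);
            return; // the real code marks the async job FAILED here
        }
        handler.handle(vmId);
    }

    public static void main(String[] args) {
        WorkJobDispatchExample d = new WorkJobDispatchExample();
        d.handlers.put("VirtualMachineManagerImpl", vmId -> System.out.println("start VM " + vmId));
        d.dispatch("VirtualMachineManagerImpl", 5L);
    }
}
```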

View File

@ -24,7 +24,6 @@ import java.util.Map;
import javax.inject.Inject;
import org.apache.log4j.Logger;
import org.apache.cloudstack.context.CallContext;
import org.apache.cloudstack.framework.jobs.AsyncJob;
@ -47,7 +46,6 @@ import com.cloud.vm.dao.VMInstanceDao;
* Current code base uses blocking calls to wait for job completion
*/
public class VmWorkJobWakeupDispatcher extends AdapterBase implements AsyncJobDispatcher {
private static final Logger s_logger = Logger.getLogger(VmWorkJobWakeupDispatcher.class);
@Inject
private VmWorkJobDao _workjobDao;
@ -69,7 +67,7 @@ public class VmWorkJobWakeupDispatcher extends AdapterBase implements AsyncJobDi
try {
List<AsyncJobJoinMapVO> joinRecords = _joinMapDao.listJoinRecords(job.getId());
if (joinRecords.size() != 1) {
s_logger.warn("AsyncJob-" + job.getId()
logger.warn("AsyncJob-" + job.getId()
+ " received wakeup call with un-supported joining job number: " + joinRecords.size());
// if we fail wakeup-execution for any reason, avoid release sync-source if there is any
@ -84,7 +82,7 @@ public class VmWorkJobWakeupDispatcher extends AdapterBase implements AsyncJobDi
try {
workClz = Class.forName(job.getCmd());
} catch (ClassNotFoundException e) {
s_logger.error("VM work class " + job.getCmd() + " is not found", e);
logger.error("VM work class " + job.getCmd() + " is not found", e);
return;
}
@ -105,14 +103,14 @@ public class VmWorkJobWakeupDispatcher extends AdapterBase implements AsyncJobDi
handler.invoke(_vmMgr);
} else {
assert (false);
s_logger.error("Unable to find wakeup handler " + joinRecord.getWakeupHandler() +
logger.error("Unable to find wakeup handler " + joinRecord.getWakeupHandler() +
" when waking up job-" + job.getId());
}
} finally {
CallContext.unregister();
}
} catch (Throwable e) {
s_logger.warn("Unexpected exception in waking up job-" + job.getId());
logger.warn("Unexpected exception in waking up job-" + job.getId());
// if we fail wakeup-execution for any reason, avoid release sync-source if there is any
job.setSyncSource(null);
@ -132,11 +130,11 @@ public class VmWorkJobWakeupDispatcher extends AdapterBase implements AsyncJobDi
method.setAccessible(true);
} catch (SecurityException e) {
assert (false);
s_logger.error("Unexpected exception", e);
logger.error("Unexpected exception", e);
return null;
} catch (NoSuchMethodException e) {
assert (false);
s_logger.error("Unexpected exception", e);
logger.error("Unexpected exception", e);
return null;
}
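
The hunk above resolves the wakeup handler reflectively, forces accessibility, and returns null after logging when the lookup fails. A compilable sketch of that pattern, assuming a no-argument handler method:

```java
import java.lang.reflect.Method;

public class WakeupHandlerLookup {
    static Method findHandler(Class<?> clz, String name) {
        try {
            Method m = clz.getDeclaredMethod(name);
            m.setAccessible(true); // handler methods may be non-public
            return m;
        } catch (SecurityException | NoSuchMethodException e) {
            System.err.println("Unexpected exception: " + e);
            return null;
        }
    }

    public static void main(String[] args) throws Exception {
        Method m = findHandler(WakeupHandlerLookup.class, "sampleWakeup");
        if (m != null) {
            m.invoke(null); // static, so no receiver
        }
    }

    private static void sampleWakeup() {
        System.out.println("woken up");
    }
}
```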

View File

@ -28,7 +28,6 @@ import java.util.Map;
import javax.ejb.Local;
import javax.inject.Inject;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import org.apache.cloudstack.engine.datacenter.entity.api.DataCenterResourceEntity;
@ -53,7 +52,6 @@ import com.cloud.utils.exception.CloudRuntimeException;
@Component(value = "EngineClusterDao")
@Local(value = EngineClusterDao.class)
public class EngineClusterDaoImpl extends GenericDaoBase<EngineClusterVO, Long> implements EngineClusterDao {
private static final Logger s_logger = Logger.getLogger(EngineClusterDaoImpl.class);
protected final SearchBuilder<EngineClusterVO> PodSearch;
protected final SearchBuilder<EngineClusterVO> HyTypeWithoutGuidSearch;
@ -274,7 +272,7 @@ public class EngineClusterDaoImpl extends GenericDaoBase<EngineClusterVO, Long>
int rows = update(vo, sc);
if (rows == 0 && s_logger.isDebugEnabled()) {
if (rows == 0 && logger.isDebugEnabled()) {
EngineClusterVO dbCluster = findByIdIncludingRemoved(vo.getId());
if (dbCluster != null) {
StringBuilder str = new StringBuilder("Unable to update ").append(vo.toString());
@ -301,7 +299,7 @@ public class EngineClusterDaoImpl extends GenericDaoBase<EngineClusterVO, Long>
.append("; updatedTime=")
.append(oldUpdatedTime);
} else {
s_logger.debug("Unable to update dataCenter: id=" + vo.getId() + ", as there is no such dataCenter exists in the database anymore");
logger.debug("Unable to update dataCenter: id=" + vo.getId() + ", as there is no such dataCenter exists in the database anymore");
}
}
return rows > 0;
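
The update above is an optimistic state transition: the search criteria pin the previous state and timestamps in the WHERE clause, so rows == 0 means another node won the race (the debug branch then reconstructs the mismatch). A plain-JDBC sketch of the same idea; the table and column names are made up:

```java
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;

public class OptimisticUpdateExample {
    static boolean transition(Connection conn, long id, String oldState, String newState) throws SQLException {
        String sql = "UPDATE cluster SET state = ? WHERE id = ? AND state = ?";
        try (PreparedStatement ps = conn.prepareStatement(sql)) {
            ps.setString(1, newState);
            ps.setLong(2, id);
            ps.setString(3, oldState);
            // 0 rows means the row moved on (or was removed) under us.
            return ps.executeUpdate() > 0;
        }
    }
}
```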

View File

@ -30,7 +30,6 @@ import java.util.concurrent.ExecutionException;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import org.apache.log4j.Logger;
import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService;
import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo;
@ -131,7 +130,6 @@ import com.cloud.vm.VmWorkTakeVolumeSnapshot;
import com.cloud.vm.dao.UserVmDao;
public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrationService, Configurable {
private static final Logger s_logger = Logger.getLogger(VolumeOrchestrator.class);
@Inject
EntityManager _entityMgr;
@ -343,8 +341,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
if (pool == null) {
//pool could not be found in the VM's pod/cluster.
if (s_logger.isDebugEnabled()) {
s_logger.debug("Could not find any storage pool to create Volume in the pod/cluster of the provided VM " + vm.getUuid());
if (logger.isDebugEnabled()) {
logger.debug("Could not find any storage pool to create Volume in the pod/cluster of the provided VM " + vm.getUuid());
}
StringBuilder addDetails = new StringBuilder(msg);
addDetails.append(", Could not find any storage pool to create Volume in the pod/cluster of the VM ");
@ -361,8 +359,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
}
if (pool != null) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Found a suitable pool for create volume: " + pool.getId());
if (logger.isDebugEnabled()) {
logger.debug("Found a suitable pool for create volume: " + pool.getId());
}
break;
}
@ -370,7 +368,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
}
if (pool == null) {
s_logger.info(msg);
logger.info(msg);
throw new StorageUnavailableException(msg, -1);
}
@ -389,7 +387,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
_snapshotSrv.syncVolumeSnapshotsToRegionStore(snapVolId, snapStore);
} catch (Exception ex) {
// log but ignore the sync error to avoid any potential S3 down issue, it should be sync next time
s_logger.warn(ex.getMessage(), ex);
logger.warn(ex.getMessage(), ex);
}
}
@ -398,15 +396,15 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
try {
VolumeApiResult result = future.get();
if (result.isFailed()) {
s_logger.debug("Failed to create volume from snapshot:" + result.getResult());
logger.debug("Failed to create volume from snapshot:" + result.getResult());
throw new CloudRuntimeException("Failed to create volume from snapshot:" + result.getResult());
}
return result.getVolume();
} catch (InterruptedException e) {
s_logger.debug("Failed to create volume from snapshot", e);
logger.debug("Failed to create volume from snapshot", e);
throw new CloudRuntimeException("Failed to create volume from snapshot", e);
} catch (ExecutionException e) {
s_logger.debug("Failed to create volume from snapshot", e);
logger.debug("Failed to create volume from snapshot", e);
throw new CloudRuntimeException("Failed to create volume from snapshot", e);
}
@ -466,15 +464,15 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
try {
VolumeApiResult result = future.get();
if (result.isFailed()) {
s_logger.debug("copy volume failed: " + result.getResult());
logger.debug("copy volume failed: " + result.getResult());
throw new CloudRuntimeException("copy volume failed: " + result.getResult());
}
return result.getVolume();
} catch (InterruptedException e) {
s_logger.debug("Failed to copy volume: " + volume.getId(), e);
logger.debug("Failed to copy volume: " + volume.getId(), e);
throw new CloudRuntimeException("Failed to copy volume", e);
} catch (ExecutionException e) {
s_logger.debug("Failed to copy volume: " + volume.getId(), e);
logger.debug("Failed to copy volume: " + volume.getId(), e);
throw new CloudRuntimeException("Failed to copy volume", e);
}
}
@ -504,12 +502,12 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
pool = findStoragePool(dskCh, dc, pod, clusterId, vm.getHostId(), vm, avoidPools);
if (pool == null) {
s_logger.warn("Unable to find suitable primary storage when creating volume " + volume.getName());
logger.warn("Unable to find suitable primary storage when creating volume " + volume.getName());
throw new CloudRuntimeException("Unable to find suitable primary storage when creating volume " + volume.getName());
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("Trying to create " + volume + " on " + pool);
if (logger.isDebugEnabled()) {
logger.debug("Trying to create " + volume + " on " + pool);
}
DataStore store = dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary);
for (int i = 0; i < 2; i++) {
@ -526,20 +524,20 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
VolumeApiResult result = future.get();
if (result.isFailed()) {
if (result.getResult().contains("request template reload") && (i == 0)) {
s_logger.debug("Retry template re-deploy for vmware");
logger.debug("Retry template re-deploy for vmware");
continue;
} else {
s_logger.debug("create volume failed: " + result.getResult());
logger.debug("create volume failed: " + result.getResult());
throw new CloudRuntimeException("create volume failed:" + result.getResult());
}
}
return result.getVolume();
} catch (InterruptedException e) {
s_logger.error("create volume failed", e);
logger.error("create volume failed", e);
throw new CloudRuntimeException("create volume failed", e);
} catch (ExecutionException e) {
s_logger.error("create volume failed", e);
logger.error("create volume failed", e);
throw new CloudRuntimeException("create volume failed", e);
}
}
@ -674,10 +672,10 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
if (rootDisksize != null ) {
rootDisksize = rootDisksize * 1024 * 1024 * 1024;
if (rootDisksize > size) {
s_logger.debug("Using root disk size of " + rootDisksize + " Bytes for volume " + name);
logger.debug("Using root disk size of " + rootDisksize + " Bytes for volume " + name);
size = rootDisksize;
} else {
s_logger.debug("Using root disk size of " + size + " Bytes for volume " + name + "since specified root disk size of " + rootDisksize + " Bytes is smaller than template");
logger.debug("Using root disk size of " + size + " Bytes for volume " + name + "since specified root disk size of " + rootDisksize + " Bytes is smaller than template");
}
}
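
The override above converts the requested root disk size from GB to bytes and applies it only when it exceeds the template's virtual size. A worked example of the arithmetic:

```java
public class RootDiskSizeExample {
    public static void main(String[] args) {
        long templateSize = 8L * 1024 * 1024 * 1024;  // 8 GiB template virtual size
        long rootDisksize = 20;                        // user-requested size in GB
        rootDisksize = rootDisksize * 1024 * 1024 * 1024;
        long size = (rootDisksize > templateSize) ? rootDisksize : templateSize;
        System.out.println("volume size in bytes: " + size); // 21474836480
    }
}
```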
@ -818,8 +816,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
Long volTemplateId = existingVolume.getTemplateId();
long vmTemplateId = vm.getTemplateId();
if (volTemplateId != null && volTemplateId.longValue() != vmTemplateId) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("switchVolume: Old Volume's templateId: " + volTemplateId + " does not match the VM's templateId: " + vmTemplateId
if (logger.isDebugEnabled()) {
logger.debug("switchVolume: Old Volume's templateId: " + volTemplateId + " does not match the VM's templateId: " + vmTemplateId
+ ", updating templateId in the new Volume");
}
templateIdToUse = vmTemplateId;
@ -833,16 +831,16 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
try {
stateTransitTo(existingVolume, Volume.Event.DestroyRequested);
} catch (NoTransitionException e) {
s_logger.debug("Unable to destroy existing volume: " + e.toString());
logger.debug("Unable to destroy existing volume: " + e.toString());
}
// In case of VMware VM will continue to use the old root disk until expunged, so force expunge old root disk
if (vm.getHypervisorType() == HypervisorType.VMware) {
s_logger.info("Expunging volume " + existingVolume.getId() + " from primary data store");
logger.info("Expunging volume " + existingVolume.getId() + " from primary data store");
AsyncCallFuture<VolumeApiResult> future = volService.expungeVolumeAsync(volFactory.getVolume(existingVolume.getId()));
try {
future.get();
} catch (Exception e) {
s_logger.debug("Failed to expunge volume:" + existingVolume.getId(), e);
logger.debug("Failed to expunge volume:" + existingVolume.getId(), e);
}
}
@ -859,8 +857,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
@Override
@DB
public void cleanupVolumes(long vmId) throws ConcurrentOperationException {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Cleaning storage for vm: " + vmId);
if (logger.isDebugEnabled()) {
logger.debug("Cleaning storage for vm: " + vmId);
}
final List<VolumeVO> volumesForVm = _volsDao.findByInstance(vmId);
final List<VolumeVO> toBeExpunged = new ArrayList<VolumeVO>();
@ -875,12 +873,12 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
if (!volumeAlreadyDestroyed) {
volService.destroyVolume(vol.getId());
} else {
s_logger.debug("Skipping destroy for the volume " + vol + " as its in state " + vol.getState().toString());
logger.debug("Skipping destroy for the volume " + vol + " as its in state " + vol.getState().toString());
}
toBeExpunged.add(vol);
} else {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Detaching " + vol);
if (logger.isDebugEnabled()) {
logger.debug("Detaching " + vol);
}
_volsDao.detachVolume(vol.getId());
}
@ -894,9 +892,9 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
try {
future.get();
} catch (InterruptedException e) {
s_logger.debug("failed expunge volume" + expunge.getId(), e);
logger.debug("failed expunge volume" + expunge.getId(), e);
} catch (ExecutionException e) {
s_logger.debug("failed expunge volume" + expunge.getId(), e);
logger.debug("failed expunge volume" + expunge.getId(), e);
}
}
}
@ -938,7 +936,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
try {
VolumeApiResult result = future.get();
if (result.isFailed()) {
s_logger.error("Migrate volume failed:" + result.getResult());
logger.error("Migrate volume failed:" + result.getResult());
throw new StorageUnavailableException("Migrate volume failed: " + result.getResult(), destPool.getId());
} else {
// update the volumeId for snapshots on secondary
@ -949,10 +947,10 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
}
return result.getVolume();
} catch (InterruptedException e) {
s_logger.debug("migrate volume failed", e);
logger.debug("migrate volume failed", e);
throw new CloudRuntimeException(e.getMessage());
} catch (ExecutionException e) {
s_logger.debug("migrate volume failed", e);
logger.debug("migrate volume failed", e);
throw new CloudRuntimeException(e.getMessage());
}
}
@ -964,15 +962,15 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
try {
VolumeApiResult result = future.get();
if (result.isFailed()) {
s_logger.debug("migrate volume failed:" + result.getResult());
logger.debug("migrate volume failed:" + result.getResult());
return null;
}
return result.getVolume();
} catch (InterruptedException e) {
s_logger.debug("migrate volume failed", e);
logger.debug("migrate volume failed", e);
return null;
} catch (ExecutionException e) {
s_logger.debug("migrate volume failed", e);
logger.debug("migrate volume failed", e);
return null;
}
}
@ -1003,13 +1001,13 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
try {
CommandResult result = future.get();
if (result.isFailed()) {
s_logger.debug("Failed to migrated vm " + vm + " along with its volumes. " + result.getResult());
logger.debug("Failed to migrated vm " + vm + " along with its volumes. " + result.getResult());
throw new CloudRuntimeException("Failed to migrated vm " + vm + " along with its volumes. ");
}
} catch (InterruptedException e) {
s_logger.debug("Failed to migrated vm " + vm + " along with its volumes.", e);
logger.debug("Failed to migrated vm " + vm + " along with its volumes.", e);
} catch (ExecutionException e) {
s_logger.debug("Failed to migrated vm " + vm + " along with its volumes.", e);
logger.debug("Failed to migrated vm " + vm + " along with its volumes.", e);
}
}
@ -1020,12 +1018,12 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
for (VolumeVO volume : vols) {
if (volume.getState() != Volume.State.Ready) {
s_logger.debug("volume: " + volume.getId() + " is in " + volume.getState() + " state");
logger.debug("volume: " + volume.getId() + " is in " + volume.getState() + " state");
throw new CloudRuntimeException("volume: " + volume.getId() + " is in " + volume.getState() + " state");
}
if (volume.getPoolId() == destPool.getId()) {
s_logger.debug("volume: " + volume.getId() + " is on the same storage pool: " + destPool.getId());
logger.debug("volume: " + volume.getId() + " is on the same storage pool: " + destPool.getId());
continue;
}
@ -1033,7 +1031,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
}
if (volumesNeedToMigrate.isEmpty()) {
s_logger.debug("No volume need to be migrated");
logger.debug("No volume need to be migrated");
return true;
}
@ -1049,8 +1047,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
@Override
public void prepareForMigration(VirtualMachineProfile vm, DeployDestination dest) {
List<VolumeVO> vols = _volsDao.findUsableVolumesForInstance(vm.getId());
if (s_logger.isDebugEnabled()) {
s_logger.debug("Preparing " + vols.size() + " volumes for " + vm);
if (logger.isDebugEnabled()) {
logger.debug("Preparing " + vols.size() + " volumes for " + vm);
}
for (VolumeVO vol : vols) {
@ -1138,21 +1136,21 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
tasks.add(task);
} else {
if (vol.isRecreatable()) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Volume " + vol + " will be recreated on storage pool " + assignedPool + " assigned by deploymentPlanner");
if (logger.isDebugEnabled()) {
logger.debug("Volume " + vol + " will be recreated on storage pool " + assignedPool + " assigned by deploymentPlanner");
}
VolumeTask task = new VolumeTask(VolumeTaskType.RECREATE, vol, null);
tasks.add(task);
} else {
if (assignedPool.getId() != vol.getPoolId()) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Mismatch in storage pool " + assignedPool + " assigned by deploymentPlanner and the one associated with volume " + vol);
if (logger.isDebugEnabled()) {
logger.debug("Mismatch in storage pool " + assignedPool + " assigned by deploymentPlanner and the one associated with volume " + vol);
}
DiskOffering diskOffering = _entityMgr.findById(DiskOffering.class, vol.getDiskOfferingId());
if (diskOffering.getUseLocalStorage()) {
// Currently migration of local volume is not supported so bail out
if (s_logger.isDebugEnabled()) {
s_logger.debug("Local volume " + vol + " cannot be recreated on storagepool " + assignedPool + " assigned by deploymentPlanner");
if (logger.isDebugEnabled()) {
logger.debug("Local volume " + vol + " cannot be recreated on storagepool " + assignedPool + " assigned by deploymentPlanner");
}
throw new CloudRuntimeException("Local volume " + vol + " cannot be recreated on storagepool " + assignedPool + " assigned by deploymentPlanner");
} else {
@ -1165,8 +1163,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
storageMigrationEnabled = StorageMigrationEnabled.value();
}
if(storageMigrationEnabled){
if (s_logger.isDebugEnabled()) {
s_logger.debug("Shared volume " + vol + " will be migrated on storage pool " + assignedPool + " assigned by deploymentPlanner");
if (logger.isDebugEnabled()) {
logger.debug("Shared volume " + vol + " will be migrated on storage pool " + assignedPool + " assigned by deploymentPlanner");
}
VolumeTask task = new VolumeTask(VolumeTaskType.MIGRATE, vol, assignedPool);
tasks.add(task);
@ -1187,8 +1185,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
throw new StorageUnavailableException("Volume has no pool associate and also no storage pool assigned in DeployDestination, Unable to create " + vol,
Volume.class, vol.getId());
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("No need to recreate the volume: " + vol + ", since it already has a pool assigned: " + vol.getPoolId() + ", adding disk to VM");
if (logger.isDebugEnabled()) {
logger.debug("No need to recreate the volume: " + vol + ", since it already has a pool assigned: " + vol.getPoolId() + ", adding disk to VM");
}
StoragePoolVO pool = _storagePoolDao.findById(vol.getPoolId());
VolumeTask task = new VolumeTask(VolumeTaskType.NOP, vol, pool);
@ -1205,7 +1203,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
DataStore destPool = null;
if (recreate && (dest.getStorageForDisks() == null || dest.getStorageForDisks().get(vol) == null)) {
destPool = dataStoreMgr.getDataStore(vol.getPoolId(), DataStoreRole.Primary);
s_logger.debug("existing pool: " + destPool.getId());
logger.debug("existing pool: " + destPool.getId());
} else {
StoragePool pool = dest.getStorageForDisks().get(vol);
destPool = dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary);
@ -1222,8 +1220,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
dest.getStorageForDisks().put(newVol, poolWithOldVol);
dest.getStorageForDisks().remove(vol);
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("Created new volume " + newVol + " for old volume " + vol);
if (logger.isDebugEnabled()) {
logger.debug("Created new volume " + newVol + " for old volume " + vol);
}
}
VolumeInfo volume = volFactory.getVolume(newVol.getId(), destPool);
@ -1245,7 +1243,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
TemplateInfo templ = tmplFactory.getReadyTemplateOnImageStore(templateId, dest.getDataCenter().getId());
if (templ == null) {
s_logger.debug("can't find ready template: " + templateId + " for data center " + dest.getDataCenter().getId());
logger.debug("can't find ready template: " + templateId + " for data center " + dest.getDataCenter().getId());
throw new CloudRuntimeException("can't find ready template: " + templateId + " for data center " + dest.getDataCenter().getId());
}
@ -1271,10 +1269,10 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
result = future.get();
if (result.isFailed()) {
if (result.getResult().contains("request template reload") && (i == 0)) {
s_logger.debug("Retry template re-deploy for vmware");
logger.debug("Retry template re-deploy for vmware");
continue;
} else {
s_logger.debug("Unable to create " + newVol + ":" + result.getResult());
logger.debug("Unable to create " + newVol + ":" + result.getResult());
throw new StorageUnavailableException("Unable to create " + newVol + ":" + result.getResult(), destPool.getId());
}
}
@ -1291,10 +1289,10 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
newVol = _volsDao.findById(newVol.getId());
break; //break out of template-redeploy retry loop
} catch (InterruptedException e) {
s_logger.error("Unable to create " + newVol, e);
logger.error("Unable to create " + newVol, e);
throw new StorageUnavailableException("Unable to create " + newVol + ":" + e.toString(), destPool.getId());
} catch (ExecutionException e) {
s_logger.error("Unable to create " + newVol, e);
logger.error("Unable to create " + newVol, e);
throw new StorageUnavailableException("Unable to create " + newVol + ":" + e.toString(), destPool.getId());
}
}
@ -1306,8 +1304,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
public void prepare(VirtualMachineProfile vm, DeployDestination dest) throws StorageUnavailableException, InsufficientStorageCapacityException, ConcurrentOperationException {
if (dest == null) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("DeployDestination cannot be null, cannot prepare Volumes for the vm: " + vm);
if (logger.isDebugEnabled()) {
logger.debug("DeployDestination cannot be null, cannot prepare Volumes for the vm: " + vm);
}
throw new CloudRuntimeException("Unable to prepare Volume for vm because DeployDestination is null, vm:" + vm);
}
@ -1318,8 +1316,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
}
List<VolumeVO> vols = _volsDao.findUsableVolumesForInstance(vm.getId());
if (s_logger.isDebugEnabled()) {
s_logger.debug("Checking if we need to prepare " + vols.size() + " volumes for " + vm);
if (logger.isDebugEnabled()) {
logger.debug("Checking if we need to prepare " + vols.size() + " volumes for " + vm);
}
List<VolumeTask> tasks = getTasks(vols, dest.getStorageForDisks(), vm);
@ -1398,7 +1396,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
}
if (volume.getState().equals(Volume.State.Creating)) {
s_logger.debug("Remove volume: " + volume.getId() + ", as it's leftover from last mgt server stop");
logger.debug("Remove volume: " + volume.getId() + ", as it's leftover from last mgt server stop");
_volsDao.remove(volume.getId());
}
}
@ -1413,11 +1411,11 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
if (volume.getState() == Volume.State.Migrating) {
VolumeVO duplicateVol = _volsDao.findByPoolIdName(destPoolId, volume.getName());
if (duplicateVol != null) {
s_logger.debug("Remove volume " + duplicateVol.getId() + " on storage pool " + destPoolId);
logger.debug("Remove volume " + duplicateVol.getId() + " on storage pool " + destPoolId);
_volsDao.remove(duplicateVol.getId());
}
s_logger.debug("change volume state to ready from migrating in case migration failure for vol: " + volumeId);
logger.debug("change volume state to ready from migrating in case migration failure for vol: " + volumeId);
volume.setState(Volume.State.Ready);
_volsDao.update(volumeId, volume);
}
@ -1428,7 +1426,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
_snapshotSrv.cleanupVolumeDuringSnapshotFailure(volumeId, snapshotId);
VolumeVO volume = _volsDao.findById(volumeId);
if (volume.getState() == Volume.State.Snapshotting) {
s_logger.debug("change volume state back to Ready: " + volume.getId());
logger.debug("change volume state back to Ready: " + volume.getId());
volume.setState(Volume.State.Ready);
_volsDao.update(volume.getId(), volume);
}
@ -1453,7 +1451,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
cleanupVolumeDuringSnapshotFailure(work.getVolumeId(), work.getSnapshotId());
}
} catch (Exception e) {
s_logger.debug("clean up job failure, will continue", e);
logger.debug("clean up job failure, will continue", e);
}
}
}
@ -1486,7 +1484,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
//FIXME - why recalculate and not decrement
_resourceLimitMgr.recalculateResourceCount(volume.getAccountId(), volume.getDomainId(), ResourceType.primary_storage.getOrdinal());
} catch (Exception e) {
s_logger.debug("Failed to destroy volume" + volume.getId(), e);
logger.debug("Failed to destroy volume" + volume.getId(), e);
throw new CloudRuntimeException("Failed to destroy volume" + volume.getId(), e);
}
}
@ -1517,7 +1515,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
needUpdate = true;
if (needUpdate) {
s_logger.info("Update volume disk chain info. vol: " + vol.getId() + ", " + vol.getPath() + " -> " + path + ", " + vol.getChainInfo() + " -> " + chainInfo);
logger.info("Update volume disk chain info. vol: " + vol.getId() + ", " + vol.getPath() + " -> " + path + ", " + vol.getChainInfo() + " -> " + chainInfo);
vol.setPath(path);
vol.setChainInfo(chainInfo);
_volsDao.update(volumeId, vol);

View File

@ -28,7 +28,6 @@ import java.util.Map.Entry;
import javax.ejb.Local;
import javax.inject.Inject;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
@ -52,7 +51,6 @@ import com.cloud.utils.exception.CloudRuntimeException;
@Component
@Local(value = {CapacityDao.class})
public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements CapacityDao {
private static final Logger s_logger = Logger.getLogger(CapacityDaoImpl.class);
private static final String ADD_ALLOCATED_SQL = "UPDATE `cloud`.`op_host_capacity` SET used_capacity = used_capacity + ? WHERE host_id = ? AND capacity_type = ?";
private static final String SUBTRACT_ALLOCATED_SQL =
@ -523,7 +521,7 @@ public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements
txn.commit();
} catch (Exception e) {
txn.rollback();
s_logger.warn("Exception updating capacity for host: " + hostId, e);
logger.warn("Exception updating capacity for host: " + hostId, e);
}
}
@ -988,7 +986,7 @@ public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements
}
pstmt.executeUpdate();
} catch (Exception e) {
s_logger.warn("Error updating CapacityVO", e);
logger.warn("Error updating CapacityVO", e);
}
}
@ -1008,7 +1006,7 @@ public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements
return rs.getFloat(1);
}
} catch (Exception e) {
s_logger.warn("Error checking cluster threshold", e);
logger.warn("Error checking cluster threshold", e);
}
return 0;
}

View File

@ -18,7 +18,6 @@ package com.cloud.certificate.dao;
import javax.ejb.Local;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.certificate.CertificateVO;
@ -30,7 +29,6 @@ import com.cloud.utils.db.GenericDaoBase;
@DB
public class CertificateDaoImpl extends GenericDaoBase<CertificateVO, Long> implements CertificateDao {
private static final Logger s_logger = Logger.getLogger(CertificateDaoImpl.class);
public CertificateDaoImpl() {
@ -44,7 +42,7 @@ public class CertificateDaoImpl extends GenericDaoBase<CertificateVO, Long> impl
update(cert.getId(), cert);
return cert.getId();
} catch (Exception e) {
s_logger.warn("Unable to read the certificate: " + e);
logger.warn("Unable to read the certificate: " + e);
return new Long(0);
}
}

View File

@ -22,7 +22,6 @@ import java.util.List;
import javax.annotation.PostConstruct;
import javax.ejb.Local;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.cluster.agentlb.HostTransferMapVO;
@ -36,7 +35,6 @@ import com.cloud.utils.db.SearchCriteria;
@Local(value = {HostTransferMapDao.class})
@DB
public class HostTransferMapDaoImpl extends GenericDaoBase<HostTransferMapVO, Long> implements HostTransferMapDao {
private static final Logger s_logger = Logger.getLogger(HostTransferMapDaoImpl.class);
protected SearchBuilder<HostTransferMapVO> AllFieldsSearch;
protected SearchBuilder<HostTransferMapVO> IntermediateStateSearch;

View File

@ -26,7 +26,6 @@ import javax.inject.Inject;
import javax.naming.ConfigurationException;
import javax.persistence.TableGenerator;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.dc.DataCenterDetailVO;
@ -58,7 +57,6 @@ import com.cloud.utils.net.NetUtils;
@Component
@Local(value = {DataCenterDao.class})
public class DataCenterDaoImpl extends GenericDaoBase<DataCenterVO, Long> implements DataCenterDao {
private static final Logger s_logger = Logger.getLogger(DataCenterDaoImpl.class);
protected SearchBuilder<DataCenterVO> NameSearch;
protected SearchBuilder<DataCenterVO> ListZonesByDomainIdSearch;
@ -412,7 +410,7 @@ public class DataCenterDaoImpl extends GenericDaoBase<DataCenterVO, Long> implem
Long dcId = Long.parseLong(tokenOrIdOrName);
return findById(dcId);
} catch (NumberFormatException nfe) {
s_logger.debug("Cannot parse " + tokenOrIdOrName + " into long. " + nfe);
logger.debug("Cannot parse " + tokenOrIdOrName + " into long. " + nfe);
}
}
}

View File

@ -23,7 +23,6 @@ import java.util.List;
import javax.ejb.Local;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.dc.DataCenterIpAddressVO;
@ -41,7 +40,6 @@ import com.cloud.utils.net.NetUtils;
@Local(value = {DataCenterIpAddressDao.class})
@DB
public class DataCenterIpAddressDaoImpl extends GenericDaoBase<DataCenterIpAddressVO, Long> implements DataCenterIpAddressDao {
private static final Logger s_logger = Logger.getLogger(DataCenterIpAddressDaoImpl.class);
private final SearchBuilder<DataCenterIpAddressVO> AllFieldsSearch;
private final GenericSearchBuilder<DataCenterIpAddressVO, Integer> AllIpCount;
@ -144,8 +142,8 @@ public class DataCenterIpAddressDaoImpl extends GenericDaoBase<DataCenterIpAddre
@Override
public void releaseIpAddress(String ipAddress, long dcId, Long instanceId) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Releasing ip address: " + ipAddress + " data center " + dcId);
if (logger.isDebugEnabled()) {
logger.debug("Releasing ip address: " + ipAddress + " data center " + dcId);
}
SearchCriteria<DataCenterIpAddressVO> sc = AllFieldsSearch.create();
sc.setParameters("ip", ipAddress);
@ -162,8 +160,8 @@ public class DataCenterIpAddressDaoImpl extends GenericDaoBase<DataCenterIpAddre
@Override
public void releaseIpAddress(long nicId, String reservationId) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Releasing ip address for reservationId=" + reservationId + ", instance=" + nicId);
if (logger.isDebugEnabled()) {
logger.debug("Releasing ip address for reservationId=" + reservationId + ", instance=" + nicId);
}
SearchCriteria<DataCenterIpAddressVO> sc = AllFieldsSearch.create();
sc.setParameters("instance", nicId);
@ -178,8 +176,8 @@ public class DataCenterIpAddressDaoImpl extends GenericDaoBase<DataCenterIpAddre
@Override
public void releaseIpAddress(long nicId) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Releasing ip address for instance=" + nicId);
if (logger.isDebugEnabled()) {
logger.debug("Releasing ip address for instance=" + nicId);
}
SearchCriteria<DataCenterIpAddressVO> sc = AllFieldsSearch.create();
sc.setParameters("instance", nicId);

View File

@ -25,7 +25,6 @@ import java.util.Map;
import javax.ejb.Local;
import javax.naming.ConfigurationException;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.dc.DataCenterLinkLocalIpAddressVO;
@ -43,7 +42,6 @@ import com.cloud.utils.net.NetUtils;
@Local(value = {DataCenterLinkLocalIpAddressDaoImpl.class})
@DB
public class DataCenterLinkLocalIpAddressDaoImpl extends GenericDaoBase<DataCenterLinkLocalIpAddressVO, Long> implements DataCenterLinkLocalIpAddressDao {
private static final Logger s_logger = Logger.getLogger(DataCenterLinkLocalIpAddressDaoImpl.class);
private final SearchBuilder<DataCenterLinkLocalIpAddressVO> AllFieldsSearch;
private final GenericSearchBuilder<DataCenterLinkLocalIpAddressVO, Integer> AllIpCount;
@ -107,8 +105,8 @@ public class DataCenterLinkLocalIpAddressDaoImpl extends GenericDaoBase<DataCent
@Override
public void releaseIpAddress(String ipAddress, long dcId, long instanceId) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Releasing ip address: " + ipAddress + " data center " + dcId);
if (logger.isDebugEnabled()) {
logger.debug("Releasing ip address: " + ipAddress + " data center " + dcId);
}
SearchCriteria<DataCenterLinkLocalIpAddressVO> sc = AllFieldsSearch.create();
sc.setParameters("ip", ipAddress);

View File

@ -25,7 +25,6 @@ import java.util.List;
import javax.ejb.Local;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.dc.HostPodVO;
@ -40,7 +39,6 @@ import com.cloud.utils.db.TransactionLegacy;
@Component
@Local(value = {HostPodDao.class})
public class HostPodDaoImpl extends GenericDaoBase<HostPodVO, Long> implements HostPodDao {
private static final Logger s_logger = Logger.getLogger(HostPodDaoImpl.class);
protected SearchBuilder<HostPodVO> DataCenterAndNameSearch;
protected SearchBuilder<HostPodVO> DataCenterIdSearch;
@ -102,7 +100,7 @@ public class HostPodDaoImpl extends GenericDaoBase<HostPodVO, Long> implements H
currentPodCidrSubnets.put(podId, cidrPair);
}
} catch (SQLException ex) {
s_logger.warn("DB exception " + ex.getMessage(), ex);
logger.warn("DB exception " + ex.getMessage(), ex);
return null;
}

View File

@ -25,7 +25,6 @@ import java.util.Set;
import javax.ejb.Local;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.domain.Domain;
@ -40,7 +39,6 @@ import com.cloud.utils.db.TransactionLegacy;
@Component
@Local(value = {DomainDao.class})
public class DomainDaoImpl extends GenericDaoBase<DomainVO, Long> implements DomainDao {
private static final Logger s_logger = Logger.getLogger(DomainDaoImpl.class);
protected SearchBuilder<DomainVO> DomainNameLikeSearch;
protected SearchBuilder<DomainVO> ParentDomainNameLikeSearch;
@ -112,7 +110,7 @@ public class DomainDaoImpl extends GenericDaoBase<DomainVO, Long> implements Dom
DomainVO parentDomain = findById(parent);
if (parentDomain == null) {
s_logger.error("Unable to load parent domain: " + parent);
logger.error("Unable to load parent domain: " + parent);
return null;
}
@ -122,7 +120,7 @@ public class DomainDaoImpl extends GenericDaoBase<DomainVO, Long> implements Dom
parentDomain = this.lockRow(parent, true);
if (parentDomain == null) {
s_logger.error("Unable to lock parent domain: " + parent);
logger.error("Unable to lock parent domain: " + parent);
return null;
}
@ -137,7 +135,7 @@ public class DomainDaoImpl extends GenericDaoBase<DomainVO, Long> implements Dom
txn.commit();
return domain;
} catch (Exception e) {
s_logger.error("Unable to create domain due to " + e.getMessage(), e);
logger.error("Unable to create domain due to " + e.getMessage(), e);
txn.rollback();
return null;
}
@ -148,23 +146,23 @@ public class DomainDaoImpl extends GenericDaoBase<DomainVO, Long> implements Dom
public boolean remove(Long id) {
// check for any active users / domains assigned to the given domain id and don't remove the domain if there are any
if (id != null && id.longValue() == Domain.ROOT_DOMAIN) {
s_logger.error("Can not remove domain " + id + " as it is ROOT domain");
logger.error("Can not remove domain " + id + " as it is ROOT domain");
return false;
} else {
if(id == null) {
s_logger.error("Can not remove domain without id.");
logger.error("Can not remove domain without id.");
return false;
}
}
DomainVO domain = findById(id);
if (domain == null) {
s_logger.info("Unable to remove domain as domain " + id + " no longer exists");
logger.info("Unable to remove domain as domain " + id + " no longer exists");
return true;
}
if (domain.getParent() == null) {
s_logger.error("Invalid domain " + id + ", orphan?");
logger.error("Invalid domain " + id + ", orphan?");
return false;
}
@ -177,7 +175,7 @@ public class DomainDaoImpl extends GenericDaoBase<DomainVO, Long> implements Dom
txn.start();
DomainVO parentDomain = super.lockRow(domain.getParent(), true);
if (parentDomain == null) {
s_logger.error("Unable to load parent domain: " + domain.getParent());
logger.error("Unable to load parent domain: " + domain.getParent());
return false;
}
@ -198,7 +196,7 @@ public class DomainDaoImpl extends GenericDaoBase<DomainVO, Long> implements Dom
txn.commit();
} catch (SQLException ex) {
success = false;
s_logger.error("error removing domain: " + id, ex);
logger.error("error removing domain: " + id, ex);
txn.rollback();
}
return success;
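DomainDaoImpl.remove() above locks the parent row inside the transaction before touching the child, so concurrent removals under the same parent serialize instead of racing. A bare-JDBC sketch of the same shape, with illustrative table and column names rather than the actual schema:

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;

public class ParentLockSketch {
    // Lock the parent row, then delete the child, committing or
    // rolling back the two steps as a unit.
    static boolean removeChild(Connection conn, long parentId, long childId) throws SQLException {
        boolean autoCommit = conn.getAutoCommit();
        conn.setAutoCommit(false);
        try (PreparedStatement lock = conn.prepareStatement(
                 "SELECT id FROM domain WHERE id = ? FOR UPDATE");
             PreparedStatement del = conn.prepareStatement(
                 "DELETE FROM domain WHERE id = ?")) {
            lock.setLong(1, parentId);
            if (!lock.executeQuery().next()) {
                conn.rollback();
                return false; // parent no longer exists, nothing to do
            }
            del.setLong(1, childId);
            boolean removed = del.executeUpdate() > 0;
            conn.commit();
            return removed;
        } catch (SQLException ex) {
            conn.rollback();
            throw ex;
        } finally {
            conn.setAutoCommit(autoCommit);
        }
    }
}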

View File

@ -21,7 +21,6 @@ import java.util.List;
import javax.ejb.Local;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.event.Event.State;
@ -36,7 +35,6 @@ import com.cloud.utils.db.TransactionLegacy;
@Component
@Local(value = {EventDao.class})
public class EventDaoImpl extends GenericDaoBase<EventVO, Long> implements EventDao {
public static final Logger s_logger = Logger.getLogger(EventDaoImpl.class.getName());
protected final SearchBuilder<EventVO> CompletedEventSearch;
protected final SearchBuilder<EventVO> ToArchiveOrDeleteEventSearch;

View File

@ -26,7 +26,6 @@ import java.util.TimeZone;
import javax.ejb.Local;
import javax.inject.Inject;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.dc.Vlan;
@ -44,7 +43,6 @@ import com.cloud.utils.exception.CloudRuntimeException;
@Component
@Local(value = {UsageEventDao.class})
public class UsageEventDaoImpl extends GenericDaoBase<UsageEventVO, Long> implements UsageEventDao {
public static final Logger s_logger = Logger.getLogger(UsageEventDaoImpl.class.getName());
private final SearchBuilder<UsageEventVO> latestEventsSearch;
private final SearchBuilder<UsageEventVO> IpeventsSearch;
@ -103,8 +101,8 @@ public class UsageEventDaoImpl extends GenericDaoBase<UsageEventVO, Long> implem
// Copy events from cloud db to usage db
String sql = COPY_EVENTS;
if (recentEventId == 0) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("no recent event date, copying all events");
if (logger.isDebugEnabled()) {
logger.debug("no recent event date, copying all events");
}
sql = COPY_ALL_EVENTS;
}
@ -122,7 +120,7 @@ public class UsageEventDaoImpl extends GenericDaoBase<UsageEventVO, Long> implem
txn.commit();
} catch (Exception ex) {
txn.rollback();
s_logger.error("error copying events from cloud db to usage db", ex);
logger.error("error copying events from cloud db to usage db", ex);
throw new CloudRuntimeException(ex.getMessage());
} finally {
txn.close();
@ -131,8 +129,8 @@ public class UsageEventDaoImpl extends GenericDaoBase<UsageEventVO, Long> implem
// Copy event details from cloud db to usage db
sql = COPY_EVENT_DETAILS;
if (recentEventId == 0) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("no recent event date, copying all event detailss");
if (logger.isDebugEnabled()) {
logger.debug("no recent event date, copying all event detailss");
}
sql = COPY_ALL_EVENT_DETAILS;
}
@ -150,7 +148,7 @@ public class UsageEventDaoImpl extends GenericDaoBase<UsageEventVO, Long> implem
txn.commit();
} catch (Exception ex) {
txn.rollback();
s_logger.error("error copying event details from cloud db to usage db", ex);
logger.error("error copying event details from cloud db to usage db", ex);
throw new CloudRuntimeException(ex.getMessage());
} finally {
txn.close();
@ -173,7 +171,7 @@ public class UsageEventDaoImpl extends GenericDaoBase<UsageEventVO, Long> implem
}
return 0;
} catch (Exception ex) {
s_logger.error("error getting most recent event id", ex);
logger.error("error getting most recent event id", ex);
throw new CloudRuntimeException(ex.getMessage());
} finally {
txn.close();
@ -185,7 +183,7 @@ public class UsageEventDaoImpl extends GenericDaoBase<UsageEventVO, Long> implem
try {
return listLatestEvents(endDate);
} catch (Exception ex) {
s_logger.error("error getting most recent event date", ex);
logger.error("error getting most recent event date", ex);
throw new CloudRuntimeException(ex.getMessage());
} finally {
txn.close();
@ -205,7 +203,7 @@ public class UsageEventDaoImpl extends GenericDaoBase<UsageEventVO, Long> implem
}
return 0;
} catch (Exception ex) {
s_logger.error("error getting max event id", ex);
logger.error("error getting max event id", ex);
throw new CloudRuntimeException(ex.getMessage());
} finally {
txn.close();

View File

@ -21,7 +21,6 @@ import java.util.Map;
import javax.ejb.Local;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.event.UsageEventDetailsVO;
@ -33,7 +32,6 @@ import com.cloud.utils.db.TransactionLegacy;
@Component
@Local(value = {UsageEventDetailsDao.class})
public class UsageEventDetailsDaoImpl extends GenericDaoBase<UsageEventDetailsVO, Long> implements UsageEventDetailsDao {
public static final Logger s_logger = Logger.getLogger(UsageEventDetailsDaoImpl.class.getName());
protected final SearchBuilder<UsageEventDetailsVO> EventDetailsSearch;
protected final SearchBuilder<UsageEventDetailsVO> DetailSearch;

View File

@ -20,7 +20,6 @@ import java.util.List;
import javax.ejb.Local;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.gpu.HostGpuGroupsVO;
@ -32,7 +31,6 @@ import com.cloud.utils.db.SearchCriteria;
@Component
@Local(value = HostGpuGroupsDao.class)
public class HostGpuGroupsDaoImpl extends GenericDaoBase<HostGpuGroupsVO, Long> implements HostGpuGroupsDao {
private static final Logger s_logger = Logger.getLogger(HostGpuGroupsDaoImpl.class);
private final SearchBuilder<HostGpuGroupsVO> _hostIdGroupNameSearch;
private final SearchBuilder<HostGpuGroupsVO> _searchByHostId;

View File

@ -28,7 +28,6 @@ import java.util.Map.Entry;
import javax.ejb.Local;
import javax.inject.Inject;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.agent.api.VgpuTypesInfo;
@ -43,7 +42,6 @@ import com.cloud.utils.exception.CloudRuntimeException;
@Component
@Local(value = VGPUTypesDao.class)
public class VGPUTypesDaoImpl extends GenericDaoBase<VGPUTypesVO, Long> implements VGPUTypesDao {
private static final Logger s_logger = Logger.getLogger(VGPUTypesDaoImpl.class);
private final SearchBuilder<VGPUTypesVO> _searchByGroupId;
private final SearchBuilder<VGPUTypesVO> _searchByGroupIdVGPUType;

View File

@ -31,7 +31,6 @@ import javax.ejb.Local;
import javax.inject.Inject;
import javax.persistence.TableGenerator;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.agent.api.VgpuTypesInfo;
@ -71,9 +70,6 @@ import com.cloud.utils.exception.CloudRuntimeException;
@DB
@TableGenerator(name = "host_req_sq", table = "op_host", pkColumnName = "id", valueColumnName = "sequence", allocationSize = 1)
public class HostDaoImpl extends GenericDaoBase<HostVO, Long> implements HostDao { //FIXME: , ExternalIdDao {
private static final Logger s_logger = Logger.getLogger(HostDaoImpl.class);
private static final Logger status_logger = Logger.getLogger(Status.class);
private static final Logger state_logger = Logger.getLogger(ResourceState.class);
protected SearchBuilder<HostVO> TypePodDcStatusSearch;
@ -289,7 +285,7 @@ public class HostDaoImpl extends GenericDaoBase<HostVO, Long> implements HostDao
try {
HostTransferSearch = _hostTransferDao.createSearchBuilder();
} catch (Throwable e) {
s_logger.debug("error", e);
logger.debug("error", e);
}
HostTransferSearch.and("id", HostTransferSearch.entity().getId(), SearchCriteria.Op.NULL);
UnmanagedDirectConnectSearch.join("hostTransferSearch", HostTransferSearch, HostTransferSearch.entity().getId(), UnmanagedDirectConnectSearch.entity().getId(),
@ -445,8 +441,8 @@ public class HostDaoImpl extends GenericDaoBase<HostVO, Long> implements HostDao
sb.append(" ");
}
if (s_logger.isTraceEnabled()) {
s_logger.trace("Following hosts got reset: " + sb.toString());
if (logger.isTraceEnabled()) {
logger.trace("Following hosts got reset: " + sb.toString());
}
}
@ -505,19 +501,19 @@ public class HostDaoImpl extends GenericDaoBase<HostVO, Long> implements HostDao
TransactionLegacy txn = TransactionLegacy.currentTxn();
txn.start();
if (s_logger.isDebugEnabled()) {
s_logger.debug("Resetting hosts suitable for reconnect");
if (logger.isDebugEnabled()) {
logger.debug("Resetting hosts suitable for reconnect");
}
// reset hosts that are suitable candidates for reconnect
resetHosts(managementServerId, lastPingSecondsAfter);
if (s_logger.isDebugEnabled()) {
s_logger.debug("Completed resetting hosts suitable for reconnect");
if (logger.isDebugEnabled()) {
logger.debug("Completed resetting hosts suitable for reconnect");
}
List<HostVO> assignedHosts = new ArrayList<HostVO>();
if (s_logger.isDebugEnabled()) {
s_logger.debug("Acquiring hosts for clusters already owned by this management server");
if (logger.isDebugEnabled()) {
logger.debug("Acquiring hosts for clusters already owned by this management server");
}
List<Long> clusters = findClustersOwnedByManagementServer(managementServerId);
if (clusters.size() > 0) {
@ -535,17 +531,17 @@ public class HostDaoImpl extends GenericDaoBase<HostVO, Long> implements HostDao
sb.append(host.getId());
sb.append(" ");
}
if (s_logger.isTraceEnabled()) {
s_logger.trace("Following hosts got acquired for clusters already owned: " + sb.toString());
if (logger.isTraceEnabled()) {
logger.trace("Following hosts got acquired for clusters already owned: " + sb.toString());
}
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("Completed acquiring hosts for clusters already owned by this management server");
if (logger.isDebugEnabled()) {
logger.debug("Completed acquiring hosts for clusters already owned by this management server");
}
if (assignedHosts.size() < limit) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Acquiring hosts for clusters not owned by any management server");
if (logger.isDebugEnabled()) {
logger.debug("Acquiring hosts for clusters not owned by any management server");
}
// for remaining hosts not owned by any MS check if they can be owned (by owning full cluster)
clusters = findClustersForHostsNotOwnedByAnyManagementServer();
@ -585,12 +581,12 @@ public class HostDaoImpl extends GenericDaoBase<HostVO, Long> implements HostDao
break;
}
}
if (s_logger.isTraceEnabled()) {
s_logger.trace("Following hosts got acquired from newly owned clusters: " + sb.toString());
if (logger.isTraceEnabled()) {
logger.trace("Following hosts got acquired from newly owned clusters: " + sb.toString());
}
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("Completed acquiring hosts for clusters not owned by any management server");
if (logger.isDebugEnabled()) {
logger.debug("Completed acquiring hosts for clusters not owned by any management server");
}
}
txn.commit();
@ -754,7 +750,7 @@ public class HostDaoImpl extends GenericDaoBase<HostVO, Long> implements HostDao
}
}
} catch (SQLException e) {
s_logger.warn("Exception: ", e);
logger.warn("Exception: ", e);
}
return result;
}
@ -865,15 +861,15 @@ public class HostDaoImpl extends GenericDaoBase<HostVO, Long> implements HostDao
l.add(info);
}
} catch (SQLException e) {
s_logger.debug("SQLException caught", e);
logger.debug("SQLException caught", e);
}
return l;
}
@Override
public long getNextSequence(long hostId) {
if (s_logger.isTraceEnabled()) {
s_logger.trace("getNextSequence(), hostId: " + hostId);
if (logger.isTraceEnabled()) {
logger.trace("getNextSequence(), hostId: " + hostId);
}
TableGenerator tg = _tgs.get("host_req_sq");
@ -953,7 +949,7 @@ public class HostDaoImpl extends GenericDaoBase<HostVO, Long> implements HostDao
HostVO ho = findById(host.getId());
assert ho != null : "How how how? : " + host.getId();
if (status_logger.isDebugEnabled()) {
if (logger.isDebugEnabled()) {
StringBuilder str = new StringBuilder("Unable to update host for event:").append(event.toString());
str.append(". Name=").append(host.getName());
@ -975,7 +971,7 @@ public class HostDaoImpl extends GenericDaoBase<HostVO, Long> implements HostDao
.append(":old update count=")
.append(oldUpdateCount)
.append("]");
status_logger.debug(str.toString());
logger.debug(str.toString());
} else {
StringBuilder msg = new StringBuilder("Agent status update: [");
msg.append("id = " + host.getId());
@ -985,11 +981,11 @@ public class HostDaoImpl extends GenericDaoBase<HostVO, Long> implements HostDao
msg.append("; new status = " + newStatus);
msg.append("; old update count = " + oldUpdateCount);
msg.append("; new update count = " + newUpdateCount + "]");
status_logger.debug(msg.toString());
logger.debug(msg.toString());
}
if (ho.getState() == newStatus) {
status_logger.debug("Host " + ho.getName() + " state has already been updated to " + newStatus);
logger.debug("Host " + ho.getName() + " state has already been updated to " + newStatus);
return true;
}
}
@ -1015,7 +1011,7 @@ public class HostDaoImpl extends GenericDaoBase<HostVO, Long> implements HostDao
int result = update(ub, sc, null);
assert result <= 1 : "How can this update " + result + " rows? ";
if (state_logger.isDebugEnabled() && result == 0) {
if (logger.isDebugEnabled() && result == 0) {
HostVO ho = findById(host.getId());
assert ho != null : "How how how? : " + host.getId();
@ -1025,7 +1021,7 @@ public class HostDaoImpl extends GenericDaoBase<HostVO, Long> implements HostDao
str.append("; old state = " + oldState);
str.append("; event = " + event);
str.append("; new state = " + newState + "]");
state_logger.debug(str.toString());
logger.debug(str.toString());
} else {
StringBuilder msg = new StringBuilder("Resource state update: [");
msg.append("id = " + host.getId());
@ -1033,7 +1029,7 @@ public class HostDaoImpl extends GenericDaoBase<HostVO, Long> implements HostDao
msg.append("; old state = " + oldState);
msg.append("; event = " + event);
msg.append("; new state = " + newState + "]");
state_logger.debug(msg.toString());
logger.debug(msg.toString());
}
return result > 0;
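Note that HostDaoImpl previously kept three loggers (s_logger, plus status_logger on Status.class and state_logger on ResourceState.class); this hunk folds all of them into the single inherited logger, so messages that used to be filterable under the Status and ResourceState categories now appear under the DAO's own category. A minimal sketch of the guarded status logging after the consolidation, with illustrative names and message layout:

import org.apache.log4j.Logger;

public class StatusLogSketch {
    // After the consolidation, status updates log under the DAO's own
    // category instead of a separate Status/ResourceState category.
    private final Logger logger = Logger.getLogger(StatusLogSketch.class);

    void logStatusUpdate(long hostId, String oldStatus, String newStatus, long oldCount, long newCount) {
        if (logger.isDebugEnabled()) {
            StringBuilder msg = new StringBuilder("Agent status update: [");
            msg.append("id = ").append(hostId);
            msg.append("; old status = ").append(oldStatus);
            msg.append("; new status = ").append(newStatus);
            msg.append("; old update count = ").append(oldCount);
            msg.append("; new update count = ").append(newCount).append("]");
            logger.debug(msg.toString());
        }
    }

    public static void main(String[] args) {
        new StatusLogSketch().logStatusUpdate(1L, "Up", "Disconnected", 3L, 4L);
    }
}

Deployments that filtered log4j output on the old Status or ResourceState categories would need to adjust their configuration accordingly.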

View File

@ -20,7 +20,6 @@ import java.util.List;
import javax.ejb.Local;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
@ -33,7 +32,6 @@ import com.cloud.utils.db.SearchCriteria;
@Local(value = HypervisorCapabilitiesDao.class)
public class HypervisorCapabilitiesDaoImpl extends GenericDaoBase<HypervisorCapabilitiesVO, Long> implements HypervisorCapabilitiesDao {
private static final Logger s_logger = Logger.getLogger(HypervisorCapabilitiesDaoImpl.class);
protected final SearchBuilder<HypervisorCapabilitiesVO> HypervisorTypeSearch;
protected final SearchBuilder<HypervisorCapabilitiesVO> HypervisorTypeAndVersionSearch;

View File

@ -21,7 +21,6 @@ import java.util.List;
import javax.ejb.Local;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.utils.db.DB;
@ -33,7 +32,6 @@ import com.cloud.utils.db.TransactionLegacy;
@Component
@Local(value = FirewallRulesCidrsDao.class)
public class FirewallRulesCidrsDaoImpl extends GenericDaoBase<FirewallRulesCidrsVO, Long> implements FirewallRulesCidrsDao {
private static final Logger s_logger = Logger.getLogger(FirewallRulesCidrsDaoImpl.class);
protected final SearchBuilder<FirewallRulesCidrsVO> CidrsSearch;
protected FirewallRulesCidrsDaoImpl() {

View File

@ -26,7 +26,6 @@ import javax.ejb.Local;
import javax.inject.Inject;
import org.apache.cloudstack.resourcedetail.dao.UserIpAddressDetailsDao;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.dc.Vlan.VlanType;
@ -50,7 +49,6 @@ import com.cloud.utils.net.Ip;
@Local(value = {IPAddressDao.class})
@DB
public class IPAddressDaoImpl extends GenericDaoBase<IPAddressVO, Long> implements IPAddressDao {
private static final Logger s_logger = Logger.getLogger(IPAddressDaoImpl.class);
protected SearchBuilder<IPAddressVO> AllFieldsSearch;
protected SearchBuilder<IPAddressVO> VlanDbIdSearchUnallocated;
@ -322,7 +320,7 @@ public class IPAddressDaoImpl extends GenericDaoBase<IPAddressVO, Long> implemen
ipCount = rs.getInt(1);
}
} catch (Exception e) {
s_logger.warn("Exception counting IP addresses", e);
logger.warn("Exception counting IP addresses", e);
}
return ipCount;

View File

@ -23,7 +23,6 @@ import java.util.List;
import javax.ejb.Local;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.utils.db.DB;
@ -38,7 +37,6 @@ import com.cloud.utils.exception.CloudRuntimeException;
@Local(value = PortProfileDao.class)
@DB()
public class PortProfileDaoImpl extends GenericDaoBase<PortProfileVO, Long> implements PortProfileDao {
protected static final Logger s_logger = Logger.getLogger(PortProfileDaoImpl.class);
final SearchBuilder<PortProfileVO> nameSearch;
final SearchBuilder<PortProfileVO> accessVlanSearch;

View File

@ -20,7 +20,6 @@ import java.util.List;
import javax.ejb.Local;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.network.RemoteAccessVpn;
@ -31,7 +30,6 @@ import com.cloud.utils.db.SearchCriteria;
@Component
@Local(value = {RemoteAccessVpnDao.class})
public class RemoteAccessVpnDaoImpl extends GenericDaoBase<RemoteAccessVpnVO, Long> implements RemoteAccessVpnDao {
private static final Logger s_logger = Logger.getLogger(RemoteAccessVpnDaoImpl.class);
private final SearchBuilder<RemoteAccessVpnVO> AllFieldsSearch;

View File

@ -22,7 +22,6 @@ import javax.annotation.PostConstruct;
import javax.ejb.Local;
import javax.inject.Inject;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.utils.db.GenericDaoBase;
@ -33,7 +32,6 @@ import com.cloud.utils.db.SearchCriteria;
@Component
@Local(value = {Site2SiteVpnConnectionDao.class})
public class Site2SiteVpnConnectionDaoImpl extends GenericDaoBase<Site2SiteVpnConnectionVO, Long> implements Site2SiteVpnConnectionDao {
private static final Logger s_logger = Logger.getLogger(Site2SiteVpnConnectionDaoImpl.class);
@Inject
protected IPAddressDao _addrDao;

View File

@ -19,7 +19,6 @@ package com.cloud.network.dao;
import javax.ejb.Local;
import javax.inject.Inject;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.utils.db.GenericDaoBase;
@ -32,7 +31,6 @@ public class Site2SiteVpnGatewayDaoImpl extends GenericDaoBase<Site2SiteVpnGatew
@Inject
protected IPAddressDao _addrDao;
private static final Logger s_logger = Logger.getLogger(Site2SiteVpnGatewayDaoImpl.class);
private final SearchBuilder<Site2SiteVpnGatewayVO> AllFieldsSearch;

View File

@ -20,7 +20,6 @@ import java.util.List;
import javax.ejb.Local;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.network.UserIpv6AddressVO;
@ -34,7 +33,6 @@ import com.cloud.utils.db.SearchCriteria.Op;
@Component
@Local(value = UserIpv6AddressDao.class)
public class UserIpv6AddressDaoImpl extends GenericDaoBase<UserIpv6AddressVO, Long> implements UserIpv6AddressDao {
private static final Logger s_logger = Logger.getLogger(IPAddressDaoImpl.class);
protected final SearchBuilder<UserIpv6AddressVO> AllFieldsSearch;
protected GenericSearchBuilder<UserIpv6AddressVO, Long> CountFreePublicIps;

View File

@ -27,7 +27,6 @@ import java.util.Set;
import javax.ejb.Local;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.network.security.VmRulesetLogVO;
@ -39,7 +38,6 @@ import com.cloud.utils.db.TransactionLegacy;
@Component
@Local(value = {VmRulesetLogDao.class})
public class VmRulesetLogDaoImpl extends GenericDaoBase<VmRulesetLogVO, Long> implements VmRulesetLogDao {
protected static final Logger s_logger = Logger.getLogger(VmRulesetLogDaoImpl.class);
private SearchBuilder<VmRulesetLogVO> VmIdSearch;
private String InsertOrUpdateSQl = "INSERT INTO op_vm_ruleset_log (instance_id, created, logsequence) "
+ " VALUES(?, now(), 1) ON DUPLICATE KEY UPDATE logsequence=logsequence+1";
@ -100,19 +98,19 @@ public class VmRulesetLogDaoImpl extends GenericDaoBase<VmRulesetLogVO, Long> im
} catch (SQLTransactionRollbackException e1) {
if (i < maxTries - 1) {
int delayMs = (i + 1) * 1000;
s_logger.debug("Caught a deadlock exception while inserting security group rule log, retrying in " + delayMs);
logger.debug("Caught a deadlock exception while inserting security group rule log, retrying in " + delayMs);
try {
Thread.sleep(delayMs);
} catch (InterruptedException ie) {
s_logger.debug("[ignored] interupted while inserting security group rule log.");
logger.debug("[ignored] interupted while inserting security group rule logger.");
}
} else
s_logger.warn("Caught another deadlock exception while retrying inserting security group rule log, giving up");
logger.warn("Caught another deadlock exception while retrying inserting security group rule log, giving up");
}
}
if (s_logger.isTraceEnabled()) {
s_logger.trace("Inserted or updated " + numUpdated + " rows");
if (logger.isTraceEnabled()) {
logger.trace("Inserted or updated " + numUpdated + " rows");
}
return numUpdated;
}
@ -136,8 +134,8 @@ public class VmRulesetLogDaoImpl extends GenericDaoBase<VmRulesetLogVO, Long> im
vmIds.add(vmId);
}
int numUpdated = executeWithRetryOnDeadlock(txn, pstmt, vmIds);
if (s_logger.isTraceEnabled()) {
s_logger.trace("Inserted or updated " + numUpdated + " rows");
if (logger.isTraceEnabled()) {
logger.trace("Inserted or updated " + numUpdated + " rows");
}
if (numUpdated > 0)
count += stmtSize;
@ -147,7 +145,7 @@ public class VmRulesetLogDaoImpl extends GenericDaoBase<VmRulesetLogVO, Long> im
}
} catch (SQLException sqe) {
s_logger.warn("Failed to execute multi insert ", sqe);
logger.warn("Failed to execute multi insert ", sqe);
}
return count;
@ -175,10 +173,10 @@ public class VmRulesetLogDaoImpl extends GenericDaoBase<VmRulesetLogVO, Long> im
queryResult = stmtInsert.executeBatch();
txn.commit();
if (s_logger.isTraceEnabled())
s_logger.trace("Updated or inserted " + workItems.size() + " log items");
if (logger.isTraceEnabled())
logger.trace("Updated or inserted " + workItems.size() + " log items");
} catch (SQLException e) {
s_logger.warn("Failed to execute batch update statement for ruleset log: ", e);
logger.warn("Failed to execute batch update statement for ruleset log: ", e);
txn.rollback();
success = false;
}
@ -187,7 +185,7 @@ public class VmRulesetLogDaoImpl extends GenericDaoBase<VmRulesetLogVO, Long> im
workItems.toArray(arrayItems);
for (int i = 0; i < queryResult.length; i++) {
if (queryResult[i] < 0) {
s_logger.debug("Batch query update failed for vm " + arrayItems[i]);
logger.debug("Batch query update failed for vm " + arrayItems[i]);
}
}
}
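The hunk above preserves VmRulesetLogDaoImpl's deadlock-retry loop while renaming the logger; distilled, the pattern is a bounded retry with linear backoff on MySQL deadlocks. A sketch under the assumption that the caller supplies the statement execution; unlike the DAO, which logs a warning and gives up, this version rethrows after the final attempt:

import java.sql.SQLTransactionRollbackException;

public class DeadlockRetrySketch {
    interface SqlWork {
        int run() throws SQLTransactionRollbackException;
    }

    // Retry up to maxTries times, sleeping 1s, 2s, ... between attempts.
    static int executeWithRetryOnDeadlock(SqlWork work, int maxTries) throws SQLTransactionRollbackException {
        SQLTransactionRollbackException last = null;
        for (int i = 0; i < maxTries; i++) {
            try {
                return work.run();
            } catch (SQLTransactionRollbackException e) {
                last = e;
                if (i < maxTries - 1) {
                    try {
                        Thread.sleep((i + 1) * 1000L);
                    } catch (InterruptedException ie) {
                        Thread.currentThread().interrupt(); // preserve interrupt status
                        break;
                    }
                }
            }
        }
        throw last; // give up after the last deadlock
    }
}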

View File

@ -23,7 +23,6 @@ import java.util.List;
import javax.ejb.Local;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.network.vpc.NetworkACLItemCidrsDao;
@ -40,7 +39,6 @@ import com.cloud.utils.db.TransactionLegacy;
@Component
@Local(value = NetworkACLItemCidrsDao.class)
public class NetworkACLItemCidrsDaoImpl extends GenericDaoBase<NetworkACLItemCidrsVO, Long> implements NetworkACLItemCidrsDao {
private static final Logger s_logger = Logger.getLogger(NetworkACLItemCidrsDaoImpl.class);
protected final SearchBuilder<NetworkACLItemCidrsVO> cidrsSearch;
protected NetworkACLItemCidrsDaoImpl() {

View File

@ -21,7 +21,6 @@ import java.util.List;
import javax.ejb.Local;
import javax.inject.Inject;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.network.vpc.NetworkACLItem.State;
@ -40,7 +39,6 @@ import com.cloud.utils.db.TransactionLegacy;
@Local(value = NetworkACLItemDao.class)
@DB()
public class NetworkACLItemDaoImpl extends GenericDaoBase<NetworkACLItemVO, Long> implements NetworkACLItemDao {
private static final Logger s_logger = Logger.getLogger(NetworkACLItemDaoImpl.class);
protected final SearchBuilder<NetworkACLItemVO> AllFieldsSearch;
protected final SearchBuilder<NetworkACLItemVO> NotRevokedSearch;

View File

@ -21,7 +21,6 @@ import java.util.List;
import javax.ejb.Local;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.network.vpc.PrivateIpVO;
@ -38,7 +37,6 @@ import com.cloud.utils.db.TransactionLegacy;
@Local(value = PrivateIpDao.class)
@DB()
public class PrivateIpDaoImpl extends GenericDaoBase<PrivateIpVO, Long> implements PrivateIpDao {
private static final Logger s_logger = Logger.getLogger(PrivateIpDaoImpl.class);
private final SearchBuilder<PrivateIpVO> AllFieldsSearch;
private final GenericSearchBuilder<PrivateIpVO, Integer> CountAllocatedByNetworkId;
@ -92,8 +90,8 @@ public class PrivateIpDaoImpl extends GenericDaoBase<PrivateIpVO, Long> implemen
@Override
public void releaseIpAddress(String ipAddress, long networkId) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Releasing private ip address: " + ipAddress + " network id " + networkId);
if (logger.isDebugEnabled()) {
logger.debug("Releasing private ip address: " + ipAddress + " network id " + networkId);
}
SearchCriteria<PrivateIpVO> sc = AllFieldsSearch.create();
sc.setParameters("ip", ipAddress);

View File

@ -20,7 +20,6 @@ import java.util.List;
import javax.ejb.Local;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.projects.ProjectAccount;
@ -39,7 +38,6 @@ public class ProjectAccountDaoImpl extends GenericDaoBase<ProjectAccountVO, Long
final GenericSearchBuilder<ProjectAccountVO, Long> AdminSearch;
final GenericSearchBuilder<ProjectAccountVO, Long> ProjectAccountSearch;
final GenericSearchBuilder<ProjectAccountVO, Long> CountByRoleSearch;
public static final Logger s_logger = Logger.getLogger(ProjectAccountDaoImpl.class.getName());
protected ProjectAccountDaoImpl() {
AllFieldsSearch = createSearchBuilder();
@ -150,7 +148,7 @@ public class ProjectAccountDaoImpl extends GenericDaoBase<ProjectAccountVO, Long
int rowsRemoved = remove(sc);
if (rowsRemoved > 0) {
s_logger.debug("Removed account id=" + accountId + " from " + rowsRemoved + " projects");
logger.debug("Removed account id=" + accountId + " from " + rowsRemoved + " projects");
}
}

View File

@ -21,7 +21,6 @@ import java.util.List;
import javax.ejb.Local;
import javax.inject.Inject;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.projects.Project;
@ -39,7 +38,6 @@ import com.cloud.utils.db.TransactionLegacy;
@Component
@Local(value = {ProjectDao.class})
public class ProjectDaoImpl extends GenericDaoBase<ProjectVO, Long> implements ProjectDao {
private static final Logger s_logger = Logger.getLogger(ProjectDaoImpl.class);
protected final SearchBuilder<ProjectVO> AllFieldsSearch;
protected GenericSearchBuilder<ProjectVO, Long> CountByDomain;
protected GenericSearchBuilder<ProjectVO, Long> ProjectAccountSearch;
@ -79,7 +77,7 @@ public class ProjectDaoImpl extends GenericDaoBase<ProjectVO, Long> implements P
ProjectVO projectToRemove = findById(projectId);
projectToRemove.setName(null);
if (!update(projectId, projectToRemove)) {
s_logger.warn("Failed to reset name for the project id=" + projectId + " as a part of project remove");
logger.warn("Failed to reset name for the project id=" + projectId + " as a part of project remove");
return false;
}

View File

@ -21,7 +21,6 @@ import java.util.List;
import javax.ejb.Local;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.projects.ProjectInvitation.State;
@ -34,7 +33,6 @@ import com.cloud.utils.db.SearchCriteria;
@Component
@Local(value = {ProjectInvitationDao.class})
public class ProjectInvitationDaoImpl extends GenericDaoBase<ProjectInvitationVO, Long> implements ProjectInvitationDao {
private static final Logger s_logger = Logger.getLogger(ProjectInvitationDaoImpl.class);
protected final SearchBuilder<ProjectInvitationVO> AllFieldsSearch;
protected final SearchBuilder<ProjectInvitationVO> InactiveSearch;
@ -91,7 +89,7 @@ public class ProjectInvitationDaoImpl extends GenericDaoBase<ProjectInvitationVO
for (ProjectInvitationVO invitationToExpire : invitationsToExpire) {
invitationToExpire.setState(State.Expired);
if (!update(invitationToExpire.getId(), invitationToExpire)) {
s_logger.warn("Fail to expire invitation " + invitationToExpire.toString());
logger.warn("Fail to expire invitation " + invitationToExpire.toString());
success = false;
}
}
@ -113,7 +111,7 @@ public class ProjectInvitationDaoImpl extends GenericDaoBase<ProjectInvitationVO
sc.setParameters("id", id);
if (findOneBy(sc) == null) {
s_logger.warn("Unable to find project invitation by id " + id);
logger.warn("Unable to find project invitation by id " + id);
return false;
}
@ -165,7 +163,7 @@ public class ProjectInvitationDaoImpl extends GenericDaoBase<ProjectInvitationVO
sc.setParameters("projectId", projectId);
int numberRemoved = remove(sc);
s_logger.debug("Removed " + numberRemoved + " invitations for project id=" + projectId);
logger.debug("Removed " + numberRemoved + " invitations for project id=" + projectId);
}
}

View File

@ -24,7 +24,6 @@ import java.util.List;
import javax.ejb.Local;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.storage.LaunchPermissionVO;
@ -41,7 +40,6 @@ import com.cloud.utils.exception.CloudRuntimeException;
@Component
@Local(value = {LaunchPermissionDao.class})
public class LaunchPermissionDaoImpl extends GenericDaoBase<LaunchPermissionVO, Long> implements LaunchPermissionDao {
private static final Logger s_logger = Logger.getLogger(LaunchPermissionDaoImpl.class);
private static final String REMOVE_LAUNCH_PERMISSION = "DELETE FROM `cloud`.`launch_permission`" + " WHERE template_id = ? AND account_id = ?";
private static final String LIST_PERMITTED_TEMPLATES =
@ -82,7 +80,7 @@ public class LaunchPermissionDaoImpl extends GenericDaoBase<LaunchPermissionVO,
txn.commit();
} catch (Exception e) {
txn.rollback();
s_logger.warn("Error removing launch permissions", e);
logger.warn("Error removing launch permissions", e);
throw new CloudRuntimeException("Error removing launch permissions", e);
}
}
@ -147,7 +145,7 @@ public class LaunchPermissionDaoImpl extends GenericDaoBase<LaunchPermissionVO,
permittedTemplates.add(template);
}
} catch (Exception e) {
s_logger.warn("Error listing permitted templates", e);
logger.warn("Error listing permitted templates", e);
}
return permittedTemplates;
}

View File

@ -24,7 +24,6 @@ import javax.annotation.PostConstruct;
import javax.ejb.Local;
import javax.inject.Inject;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.server.ResourceTag.ResourceObjectType;
@ -53,7 +52,6 @@ import com.cloud.vm.dao.VMInstanceDao;
@Component
@Local(value = {SnapshotDao.class})
public class SnapshotDaoImpl extends GenericDaoBase<SnapshotVO, Long> implements SnapshotDao {
public static final Logger s_logger = Logger.getLogger(SnapshotDaoImpl.class.getName());
// TODO: we should remove these direct sqls
private static final String GET_LAST_SNAPSHOT =
"SELECT snapshots.id FROM snapshot_store_ref, snapshots where snapshots.id = snapshot_store_ref.snapshot_id AND snapshosts.volume_id = ? AND snapshot_store_ref.role = ? ORDER BY created DESC";
@ -208,7 +206,7 @@ public class SnapshotDaoImpl extends GenericDaoBase<SnapshotVO, Long> implements
return rs.getLong(1);
}
} catch (Exception ex) {
s_logger.info("[ignored]"
logger.info("[ignored]"
+ "caught something while getting sec. host id: " + ex.getLocalizedMessage());
}
return null;
@ -228,7 +226,7 @@ public class SnapshotDaoImpl extends GenericDaoBase<SnapshotVO, Long> implements
return rs.getLong(1);
}
} catch (Exception ex) {
s_logger.error("error getting last snapshot", ex);
logger.error("error getting last snapshot", ex);
}
return 0;
}
@ -246,7 +244,7 @@ public class SnapshotDaoImpl extends GenericDaoBase<SnapshotVO, Long> implements
pstmt.executeUpdate();
return 1;
} catch (Exception ex) {
s_logger.error("error getting last snapshot", ex);
logger.error("error getting last snapshot", ex);
}
return 0;
}
@ -263,7 +261,7 @@ public class SnapshotDaoImpl extends GenericDaoBase<SnapshotVO, Long> implements
pstmt.executeUpdate();
return 1;
} catch (Exception ex) {
s_logger.error("error set secondary storage host id", ex);
logger.error("error set secondary storage host id", ex);
}
return 0;
}

View File

@ -24,7 +24,6 @@ import java.util.List;
import javax.ejb.Local;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.host.Status;
@ -38,7 +37,6 @@ import com.cloud.utils.db.TransactionLegacy;
@Component
@Local(value = {StoragePoolHostDao.class})
public class StoragePoolHostDaoImpl extends GenericDaoBase<StoragePoolHostVO, Long> implements StoragePoolHostDao {
public static final Logger s_logger = Logger.getLogger(StoragePoolHostDaoImpl.class.getName());
protected final SearchBuilder<StoragePoolHostVO> PoolSearch;
protected final SearchBuilder<StoragePoolHostVO> HostSearch;
@ -115,10 +113,10 @@ public class StoragePoolHostDaoImpl extends GenericDaoBase<StoragePoolHostVO, Lo
result.add(findById(id));
}
}catch (SQLException e) {
s_logger.warn("listByHostStatus:Exception: ", e);
logger.warn("listByHostStatus:Exception: ", e);
}
} catch (Exception e) {
s_logger.warn("listByHostStatus:Exception: ", e);
logger.warn("listByHostStatus:Exception: ", e);
}
return result;
}
@ -138,7 +136,7 @@ public class StoragePoolHostDaoImpl extends GenericDaoBase<StoragePoolHostVO, Lo
l.add(new Pair<Long, Integer>(rs.getLong(1), rs.getInt(2)));
}
} catch (SQLException e) {
s_logger.debug("SQLException: ", e);
logger.debug("SQLException: ", e);
}
return l;
}

View File

@ -20,7 +20,6 @@ import java.util.List;
import javax.ejb.Local;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.storage.Upload.Mode;
@ -33,7 +32,6 @@ import com.cloud.utils.db.SearchCriteria;
@Component
@Local(value = {UploadDao.class})
public class UploadDaoImpl extends GenericDaoBase<UploadVO, Long> implements UploadDao {
public static final Logger s_logger = Logger.getLogger(UploadDaoImpl.class.getName());
protected final SearchBuilder<UploadVO> typeUploadStatusSearch;
protected final SearchBuilder<UploadVO> typeHostAndUploadStatusSearch;
protected final SearchBuilder<UploadVO> typeModeAndStatusSearch;

View File

@ -30,7 +30,6 @@ import javax.naming.ConfigurationException;
import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.dc.dao.DataCenterDao;
@ -65,7 +64,6 @@ import com.cloud.utils.exception.CloudRuntimeException;
@Component
@Local(value = {VMTemplateDao.class})
public class VMTemplateDaoImpl extends GenericDaoBase<VMTemplateVO, Long> implements VMTemplateDao {
private static final Logger s_logger = Logger.getLogger(VMTemplateDaoImpl.class);
@Inject
VMTemplateZoneDao _templateZoneDao;
@ -234,7 +232,7 @@ public class VMTemplateDaoImpl extends GenericDaoBase<VMTemplateVO, Long> implem
l.add(rs.getLong(1));
}
} catch (SQLException e) {
s_logger.debug("Exception: ", e);
logger.debug("Exception: ", e);
}
return l;
}
@ -288,7 +286,7 @@ public class VMTemplateDaoImpl extends GenericDaoBase<VMTemplateVO, Long> implem
routerTmpltName = (String)params.get("routing.uniquename");
s_logger.debug("Found parameter routing unique name " + routerTmpltName);
logger.debug("Found parameter routing unique name " + routerTmpltName);
if (routerTmpltName == null) {
routerTmpltName = "routing";
}
@ -297,8 +295,8 @@ public class VMTemplateDaoImpl extends GenericDaoBase<VMTemplateVO, Long> implem
if (consoleProxyTmpltName == null) {
consoleProxyTmpltName = "routing";
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("Use console proxy template : " + consoleProxyTmpltName);
if (logger.isDebugEnabled()) {
logger.debug("Use console proxy template : " + consoleProxyTmpltName);
}
UniqueNameSearch = createSearchBuilder();
@ -512,10 +510,10 @@ public class VMTemplateDaoImpl extends GenericDaoBase<VMTemplateVO, Long> implem
* (rs.next()) { Pair<Long, Long> templateZonePair = new Pair<Long,
* Long>(rs.getLong(1), -1L); templateZonePairList.add(templateZonePair); }
*
* } catch (Exception e) { s_logger.warn("Error listing templates", e); }
* } catch (Exception e) { logger.warn("Error listing templates", e); }
* finally { try { if (rs != null) { rs.close(); } if (pstmt != null) {
* pstmt.close(); } txn.commit(); } catch (SQLException sqle) {
* s_logger.warn("Error in cleaning up", sqle); } }
* logger.warn("Error in cleaning up", sqle); } }
*
* return templateZonePairList; }
*
@ -696,9 +694,9 @@ public class VMTemplateDaoImpl extends GenericDaoBase<VMTemplateVO, Long> implem
* null)); continue; } else if (keyword == null && name == null){
* templateZonePairList.add(new Pair<Long,Long>(publicIsos.get(i).getId(),
* null)); } } } } } catch (Exception e) {
* s_logger.warn("Error listing templates", e); } finally { try { if (rs !=
* logger.warn("Error listing templates", e); } finally { try { if (rs !=
* null) { rs.close(); } if (pstmt != null) { pstmt.close(); } txn.commit();
* } catch( SQLException sqle) { s_logger.warn("Error in cleaning up",
* } catch( SQLException sqle) { logger.warn("Error in cleaning up",
* sqle); } }
*
* return templateZonePairList; }
@ -1021,7 +1019,7 @@ public class VMTemplateDaoImpl extends GenericDaoBase<VMTemplateVO, Long> implem
* while (rs.next()) { final Pair<Long, Long> templateZonePair = new
* Pair<Long, Long>( rs.getLong(1), -1L);
* templateZonePairList.add(templateZonePair); } txn.commit(); } catch
* (Exception e) { s_logger.warn("Error listing S3 templates", e); if (txn
* (Exception e) { logger.warn("Error listing S3 templates", e); if (txn
* != null) { txn.rollback(); } } finally { closeResources(pstmt, rs); if
* (txn != null) { txn.close(); } }
*
@ -1050,7 +1048,7 @@ public class VMTemplateDaoImpl extends GenericDaoBase<VMTemplateVO, Long> implem
builder.set(vo, "updated", new Date());
int rows = update((VMTemplateVO)vo, sc);
if (rows == 0 && s_logger.isDebugEnabled()) {
if (rows == 0 && logger.isDebugEnabled()) {
VMTemplateVO dbTemplate = findByIdIncludingRemoved(vo.getId());
if (dbTemplate != null) {
StringBuilder str = new StringBuilder("Unable to update ").append(vo.toString());
@ -1083,7 +1081,7 @@ public class VMTemplateDaoImpl extends GenericDaoBase<VMTemplateVO, Long> implem
.append("; updatedTime=")
.append(oldUpdatedTime);
} else {
s_logger.debug("Unable to update template: id=" + vo.getId() + ", as no such template exists in the database anymore");
logger.debug("Unable to update template: id=" + vo.getId() + ", as no such template exists in the database anymore");
}
}
return rows > 0;

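The isDebugEnabled() guards in the hunks above survive the rename unchanged; only the receiver moves from s_logger to logger. The guard matters because these debug messages are assembled by string concatenation and StringBuilder appends, work that would otherwise run even with DEBUG disabled. A fragment illustrating the difference, assuming logger and id are in scope:

// Guarded: the concatenation (and any toString() calls) only runs
// when DEBUG is actually enabled for this logger.
if (logger.isDebugEnabled()) {
    logger.debug("Unable to update template: id=" + id);
}

// Unguarded equivalent: the message is always built, then possibly discarded.
logger.debug("Unable to update template: id=" + id);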
View File

@ -29,7 +29,6 @@ import javax.ejb.Local;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore;
@ -53,7 +52,6 @@ import com.cloud.utils.db.UpdateBuilder;
@Component
@Local(value = {VMTemplateHostDao.class})
public class VMTemplateHostDaoImpl extends GenericDaoBase<VMTemplateHostVO, Long> implements VMTemplateHostDao {
public static final Logger s_logger = Logger.getLogger(VMTemplateHostDaoImpl.class.getName());
@Inject
HostDao _hostDao;
protected final SearchBuilder<VMTemplateHostVO> HostSearch;
@ -172,7 +170,7 @@ public class VMTemplateHostDaoImpl extends GenericDaoBase<VMTemplateHostVO, Long
pstmt.setLong(8, instance.getTemplateId());
pstmt.executeUpdate();
} catch (Exception e) {
s_logger.warn("Exception: ", e);
logger.warn("Exception: ", e);
}
}
@ -241,7 +239,7 @@ public class VMTemplateHostDaoImpl extends GenericDaoBase<VMTemplateHostVO, Long
result.add(toEntityBean(rs, false));
}
} catch (Exception e) {
s_logger.warn("Exception: ", e);
logger.warn("Exception: ", e);
}
return result;
}
@ -273,10 +271,10 @@ public class VMTemplateHostDaoImpl extends GenericDaoBase<VMTemplateHostVO, Long
result.add(findById(id));
}
}catch (SQLException e) {
s_logger.warn("listByTemplateStatus:Exception: "+e.getMessage(), e);
logger.warn("listByTemplateStatus:Exception: "+e.getMessage(), e);
}
} catch (Exception e) {
s_logger.warn("listByTemplateStatus:Exception: "+e.getMessage(), e);
logger.warn("listByTemplateStatus:Exception: "+e.getMessage(), e);
}
return result;
@ -391,7 +389,7 @@ public class VMTemplateHostDaoImpl extends GenericDaoBase<VMTemplateHostVO, Long
builder.set(vo, "updated", new Date());
int rows = update((VMTemplateHostVO)vo, sc);
if (rows == 0 && s_logger.isDebugEnabled()) {
if (rows == 0 && logger.isDebugEnabled()) {
VMTemplateHostVO dbVol = findByIdIncludingRemoved(templateHost.getId());
if (dbVol != null) {
StringBuilder str = new StringBuilder("Unable to update ").append(vo.toString());
@ -424,7 +422,7 @@ public class VMTemplateHostDaoImpl extends GenericDaoBase<VMTemplateHostVO, Long
.append("; updatedTime=")
.append(oldUpdatedTime);
} else {
s_logger.debug("Unable to update objectIndatastore: id=" + templateHost.getId() + ", as there is no such object exists in the database anymore");
logger.debug("Unable to update objectIndatastore: id=" + templateHost.getId() + ", as there is no such object exists in the database anymore");
}
}
return rows > 0;

View File

@ -24,7 +24,6 @@ import java.util.List;
import javax.ejb.Local;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore;
@ -44,7 +43,6 @@ import com.cloud.utils.db.UpdateBuilder;
@Component
@Local(value = {VMTemplatePoolDao.class})
public class VMTemplatePoolDaoImpl extends GenericDaoBase<VMTemplateStoragePoolVO, Long> implements VMTemplatePoolDao {
public static final Logger s_logger = Logger.getLogger(VMTemplatePoolDaoImpl.class.getName());
protected final SearchBuilder<VMTemplateStoragePoolVO> PoolSearch;
protected final SearchBuilder<VMTemplateStoragePoolVO> TemplateSearch;
@ -160,7 +158,7 @@ public class VMTemplatePoolDaoImpl extends GenericDaoBase<VMTemplateStoragePoolV
result.add(toEntityBean(rs, false));
}
} catch (Exception e) {
s_logger.warn("Exception: ", e);
logger.warn("Exception: ", e);
}
return result;
@ -184,10 +182,10 @@ public class VMTemplatePoolDaoImpl extends GenericDaoBase<VMTemplateStoragePoolV
result.add(findById(id));
}
}catch (Exception e) {
s_logger.warn("Exception: ", e);
logger.warn("Exception: ", e);
}
} catch (Exception e) {
s_logger.warn("Exception: ", e);
logger.warn("Exception: ", e);
}
return result;
@ -208,10 +206,10 @@ public class VMTemplatePoolDaoImpl extends GenericDaoBase<VMTemplateStoragePoolV
result.add(findById(id));
}
}catch (Exception e) {
s_logger.warn("Exception: ", e);
logger.warn("Exception: ", e);
}
} catch (Exception e) {
s_logger.warn("Exception: ", e);
logger.warn("Exception: ", e);
}
return result;
@ -259,7 +257,7 @@ public class VMTemplatePoolDaoImpl extends GenericDaoBase<VMTemplateStoragePoolV
builder.set(vo, "updated", new Date());
int rows = update((VMTemplateStoragePoolVO)vo, sc);
if (rows == 0 && s_logger.isDebugEnabled()) {
if (rows == 0 && logger.isDebugEnabled()) {
VMTemplateStoragePoolVO dbVol = findByIdIncludingRemoved(templatePool.getId());
if (dbVol != null) {
StringBuilder str = new StringBuilder("Unable to update ").append(vo.toString());
@ -292,7 +290,7 @@ public class VMTemplatePoolDaoImpl extends GenericDaoBase<VMTemplateStoragePoolV
.append("; updatedTime=")
.append(oldUpdatedTime);
} else {
s_logger.debug("Unable to update objectIndatastore: id=" + templatePool.getId() + ", as there is no such object exists in the database anymore");
logger.debug("Unable to update objectIndatastore: id=" + templatePool.getId() + ", as there is no such object exists in the database anymore");
}
}
return rows > 0;

View File

@ -20,7 +20,6 @@ import java.util.List;
import javax.ejb.Local;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.storage.VMTemplateZoneVO;
@ -32,7 +31,6 @@ import com.cloud.utils.db.TransactionLegacy;
@Component
@Local(value = {VMTemplateZoneDao.class})
public class VMTemplateZoneDaoImpl extends GenericDaoBase<VMTemplateZoneVO, Long> implements VMTemplateZoneDao {
public static final Logger s_logger = Logger.getLogger(VMTemplateZoneDaoImpl.class.getName());
protected final SearchBuilder<VMTemplateZoneVO> ZoneSearch;
protected final SearchBuilder<VMTemplateZoneVO> TemplateSearch;

View File

@ -26,7 +26,6 @@ import java.util.List;
import javax.ejb.Local;
import javax.inject.Inject;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.exception.InvalidParameterValueException;
@ -55,7 +54,6 @@ import com.cloud.utils.exception.CloudRuntimeException;
@Component
@Local(value = VolumeDao.class)
public class VolumeDaoImpl extends GenericDaoBase<VolumeVO, Long> implements VolumeDao {
private static final Logger s_logger = Logger.getLogger(VolumeDaoImpl.class);
protected final SearchBuilder<VolumeVO> DetachedAccountIdSearch;
protected final SearchBuilder<VolumeVO> TemplateZoneSearch;
protected final GenericSearchBuilder<VolumeVO, SumCount> TotalSizeByPoolSearch;
@ -268,7 +266,7 @@ public class VolumeDaoImpl extends GenericDaoBase<VolumeVO, Long> implements Vol
else if (scope == ScopeType.ZONE)
sql = SELECT_HYPERTYPE_FROM_ZONE_VOLUME;
else
s_logger.error("Unhandled scope type '" + scope + "' when running getHypervisorType on volume id " + volumeId);
logger.error("Unhandled scope type '" + scope + "' when running getHypervisorType on volume id " + volumeId);
pstmt = txn.prepareAutoCloseStatement(sql);
pstmt.setLong(1, volumeId);
@ -297,7 +295,7 @@ public class VolumeDaoImpl extends GenericDaoBase<VolumeVO, Long> implements Vol
} else if (type.equals(HypervisorType.VMware)) {
return ImageFormat.OVA;
} else {
s_logger.warn("Do not support hypervisor " + type.toString());
logger.warn("Do not support hypervisor " + type.toString());
return null;
}
}
@ -483,7 +481,7 @@ public class VolumeDaoImpl extends GenericDaoBase<VolumeVO, Long> implements Vol
builder.set(vo, "updated", new Date());
int rows = update((VolumeVO)vo, sc);
if (rows == 0 && s_logger.isDebugEnabled()) {
if (rows == 0 && logger.isDebugEnabled()) {
VolumeVO dbVol = findByIdIncludingRemoved(vo.getId());
if (dbVol != null) {
StringBuilder str = new StringBuilder("Unable to update ").append(vo.toString());
@ -516,7 +514,7 @@ public class VolumeDaoImpl extends GenericDaoBase<VolumeVO, Long> implements Vol
.append("; updatedTime=")
.append(oldUpdatedTime);
} else {
s_logger.debug("Unable to update volume: id=" + vo.getId() + ", as there is no such volume exists in the database anymore");
logger.debug("Unable to update volume: id=" + vo.getId() + ", as there is no such volume exists in the database anymore");
}
}
return rows > 0;

View File

@ -21,7 +21,6 @@ import java.util.List;
import javax.ejb.Local;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore;
@ -38,7 +37,6 @@ import com.cloud.utils.db.UpdateBuilder;
@Component
@Local(value = {VolumeHostDao.class})
public class VolumeHostDaoImpl extends GenericDaoBase<VolumeHostVO, Long> implements VolumeHostDao {
private static final Logger s_logger = Logger.getLogger(VolumeHostDaoImpl.class);
protected final SearchBuilder<VolumeHostVO> HostVolumeSearch;
protected final SearchBuilder<VolumeHostVO> ZoneVolumeSearch;
protected final SearchBuilder<VolumeHostVO> VolumeSearch;
@ -141,7 +139,7 @@ public class VolumeHostDaoImpl extends GenericDaoBase<VolumeHostVO, Long> implem
builder.set(vo, "updated", new Date());
int rows = update((VolumeHostVO)vo, sc);
if (rows == 0 && s_logger.isDebugEnabled()) {
if (rows == 0 && logger.isDebugEnabled()) {
VolumeHostVO dbVol = findByIdIncludingRemoved(volHost.getId());
if (dbVol != null) {
StringBuilder str = new StringBuilder("Unable to update ").append(vo.toString());
@ -174,7 +172,7 @@ public class VolumeHostDaoImpl extends GenericDaoBase<VolumeHostVO, Long> implem
.append("; updatedTime=")
.append(oldUpdatedTime);
} else {
s_logger.debug("Unable to update objectIndatastore: id=" + volHost.getId() + ", as there is no such object exists in the database anymore");
logger.debug("Unable to update objectIndatastore: id=" + volHost.getId() + ", as there is no such object exists in the database anymore");
}
}
return rows > 0;

View File

@ -24,7 +24,6 @@ import java.sql.SQLException;
import javax.ejb.Local;
import javax.inject.Inject;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.maint.Version;
@ -39,7 +38,6 @@ import com.cloud.utils.exception.CloudRuntimeException;
@Component
@Local(value = {SystemIntegrityChecker.class})
public class DatabaseIntegrityChecker extends AdapterBase implements SystemIntegrityChecker {
private static final Logger s_logger = Logger.getLogger(DatabaseIntegrityChecker.class);
@Inject
VersionDao _dao;
@ -103,32 +101,32 @@ public class DatabaseIntegrityChecker extends AdapterBase implements SystemInteg
}
catch (Exception e)
{
s_logger.error("checkDuplicateHostWithTheSameLocalStorage: Exception :" + e.getMessage());
logger.error("checkDuplicateHostWithTheSameLocalStorage: Exception :" + e.getMessage());
throw new CloudRuntimeException("checkDuplicateHostWithTheSameLocalStorage: Exception :" + e.getMessage(),e);
}
}
catch (Exception e)
{
s_logger.error("checkDuplicateHostWithTheSameLocalStorage: Exception :" + e.getMessage());
logger.error("checkDuplicateHostWithTheSameLocalStorage: Exception :" + e.getMessage());
throw new CloudRuntimeException("checkDuplicateHostWithTheSameLocalStorage: Exception :" + e.getMessage(),e);
}
}
if (noDuplicate) {
s_logger.debug("No duplicate hosts with the same local storage found in database");
logger.debug("No duplicate hosts with the same local storage found in database");
} else {
s_logger.error(helpInfo.toString());
logger.error(helpInfo.toString());
}
txn.commit();
return noDuplicate;
}catch (Exception e)
{
s_logger.error("checkDuplicateHostWithTheSameLocalStorage: Exception :" + e.getMessage());
logger.error("checkDuplicateHostWithTheSameLocalStorage: Exception :" + e.getMessage());
throw new CloudRuntimeException("checkDuplicateHostWithTheSameLocalStorage: Exception :" + e.getMessage(),e);
}
}
catch (Exception e)
{
s_logger.error("checkDuplicateHostWithTheSameLocalStorage: Exception :" + e.getMessage());
logger.error("checkDuplicateHostWithTheSameLocalStorage: Exception :" + e.getMessage());
throw new CloudRuntimeException("checkDuplicateHostWithTheSameLocalStorage: Exception :" + e.getMessage(),e);
}
finally
@ -139,7 +137,7 @@ public class DatabaseIntegrityChecker extends AdapterBase implements SystemInteg
}
}catch(Exception e)
{
s_logger.error("checkDuplicateHostWithTheSameLocalStorage: Exception:"+ e.getMessage());
logger.error("checkDuplicateHostWithTheSameLocalStorage: Exception:"+ e.getMessage());
}
}
}
@ -152,7 +150,7 @@ public class DatabaseIntegrityChecker extends AdapterBase implements SystemInteg
String tableName = rs.getString(1);
if (tableName.equalsIgnoreCase("usage_event") || tableName.equalsIgnoreCase("usage_port_forwarding") || tableName.equalsIgnoreCase("usage_network_offering")) {
num++;
s_logger.debug("Checking 21to22PremiumUprage table " + tableName + " found");
logger.debug("Checking 21to22PremiumUprage table " + tableName + " found");
}
if (num == 3) {
return true;
@ -168,7 +166,7 @@ public class DatabaseIntegrityChecker extends AdapterBase implements SystemInteg
boolean found = false;
while (rs.next()) {
if (column.equalsIgnoreCase(rs.getString(1))) {
s_logger.debug(String.format("Column %1$s.%2$s.%3$s found", dbName, tableName, column));
logger.debug(String.format("Column %1$s.%2$s.%3$s found", dbName, tableName, column));
found = true;
break;
}
@ -225,33 +223,33 @@ public class DatabaseIntegrityChecker extends AdapterBase implements SystemInteg
}
}
if (!hasUsage) {
s_logger.debug("No cloud_usage found in database, no need to check missed premium upgrade");
logger.debug("No cloud_usage found in database, no need to check missed premium upgrade");
txn.commit();
return true;
}
if (!check21to22PremiumUprage(conn)) {
s_logger.error("21to22 premium upgrade missed");
logger.error("21to22 premium upgrade missed");
txn.commit();
return false;
}
if (!check221to222PremiumUprage(conn)) {
s_logger.error("221to222 premium upgrade missed");
logger.error("221to222 premium upgrade missed");
txn.commit();
return false;
}
if (!check222to224PremiumUpgrade(conn)) {
s_logger.error("222to224 premium upgrade missed");
logger.error("222to224 premium upgrade missed");
txn.commit();
return false;
}
txn.commit();
return true;
} catch (Exception e) {
s_logger.error("checkMissedPremiumUpgradeFor228: Exception:" + e.getMessage());
logger.error("checkMissedPremiumUpgradeFor228: Exception:" + e.getMessage());
throw new CloudRuntimeException("checkMissedPremiumUpgradeFor228: Exception:" + e.getMessage(), e);
}
}catch (Exception e) {
s_logger.error("checkMissedPremiumUpgradeFor228: Exception:"+ e.getMessage());
logger.error("checkMissedPremiumUpgradeFor228: Exception:"+ e.getMessage());
throw new CloudRuntimeException("checkMissedPremiumUpgradeFor228: Exception:" + e.getMessage(),e);
}
finally
@ -262,7 +260,7 @@ public class DatabaseIntegrityChecker extends AdapterBase implements SystemInteg
}
}catch(Exception e)
{
s_logger.error("checkMissedPremiumUpgradeFor228: Exception:"+ e.getMessage());
logger.error("checkMissedPremiumUpgradeFor228: Exception:"+ e.getMessage());
}
}
}
@ -271,19 +269,19 @@ public class DatabaseIntegrityChecker extends AdapterBase implements SystemInteg
public void check() {
GlobalLock lock = GlobalLock.getInternLock("DatabaseIntegrity");
try {
s_logger.info("Grabbing lock to check for database integrity.");
logger.info("Grabbing lock to check for database integrity.");
if (!lock.lock(20 * 60)) {
throw new CloudRuntimeException("Unable to acquire lock to check for database integrity.");
}
try {
s_logger.info("Performing database integrity check");
logger.info("Performing database integrity check");
if (!checkDuplicateHostWithTheSameLocalStorage()) {
throw new CloudRuntimeException("checkDuplicateHostWithTheSameLocalStorage detected error");
}
if (!checkMissedPremiumUpgradeFor228()) {
s_logger.error("Your current database version is 2.2.8, management server detected some missed premium upgrade, please contact CloudStack support and attach log file. Thank you!");
logger.error("Your current database version is 2.2.8, management server detected some missed premium upgrade, please contact CloudStack support and attach log file. Thank you!");
throw new CloudRuntimeException("Detected missed premium upgrade");
}
} finally {
@ -299,7 +297,7 @@ public class DatabaseIntegrityChecker extends AdapterBase implements SystemInteg
try {
check();
} catch (Exception e) {
s_logger.error("System integrity check exception", e);
logger.error("System integrity check exception", e);
System.exit(1);
}
return true;

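The check() method above serializes the integrity check behind a named GlobalLock before running the individual checks. A condensed sketch of that locking shape; the unlock() and releaseRef() calls are an assumption here, since the hunk elides the finally blocks that would perform the cleanup:

GlobalLock lock = GlobalLock.getInternLock("DatabaseIntegrity");
try {
    logger.info("Grabbing lock to check for database integrity.");
    if (!lock.lock(20 * 60)) { // wait up to 20 minutes for the lock
        throw new CloudRuntimeException("Unable to acquire lock to check for database integrity.");
    }
    try {
        // ... run checkDuplicateHostWithTheSameLocalStorage() and
        // checkMissedPremiumUpgradeFor228() as in the hunk above ...
    } finally {
        lock.unlock(); // assumed cleanup, not shown in the diff
    }
} finally {
    lock.releaseRef(); // assumed cleanup, not shown in the diff
}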
View File

@ -24,7 +24,6 @@ import java.util.List;
import javax.ejb.Local;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.upgrade.dao.VersionVO.Step;
@ -42,7 +41,6 @@ import com.cloud.utils.exception.CloudRuntimeException;
@Local(value = VersionDao.class)
@DB()
public class VersionDaoImpl extends GenericDaoBase<VersionVO, Long> implements VersionDao {
private static final Logger s_logger = Logger.getLogger(VersionDaoImpl.class);
final GenericSearchBuilder<VersionVO, String> CurrentVersionSearch;
final SearchBuilder<VersionVO> AllFieldsSearch;
@ -76,7 +74,7 @@ public class VersionDaoImpl extends GenericDaoBase<VersionVO, Long> implements V
@DB
public String getCurrentVersion() {
try (Connection conn = TransactionLegacy.getStandaloneConnection();) {
s_logger.debug("Checking to see if the database is at a version before it was the version table is created");
logger.debug("Checking to see if the database is at a version before it was the version table is created");
try (
PreparedStatement pstmt = conn.prepareStatement("SHOW TABLES LIKE 'version'");
@ -91,8 +89,8 @@ public class VersionDaoImpl extends GenericDaoBase<VersionVO, Long> implements V
pstmt_domain.executeQuery();
return "2.1.8";
} catch (final SQLException e) {
s_logger.debug("Assuming the exception means domain_id is not there.");
s_logger.debug("No version table and no nics table, returning 2.1.7");
logger.debug("Assuming the exception means domain_id is not there.");
logger.debug("No version table and no nics table, returning 2.1.7");
return "2.1.7";
}
} else {
@ -100,7 +98,7 @@ public class VersionDaoImpl extends GenericDaoBase<VersionVO, Long> implements V
ResultSet rs_static_nat = pstmt_static_nat.executeQuery();){
return "2.2.1";
} catch (final SQLException e) {
s_logger.debug("Assuming the exception means static_nat field doesn't exist in firewall_rules table, returning version 2.2.2");
logger.debug("Assuming the exception means static_nat field doesn't exist in firewall_rules table, returning version 2.2.2");
return "2.2.2";
}
}
@ -127,7 +125,7 @@ public class VersionDaoImpl extends GenericDaoBase<VersionVO, Long> implements V
}
// Use nics table information and is_static_nat field from firewall_rules table to determine version information
s_logger.debug("Version table exists, but it's empty; have to confirm that version is 2.2.2");
logger.debug("Version table exists, but it's empty; have to confirm that version is 2.2.2");
try (PreparedStatement pstmt = conn.prepareStatement("SHOW TABLES LIKE 'nics'");
ResultSet rs = pstmt.executeQuery();){
if (!rs.next()) {
@ -138,7 +136,7 @@ public class VersionDaoImpl extends GenericDaoBase<VersionVO, Long> implements V
throw new CloudRuntimeException("Unable to determine the current version, version table exists and empty, " +
"nics table doesn't exist, is_static_nat field exists in firewall_rules table");
} catch (final SQLException e) {
s_logger.debug("Assuming the exception means static_nat field doesn't exist in firewall_rules table, returning version 2.2.2");
logger.debug("Assuming the exception means static_nat field doesn't exist in firewall_rules table, returning version 2.2.2");
return "2.2.2";
}
}

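getCurrentVersion() infers pre-version-table releases by probing the schema: it checks whether a table or column exists and maps each outcome to a version string, treating the SQLException itself as the signal. A compact, self-contained sketch of the probe idiom; the column-probe statement is a placeholder, since the actual pstmt_domain query is elided from the hunk:

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;

class VersionProbeSketch {
    String probe(Connection conn) throws SQLException {
        // Probe for a table: an empty result set means it is absent.
        try (PreparedStatement pstmt = conn.prepareStatement("SHOW TABLES LIKE 'version'");
             ResultSet rs = pstmt.executeQuery()) {
            if (rs.next()) {
                return "version table present"; // the real code then reads its rows
            }
        }
        // No version table: a failing column probe separates 2.1.7 from 2.1.8.
        try (PreparedStatement probe = conn.prepareStatement("SELECT domain_id FROM some_table LIMIT 1")) { // placeholder table
            probe.executeQuery();
            return "2.1.8"; // column present
        } catch (SQLException e) {
            return "2.1.7"; // column absent: the exception is the signal
        }
    }
}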
View File

@ -27,7 +27,6 @@ import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.TransactionLegacy;
import com.cloud.utils.exception.CloudRuntimeException;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import javax.ejb.Local;
@ -42,7 +41,6 @@ import java.util.TimeZone;
@Component
@Local(value = {UsageDao.class})
public class UsageDaoImpl extends GenericDaoBase<UsageVO, Long> implements UsageDao {
public static final Logger s_logger = Logger.getLogger(UsageDaoImpl.class.getName());
private static final String DELETE_ALL = "DELETE FROM cloud_usage";
private static final String DELETE_ALL_BY_ACCOUNTID = "DELETE FROM cloud_usage WHERE account_id = ?";
private static final String DELETE_ALL_BY_INTERVAL = "DELETE FROM cloud_usage WHERE end_date < DATE_SUB(CURRENT_DATE(), INTERVAL ? DAY)";
@ -92,7 +90,7 @@ public class UsageDaoImpl extends GenericDaoBase<UsageVO, Long> implements Usage
txn.commit();
} catch (Exception ex) {
txn.rollback();
s_logger.error("error retrieving usage vm instances for account id: " + accountId);
logger.error("error retrieving usage vm instances for account id: " + accountId);
} finally {
txn.close();
}
@ -132,7 +130,7 @@ public class UsageDaoImpl extends GenericDaoBase<UsageVO, Long> implements Usage
txn.commit();
} catch (Exception ex) {
txn.rollback();
s_logger.error("error saving account to cloud_usage db", ex);
logger.error("error saving account to cloud_usage db", ex);
throw new CloudRuntimeException(ex.getMessage());
}
}
@ -162,7 +160,7 @@ public class UsageDaoImpl extends GenericDaoBase<UsageVO, Long> implements Usage
txn.commit();
} catch (Exception ex) {
txn.rollback();
s_logger.error("error saving account to cloud_usage db", ex);
logger.error("error saving account to cloud_usage db", ex);
throw new CloudRuntimeException(ex.getMessage());
}
}
@ -203,7 +201,7 @@ public class UsageDaoImpl extends GenericDaoBase<UsageVO, Long> implements Usage
txn.commit();
} catch (Exception ex) {
txn.rollback();
s_logger.error("error saving user stats to cloud_usage db", ex);
logger.error("error saving user stats to cloud_usage db", ex);
throw new CloudRuntimeException(ex.getMessage());
}
}
@ -230,7 +228,7 @@ public class UsageDaoImpl extends GenericDaoBase<UsageVO, Long> implements Usage
txn.commit();
} catch (Exception ex) {
txn.rollback();
s_logger.error("error saving user stats to cloud_usage db", ex);
logger.error("error saving user stats to cloud_usage db", ex);
throw new CloudRuntimeException(ex.getMessage());
}
}
@ -247,7 +245,7 @@ public class UsageDaoImpl extends GenericDaoBase<UsageVO, Long> implements Usage
return Long.valueOf(rs.getLong(1));
}
} catch (Exception ex) {
s_logger.error("error getting last account id", ex);
logger.error("error getting last account id", ex);
}
return null;
}
@ -264,7 +262,7 @@ public class UsageDaoImpl extends GenericDaoBase<UsageVO, Long> implements Usage
return Long.valueOf(rs.getLong(1));
}
} catch (Exception ex) {
s_logger.error("error getting last user stats id", ex);
logger.error("error getting last user stats id", ex);
}
return null;
}
@ -283,7 +281,7 @@ public class UsageDaoImpl extends GenericDaoBase<UsageVO, Long> implements Usage
templateList.add(Long.valueOf(rs.getLong(1)));
}
} catch (Exception ex) {
s_logger.error("error listing public templates", ex);
logger.error("error listing public templates", ex);
}
return templateList;
}
@ -300,7 +298,7 @@ public class UsageDaoImpl extends GenericDaoBase<UsageVO, Long> implements Usage
return Long.valueOf(rs.getLong(1));
}
} catch (Exception ex) {
s_logger.error("error getting last vm disk stats id", ex);
logger.error("error getting last vm disk stats id", ex);
}
return null;
}
@ -333,7 +331,7 @@ public class UsageDaoImpl extends GenericDaoBase<UsageVO, Long> implements Usage
txn.commit();
} catch (Exception ex) {
txn.rollback();
s_logger.error("error saving vm disk stats to cloud_usage db", ex);
logger.error("error saving vm disk stats to cloud_usage db", ex);
throw new CloudRuntimeException(ex.getMessage());
}
@ -379,7 +377,7 @@ public class UsageDaoImpl extends GenericDaoBase<UsageVO, Long> implements Usage
txn.commit();
} catch (Exception ex) {
txn.rollback();
s_logger.error("error saving vm disk stats to cloud_usage db", ex);
logger.error("error saving vm disk stats to cloud_usage db", ex);
throw new CloudRuntimeException(ex.getMessage());
}
@ -446,7 +444,7 @@ public class UsageDaoImpl extends GenericDaoBase<UsageVO, Long> implements Usage
txn.commit();
} catch (Exception ex) {
txn.rollback();
s_logger.error("error saving usage records to cloud_usage db", ex);
logger.error("error saving usage records to cloud_usage db", ex);
throw new CloudRuntimeException(ex.getMessage());
}
}
@ -464,7 +462,7 @@ public class UsageDaoImpl extends GenericDaoBase<UsageVO, Long> implements Usage
txn.commit();
} catch (Exception ex) {
txn.rollback();
s_logger.error("error removing old cloud_usage records for interval: " + days);
logger.error("error removing old cloud_usage records for interval: " + days);
} finally {
txn.close();
}

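Every write path in this DAO wraps its JDBC work in the same TransactionLegacy shape: commit on success, rollback and log on failure, close in finally. A condensed sketch of that recurring idiom; the open(TransactionLegacy.USAGE_DB) entry point and the surrounding method are assumed from context rather than shown in these hunks:

TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.USAGE_DB); // assumed entry point
try {
    PreparedStatement pstmt = txn.prepareAutoCloseStatement(DELETE_ALL_BY_ACCOUNTID);
    pstmt.setLong(1, accountId);
    pstmt.executeUpdate();
    txn.commit();
} catch (Exception ex) {
    txn.rollback();
    logger.error("error removing usage records for account id: " + accountId, ex);
    throw new CloudRuntimeException(ex.getMessage());
} finally {
    txn.close();
}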
View File

@ -27,7 +27,6 @@ import java.util.TimeZone;
import javax.ejb.Local;
import com.cloud.exception.CloudException;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.usage.UsageIPAddressVO;
@ -38,7 +37,6 @@ import com.cloud.utils.db.TransactionLegacy;
@Component
@Local(value = {UsageIPAddressDao.class})
public class UsageIPAddressDaoImpl extends GenericDaoBase<UsageIPAddressVO, Long> implements UsageIPAddressDao {
public static final Logger s_logger = Logger.getLogger(UsageIPAddressDaoImpl.class.getName());
protected static final String UPDATE_RELEASED = "UPDATE usage_ip_address SET released = ? WHERE account_id = ? AND public_ip_address = ? and released IS NULL";
protected static final String GET_USAGE_RECORDS_BY_ACCOUNT =
@ -77,7 +75,7 @@ public class UsageIPAddressDaoImpl extends GenericDaoBase<UsageIPAddressVO, Long
txn.commit();
} catch (Exception e) {
txn.rollback();
s_logger.error("Error updating usageIPAddressVO:"+e.getMessage(), e);
logger.error("Error updating usageIPAddressVO:"+e.getMessage(), e);
} finally {
txn.close();
}
@ -142,7 +140,7 @@ public class UsageIPAddressDaoImpl extends GenericDaoBase<UsageIPAddressVO, Long
}
} catch (Exception e) {
txn.rollback();
s_logger.warn("Error getting usage records", e);
logger.warn("Error getting usage records", e);
} finally {
txn.close();
}

View File

@ -23,7 +23,6 @@ import java.util.List;
import javax.ejb.Local;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.usage.UsageJobVO;
@ -36,7 +35,6 @@ import com.cloud.utils.exception.CloudRuntimeException;
@Component
@Local(value = {UsageJobDao.class})
public class UsageJobDaoImpl extends GenericDaoBase<UsageJobVO, Long> implements UsageJobDao {
private static final Logger s_logger = Logger.getLogger(UsageJobDaoImpl.class.getName());
private static final String GET_LAST_JOB_SUCCESS_DATE_MILLIS =
"SELECT end_millis FROM cloud_usage.usage_job WHERE end_millis > 0 and success = 1 ORDER BY end_millis DESC LIMIT 1";
@ -53,7 +51,7 @@ public class UsageJobDaoImpl extends GenericDaoBase<UsageJobVO, Long> implements
return rs.getLong(1);
}
} catch (Exception ex) {
s_logger.error("error getting last usage job success date", ex);
logger.error("error getting last usage job success date", ex);
} finally {
txn.close();
}
@ -79,7 +77,7 @@ public class UsageJobDaoImpl extends GenericDaoBase<UsageJobVO, Long> implements
txn.commit();
} catch (Exception ex) {
txn.rollback();
s_logger.error("error updating job success date", ex);
logger.error("error updating job success date", ex);
throw new CloudRuntimeException(ex.getMessage());
} finally {
txn.close();

View File

@ -27,7 +27,6 @@ import java.util.TimeZone;
import javax.ejb.Local;
import com.cloud.exception.CloudException;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.usage.UsageLoadBalancerPolicyVO;
@ -38,7 +37,6 @@ import com.cloud.utils.db.TransactionLegacy;
@Component
@Local(value = {UsageLoadBalancerPolicyDao.class})
public class UsageLoadBalancerPolicyDaoImpl extends GenericDaoBase<UsageLoadBalancerPolicyVO, Long> implements UsageLoadBalancerPolicyDao {
public static final Logger s_logger = Logger.getLogger(UsageLoadBalancerPolicyDaoImpl.class.getName());
protected static final String REMOVE_BY_USERID_LBID = "DELETE FROM usage_load_balancer_policy WHERE account_id = ? AND id = ?";
protected static final String UPDATE_DELETED = "UPDATE usage_load_balancer_policy SET deleted = ? WHERE account_id = ? AND id = ? and deleted IS NULL";
@ -66,7 +64,7 @@ public class UsageLoadBalancerPolicyDaoImpl extends GenericDaoBase<UsageLoadBala
txn.commit();
} catch (Exception e) {
txn.rollback();
s_logger.warn("Error removing UsageLoadBalancerPolicyVO", e);
logger.warn("Error removing UsageLoadBalancerPolicyVO", e);
} finally {
txn.close();
}
@ -92,7 +90,7 @@ public class UsageLoadBalancerPolicyDaoImpl extends GenericDaoBase<UsageLoadBala
txn.commit();
} catch (Exception e) {
txn.rollback();
s_logger.warn("Error updating UsageLoadBalancerPolicyVO"+e.getMessage(), e);
logger.warn("Error updating UsageLoadBalancerPolicyVO"+e.getMessage(), e);
} finally {
txn.close();
}
@ -161,7 +159,7 @@ public class UsageLoadBalancerPolicyDaoImpl extends GenericDaoBase<UsageLoadBala
}
} catch (Exception e) {
txn.rollback();
s_logger.warn("Error getting usage records", e);
logger.warn("Error getting usage records", e);
} finally {
txn.close();
}

View File

@ -24,7 +24,6 @@ import java.util.Map;
import javax.ejb.Local;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.usage.UsageNetworkVO;
@ -35,7 +34,6 @@ import com.cloud.utils.exception.CloudRuntimeException;
@Component
@Local(value = {UsageNetworkDao.class})
public class UsageNetworkDaoImpl extends GenericDaoBase<UsageNetworkVO, Long> implements UsageNetworkDao {
private static final Logger s_logger = Logger.getLogger(UsageVMInstanceDaoImpl.class.getName());
private static final String SELECT_LATEST_STATS =
"SELECT u.account_id, u.zone_id, u.host_id, u.host_type, u.network_id, u.bytes_sent, u.bytes_received, u.agg_bytes_received, u.agg_bytes_sent, u.event_time_millis "
+ "FROM cloud_usage.usage_network u INNER JOIN (SELECT netusage.account_id as acct_id, netusage.zone_id as z_id, max(netusage.event_time_millis) as max_date "
@ -79,7 +77,7 @@ public class UsageNetworkDaoImpl extends GenericDaoBase<UsageNetworkVO, Long> im
}
return returnMap;
} catch (Exception ex) {
s_logger.error("error getting recent usage network stats", ex);
logger.error("error getting recent usage network stats", ex);
} finally {
txn.close();
}
@ -99,7 +97,7 @@ public class UsageNetworkDaoImpl extends GenericDaoBase<UsageNetworkVO, Long> im
txn.commit();
} catch (Exception ex) {
txn.rollback();
s_logger.error("error deleting old usage network stats", ex);
logger.error("error deleting old usage network stats", ex);
}
}
@ -128,7 +126,7 @@ public class UsageNetworkDaoImpl extends GenericDaoBase<UsageNetworkVO, Long> im
txn.commit();
} catch (Exception ex) {
txn.rollback();
s_logger.error("error saving usage_network to cloud_usage db", ex);
logger.error("error saving usage_network to cloud_usage db", ex);
throw new CloudRuntimeException(ex.getMessage());
}
}

View File

@ -27,7 +27,6 @@ import java.util.TimeZone;
import javax.ejb.Local;
import com.cloud.exception.CloudException;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.usage.UsageNetworkOfferingVO;
@ -38,7 +37,6 @@ import com.cloud.utils.db.TransactionLegacy;
@Component
@Local(value = {UsageNetworkOfferingDao.class})
public class UsageNetworkOfferingDaoImpl extends GenericDaoBase<UsageNetworkOfferingVO, Long> implements UsageNetworkOfferingDao {
public static final Logger s_logger = Logger.getLogger(UsageNetworkOfferingDaoImpl.class.getName());
protected static final String UPDATE_DELETED =
"UPDATE usage_network_offering SET deleted = ? WHERE account_id = ? AND vm_instance_id = ? AND network_offering_id = ? and deleted IS NULL";
@ -76,7 +74,7 @@ public class UsageNetworkOfferingDaoImpl extends GenericDaoBase<UsageNetworkOffe
txn.commit();
} catch (Exception e) {
txn.rollback();
s_logger.warn("Error updating UsageNetworkOfferingVO:"+e.getMessage(), e);
logger.warn("Error updating UsageNetworkOfferingVO:"+e.getMessage(), e);
} finally {
txn.close();
}
@ -148,7 +146,7 @@ public class UsageNetworkOfferingDaoImpl extends GenericDaoBase<UsageNetworkOffe
}
} catch (Exception e) {
txn.rollback();
s_logger.warn("Error getting usage records", e);
logger.warn("Error getting usage records", e);
} finally {
txn.close();
}

View File

@ -27,7 +27,6 @@ import java.util.TimeZone;
import javax.ejb.Local;
import com.cloud.exception.CloudException;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.usage.UsagePortForwardingRuleVO;
@ -38,7 +37,6 @@ import com.cloud.utils.db.TransactionLegacy;
@Component
@Local(value = {UsagePortForwardingRuleDao.class})
public class UsagePortForwardingRuleDaoImpl extends GenericDaoBase<UsagePortForwardingRuleVO, Long> implements UsagePortForwardingRuleDao {
public static final Logger s_logger = Logger.getLogger(UsagePortForwardingRuleDaoImpl.class.getName());
protected static final String REMOVE_BY_USERID_PFID = "DELETE FROM usage_port_forwarding WHERE account_id = ? AND id = ?";
protected static final String UPDATE_DELETED = "UPDATE usage_port_forwarding SET deleted = ? WHERE account_id = ? AND id = ? and deleted IS NULL";
@ -66,7 +64,7 @@ public class UsagePortForwardingRuleDaoImpl extends GenericDaoBase<UsagePortForw
txn.commit();
} catch (Exception e) {
txn.rollback();
s_logger.warn("Error removing UsagePortForwardingRuleVO", e);
logger.warn("Error removing UsagePortForwardingRuleVO", e);
} finally {
txn.close();
}
@ -92,7 +90,7 @@ public class UsagePortForwardingRuleDaoImpl extends GenericDaoBase<UsagePortForw
txn.commit();
} catch (Exception e) {
txn.rollback();
s_logger.warn("Error updating UsagePortForwardingRuleVO:"+e.getMessage(), e);
logger.warn("Error updating UsagePortForwardingRuleVO:"+e.getMessage(), e);
} finally {
txn.close();
}
@ -161,7 +159,7 @@ public class UsagePortForwardingRuleDaoImpl extends GenericDaoBase<UsagePortForw
}
} catch (Exception e) {
txn.rollback();
s_logger.warn("Error getting usage records", e);
logger.warn("Error getting usage records", e);
} finally {
txn.close();
}

View File

@ -27,7 +27,6 @@ import java.util.TimeZone;
import javax.ejb.Local;
import com.cloud.exception.CloudException;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.usage.UsageSecurityGroupVO;
@ -38,7 +37,6 @@ import com.cloud.utils.db.TransactionLegacy;
@Component
@Local(value = {UsageSecurityGroupDao.class})
public class UsageSecurityGroupDaoImpl extends GenericDaoBase<UsageSecurityGroupVO, Long> implements UsageSecurityGroupDao {
public static final Logger s_logger = Logger.getLogger(UsageSecurityGroupDaoImpl.class.getName());
protected static final String UPDATE_DELETED =
"UPDATE usage_security_group SET deleted = ? WHERE account_id = ? AND vm_instance_id = ? AND security_group_id = ? and deleted IS NULL";
@ -76,7 +74,7 @@ public class UsageSecurityGroupDaoImpl extends GenericDaoBase<UsageSecurityGroup
txn.commit();
} catch (Exception e) {
txn.rollback();
s_logger.warn("Error updating UsageSecurityGroupVO:"+e.getMessage(), e);
logger.warn("Error updating UsageSecurityGroupVO:"+e.getMessage(), e);
} finally {
txn.close();
}
@ -144,7 +142,7 @@ public class UsageSecurityGroupDaoImpl extends GenericDaoBase<UsageSecurityGroup
}
} catch (Exception e) {
txn.rollback();
s_logger.warn("Error getting usage records:"+e.getMessage(), e);
logger.warn("Error getting usage records:"+e.getMessage(), e);
} finally {
txn.close();
}

View File

@ -27,7 +27,6 @@ import java.util.TimeZone;
import javax.ejb.Local;
import com.cloud.exception.CloudException;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.usage.UsageStorageVO;
@ -40,7 +39,6 @@ import com.cloud.utils.db.TransactionLegacy;
@Component
@Local(value = {UsageStorageDao.class})
public class UsageStorageDaoImpl extends GenericDaoBase<UsageStorageVO, Long> implements UsageStorageDao {
public static final Logger s_logger = Logger.getLogger(UsageStorageDaoImpl.class.getName());
protected static final String REMOVE_BY_USERID_STORAGEID = "DELETE FROM usage_storage WHERE account_id = ? AND id = ? AND storage_type = ?";
protected static final String UPDATE_DELETED = "UPDATE usage_storage SET deleted = ? WHERE account_id = ? AND id = ? AND storage_type = ? and deleted IS NULL";
@ -108,7 +106,7 @@ public class UsageStorageDaoImpl extends GenericDaoBase<UsageStorageVO, Long> im
txn.commit();
} catch (Exception e) {
txn.rollback();
s_logger.error("Error removing usageStorageVO", e);
logger.error("Error removing usageStorageVO", e);
} finally {
txn.close();
}
@ -136,7 +134,7 @@ public class UsageStorageDaoImpl extends GenericDaoBase<UsageStorageVO, Long> im
txn.commit();
} catch (Exception e) {
txn.rollback();
s_logger.error("Error updating UsageStorageVO:"+e.getMessage(), e);
logger.error("Error updating UsageStorageVO:"+e.getMessage(), e);
} finally {
txn.close();
}
@ -210,7 +208,7 @@ public class UsageStorageDaoImpl extends GenericDaoBase<UsageStorageVO, Long> im
}
}catch (Exception e) {
txn.rollback();
s_logger.error("getUsageRecords:Exception:"+e.getMessage(), e);
logger.error("getUsageRecords:Exception:"+e.getMessage(), e);
} finally {
txn.close();
}

View File

@ -25,7 +25,6 @@ import java.util.TimeZone;
import javax.ejb.Local;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.usage.UsageVMInstanceVO;
@ -36,7 +35,6 @@ import com.cloud.utils.db.TransactionLegacy;
@Component
@Local(value = {UsageVMInstanceDao.class})
public class UsageVMInstanceDaoImpl extends GenericDaoBase<UsageVMInstanceVO, Long> implements UsageVMInstanceDao {
public static final Logger s_logger = Logger.getLogger(UsageVMInstanceDaoImpl.class.getName());
protected static final String UPDATE_USAGE_INSTANCE_SQL = "UPDATE usage_vm_instance SET end_date = ? "
+ "WHERE account_id = ? and vm_instance_id = ? and usage_type = ? and end_date IS NULL";
@ -64,7 +62,7 @@ public class UsageVMInstanceDaoImpl extends GenericDaoBase<UsageVMInstanceVO, Lo
pstmt.executeUpdate();
txn.commit();
} catch (Exception e) {
s_logger.warn(e);
logger.warn(e);
} finally {
txn.close();
}
@ -85,7 +83,7 @@ public class UsageVMInstanceDaoImpl extends GenericDaoBase<UsageVMInstanceVO, Lo
txn.commit();
} catch (Exception ex) {
txn.rollback();
s_logger.error("error deleting usage vm instance with vmId: " + instance.getVmInstanceId() + ", for account with id: " + instance.getAccountId());
logger.error("error deleting usage vm instance with vmId: " + instance.getVmInstanceId() + ", for account with id: " + instance.getAccountId());
} finally {
txn.close();
}
@ -143,7 +141,7 @@ public class UsageVMInstanceDaoImpl extends GenericDaoBase<UsageVMInstanceVO, Lo
usageInstances.add(usageInstance);
}
} catch (Exception ex) {
s_logger.error("error retrieving usage vm instances for account id: " + accountId, ex);
logger.error("error retrieving usage vm instances for account id: " + accountId, ex);
} finally {
txn.close();
}

View File

@ -26,7 +26,6 @@ import java.util.TimeZone;
import javax.ejb.Local;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.usage.UsageVMSnapshotVO;
@ -37,7 +36,6 @@ import com.cloud.utils.db.TransactionLegacy;
@Component
@Local(value = {UsageVMSnapshotDao.class})
public class UsageVMSnapshotDaoImpl extends GenericDaoBase<UsageVMSnapshotVO, Long> implements UsageVMSnapshotDao {
public static final Logger s_logger = Logger.getLogger(UsageVMSnapshotDaoImpl.class.getName());
protected static final String GET_USAGE_RECORDS_BY_ACCOUNT = "SELECT id, zone_id, account_id, domain_id, vm_id, disk_offering_id, size, created, processed "
+ " FROM usage_vmsnapshot" + " WHERE account_id = ? " + " AND ( (created BETWEEN ? AND ?) OR "
+ " (created < ? AND processed is NULL) ) ORDER BY created asc";
@ -62,7 +60,7 @@ public class UsageVMSnapshotDaoImpl extends GenericDaoBase<UsageVMSnapshotVO, Lo
txn.commit();
} catch (Exception e) {
txn.rollback();
s_logger.warn("Error updating UsageVMSnapshotVO", e);
logger.warn("Error updating UsageVMSnapshotVO", e);
} finally {
txn.close();
}
@ -112,7 +110,7 @@ public class UsageVMSnapshotDaoImpl extends GenericDaoBase<UsageVMSnapshotVO, Lo
}
} catch (Exception e) {
txn.rollback();
s_logger.warn("Error getting usage records", e);
logger.warn("Error getting usage records", e);
} finally {
txn.close();
}
@ -163,7 +161,7 @@ public class UsageVMSnapshotDaoImpl extends GenericDaoBase<UsageVMSnapshotVO, Lo
}
} catch (Exception e) {
txn.rollback();
s_logger.warn("Error getting usage records", e);
logger.warn("Error getting usage records", e);
} finally {
txn.close();
}

View File

@ -27,7 +27,6 @@ import java.util.TimeZone;
import javax.ejb.Local;
import com.cloud.exception.CloudException;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.usage.UsageVPNUserVO;
@ -38,7 +37,6 @@ import com.cloud.utils.db.TransactionLegacy;
@Component
@Local(value = {UsageVPNUserDao.class})
public class UsageVPNUserDaoImpl extends GenericDaoBase<UsageVPNUserVO, Long> implements UsageVPNUserDao {
public static final Logger s_logger = Logger.getLogger(UsageVPNUserDaoImpl.class.getName());
protected static final String UPDATE_DELETED = "UPDATE usage_vpn_user SET deleted = ? WHERE account_id = ? AND user_id = ? and deleted IS NULL";
protected static final String GET_USAGE_RECORDS_BY_ACCOUNT = "SELECT zone_id, account_id, domain_id, user_id, user_name, created, deleted " + "FROM usage_vpn_user "
@ -71,7 +69,7 @@ public class UsageVPNUserDaoImpl extends GenericDaoBase<UsageVPNUserVO, Long> im
txn.commit();
} catch (Exception e) {
txn.rollback();
s_logger.error("Error updating UsageVPNUserVO:"+e.getMessage(), e);
logger.error("Error updating UsageVPNUserVO:"+e.getMessage(), e);
} finally {
txn.close();
}
@ -141,7 +139,7 @@ public class UsageVPNUserDaoImpl extends GenericDaoBase<UsageVPNUserVO, Long> im
}
} catch (Exception e) {
txn.rollback();
s_logger.warn("Error getting usage records", e);
logger.warn("Error getting usage records", e);
} finally {
txn.close();
}

View File

@ -24,7 +24,6 @@ import java.util.Map;
import javax.ejb.Local;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.usage.UsageVmDiskVO;
@ -35,7 +34,6 @@ import com.cloud.utils.exception.CloudRuntimeException;
@Component
@Local(value = {UsageVmDiskDao.class})
public class UsageVmDiskDaoImpl extends GenericDaoBase<UsageVmDiskVO, Long> implements UsageVmDiskDao {
private static final Logger s_logger = Logger.getLogger(UsageVmDiskDaoImpl.class.getName());
private static final String SELECT_LATEST_STATS =
"SELECT uvd.account_id, uvd.zone_id, uvd.vm_id, uvd.volume_id, uvd.io_read, uvd.io_write, uvd.agg_io_read, uvd.agg_io_write, "
+ "uvd.bytes_read, uvd.bytes_write, uvd.agg_bytes_read, uvd.agg_bytes_write, uvd.event_time_millis "
@ -83,7 +81,7 @@ public class UsageVmDiskDaoImpl extends GenericDaoBase<UsageVmDiskVO, Long> impl
}
return returnMap;
} catch (Exception ex) {
s_logger.error("error getting recent usage disk stats", ex);
logger.error("error getting recent usage disk stats", ex);
} finally {
txn.close();
}
@ -103,7 +101,7 @@ public class UsageVmDiskDaoImpl extends GenericDaoBase<UsageVmDiskVO, Long> impl
txn.commit();
} catch (Exception ex) {
txn.rollback();
s_logger.error("error deleting old usage disk stats", ex);
logger.error("error deleting old usage disk stats", ex);
}
}
@ -135,7 +133,7 @@ public class UsageVmDiskDaoImpl extends GenericDaoBase<UsageVmDiskVO, Long> impl
txn.commit();
} catch (Exception ex) {
txn.rollback();
s_logger.error("error saving usage_vm_disk to cloud_usage db", ex);
logger.error("error saving usage_vm_disk to cloud_usage db", ex);
throw new CloudRuntimeException(ex.getMessage());
}
}

View File

@ -27,7 +27,6 @@ import java.util.TimeZone;
import javax.ejb.Local;
import com.cloud.exception.CloudException;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.usage.UsageVolumeVO;
@ -38,7 +37,6 @@ import com.cloud.utils.db.TransactionLegacy;
@Component
@Local(value = {UsageVolumeDao.class})
public class UsageVolumeDaoImpl extends GenericDaoBase<UsageVolumeVO, Long> implements UsageVolumeDao {
public static final Logger s_logger = Logger.getLogger(UsageVolumeDaoImpl.class.getName());
protected static final String REMOVE_BY_USERID_VOLID = "DELETE FROM usage_volume WHERE account_id = ? AND id = ?";
protected static final String UPDATE_DELETED = "UPDATE usage_volume SET deleted = ? WHERE account_id = ? AND id = ? and deleted IS NULL";
@ -71,7 +69,7 @@ public class UsageVolumeDaoImpl extends GenericDaoBase<UsageVolumeVO, Long> impl
txn.commit();
} catch (Exception e) {
txn.rollback();
s_logger.warn("Error removing usageVolumeVO:"+e.getMessage(), e);
logger.warn("Error removing usageVolumeVO:"+e.getMessage(), e);
} finally {
txn.close();
}
@ -93,7 +91,7 @@ public class UsageVolumeDaoImpl extends GenericDaoBase<UsageVolumeVO, Long> impl
txn.commit();
} catch (Exception e) {
txn.rollback();
s_logger.warn("Error updating UsageVolumeVO", e);
logger.warn("Error updating UsageVolumeVO", e);
} finally {
txn.close();
}
@ -171,7 +169,7 @@ public class UsageVolumeDaoImpl extends GenericDaoBase<UsageVolumeVO, Long> impl
}
} catch (Exception e) {
txn.rollback();
s_logger.warn("Error getting usage records", e);
logger.warn("Error getting usage records", e);
} finally {
txn.close();
}

View File

@ -23,7 +23,6 @@ import java.util.List;
import javax.ejb.Local;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.user.Account;
@ -44,7 +43,6 @@ import com.cloud.utils.db.TransactionLegacy;
@Component
@Local(value = {AccountDao.class})
public class AccountDaoImpl extends GenericDaoBase<AccountVO, Long> implements AccountDao {
private static final Logger s_logger = Logger.getLogger(AccountDaoImpl.class);
private static final String FIND_USER_ACCOUNT_BY_API_KEY = "SELECT u.id, u.username, u.account_id, u.secret_key, u.state, "
+ "a.id, a.account_name, a.type, a.domain_id, a.state " + "FROM `cloud`.`user` u, `cloud`.`account` a "
+ "WHERE u.account_id = a.id AND u.api_key = ? and u.removed IS NULL";
@ -148,7 +146,7 @@ public class AccountDaoImpl extends GenericDaoBase<AccountVO, Long> implements A
userAcctPair = new Pair<User, Account>(u, a);
}
} catch (Exception e) {
s_logger.warn("Exception finding user/acct by api key: " + apiKey, e);
logger.warn("Exception finding user/acct by api key: " + apiKey, e);
}
return userAcctPair;
}
@ -266,7 +264,7 @@ public class AccountDaoImpl extends GenericDaoBase<AccountVO, Long> implements A
if (!account.getNeedsCleanup()) {
account.setNeedsCleanup(true);
if (!update(accountId, account)) {
s_logger.warn("Failed to mark account id=" + accountId + " for cleanup");
logger.warn("Failed to mark account id=" + accountId + " for cleanup");
}
}
}
@ -286,7 +284,7 @@ public class AccountDaoImpl extends GenericDaoBase<AccountVO, Long> implements A
domain_id = account_vo.getDomainId();
}
catch (Exception e) {
s_logger.warn("getDomainIdForGivenAccountId: Exception :" + e.getMessage());
logger.warn("getDomainIdForGivenAccountId: Exception :" + e.getMessage());
}
finally {
return domain_id;

View File

@ -25,7 +25,6 @@ import java.util.TimeZone;
import javax.ejb.Local;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.user.UserStatisticsVO;
@ -38,7 +37,6 @@ import com.cloud.utils.db.TransactionLegacy;
@Component
@Local(value = {UserStatisticsDao.class})
public class UserStatisticsDaoImpl extends GenericDaoBase<UserStatisticsVO, Long> implements UserStatisticsDao {
private static final Logger s_logger = Logger.getLogger(UserStatisticsDaoImpl.class);
private static final String ACTIVE_AND_RECENTLY_DELETED_SEARCH =
"SELECT us.id, us.data_center_id, us.account_id, us.public_ip_address, us.device_id, us.device_type, us.network_id, us.agg_bytes_received, us.agg_bytes_sent "
+ "FROM user_statistics us, account a " + "WHERE us.account_id = a.id AND (a.removed IS NULL OR a.removed >= ?) " + "ORDER BY us.id";
@ -111,7 +109,7 @@ public class UserStatisticsDaoImpl extends GenericDaoBase<UserStatisticsVO, Long
userStats.add(toEntityBean(rs, false));
}
} catch (Exception ex) {
s_logger.error("error saving user stats to cloud_usage db", ex);
logger.error("error saving user stats to cloud_usage db", ex);
}
return userStats;
}
@ -129,7 +127,7 @@ public class UserStatisticsDaoImpl extends GenericDaoBase<UserStatisticsVO, Long
userStats.add(toEntityBean(rs, false));
}
} catch (Exception ex) {
s_logger.error("error lisitng updated user stats", ex);
logger.error("error lisitng updated user stats", ex);
}
return userStats;
}

View File

@ -25,7 +25,6 @@ import java.util.TimeZone;
import javax.ejb.Local;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.user.VmDiskStatisticsVO;
@ -38,7 +37,6 @@ import com.cloud.utils.db.TransactionLegacy;
@Component
@Local(value = {VmDiskStatisticsDao.class})
public class VmDiskStatisticsDaoImpl extends GenericDaoBase<VmDiskStatisticsVO, Long> implements VmDiskStatisticsDao {
private static final Logger s_logger = Logger.getLogger(VmDiskStatisticsDaoImpl.class);
private static final String ACTIVE_AND_RECENTLY_DELETED_SEARCH =
"SELECT bcf.id, bcf.data_center_id, bcf.account_id, bcf.vm_id, bcf.volume_id, bcf.agg_io_read, bcf.agg_io_write, bcf.agg_bytes_read, bcf.agg_bytes_write "
+ "FROM vm_disk_statistics bcf, account a " + "WHERE bcf.account_id = a.id AND (a.removed IS NULL OR a.removed >= ?) " + "ORDER BY bcf.id";
@ -106,7 +104,7 @@ public class VmDiskStatisticsDaoImpl extends GenericDaoBase<VmDiskStatisticsVO,
vmDiskStats.add(toEntityBean(rs, false));
}
} catch (Exception ex) {
s_logger.error("error saving vm disk stats to cloud_usage db", ex);
logger.error("error saving vm disk stats to cloud_usage db", ex);
}
return vmDiskStats;
}
@ -124,7 +122,7 @@ public class VmDiskStatisticsDaoImpl extends GenericDaoBase<VmDiskStatisticsVO,
vmDiskStats.add(toEntityBean(rs, false));
}
} catch (Exception ex) {
s_logger.error("error lisitng updated vm disk stats", ex);
logger.error("error lisitng updated vm disk stats", ex);
}
return vmDiskStats;
}

View File

@ -25,7 +25,6 @@ import java.util.List;
import javax.ejb.Local;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.info.ConsoleProxyLoadInfo;
@ -42,7 +41,6 @@ import com.cloud.vm.VirtualMachine.State;
@Component
@Local(value = {ConsoleProxyDao.class})
public class ConsoleProxyDaoImpl extends GenericDaoBase<ConsoleProxyVO, Long> implements ConsoleProxyDao {
private static final Logger s_logger = Logger.getLogger(ConsoleProxyDaoImpl.class);
//
// query SQL for returning console proxy assignment info as follows
@ -217,7 +215,7 @@ public class ConsoleProxyDaoImpl extends GenericDaoBase<ConsoleProxyVO, Long> im
l.add(new Pair<Long, Integer>(rs.getLong(1), rs.getInt(2)));
}
} catch (SQLException e) {
s_logger.debug("Caught SQLException: ", e);
logger.debug("Caught SQLException: ", e);
}
return l;
}
@ -242,7 +240,7 @@ public class ConsoleProxyDaoImpl extends GenericDaoBase<ConsoleProxyVO, Long> im
l.add(new Pair<Long, Integer>(rs.getLong(1), rs.getInt(2)));
}
} catch (SQLException e) {
s_logger.debug("Caught SQLException: ", e);
logger.debug("Caught SQLException: ", e);
}
return l;
}
@ -261,7 +259,7 @@ public class ConsoleProxyDaoImpl extends GenericDaoBase<ConsoleProxyVO, Long> im
return rs.getInt(1);
}
} catch (SQLException e) {
s_logger.debug("Caught SQLException: ", e);
logger.debug("Caught SQLException: ", e);
}
return 0;
}
@ -279,7 +277,7 @@ public class ConsoleProxyDaoImpl extends GenericDaoBase<ConsoleProxyVO, Long> im
return rs.getInt(1);
}
} catch (SQLException e) {
s_logger.debug("Caught SQLException: ", e);
logger.debug("Caught SQLException: ", e);
}
return 0;
}
@ -301,7 +299,7 @@ public class ConsoleProxyDaoImpl extends GenericDaoBase<ConsoleProxyVO, Long> im
l.add(info);
}
} catch (SQLException e) {
s_logger.debug("Exception: ", e);
logger.debug("Exception: ", e);
}
return l;
}
@ -323,7 +321,7 @@ public class ConsoleProxyDaoImpl extends GenericDaoBase<ConsoleProxyVO, Long> im
l.add(rs.getLong(1));
}
} catch (SQLException e) {
s_logger.debug("Caught SQLException: ", e);
logger.debug("Caught SQLException: ", e);
}
return l;
}
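
The ConsoleProxyDaoImpl hunks repeat one query shape: execute a prepared statement, collect rows into a list, and log the SQLException at debug level rather than rethrow, so callers get an empty (or zero) result on failure. A plain-JDBC sketch of that shape, assuming a caller-supplied connection (CloudStack's own code obtains statements through its transaction layer, which is not reproduced here; the SQL string is a placeholder):

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
import org.apache.log4j.Logger;

class ProxyLoadQuerySketch {
    private static final Logger logger = Logger.getLogger(ProxyLoadQuerySketch.class);

    // Mirrors the methods above: on SQL errors the exception is logged
    // and the (possibly empty) list is returned to the caller.
    List<long[]> listLoad(Connection conn, String sql) {
        List<long[]> result = new ArrayList<>();
        try (PreparedStatement pstmt = conn.prepareStatement(sql);
             ResultSet rs = pstmt.executeQuery()) {
            while (rs.next()) {
                result.add(new long[] {rs.getLong(1), rs.getInt(2)});
            }
        } catch (SQLException e) {
            logger.debug("Caught SQLException: ", e);
        }
        return result;
    }
}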

View File

@ -21,7 +21,6 @@ import java.util.List;
import javax.annotation.PostConstruct;
import javax.ejb.Local;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.utils.db.DB;
@ -35,7 +34,6 @@ import com.cloud.vm.UserVmCloneSettingVO;
@Local(value = {UserVmCloneSettingDao.class})
@DB()
public class UserVmCloneSettingDaoImpl extends GenericDaoBase<UserVmCloneSettingVO, Long> implements UserVmCloneSettingDao {
public static final Logger s_logger = Logger.getLogger(UserVmCloneSettingDaoImpl.class);
protected SearchBuilder<UserVmCloneSettingVO> vmIdSearch;
protected SearchBuilder<UserVmCloneSettingVO> cloneTypeSearch;

View File

@ -30,7 +30,6 @@ import javax.annotation.PostConstruct;
import javax.ejb.Local;
import javax.inject.Inject;
import org.apache.log4j.Logger;
import com.cloud.server.ResourceTag.ResourceObjectType;
import com.cloud.tags.dao.ResourceTagDao;
@ -55,7 +54,6 @@ import com.cloud.vm.dao.UserVmData.SecurityGroupData;
@Local(value = {UserVmDao.class})
public class UserVmDaoImpl extends GenericDaoBase<UserVmVO, Long> implements UserVmDao {
public static final Logger s_logger = Logger.getLogger(UserVmDaoImpl.class);
protected SearchBuilder<UserVmVO> AccountPodSearch;
protected SearchBuilder<UserVmVO> AccountDataCenterSearch;
@ -378,13 +376,13 @@ public class UserVmDaoImpl extends GenericDaoBase<UserVmVO, Long> implements Use
}
}
catch (Exception e) {
s_logger.error("listPodIdsHavingVmsforAccount:Exception: " + e.getMessage());
logger.error("listPodIdsHavingVmsforAccount:Exception: " + e.getMessage());
throw new CloudRuntimeException("listPodIdsHavingVmsforAccount:Exception: " + e.getMessage(), e);
}
txn.commit();
return result;
} catch (Exception e) {
s_logger.error("listPodIdsHavingVmsforAccount:Exception : " + e.getMessage());
logger.error("listPodIdsHavingVmsforAccount:Exception : " + e.getMessage());
throw new CloudRuntimeException("listPodIdsHavingVmsforAccount:Exception: " + e.getMessage(), e);
}
finally {
@ -396,7 +394,7 @@ public class UserVmDaoImpl extends GenericDaoBase<UserVmVO, Long> implements Use
}
catch (Exception e)
{
s_logger.error("listVmDetails:Exception:" + e.getMessage());
logger.error("listVmDetails:Exception:" + e.getMessage());
}
}
@ -433,7 +431,7 @@ public class UserVmDaoImpl extends GenericDaoBase<UserVmVO, Long> implements Use
}
catch (Exception e)
{
s_logger.error("listVmDetails:Exception:" + e.getMessage());
logger.error("listVmDetails:Exception:" + e.getMessage());
throw new CloudRuntimeException("listVmDetails: Exception:" + e.getMessage(),e);
}
curr_index += VM_DETAILS_BATCH_SIZE;
@ -441,7 +439,7 @@ public class UserVmDaoImpl extends GenericDaoBase<UserVmVO, Long> implements Use
}
catch (Exception e)
{
s_logger.error("listVmDetails:Exception:" + e.getMessage());
logger.error("listVmDetails:Exception:" + e.getMessage());
throw new CloudRuntimeException("listVmDetails: Exception:" + e.getMessage(),e);
}
}
@ -469,20 +467,20 @@ public class UserVmDaoImpl extends GenericDaoBase<UserVmVO, Long> implements Use
}
catch (Exception e)
{
s_logger.error("listVmDetails: Exception:" + e.getMessage());
logger.error("listVmDetails: Exception:" + e.getMessage());
throw new CloudRuntimeException("listVmDetails: Exception:" + e.getMessage(),e);
}
}
catch (Exception e)
{
s_logger.error("listVmDetails:Exception:" + e.getMessage());
logger.error("listVmDetails:Exception:" + e.getMessage());
throw new CloudRuntimeException("listVmDetails: Exception:" + e.getMessage(),e);
}
}
txn.commit();
return userVmDataHash;
} catch (Exception e) {
s_logger.error("listVmDetails:Exception:" + e.getMessage());
logger.error("listVmDetails:Exception:" + e.getMessage());
throw new CloudRuntimeException("listVmDetails:Exception : ", e);
}
finally {
@ -494,7 +492,7 @@ public class UserVmDaoImpl extends GenericDaoBase<UserVmVO, Long> implements Use
}
catch (Exception e)
{
s_logger.error("listVmDetails:Exception:" + e.getMessage());
logger.error("listVmDetails:Exception:" + e.getMessage());
}
}
@ -656,7 +654,7 @@ public class UserVmDaoImpl extends GenericDaoBase<UserVmVO, Long> implements Use
}
}
} catch (SQLException e) {
s_logger.error("GetVmsDetailsByNames: Exception in sql: " + e.getMessage());
logger.error("GetVmsDetailsByNames: Exception in sql: " + e.getMessage());
throw new CloudRuntimeException("GetVmsDetailsByNames: Exception: " + e.getMessage());
}
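
listVmDetails above pages through the requested VM ids in slices, advancing curr_index by VM_DETAILS_BATCH_SIZE and handling the final partial slice separately, with the same log-and-rethrow treatment in each batch. A compact sketch of that batching loop (the batch size value and the per-batch query are assumptions; only the field name and the increment are visible in the hunks):

import java.util.List;

class VmDetailsBatchSketch {
    static final int VM_DETAILS_BATCH_SIZE = 2000; // assumed value, not shown in the diff

    interface BatchQuery {
        void run(List<Long> ids);
    }

    // Process full-size slices first, then the remainder, as listVmDetails does.
    static void walk(List<Long> vmIds, BatchQuery query) {
        int currIndex = 0;
        while (currIndex + VM_DETAILS_BATCH_SIZE <= vmIds.size()) {
            query.run(vmIds.subList(currIndex, currIndex + VM_DETAILS_BATCH_SIZE));
            currIndex += VM_DETAILS_BATCH_SIZE;
        }
        if (currIndex < vmIds.size()) {
            query.run(vmIds.subList(currIndex, vmIds.size()));
        }
    }
}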

View File

@ -29,7 +29,6 @@ import javax.annotation.PostConstruct;
import javax.ejb.Local;
import javax.inject.Inject;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.host.HostVO;
@ -66,7 +65,6 @@ import com.cloud.vm.VirtualMachine.Type;
@Local(value = {VMInstanceDao.class})
public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implements VMInstanceDao {
public static final Logger s_logger = Logger.getLogger(VMInstanceDaoImpl.class);
private static final int MAX_CONSECUTIVE_SAME_STATE_UPDATE_COUNT = 3;
protected SearchBuilder<VMInstanceVO> VMClusterSearch;
@ -439,8 +437,8 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
@Override
public boolean updateState(State oldState, Event event, State newState, VirtualMachine vm, Object opaque) {
if (newState == null) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("There's no way to transition from old state: " + oldState.toString() + " event: " + event.toString());
if (logger.isDebugEnabled()) {
logger.debug("There's no way to transition from old state: " + oldState.toString() + " event: " + event.toString());
}
return false;
}
@ -479,7 +477,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
if (result == 0) {
VMInstanceVO vo = findByIdIncludingRemoved(vm.getId());
if (s_logger.isDebugEnabled()) {
if (logger.isDebugEnabled()) {
if (vo != null) {
StringBuilder str = new StringBuilder("Unable to update ").append(vo.toString());
str.append(": DB Data={Host=").append(vo.getHostId()).append("; State=").append(vo.getState().toString()).append("; updated=").append(vo.getUpdated())
@ -488,16 +486,16 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
.append("; time=").append(vo.getUpdateTime());
str.append("} Stale Data: {Host=").append(oldHostId).append("; State=").append(oldState).append("; updated=").append(oldUpdated).append("; time=")
.append(oldUpdateDate).append("}");
s_logger.debug(str.toString());
logger.debug(str.toString());
} else {
s_logger.debug("Unable to update the vm id=" + vm.getId() + "; the vm either doesn't exist or already removed");
logger.debug("Unable to update the vm id=" + vm.getId() + "; the vm either doesn't exist or already removed");
}
}
if (vo != null && vo.getState() == newState) {
// allow for concurrent update if target state has already been matched
s_logger.debug("VM " + vo.getInstanceName() + " state has been already been updated to " + newState);
logger.debug("VM " + vo.getInstanceName() + " state has been already been updated to " + newState);
return true;
}
}
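
updateState above is an optimistic, compare-and-swap style transition: the UPDATE is constrained on the previously read state and update counter, a result of 0 means another writer won the race, the row is then re-read only to log stale versus current data, and a concurrent arrival at the same target state is accepted as success. A reduced sketch of that decision flow (the store interface is a stand-in for the generated DAO update):

import org.apache.log4j.Logger;

class OptimisticStateUpdateSketch<S> {
    private static final Logger logger = Logger.getLogger(OptimisticStateUpdateSketch.class);

    interface Store<S> {
        // UPDATE ... WHERE id = ? AND state = ? AND updated = ?; returns rows changed
        int update(long id, S oldState, long oldUpdated, S newState);
        S currentState(long id); // re-read; null if the row no longer exists
    }

    boolean transition(Store<S> store, long id, S oldState, long oldUpdated, S newState) {
        if (store.update(id, oldState, oldUpdated, newState) > 0) {
            return true;
        }
        S current = store.currentState(id);
        if (current == null) {
            logger.debug("Unable to update id=" + id + "; the row doesn't exist or has been removed");
            return false;
        }
        if (current.equals(newState)) {
            // Another writer already reached the target state; treat as success.
            logger.debug("State already updated to " + newState + " by a concurrent writer");
            return true;
        }
        logger.debug("Stale data for id=" + id + ": expected " + oldState + ", found " + current);
        return false;
    }
}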

View File

@ -22,7 +22,6 @@ import java.util.List;
import javax.ejb.Local;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.utils.db.GenericDaoBase;
@ -38,7 +37,6 @@ import com.cloud.vm.snapshot.VMSnapshotVO;
@Component
@Local(value = {VMSnapshotDao.class})
public class VMSnapshotDaoImpl extends GenericDaoBase<VMSnapshotVO, Long> implements VMSnapshotDao {
private static final Logger s_logger = Logger.getLogger(VMSnapshotDaoImpl.class);
private final SearchBuilder<VMSnapshotVO> SnapshotSearch;
private final SearchBuilder<VMSnapshotVO> ExpungingSnapshotSearch;
private final SearchBuilder<VMSnapshotVO> SnapshotStatusSearch;
@ -145,7 +143,7 @@ public class VMSnapshotDaoImpl extends GenericDaoBase<VMSnapshotVO, Long> implem
builder.set(vo, "updated", new Date());
int rows = update((VMSnapshotVO)vo, sc);
if (rows == 0 && s_logger.isDebugEnabled()) {
if (rows == 0 && logger.isDebugEnabled()) {
VMSnapshotVO dbVol = findByIdIncludingRemoved(vo.getId());
if (dbVol != null) {
StringBuilder str = new StringBuilder("Unable to update ").append(vo.toString());
@ -178,7 +176,7 @@ public class VMSnapshotDaoImpl extends GenericDaoBase<VMSnapshotVO, Long> implem
.append("; updatedTime=")
.append(oldUpdatedTime);
} else {
s_logger.debug("Unable to update VM snapshot: id=" + vo.getId() + ", as there is no such snapshot exists in the database anymore");
logger.debug("Unable to update VM snapshot: id=" + vo.getId() + ", as there is no such snapshot exists in the database anymore");
}
}
return rows > 0;

View File

@ -23,7 +23,6 @@ import javax.annotation.PostConstruct;
import javax.ejb.Local;
import javax.inject.Inject;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import org.apache.cloudstack.engine.cloud.entity.api.db.VMEntityVO;
@ -39,7 +38,6 @@ import com.cloud.utils.db.TransactionLegacy;
@Local(value = {VMEntityDao.class})
public class VMEntityDaoImpl extends GenericDaoBase<VMEntityVO, Long> implements VMEntityDao {
public static final Logger s_logger = Logger.getLogger(VMEntityDaoImpl.class);
@Inject
protected VMReservationDao _vmReservationDao;

View File

@ -18,7 +18,6 @@ package org.apache.cloudstack.region.dao;
import javax.ejb.Local;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import org.apache.cloudstack.region.RegionVO;
@ -30,7 +29,6 @@ import com.cloud.utils.db.SearchCriteria;
@Component
@Local(value = {RegionDao.class})
public class RegionDaoImpl extends GenericDaoBase<RegionVO, Integer> implements RegionDao {
private static final Logger s_logger = Logger.getLogger(RegionDaoImpl.class);
protected SearchBuilder<RegionVO> NameSearch;
protected SearchBuilder<RegionVO> AllFieldsSearch;

View File

@ -26,7 +26,6 @@ import java.util.Map;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import org.apache.log4j.Logger;
import com.cloud.agent.AgentManager;
import com.cloud.agent.Listener;
@ -60,7 +59,6 @@ import com.cloud.utils.fsm.NoTransitionException;
import com.cloud.utils.fsm.StateMachine2;
public class DirectAgentManagerSimpleImpl extends ManagerBase implements AgentManager {
private static final Logger logger = Logger.getLogger(DirectAgentManagerSimpleImpl.class);
private final Map<Long, ServerResource> hostResourcesMap = new HashMap<Long, ServerResource>();
@Inject
HostDao hostDao;

View File

@ -24,7 +24,6 @@ import java.util.Map;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import org.apache.log4j.Logger;
import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority;
import org.apache.cloudstack.engine.subsystem.api.storage.VMSnapshotOptions;
@ -71,7 +70,6 @@ import com.cloud.vm.snapshot.VMSnapshotVO;
import com.cloud.vm.snapshot.dao.VMSnapshotDao;
public class DefaultVMSnapshotStrategy extends ManagerBase implements VMSnapshotStrategy {
private static final Logger s_logger = Logger.getLogger(DefaultVMSnapshotStrategy.class);
@Inject
VMSnapshotHelper vmSnapshotHelper;
@Inject
@ -148,7 +146,7 @@ public class DefaultVMSnapshotStrategy extends ManagerBase implements VMSnapshot
answer = (CreateVMSnapshotAnswer)agentMgr.send(hostId, ccmd);
if (answer != null && answer.getResult()) {
processAnswer(vmSnapshotVO, userVm, answer, hostId);
s_logger.debug("Create vm snapshot " + vmSnapshot.getName() + " succeeded for vm: " + userVm.getInstanceName());
logger.debug("Create vm snapshot " + vmSnapshot.getName() + " succeeded for vm: " + userVm.getInstanceName());
result = true;
for (VolumeObjectTO volumeTo : answer.getVolumeTOs()) {
@ -159,21 +157,21 @@ public class DefaultVMSnapshotStrategy extends ManagerBase implements VMSnapshot
String errMsg = "Creating VM snapshot: " + vmSnapshot.getName() + " failed";
if (answer != null && answer.getDetails() != null)
errMsg = errMsg + " due to " + answer.getDetails();
s_logger.error(errMsg);
logger.error(errMsg);
throw new CloudRuntimeException(errMsg);
}
} catch (OperationTimedoutException e) {
s_logger.debug("Creating VM snapshot: " + vmSnapshot.getName() + " failed: " + e.toString());
logger.debug("Creating VM snapshot: " + vmSnapshot.getName() + " failed: " + e.toString());
throw new CloudRuntimeException("Creating VM snapshot: " + vmSnapshot.getName() + " failed: " + e.toString());
} catch (AgentUnavailableException e) {
s_logger.debug("Creating VM snapshot: " + vmSnapshot.getName() + " failed", e);
logger.debug("Creating VM snapshot: " + vmSnapshot.getName() + " failed", e);
throw new CloudRuntimeException("Creating VM snapshot: " + vmSnapshot.getName() + " failed: " + e.toString());
} finally {
if (!result) {
try {
vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.OperationFailed);
} catch (NoTransitionException e1) {
s_logger.error("Cannot set vm snapshot state due to: " + e1.getMessage());
logger.error("Cannot set vm snapshot state due to: " + e1.getMessage());
}
}
}
@ -186,7 +184,7 @@ public class DefaultVMSnapshotStrategy extends ManagerBase implements VMSnapshot
try {
vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.ExpungeRequested);
} catch (NoTransitionException e) {
s_logger.debug("Failed to change vm snapshot state with event ExpungeRequested");
logger.debug("Failed to change vm snapshot state with event ExpungeRequested");
throw new CloudRuntimeException("Failed to change vm snapshot state with event ExpungeRequested: " + e.getMessage());
}
@ -214,7 +212,7 @@ public class DefaultVMSnapshotStrategy extends ManagerBase implements VMSnapshot
return true;
} else {
String errMsg = (answer == null) ? null : answer.getDetails();
s_logger.error("Delete vm snapshot " + vmSnapshot.getName() + " of vm " + userVm.getInstanceName() + " failed due to " + errMsg);
logger.error("Delete vm snapshot " + vmSnapshot.getName() + " of vm " + userVm.getInstanceName() + " failed due to " + errMsg);
throw new CloudRuntimeException("Delete vm snapshot " + vmSnapshot.getName() + " of vm " + userVm.getInstanceName() + " failed due to " + errMsg);
}
} catch (OperationTimedoutException e) {
@ -247,7 +245,7 @@ public class DefaultVMSnapshotStrategy extends ManagerBase implements VMSnapshot
});
} catch (Exception e) {
String errMsg = "Error while process answer: " + as.getClass() + " due to " + e.getMessage();
s_logger.error(errMsg, e);
logger.error(errMsg, e);
throw new CloudRuntimeException(errMsg);
}
}
@ -367,21 +365,21 @@ public class DefaultVMSnapshotStrategy extends ManagerBase implements VMSnapshot
String errMsg = "Revert VM: " + userVm.getInstanceName() + " to snapshot: " + vmSnapshotVO.getName() + " failed";
if (answer != null && answer.getDetails() != null)
errMsg = errMsg + " due to " + answer.getDetails();
s_logger.error(errMsg);
logger.error(errMsg);
throw new CloudRuntimeException(errMsg);
}
} catch (OperationTimedoutException e) {
s_logger.debug("Failed to revert vm snapshot", e);
logger.debug("Failed to revert vm snapshot", e);
throw new CloudRuntimeException(e.getMessage());
} catch (AgentUnavailableException e) {
s_logger.debug("Failed to revert vm snapshot", e);
logger.debug("Failed to revert vm snapshot", e);
throw new CloudRuntimeException(e.getMessage());
} finally {
if (!result) {
try {
vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.OperationFailed);
} catch (NoTransitionException e1) {
s_logger.error("Cannot set vm snapshot state due to: " + e1.getMessage());
logger.error("Cannot set vm snapshot state due to: " + e1.getMessage());
}
}
}
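
Both the create and revert paths in DefaultVMSnapshotStrategy share one control shape: send a command to the host agent, treat a null or unsuccessful Answer as an error, wrap agent failures in a runtime exception, and in a finally block roll the snapshot state machine back with OperationFailed when nothing succeeded. A sketch of that shape (the agent, answer and state-machine types below are simplified stand-ins for CloudStack's own):

import org.apache.log4j.Logger;

class SnapshotOpSketch {
    private static final Logger logger = Logger.getLogger(SnapshotOpSketch.class);

    static class AgentException extends Exception {
        AgentException(String msg) { super(msg); }
    }

    interface Answer { boolean getResult(); String getDetails(); }
    interface Agent { Answer send(long hostId, Object cmd) throws AgentException; }
    interface StateMachine { void transit(String event) throws Exception; }

    boolean run(Agent agent, long hostId, Object cmd, StateMachine fsm) {
        boolean result = false;
        try {
            Answer answer = agent.send(hostId, cmd);
            if (answer == null || !answer.getResult()) {
                String errMsg = "Snapshot operation failed"
                        + (answer != null && answer.getDetails() != null ? " due to " + answer.getDetails() : "");
                logger.error(errMsg);
                throw new RuntimeException(errMsg);
            }
            result = true;
            return true;
        } catch (AgentException e) {
            // Timeouts and unavailable agents are logged, then surfaced to the caller.
            logger.debug("Snapshot operation failed: " + e.toString());
            throw new RuntimeException(e.getMessage(), e);
        } finally {
            if (!result) {
                try {
                    // Mirror of vmSnapshotStateTransitTo(..., OperationFailed) above.
                    fsm.transit("OperationFailed");
                } catch (Exception e1) {
                    logger.error("Cannot set vm snapshot state due to: " + e1.getMessage());
                }
            }
        }
    }
}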

View File

@ -28,7 +28,6 @@ import javax.inject.Inject;
import javax.naming.ConfigurationException;
import com.cloud.storage.Storage;
import org.apache.log4j.Logger;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
@ -54,7 +53,6 @@ import com.cloud.vm.DiskProfile;
import com.cloud.vm.VirtualMachineProfile;
public abstract class AbstractStoragePoolAllocator extends AdapterBase implements StoragePoolAllocator {
private static final Logger s_logger = Logger.getLogger(AbstractStoragePoolAllocator.class);
@Inject
StorageManager storageMgr;
protected @Inject
@ -116,8 +114,8 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
}
List<Long> poolIdsByCapacity = _capacityDao.orderHostsByFreeCapacity(clusterId, capacityType);
if (s_logger.isDebugEnabled()) {
s_logger.debug("List of pools in descending order of free capacity: "+ poolIdsByCapacity);
if (logger.isDebugEnabled()) {
logger.debug("List of pools in descending order of free capacity: "+ poolIdsByCapacity);
}
//now filter the given list of Pools by this ordered list
@ -146,8 +144,8 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
Long clusterId = plan.getClusterId();
List<Long> poolIdsByVolCount = _volumeDao.listPoolIdsByVolumeCount(dcId, podId, clusterId, account.getAccountId());
if (s_logger.isDebugEnabled()) {
s_logger.debug("List of pools in ascending order of number of volumes for account id: " + account.getAccountId() + " is: " + poolIdsByVolCount);
if (logger.isDebugEnabled()) {
logger.debug("List of pools in ascending order of number of volumes for account id: " + account.getAccountId() + " is: " + poolIdsByVolCount);
}
// now filter the given list of Pools by this ordered list
@ -189,12 +187,12 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
protected boolean filter(ExcludeList avoid, StoragePool pool, DiskProfile dskCh, DeploymentPlan plan) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Checking if storage pool is suitable, name: " + pool.getName() + " ,poolId: " + pool.getId());
if (logger.isDebugEnabled()) {
logger.debug("Checking if storage pool is suitable, name: " + pool.getName() + " ,poolId: " + pool.getId());
}
if (avoid.shouldAvoid(pool)) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("StoragePool is in avoid set, skipping this pool");
if (logger.isDebugEnabled()) {
logger.debug("StoragePool is in avoid set, skipping this pool");
}
return false;
}
@ -203,14 +201,14 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
if (clusterId != null) {
ClusterVO cluster = _clusterDao.findById(clusterId);
if (!(cluster.getHypervisorType() == dskCh.getHypervisorType())) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("StoragePool's Cluster does not have required hypervisorType, skipping this pool");
if (logger.isDebugEnabled()) {
logger.debug("StoragePool's Cluster does not have required hypervisorType, skipping this pool");
}
return false;
}
} else if (pool.getHypervisor() != null && !pool.getHypervisor().equals(HypervisorType.Any) && !(pool.getHypervisor() == dskCh.getHypervisorType())) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("StoragePool does not have required hypervisorType, skipping this pool");
if (logger.isDebugEnabled()) {
logger.debug("StoragePool does not have required hypervisorType, skipping this pool");
}
return false;
}
@ -235,13 +233,13 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
//LXC ROOT disks supports NFS and local storage pools only
if(!(Storage.StoragePoolType.NetworkFilesystem.equals(poolType) ||
Storage.StoragePoolType.Filesystem.equals(poolType)) ){
s_logger.debug("StoragePool does not support LXC ROOT disk, skipping this pool");
logger.debug("StoragePool does not support LXC ROOT disk, skipping this pool");
return false;
}
} else if (Volume.Type.DATADISK.equals(volType)){
//LXC DATA disks supports RBD storage pool only
if(!Storage.StoragePoolType.RBD.equals(poolType)){
s_logger.debug("StoragePool does not support LXC DATA disk, skipping this pool");
logger.debug("StoragePool does not support LXC DATA disk, skipping this pool");
return false;
}
}
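
The isDebugEnabled() and isTraceEnabled() guards preserved throughout these allocator hunks exist to skip building the log message when the level is disabled; only the logger reference changed in this commit. A minimal illustration of the idiom with the log4j 1.x API used above:

import org.apache.log4j.Logger;

class GuardedLoggingSketch {
    private static final Logger logger = Logger.getLogger(GuardedLoggingSketch.class);

    void filterExample(String poolName, long poolId) {
        // Without the guard, the concatenated string is built even when
        // DEBUG is off; with it, disabled levels cost one boolean check.
        if (logger.isDebugEnabled()) {
            logger.debug("Checking if storage pool is suitable, name: " + poolName + ", poolId: " + poolId);
        }
    }
}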

View File

@ -23,7 +23,6 @@ import javax.ejb.Local;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import org.apache.log4j.Logger;
import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
@ -38,7 +37,6 @@ import com.cloud.vm.VirtualMachineProfile;
@Local(value = StoragePoolAllocator.class)
public class GarbageCollectingStoragePoolAllocator extends AbstractStoragePoolAllocator {
private static final Logger s_logger = Logger.getLogger(GarbageCollectingStoragePoolAllocator.class);
StoragePoolAllocator _firstFitStoragePoolAllocator;
StoragePoolAllocator _localStoragePoolAllocator;
@ -50,9 +48,9 @@ public class GarbageCollectingStoragePoolAllocator extends AbstractStoragePoolAl
@Override
public List<StoragePool> select(DiskProfile dskCh, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) {
s_logger.debug("GarbageCollectingStoragePoolAllocator looking for storage pool");
logger.debug("GarbageCollectingStoragePoolAllocator looking for storage pool");
if (!_storagePoolCleanupEnabled) {
s_logger.debug("Storage pool cleanup is not enabled, so GarbageCollectingStoragePoolAllocator is being skipped.");
logger.debug("Storage pool cleanup is not enabled, so GarbageCollectingStoragePoolAllocator is being skipped.");
return null;
}

View File

@ -28,7 +28,6 @@ import javax.naming.ConfigurationException;
import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.capacity.dao.CapacityDao;
@ -47,7 +46,6 @@ import com.cloud.vm.dao.VMInstanceDao;
@Component
@Local(value = StoragePoolAllocator.class)
public class LocalStoragePoolAllocator extends AbstractStoragePoolAllocator {
private static final Logger s_logger = Logger.getLogger(LocalStoragePoolAllocator.class);
@Inject
StoragePoolHostDao _poolHostDao;
@ -64,18 +62,18 @@ public class LocalStoragePoolAllocator extends AbstractStoragePoolAllocator {
@Override
protected List<StoragePool> select(DiskProfile dskCh, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) {
s_logger.debug("LocalStoragePoolAllocator trying to find storage pool to fit the vm");
logger.debug("LocalStoragePoolAllocator trying to find storage pool to fit the vm");
if (!dskCh.useLocalStorage()) {
return null;
}
if (s_logger.isTraceEnabled()) {
if (logger.isTraceEnabled()) {
// Log the pools details that are ignored because they are in disabled state
List<StoragePoolVO> disabledPools = _storagePoolDao.findDisabledPoolsByScope(plan.getDataCenterId(), plan.getPodId(), plan.getClusterId(), ScopeType.HOST);
if (disabledPools != null && !disabledPools.isEmpty()) {
for (StoragePoolVO pool : disabledPools) {
s_logger.trace("Ignoring pool " + pool + " as it is in disabled state.");
logger.trace("Ignoring pool " + pool + " as it is in disabled state.");
}
}
}
@ -89,7 +87,7 @@ public class LocalStoragePoolAllocator extends AbstractStoragePoolAllocator {
if (pool != null && pool.isLocal()) {
StoragePool storagePool = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(pool.getId());
if (filter(avoid, storagePool, dskCh, plan)) {
s_logger.debug("Found suitable local storage pool " + pool.getId() + ", adding to list");
logger.debug("Found suitable local storage pool " + pool.getId() + ", adding to list");
suitablePools.add(storagePool);
} else {
avoid.addPool(pool.getId());
@ -128,8 +126,8 @@ public class LocalStoragePoolAllocator extends AbstractStoragePoolAllocator {
}
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("LocalStoragePoolAllocator returning " + suitablePools.size() + " suitable storage pools");
if (logger.isDebugEnabled()) {
logger.debug("LocalStoragePoolAllocator returning " + suitablePools.size() + " suitable storage pools");
}
return suitablePools;

View File

@ -23,7 +23,6 @@ import java.util.Map;
import javax.inject.Inject;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
@ -41,7 +40,6 @@ import com.cloud.vm.VirtualMachineProfile;
@Component
public class ZoneWideStoragePoolAllocator extends AbstractStoragePoolAllocator {
private static final Logger s_logger = Logger.getLogger(ZoneWideStoragePoolAllocator.class);
@Inject
PrimaryDataStoreDao _storagePoolDao;
@Inject
@ -50,18 +48,18 @@ public class ZoneWideStoragePoolAllocator extends AbstractStoragePoolAllocator {
@Override
protected List<StoragePool> select(DiskProfile dskCh, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) {
s_logger.debug("ZoneWideStoragePoolAllocator to find storage pool");
logger.debug("ZoneWideStoragePoolAllocator to find storage pool");
if (dskCh.useLocalStorage()) {
return null;
}
if (s_logger.isTraceEnabled()) {
if (logger.isTraceEnabled()) {
// Log the pools details that are ignored because they are in disabled state
List<StoragePoolVO> disabledPools = _storagePoolDao.findDisabledPoolsByScope(plan.getDataCenterId(), null, null, ScopeType.ZONE);
if (disabledPools != null && !disabledPools.isEmpty()) {
for (StoragePoolVO pool : disabledPools) {
s_logger.trace("Ignoring pool " + pool + " as it is in disabled state.");
logger.trace("Ignoring pool " + pool + " as it is in disabled state.");
}
}
}
@ -114,8 +112,8 @@ public class ZoneWideStoragePoolAllocator extends AbstractStoragePoolAllocator {
long dcId = plan.getDataCenterId();
List<Long> poolIdsByVolCount = _volumeDao.listZoneWidePoolIdsByVolumeCount(dcId, account.getAccountId());
if (s_logger.isDebugEnabled()) {
s_logger.debug("List of pools in ascending order of number of volumes for account id: " + account.getAccountId() + " is: " + poolIdsByVolCount);
if (logger.isDebugEnabled()) {
logger.debug("List of pools in ascending order of number of volumes for account id: " + account.getAccountId() + " is: " + poolIdsByVolCount);
}
// now filter the given list of Pools by this ordered list

View File

@ -30,7 +30,6 @@ import java.util.concurrent.CopyOnWriteArrayList;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import org.apache.cloudstack.api.response.StorageProviderResponse;
@ -48,7 +47,6 @@ import com.cloud.utils.component.Registry;
@Component
public class DataStoreProviderManagerImpl extends ManagerBase implements DataStoreProviderManager, Registry<DataStoreProvider> {
private static final Logger s_logger = Logger.getLogger(DataStoreProviderManagerImpl.class);
List<DataStoreProvider> providers;
protected Map<String, DataStoreProvider> providerMap = new ConcurrentHashMap<String, DataStoreProvider>();
@ -123,18 +121,18 @@ public class DataStoreProviderManagerImpl extends ManagerBase implements DataSto
String providerName = provider.getName();
if (providerMap.get(providerName) != null) {
s_logger.debug("Did not register data store provider, provider name: " + providerName + " is not unique");
logger.debug("Did not register data store provider, provider name: " + providerName + " is not unique");
return false;
}
s_logger.debug("registering data store provider:" + provider.getName());
logger.debug("registering data store provider:" + provider.getName());
providerMap.put(providerName, provider);
try {
boolean registrationResult = provider.configure(copyParams);
if (!registrationResult) {
providerMap.remove(providerName);
s_logger.debug("Failed to register data store provider: " + providerName);
logger.debug("Failed to register data store provider: " + providerName);
return false;
}
@ -146,7 +144,7 @@ public class DataStoreProviderManagerImpl extends ManagerBase implements DataSto
imageStoreProviderMgr.registerDriver(provider.getName(), (ImageStoreDriver)provider.getDataStoreDriver());
}
} catch (Exception e) {
s_logger.debug("configure provider failed", e);
logger.debug("configure provider failed", e);
providerMap.remove(providerName);
return false;
}
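
registerProvider above keeps providerMap consistent by inserting the provider first and removing it again on any configure() failure or exception, so a half-configured provider is never left visible in the registry. A condensed sketch of that insert-then-roll-back registration (the provider type and parameters are simplified):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.log4j.Logger;

class ProviderRegistrySketch {
    private static final Logger logger = Logger.getLogger(ProviderRegistrySketch.class);
    private final Map<String, Provider> providerMap = new ConcurrentHashMap<>();

    interface Provider {
        String getName();
        boolean configure(Map<String, Object> params) throws Exception;
    }

    boolean register(Provider provider, Map<String, Object> params) {
        String name = provider.getName();
        if (providerMap.get(name) != null) {
            logger.debug("Did not register data store provider, provider name: " + name + " is not unique");
            return false;
        }
        providerMap.put(name, provider);
        try {
            if (!provider.configure(params)) {
                providerMap.remove(name); // roll back the optimistic insert
                logger.debug("Failed to register data store provider: " + name);
                return false;
            }
        } catch (Exception e) {
            logger.debug("configure provider failed", e);
            providerMap.remove(name);
            return false;
        }
        return true;
    }
}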

View File

@ -21,7 +21,6 @@ import java.util.Map;
import javax.naming.ConfigurationException;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore;
@ -36,7 +35,6 @@ import com.cloud.utils.db.UpdateBuilder;
@Component
public class ObjectInDataStoreDaoImpl extends GenericDaoBase<ObjectInDataStoreVO, Long> implements ObjectInDataStoreDao {
private static final Logger s_logger = Logger.getLogger(ObjectInDataStoreDaoImpl.class);
private SearchBuilder<ObjectInDataStoreVO> updateStateSearch;
@Override
@ -69,7 +67,7 @@ public class ObjectInDataStoreDaoImpl extends GenericDaoBase<ObjectInDataStoreVO
builder.set(vo, "updated", new Date());
int rows = update(vo, sc);
if (rows == 0 && s_logger.isDebugEnabled()) {
if (rows == 0 && logger.isDebugEnabled()) {
ObjectInDataStoreVO dbVol = findByIdIncludingRemoved(vo.getId());
if (dbVol != null) {
StringBuilder str = new StringBuilder("Unable to update ").append(vo.toString());
@ -102,7 +100,7 @@ public class ObjectInDataStoreDaoImpl extends GenericDaoBase<ObjectInDataStoreVO
.append("; updatedTime=")
.append(oldUpdatedTime);
} else {
s_logger.debug("Unable to update objectIndatastore: id=" + vo.getId() + ", as there is no such object exists in the database anymore");
logger.debug("Unable to update objectIndatastore: id=" + vo.getId() + ", as there is no such object exists in the database anymore");
}
}
return rows > 0;

View File

@ -25,7 +25,6 @@ import java.util.Map;
import javax.naming.ConfigurationException;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore;
@ -46,7 +45,6 @@ import com.cloud.utils.db.UpdateBuilder;
@Component
public class SnapshotDataStoreDaoImpl extends GenericDaoBase<SnapshotDataStoreVO, Long> implements SnapshotDataStoreDao {
private static final Logger s_logger = Logger.getLogger(SnapshotDataStoreDaoImpl.class);
private SearchBuilder<SnapshotDataStoreVO> updateStateSearch;
private SearchBuilder<SnapshotDataStoreVO> storeSearch;
private SearchBuilder<SnapshotDataStoreVO> destroyedSearch;
@ -140,7 +138,7 @@ public class SnapshotDataStoreDaoImpl extends GenericDaoBase<SnapshotDataStoreVO
builder.set(dataObj, "updated", new Date());
int rows = update(dataObj, sc);
if (rows == 0 && s_logger.isDebugEnabled()) {
if (rows == 0 && logger.isDebugEnabled()) {
SnapshotDataStoreVO dbVol = findByIdIncludingRemoved(dataObj.getId());
if (dbVol != null) {
StringBuilder str = new StringBuilder("Unable to update ").append(dataObj.toString());
@ -173,7 +171,7 @@ public class SnapshotDataStoreDaoImpl extends GenericDaoBase<SnapshotDataStoreVO
.append("; updatedTime=")
.append(oldUpdatedTime);
} else {
s_logger.debug("Unable to update objectIndatastore: id=" + dataObj.getId() + ", as there is no such object exists in the database anymore");
logger.debug("Unable to update objectIndatastore: id=" + dataObj.getId() + ", as there is no such object exists in the database anymore");
}
}
return rows > 0;
@ -234,7 +232,7 @@ public class SnapshotDataStoreDaoImpl extends GenericDaoBase<SnapshotDataStoreVO
}
}
} catch (SQLException e) {
s_logger.debug("Failed to find latest snapshot for volume: " + volumeId + " due to: " + e.toString());
logger.debug("Failed to find latest snapshot for volume: " + volumeId + " due to: " + e.toString());
}
return null;
}
@ -255,7 +253,7 @@ public class SnapshotDataStoreDaoImpl extends GenericDaoBase<SnapshotDataStoreVO
}
}
} catch (SQLException e) {
s_logger.debug("Failed to find oldest snapshot for volume: " + volumeId + " due to: " + e.toString());
logger.debug("Failed to find oldest snapshot for volume: " + volumeId + " due to: " + e.toString());
}
return null;
}
@ -278,7 +276,7 @@ public class SnapshotDataStoreDaoImpl extends GenericDaoBase<SnapshotDataStoreVO
}
}
} catch (SQLException e) {
s_logger.debug("Failed to find parent snapshot: " + e.toString());
logger.debug("Failed to find parent snapshot: " + e.toString());
}
return null;
}
@ -326,14 +324,14 @@ public class SnapshotDataStoreDaoImpl extends GenericDaoBase<SnapshotDataStoreVO
List<SnapshotDataStoreVO> snapshots = listBy(sc);
// create an entry for each record, but with empty install path since the content is not yet on region-wide store yet
if (snapshots != null) {
s_logger.info("Duplicate " + snapshots.size() + " snapshot cache store records to region store");
logger.info("Duplicate " + snapshots.size() + " snapshot cache store records to region store");
for (SnapshotDataStoreVO snap : snapshots) {
SnapshotDataStoreVO snapStore = findByStoreSnapshot(DataStoreRole.Image, storeId, snap.getSnapshotId());
if (snapStore != null) {
s_logger.info("There is already entry for snapshot " + snap.getSnapshotId() + " on region store " + storeId);
logger.info("There is already entry for snapshot " + snap.getSnapshotId() + " on region store " + storeId);
continue;
}
s_logger.info("Persisting an entry for snapshot " + snap.getSnapshotId() + " on region store " + storeId);
logger.info("Persisting an entry for snapshot " + snap.getSnapshotId() + " on region store " + storeId);
SnapshotDataStoreVO ss = new SnapshotDataStoreVO();
ss.setSnapshotId(snap.getSnapshotId());
ss.setDataStoreId(storeId);
@ -377,7 +375,7 @@ public class SnapshotDataStoreDaoImpl extends GenericDaoBase<SnapshotDataStoreVO
sc.setParameters("destroyed", false);
List<SnapshotDataStoreVO> snaps = listBy(sc);
if (snaps != null) {
s_logger.info("Update to cache store role for " + snaps.size() + " entries in snapshot_store_ref");
logger.info("Update to cache store role for " + snaps.size() + " entries in snapshot_store_ref");
for (SnapshotDataStoreVO snap : snaps) {
snap.setRole(DataStoreRole.ImageCache);
update(snap.getId(), snap);

View File

@ -26,7 +26,6 @@ import java.util.Map;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
@ -56,7 +55,6 @@ import com.cloud.utils.exception.CloudRuntimeException;
@Component
public class TemplateDataStoreDaoImpl extends GenericDaoBase<TemplateDataStoreVO, Long> implements TemplateDataStoreDao {
private static final Logger s_logger = Logger.getLogger(TemplateDataStoreDaoImpl.class);
private SearchBuilder<TemplateDataStoreVO> updateStateSearch;
private SearchBuilder<TemplateDataStoreVO> storeSearch;
private SearchBuilder<TemplateDataStoreVO> cacheSearch;
@ -174,7 +172,7 @@ public class TemplateDataStoreDaoImpl extends GenericDaoBase<TemplateDataStoreVO
}
int rows = update(dataObj, sc);
if (rows == 0 && s_logger.isDebugEnabled()) {
if (rows == 0 && logger.isDebugEnabled()) {
TemplateDataStoreVO dbVol = findByIdIncludingRemoved(dataObj.getId());
if (dbVol != null) {
StringBuilder str = new StringBuilder("Unable to update ").append(dataObj.toString());
@ -207,7 +205,7 @@ public class TemplateDataStoreDaoImpl extends GenericDaoBase<TemplateDataStoreVO
.append("; updatedTime=")
.append(oldUpdatedTime);
} else {
s_logger.debug("Unable to update objectIndatastore: id=" + dataObj.getId() + ", as there is no such object exists in the database anymore");
logger.debug("Unable to update objectIndatastore: id=" + dataObj.getId() + ", as there is no such object exists in the database anymore");
}
}
return rows > 0;
@ -457,7 +455,7 @@ public class TemplateDataStoreDaoImpl extends GenericDaoBase<TemplateDataStoreVO
List<TemplateDataStoreVO> tmpls = listBy(sc);
// create an entry for each template record, but with empty install path since the content is not yet on region-wide store yet
if (tmpls != null) {
s_logger.info("Duplicate " + tmpls.size() + " template cache store records to region store");
logger.info("Duplicate " + tmpls.size() + " template cache store records to region store");
for (TemplateDataStoreVO tmpl : tmpls) {
long templateId = tmpl.getTemplateId();
VMTemplateVO template = _tmpltDao.findById(templateId);
@ -465,15 +463,15 @@ public class TemplateDataStoreDaoImpl extends GenericDaoBase<TemplateDataStoreVO
throw new CloudRuntimeException("No template is found for template id: " + templateId);
}
if (template.getTemplateType() == TemplateType.SYSTEM) {
s_logger.info("No need to duplicate system template since it will be automatically downloaded while adding region store");
logger.info("No need to duplicate system template since it will be automatically downloaded while adding region store");
continue;
}
TemplateDataStoreVO tmpStore = findByStoreTemplate(storeId, tmpl.getTemplateId());
if (tmpStore != null) {
s_logger.info("There is already entry for template " + tmpl.getTemplateId() + " on region store " + storeId);
logger.info("There is already entry for template " + tmpl.getTemplateId() + " on region store " + storeId);
continue;
}
s_logger.info("Persisting an entry for template " + tmpl.getTemplateId() + " on region store " + storeId);
logger.info("Persisting an entry for template " + tmpl.getTemplateId() + " on region store " + storeId);
TemplateDataStoreVO ts = new TemplateDataStoreVO();
ts.setTemplateId(tmpl.getTemplateId());
ts.setDataStoreId(storeId);
@ -508,7 +506,7 @@ public class TemplateDataStoreDaoImpl extends GenericDaoBase<TemplateDataStoreVO
sc.setParameters("destroyed", false);
List<TemplateDataStoreVO> tmpls = listBy(sc);
if (tmpls != null) {
s_logger.info("Update to cache store role for " + tmpls.size() + " entries in template_store_ref");
logger.info("Update to cache store role for " + tmpls.size() + " entries in template_store_ref");
for (TemplateDataStoreVO tmpl : tmpls) {
tmpl.setDataStoreRole(DataStoreRole.ImageCache);
update(tmpl.getId(), tmpl);
@ -537,7 +535,7 @@ public class TemplateDataStoreDaoImpl extends GenericDaoBase<TemplateDataStoreVO
txn.commit();
} catch (Exception e) {
txn.rollback();
s_logger.warn("Failed expiring download urls for dcId: " + dcId, e);
logger.warn("Failed expiring download urls for dcId: " + dcId, e);
}
}
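
duplicateCacheRecordsOnRegionStore above is written to be idempotent: system templates are skipped because they are downloaded automatically when a region store is added, an existing (store, template) entry short-circuits the loop with continue, and only then is a fresh row persisted, initially with an empty install path. A sketch of that skip-or-persist loop (the record and DAO types are placeholders):

import java.util.List;
import org.apache.log4j.Logger;

class RegionStoreDuplicatorSketch {
    private static final Logger logger = Logger.getLogger(RegionStoreDuplicatorSketch.class);

    interface Ref { long getTemplateId(); boolean isSystem(); }
    interface Dao {
        Ref findByStoreTemplate(long storeId, long templateId);
        void persistEmptyEntry(long storeId, long templateId);
    }

    void duplicate(List<Ref> cacheRecords, long storeId, Dao dao) {
        logger.info("Duplicate " + cacheRecords.size() + " template cache store records to region store");
        for (Ref tmpl : cacheRecords) {
            if (tmpl.isSystem()) {
                // System templates are re-downloaded when the region store is added.
                continue;
            }
            if (dao.findByStoreTemplate(storeId, tmpl.getTemplateId()) != null) {
                logger.info("There is already an entry for template " + tmpl.getTemplateId() + " on region store " + storeId);
                continue;
            }
            // New row starts with an empty install path; content arrives later.
            dao.persistEmptyEntry(storeId, tmpl.getTemplateId());
        }
    }
}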

View File

@ -25,7 +25,6 @@ import java.util.Map;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
@ -48,7 +47,6 @@ import com.cloud.utils.db.UpdateBuilder;
@Component
public class VolumeDataStoreDaoImpl extends GenericDaoBase<VolumeDataStoreVO, Long> implements VolumeDataStoreDao {
private static final Logger s_logger = Logger.getLogger(VolumeDataStoreDaoImpl.class);
private SearchBuilder<VolumeDataStoreVO> updateStateSearch;
private SearchBuilder<VolumeDataStoreVO> volumeSearch;
private SearchBuilder<VolumeDataStoreVO> storeSearch;
@ -141,7 +139,7 @@ public class VolumeDataStoreDaoImpl extends GenericDaoBase<VolumeDataStoreVO, Lo
}
int rows = update(dataObj, sc);
if (rows == 0 && s_logger.isDebugEnabled()) {
if (rows == 0 && logger.isDebugEnabled()) {
VolumeDataStoreVO dbVol = findByIdIncludingRemoved(dataObj.getId());
if (dbVol != null) {
StringBuilder str = new StringBuilder("Unable to update ").append(dataObj.toString());
@ -174,7 +172,7 @@ public class VolumeDataStoreDaoImpl extends GenericDaoBase<VolumeDataStoreVO, Lo
.append("; updatedTime=")
.append(oldUpdatedTime);
} else {
s_logger.debug("Unable to update objectIndatastore: id=" + dataObj.getId() + ", as there is no such object exists in the database anymore");
logger.debug("Unable to update objectIndatastore: id=" + dataObj.getId() + ", as there is no such object exists in the database anymore");
}
}
return rows > 0;
@ -281,14 +279,14 @@ public class VolumeDataStoreDaoImpl extends GenericDaoBase<VolumeDataStoreVO, Lo
}
// create an entry for each record, but with empty install path since the content is not yet on region-wide store yet
if (vols != null) {
s_logger.info("Duplicate " + vols.size() + " volume cache store records to region store");
logger.info("Duplicate " + vols.size() + " volume cache store records to region store");
for (VolumeDataStoreVO vol : vols) {
VolumeDataStoreVO volStore = findByStoreVolume(storeId, vol.getVolumeId());
if (volStore != null) {
s_logger.info("There is already entry for volume " + vol.getVolumeId() + " on region store " + storeId);
logger.info("There is already entry for volume " + vol.getVolumeId() + " on region store " + storeId);
continue;
}
s_logger.info("Persisting an entry for volume " + vol.getVolumeId() + " on region store " + storeId);
logger.info("Persisting an entry for volume " + vol.getVolumeId() + " on region store " + storeId);
VolumeDataStoreVO vs = new VolumeDataStoreVO();
vs.setVolumeId(vol.getVolumeId());
vs.setDataStoreId(storeId);
@ -337,7 +335,7 @@ public class VolumeDataStoreDaoImpl extends GenericDaoBase<VolumeDataStoreVO, Lo
txn.commit();
} catch (Exception e) {
txn.rollback();
s_logger.warn("Failed expiring download urls for dcId: " + dcId, e);
logger.warn("Failed expiring download urls for dcId: " + dcId, e);
}
}

View File

@ -20,7 +20,6 @@ package org.apache.cloudstack.storage.volume.db;
import java.util.Date;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine;
@ -36,7 +35,6 @@ import com.cloud.utils.db.UpdateBuilder;
@Component
public class TemplatePrimaryDataStoreDaoImpl extends GenericDaoBase<TemplatePrimaryDataStoreVO, Long> implements TemplatePrimaryDataStoreDao {
private static final Logger s_logger = Logger.getLogger(TemplatePrimaryDataStoreDaoImpl.class);
protected final SearchBuilder<TemplatePrimaryDataStoreVO> updateSearchBuilder;
public TemplatePrimaryDataStoreDaoImpl() {
@ -81,7 +79,7 @@ public class TemplatePrimaryDataStoreDaoImpl extends GenericDaoBase<TemplatePrim
builder.set(vo, "lastUpdated", new Date());
int rows = update(vo, sc);
if (rows == 0 && s_logger.isDebugEnabled()) {
if (rows == 0 && logger.isDebugEnabled()) {
TemplatePrimaryDataStoreVO template = findByIdIncludingRemoved(vo.getId());
if (template != null) {
StringBuilder str = new StringBuilder("Unable to update ").append(vo.toString());
@ -114,7 +112,7 @@ public class TemplatePrimaryDataStoreDaoImpl extends GenericDaoBase<TemplatePrim
.append("; updatedTime=")
.append(oldUpdatedTime);
} else {
s_logger.debug("Unable to update template: id=" + vo.getId() + ", as there is no such template exists in the database anymore");
logger.debug("Unable to update template: id=" + vo.getId() + ", as there is no such template exists in the database anymore");
}
}
return rows > 0;

View File

@ -23,7 +23,6 @@ import javax.ejb.Local;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.utils.component.ManagerBase;
@ -31,7 +30,6 @@ import com.cloud.utils.component.ManagerBase;
@Component
@Local(value = {ClusterFenceManager.class})
public class ClusterFenceManagerImpl extends ManagerBase implements ClusterFenceManager, ClusterManagerListener {
private static final Logger s_logger = Logger.getLogger(ClusterFenceManagerImpl.class);
@Inject
ClusterManager _clusterMgr;
@ -52,7 +50,7 @@ public class ClusterFenceManagerImpl extends ManagerBase implements ClusterFence
@Override
public void onManagementNodeIsolated() {
s_logger.error("Received node isolation notification, will perform self-fencing and shut myself down");
logger.error("Received node isolation notification, will perform self-fencing and shut myself down");
System.exit(SELF_FENCING_EXIT_CODE);
}
}

View File

@ -46,7 +46,6 @@ import org.apache.cloudstack.framework.config.ConfigKey;
import org.apache.cloudstack.framework.config.Configurable;
import org.apache.cloudstack.managed.context.ManagedContextRunnable;
import org.apache.cloudstack.utils.identity.ManagementServerNode;
import org.apache.log4j.Logger;
import com.cloud.cluster.dao.ManagementServerHostDao;
import com.cloud.cluster.dao.ManagementServerHostPeerDao;
@ -70,7 +69,6 @@ import com.cloud.utils.net.NetUtils;
@Local(value = {ClusterManager.class})
public class ClusterManagerImpl extends ManagerBase implements ClusterManager, Configurable {
private static final Logger s_logger = Logger.getLogger(ClusterManagerImpl.class);
private static final int EXECUTOR_SHUTDOWN_TIMEOUT = 1000; // 1 second
private static final int DEFAULT_OUTGOING_WORKERS = 5;
@ -167,7 +165,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C
}
for (final ClusterServiceRequestPdu pdu : candidates) {
s_logger.warn("Cancel cluster request PDU to peer: " + strPeer + ", pdu: " + pdu.getJsonPackage());
logger.warn("Cancel cluster request PDU to peer: " + strPeer + ", pdu: " + pdu.getJsonPackage());
synchronized (pdu) {
pdu.notifyAll();
}
@ -251,13 +249,13 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C
try {
peerService = getPeerService(pdu.getDestPeer());
} catch (final RemoteException e) {
s_logger.error("Unable to get cluster service on peer : " + pdu.getDestPeer());
logger.error("Unable to get cluster service on peer : " + pdu.getDestPeer());
}
if (peerService != null) {
try {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Cluster PDU " + getSelfPeerName() + " -> " + pdu.getDestPeer() + ". agent: " + pdu.getAgentId() + ", pdu seq: " +
if (logger.isDebugEnabled()) {
logger.debug("Cluster PDU " + getSelfPeerName() + " -> " + pdu.getDestPeer() + ". agent: " + pdu.getAgentId() + ", pdu seq: " +
pdu.getSequenceId() + ", pdu ack seq: " + pdu.getAckSequenceId() + ", json: " + pdu.getJsonPackage());
}
@ -267,8 +265,8 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C
final String strResult = peerService.execute(pdu);
profiler.stop();
if (s_logger.isDebugEnabled()) {
s_logger.debug("Cluster PDU " + getSelfPeerName() + " -> " + pdu.getDestPeer() + " completed. time: " +
if (logger.isDebugEnabled()) {
logger.debug("Cluster PDU " + getSelfPeerName() + " -> " + pdu.getDestPeer() + " completed. time: " +
profiler.getDurationInMillis() + "ms. agent: " + pdu.getAgentId() + ", pdu seq: " + pdu.getSequenceId() +
", pdu ack seq: " + pdu.getAckSequenceId() + ", json: " + pdu.getJsonPackage());
}
@ -279,15 +277,15 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C
} catch (final RemoteException e) {
invalidatePeerService(pdu.getDestPeer());
if (s_logger.isInfoEnabled()) {
s_logger.info("Exception on remote execution, peer: " + pdu.getDestPeer() + ", iteration: " + i + ", exception message :" +
if (logger.isInfoEnabled()) {
logger.info("Exception on remote execution, peer: " + pdu.getDestPeer() + ", iteration: " + i + ", exception message :" +
e.getMessage());
}
}
}
}
} catch (final Throwable e) {
s_logger.error("Unexcpeted exception: ", e);
logger.error("Unexcpeted exception: ", e);
}
}
}
@ -311,7 +309,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C
requestPdu.notifyAll();
}
} else {
s_logger.warn("Original request has already been cancelled. pdu: " + pdu.getJsonPackage());
logger.warn("Original request has already been cancelled. pdu: " + pdu.getJsonPackage());
}
} else {
String result = _dispatcher.dispatch(pdu);
@ -333,7 +331,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C
}
});
} catch (final Throwable e) {
s_logger.error("Unexcpeted exception: ", e);
logger.error("Unexcpeted exception: ", e);
}
}
}
@ -366,12 +364,12 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C
continue; // Skip myself.
}
try {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Forwarding " + cmds + " to " + peer.getMsid());
if (logger.isDebugEnabled()) {
logger.debug("Forwarding " + cmds + " to " + peer.getMsid());
}
executeAsync(peerName, agentId, cmds, true);
} catch (final Exception e) {
s_logger.warn("Caught exception while talkign to " + peer.getMsid());
logger.warn("Caught exception while talkign to " + peer.getMsid());
}
}
}
@ -388,8 +386,8 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C
@Override
public String execute(final String strPeer, final long agentId, final String cmds, final boolean stopOnError) {
if (s_logger.isDebugEnabled()) {
s_logger.debug(getSelfPeerName() + " -> " + strPeer + "." + agentId + " " + cmds);
if (logger.isDebugEnabled()) {
logger.debug(getSelfPeerName() + " -> " + strPeer + "." + agentId + " " + cmds);
}
final ClusterServiceRequestPdu pdu = new ClusterServiceRequestPdu();
@ -408,8 +406,8 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C
}
}
if (s_logger.isDebugEnabled()) {
s_logger.debug(getSelfPeerName() + " -> " + strPeer + "." + agentId + " completed. result: " + pdu.getResponseResult());
if (logger.isDebugEnabled()) {
logger.debug(getSelfPeerName() + " -> " + strPeer + "." + agentId + " completed. result: " + pdu.getResponseResult());
}
if (pdu.getResponseResult() != null && pdu.getResponseResult().length() > 0) {
@ -438,7 +436,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C
// Note : we don't check duplicates
synchronized (_listeners) {
s_logger.info("register cluster listener " + listener.getClass());
logger.info("register cluster listener " + listener.getClass());
_listeners.add(listener);
}
@ -447,18 +445,18 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C
@Override
public void unregisterListener(final ClusterManagerListener listener) {
synchronized (_listeners) {
s_logger.info("unregister cluster listener " + listener.getClass());
logger.info("unregister cluster listener " + listener.getClass());
_listeners.remove(listener);
}
}
public void notifyNodeJoined(final List<ManagementServerHostVO> nodeList) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Notify management server node join to listeners.");
if (logger.isDebugEnabled()) {
logger.debug("Notify management server node join to listeners.");
for (final ManagementServerHostVO mshost : nodeList) {
s_logger.debug("Joining node, IP: " + mshost.getServiceIP() + ", msid: " + mshost.getMsid());
logger.debug("Joining node, IP: " + mshost.getServiceIP() + ", msid: " + mshost.getMsid());
}
}
@ -472,13 +470,13 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C
}
public void notifyNodeLeft(final List<ManagementServerHostVO> nodeList) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Notify management server node left to listeners.");
if (logger.isDebugEnabled()) {
logger.debug("Notify management server node left to listeners.");
}
for (final ManagementServerHostVO mshost : nodeList) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Leaving node, IP: " + mshost.getServiceIP() + ", msid: " + mshost.getMsid());
if (logger.isDebugEnabled()) {
logger.debug("Leaving node, IP: " + mshost.getServiceIP() + ", msid: " + mshost.getMsid());
}
cancelClusterRequestToPeer(String.valueOf(mshost.getMsid()));
}
@ -493,8 +491,8 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C
}
public void notifyNodeIsolated() {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Notify management server node isolation to listeners");
if (logger.isDebugEnabled()) {
logger.debug("Notify management server node isolation to listeners");
}
synchronized (_listeners) {
@ -549,16 +547,16 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C
profilerHeartbeatUpdate.start();
txn.transitToUserManagedConnection(getHeartbeatConnection());
if (s_logger.isTraceEnabled()) {
s_logger.trace("Cluster manager heartbeat update, id:" + _mshostId);
if (logger.isTraceEnabled()) {
logger.trace("Cluster manager heartbeat update, id:" + _mshostId);
}
_mshostDao.update(_mshostId, _runId, DateUtil.currentGMTTime());
profilerHeartbeatUpdate.stop();
profilerPeerScan.start();
if (s_logger.isTraceEnabled()) {
s_logger.trace("Cluster manager peer-scan, id:" + _mshostId);
if (logger.isTraceEnabled()) {
logger.trace("Cluster manager peer-scan, id:" + _mshostId);
}
if (!_peerScanInited) {
@ -573,18 +571,18 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C
profiler.stop();
if (profiler.getDurationInMillis() >= HeartbeatInterval.value()) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Management server heartbeat takes too long to finish. profiler: " + profiler.toString() + ", profilerHeartbeatUpdate: " +
if (logger.isDebugEnabled()) {
logger.debug("Management server heartbeat takes too long to finish. profiler: " + profiler.toString() + ", profilerHeartbeatUpdate: " +
profilerHeartbeatUpdate.toString() + ", profilerPeerScan: " + profilerPeerScan.toString());
}
}
}
} catch (final CloudRuntimeException e) {
s_logger.error("Runtime DB exception ", e.getCause());
logger.error("Runtime DB exception ", e.getCause());
if (e.getCause() instanceof ClusterInvalidSessionException) {
s_logger.error("Invalid cluster session found, fence it");
logger.error("Invalid cluster session found, fence it");
queueNotification(new ClusterManagerMessage(ClusterManagerMessage.MessageType.nodeIsolated));
}
@ -594,7 +592,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C
} catch (final ActiveFencingException e) {
queueNotification(new ClusterManagerMessage(ClusterManagerMessage.MessageType.nodeIsolated));
} catch (final Throwable e) {
s_logger.error("Unexpected exception in cluster heartbeat", e);
logger.error("Unexpected exception in cluster heartbeat", e);
if (isRootCauseConnectionRelated(e.getCause())) {
invalidHeartbeatConnection();
}
@ -633,7 +631,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C
if (conn != null) {
_heartbeatConnection.reset(conn);
} else {
s_logger.error("DB communication problem detected, fence it");
logger.error("DB communication problem detected, fence it");
queueNotification(new ClusterManagerMessage(ClusterManagerMessage.MessageType.nodeIsolated));
}
// The stand-alone connection does not have to be closed here because there will be another reference to it.
@ -666,11 +664,11 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C
profiler.stop();
if (profiler.getDurationInMillis() > 1000) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Notifying management server join event took " + profiler.getDurationInMillis() + " ms");
if (logger.isDebugEnabled()) {
logger.debug("Notifying management server join event took " + profiler.getDurationInMillis() + " ms");
}
} else {
s_logger.warn("Notifying management server join event took " + profiler.getDurationInMillis() + " ms");
logger.warn("Notifying management server join event took " + profiler.getDurationInMillis() + " ms");
}
}
break;
@ -684,11 +682,11 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C
profiler.stop();
if (profiler.getDurationInMillis() > 1000) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Notifying management server leave event took " + profiler.getDurationInMillis() + " ms");
if (logger.isDebugEnabled()) {
logger.debug("Notifying management server leave event took " + profiler.getDurationInMillis() + " ms");
}
} else {
s_logger.warn("Notifying management server leave event took " + profiler.getDurationInMillis() + " ms");
logger.warn("Notifying management server leave event took " + profiler.getDurationInMillis() + " ms");
}
}
break;
@ -703,7 +701,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C
}
} catch (final Throwable e) {
s_logger.warn("Unexpected exception during cluster notification. ", e);
logger.warn("Unexpected exception during cluster notification. ", e);
}
}
@ -770,18 +768,18 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C
if (orphanList.size() > 0) {
for (final Long orphanMsid : orphanList) {
// construct fake ManagementServerHostVO based on orphan MSID
s_logger.info("Add orphan management server msid found in host table to initial clustering notification, orphan msid: " + orphanMsid);
logger.info("Add orphan management server msid found in host table to initial clustering notification, orphan msid: " + orphanMsid);
inactiveList.add(new ManagementServerHostVO(orphanMsid, 0, "orphan", 0, new Date()));
}
} else {
s_logger.info("We are good, no orphan management server msid in host table is found");
logger.info("We are good, no orphan management server msid in host table is found");
}
if (inactiveList.size() > 0) {
if (s_logger.isInfoEnabled()) {
s_logger.info("Found " + inactiveList.size() + " inactive management server node based on timestamp");
if (logger.isInfoEnabled()) {
logger.info("Found " + inactiveList.size() + " inactive management server node based on timestamp");
for (final ManagementServerHostVO host : inactiveList) {
s_logger.info("management server node msid: " + host.getMsid() + ", name: " + host.getName() + ", service ip: " + host.getServiceIP() +
logger.info("management server node msid: " + host.getMsid() + ", name: " + host.getName() + ", service ip: " + host.getServiceIP() +
", version: " + host.getVersion());
}
}
@ -789,7 +787,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C
final List<ManagementServerHostVO> downHostList = new ArrayList<ManagementServerHostVO>();
for (final ManagementServerHostVO host : inactiveList) {
if (!pingManagementNode(host)) {
s_logger.warn("Management node " + host.getId() + " is detected inactive by timestamp and also not pingable");
logger.warn("Management node " + host.getId() + " is detected inactive by timestamp and also not pingable");
downHostList.add(host);
}
}
@ -798,7 +796,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C
queueNotification(new ClusterManagerMessage(ClusterManagerMessage.MessageType.nodeRemoved, downHostList));
}
} else {
s_logger.info("No inactive management server node found");
logger.info("No inactive management server node found");
}
}
@ -823,7 +821,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C
if (_mshostPeerDao.countStateSeenInPeers(_mshostId, _runId, ManagementServerHost.State.Down) > 0) {
final String msg =
"We have detected that at least one management server peer reports that this management server is down, perform active fencing to avoid split-brain situation";
s_logger.error(msg);
logger.error(msg);
throw new ActiveFencingException(msg);
}
@ -833,24 +831,24 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C
final ManagementServerHostVO current = getInListById(entry.getKey(), currentList);
if (current == null) {
if (entry.getKey().longValue() != _mshostId.longValue()) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Detected management node left, id:" + entry.getKey() + ", nodeIP:" + entry.getValue().getServiceIP());
if (logger.isDebugEnabled()) {
logger.debug("Detected management node left, id:" + entry.getKey() + ", nodeIP:" + entry.getValue().getServiceIP());
}
removedNodeList.add(entry.getValue());
}
} else {
if (current.getRunid() == 0) {
if (entry.getKey().longValue() != _mshostId.longValue()) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Detected management node left because of invalidated session, id:" + entry.getKey() + ", nodeIP:" +
if (logger.isDebugEnabled()) {
logger.debug("Detected management node left because of invalidated session, id:" + entry.getKey() + ", nodeIP:" +
entry.getValue().getServiceIP());
}
invalidatedNodeList.add(entry.getValue());
}
} else {
if (entry.getValue().getRunid() != current.getRunid()) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Detected management node left and rejoined quickly, id:" + entry.getKey() + ", nodeIP:" + entry.getValue().getServiceIP());
if (logger.isDebugEnabled()) {
logger.debug("Detected management node left and rejoined quickly, id:" + entry.getKey() + ", nodeIP:" + entry.getValue().getServiceIP());
}
entry.getValue().setRunid(current.getRunid());
@ -870,7 +868,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C
try {
JmxUtil.unregisterMBean("ClusterManager", "Node " + mshost.getId());
} catch (final Exception e) {
s_logger.warn("Unable to deregiester cluster node from JMX monitoring due to exception " + e.toString());
logger.warn("Unable to deregiester cluster node from JMX monitoring due to exception " + e.toString());
}
}
@ -885,15 +883,15 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C
while (it.hasNext()) {
final ManagementServerHostVO mshost = it.next();
if (!pingManagementNode(mshost)) {
s_logger.warn("Management node " + mshost.getId() + " is detected inactive by timestamp and also not pingable");
logger.warn("Management node " + mshost.getId() + " is detected inactive by timestamp and also not pingable");
_activePeers.remove(mshost.getId());
try {
JmxUtil.unregisterMBean("ClusterManager", "Node " + mshost.getId());
} catch (final Exception e) {
s_logger.warn("Unable to deregiester cluster node from JMX monitoring due to exception " + e.toString());
logger.warn("Unable to deregiester cluster node from JMX monitoring due to exception " + e.toString());
}
} else {
s_logger.info("Management node " + mshost.getId() + " is detected inactive by timestamp but is pingable");
logger.info("Management node " + mshost.getId() + " is detected inactive by timestamp but is pingable");
it.remove();
}
}
@ -908,15 +906,15 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C
if (!_activePeers.containsKey(mshost.getId())) {
_activePeers.put(mshost.getId(), mshost);
if (s_logger.isDebugEnabled()) {
s_logger.debug("Detected management node joined, id:" + mshost.getId() + ", nodeIP:" + mshost.getServiceIP());
if (logger.isDebugEnabled()) {
logger.debug("Detected management node joined, id:" + mshost.getId() + ", nodeIP:" + mshost.getServiceIP());
}
newNodeList.add(mshost);
try {
JmxUtil.registerMBean("ClusterManager", "Node " + mshost.getId(), new ClusterManagerMBeanImpl(this, mshost));
} catch (final Exception e) {
s_logger.warn("Unable to regiester cluster node into JMX monitoring due to exception " + ExceptionUtil.toString(e));
logger.warn("Unable to regiester cluster node into JMX monitoring due to exception " + ExceptionUtil.toString(e));
}
}
}
@ -928,8 +926,8 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C
profiler.stop();
if (profiler.getDurationInMillis() >= HeartbeatInterval.value()) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Peer scan takes too long to finish. profiler: " + profiler.toString() + ", profilerQueryActiveList: " +
if (logger.isDebugEnabled()) {
logger.debug("Peer scan takes too long to finish. profiler: " + profiler.toString() + ", profilerQueryActiveList: " +
profilerQueryActiveList.toString() + ", profilerSyncClusterInfo: " + profilerSyncClusterInfo.toString() + ", profilerInvalidatedNodeList: " +
profilerInvalidatedNodeList.toString() + ", profilerRemovedList: " + profilerRemovedList.toString());
}
@ -948,8 +946,8 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C
@Override
@DB
public boolean start() {
if (s_logger.isInfoEnabled()) {
s_logger.info("Starting Cluster manager, msid : " + _msId);
if (logger.isInfoEnabled()) {
logger.info("Starting Cluster manager, msid : " + _msId);
}
final ManagementServerHostVO mshost = Transaction.execute(new TransactionCallback<ManagementServerHostVO>() {
@ -973,14 +971,14 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C
mshost.setAlertCount(0);
mshost.setState(ManagementServerHost.State.Up);
_mshostDao.persist(mshost);
if (s_logger.isInfoEnabled()) {
s_logger.info("New instance of management server msid " + _msId + ", runId " + _runId + " is being started");
if (logger.isInfoEnabled()) {
logger.info("New instance of management server msid " + _msId + ", runId " + _runId + " is being started");
}
} else {
_mshostDao.update(mshost.getId(), _runId, NetUtils.getHostName(), version, _clusterNodeIP, _currentServiceAdapter.getServicePort(),
DateUtil.currentGMTTime());
if (s_logger.isInfoEnabled()) {
s_logger.info("Management server " + _msId + ", runId " + _runId + " is being started");
if (logger.isInfoEnabled()) {
logger.info("Management server " + _msId + ", runId " + _runId + " is being started");
}
}
@ -989,8 +987,8 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C
});
_mshostId = mshost.getId();
if (s_logger.isInfoEnabled()) {
s_logger.info("Management server (host id : " + _mshostId + ") is being started at " + _clusterNodeIP + ":" + _currentServiceAdapter.getServicePort());
if (logger.isInfoEnabled()) {
logger.info("Management server (host id : " + _mshostId + ") is being started at " + _clusterNodeIP + ":" + _currentServiceAdapter.getServicePort());
}
_mshostPeerDao.clearPeerInfo(_mshostId);
@ -999,8 +997,8 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C
_heartbeatScheduler.scheduleAtFixedRate(getHeartbeatTask(), HeartbeatInterval.value(), HeartbeatInterval.value(), TimeUnit.MILLISECONDS);
_notificationExecutor.submit(getNotificationTask());
if (s_logger.isInfoEnabled()) {
s_logger.info("Cluster manager was started successfully");
if (logger.isInfoEnabled()) {
logger.info("Cluster manager was started successfully");
}
return true;
@ -1009,8 +1007,8 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C
@Override
@DB
public boolean stop() {
if (s_logger.isInfoEnabled()) {
s_logger.info("Stopping Cluster manager, msid : " + _msId);
if (logger.isInfoEnabled()) {
logger.info("Stopping Cluster manager, msid : " + _msId);
}
if (_mshostId != null) {
@ -1028,8 +1026,8 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C
} catch (final InterruptedException e) {
}
if (s_logger.isInfoEnabled()) {
s_logger.info("Cluster manager is stopped");
if (logger.isInfoEnabled()) {
logger.info("Cluster manager is stopped");
}
return true;
@ -1037,8 +1035,8 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C
@Override
public boolean configure(final String name, final Map<String, Object> params) throws ConfigurationException {
if (s_logger.isInfoEnabled()) {
s_logger.info("Start configuring cluster manager : " + name);
if (logger.isInfoEnabled()) {
logger.info("Start configuring cluster manager : " + name);
}
final Properties dbProps = DbProperties.getDbProperties();
@ -1048,8 +1046,8 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C
}
_clusterNodeIP = _clusterNodeIP.trim();
if (s_logger.isInfoEnabled()) {
s_logger.info("Cluster node IP : " + _clusterNodeIP);
if (logger.isInfoEnabled()) {
logger.info("Cluster node IP : " + _clusterNodeIP);
}
if (!NetUtils.isLocalAddress(_clusterNodeIP)) {
@ -1074,8 +1072,8 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C
checkConflicts();
if (s_logger.isInfoEnabled()) {
s_logger.info("Cluster manager is configured.");
if (logger.isInfoEnabled()) {
logger.info("Cluster manager is configured.");
}
return true;
}
@ -1133,7 +1131,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C
final String targetIp = mshost.getServiceIP();
if ("127.0.0.1".equals(targetIp) || "0.0.0.0".equals(targetIp)) {
s_logger.info("ping management node cluster service can not be performed on self");
logger.info("ping management node cluster service can not be performed on self");
return false;
}
@ -1141,7 +1139,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C
while (--retry > 0) {
SocketChannel sch = null;
try {
s_logger.info("Trying to connect to " + targetIp);
logger.info("Trying to connect to " + targetIp);
sch = SocketChannel.open();
sch.configureBlocking(true);
sch.socket().setSoTimeout(5000);
@ -1151,7 +1149,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C
return true;
} catch (final IOException e) {
if (e instanceof ConnectException) {
s_logger.error("Unable to ping management server at " + targetIp + ":" + mshost.getServicePort() + " due to ConnectException", e);
logger.error("Unable to ping management server at " + targetIp + ":" + mshost.getServicePort() + " due to ConnectException", e);
return false;
}
} finally {
@ -1169,7 +1167,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C
}
}
s_logger.error("Unable to ping management server at " + targetIp + ":" + mshost.getServicePort() + " after retries");
logger.error("Unable to ping management server at " + targetIp + ":" + mshost.getServicePort() + " after retries");
return false;
}
@ -1186,25 +1184,25 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C
if ("127.0.0.1".equals(_clusterNodeIP)) {
if (pingManagementNode(peer.getMsid())) {
final String msg = "Detected another management node with localhost IP is already running, please check your cluster configuration";
s_logger.error(msg);
logger.error(msg);
throw new ConfigurationException(msg);
} else {
final String msg =
"Detected another management node with localhost IP is considered as running in DB, however it is not pingable, we will continue cluster initialization with this management server node";
s_logger.info(msg);
logger.info(msg);
}
} else {
if (pingManagementNode(peer.getMsid())) {
final String msg =
"Detected that another management node with the same IP " + peer.getServiceIP() +
" is already running, please check your cluster configuration";
s_logger.error(msg);
logger.error(msg);
throw new ConfigurationException(msg);
} else {
final String msg =
"Detected that another management node with the same IP " + peer.getServiceIP() +
" is considered as running in DB, however it is not pingable, we will continue cluster initialization with this management server node";
s_logger.info(msg);
logger.info(msg);
}
}
}
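
Every file in this commit follows the same pattern as the ClusterManagerImpl diff above: the per-class static s_logger field is deleted and every call site switches to an inherited logger instance. The base-class side of the change (ManagerBase here) is not shown in this excerpt, so what follows is only a minimal sketch of what such an inherited logger can look like under log4j 1.x; the names LoggingBase and ClusterDemo are illustrative, not from the commit.

import org.apache.log4j.Logger;

public abstract class LoggingBase {
    // getClass() resolves to the concrete subclass at runtime, so each
    // subclass keeps logging under its own category, exactly as the old
    // per-class static s_logger fields did.
    protected transient Logger logger = Logger.getLogger(getClass());
}

class ClusterDemo extends LoggingBase {
    void run() {
        if (logger.isDebugEnabled()) {
            logger.debug("logged under category " + getClass().getName());
        }
    }
}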

View File

@ -23,7 +23,6 @@ import java.util.Properties;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import org.apache.log4j.Logger;
import org.apache.cloudstack.framework.config.ConfigDepot;
import com.cloud.cluster.dao.ManagementServerHostDao;
@ -34,7 +33,6 @@ import com.cloud.utils.db.DbProperties;
public class ClusterServiceServletAdapter extends AdapterBase implements ClusterServiceAdapter {
private static final Logger s_logger = Logger.getLogger(ClusterServiceServletAdapter.class);
private static final int DEFAULT_SERVICE_PORT = 9090;
private static final int DEFAULT_REQUEST_TIMEOUT = 300; // 300 seconds
@ -59,7 +57,7 @@ public class ClusterServiceServletAdapter extends AdapterBase implements Cluster
try {
init();
} catch (ConfigurationException e) {
s_logger.error("Unable to init ClusterServiceServletAdapter");
logger.error("Unable to init ClusterServiceServletAdapter");
throw new RemoteException("Unable to init ClusterServiceServletAdapter");
}
@ -75,7 +73,7 @@ public class ClusterServiceServletAdapter extends AdapterBase implements Cluster
try {
init();
} catch (ConfigurationException e) {
s_logger.error("Unable to init ClusterServiceServletAdapter");
logger.error("Unable to init ClusterServiceServletAdapter");
return null;
}
@ -126,7 +124,7 @@ public class ClusterServiceServletAdapter extends AdapterBase implements Cluster
Properties dbProps = DbProperties.getDbProperties();
_clusterServicePort = NumbersUtil.parseInt(dbProps.getProperty("cluster.servlet.port"), DEFAULT_SERVICE_PORT);
if (s_logger.isInfoEnabled())
s_logger.info("Cluster servlet port : " + _clusterServicePort);
if (logger.isInfoEnabled())
logger.info("Cluster servlet port : " + _clusterServicePort);
}
}
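
Note that the rewritten call sites keep their isInfoEnabled()/isDebugEnabled() guards, as in the servlet-port log just above. With log4j 1.x the message string is concatenated before info() is even invoked, so the guard is what skips that work when the level is disabled. A minimal illustration; GuardDemo and its method are made up for the example:

import org.apache.log4j.Logger;

public class GuardDemo {
    private final Logger logger = Logger.getLogger(GuardDemo.class);

    void reportPort(int clusterServicePort) {
        // The concatenation below runs before info() is called, so the
        // guard prevents building the string when INFO is disabled.
        if (logger.isInfoEnabled()) {
            logger.info("Cluster servlet port : " + clusterServicePort);
        }
    }
}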

View File

@ -26,7 +26,6 @@ import java.util.TimeZone;
import javax.ejb.Local;
import org.apache.log4j.Logger;
import com.cloud.cluster.ClusterInvalidSessionException;
import com.cloud.cluster.ManagementServerHost;
@ -43,7 +42,6 @@ import com.cloud.utils.exception.CloudRuntimeException;
@Local(value = {ManagementServerHostDao.class})
public class ManagementServerHostDaoImpl extends GenericDaoBase<ManagementServerHostVO, Long> implements ManagementServerHostDao {
private static final Logger s_logger = Logger.getLogger(ManagementServerHostDaoImpl.class);
private final SearchBuilder<ManagementServerHostVO> MsIdSearch;
private final SearchBuilder<ManagementServerHostVO> ActiveSearch;
@ -100,7 +98,7 @@ public class ManagementServerHostDaoImpl extends GenericDaoBase<ManagementServer
pstmt.executeUpdate();
txn.commit();
} catch (Exception e) {
s_logger.warn("Unexpected exception, ", e);
logger.warn("Unexpected exception, ", e);
throw new RuntimeException(e.getMessage(), e);
}
}
@ -120,7 +118,7 @@ public class ManagementServerHostDaoImpl extends GenericDaoBase<ManagementServer
txn.commit();
return true;
} catch (Exception e) {
s_logger.warn("Unexpected exception, ", e);
logger.warn("Unexpected exception, ", e);
throw new RuntimeException(e.getMessage(), e);
}
}
@ -142,11 +140,11 @@ public class ManagementServerHostDaoImpl extends GenericDaoBase<ManagementServer
txn.commit();
if (count < 1) {
s_logger.info("Invalid cluster session detected, runId " + runid + " is no longer valid");
logger.info("Invalid cluster session detected, runId " + runid + " is no longer valid");
throw new CloudRuntimeException("Invalid cluster session detected, runId " + runid + " is no longer valid", new ClusterInvalidSessionException("runId " + runid + " is no longer valid"));
}
} catch (Exception e) {
s_logger.warn("Unexpected exception, ", e);
logger.warn("Unexpected exception, ", e);
throw new RuntimeException(e.getMessage(), e);
}
}
@ -182,7 +180,7 @@ public class ManagementServerHostDaoImpl extends GenericDaoBase<ManagementServer
changedRows = pstmt.executeUpdate();
txn.commit();
} catch (Exception e) {
s_logger.warn("Unexpected exception, ", e);
logger.warn("Unexpected exception, ", e);
throw new RuntimeException(e.getMessage(), e);
}
@ -223,7 +221,7 @@ public class ManagementServerHostDaoImpl extends GenericDaoBase<ManagementServer
int count = pstmt.executeUpdate();
if (count < 1) {
s_logger.info("Invalid cluster session detected, runId " + runId + " is no longer valid");
logger.info("Invalid cluster session detected, runId " + runId + " is no longer valid");
throw new CloudRuntimeException("Invalid cluster session detected, runId " + runId + " is no longer valid", new ClusterInvalidSessionException("runId " + runId + " is no longer valid"));
}
} catch (SQLException e) {
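
The invalid-session handling in this DAO pairs with the heartbeat code earlier in the diff: the DAO throws CloudRuntimeException with a ClusterInvalidSessionException as its cause, and the heartbeat loop fences the node only when getCause() matches that type. A self-contained sketch of the hand-off, with stand-in exception classes (the real ones live in com.cloud.cluster and com.cloud.utils.exception):

class ClusterInvalidSessionException extends Exception {
    ClusterInvalidSessionException(String msg) { super(msg); }
}

class CloudRuntimeException extends RuntimeException {
    CloudRuntimeException(String msg, Throwable cause) { super(msg, cause); }
}

public class FencingDemo {
    // Mimics the DAO update: zero matched rows means the session is stale.
    static void update(long runId, int rowsMatched) {
        if (rowsMatched < 1) {
            throw new CloudRuntimeException("runId " + runId + " is no longer valid",
                new ClusterInvalidSessionException("runId " + runId + " is no longer valid"));
        }
    }

    public static void main(String[] args) {
        try {
            update(42L, 0);
        } catch (CloudRuntimeException e) {
            // Mirrors the heartbeat handler: fence only on an invalid session.
            if (e.getCause() instanceof ClusterInvalidSessionException) {
                System.out.println("Invalid cluster session found, fence it");
            }
        }
    }
}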

View File

@ -20,7 +20,6 @@ import java.util.List;
import javax.ejb.Local;
import org.apache.log4j.Logger;
import com.cloud.cluster.ManagementServerHost;
import com.cloud.cluster.ManagementServerHostPeerVO;
@ -32,7 +31,6 @@ import com.cloud.utils.db.TransactionLegacy;
@Local(value = {ManagementServerHostPeerDao.class})
public class ManagementServerHostPeerDaoImpl extends GenericDaoBase<ManagementServerHostPeerVO, Long> implements ManagementServerHostPeerDao {
private static final Logger s_logger = Logger.getLogger(ManagementServerHostPeerDaoImpl.class);
private final SearchBuilder<ManagementServerHostPeerVO> ClearPeerSearch;
private final SearchBuilder<ManagementServerHostPeerVO> FindForUpdateSearch;
@ -87,7 +85,7 @@ public class ManagementServerHostPeerDaoImpl extends GenericDaoBase<ManagementSe
}
txn.commit();
} catch (Exception e) {
s_logger.warn("Unexpected exception, ", e);
logger.warn("Unexpected exception, ", e);
txn.rollback();
}
}
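
Unlike the host DAO, the peer DAO above logs the exception and rolls back instead of rethrowing. A plain-JDBC sketch of the same commit-or-rollback discipline; CloudStack's TransactionLegacy manages connections differently, and the mshost_peer table and column names here are assumptions for illustration only:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.SQLException;

public class TxnDemo {
    static void updatePeerState(String jdbcUrl, long peerId, String state) throws SQLException {
        try (Connection conn = DriverManager.getConnection(jdbcUrl)) {
            conn.setAutoCommit(false);
            try (PreparedStatement pstmt = conn.prepareStatement(
                    "UPDATE mshost_peer SET peer_state = ? WHERE id = ?")) {
                pstmt.setString(1, state);
                pstmt.setLong(2, peerId);
                pstmt.executeUpdate();
                conn.commit();
            } catch (SQLException e) {
                conn.rollback(); // never leave a half-applied change behind
                throw e;
            }
        }
    }
}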

View File

@ -26,7 +26,6 @@ import javax.ejb.Local;
import javax.naming.ConfigurationException;
import org.apache.cloudstack.framework.config.impl.ConfigurationVO;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.utils.component.ComponentLifecycle;
@ -41,7 +40,6 @@ import com.cloud.utils.exception.CloudRuntimeException;
@Component
@Local(value = {ConfigurationDao.class})
public class ConfigurationDaoImpl extends GenericDaoBase<ConfigurationVO, String> implements ConfigurationDao {
private static final Logger s_logger = Logger.getLogger(ConfigurationDaoImpl.class);
private Map<String, String> _configs = null;
private boolean _premium;
@ -148,7 +146,7 @@ public class ConfigurationDaoImpl extends GenericDaoBase<ConfigurationVO, String
stmt.executeUpdate();
return true;
} catch (Exception e) {
s_logger.warn("Unable to update Configuration Value", e);
logger.warn("Unable to update Configuration Value", e);
}
return false;
}
@ -165,7 +163,7 @@ public class ConfigurationDaoImpl extends GenericDaoBase<ConfigurationVO, String
return true;
}
} catch (Exception e) {
s_logger.warn("Unable to update Configuration Value", e);
logger.warn("Unable to update Configuration Value", e);
}
return false;
}
@ -199,7 +197,7 @@ public class ConfigurationDaoImpl extends GenericDaoBase<ConfigurationVO, String
}
return returnValue;
} catch (Exception e) {
s_logger.warn("Unable to update Configuration Value", e);
logger.warn("Unable to update Configuration Value", e);
throw new CloudRuntimeException("Unable to initialize configuration variable: " + name);
}

View File

@ -65,7 +65,6 @@ import net.sf.ehcache.Cache;
import net.sf.ehcache.CacheManager;
import net.sf.ehcache.Element;
import org.apache.log4j.Logger;
import com.cloud.utils.DateUtil;
import com.cloud.utils.NumbersUtil;
@ -115,7 +114,6 @@ import com.cloud.utils.net.NetUtils;
**/
@DB
public abstract class GenericDaoBase<T, ID extends Serializable> extends ComponentLifecycleBase implements GenericDao<T, ID>, ComponentMethodInterceptable {
private final static Logger s_logger = Logger.getLogger(GenericDaoBase.class);
protected final static TimeZone s_gmtTimeZone = TimeZone.getTimeZone("GMT");
@ -255,26 +253,26 @@ public abstract class GenericDaoBase<T, ID extends Serializable> extends Compone
_searchEnhancer.setSuperclass(_entityBeanType);
_searchEnhancer.setCallback(new UpdateBuilder(this));
if (s_logger.isTraceEnabled()) {
s_logger.trace("Select SQL: " + _partialSelectSql.first().toString());
s_logger.trace("Remove SQL: " + (_removeSql != null ? _removeSql.first() : "No remove sql"));
s_logger.trace("Select by Id SQL: " + _selectByIdSql);
s_logger.trace("Table References: " + _tables);
s_logger.trace("Insert SQLs:");
if (logger.isTraceEnabled()) {
logger.trace("Select SQL: " + _partialSelectSql.first().toString());
logger.trace("Remove SQL: " + (_removeSql != null ? _removeSql.first() : "No remove sql"));
logger.trace("Select by Id SQL: " + _selectByIdSql);
logger.trace("Table References: " + _tables);
logger.trace("Insert SQLs:");
for (final Pair<String, Attribute[]> insertSql : _insertSqls) {
s_logger.trace(insertSql.first());
logger.trace(insertSql.first());
}
s_logger.trace("Delete SQLs");
logger.trace("Delete SQLs");
for (final Pair<String, Attribute[]> deletSql : _deleteSqls) {
s_logger.trace(deletSql.first());
logger.trace(deletSql.first());
}
s_logger.trace("Collection SQLs");
logger.trace("Collection SQLs");
for (Attribute attr : _ecAttributes) {
EcInfo info = (EcInfo)attr.attache;
s_logger.trace(info.insertSql);
s_logger.trace(info.selectSql);
logger.trace(info.insertSql);
logger.trace(info.selectSql);
}
}
@ -413,7 +411,7 @@ public abstract class GenericDaoBase<T, ID extends Serializable> extends Compone
}
}
if (s_logger.isDebugEnabled() && lock != null) {
if (logger.isDebugEnabled() && lock != null) {
txn.registerLock(pstmt.toString());
}
final ResultSet rs = pstmt.executeQuery();
@ -778,8 +776,8 @@ public abstract class GenericDaoBase<T, ID extends Serializable> extends Compone
}
}
if (s_logger.isTraceEnabled()) {
s_logger.trace("join search statement is " + pstmt);
if (logger.isTraceEnabled()) {
logger.trace("join search statement is " + pstmt);
}
return count;
}
@ -1597,7 +1595,7 @@ public abstract class GenericDaoBase<T, ID extends Serializable> extends Compone
try {
_cache.put(new Element(_idField.get(entity), entity));
} catch (final Exception e) {
s_logger.debug("Can't put it in the cache", e);
logger.debug("Can't put it in the cache", e);
}
}
@ -1619,7 +1617,7 @@ public abstract class GenericDaoBase<T, ID extends Serializable> extends Compone
try {
_cache.put(new Element(_idField.get(entity), entity));
} catch (final Exception e) {
s_logger.debug("Can't put it in the cache", e);
logger.debug("Can't put it in the cache", e);
}
}
@ -1798,7 +1796,7 @@ public abstract class GenericDaoBase<T, ID extends Serializable> extends Compone
final int idle = NumbersUtil.parseInt((String)params.get("cache.time.to.idle"), 300);
_cache = new Cache(getName(), maxElements, false, live == -1, live == -1 ? Integer.MAX_VALUE : live, idle);
cm.addCache(_cache);
s_logger.info("Cache created: " + _cache.toString());
logger.info("Cache created: " + _cache.toString());
} else {
_cache = null;
}
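
The cache setup at the end of GenericDaoBase maps a configured time-to-live of -1 onto an eternal ehcache instance, which is why the TTL argument degrades to Integer.MAX_VALUE. A runnable sketch with the same constructor arguments; the cache name and the concrete values are illustrative, and only "cache.time.to.idle" is a property name taken from the diff:

import net.sf.ehcache.Cache;
import net.sf.ehcache.CacheManager;

public class CacheDemo {
    public static void main(String[] args) {
        int maxElements = 100; // maximum in-memory elements
        int live = -1;         // -1 is treated as "never expire"
        int idle = 300;        // "cache.time.to.idle", in seconds

        CacheManager cm = CacheManager.create();
        // eternal == (live == -1): an eternal cache ignores TTL/TTI.
        Cache cache = new Cache("demo", maxElements, false, live == -1,
            live == -1 ? Integer.MAX_VALUE : live, idle);
        cm.addCache(cache);
        System.out.println("Cache created: " + cache);
    }
}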

Some files were not shown because too many files have changed in this diff